diff --git a/fastreid/evaluation/__init__.py b/fastreid/evaluation/__init__.py
index 32f3faf..5f51f6c 100644
--- a/fastreid/evaluation/__init__.py
+++ b/fastreid/evaluation/__init__.py
@@ -1,8 +1,6 @@
 from .evaluator import DatasetEvaluator, inference_context, inference_on_dataset
-from .rank import evaluate_rank
 from .reid_evaluation import ReidEvaluator
 from .clas_evaluator import ClasEvaluator
-from .roc import evaluate_roc
 from .testing import print_csv_format, verify_results
 
 __all__ = [k for k in globals().keys() if not k.startswith("_")]
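
With these re-exports gone, callers import `evaluate_rank` and `evaluate_roc` from their defining modules, so `import fastreid.evaluation` no longer touches the Cython code path at package-import time. A minimal sketch of the resulting import style (module paths taken from this diff):

```python
# Import directly from the defining modules rather than the package root;
# rank.py / roc.py are only loaded once evaluation actually runs.
from fastreid.evaluation.rank import evaluate_rank
from fastreid.evaluation.roc import evaluate_roc
```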
diff --git a/fastreid/evaluation/rank_cylib/Makefile b/fastreid/evaluation/rank_cylib/Makefile
index 812a558..fa9736a 100644
--- a/fastreid/evaluation/rank_cylib/Makefile
+++ b/fastreid/evaluation/rank_cylib/Makefile
@@ -1,7 +1,6 @@
 all:
 	python3 setup.py build_ext --inplace
 	rm -rf build
-	python3 test_cython.py
 clean:
 	rm -rf build
 	rm -f rank_cy.c *.so
diff --git a/fastreid/evaluation/rank_cylib/__init__.py b/fastreid/evaluation/rank_cylib/__init__.py
index 980c5fe..6316124 100644
--- a/fastreid/evaluation/rank_cylib/__init__.py
+++ b/fastreid/evaluation/rank_cylib/__init__.py
@@ -2,4 +2,19 @@
 """
 @author:  liaoxingyu
 @contact: sherlockliao01@gmail.com
-"""
\ No newline at end of file
+"""
+
+
+def compile_helper():
+    """Compile helper function at runtime. Make sure this
+    is invoked on a single process."""
+    import os
+    import subprocess
+    import sys
+
+    # Build the extension in-place by running `make` in this directory.
+    path = os.path.abspath(os.path.dirname(__file__))
+    ret = subprocess.run(["make", "-C", path])
+    if ret.returncode != 0:
+        print("Building the cython reid evaluation module failed, exiting.")
+        sys.exit(1)
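
For context, a hedged sketch of how `compile_helper` is meant to be driven: try the compiled module first and build on the fly only if the import fails. The `rank_cy`/`evaluate_cy` names come from this diff; the surrounding scaffolding is illustrative, not part of the patch:

```python
# Hypothetical caller, mirroring the ReidEvaluator pattern further down.
try:
    from fastreid.evaluation.rank_cylib.rank_cy import evaluate_cy
except ImportError:
    from fastreid.evaluation.rank_cylib import compile_helper
    compile_helper()  # runs `make -C <rank_cylib dir>`; exits the process on failure
    from fastreid.evaluation.rank_cylib.rank_cy import evaluate_cy
```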
diff --git a/fastreid/evaluation/rank_cylib/test_cython.py b/fastreid/evaluation/rank_cylib/test_cython.py
index 8fd5542..40537cb 100644
--- a/fastreid/evaluation/rank_cylib/test_cython.py
+++ b/fastreid/evaluation/rank_cylib/test_cython.py
@@ -5,8 +5,8 @@ import os.path as osp
 
 sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../../..')
 
-from fastreid.evaluation import evaluate_rank
-from fastreid.evaluation import evaluate_roc
+from fastreid.evaluation.rank import evaluate_rank
+from fastreid.evaluation.roc import evaluate_roc
 
 """
 Test the speed of cython-based evaluation code. The speed improvements
@@ -24,8 +24,8 @@ import sys
 import os.path as osp
 import numpy as np
 sys.path.insert(0, osp.dirname(osp.abspath(__file__)) + '/../../..')
-from fastreid.evaluation import evaluate_rank
-from fastreid.evaluation import evaluate_roc
+from fastreid.evaluation.rank import evaluate_rank
+from fastreid.evaluation.roc import evaluate_roc
 num_q = 30
 num_g = 300
 dim = 512
diff --git a/fastreid/evaluation/reid_evaluation.py b/fastreid/evaluation/reid_evaluation.py
index 921b39b..ee726d2 100644
--- a/fastreid/evaluation/reid_evaluation.py
+++ b/fastreid/evaluation/reid_evaluation.py
@@ -5,6 +5,7 @@
 """
 import copy
 import logging
+import time
 import itertools
 from collections import OrderedDict
 
@@ -17,8 +18,7 @@ from fastreid.utils import comm
 from fastreid.utils.compute_dist import build_dist
 from .evaluator import DatasetEvaluator
 from .query_expansion import aqe
-from .rank import evaluate_rank
-from .roc import evaluate_roc
+from .rank_cylib import compile_helper
 
 logger = logging.getLogger(__name__)
 
@@ -32,6 +32,7 @@ class ReidEvaluator(DatasetEvaluator):
         self._cpu_device = torch.device('cpu')
 
         self._predictions = []
+        self._compile_dependencies()
 
     def reset(self):
         self._predictions = []
@@ -102,6 +103,7 @@ class ReidEvaluator(DatasetEvaluator):
             rerank_dist = build_dist(query_features, gallery_features, metric="jaccard", k1=k1, k2=k2)
             dist = rerank_dist * (1 - lambda_value) + dist * lambda_value
 
+        from .rank import evaluate_rank  # deferred until after the cython build
         cmc, all_AP, all_INP = evaluate_rank(dist, query_pids, gallery_pids, query_camids, gallery_camids)
 
         mAP = np.mean(all_AP)
@@ -113,6 +115,7 @@ class ReidEvaluator(DatasetEvaluator):
         self._results["metric"] = (mAP + cmc[0]) / 2 * 100
 
         if self.cfg.TEST.ROC.ENABLED:
+            from .roc import evaluate_roc
             scores, labels = evaluate_roc(dist, query_pids, gallery_pids, query_camids, gallery_camids)
             fprs, tprs, thres = metrics.roc_curve(labels, scores)
 
@@ -121,3 +124,20 @@ class ReidEvaluator(DatasetEvaluator):
                 self._results["TPR@FPR={:.0e}".format(fpr)] = tprs[ind]
 
         return copy.deepcopy(self._results)
+
+    def _compile_dependencies(self):
+        # Results are only evaluated on rank(0), so the cython evaluation
+        # tool only needs to be compiled on rank(0).
+        if comm.is_main_process():
+            try:
+                from .rank_cylib.rank_cy import evaluate_cy
+            except ImportError:
+                start_time = time.time()
+                logger.info("> compiling reid evaluation cython tool")
+
+                compile_helper()
+
+                logger.info(
+                    ">>> done compiling reid evaluation cython tool. Compilation time: "
+                    "{:.3f} seconds".format(time.time() - start_time))
+        comm.synchronize()
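
`_compile_dependencies` leans on fastreid's `comm` helpers, but the underlying pattern is generic: rank 0 builds, then every rank meets at a barrier so nobody imports the extension before it exists. A standalone sketch using raw `torch.distributed` (assuming a process group is already initialized; `build_once` and `compile_fn` are illustrative names, not part of this patch):

```python
import time

import torch.distributed as dist


def build_once(compile_fn):
    """Run a one-time build step on rank 0, then block all ranks on a
    barrier so no process races ahead and imports a half-built artifact."""
    in_dist_mode = dist.is_available() and dist.is_initialized()
    if not in_dist_mode or dist.get_rank() == 0:
        start = time.time()
        compile_fn()  # e.g. fastreid.evaluation.rank_cylib.compile_helper
        print("build finished in {:.3f} seconds".format(time.time() - start))
    if in_dist_mode:
        dist.barrier()  # remaining ranks wait here until rank 0 finishes
```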