mirror of https://github.com/alibaba/EasyCV.git
move test model to nfs (#222)
parent 7f08eb6b3f
commit 5f75b22438
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8298b88539874b9914b90122575880a80ca0534499e9be9953e17fc177a1c2d2
-size 3421031

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:8570f45c7e642288b23a1c8722ba2b9b40939f1d55c962d13c789157b16edf01
-size 117072344

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:4f2260f781f08466ce04625f8cd14c2d8fed37f9c5c71c17dcf622c0c6e94410
-size 793200649

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:047c8118fc5ca88ba5ae1fab72f2cd6b070501fe3af2f3cba5cfa9a89b44b03e
-size 167287506

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:f572656370050193134ef50e27745cdcdda54986d396ff78e6169608efcdc3c1
-size 254976311

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:163a344e29b965cdb6c6c24e189e84a269580d63237253f359de35e944ec5421
-size 528712836

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:a33e6b1f5623057c6920226767c91a44a072acc27ece5ba24fdeb2a9a1bb2ba2
-size 528548036

@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:94d7df6a4ff3c605916378304b2a00404a23d4965d226a657417061647cb46a6
-size 45361179
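The eight hunks above each delete a Git LFS pointer stub (the three-line version / oid / size file that LFS keeps in the repository in place of the real binary), so the large test checkpoints no longer live in the repo at all. As a rough illustration of what is being removed, a minimal sketch (not part of this commit) that recognizes such pointer files:

# Minimal sketch, not part of this commit: detect a Git LFS pointer stub
# by its three-line "version / oid sha256:... / size N" layout.
import re
from pathlib import Path


def is_lfs_pointer(path):
    """Return True if `path` looks like an LFS pointer rather than real weights."""
    try:
        # Pointer stubs are tiny; the first few hundred bytes are enough.
        head = Path(path).read_bytes()[:512].decode('utf-8', errors='ignore')
    except OSError:
        return False
    return bool(
        re.match(
            r'version https://git-lfs\.github\.com/spec/v1\n'
            r'oid sha256:[0-9a-f]{64}\n'
            r'size \d+', head))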
@@ -3,6 +3,7 @@
 import unittest

 import cv2
+from ut_config import PRETRAINED_MODEL_FACE_2D_KEYPOINTS

 from easycv.predictors.face_keypoints_predictor import FaceKeypointsPredictor


@@ -13,7 +14,7 @@ class FaceKeypointsPredictorWithoutDetectorTest(unittest.TestCase):
         print(('Testing %s.%s' % (type(self).__name__, self._testMethodName)))
         self.image_path = './data/test/face_2d_keypoints/data/002258.png'
         self.save_image_path = './data/test/face_2d_keypoints/data/result_002258.png'
-        self.model_path = './data/test/face_2d_keypoints/models/epoch_400.pth'
+        self.model_path = PRETRAINED_MODEL_FACE_2D_KEYPOINTS
         self.model_config_path = './configs/face/face_96x96_wingloss.py'

     def test_single(self):
@@ -2,6 +2,8 @@

 import unittest

+from ut_config import PRETRAINED_MODEL_HAND_KEYPOINTS
+
 from easycv.predictors.hand_keypoints_predictor import HandKeypointsPredictor
 from easycv.utils.config_tools import mmcv_config_fromfile


@@ -16,7 +18,7 @@ class HandKeypointsPredictorTest(unittest.TestCase):
         print(('Testing %s.%s' % (type(self).__name__, self._testMethodName)))
         self.image_path = 'data/test/pose/hand/data/hand.jpg'
         self.save_image_path = 'data/test/pose/hand/data/hand_result.jpg'
-        self.model_path = 'data/test/pose/hand/hrnet_w18_256x256.pth'
+        self.model_path = PRETRAINED_MODEL_HAND_KEYPOINTS
         self.model_config_path = 'configs/pose/hand/hrnet_w18_coco_wholebody_hand_256x256_dark.py'

     def test_single(self):
@@ -2,11 +2,13 @@

 import unittest

+from ut_config import (PRETRAINED_MODEL_WHOLEBODY,
+                       PRETRAINED_MODEL_WHOLEBODY_DETECTION)
+
 from easycv.predictors.wholebody_keypoints_predictor import \
     WholeBodyKeypointsPredictor
 from easycv.utils.config_tools import mmcv_config_fromfile

-DEFAULT_WHOLEBODY_DETECTION_MODEL_PATH = 'data/test/pose/wholebody/models/epoch_290.pth'
+DEFAULT_WHOLEBODY_DETECTION_MODEL_PATH = PRETRAINED_MODEL_WHOLEBODY_DETECTION
 DEFAULT_WHOLEBODY_DETECTION_CONFIG_FILE = 'configs/detection/yolox/yolox_x_8xb8_300e_coco.py'


@@ -16,7 +18,7 @@ class WholeBodyKeypointsPredictorTest(unittest.TestCase):
         print(('Testing %s.%s' % (type(self).__name__, self._testMethodName)))
         self.image_path = 'data/test/pose/wholebody/data/img_test_wholebody.jpg'
         self.save_image_path = 'img_test_wholebody_ret.jpg'
-        self.model_path = 'data/test/pose/wholebody/models/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth'
+        self.model_path = PRETRAINED_MODEL_WHOLEBODY
         self.model_config_path = 'configs/pose/wholebody/hrnet_w48_coco_wholebody_384x288_dark_plus.py'

     def test_single(self):
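All three predictor tests follow the same pattern: the checkpoint path hard-coded under data/test/... is replaced by a constant imported from ut_config, so the weights can live on shared storage instead of inside the repository. A hypothetical variation of that setUp pattern (not in this commit; the class and test names below are illustrative) that skips cleanly when the shared checkpoint is not mounted:

# Hypothetical sketch, not part of this commit: skip a predictor test when the
# shared checkpoint referenced by ut_config is not reachable on this machine.
import os
import unittest

from ut_config import PRETRAINED_MODEL_HAND_KEYPOINTS


@unittest.skipUnless(
    os.path.exists(PRETRAINED_MODEL_HAND_KEYPOINTS),
    'shared pretrained model not mounted')
class HandKeypointsPredictorSmokeTest(unittest.TestCase):

    def setUp(self):
        self.model_path = PRETRAINED_MODEL_HAND_KEYPOINTS

    def test_model_file_is_real_weights(self):
        # A pointer-sized file would mean the checkpoint was never fetched.
        self.assertGreater(os.path.getsize(self.model_path), 1024)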
@@ -153,6 +153,16 @@ PRETRAINED_MODEL_SEGFORMER = os.path.join(
 PRETRAINED_MODEL_BEVFORMER_BASE = os.path.join(
     BASE_LOCAL_PATH,
     'pretrained_models/detection3d/bevformer/bevformer_base_epoch_24.pth')
+PRETRAINED_MODEL_FACE_2D_KEYPOINTS = os.path.join(
+    BASE_LOCAL_PATH, 'pretrained_models/face_2d_keypoints/epoch_400.pth')
+PRETRAINED_MODEL_HAND_KEYPOINTS = os.path.join(
+    BASE_LOCAL_PATH, 'pretrained_models/pose/hand/hrnet/hrnet_w18_256x256.pth')
+PRETRAINED_MODEL_WHOLEBODY_DETECTION = os.path.join(
+    BASE_LOCAL_PATH, 'pretrained_models/pose/wholebody/epoch_290.pth')
+PRETRAINED_MODEL_WHOLEBODY = os.path.join(
+    BASE_LOCAL_PATH,
+    'pretrained_models/pose/wholebody/hrnet_w48_coco_wholebody_384x288_dark-f5726563_20200918.pth'
+)
 MODEL_CONFIG_SEGFORMER = (
     './configs/segmentation/segformer/segformer_b0_coco.py')
 SMALL_COCO_WHOLE_BODY_HAND_ROOT = 'data/test/pose/hand/small_whole_body_hand_coco'
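The new ut_config entries all hang off BASE_LOCAL_PATH (defined earlier in ut_config.py and already used by the existing PRETRAINED_MODEL_BEVFORMER_BASE entry), which presumably resolves to the NFS location named in the commit title. A small, hypothetical pre-flight check (not part of the commit) that the four relocated checkpoints are actually reachable before running the suite:

# Hypothetical pre-flight sketch, not part of this commit: verify that the
# relocated checkpoints from ut_config resolve to real files on shared storage.
import os

from ut_config import (PRETRAINED_MODEL_FACE_2D_KEYPOINTS,
                       PRETRAINED_MODEL_HAND_KEYPOINTS,
                       PRETRAINED_MODEL_WHOLEBODY,
                       PRETRAINED_MODEL_WHOLEBODY_DETECTION)

REQUIRED_CHECKPOINTS = [
    PRETRAINED_MODEL_FACE_2D_KEYPOINTS,
    PRETRAINED_MODEL_HAND_KEYPOINTS,
    PRETRAINED_MODEL_WHOLEBODY_DETECTION,
    PRETRAINED_MODEL_WHOLEBODY,
]

if __name__ == '__main__':
    missing = [p for p in REQUIRED_CHECKPOINTS if not os.path.isfile(p)]
    if missing:
        raise SystemExit('missing checkpoints:\n' + '\n'.join(missing))
    print('all %d checkpoints found' % len(REQUIRED_CHECKPOINTS))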