update pytorchtocaffe tool

Summary: update the PyTorch-to-Caffe toolchain to support additional model layers and bash scripts
pull/150/head
liaoxingyu 2020-06-09 14:45:27 +08:00
parent 3732f94405
commit 27c48c8f02
8 changed files with 4799 additions and 4836 deletions

View File

@ -1,4 +1,4 @@
# The Caffe in PytorchToCaffe Provides some convenient API
# The Caffe in nn_tools Provides some convenient API
If there is a problem parsing your prototxt or caffemodel, please replace
the caffe.proto with your own version and compile it with command
`protoc --python_out ./ caffe.proto`

View File

@ -565,7 +565,6 @@ message LayerParameter {
optional RecurrentParameter recurrent_param = 146;
optional ReductionParameter reduction_param = 136;
optional ReLUParameter relu_param = 123;
optional ReLU6Parameter relu6_param = 208;
optional ReshapeParameter reshape_param = 133;
optional ROIPoolingParameter roi_pooling_param = 8266711; //roi pooling
optional ScaleParameter scale_param = 142;
@ -601,7 +600,7 @@ message LayerParameter {
optional CropParameter crop_param = 167;
optional DetectionEvaluateParameter detection_evaluate_param = 168;
optional DetectionOutputParameter detection_output_param = 169;
optional NormalizeParameter norm_param = 170;
//optional NormalizeParameter normalize_param = 170;
optional MultiBoxLossParameter multibox_loss_param = 171;
optional PermuteParameter permute_param = 172;
optional VideoDataParameter video_data_param = 173;
@ -633,7 +632,7 @@ message LayerParameter {
optional UpsampleParameter upsample_param = 100003;
optional MatMulParameter matmul_param = 100005;
optional PassThroughParameter pass_through_param = 100004;
//optional NormalizeParameter norm_param = 100001;
optional NormalizeParameter norm_param = 100001;
}
//*********************add by wdd******************
@ -659,13 +658,13 @@ message PassThroughParameter {
optional uint32 block_width = 3 [default = 0];
}
//message NormalizeParameter{
//optional bool across_spatial = 1 [default = true];
//optional FillerParameter scale_filler = 2;
//optional bool channel_shared = 3 [default = true];
//optional float eps = 4 [default = 1e-10];
//optional float sqrt_a = 5 [default = 1];
//}
message NormalizeParameter{
// If true, normalize over the whole spatial extent; otherwise per location.
optional bool across_spatial = 1 [default = true];
// Filler for the initial scale values.
optional FillerParameter scale_filler = 2;
// Whether the scale parameter is shared across channels.
optional bool channel_shared = 3 [default = true];
// Epsilon added to avoid division by zero while normalizing.
optional float eps = 4 [default = 1e-10];
// NOTE(review): semantics of sqrt_a are not documented here — confirm
// against the custom Caffe fork that consumes this field.
optional float sqrt_a = 5 [default = 1];
}
@ -1593,15 +1592,19 @@ message MultiBoxLossParameter {
}
// Message that stores parameters used by NormalizeLayer
message NormalizeParameter {
// If true, normalize over the whole spatial extent; otherwise per location.
optional bool across_spatial = 1 [default = true];
// Initial value of scale. Default is 1.0 for all
optional FillerParameter scale_filler = 2;
// Whether or not scale parameters are shared across channels.
optional bool channel_shared = 3 [default = true];
// Epsilon for not dividing by zero while normalizing variance
optional float eps = 4 [default = 1e-10];
}
//message NormalizeParameter {
// //optional bool across_spatial = 1 [default = true];
// // Initial value of scale. Default is 1.0 for all
// //optional FillerParameter scale_filler = 2;
// // Whether or not scale parameters are shared across channels.
// //optional bool channel_shared = 3 [default = true];
// // Epsilon for not dividing by zero while normalizing variance
// //optional float eps = 4 [default = 1e-10];
// //**************************************************
// optional string normalize_type = 1 [default = "L2"];
// optional bool fix_gradient = 2 [default = false];
// optional bool bp_norm = 3 [default = false];
//}
message PermuteParameter {
// The new orders of the axes of data. Notice it should be with
@ -1789,16 +1792,6 @@ message ReLUParameter {
optional Engine engine = 2 [default = DEFAULT];
}
// Message that stores parameters used by ReLU6Layer
message ReLU6Parameter {
// Computation backend selector, mirroring ReLUParameter.Engine.
enum Engine {
DEFAULT = 0;
CAFFE = 1;
CUDNN = 2;
}
// NOTE(review): field number 1 is unused — presumably kept free to mirror
// ReLUParameter (whose field 1 is negative_slope); confirm before reusing.
optional Engine engine = 2 [default = DEFAULT];
}
message ReshapeParameter {
// Specify the output dimensions. If some of the dimensions are set to 0,
// the corresponding dimension from the bottom layer is used (unchanged).

View File

@ -7,7 +7,6 @@ from .layer_param import Layer_param
class _Net(object):
def __init__(self):
    # Underlying protobuf NetParameter holding all layer definitions.
    self.net=pb.NetParameter()
    # Quoted top-name -> quoted bottom-name substitutions collected while
    # removing layers, applied as text replacements when saving the prototxt.
    self.needChange = {}
def layer_index(self,layer_name):
# find a layer's index by name. if the layer was found, return the layer position in the net, else return -1.
@ -40,20 +39,6 @@ class _Net(object):
return
raise(AttributeError, "cannot found layer %s" % str(layer_name))
def remove_layer_by_type(self, type_name):
    """Delete the first layer whose ``type`` equals ``type_name``.

    Records a quoted top-name -> bottom-name mapping in ``self.needChange``
    so references to the removed layer can be rewired when the prototxt is
    serialized. Does nothing if no layer of that type exists.
    """
    for idx in range(len(self.net.layer)):
        candidate = self.net.layer[idx]
        if candidate.type != type_name:
            continue
        top_name = '"%s"' % candidate.top[0]
        bottom_name = '"%s"' % candidate.bottom[0]
        self.needChange[top_name] = bottom_name
        del self.net.layer[idx]
        return
def get_layer_by_name(self, layer_name):
# get the layer by layer_name
for layer in self.net.layer:
@ -67,10 +52,7 @@ class _Net(object):
for layer in prototxt.layer:
del layer.blobs[:]
with open(path,'w') as f:
string = text_format.MessageToString(prototxt)
for origin_name in self.needChange.keys():
string = string.replace(origin_name,self.needChange[origin_name])
f.write(string)
f.write(text_format.MessageToString(prototxt))
def layer(self,layer_name):
return self.get_layer_by_name(layer_name)
@ -154,4 +136,4 @@ class Caffemodel(_Net):
class Net():
    """Deprecated placeholder; use Caffemodel or Prototxt instead."""
    def __init__(self, *args, **kwargs):
        # The old form `raise(TypeError, msg)` raises a tuple, which in
        # Python 3 produces "exceptions must derive from BaseException"
        # and loses the intended message. Raise the exception properly.
        raise TypeError('the class Net is no longer used, please use Caffemodel or Prototxt instead')

File diff suppressed because one or more lines are too long

View File

@ -71,46 +71,9 @@ class Layer_param():
conv_param.dilation.extend(pair_reduce(dilation))
if groups:
conv_param.group=groups
if groups != 1:
conv_param.engine = 1
self.param.convolution_param.CopyFrom(conv_param)
def norm_param(self, eps):
    """Attach a NormalizeParameter (L2 normalization) to this layer.

    Configures per-location, per-channel normalization
    (across_spatial=False, channel_shared=False).

    Args:
        eps: epsilon added to avoid division by zero while normalizing.
    """
    l2norm_param = pb.NormalizeParameter()
    l2norm_param.across_spatial = False
    l2norm_param.channel_shared = False
    l2norm_param.eps = eps
    self.param.norm_param.CopyFrom(l2norm_param)
def permute_param(self, order1, order2, order3, order4):
    """Attach a PermuteParameter to this layer.

    Args:
        order1, order2, order3, order4: the new axis order for the
            4-D blob (e.g. 0, 2, 3, 1 to move channels last).
    """
    permute_param = pb.PermuteParameter()
    permute_param.order.extend([order1, order2, order3, order4])
    self.param.permute_param.CopyFrom(permute_param)
def pool_param(self,type='MAX',kernel_size=2,stride=2,pad=None, ceil_mode = True):
def pool_param(self,type='MAX',kernel_size=2,stride=2,pad=None, ceil_mode = False):
pool_param=pb.PoolingParameter()
pool_param.pool=pool_param.PoolMethod.Value(type)
pool_param.kernel_size=pair_process(kernel_size)
@ -158,10 +121,22 @@ class Layer_param():
if isinstance(size,int):
upsample_param.upsample_h = size
else:
upsample_param.upsample_h = size[0] * scale_factor
upsample_param.\
upsample_w = size[1] * scale_factor
upsample_param.upsample_h = size[0]
upsample_param.upsample_w = size[1]
#upsample_param.upsample_h = size[0] * scale_factor
#upsample_param.upsample_w = size[1] * scale_factor
self.param.upsample_param.CopyFrom(upsample_param)
def interp_param(self, size=None, scale_factor=None):
    """Attach an InterpParameter to this layer.

    Args:
        size: optional (height, width) pair for the interpolated output;
            takes effect in addition to (and after) scale_factor.
        scale_factor: optional integer zoom factor (non-int values are
            silently ignored, matching the original behavior).
    """
    interp_param = pb.InterpParameter()
    if scale_factor:
        if isinstance(scale_factor, int):
            interp_param.zoom_factor = scale_factor
    if size:
        # Removed leftover debug print of `size` that polluted stdout
        # during every model conversion.
        interp_param.height = size[0]
        interp_param.width = size[1]
    self.param.interp_param.CopyFrom(interp_param)
def add_data(self,*args):
"""Args are data numpy array
@ -180,4 +155,4 @@ class Layer_param():
pass
def set_enum(param, key, value):
    """Set enum field `key` on `param` to the enum number named `value`.

    `param` is a protobuf message/enum wrapper exposing `Value(name)`.
    Removed the duplicated trailing setattr statement (diff-render artifact);
    the single call is behaviorally identical.
    """
    setattr(param, key, param.Value(value))

View File

@ -1 +1 @@
# Redirect users of the removed module to its new location. The old
# Python-2 form `raise ImportError, '...'` is a SyntaxError in Python 3.
raise ImportError('the nn_tools.Caffe.net is no longer used, please use nn_tools.Caffe.caffe_net')

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +1,10 @@
python caffe_inference.py --model-def "logs/caffe_R34/baseline_R34.prototxt" \
--model-weights "logs/caffe_R34/baseline_R34.caffemodel" \
--height 256 --width 128 \
--input \
'/export/home/DATA/Market-1501-v15.09.15/bounding_box_test/1182_c5s3_015240_04.jpg' \
'/export/home/DATA/Market-1501-v15.09.15/bounding_box_test/1182_c6s3_038217_01.jpg' \
'/export/home/DATA/Market-1501-v15.09.15/bounding_box_test/1183_c5s3_006943_05.jpg' \
--output "caffe_R34_output"
'/export/home/DATA/DukeMTMC-reID/bounding_box_train/0728_c4_f0161265.jpg' \
--output "caffe_R34_output"