modify position
parent 0644c970a6
commit 5c20b55ead
@@ -6,7 +6,7 @@
## 2. Preparation
- First, we need to select the model to study. This document uses ResNet50 as the model under study. Copy resnet.py from the [model zoo](../../ppcls/modeling/architecture/) into the current directory, then open the [pretrained models](../../docs/zh_CN/models/models_intro) page, copy the link of the ResNet50 pretrained model, and use the following commands to download and extract it.
+ First, select the model to study. This document uses ResNet50 as the model under study. Copy resnet.py from the [model zoo](../../../ppcls/modeling/architecture/) into the current directory, then open the [pretrained models](../../zh_CN/models/models_intro) page, copy the link of the ResNet50 pretrained model, and use the following commands to download and extract it.

```bash
wget <the link of the ResNet50 pretrained model>
@@ -53,18 +53,17 @@ python tools/feature_maps_visualization/fm_vis.py -i the image you want to test
```

Parameter description (an example invocation follows the list):

+ `-i`: path of the image file to predict, e.g. `./test.jpeg`
+ `-c`: channel index of the feature map to visualize (an integer), e.g. `5`
+ `-p`: path of the pretrained weights, e.g. `./ResNet50_pretrained/`
+ `--show`: whether to display the image, default: False
+ `--save`: whether to save the image, default: True
+ `--save_path`: path to save the image, e.g. `./tools/`
+ `--use_gpu`: whether to use GPU for prediction, default: True

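For example, assuming the pretrained weights were extracted to `./ResNet50_pretrained/` and a test image `./test.jpeg` exists, a hypothetical invocation could look like the sketch below; every value is a placeholder, and `--save_path` is passed straight to `cv2.imwrite`, so it should name the output file:

```bash
python tools/feature_maps_visualization/fm_vis.py \
    -i ./test.jpeg \
    -c 5 \
    -p ./ResNet50_pretrained/ \
    --show=False \
    --save=True \
    --save_path=./tools/feature_map_c5.jpg \
    --use_gpu=True
```
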
## 4. Results

Input image:

- 
+ 

Output feature map:

- 
+ 

@@ -0,0 +1,2 @@
wget https://paddle-imagenet-models-name.bj.bcebos.com/ResNet50_pretrained.tar
tar -xf ResNet50_pretrained.tar
@@ -28,7 +28,6 @@ def parse_args():
    parser.add_argument("-c", "--channel_num", type=int)
    parser.add_argument("-p", "--pretrained_model", type=str)
    parser.add_argument("--show", type=str2bool, default=False)
    parser.add_argument("--save", type=str2bool, default=True)
    parser.add_argument("--save_path", type=str)
    parser.add_argument("--use_gpu", type=str2bool, default=True)

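The boolean flags above (`--show`, `--save`, `--use_gpu`) are converted with a `str2bool` helper defined earlier in fm_vis.py, outside the hunks shown here. Its exact definition is not part of this diff; a typical sketch of such a helper, shown only for context, would be:

```python
def str2bool(v):
    # treat common truthy spellings as True, anything else as False
    return str(v).lower() in ("true", "t", "1")
```
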
@@ -66,9 +65,7 @@ def main():
        place = fluid.CUDAPlace(gpu_id)
    else:
        place = fluid.CPUPlace()
    fm = None

    print(args.pretrained_model)

    pre_weights_dict = fluid.load_program_state(args.pretrained_model)
    with fluid.dygraph.guard(place):
        net = ResNet50()

@@ -83,12 +80,10 @@ def main():
        net.set_dict(pre_weights_dict_new)
        net.eval()
        _, fm = net(data)
-        assert args.channel_num >= 0 and args.channel_num <= fm.shape[1], "the channel is out of the range"
+        assert args.channel_num >= 0 and args.channel_num <= fm.shape[1], "the channel is out of the range, should be in {} but got {}".format([0, fm.shape[1]], args.channel_num)
        fm = (np.squeeze(fm[0][args.channel_num].numpy()) * 255).astype(np.uint8)
        print(fm)
        if fm is not None:
            if args.save:
                print(args.save_path)
                cv2.imwrite(args.save_path, fm)
            if args.show:
                # display the selected channel in a window until a key is pressed
                cv2.imshow("feature map", fm)
                cv2.waitKey(0)

@@ -0,0 +1,215 @@
import numpy as np
import argparse
import ast
import paddle
import paddle.fluid as fluid
from paddle.fluid.param_attr import ParamAttr
from paddle.fluid.layer_helper import LayerHelper
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear
from paddle.fluid.dygraph.base import to_variable

from paddle.fluid import framework

import math
import sys
import time


class ConvBNLayer(fluid.dygraph.Layer):
    # Conv2D followed by BatchNorm, using the naming scheme of the pretrained weights.
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 stride=1,
                 groups=1,
                 act=None,
                 name=None):
        super(ConvBNLayer, self).__init__()

        self._conv = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=(filter_size - 1) // 2,
            groups=groups,
            act=None,
            param_attr=ParamAttr(name=name + "_weights"),
            bias_attr=False)
        if name == "conv1":
            bn_name = "bn_" + name
        else:
            bn_name = "bn" + name[3:]
        self._batch_norm = BatchNorm(
            num_filters,
            act=act,
            param_attr=ParamAttr(name=bn_name + '_scale'),
            bias_attr=ParamAttr(bn_name + '_offset'),
            moving_mean_name=bn_name + '_mean',
            moving_variance_name=bn_name + '_variance')

    def forward(self, inputs):
        y = self._conv(inputs)
        y = self._batch_norm(y)
        return y


class BottleneckBlock(fluid.dygraph.Layer):
    # 1x1 -> 3x3 -> 1x1 residual bottleneck block used by ResNet50/101/152.
    def __init__(self,
                 num_channels,
                 num_filters,
                 stride,
                 shortcut=True,
                 name=None):
        super(BottleneckBlock, self).__init__()

        self.conv0 = ConvBNLayer(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=1,
            act='relu',
            name=name + "_branch2a")
        self.conv1 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters,
            filter_size=3,
            stride=stride,
            act='relu',
            name=name + "_branch2b")
        self.conv2 = ConvBNLayer(
            num_channels=num_filters,
            num_filters=num_filters * 4,
            filter_size=1,
            act=None,
            name=name + "_branch2c")

        if not shortcut:
            self.short = ConvBNLayer(
                num_channels=num_channels,
                num_filters=num_filters * 4,
                filter_size=1,
                stride=stride,
                name=name + "_branch1")

        self.shortcut = shortcut

        self._num_channels_out = num_filters * 4

    def forward(self, inputs):
        y = self.conv0(inputs)
        conv1 = self.conv1(y)
        conv2 = self.conv2(conv1)

        if self.shortcut:
            short = inputs
        else:
            short = self.short(inputs)

        y = fluid.layers.elementwise_add(x=short, y=conv2)

        layer_helper = LayerHelper(self.full_name(), act='relu')
        return layer_helper.append_activation(y)


class ResNet(fluid.dygraph.Layer):
    def __init__(self, layers=50, class_dim=1000):
        super(ResNet, self).__init__()

        self.layers = layers
        supported_layers = [50, 101, 152]
        assert layers in supported_layers, \
            "supported layers are {} but input layer is {}".format(supported_layers, layers)
        # holds the intermediate feature map captured in forward()
        self.fm = None

        if layers == 50:
            depth = [3, 4, 6, 3]
        elif layers == 101:
            depth = [3, 4, 23, 3]
        elif layers == 152:
            depth = [3, 8, 36, 3]
        num_channels = [64, 256, 512, 1024]
        num_filters = [64, 128, 256, 512]

        self.conv = ConvBNLayer(
            num_channels=3,
            num_filters=64,
            filter_size=7,
            stride=2,
            act='relu',
            name="conv1")
        self.pool2d_max = Pool2D(
            pool_size=3,
            pool_stride=2,
            pool_padding=1,
            pool_type='max')

        self.bottleneck_block_list = []
        for block in range(len(depth)):
            shortcut = False
            for i in range(depth[block]):
                if layers in [101, 152] and block == 2:
                    if i == 0:
                        conv_name = "res" + str(block + 2) + "a"
                    else:
                        conv_name = "res" + str(block + 2) + "b" + str(i)
                else:
                    conv_name = "res" + str(block + 2) + chr(97 + i)
                bottleneck_block = self.add_sublayer(
                    'bb_%d_%d' % (block, i),
                    BottleneckBlock(
                        num_channels=num_channels[block]
                        if i == 0 else num_filters[block] * 4,
                        num_filters=num_filters[block],
                        stride=2 if i == 0 and block != 0 else 1,
                        shortcut=shortcut,
                        name=conv_name))
                self.bottleneck_block_list.append(bottleneck_block)
                shortcut = True

        self.pool2d_avg = Pool2D(
            pool_size=7, pool_type='avg', global_pooling=True)

        self.pool2d_avg_output = num_filters[len(num_filters) - 1] * 4 * 1 * 1

        stdv = 1.0 / math.sqrt(2048 * 1.0)

        self.out = Linear(
            self.pool2d_avg_output,
            class_dim,
            param_attr=ParamAttr(
                initializer=fluid.initializer.Uniform(-stdv, stdv), name="fc_0.w_0"),
            bias_attr=ParamAttr(name="fc_0.b_0"))

    def forward(self, inputs):
        y = self.conv(inputs)
        y = self.pool2d_max(y)
        # capture the feature map right after the first max-pool so it can be visualized
        self.fm = y
        for bottleneck_block in self.bottleneck_block_list:
            y = bottleneck_block(y)
        y = self.pool2d_avg(y)
        y = fluid.layers.reshape(y, shape=[-1, self.pool2d_avg_output])
        y = self.out(y)
        return y, self.fm


def ResNet50(**args):
    model = ResNet(layers=50, **args)
    return model


def ResNet101(**args):
    model = ResNet(layers=101, **args)
    return model


def ResNet152(**args):
    model = ResNet(layers=152, **args)
    return model


if __name__ == "__main__":
    import numpy as np
    place = fluid.CPUPlace()
    with fluid.dygraph.guard(place):
        model = ResNet50()
        img = np.random.uniform(0, 255, [1, 3, 224, 224]).astype('float32')
        img = fluid.dygraph.to_variable(img)
        # the model returns (logits, feature_map), so unpack before printing shapes
        res, fm = model(img)
        print(res.shape, fm.shape)
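
For reference, a minimal sketch of how the copied resnet.py and the feature map it returns can be exercised; loading of the pretrained weights is omitted here (fm_vis.py does it with `fluid.load_program_state` and `net.set_dict`), and the channel index and output path are arbitrary placeholders:

```python
# Minimal usage sketch (assumes resnet.py sits in the current directory and
# OpenCV is installed); it mirrors the post-processing step used by fm_vis.py.
import numpy as np
import cv2
import paddle.fluid as fluid

from resnet import ResNet50

with fluid.dygraph.guard(fluid.CPUPlace()):
    net = ResNet50()
    net.eval()
    img = fluid.dygraph.to_variable(
        np.random.uniform(0, 255, [1, 3, 224, 224]).astype('float32'))
    # forward() returns (logits, feature_map); fm is the activation after the first max-pool
    logits, fm = net(img)
    channel = 5  # placeholder channel index
    fm_img = (np.squeeze(fm[0][channel].numpy()) * 255).astype(np.uint8)
    cv2.imwrite('./channel_{}.jpg'.format(channel), fm_img)
```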