fix alexnet
parent 7ed80c5232
commit 3962b3853a
ppcls/modeling/architectures
@@ -31,5 +31,6 @@ from .mobilenet_v1 import MobileNetV1_x0_25, MobileNetV1_x0_5, MobileNetV1_x0_75
 from .mobilenet_v2 import MobileNetV2_x0_25, MobileNetV2_x0_5, MobileNetV2_x0_75, MobileNetV2, MobileNetV2_x1_5, MobileNetV2_x2_0
 from .mobilenet_v3 import MobileNetV3_small_x0_35, MobileNetV3_small_x0_5, MobileNetV3_small_x0_75, MobileNetV3_small_x1_0, MobileNetV3_small_x1_25, MobileNetV3_large_x0_35, MobileNetV3_large_x0_5, MobileNetV3_large_x0_75, MobileNetV3_large_x1_0, MobileNetV3_large_x1_25
 from .shufflenet_v2 import ShuffleNetV2_x0_25, ShuffleNetV2_x0_33, ShuffleNetV2_x0_5, ShuffleNetV2, ShuffleNetV2_x1_5, ShuffleNetV2_x2_0, ShuffleNetV2_swish
+from .alexnet import AlexNet

 from .distillation_models import ResNet50_vd_distill_MobileNetV3_large_x1_0
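This hunk exposes AlexNet alongside the other architectures, so callers can build it through the package instead of reaching into the module. A minimal usage sketch, assuming the ppcls package layout shown in this commit (the model itself is rewritten in alexnet.py in the next hunk):

# Illustrative caller; only the import path comes from this hunk.
from ppcls.modeling.architectures import AlexNet

model = AlexNet()  # AlexNet(**args) forwards kwargs to AlexNetDY; class_dim defaults to 1000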
@@ -1,103 +1,132 @@
 import paddle
-import paddle.fluid as fluid
-from paddle.fluid.param_attr import ParamAttr
-from paddle.fluid.dygraph.nn import Conv2D, Pool2D, BatchNorm, Linear, Dropout
+from paddle import ParamAttr
+import paddle.nn as nn
+import paddle.nn.functional as F
+from paddle.nn import Conv2d, Pool2D, BatchNorm, Linear, Dropout, ReLU
+from paddle.nn.initializer import Uniform
 import math

 __all__ = ["AlexNet"]

-class ConvPoolLayer(fluid.dygraph.Layer):
-    def __init__(self,
-                 inputc_channels,
-                 output_channels,
-                 filter_size,
-                 stride,
-                 padding,
-                 stdv,
-                 groups=1,
-                 act=None,
-                 name=None):
+
+class ConvPoolLayer(nn.Layer):
+    def __init__(self,
+                 inputc_channels,
+                 output_channels,
+                 filter_size,
+                 stride,
+                 padding,
+                 stdv,
+                 groups=1,
+                 act=None,
+                 name=None):
         super(ConvPoolLayer, self).__init__()

-        self._conv = Conv2D(num_channels=inputc_channels,
-                            num_filters=output_channels,
-                            filter_size=filter_size,
-                            stride=stride,
-                            padding=padding,
-                            groups=groups,
-                            param_attr=ParamAttr(name=name + "_weights",
-                                initializer=fluid.initializer.Uniform(-stdv, stdv)),
-                            bias_attr=ParamAttr(name=name + "_offset",
-                                initializer=fluid.initializer.Uniform(-stdv, stdv)),
-                            act=act)
-        self._pool = Pool2D(pool_size=3,
-                            pool_stride=2,
-                            pool_padding=0,
-                            pool_type="max")
+        self.relu = ReLU() if act == "relu" else None
+
+        self._conv = Conv2d(
+            in_channels=inputc_channels,
+            out_channels=output_channels,
+            kernel_size=filter_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            weight_attr=ParamAttr(
+                name=name + "_weights", initializer=Uniform(-stdv, stdv)),
+            bias_attr=ParamAttr(
+                name=name + "_offset", initializer=Uniform(-stdv, stdv)))
+        self._pool = Pool2D(
+            pool_size=3, pool_stride=2, pool_padding=0, pool_type="max")

     def forward(self, inputs):
         x = self._conv(inputs)
+        if self.relu is not None:
+            x = self.relu(x)
         x = self._pool(x)
         return x


-class AlexNetDY(fluid.dygraph.Layer):
+class AlexNetDY(nn.Layer):
     def __init__(self, class_dim=1000):
         super(AlexNetDY, self).__init__()

-        stdv = 1.0/math.sqrt(3*11*11)
+        stdv = 1.0 / math.sqrt(3 * 11 * 11)
         self._conv1 = ConvPoolLayer(
-            3, 64, 11, 4, 2, stdv, act="relu", name="conv1")
-        stdv = 1.0/math.sqrt(64*5*5)
+            3, 64, 11, 4, 2, stdv, act="relu", name="conv1")
+        stdv = 1.0 / math.sqrt(64 * 5 * 5)
         self._conv2 = ConvPoolLayer(
             64, 192, 5, 1, 2, stdv, act="relu", name="conv2")
-        stdv = 1.0/math.sqrt(192*3*3)
-        self._conv3 = Conv2D(192, 384, 3, stride=1, padding=1,
-            param_attr=ParamAttr(name="conv3_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(name="conv3_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)),
-            act="relu")
-        stdv = 1.0/math.sqrt(384*3*3)
-        self._conv4 = Conv2D(384, 256, 3, stride=1, padding=1,
-            param_attr=ParamAttr(name="conv4_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(name="conv4_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)),
-            act="relu")
-        stdv = 1.0/math.sqrt(256*3*3)
+        stdv = 1.0 / math.sqrt(192 * 3 * 3)
+        self._conv3 = Conv2d(
+            192,
+            384,
+            3,
+            stride=1,
+            padding=1,
+            weight_attr=ParamAttr(
+                name="conv3_weights", initializer=Uniform(-stdv, stdv)),
+            bias_attr=ParamAttr(
+                name="conv3_offset", initializer=Uniform(-stdv, stdv)))
+        stdv = 1.0 / math.sqrt(384 * 3 * 3)
+        self._conv4 = Conv2d(
+            384,
+            256,
+            3,
+            stride=1,
+            padding=1,
+            weight_attr=ParamAttr(
+                name="conv4_weights", initializer=Uniform(-stdv, stdv)),
+            bias_attr=ParamAttr(
+                name="conv4_offset", initializer=Uniform(-stdv, stdv)))
+        stdv = 1.0 / math.sqrt(256 * 3 * 3)
         self._conv5 = ConvPoolLayer(
             256, 256, 3, 1, 1, stdv, act="relu", name="conv5")
-        stdv = 1.0/math.sqrt(256*6*6)
+        stdv = 1.0 / math.sqrt(256 * 6 * 6)

         self._drop1 = Dropout(p=0.5)
-        self._fc6 = Linear(input_dim=256*6*6,
-            output_dim=4096,
-            param_attr=ParamAttr(name="fc6_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(name="fc6_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)),
-            act="relu")
+
+        self._fc6 = Linear(
+            in_features=256 * 6 * 6,
+            out_features=4096,
+            weight_attr=ParamAttr(
+                name="fc6_weights", initializer=Uniform(-stdv, stdv)),
+            bias_attr=ParamAttr(
+                name="fc6_offset", initializer=Uniform(-stdv, stdv)))

         self._drop2 = Dropout(p=0.5)
-        self._fc7 = Linear(input_dim=4096,
-            output_dim=4096,
-            param_attr=ParamAttr(name="fc7_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(name="fc7_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)),
-            act="relu")
-        self._fc8 = Linear(input_dim=4096,
-            output_dim=class_dim,
-            param_attr=ParamAttr(name="fc8_weights", initializer=fluid.initializer.Uniform(-stdv, stdv)),
-            bias_attr=ParamAttr(name="fc8_offset", initializer=fluid.initializer.Uniform(-stdv, stdv)))
+        self._fc7 = Linear(
+            in_features=4096,
+            out_features=4096,
+            weight_attr=ParamAttr(
+                name="fc7_weights", initializer=Uniform(-stdv, stdv)),
+            bias_attr=ParamAttr(
+                name="fc7_offset", initializer=Uniform(-stdv, stdv)))
+        self._fc8 = Linear(
+            in_features=4096,
+            out_features=class_dim,
+            weight_attr=ParamAttr(
+                name="fc8_weights", initializer=Uniform(-stdv, stdv)),
+            bias_attr=ParamAttr(
+                name="fc8_offset", initializer=Uniform(-stdv, stdv)))

     def forward(self, inputs):
         x = self._conv1(inputs)
         x = self._conv2(x)
         x = self._conv3(x)
+        x = F.relu(x)
         x = self._conv4(x)
+        x = F.relu(x)
         x = self._conv5(x)
-        x = fluid.layers.flatten(x, axis=0)
+        x = paddle.flatten(x, start_axis=1, stop_axis=-1)
         x = self._drop1(x)
         x = self._fc6(x)
+        x = F.relu(x)
         x = self._drop2(x)
         x = self._fc7(x)
+        x = F.relu(x)
         x = self._fc8(x)
         return x


 def AlexNet(**args):
     model = AlexNetDY(**args)
     return model
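As a quick sanity check of the migrated forward path, the sketch below builds the model and runs a dummy batch. It assumes the paddle 2.0-beta dygraph API this commit targets (Conv2d, paddle.nn.Pool2D, paddle.flatten); paddle.rand, paddle.disable_static, and the 224x224 input size are illustrative and not part of the commit.

import paddle
from ppcls.modeling.architectures import AlexNet

paddle.disable_static()              # older 2.0 betas still default to static graph
model = AlexNet(class_dim=1000)
x = paddle.rand([1, 3, 224, 224])    # NCHW; 224x224 yields the 256*6*6 feature map fc6 expects
out = model(x)
print(out.shape)                     # expect [1, 1000]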