# mirror of https://github.com/alibaba/EasyCV.git
import os
from collections import OrderedDict
from io import BytesIO
from os import path
from urllib import request

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F

class Flatten(nn.Module):

    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [batch_size, c, h, w].
        Returns:
            a float tensor with shape [batch_size, c*h*w].
        """
        # Swap H and W before flattening: the pretrained .npy weights were
        # exported with this flattening order, so the model does not work
        # without the transpose.
        x = x.transpose(3, 2).contiguous()
        return x.view(x.size(0), -1)
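
# Illustration (not in the original file): Flatten is equivalent to swapping
# the last two axes and then flattening row-major, i.e. each feature map is
# read out column by column:
#
#     x = torch.randn(2, 32, 3, 3)
#     assert torch.equal(Flatten()(x), x.permute(0, 1, 3, 2).reshape(2, -1))
#
# which is presumably the layout expected by the pretrained fully connected
# weights.
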
def get_url_weights(
        path,
        dir_path,
        url_index="http://pai-vision-data-hz.oss-cn-zhangjiakou.aliyuncs.com/release/mtcnn/"):
    """Load a .npy weight dict from dir_path/path, falling back to url_index."""
    wpath = os.path.join(dir_path, path)
    if os.path.exists(wpath):
        weights = np.load(wpath, allow_pickle=True)[()]
    else:
        try:
            s = request.urlopen(os.path.join(url_index, path)).read()
            m = BytesIO(s)
            weights = np.load(m, allow_pickle=True).item()
        except Exception:
            print(
                "Failed to load %s from %s, please ensure access to %s or provide a face detector model!"
                % (path, os.path.join(url_index, path), url_index))
            weights = None

    if weights is not None:
        print("load weight from %s / %s success!" %
              (wpath, os.path.join(url_index, path)))

    return weights
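
# Usage sketch (illustrative, the path is a placeholder): the returned object
# is a dict mapping parameter names, e.g. "features.conv1.weight" or
# "conv4_1.bias", to numpy arrays, or None if neither the local file nor the
# URL could be read:
#
#     weights = get_url_weights("weights/pnet.npy", "/path/to/model_dir")
#     if weights is not None:
#         print(sorted(weights.keys()))
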
class PNet(nn.Module):

    def __init__(self, dir_path=None):

        super(PNet, self).__init__()

        # suppose we have input with size HxW, then
        # after first layer: H - 2,
        # after pool: ceil((H - 2)/2),
        # after second conv: ceil((H - 2)/2) - 2,
        # after last conv: ceil((H - 2)/2) - 4,
        # and the same for W
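        #
        # For example, for the canonical 12x12 PNet input:
        #   12 -> 10 after conv1, -> 5 after pool1, -> 3 after conv2,
        #   -> 1 after conv3, so one 12x12 crop yields a single prediction.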

        self.features = nn.Sequential(
            OrderedDict([
                ("conv1", nn.Conv2d(3, 10, 3, 1)),
                ("prelu1", nn.PReLU(10)),
                ("pool1", nn.MaxPool2d(2, 2, ceil_mode=True)),
                ("conv2", nn.Conv2d(10, 16, 3, 1)),
                ("prelu2", nn.PReLU(16)),
                ("conv3", nn.Conv2d(16, 32, 3, 1)),
                ("prelu3", nn.PReLU(32)),
            ]))

        self.conv4_1 = nn.Conv2d(32, 2, 1, 1)
        self.conv4_2 = nn.Conv2d(32, 4, 1, 1)

        if dir_path is None:
            dir_path = path.dirname(__file__)

        weights = get_url_weights("weights/pnet.npy", dir_path)

        if weights is not None:
            for n, p in self.named_parameters():
                p.data = torch.FloatTensor(weights[n])

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [batch_size, 3, h, w].
        Returns:
            b: a float tensor with shape [batch_size, 4, h', w'].
            a: a float tensor with shape [batch_size, 2, h', w'].
        """
        x = self.features(x)
        a = self.conv4_1(x)
        b = self.conv4_2(x)
        a = F.softmax(a, dim=1)
        return b, a


class RNet(nn.Module):

    def __init__(self, dir_path=None):

        super(RNet, self).__init__()

        self.features = nn.Sequential(
            OrderedDict([
                ("conv1", nn.Conv2d(3, 28, 3, 1)),
                ("prelu1", nn.PReLU(28)),
                ("pool1", nn.MaxPool2d(3, 2, ceil_mode=True)),
                ("conv2", nn.Conv2d(28, 48, 3, 1)),
                ("prelu2", nn.PReLU(48)),
                ("pool2", nn.MaxPool2d(3, 2, ceil_mode=True)),
                ("conv3", nn.Conv2d(48, 64, 2, 1)),
                ("prelu3", nn.PReLU(64)),
                ("flatten", Flatten()),
                ("conv4", nn.Linear(576, 128)),
                ("prelu4", nn.PReLU(128)),
            ]))

        self.conv5_1 = nn.Linear(128, 2)
        self.conv5_2 = nn.Linear(128, 4)

        if dir_path is None:
            dir_path = path.dirname(__file__)

        weights = get_url_weights("weights/rnet.npy", dir_path)

        if weights is not None:
            for n, p in self.named_parameters():
                p.data = torch.FloatTensor(weights[n])

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [batch_size, 3, h, w].
        Returns:
            b: a float tensor with shape [batch_size, 4].
            a: a float tensor with shape [batch_size, 2].
        """
        x = self.features(x)
        a = self.conv5_1(x)
        b = self.conv5_2(x)
        a = F.softmax(a, dim=1)
        return b, a


class ONet(nn.Module):

    def __init__(self, dir_path=None):

        super(ONet, self).__init__()

        self.features = nn.Sequential(
            OrderedDict([
                ("conv1", nn.Conv2d(3, 32, 3, 1)),
                ("prelu1", nn.PReLU(32)),
                ("pool1", nn.MaxPool2d(3, 2, ceil_mode=True)),
                ("conv2", nn.Conv2d(32, 64, 3, 1)),
                ("prelu2", nn.PReLU(64)),
                ("pool2", nn.MaxPool2d(3, 2, ceil_mode=True)),
                ("conv3", nn.Conv2d(64, 64, 3, 1)),
                ("prelu3", nn.PReLU(64)),
                ("pool3", nn.MaxPool2d(2, 2, ceil_mode=True)),
                ("conv4", nn.Conv2d(64, 128, 2, 1)),
                ("prelu4", nn.PReLU(128)),
                ("flatten", Flatten()),
                ("conv5", nn.Linear(1152, 256)),
                ("drop5", nn.Dropout(0.25)),
                ("prelu5", nn.PReLU(256)),
            ]))

        self.conv6_1 = nn.Linear(256, 2)
        self.conv6_2 = nn.Linear(256, 4)
        self.conv6_3 = nn.Linear(256, 10)

        if dir_path is None:
            dir_path = path.dirname(__file__)

        weights = get_url_weights("weights/onet.npy", dir_path)

        if weights is not None:
            for n, p in self.named_parameters():
                p.data = torch.FloatTensor(weights[n])

    def forward(self, x):
        """
        Arguments:
            x: a float tensor with shape [batch_size, 3, h, w].
        Returns:
            c: a float tensor with shape [batch_size, 10].
            b: a float tensor with shape [batch_size, 4].
            a: a float tensor with shape [batch_size, 2].
        """
        x = self.features(x)
        a = self.conv6_1(x)
        b = self.conv6_2(x)
        c = self.conv6_3(x)
        a = F.softmax(a, dim=1)
        return c, b, a
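

# A minimal usage sketch (not part of the original file). It assumes the
# pnet/rnet/onet .npy weight files are available locally under dir_path or
# reachable at the default url_index; otherwise the networks keep their
# random initialisation. PNet is fully convolutional and accepts arbitrary
# input sizes; RNet expects 24x24 crops and ONet expects 48x48 crops.
if __name__ == "__main__":
    pnet, rnet, onet = PNet(), RNet(), ONet()
    for net in (pnet, rnet, onet):
        net.eval()

    with torch.no_grad():
        # PNet on a 12x12 image gives one prediction per spatial location.
        offsets, probs = pnet(torch.randn(1, 3, 12, 12))
        print(offsets.shape, probs.shape)  # (1, 4, 1, 1), (1, 2, 1, 1)

        # RNet / ONet operate on fixed-size face candidates.
        b, a = rnet(torch.randn(1, 3, 24, 24))
        print(b.shape, a.shape)  # (1, 4), (1, 2)

        c, b, a = onet(torch.randn(1, 3, 48, 48))
        print(c.shape, b.shape, a.shape)  # (1, 10), (1, 4), (1, 2)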