module updates
parent 86784cfdbf
commit b5659d1195
models/common.py
@@ -1,9 +1,13 @@
 # This file contains modules common to various models
 
 from utils.utils import *
 
 
+def autopad(k):
+    # Pad to 'same'
+    return k // 2 if isinstance(k, int) else [x // 2 for x in k]  # auto-pad
+
+
 def DWConv(c1, c2, k=1, s=1, act=True):
     # Depthwise convolution
     return Conv(c1, c2, k, s, g=math.gcd(c1, c2), act=act)
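The new `autopad` helper computes 'same' padding from the kernel size, handling both scalar and per-axis kernels. A minimal standalone sketch of its behavior:

```python
# Minimal sketch of the new 'same'-padding helper.
def autopad(k):
    return k // 2 if isinstance(k, int) else [x // 2 for x in k]

print(autopad(3))       # 1 -> a 3x3 conv at stride 1 preserves H and W
print(autopad((1, 3)))  # [0, 1] -> per-axis padding for asymmetric kernels
```

Note that the 'same' property only holds for odd kernel sizes at stride 1.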
@@ -11,10 +15,9 @@ def DWConv(c1, c2, k=1, s=1, act=True):
 
 class Conv(nn.Module):
     # Standard convolution
-    def __init__(self, c1, c2, k=1, s=1, g=1, act=True):  # ch_in, ch_out, kernel, stride, groups
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
         super(Conv, self).__init__()
-        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]  # padding
-        self.conv = nn.Conv2d(c1, c2, k, s, p, groups=g, bias=False)
+        self.conv = nn.Conv2d(c1, c2, k, s, p or autopad(k), groups=g, bias=False)
         self.bn = nn.BatchNorm2d(c2)
         self.act = nn.LeakyReLU(0.1, inplace=True) if act else nn.Identity()
 
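`Conv` now accepts an explicit padding `p`, falling back to `autopad(k)` when it is left as `None`. One subtlety worth flagging: because `p or autopad(k)` treats `0` as falsy, an explicit `p=0` still resolves to auto-padding. A quick shape check, assuming this commit's `models/common.py` is importable:

```python
# Shape check for the updated Conv: default padding keeps H and W at stride 1.
import torch
from models.common import Conv

m = Conv(3, 16, k=3, s=1)                  # p=None -> autopad(3) == 1
print(m(torch.zeros(1, 3, 64, 64)).shape)  # torch.Size([1, 16, 64, 64])
```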
@@ -46,7 +49,7 @@ class BottleneckCSP(nn.Module):
         self.cv1 = Conv(c1, c_, 1, 1)
         self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
         self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
-        self.cv4 = Conv(c2, c2, 1, 1)
+        self.cv4 = Conv(2 * c_, c2, 1, 1)
         self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
         self.act = nn.LeakyReLU(0.1, inplace=True)
         self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
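The `cv4` fix matters for any non-default expansion: `cv4` consumes `cat(cv3(...), cv2(x))`, which has `2 * c_` channels. The old `Conv(c2, c2, 1, 1)` only happened to work because the default `e=0.5` makes `2 * c_ == c2`. Channel arithmetic with hypothetical numbers:

```python
# Why cv4 must take 2 * c_ inputs: hypothetical expansion e != 0.5.
c2, e = 128, 0.25
c_ = int(c2 * e)   # 32 hidden channels per branch
print(2 * c_)      # 64 channels enter cv4 after the concat, not c2 == 128
```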
@@ -79,9 +82,9 @@ class Flatten(nn.Module):
 
 class Focus(nn.Module):
     # Focus wh information into c-space
-    def __init__(self, c1, c2, k=1):
+    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):  # ch_in, ch_out, kernel, stride, padding, groups
         super(Focus, self).__init__()
-        self.conv = Conv(c1 * 4, c2, k, 1)
+        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)
 
     def forward(self, x):  # x(b,c,w,h) -> y(b,4c,w/2,h/2)
         return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2], x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))
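`Focus` performs a 2x2 space-to-depth before its convolution: the four strided slices pick the even/odd rows and columns, stacking each 2x2 neighborhood into channels so the conv sees 4x the channels at half the resolution. A standalone sketch of the slicing:

```python
# Standalone sketch of the Focus slicing (2x2 space-to-depth).
import torch

x = torch.arange(16.).reshape(1, 1, 4, 4)
y = torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2],
               x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1)
print(x.shape, '->', y.shape)  # (1, 1, 4, 4) -> (1, 4, 2, 2)
```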
models/experimental.py
@@ -1,6 +1,40 @@
 # This file contains experimental modules
 
 from models.common import *
 
 
+class CrossConv(nn.Module):
+    # Cross Convolution
+    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, shortcut, groups, expansion
+        super(CrossConv, self).__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = Conv(c1, c_, (1, 3), 1)
+        self.cv2 = Conv(c_, c2, (3, 1), 1, g=g)
+        self.add = shortcut and c1 == c2
+
+    def forward(self, x):
+        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))
+
+
+class C3(nn.Module):
+    # Cross Convolution CSP
+    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
+        super(C3, self).__init__()
+        c_ = int(c2 * e)  # hidden channels
+        self.cv1 = Conv(c1, c_, 1, 1)
+        self.cv2 = nn.Conv2d(c1, c_, 1, 1, bias=False)
+        self.cv3 = nn.Conv2d(c_, c_, 1, 1, bias=False)
+        self.cv4 = Conv(2 * c_, c2, 1, 1)
+        self.bn = nn.BatchNorm2d(2 * c_)  # applied to cat(cv2, cv3)
+        self.act = nn.LeakyReLU(0.1, inplace=True)
+        self.m = nn.Sequential(*[CrossConv(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
+
+    def forward(self, x):
+        y1 = self.cv3(self.m(self.cv1(x)))
+        y2 = self.cv2(x)
+        return self.cv4(self.act(self.bn(torch.cat((y1, y2), dim=1))))
+
+
 class Sum(nn.Module):
     # Weighted sum of 2 or more layers https://arxiv.org/abs/1911.09070
     def __init__(self, n, weight=False):  # n: number of inputs
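`CrossConv` factorizes a 3x3 convolution into a 1x3 followed by a 3x1, keeping the 3x3 receptive field while cutting weights, and enables the residual add only when `c1 == c2`. Parameter arithmetic with hypothetical channel counts:

```python
# Hypothetical weight counts: factorized (1x3 then 3x1) vs. a full 3x3 conv.
c1 = c2 = 256
c_ = int(c2 * 0.5)                            # hidden channels, e=0.5
full = 3 * 3 * c1 * c2                        # 589,824 weights
factored = 1 * 3 * c1 * c_ + 3 * 1 * c_ * c2  # 196,608 weights
print(full, factored)
```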
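`C3` mirrors `BottleneckCSP` exactly, swapping `Bottleneck` for `CrossConv` in the inner stack. A quick smoke test, assuming the experimental file lives at `models/experimental.py` as its `models.common` import suggests:

```python
# Smoke test for the new C3 block (import path is an assumption).
import torch
from models.experimental import C3

m = C3(64, 128, n=2)
print(m(torch.zeros(1, 64, 32, 32)).shape)  # torch.Size([1, 128, 32, 32])
```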