# Code taken (and modified a bit) from
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pl_examples/models/unet.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class UNet(nn.Module):
"""
Architecture based on U-Net: Convolutional Networks for Biomedical Image Segmentation
Link - https://arxiv.org/abs/1505.04597
Parameters:
num_classes: Number of output classes required (default 19 for KITTI dataset)
num_layers: Number of layers in each side of U-net
features_start: Number of features in first layer
"""
def __init__(
self,
num_classes: int = 1,
num_layers: int = 4,
features_start: int = 64,
dropout=False,
):
        super().__init__()
        self.num_layers = num_layers
        self.dropout = dropout
        # First block maps the 192-channel input to `features_start` channels
        layers = [DoubleConv(192, features_start)]
        feats = features_start
        # Contracting path: halve the spatial size, double the channels
        for _ in range(num_layers - 1):
            layers.append(Down(feats, feats * 2, self.dropout))
            feats *= 2
        # Expanding path: double the spatial size, halve the channels
        for _ in range(num_layers - 1):
            layers.append(Up(feats, feats // 2))
            feats //= 2
        # 1x1 convolution producing the per-pixel class logits
        layers.append(nn.Conv2d(feats, num_classes, kernel_size=1))
        self.layers = nn.ModuleList(layers)

    def forward(self, x):
        xi = [self.layers[0](x)]
        # Down path
        for layer in self.layers[1 : self.num_layers]:
            xi.append(layer(xi[-1]))
        # Up path: each Up block consumes the previous output and the
        # matching skip connection from the down path
        for i, layer in enumerate(self.layers[self.num_layers : -1]):
            xi[-1] = layer(xi[-1], xi[-2 - i])
        return self.layers[-1](xi[-1])


class DoubleConv(nn.Module):
    """
    Double Convolution and BN and ReLU
    (3x3 conv -> BN -> ReLU) ** 2
    """

    def __init__(self, in_ch: int, out_ch: int):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.net(x)


class Down(nn.Module):
    """
    Combination of MaxPool2d and DoubleConv in series,
    optionally followed by Dropout2d.
    """

    def __init__(self, in_ch: int, out_ch: int, dropout: float = 0.0):
        super().__init__()
        self.net = nn.Sequential(
            nn.MaxPool2d(kernel_size=2, stride=2),
            DoubleConv(in_ch, out_ch),
        )
        if dropout:
            self.net.add_module("dropout", nn.Dropout2d(dropout))

    def forward(self, x):
        return self.net(x)


class Up(nn.Module):
    """
    Upsampling (by transpose convolution)
    followed by concatenation with the feature map from the contracting path,
    followed by a double 3x3 convolution.
    """

    def __init__(self, in_ch: int, out_ch: int):
        super().__init__()
        self.upsample = nn.ConvTranspose2d(in_ch, in_ch // 2, kernel_size=2, stride=2)
        # After concatenation with the skip connection the channel count is
        # in_ch // 2 + in_ch // 2 = in_ch again, hence DoubleConv(in_ch, out_ch)
        self.conv = DoubleConv(in_ch, out_ch)

    def forward(self, x1, x2):
        x1 = self.upsample(x1)
        # Pad x1 to the spatial size of x2; the sizes can differ by a pixel
        # when an odd size was floored by max pooling on the way down
        diff_h = x2.shape[2] - x1.shape[2]
        diff_w = x2.shape[3] - x1.shape[3]
        x1 = F.pad(
            x1, [diff_w // 2, diff_w - diff_w // 2, diff_h // 2, diff_h - diff_h // 2]
        )
        # Concatenate along the channels axis
        x = torch.cat([x2, x1], dim=1)
        return self.conv(x)
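

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original file. It assumes the
    # defaults above; the batch size and the odd 100x100 spatial size are
    # illustrative, chosen to exercise the padding logic in Up.
    model = UNet(num_classes=1, num_layers=4, features_start=64, dropout=0.5)
    x = torch.randn(2, 192, 100, 100)  # 192 channels match the hardcoded first DoubleConv
    out = model(x)
    # 100 -> 50 -> 25 -> 12 on the way down, so the Up blocks must pad
    # (12 upsamples to 24, which is padded to the skip connection's 25)
    print(out.shape)  # expected: torch.Size([2, 1, 100, 100])

    # Direct check of Up with mismatched sizes: a 12x12 map upsamples to
    # 24x24 and is padded to align with a 25x25 skip connection
    up = Up(128, 64)
    y = up(torch.randn(1, 128, 12, 12), torch.randn(1, 64, 25, 25))
    print(y.shape)  # expected: torch.Size([1, 64, 25, 25])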