Part-3 CNN
Lab-10-0 Convolution Neural Network
- Convolutional Neural Network (CNN)
- Visdom, Datasets
- MNIST, CIFAR-10
- VGG & ResNet
Lab-10-1 Convolution
- Convolution operation
- Filter
- Stride
- Padding
- Pooling
Convolution?
- An operation that slides a filter (kernel) over the image by the stride, multiplies the overlapping elements elementwise, and sums them to produce each output value.
Stride and Padding
- Stride: how far the filter moves at each step
- Padding: zero-padding (a border of zeros added around the input)
Convolution output size
$\text{output size} = \left\lfloor \frac{\text{input size} - \text{filter size} + 2 \times \text{padding}}{\text{stride}} \right\rfloor + 1$
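The same formula can be wrapped in a small helper for quick sanity checks against the nn.Conv2d examples below (a minimal sketch; conv_output_size is not part of the original lecture code):

import math

def conv_output_size(input_size, filter_size, stride=1, padding=0):
    # floor((input - filter + 2*padding) / stride) + 1
    return math.floor((input_size - filter_size + 2 * padding) / stride) + 1

print(conv_output_size(227, 11, stride=4, padding=0))  # 55
print(conv_output_size(64, 7, stride=2, padding=0))    # 29
print(conv_output_size(32, 5, stride=1, padding=2))    # 32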
Examples
import torch
import torch.nn as nn

conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=11, stride=4, padding=0)
inputs = torch.Tensor(1, 1, 227, 227)
conv(inputs).shape
torch.Size([1, 1, 55, 55])

conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(7, 7), stride=2, padding=0)
inputs = torch.Tensor(1, 1, 64, 64)
conv(inputs).shape
torch.Size([1, 1, 29, 29])

conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(5, 5), stride=1, padding=2)
inputs = torch.Tensor(1, 1, 32, 32)
conv(inputs).shape
torch.Size([1, 1, 32, 32])

conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=5, stride=1, padding=0)
inputs = torch.Tensor(1, 1, 32, 64)
conv(inputs).shape
torch.Size([1, 1, 28, 60])

conv = nn.Conv2d(in_channels=1, out_channels=1, kernel_size=(3, 3), stride=1, padding=1)
inputs = torch.Tensor(1, 1, 64, 32)
conv(inputs).shape
torch.Size([1, 1, 64, 32])
Pooling
- Max Pooling
- Average Pooling
- …
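As a quick illustration (a minimal sketch, not from the original notes; the input is random and the shapes are illustrative):

inputs = torch.randn(1, 1, 4, 4)
max_pool = nn.MaxPool2d(kernel_size=2)   # maximum of each 2x2 window
avg_pool = nn.AvgPool2d(kernel_size=2)   # average of each 2x2 window
print(max_pool(inputs).shape)  # torch.Size([1, 1, 2, 2])
print(avg_pool(inputs).shape)  # torch.Size([1, 1, 2, 2])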
CNN implementation
inputs = torch.Tensor(1, 1, 28, 28)
print(inputs.shape)
conv1 = nn.Conv2d(in_channels=1, out_channels=5, kernel_size=(5, 5))
temp = conv1(inputs)
print(temp.size())
pool1 = nn.MaxPool2d(2)
out = pool1(temp)
print(out.size())
torch.Size([1, 1, 28, 28])
torch.Size([1, 5, 24, 24])
torch.Size([1, 5, 12, 12])
Lab-10-2 MNIST CNN
- Deep learning training steps
- CNN
inputs = torch.Tensor(1, 1, 28, 28)
conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
pool = nn.MaxPool2d(2)
out = conv1(inputs)
out = pool(out)
out = conv2(out)
out = pool(out)
print(out.size())
out = out.view(out.size(0), -1)
print(out.shape)
torch.Size([1, 64, 7, 7])
torch.Size([1, 3136])
fc = nn.Linear(3136, 10)
fc(out)
tensor([[ 0.0743, -0.0372, -0.0891, -0.0524, 0.0265, -0.1187, -0.0263, -0.0442,
-0.0113, 0.0294]], grad_fn=<AddmmBackward0>)
import torch
import torch.nn as nn
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torch.nn.init
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.manual_seed(777)
if device == "cuda":
    torch.cuda.manual_seed(777)
# Parameters
learning_rate = 0.001
training_epochs = 15
batch_size = 100

# MNIST dataset
mnist_train = dset.MNIST(root="MNIST_data/", train=True, transform=transforms.ToTensor(), download=True)
mnist_test = dset.MNIST(root="MNIST_data/", train=False, transform=transforms.ToTensor(), download=True)
data_loader = torch.utils.data.DataLoader(dataset=mnist_train, batch_size=batch_size, shuffle=True, drop_last=True)
# CNN
class CNN(nn.Module):
    def __init__(self):
        super(CNN, self).__init__()
        self.layer1 = nn.Sequential(
            nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2d(2)
        )
        self.fc = nn.Linear(7*7*64, 10, bias=True)
        torch.nn.init.xavier_uniform_(self.fc.weight)

    def forward(self, x):
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out
model = CNN().to(device)
model
CNN(
  (layer1): Sequential(
    (0): Conv2d(1, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (layer2): Sequential(
    (0): Conv2d(32, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU()
    (2): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (fc): Linear(in_features=3136, out_features=10, bias=True)
)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# training
total_batch = len(data_loader)
for epoch in range(training_epochs):
    avg_cost = 0
    for x, y in data_loader:
        x = x.to(device)
        y = y.to(device)
        optimizer.zero_grad()
        hypothesis = model(x)
        cost = criterion(hypothesis, y)
        cost.backward()
        optimizer.step()
        avg_cost += cost / total_batch
    print(f"[Epoch: {epoch+1}] cost = {avg_cost}")
[Epoch: 1] cost = 0.2255527824163437
[Epoch: 2] cost = 0.06297089904546738
[Epoch: 3] cost = 0.04629518464207649
[Epoch: 4] cost = 0.03738237917423248
[Epoch: 5] cost = 0.03142663091421127
[Epoch: 6] cost = 0.02627108059823513
[Epoch: 7] cost = 0.021880438551306725
[Epoch: 8] cost = 0.018407942727208138
[Epoch: 9] cost = 0.01604434661567211
[Epoch: 10] cost = 0.01318534929305315
[Epoch: 11] cost = 0.010261600837111473
[Epoch: 12] cost = 0.010166279971599579
[Epoch: 13] cost = 0.008695926517248154
[Epoch: 14] cost = 0.00602313969284296
[Epoch: 15] cost = 0.0067491657100617886
# test
with torch.no_grad():
    X_test = mnist_test.data.view(len(mnist_test), 1, 28, 28).float().to(device)
    Y_test = mnist_test.targets.to(device)
    prediction = model(X_test)
    correct_prediction = torch.argmax(prediction, 1) == Y_test
    accuracy = correct_prediction.float().mean()
    print(f"Accuracy: {accuracy.item()}")
Accuracy: 0.9868999719619751
Lab-10-3 visdom
- Visdom
- CNN
import visdom
vis = visdom.Visdom()
Setting up a new session...
# Text
vis.text("Hello World", env="main")
'window_3b81227143b7da'
# Image
vis.image(torch.randn(3, 200, 200))
'window_3b812289ac3150'
# Images
vis.images(torch.randn(3, 3, 28, 28))
'window_3b81229dea2500'
MNIST and CIFAR10
MNIST = dset.MNIST(root="./data/MNIST_data/", train=True, transform=transforms.ToTensor(), download=True)
cifar10 = dset.CIFAR10(root="./data/cifar10/", train=True, transform=transforms.ToTensor(), download=True)
Files already downloaded and verified
data = cifar10.__getitem__(0)
print(data[0].shape)
vis.image(data[0], env="main")
torch.Size([3, 32, 32])
'window_3b81237c4fe020'
data = MNIST.__getitem__(0)
print(data[0].shape)
vis.image(data[0], env="main")
torch.Size([1, 28, 28])
'window_3b8123981c20d2'
Check dataset
data_loader = torch.utils.data.DataLoader(dataset=MNIST, batch_size=32, shuffle=True)
for num, value in enumerate(data_loader):
    value = value[0]
    print(value.shape)
    vis.images(value)
    break
torch.Size([32, 1, 28, 28])
vis.close(env="main")
[WinError 10061] No connection could be made because the target machine actively refused it
on_close() takes 1 positional argument but 3 were given
[WinError 10061] No connection could be made because the target machine actively refused it
on_close() takes 1 positional argument but 3 were given
Line Plot
Y_data = torch.randn(5)
plt = vis.line(Y=Y_data)
X_data = torch.Tensor([1, 2, 3, 4, 5])
plt = vis.line(Y=Y_data, X=X_data)
Line update
Y_append = torch.randn(1)
X_append = torch.Tensor([6])
vis.line(Y=Y_append, X=X_append, win=plt, update="append")
'window_3b8123fec69874'
Lab-10-4-1 ImageFolder1
- Image Folder
Seems similar to splitfolders?

trans = transforms.Compose([
    transforms.Resize((64, 64)),
    transforms.ToTensor()
])
train_data = dset.ImageFolder(root="", transform=trans)
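ImageFolder expects root to point at a directory with one subfolder per class and assigns integer labels from the subfolder names. A minimal usage sketch, assuming root has been set to such a folder (the loader name and batch size here are illustrative, not from the original notes):

custom_loader = torch.utils.data.DataLoader(dataset=train_data, batch_size=8, shuffle=True)
for imgs, labels in custom_loader:
    print(imgs.shape)   # e.g. torch.Size([8, 3, 64, 64]) after the Resize transform
    print(labels)       # class indices derived from the subfolder names
    break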
Lab-10-4-2 ImageFolder2
Lab-10-5 Advanced CNN (VGG)
- VGG
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]

model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}
class VGG(nn.Module):
    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512*7*7, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_classes)
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)

cfg = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],                                        # 8 conv + 3 fc = vgg11
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],                               # 10 conv + 3 fc = vgg13
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],                # 13 conv + 3 fc = vgg16
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], # 16 conv + 3 fc = vgg19
    'custom': [64, 64, 64, 'M', 128, 128, 128, 'M', 256, 256, 256, 'M']
}
conv = make_layers(cfg["custom"], batch_norm=True)
conv
CNN = VGG(make_layers(cfg["custom"]), num_classes=10, init_weights=True)
CNN
VGG(
  (features): Sequential(
    (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (1): ReLU(inplace=True)
    (2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (3): ReLU(inplace=True)
    (4): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (5): ReLU(inplace=True)
    (6): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (7): Conv2d(64, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (8): ReLU(inplace=True)
    (9): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (10): ReLU(inplace=True)
    (11): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (12): ReLU(inplace=True)
    (13): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
    (14): Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (15): ReLU(inplace=True)
    (16): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (17): ReLU(inplace=True)
    (18): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
    (19): ReLU(inplace=True)
    (20): MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1, ceil_mode=False)
  )
  (avgpool): AdaptiveAvgPool2d(output_size=(7, 7))
  (classifier): Sequential(
    (0): Linear(in_features=25088, out_features=4096, bias=True)
    (1): ReLU(inplace=True)
    (2): Dropout(p=0.5, inplace=False)
    (3): Linear(in_features=4096, out_features=4096, bias=True)
    (4): ReLU(inplace=True)
    (5): Dropout(p=0.5, inplace=False)
    (6): Linear(in_features=4096, out_features=10, bias=True)
  )
)
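Note that cfg["custom"] ends at 256 channels, so the features + 7x7 average pool produce 256*7*7 = 12544 values, while the classifier above still starts with Linear(25088, 4096) (sized for 512 channels). Constructing and printing the model works, but a forward pass would fail with a shape mismatch. One possible adjustment (a sketch, not part of the original notes):

# Hypothetical fix: resize the first fully connected layer to match the custom feature output.
custom_vgg = VGG(make_layers(cfg["custom"]), num_classes=10, init_weights=True)
custom_vgg.classifier[0] = nn.Linear(256 * 7 * 7, 4096)   # 256 channels * 7 * 7 from the avgpool
out = custom_vgg(torch.randn(1, 3, 32, 32))
print(out.shape)  # torch.Size([1, 10])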
Lab-10-6-1 Advanced CNN (ResNet-1)
- ResNet
import torch.nn as nn
import torch.utils.model_zoo as model_zoo

__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
           'resnet152']

model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)

def conv1x1(in_planes, out_planes, stride=1):
    return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = conv3x3(planes, planes, stride)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = conv1x1(planes, planes*self.expansion)
        self.bn3 = nn.BatchNorm2d(planes*self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        identity = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            identity = self.downsample(x)

        out += identity
        out = self.relu(out)

        return out
class ResNet(nn.Module):
    # model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)  # resnet50

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),  # e.g. conv1x1(256, 512, 2)
                nn.BatchNorm2d(planes * block.expansion),                  # e.g. BatchNorm2d(512)
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion  # e.g. self.inplanes = 128 * 4
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
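To sanity-check the implementation, the standard configurations can be instantiated and run on a dummy batch (a minimal sketch, not from the original notes; resnet18 corresponds to BasicBlock with [2, 2, 2, 2], resnet50 to Bottleneck with [3, 4, 6, 3]):

resnet18_model = ResNet(BasicBlock, [2, 2, 2, 2])   # 18-layer variant
resnet50_model = ResNet(Bottleneck, [3, 4, 6, 3])   # 50-layer variant
x = torch.randn(1, 3, 224, 224)
print(resnet18_model(x).shape)  # torch.Size([1, 1000])
print(resnet50_model(x).shape)  # torch.Size([1, 1000])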
Lab-10-6-2 Advanced CNN (ResNet-2)
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import visdom
vis = visdom.Visdom()
# vis.close(env="main")
Setting up a new session...
Define value tracker
def value_tracker(value_plot, value, num):
    vis.line(X=num, Y=value, win=value_plot, update="append")
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.manual_seed(777)
if device == "cuda":
    torch.cuda.manual_seed(777)
Transforms Normalize
How to Calculate mean and std in Normalize
transform = transforms.Compose([
    transforms.ToTensor()
])

trainset = torchvision.datasets.CIFAR10(root="./data/cifar10/", train=True, download=True, transform=transform)
print(trainset.data.shape)

train_data_mean = trainset.data.mean(axis=(0, 1, 2))
train_data_std = trainset.data.std(axis=(0, 1, 2))
print("Before scaling to [0, 1]")
print(train_data_mean)
print(train_data_std)

train_data_mean = train_data_mean / 255
train_data_std = train_data_std / 255
print("After scaling to [0, 1]")
print(train_data_mean)
print(train_data_std)
Files already downloaded and verified
(50000, 32, 32, 3)
Before scaling to [0, 1]
[125.30691805 122.95039414 113.86538318]
[62.99321928 62.08870764 66.70489964]
After scaling to [0, 1]
[0.49139968 0.48215841 0.44653091]
[0.24703223 0.24348513 0.26158784]
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize(train_data_mean, train_data_std)
])

trainset = torchvision.datasets.CIFAR10(root="./data/cifar10/", train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=256, shuffle=True, num_workers=4)

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(train_data_mean, train_data_std)
])

testset = torchvision.datasets.CIFAR10(root="./data/cifar10/", train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=256, shuffle=False, num_workers=4)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
Files already downloaded and verified
Files already downloaded and verified
ResNet50
import torchvision.models.resnet as resnet
conv1x1 = resnet.conv1x1
Bottleneck = resnet.Bottleneck
BasicBlock = resnet.BasicBlock
class ResNet(nn.Module):
    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False):
        super(ResNet, self).__init__()
        self.inplanes = 16
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        # self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = self._make_layer(block, 16, layers[0], stride=1)
        self.layer2 = self._make_layer(block, 32, layers[1], stride=1)
        self.layer3 = self._make_layer(block, 64, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 128, layers[3], stride=2)

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(128 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        # x.shape = [1, 16, 32, 32]
        x = self.bn1(x)
        x = self.relu(x)
        # x = self.maxpool(x)

        x = self.layer1(x)
        # x.shape = [1, 64, 32, 32] with Bottleneck blocks (expansion 4)
        x = self.layer2(x)
        # x.shape = [1, 128, 32, 32]
        x = self.layer3(x)
        # x.shape = [1, 256, 16, 16]
        x = self.layer4(x)
        # x.shape = [1, 512, 8, 8]

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
resnet50 = ResNet(resnet.Bottleneck, [3, 4, 6, 3], 10, True).to(device)
# test
a=torch.Tensor(1,3,32,32).to(device)
out = resnet50(a)
print(out)
tensor([[ 0.0265, -0.0315, 0.0268, 0.0229, -0.0438, 0.0290, 0.0302, -0.0332,
0.0009, -0.0227]], device='cuda:0', grad_fn=<AddmmBackward0>)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = torch.optim.SGD(resnet50.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
lr_sche = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
Make Plot
loss_plt = vis.line(Y=torch.Tensor(1).zero_(), opts=dict(title="loss_tracker", legend=["loss"], showlegend=True))
acc_plt = vis.line(Y=torch.Tensor(1).zero_(), opts=dict(title="Accuracy", legend=["Acc"], showlegend=True))
Define acc_check function
def acc_check(net, test_set, epoch, save=1):
    correct = 0
    total = 0
    with torch.no_grad():
        for data in test_set:
            images, labels = data
            images = images.to(device)
            labels = labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    acc = (100 * correct / total)
    print('Accuracy of the network on the 10000 test images: %d %%' % acc)
    if save:
        torch.save(net.state_dict(), "./model_epoch_{}_acc_{}.pth".format(epoch, int(acc)))
    return acc
Train
import warnings
warnings.filterwarnings("ignore")
print(len(trainloader))
epochs = 150

for epoch in range(epochs):  # loop over the dataset multiple times
    running_loss = 0.0
    lr_sche.step()
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data
        inputs = inputs.to(device)
        labels = labels.to(device)

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = resnet50(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        # print statistics
        running_loss += loss.item()
        if i % 30 == 29:  # print every 30 mini-batches
            value_tracker(loss_plt, torch.Tensor([running_loss/30]), torch.Tensor([i + epoch*len(trainloader)]))
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 30))
            running_loss = 0.0

    # Check Accuracy
    acc = acc_check(resnet50, testloader, epoch, save=1)
    value_tracker(acc_plt, torch.Tensor([acc]), torch.Tensor([epoch]))

print('Finished Training')
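Note: recent PyTorch versions expect optimizer.step() to be called before lr_scheduler.step() within an epoch, which is why the warnings filter above is needed when lr_sche.step() sits at the top of the loop. A variant that avoids the warning is sketched below (illustrative only, truncated to one mini-batch per epoch; not part of the original notes):

# Sketch: step the scheduler once per epoch, after the optimizer updates.
for epoch in range(2):                       # illustrative epoch count
    for inputs, labels in trainloader:
        optimizer.zero_grad()
        loss = criterion(resnet50(inputs.to(device)), labels.to(device))
        loss.backward()
        optimizer.step()
        break                                # one mini-batch per epoch, illustration only
    lr_sche.step()                           # stepped after the optimizer, so no warning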
Model Accuracy Testing
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data
        images = images.to(device)
        labels = labels.to(device)
        outputs = resnet50(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
[WinError 10061] No connection could be made because the target machine actively refused it
on_close() takes 1 positional argument but 3 were given
[WinError 10061] No connection could be made because the target machine actively refused it
on_close() takes 1 positional argument but 3 were given
Lab-10-7 Next step of CNN
- Image classification: DenseNet, SENet, MobileNet, SqueezeNet, AutoML (NAS, NASNet)
- Object detection: Ref
- Object tracking: MDNet, GOTURN, CFNet, ROLO, Tracking the Untrackable
- Image segmentation: FCN, U-Net, Mask R-CNN