Basic CNN

Basic CNN

基本概念

  1. 卷积:保留输入的空间特征
  2. 下采样:通道数不变,改变特征层的高度和宽度。目的就是减少数据量
  3. 全连接层:映射指定的特征维度

图像:$C\times W \times H$

卷积过程

单通道:卷积核与对应输入对应元素相乘求和,得到一个元素,然后从左至右,从上至下移动。
多通道:每个通道与对应卷积核做单通道运算,最终按元素求和;此外若想输出多个通道的特征层,则需要多个卷积核,可以增加通道数。

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
# -*- coding: UTF-8 -*-
# Demo: a basic Conv2d layer mapping 5 input channels to 10 output channels.
import torch

batch_size = 1
kernel_size = 3
width, height = 100, 100
in_channels, out_channels = 5, 10

# Random NCHW tensor; named `x` to avoid shadowing the builtin `input`.
x = torch.randn(batch_size, in_channels, width, height)
conv_layer = torch.nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size)
out_put = conv_layer(x)

print(x.shape)                   # (1, 5, 100, 100)
print(out_put.shape)             # (1, 10, 98, 98): no padding, H/W shrink by kernel_size - 1
print(conv_layer.weight.shape)   # (10, 5, 3, 3): (out_ch, in_ch, kH, kW)

Padding
$5\times 5$的输入与大小为$3\times 3$的卷积核得到的输出为$3\times 3$,若希望输出大小与输入保持不变则可以使用padding = 1进行填充。
$Output = \left\lfloor \frac{Input - Kernel + 2\times Padding}{Stride} \right\rfloor + 1$(除法向下取整)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# -*- coding: UTF-8 -*-
# Demo: padding=1 keeps a 5x5 input at 5x5 output with a 3x3 kernel.
import torch

# Named `x` to avoid shadowing the builtin `input`.
x = torch.Tensor([3, 4, 5, 6, 7,
                  2, 4, 6, 8, 2,
                  1, 6, 7, 8, 4,
                  9, 7, 4, 6, 2,
                  3, 7, 5, 4, 1]).view(1, 1, 5, 5)  # (N, C, H, W)
conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, padding=1, bias=False)

# Fixed weights (1..9) so the output is fully deterministic.
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
conv_layer.weight.data = kernel.data

output = conv_layer(x)
print(output)

Stride
使用步长(stride)参数可以减小输出特征图的宽度和高度
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
# -*- coding: UTF-8 -*-
# Demo: stride=2 shrinks the output: (5 - 3) // 2 + 1 = 2, so a 2x2 feature map.
import torch

# Named `x` to avoid shadowing the builtin `input`.
x = torch.Tensor([3, 4, 5, 6, 7,
                  2, 4, 6, 8, 2,
                  1, 6, 7, 8, 4,
                  9, 7, 4, 6, 2,
                  3, 7, 5, 4, 1]).view(1, 1, 5, 5)  # (N, C, H, W)
conv_layer = torch.nn.Conv2d(1, 1, kernel_size=3, stride=2, bias=False)

# Fixed weights (1..9) so the output is fully deterministic.
kernel = torch.Tensor([1, 2, 3, 4, 5, 6, 7, 8, 9]).view(1, 1, 3, 3)
conv_layer.weight.data = kernel.data

output = conv_layer(x)
print(output)

MaxPooling
下采样,不改变输入的通道数
1
2
3
4
5
6
7
8
9
10
11
12
# -*- coding: UTF-8 -*-
import torch

# 2x2 max pooling halves height and width while keeping the channel count.
values = [3, 4, 5, 6,
          2, 4, 6, 8,
          1, 6, 7, 8,
          9, 7, 4, 6]
feature_map = torch.Tensor(values).view(1, 1, 4, 4)
maxpooling_layer = torch.nn.MaxPool2d(kernel_size=2)

output = maxpooling_layer(feature_map)
print(output)

MNIST

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    """Small CNN for MNIST: two conv+pool+ReLU stages and one linear layer.

    Input:  (N, 1, 28, 28) grayscale images.
    Output: (N, 10) raw class scores (logits; pair with CrossEntropyLoss).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)    # 28x28 -> 24x24
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)   # 12x12 -> 8x8
        self.pooling = nn.MaxPool2d(2)                  # halves H and W
        self.fc = nn.Linear(320, 10)                    # 20 * 4 * 4 = 320 features

    def forward(self, x):
        batch_size = x.size(0)
        x = F.relu(self.pooling(self.conv1(x)))
        x = F.relu(self.pooling(self.conv2(x)))
        x = x.view(batch_size, -1)   # flatten to (N, 320)
        return self.fc(x)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
import sys

import torch
import torch.nn as nn
import torch.optim as optim

from tqdm import tqdm

from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader

from model1 import Net

# Convert pixels to tensors, then standardize with the MNIST mean/std.
# Note the trailing comma on the std: it must be a one-element tuple,
# not the bare float that `(0.3081)` evaluates to.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

batch_size = 512

train_dataset = datasets.MNIST(root='./dataset/', train=True, download=False, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)

test_dataset = datasets.MNIST(root='./dataset/', train=False, download=False, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)

model = Net()
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

epochs = 1000
test_num = len(test_dataset)
train_steps = len(train_loader)
for epoch in range(epochs):
    # --- training phase ---
    model.train()
    running_loss = 0.0
    train_bar = tqdm(train_loader, file=sys.stdout)
    for step, data in enumerate(train_bar):
        inputs, targets = data

        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_function(outputs, targets)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        train_bar.desc = 'train epoch[{}/{}] loss:{:.3f}'.format(epoch + 1, epochs, loss.item())

    # --- evaluation phase ---
    model.eval()
    acc = 0.0
    with torch.no_grad():
        test_bar = tqdm(test_loader, file=sys.stdout)
        for test_datas in test_bar:
            test_inputs, test_targets = test_datas
            test_outputs = model(test_inputs)
            # argmax over the class dimension gives the predicted label
            predict_y = torch.max(test_outputs, dim=1)[1]
            acc += torch.eq(predict_y, test_targets).sum().item()
    val_acc = acc / test_num
    print('[epoch %d] train_loss: %.3f val_acc: %.3f' % (epoch + 1, running_loss / train_steps, val_acc))

使用GPU训练模型

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
import sys

import torch
import torch.nn as nn
import torch.optim as optim

from tqdm import tqdm

from torchvision import transforms
from torchvision import datasets
from torch.utils.data import DataLoader

from model1 import Net

# Convert pixels to tensors, then standardize with the MNIST mean/std.
# Note the trailing comma on the std: it must be a one-element tuple,
# not the bare float that `(0.3081)` evaluates to.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])

batch_size = 512

train_dataset = datasets.MNIST(root='./dataset/', train=True, download=False, transform=transform)
train_loader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size)

test_dataset = datasets.MNIST(root='./dataset/', train=False, download=False, transform=transform)
test_loader = DataLoader(test_dataset, shuffle=False, batch_size=batch_size)

# Train on the GPU when one is available; otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = Net()
model.to(device)
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)

epochs = 1000
test_num = len(test_dataset)
train_steps = len(train_loader)
for epoch in range(epochs):
    # --- training phase ---
    model.train()
    running_loss = 0.0
    train_bar = tqdm(train_loader, file=sys.stdout)
    for step, data in enumerate(train_bar):
        inputs, targets = data
        # Batches must live on the same device as the model.
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = loss_function(outputs, targets)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        train_bar.desc = 'train epoch[{}/{}] loss:{:.3f}'.format(epoch + 1, epochs, loss.item())

    # --- evaluation phase ---
    model.eval()
    acc = 0.0
    with torch.no_grad():
        test_bar = tqdm(test_loader, file=sys.stdout)
        for test_datas in test_bar:
            test_inputs, test_targets = test_datas
            test_inputs, test_targets = test_inputs.to(device), test_targets.to(device)
            test_outputs = model(test_inputs)
            # argmax over the class dimension gives the predicted label
            predict_y = torch.max(test_outputs, dim=1)[1]
            acc += torch.eq(predict_y, test_targets).sum().item()
    val_acc = acc / test_num
    print('[epoch %d] train_loss: %.3f val_acc: %.3f' % (epoch + 1, running_loss / train_steps, val_acc))
Donate comment here