pytorch学习笔记-高阶篇(卷积神经网络实战3)
本篇主要是卷积神经网络的实战,网络结构主要是resnet,数据集用的是自建pokemon数据集,包含了整个流程,从数据集的处理,训练,测试等。
一、加载数据
数据预处理
- Image Resize
·224×224 for ResNet18 - Data Augmentation
·Rotate
·Crop - Normalize
·Mean,std - ToTensor
import torch
import os
import glob
import random
import csv
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import transforms
from PIL import Image
class Pokemon(Dataset):
    """Custom Pokemon image dataset.

    Expects ``root`` to contain one sub-directory per class. The
    (image path, label) pairs are cached in a csv file inside ``root``
    and split 60/20/20 into train/val/test.
    """

    def __init__(self, root, resize, mode):
        """
        Args:
            root: directory holding one sub-directory of images per class
            resize: target edge length for the returned images
            mode: 'train' / 'val' / 'test' split selector
        """
        super(Pokemon, self).__init__()
        self.root = root
        self.resize = resize
        self.mode = mode

        # Map class name -> integer label, e.g. "squirtle": 0.
        # sorted() makes the mapping deterministic across runs.
        self.name2label = {}
        for name in sorted(os.listdir(os.path.join(root))):
            if not os.path.isdir(os.path.join(root, name)):
                continue
            # Use the current dict size as the next label value.
            self.name2label[name] = len(self.name2label.keys())
        print(self.name2label)
        # e.g. {'bulbasaur': 0, 'charmander': 1, 'mewtwo': 2, 'pikachu': 3, 'squirtle': 4}

        # Full lists of image paths and labels.
        self.images, self.labels = self.load_csv('images.csv')

        # Split the dataset 6:2:2.
        if mode == 'train':  # first 60%
            self.images = self.images[:int(0.6 * len(self.images))]
            self.labels = self.labels[:int(0.6 * len(self.labels))]
        elif mode == 'val':  # 20%: 60% -> 80%
            self.images = self.images[int(0.6 * len(self.images)):int(0.8 * len(self.images))]
            self.labels = self.labels[int(0.6 * len(self.labels)):int(0.8 * len(self.labels))]
        else:  # 20%: 80% -> 100%
            self.images = self.images[int(0.8 * len(self.images)):]
            self.labels = self.labels[int(0.8 * len(self.labels)):]

    def load_csv(self, filename):
        """Load (image path, label) pairs from the csv cache, creating it first if missing."""
        # Bug fix: the original condition was `if os.path.exists(...)`,
        # which crashed on a fresh run (csv read before it ever existed)
        # and re-shuffled the csv on every later run, changing the
        # train/val/test split between runs.
        if not os.path.exists(os.path.join(self.root, filename)):
            images = []
            for name in self.name2label.keys():
                # e.g. 'pokemon\\mewtwo\\00001.png'
                images += glob.glob(os.path.join(self.root, name, '*.png'))
                images += glob.glob(os.path.join(self.root, name, '*.jpg'))
                images += glob.glob(os.path.join(self.root, name, '*.jpeg'))
            print(len(images), images)
            # 1167 ['pokemon\\bulbasaur\\00000000.png'.....

            # Shuffle once here; the 6:2:2 split in __init__ then slices
            # a fixed, already-shuffled order.
            random.shuffle(images)
            with open(os.path.join(self.root, filename), mode='w', newline='') as f:
                writer = csv.writer(f)
                for img in images:
                    # The class name is the second-to-last path component:
                    # pokemon\\bulbasaur\\00000000.png -> 'bulbasaur'
                    name = img.split(os.sep)[-2]
                    label = self.name2label[name]
                    # row: pokemon\\bulbasaur\\00000000.png,0
                    writer.writerow([img, label])
                print('written into csv file:', filename)

        # Read the csv cache back.
        images, labels = [], []
        with open(os.path.join(self.root, filename)) as f:
            reader = csv.reader(f)
            for row in reader:
                # row: pokemon\\bulbasaur\\00000000.png,0
                img, label = row
                label = int(label)
                images.append(img)
                labels.append(label)
        assert len(images) == len(labels)
        return images, labels

    def __len__(self):
        return len(self.images)

    def denormalize(self, x_hat):
        """Invert the Normalize transform — for visualization only."""
        mean = [0.485, 0.456, 0.406]
        std = [0.229, 0.224, 0.225]
        # x_hat = (x - mean) / std  =>  x = x_hat * std + mean
        # x: [c, h, w]; mean: [3] => [3, 1, 1] so broadcasting applies
        mean = torch.tensor(mean).unsqueeze(1).unsqueeze(1)
        std = torch.tensor(std).unsqueeze(1).unsqueeze(1)
        x = x_hat * std + mean
        return x

    def __getitem__(self, idx):
        """Return (image tensor, label tensor) for index ``idx``."""
        # img is a path string like 'pokemon\\bulbasaur\\00000000.png'
        img, label = self.images[idx], self.labels[idx]

        tf = transforms.Compose([
            # string path => PIL image data
            lambda x: Image.open(x).convert('RGB'),
            # Resize slightly larger than the target so the crop below
            # has room to work with.
            transforms.Resize((int(self.resize * 1.25), int(self.resize * 1.25))),
            transforms.RandomRotation(15),
            # center crop down to the requested size
            transforms.CenterCrop(self.resize),
            transforms.ToTensor(),
            # ImageNet statistics; maps values roughly from [0,1] to
            # [-1,1], which is why visualization needs denormalize().
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
        img = tf(img)
        label = torch.tensor(label)
        return img, label
def main():
    """Visual smoke test: sample the dataset and push batches to visdom."""
    import visdom
    import time
    import torchvision

    viz = visdom.Visdom()

    # Alternative approach: torchvision.datasets.ImageFolder builds the
    # same (path, label) mapping automatically from the directory layout:
    #   db = torchvision.datasets.ImageFolder(root='pokemon', transform=transform)
    #   print(db.class_to_idx)
    # Here we exercise the hand-written Dataset instead.
    dataset = Pokemon('pokemon', 64, 'train')

    sample_img, sample_lbl = next(iter(dataset))
    print('sample:', sample_img.shape, sample_lbl.shape, sample_lbl)
    viz.image(dataset.denormalize(sample_img), win='sample_x', opts=dict(title='sample_x'))

    # num_workers=8: fetch images with several worker processes
    loader = DataLoader(dataset, batch_size=32, shuffle=True, num_workers=8)
    for batch_x, batch_y in loader:
        viz.images(dataset.denormalize(batch_x), nrow=8, win='batch', opts=dict(title='batch'))
        viz.text(str(batch_y.numpy()), win='label', opts=dict(title='batch_y'))
        time.sleep(10)


if __name__ == '__main__':
    main()
这是第一次生成的csv文件内容
打开visdom服务
python -m visdom.server
图片加载结果
二、建立模型
- Inherit from base class
- Define forward graph
import torch
from torch import nn
from torch.nn import functional as F
class ResBlk(nn.Module):
    """Basic ResNet block: two 3x3 convs plus a (possibly projected) shortcut."""

    def __init__(self, ch_in, ch_out, stride=1):
        """
        :param ch_in: number of input channels
        :param ch_out: number of output channels
        :param stride: stride of the first conv; h, w shrink as h => h/stride
        """
        super(ResBlk, self).__init__()
        # h, w are reduced by `stride`: h => h/stride
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)

        self.extra = nn.Sequential()
        # Bug fix: the shortcut must also be projected when stride != 1,
        # even if ch_in == ch_out — otherwise the element-wise add below
        # fails on mismatched spatial sizes.
        if ch_out != ch_in or stride != 1:
            # [b, ch_in, h, w] => [b, ch_out, h/stride, w/stride]
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        """
        :param x: [b, ch_in, h, w]
        :return: [b, ch_out, h/stride, w/stride]
        """
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # shortcut: extra maps [b, ch_in, h, w] => [b, ch_out, h', w'],
        # then element-wise add with the main path
        out = self.extra(x) + out
        out = F.relu(out)
        return out
class ResNet18(nn.Module):
    """Compact ResNet-18-style classifier: a stem conv, four residual
    blocks with aggressive strides, and a linear head.

    Sized for 224x224 RGB input, which the strides reduce to a
    [b, 256, 3, 3] feature map before the final linear layer.
    """

    def __init__(self, num_class):
        super(ResNet18, self).__init__()
        # Stem: [b, 3, 224, 224] => [b, 16, 74, 74]
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=3, padding=0),
            nn.BatchNorm2d(16)
        )
        # Four residual blocks. The large strides (3, 3, 2, 2) exist to
        # shrink the initially large spatial dims down to 3x3.
        self.blk1 = ResBlk(16, 32, stride=3)    # [b, 16, .] => [b, 32, .]
        self.blk2 = ResBlk(32, 64, stride=3)    # [b, 32, .] => [b, 64, .]
        self.blk3 = ResBlk(64, 128, stride=2)   # [b, 64, .] => [b, 128, .]
        self.blk4 = ResBlk(128, 256, stride=2)  # [b, 128, .] => [b, 256, 3, 3]
        # Head: flattened [b, 256*3*3] => [b, num_class]
        self.outlayer = nn.Linear(256 * 3 * 3, num_class)

    def forward(self, x):
        """
        :param x: [b, 3, 224, 224]
        :return: logits of shape [b, num_class]
        """
        x = F.relu(self.conv1(x))
        for blk in (self.blk1, self.blk2, self.blk3, self.blk4):
            x = blk(x)
        # after the 4 blocks: [b, 256, 3, 3] => [b, 256*3*3]
        x = x.view(x.size(0), -1)
        return self.outlayer(x)
def main():
    """Shape sanity checks for ResBlk and ResNet18."""
    # Single residual block on a large feature map.
    blk = ResBlk(64, 128)
    out = blk(torch.randn(2, 64, 224, 224))
    print('block:', out.shape)

    # Full network on an ImageNet-sized batch.
    model = ResNet18(5)
    out = model(torch.randn(2, 3, 224, 224))
    print('resnet:', out.shape)

    # Total trainable parameter count.
    p = sum(t.numel() for t in model.parameters())
    print('parameters size:', p)


if __name__ == '__main__':
    main()
三、训练和测试
标准流程:
训练 => 验证(保存最好状态)=> 测试
import torch
from torch import optim
from torch import nn
import visdom
import torchvision
from torch.utils.data import DataLoader
from pokemon import Pokemon
from resnet import ResNet18
# Hyper-parameters.
batchsz = 32
lr = 1e-3
epochs = 10
device = torch.device('cuda')
# Fix the CPU RNG seed so the experiment can be reproduced next run.
torch.manual_seed(1234)
# 60/20/20 split handled inside the Pokemon dataset; 224 matches ResNet input.
train_db = Pokemon('pokemon', 224, mode='train')
val_db = Pokemon('pokemon', 224, mode='val')
test_db = Pokemon('pokemon', 224, mode='test')
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True, num_workers=4)
val_loader = DataLoader(val_db, batch_size=batchsz, shuffle=True, num_workers=2)
test_loader = DataLoader(test_db, batch_size=batchsz, shuffle=True, num_workers=2)
# Requires a running visdom server: python -m visdom.server
viz = visdom.Visdom()
def evalute(model, loader):
    """Return classification accuracy of `model` over `loader`.

    Fix: switch the model to eval mode for the measurement so
    BatchNorm uses its running statistics (and is not polluted by the
    validation batches), then restore the previous mode.
    """
    was_training = model.training
    model.eval()
    correct = 0
    total = len(loader.dataset)
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        with torch.no_grad():
            logits = model(x)
            pred = logits.argmax(dim=1)
        correct += torch.eq(pred, y).sum().float().item()
    if was_training:
        model.train()
    return correct / total
def main():
    """Train ResNet18 on the Pokemon dataset, checkpointing the best
    validation accuracy, then evaluate the best checkpoint on the test set."""
    model = ResNet18(5).to(device)
    optimizer = optim.Adam(model.parameters(), lr=lr)
    # CrossEntropyLoss takes raw logits (it applies log-softmax internally).
    criteon = nn.CrossEntropyLoss()

    # Bug fix: the original initialized a misspelled `best_opoch`, and
    # inside the improvement branch assigned `best_epoch = val_acc`
    # instead of `best_acc = val_acc` — so best_acc stayed 0, every
    # evaluation overwrote the checkpoint, and the final report was wrong.
    best_acc, best_epoch = 0, 0
    global_step = 0
    viz.line([0], [-1], win='loss', opts=dict(title='loss'))
    viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
    for epoch in range(epochs):
        for step, (x, y) in enumerate(train_loader):
            # x: [b, 3, 224, 224], y: [b]
            x, y = x.to(device), y.to(device)
            logits = model(x)
            loss = criteon(logits, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            viz.line([loss.item()], [global_step], win='loss', update='append')
            global_step += 1

        # Validate every other epoch; keep only the best checkpoint.
        if epoch % 2 == 0:
            val_acc = evalute(model, val_loader)
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc
                torch.save(model.state_dict(), 'best.md1')
            viz.line([val_acc], [global_step], win='val_acc', update='append')

    print('best acc: ', best_acc, 'best_epoch: ', best_epoch)

    # Restore the best checkpoint before testing.
    model.load_state_dict(torch.load('best.md1'))
    print('loaded from ckpt!')

    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)


if __name__ == '__main__':
    main()
四、迁移学习
以图示的一个二分类问题为例,要把图示的圆形和矩形分成两类,但显然根据已有的有限的数据集(图二),可以作出的划分有多种,但很明显中间的那种应该是比较理想的。
但是通过经验积累我们可能以前做过梯形和五边形的分类(图三),这种分类和我们现在要做的圆形和矩形的分类难免或者或多或少存在一些可以借鉴的地方,可以借以前学习的(或者一些通用的)经验来帮助现在的训练学习过程。这就是迁移学习
import torch
from torch import optim
from torch import nn
import visdom
import torchvision
from torch.utils.data import DataLoader
from pokemon import Pokemon
# from resnet import ResNet18
# 用torchvision 提供的resnet18可以直接使用训练好的model
from torchvision.models import resnet18
from utils import Flatten
# Hyper-parameters.
batchsz = 32
lr = 1e-3
epochs = 10
device = torch.device('cuda')
# Fix the CPU RNG seed so the experiment can be reproduced next run.
torch.manual_seed(1234)
# Same 60/20/20 split as before; 224 matches the pretrained resnet18 input size.
train_db = Pokemon('pokemon', 224, mode='train')
val_db = Pokemon('pokemon', 224, mode='val')
test_db = Pokemon('pokemon', 224, mode='test')
train_loader = DataLoader(train_db, batch_size=batchsz, shuffle=True, num_workers=4)
val_loader = DataLoader(val_db, batch_size=batchsz, shuffle=True, num_workers=2)
test_loader = DataLoader(test_db, batch_size=batchsz, shuffle=True, num_workers=2)
# Requires a running visdom server: python -m visdom.server
viz = visdom.Visdom()
def evalute(model, loader):
    """Return classification accuracy of `model` over `loader`.

    Fix: switch the model to eval mode for the measurement so
    BatchNorm uses its running statistics (and is not polluted by the
    validation batches), then restore the previous mode.
    """
    was_training = model.training
    model.eval()
    correct = 0
    total = len(loader.dataset)
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        with torch.no_grad():
            logits = model(x)
            pred = logits.argmax(dim=1)
        correct += torch.eq(pred, y).sum().float().item()
    if was_training:
        model.train()
    return correct / total
def main():
    """Transfer learning: reuse an ImageNet-pretrained resnet18 trunk
    and train a new 5-way classification head on the Pokemon dataset."""
    # Take every layer except the final fc of the pretrained network.
    trained_model = resnet18(pretrained=True)
    model = nn.Sequential(*list(trained_model.children())[:-1],  # [b, 512, 1, 1]
                          # [b, 512, 1, 1] => [b, 512]
                          Flatten(),
                          nn.Linear(512, 5)
                          ).to(device)
    # quick shape check:
    # x = torch.randn(2, 3, 224, 224); print(model(x).shape)

    optimizer = optim.Adam(model.parameters(), lr=lr)
    # CrossEntropyLoss takes raw logits (it applies log-softmax internally).
    criteon = nn.CrossEntropyLoss()

    # Bug fix: was the misspelled `best_opoch`, which left `best_epoch`
    # undefined until (unless) the first improvement happened.
    best_acc, best_epoch = 0, 0
    global_step = 0
    viz.line([0], [-1], win='loss', opts=dict(title='loss'))
    viz.line([0], [-1], win='val_acc', opts=dict(title='val_acc'))
    for epoch in range(epochs):
        for step, (x, y) in enumerate(train_loader):
            # x: [b, 3, 224, 224], y: [b]
            x, y = x.to(device), y.to(device)
            logits = model(x)
            loss = criteon(logits, y)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            viz.line([loss.item()], [global_step], win='loss', update='append')
            global_step += 1

        # Validate every other epoch; keep only the best checkpoint.
        if epoch % 2 == 0:
            val_acc = evalute(model, val_loader)
            if val_acc > best_acc:
                best_epoch = epoch
                best_acc = val_acc
                torch.save(model.state_dict(), 'best.md1')
            viz.line([val_acc], [global_step], win='val_acc', update='append')

    print('best acc: ', best_acc, 'best_epoch: ', best_epoch)

    # Restore the best checkpoint before testing.
    model.load_state_dict(torch.load('best.md1'))
    print('loaded from ckpt!')

    test_acc = evalute(model, test_loader)
    print('test acc:', test_acc)


if __name__ == '__main__':
    main()
visdom可视化结果
本博客所有文章除特别声明外,均采用 CC BY-NC-SA 4.0 许可协议。转载请注明来自 不听话的兔子君!