Image Data Processing with CNN training


Pre: Start building a folder

Because the original data is messy, we create a new dataset path ./data and copy the train set from the original data into it.

!mkdir data
!cp ./datasets/meowmeowmeowmeowmeow-gtsrb-german-traffic-sign-momodel/train ./data/train_images -r

Slicing data

每个图片名称前六位表示类别,中间6位数表示组数,后6位表示组内序号,一组有30张图片 (In each image file name, the leading characters encode the class, the middle digits the group number, and the trailing digits the index within the group; each group contains 30 images.)

Divide the first three groups into validation sets

def initialize_data(folder):
    """Split a validation set out of the downloaded training images.

    For every class directory under ``<folder>/train_images``, moves the
    images of the first three groups (group ids 00000, 00001, 00002) into a
    freshly created ``<folder>/val_images/<class>`` directory.  Idempotent:
    if ``val_images`` already exists, nothing is done.

    :param folder: dataset root containing a ``train_images`` directory
    """
    import os

    train_folder = os.path.join(folder, 'train_images')
    val_folder = os.path.join(folder, 'val_images')
    if os.path.isdir(val_folder):
        return  # validation split was already built on a previous run
    print(val_folder + ' not found, making a validation set')
    os.mkdir(val_folder)
    for class_dir in os.listdir(train_folder):
        os.mkdir(os.path.join(val_folder, class_dir))
        src_dir = os.path.join(train_folder, class_dir)
        for f in os.listdir(src_dir):
            # File names look like CCCCC_GGGGG_NNNNN.*; f[6:11] is the
            # 5-digit group id.  Move the first three groups to validation.
            if f[6:11] in ('00000', '00001', '00002'):
                os.rename(os.path.join(src_dir, f),
                          os.path.join(val_folder, class_dir, f))
# Build the ./data/val_images split once, right after the copy step above.
data_path = "./data"
initialize_data(data_path)

Data Augmentation

import os
import torchvision.transforms as transforms
# Training-time augmentation: resize to 256x256, random-shift via padded
# random crop, normalize with channel statistics collected from the
# training set, then erase one random square patch (Cutout).
data_transforms = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.RandomCrop(256, padding=32),  # pad 32px on each side, then crop a random 256x256 window (random shift up to 32px)
    transforms.ToTensor(),
    transforms.Normalize((0.3337, 0.3064, 0.3171), ( 0.2672, 0.2564, 0.2629)),
    Cutout(n_holes=1, length=16)  # NOTE(review): Cutout is not defined in this chunk -- presumably defined/imported elsewhere; confirm
])

# Evaluation transform: deterministic resize plus the same normalization
# statistics as the training pipeline (no augmentation).
val_transforms = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize((0.3337, 0.3064, 0.3171), ( 0.2672, 0.2564, 0.2629)),
])

# Resize, randomly jitter image brightness, and normalize.
data_jitter_brightness = transforms.Compose([
    transforms.Resize((256, 256)),
    # brightness=5 draws a brightness factor uniformly from [0, 6] --
    # a very aggressive jitter; NOTE(review): confirm this range is intended.
    transforms.ColorJitter(brightness=5),
    transforms.ToTensor(),
    transforms.Normalize((0.3337, 0.3064, 0.3171), ( 0.2672, 0.2564, 0.2629))
])

Processing data

Pay attention to which transform is applied to each loader.

Also, a bad `num_workers` value can cause errors, so keep an eye on it.

def processing_data(data_path, batch_size, use_gpu):
    """Build the training and validation DataLoaders.

    The training loader concatenates two views of the same image folder:
    one with the standard augmentation (``data_transforms``) and one with
    brightness jitter (``data_jitter_brightness``), doubling the epoch.

    :param data_path: dataset root containing train_images/ and val_images/
    :param batch_size: mini-batch size for both loaders
    :param use_gpu: when True, pin host memory for faster host->GPU copies
    :return: (train_loader, val_loader)
    """
    import torch
    from torchvision import datasets

    # ImageFolder assigns labels by lexicographic folder order
    # (0, 1, 10, 11, ..., 2, 20, ...).  Build -- once, not per sample --
    # a lookup that maps that lexicographic index back to the real
    # numeric class id, and use it as the target_transform.
    lex_sorted = sorted(str(i) for i in range(43))
    lex_to_int = {idx: int(name) for idx, name in enumerate(lex_sorted)}

    def char_order2int_order(charorder):
        # Per-sample label fix-up: lexicographic index -> numeric class id.
        return lex_to_int[charorder]

    train_set = torch.utils.data.ConcatDataset([
        datasets.ImageFolder(data_path + '/train_images',
                             transform=data_transforms,
                             target_transform=char_order2int_order),
        datasets.ImageFolder(data_path + '/train_images',
                             transform=data_jitter_brightness,
                             target_transform=char_order2int_order),
    ])
    train_loader = torch.utils.data.DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=True,
        num_workers=4,  # NOTE(review): lower this if worker startup errors occur
        pin_memory=use_gpu,
    )

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(data_path + '/val_images',
                             transform=val_transforms,
                             target_transform=char_order2int_order),
        batch_size=batch_size,
        shuffle=False,
        num_workers=4,
        pin_memory=use_gpu,
    )
    return train_loader, val_loader

Define a net and Train it

Take a simple CNN as an example.

This CNN is adapted from VGG, but with a smaller depth.

import torch.nn as nn
import torch.nn.functional as F

nclasses = 43  # GTSRB has 43 traffic-sign classes


class Net(nn.Module):
    """Small VGG-style CNN: four conv blocks followed by three FC layers.

    The conv stack was sized for 3x32x32 inputs (512x2x2 = 2048 features
    before the classifier).  The data pipeline above, however, feeds
    256x256 images, for which ``x.view(-1, 2048)`` silently multiplied the
    batch dimension by 64 and broke the loss computation.  An adaptive
    average pool now forces the feature map to 512x2x2 for any input size;
    it is the identity for 32x32 inputs, so existing behavior and the
    state_dict layout are unchanged (the pool has no parameters).
    """

    def __init__(self):
        super(Net, self).__init__()

        # block 1:  3 x H x W --> 64 x H/2 x W/2
        self.conv1a = nn.Conv2d(3,   64,  kernel_size=3, padding=1)
        self.conv1b = nn.Conv2d(64,  64,  kernel_size=3, padding=1)
        self.pool1  = nn.MaxPool2d(2, 2)

        # block 2:  64 --> 128 channels, spatial size halved
        self.conv2a = nn.Conv2d(64,  128, kernel_size=3, padding=1)
        self.conv2b = nn.Conv2d(128, 128, kernel_size=3, padding=1)
        self.pool2  = nn.MaxPool2d(2, 2)

        # block 3:  128 --> 256 channels, spatial size halved
        self.conv3a = nn.Conv2d(128, 256, kernel_size=3, padding=1)
        self.conv3b = nn.Conv2d(256, 256, kernel_size=3, padding=1)
        self.pool3  = nn.MaxPool2d(2, 2)

        # block 4:  256 --> 512 channels, spatial size halved
        self.conv4a = nn.Conv2d(256, 512, kernel_size=3, padding=1)
        self.pool4  = nn.MaxPool2d(2, 2)

        # Force 512 x 2 x 2 regardless of input resolution (identity for
        # a 32x32 input, whose feature map is already 2x2 here).
        self.adapt  = nn.AdaptiveAvgPool2d((2, 2))

        # classifier:  512 x 2 x 2 --> 2048 --> 4096 --> 4096 --> nclasses
        self.linear1 = nn.Linear(2048, 4096)
        self.linear2 = nn.Linear(4096, 4096)
        self.linear3 = nn.Linear(4096, nclasses)

    def forward(self, x):
        """Return per-class logits of shape (batch, nclasses)."""
        x = self.pool1(F.relu(self.conv1b(F.relu(self.conv1a(x)))))
        x = self.pool2(F.relu(self.conv2b(F.relu(self.conv2a(x)))))
        x = self.pool3(F.relu(self.conv3b(F.relu(self.conv3a(x)))))
        x = self.pool4(F.relu(self.conv4a(x)))

        x = self.adapt(x)            # 512 x 2 x 2 for any input size
        x = x.view(x.size(0), -1)    # keep the batch dimension intact
        x = F.relu(self.linear1(x))
        x = F.relu(self.linear2(x))
        x = self.linear3(x)          # raw logits; CrossEntropyLoss applies softmax

        return x

import torch.optim as optim
import torchvision


import numpy as np
from tqdm import tqdm
def train(epoch):
    """Run one training epoch over the global ``train_loader``.

    Reads module-level globals: model, optimizer, train_loader, use_gpu.

    :param epoch: epoch number (unused here; kept for the caller's loop)
    :return: (average loss per sample, accuracy %) -- both torch tensors,
             matching what the training script expects (.cpu() is called
             on them by the caller)
    """
    model.train()
    correct = 0
    training_loss = 0
    criterion = nn.CrossEntropyLoss()  # build once, not per batch
    for batch_idx, (data, target) in enumerate(tqdm(train_loader)):
        data, target = Variable(data), Variable(target)
        if use_gpu:
            data = data.cuda()
            target = target.cuda()
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        max_index = output.max(dim=1)[1]
        correct += (max_index == target).sum()
        # detach: accumulating the live loss tensor would keep every
        # batch's autograd graph alive for the whole epoch (memory leak)
        training_loss += loss.detach()
    # NOTE(review): this divides a sum of per-batch *mean* losses by the
    # dataset size (kept as in the original) -- the printed "average loss"
    # is therefore scaled down by the batch size.
    print('\nTraining set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        training_loss / len(train_loader.dataset), correct, len(train_loader.dataset),
        100. * correct / len(train_loader.dataset)))
    return training_loss / len(train_loader.dataset), 100. * correct / len(train_loader.dataset)


def validation():
    """Evaluate the model on the global ``val_loader`` without gradients.

    Reads module-level globals: model, val_loader, use_gpu.

    :return: (average loss per sample as a float, accuracy % as a CPU tensor)
    """
    from torch.autograd import Variable
    model.eval()
    validation_loss = 0
    correct = 0
    criterion = nn.CrossEntropyLoss()  # build once, not per batch
    for batch_idx, (data, target) in enumerate(tqdm(val_loader)):
        with torch.no_grad():
            data, target = Variable(data), Variable(target)
            if use_gpu:
                data = data.cuda()
                target = target.cuda()
            output = model(data)
            validation_loss += criterion(output, target).item()  # sum of per-batch mean losses
            pred = output.data.max(1, keepdim=True)[1]  # index of the max logit
            correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    # NOTE(review): as in train(), a sum of per-batch means is divided by
    # the dataset size (kept for consistency with the original output).
    print('\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        validation_loss/len(val_loader.dataset), correct, len(val_loader.dataset),
        100. * correct / len(val_loader.dataset)))
    return validation_loss/len(val_loader.dataset), 100. * correct / len(val_loader.dataset)
# import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
import numpy as np
import time

#参数设置
class args:
    """Static hyper-parameter namespace (stands in for an argparse result)."""
    data = "./data"      # dataset root directory
    seed = 1             # torch RNG seed for reproducibility
    epochs = 2           # number of training epochs
    batch_size = 64      # mini-batch size for both loaders
    lr = 0.0001          # Adam learning rate
    log_interval = 10    # unused in the visible script; kept for compatibility
    
torch.manual_seed(args.seed)  # reproducible init and shuffling

# Choose the device once; use_gpu is read as a global by train()/validation().
if torch.cuda.is_available():
    use_gpu = True
    print("Using GPU")
else:
	use_gpu = False
	print("Using CPU")

train_loader,val_loader=processing_data(args.data,args.batch_size,use_gpu)
model = Net() # produces nclasses output logits per image

# Optimizer and LR schedule: Adam over trainable parameters; halve the LR
# after 5 stagnant epochs.  NOTE(review): scheduler.step() is never called
# below (the call inside validation() is commented out), so the schedule
# currently has no effect -- confirm whether that is intended.
optimizer = optim.Adam(filter(lambda p: p.requires_grad,model.parameters()),lr=args.lr)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min',patience=5,factor=0.5,verbose=True)


# Per-epoch history, pickled at the end for later plotting.
res={"loss":[],"val_loss":[],"accuracy":[],"val_accuracy":[]}
# Training loop
start = time.time()
if use_gpu:
    model.cuda()
  
for epoch in range(1, args.epochs + 1):
    loss,acc=train(epoch)
    res["loss"].append(loss.cpu())  # train() returns tensors; move to CPU before pickling
    res["accuracy"].append(acc.cpu())
    loss,acc=validation()  # returns a float loss and a CPU-tensor accuracy
    res["val_loss"].append(loss)
    res["val_accuracy"].append(acc)
    # Checkpoint after every epoch (legacy serialization for old torch.load).
    model_file = 'results/model_' + str(epoch) + '.pth'
    torch.save(model.state_dict(), model_file,_use_new_zipfile_serialization=False)
    print('\nSaved model to ' + model_file )
          
print("模型训练总时长:",time.time()-start)
import pickle
with open("./results/res","wb") as f:
    pickle.dump(res,f)

Author: cipher
Reprint policy: Unless otherwise stated, all articles in this blog are licensed under the CC BY 4.0 reprint policy. If reproduced, please credit the source: cipher!
  TOC