2023年6月21日发(作者:)
MobileNetV1实战:使⽤MobileNetV1实现植物幼苗分类⽂章⽬录摘要本例提取了植物幼苗数据集中的部分数据做数据集,数据集共有12种类别,演⽰如何使⽤pytorch版本的MobileNetV1图像分类模型实现分类任务。通过本⽂你和学到:1、如何⾃定义MobileNetV1模型。2、如何⾃定义数据集加载⽅式?3、如何使⽤Cutout数据增强?4、如何使⽤Mixup数据增强。5、如何实现训练和验证。6、预测的两种写法。MobileNetV1的论⽂翻译:MobileNetV1解析:Keras版本:数据增强Cutout和Mixup为了提⾼成绩我在代码中加⼊Cutout和Mixup这两种增强⽅式。实现这两种增强需要安装torchtoolbox。安装命令:pip install torchtoolboxCutout实现,在transforms中。from orm import Cutout#
数据预处理transform = e([ ((224, 224)), Cutout(), or(), ize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])Mixup实现,在train⽅法中。需要导⼊包:from import mixup_data, mixup_criterion for batch_idx, (data, target) in enumerate(train_loader): data, target = (device, non_blocking=True), (device, non_blocking=True) data, labels_a, labels_b, lam = mixup_data(data, target, alpha) _grad() output = model(data) loss = mixup_criterion(criterion, output, labels_a, labels_b, lam) rd() () print_loss = ()项⽬结构MobileNetV1_demo├─data│ └─train│ ├─Black-grass│ ├─Charlock│ ├─Cleavers│ ├─Common Chickweed│ ├─Common wheat│ ├─Fat Hen│ ├─Loose Silky-bent│ ├─Maize│ ├─Scentless Mayweed│ ├─Shepherds Purse│ ├─Small-flowered Cranesbill│ └─Sugar beet├─dataset│ └─└─models│ └─├─├─└─导⼊项⽬使⽤的库import as optimimport torchimport as nnimport elimport port butedimport orms as transformsfrom t import SeedlingDatafrom ad import Variablefrom netv1 import MobileNetV1from import mixup_data, mixup_criterionfrom orm import Cutout设置全局参数设置学习率、BatchSize、epoch等参数,判断环境中是否存在GPU,如果没有则使⽤CPU。建议使⽤GPU,CPU太慢了。#
设置全局参数
modellr = 1e-4
BATCH_SIZE = 16
EPOCHS = 300
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

图像预处理与增强

数据处理比较简单,加入了Cutout、做了Resize和归一化。

#
数据预处理transform = e([ ((224, 224)), Cutout(), or(), ize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])transform_test = e([ ((224, 224)), or(), ize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])读取数据将数据集解压后放到data⽂件夹下⾯,如图:然后我们在dataset⽂件夹下⾯新建 和,在⽂件夹写⼊下⾯的代码:# coding:utf8import osfrom PIL import Imagefrom import datafrom torchvision import transforms as Tfrom _selection import train_test_split
Labels = {'Black-grass': 0, 'Charlock': 1, 'Cleavers': 2, 'Common Chickweed': 3, 'Common wheat': 4, 'Fat Hen': 5, 'Loose Silky-bent': 6, 'Maize': 7, 'Scentless Mayweed': 8, 'Shepherds Purse': 9, 'Small-flowered Cranesbill': 10, 'Sugar beet': 11}
class SeedlingData(data.Dataset):
    def __init__(self, root, transforms=None, train=True, test=False):
        """
        主要目标: 获取所有图片的地址,并根据训练,验证,测试划分数据
        """
        self.test = test
        self.transforms = transforms
        if self.test:
            imgs = [os.path.join(root, img) for img in os.listdir(root)]
            self.imgs = imgs
        else:
            imgs_labels = [os.path.join(root, img) for img in os.listdir(root)]
            imgs = []
            for imglable in imgs_labels:
                for imgname in os.listdir(imglable):
                    imgpath = os.path.join(imglable, imgname)
                    imgs.append(imgpath)
            trainval_files, val_files = train_test_split(imgs, test_size=0.3, random_state=42)
            if train:
                self.imgs = trainval_files
            else:
                self.imgs = val_files
    def __getitem__(self, index):
        """
        一次返回一张图片的数据
        """
        img_path = self.imgs[index]
        img_path = img_path.replace("\\", '/')
        if self.test:
            label = -1
        else:
            labelname = img_path.split('/')[-2]
            label = Labels[labelname]
        data = Image.open(img_path).convert('RGB')
        data = self.transforms(data)
        return data, label
def __len__(self): return len()说⼀下代码的核⼼逻辑:第⼀步 建⽴字典,定义类别对应的ID,⽤数字代替类别。第⼆步 在__init__⾥⾯编写获取图⽚路径的⽅法。测试集只有⼀层路径直接读取,训练集在train⽂件夹下⾯是类别⽂件夹,先获取到类别,再获取到具体的图⽚路径。然后使⽤sklearn中切分数据集的⽅法,按照7:3的⽐例切分训练集和验证集。第三步 在__getitem__⽅法中定义读取单个图⽚和类别的⽅法,由于图像中有位深度32位的,所以我在读取图像的时候做了转换。然后我们在调⽤SeedlingData读取数据 ,记着导⼊刚才写的(from t import SeedlingData)dataset_train = SeedlingData('data/train', transforms=transform, train=True)dataset_test = SeedlingData("data/train", transforms=transform_test, train=False)#
读取数据
print(dataset_train.imgs)
#
导入数据
train_loader = torch.utils.data.DataLoader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset_test, batch_size=BATCH_SIZE, shuffle=False)

设置模型

设置loss函数为nn.CrossEntropyLoss()。设置模型为MobileNetV1,num_classes设置为12。优化器设置为adam。学习率调整策略选择为余弦退火。

#
实例化模型并且移动到GPU
criterion = nn.CrossEntropyLoss()
model_ft = MobileNetV1(num_classes=12)
model_ft.to(DEVICE)
#
选择简单暴力的Adam优化器,学习率调低
optimizer = optim.Adam(model_ft.parameters(), lr=modellr)
cosine_schedule = optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=20, eta_min=1e-9)

定义训练和验证函数

#
定义训练过程
alpha = 0.2

def train(model, device, train_loader, optimizer, epoch):
    model.train()
    sum_loss = 0
    total_num = len(train_loader.dataset)
    print(total_num, len(train_loader))
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device, non_blocking=True), target.to(device, non_blocking=True)
        data, labels_a, labels_b, lam = mixup_data(data, target, alpha)
        optimizer.zero_grad()
        output = model(data)
        loss = mixup_criterion(criterion, output, labels_a, labels_b, lam)
        loss.backward()
        optimizer.step()
        lr = optimizer.state_dict()['param_groups'][0]['lr']
        print_loss = loss.data.item()
        sum_loss += print_loss
        if (batch_idx + 1) % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR:{:.9f}'.format(
                epoch, (batch_idx + 1) * len(data), len(train_loader.dataset),
                100. * (batch_idx + 1) / len(train_loader), loss.item(), lr))
    ave_loss = sum_loss / len(train_loader)
    print('epoch:{},loss:{}'.format(epoch, ave_loss))

ACC = 0
#
验证过程
def val(model, device, test_loader):
    global ACC
    model.eval()
    test_loss = 0
    correct = 0
    total_num = len(test_loader.dataset)
    print(total_num, len(test_loader))
    with torch.no_grad():
        for data, target in test_loader:
            data, target = Variable(data).to(device), Variable(target).to(device)
            output = model(data)
            loss = criterion(output, target)
            _, pred = torch.max(output.data, 1)
            correct += torch.sum(pred == target)
            print_loss = loss.data.item()
            test_loss += print_loss
        correct = correct.data.item()
        acc = correct / total_num
        avgloss = test_loss / len(test_loader)
        print('\nVal set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
            avgloss, correct, len(test_loader.dataset), 100 * acc))
        if acc > ACC:
            torch.save(model_ft, 'model_' + str(epoch) + '_' + str(round(acc, 3)) + '.pth')
            ACC = acc
#
训练for epoch in range(1, EPOCHS + 1): train(model_ft, DEVICE, train_loader, optimizer, epoch) cosine_() val(model_ft, DEVICE, test_loader)运⾏结果:测试我介绍两种常⽤的测试⽅式,第⼀种是通⽤的,通过⾃⼰⼿动加载数据集然后做预测,具体操作如下:测试集存放的⽬录如下图:第⼀步 定义类别,这个类别的顺序和训练时的类别顺序对应,⼀定不要改变顺序第⼆步 定义transforms,transforms和验证集的transforms⼀样即可,别做数据增强。第三步 加载model,并将模型放在DEVICE⾥,第四步 读取图⽚并预测图⽚的类别,在这⾥注意,读取图⽚⽤PIL库的Image。不要⽤cv2,transforms不⽀持。import butedimport orms as transformsfrom PIL import Imagefrom ad import Variableimport osclasses = ('Black-grass', 'Charlock', 'Cleavers', 'Common Chickweed', 'Common wheat','Fat Hen', 'Loose Silky-bent', 'Maize','Scentless Mayweed','Shepherds Purse','Small-flowered Cranesbill','Sugar beet')transform_test = e([ ((224, 224)), or(), ize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = torch.load("model.pth")
model.eval()
model.to(DEVICE)
path = 'data/test/'
testList = os.listdir(path)
for file in testList:
    img = Image.open(path + file)
    img = transform_test(img)
    img.unsqueeze_(0)
    img = Variable(img).to(DEVICE)
    out = model(img)
    # Predict
    _, pred = torch.max(out.data, 1)
    print('Image Name:{},predict:{}'.format(file, classes[pred.data.item()]))

运行结果:

第二种 使用自定义的Dataset读取图片

import torch.utils.data.distributed
import torchvision.transforms as transforms
from dataset.dataset import SeedlingData
from torch.autograd import Variable
classes = ('Black-grass', 'Charlock', 'Cleavers', 'Common Chickweed', 'Common wheat','Fat Hen', 'Loose Silky-bent', 'Maize','Scentless Mayweed','Shepherds Purse','Small-flowered Cranesbill','Sugar beet')transform_test = e([ ((224, 224)), or(), ize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
DEVICE = ("cuda:0" if _available() else "cpu")model = ("")()(DEVICE)
dataset_test = SeedlingData('data/test/', transform_test, test=True)
print(len(dataset_test))
#
对应⽂件夹的label
for index in range(len(dataset_test)):
    item = dataset_test[index]
    img, label = item
    img.unsqueeze_(0)
    data = Variable(img).to(DEVICE)
    output = model(data)
    _, pred = torch.max(output.data, 1)
    print('Image Name:{},predict:{}'.format(dataset_test.imgs[index], classes[pred.data.item()]))
    index += 1
运⾏结果:
2023年6月21日发(作者:)
MobileNetV1实战:使⽤MobileNetV1实现植物幼苗分类⽂章⽬录摘要本例提取了植物幼苗数据集中的部分数据做数据集,数据集共有12种类别,演⽰如何使⽤pytorch版本的MobileNetV1图像分类模型实现分类任务。通过本⽂你和学到:1、如何⾃定义MobileNetV1模型。2、如何⾃定义数据集加载⽅式?3、如何使⽤Cutout数据增强?4、如何使⽤Mixup数据增强。5、如何实现训练和验证。6、预测的两种写法。MobileNetV1的论⽂翻译:MobileNetV1解析:Keras版本:数据增强Cutout和Mixup为了提⾼成绩我在代码中加⼊Cutout和Mixup这两种增强⽅式。实现这两种增强需要安装torchtoolbox。安装命令:pip install torchtoolboxCutout实现,在transforms中。from orm import Cutout#
数据预处理transform = e([ ((224, 224)), Cutout(), or(), ize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])Mixup实现,在train⽅法中。需要导⼊包:from import mixup_data, mixup_criterion for batch_idx, (data, target) in enumerate(train_loader): data, target = (device, non_blocking=True), (device, non_blocking=True) data, labels_a, labels_b, lam = mixup_data(data, target, alpha) _grad() output = model(data) loss = mixup_criterion(criterion, output, labels_a, labels_b, lam) rd() () print_loss = ()项⽬结构MobileNetV1_demo├─data│ └─train│ ├─Black-grass│ ├─Charlock│ ├─Cleavers│ ├─Common Chickweed│ ├─Common wheat│ ├─Fat Hen│ ├─Loose Silky-bent│ ├─Maize│ ├─Scentless Mayweed│ ├─Shepherds Purse│ ├─Small-flowered Cranesbill│ └─Sugar beet├─dataset│ └─└─models│ └─├─├─└─导⼊项⽬使⽤的库import as optimimport torchimport as nnimport elimport port butedimport orms as transformsfrom t import SeedlingDatafrom ad import Variablefrom netv1 import MobileNetV1from import mixup_data, mixup_criterionfrom orm import Cutout设置全局参数设置学习率、BatchSize、epoch等参数,判断环境中是否存在GPU,如果没有则使⽤CPU。建议使⽤GPU,CPU太慢了。#
设置全局参数modellr = 1e-4BATCH_SIZE = 16EPOCHS = 300DEVICE = ('cuda' if _available() else 'cpu')图像预处理与增强数据处理⽐较简单,加⼊了Cutout、做了Resize和归⼀化。#
数据预处理transform = e([ ((224, 224)), Cutout(), or(), ize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])transform_test = e([ ((224, 224)), or(), ize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])读取数据将数据集解压后放到data⽂件夹下⾯,如图:然后我们在dataset⽂件夹下⾯新建 和,在⽂件夹写⼊下⾯的代码:# coding:utf8import osfrom PIL import Imagefrom import datafrom torchvision import transforms as Tfrom _selection import train_test_split
Labels = {'Black-grass': 0, 'Charlock': 1, 'Cleavers': 2, 'Common Chickweed': 3, 'Common wheat': 4, 'Fat Hen': 5, 'Loose Silky-bent': 6, 'Maize': 7, 'Scentless Mayweed': 8, 'Shepherds Purse': 9, 'Small-flowered Cranesbill': 10, 'Sugar beet': 11}
class SeedlingData (t):
def __init__(self, root, transforms=None, train=True, test=False): """ 主要⽬标: 获取所有图⽚的地址,并根据训练,验证,测试划分数据 """ = test orms = transforms
if : imgs = [(root, img) for img in r(root)] = imgs else: imgs_labels = [(root, img) for img in r(root)] imgs = [] for imglable in imgs_labels: for imgname in r(imglable): imgpath = (imglable, imgname) (imgpath) trainval_files, val_files = train_test_split(imgs, test_size=0.3, random_state=42) if train: = trainval_files else: = val_files
def __getitem__(self, index): """ ⼀次返回⼀张图⽚的数据 """ img_path = [index] img_path=img_e("",'/') if : label = -1 else: labelname = img_('/')[-2] label = Labels[labelname] data = (img_path).convert('RGB') data = orms(data) return data, label
def __len__(self): return len()说⼀下代码的核⼼逻辑:第⼀步 建⽴字典,定义类别对应的ID,⽤数字代替类别。第⼆步 在__init__⾥⾯编写获取图⽚路径的⽅法。测试集只有⼀层路径直接读取,训练集在train⽂件夹下⾯是类别⽂件夹,先获取到类别,再获取到具体的图⽚路径。然后使⽤sklearn中切分数据集的⽅法,按照7:3的⽐例切分训练集和验证集。第三步 在__getitem__⽅法中定义读取单个图⽚和类别的⽅法,由于图像中有位深度32位的,所以我在读取图像的时候做了转换。然后我们在调⽤SeedlingData读取数据 ,记着导⼊刚才写的(from t import SeedlingData)dataset_train = SeedlingData('data/train', transforms=transform, train=True)dataset_test = SeedlingData("data/train", transforms=transform_test, train=False)#
读取数据print(dataset_)#
导⼊数据train_loader = ader(dataset_train, batch_size=BATCH_SIZE, shuffle=True)test_loader = ader(dataset_test, batch_size=BATCH_SIZE, shuffle=False)设置模型设置loss函数为ntropyLoss()。设置模型为MobileNetV1,num_classes设置为12。优化器设置为adam。学习率调整策略选择为余弦退⽕。#
实例化模型并且移动到GPUcriterion = ntropyLoss()model_ft = MobileNetV1(num_classes=12)model_(DEVICE)#
选择简单暴⼒的Adam优化器,学习率调低optimizer = (model_ters(), lr=modellr)cosine_schedule = _AnnealingLR(optimizer=optimizer,T_max=20,eta_min=1e-9)定义训练和验证函数#
定义训练过程alpha=0.2def train(model, device, train_loader, optimizer, epoch): () sum_loss = 0 total_num = len(train_t) print(total_num, len(train_loader)) for batch_idx, (data, target) in enumerate(train_loader): data, target = (device, non_blocking=True), (device, non_blocking=True) data, labels_a, labels_b, lam = mixup_data(data, target, alpha) _grad() output = model(data) loss = mixup_criterion(criterion, output, labels_a, labels_b, lam) rd() () lr = _dict()['param_groups'][0]['lr'] print_loss = () sum_loss += print_loss if (batch_idx + 1) % 10 == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]tLoss: {:.6f}tLR:{:.9f}'.format( epoch, (batch_idx + 1) * len(data), len(train_t), 100. * (batch_idx + 1) / len(train_loader), (),lr)) ave_loss = sum_loss / len(train_loader) print('epoch:{},loss:{}'.format(epoch, ave_loss))ACC=0#
验证过程def val(model, device, test_loader): global ACC () test_loss = 0 correct = 0 total_num = len(test_t) print(total_num, len(test_loader)) with _grad(): for data, target in test_loader: data, target = Variable(data).to(device), Variable(target).to(device) output = model(data) loss = criterion(output, target) _, pred = (, 1) correct += (pred == target) print_loss = () test_loss += print_loss correct = () acc = correct / total_num avgloss = test_loss / len(test_loader) print('nVal set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)n'.format( avgloss, correct, len(test_t), 100 * acc)) if acc > ACC: (model_ft, 'model_' + str(epoch) + '_' + str(round(acc, 3)) + '.pth') ACC = acc#
训练for epoch in range(1, EPOCHS + 1): train(model_ft, DEVICE, train_loader, optimizer, epoch) cosine_() val(model_ft, DEVICE, test_loader)运⾏结果:测试我介绍两种常⽤的测试⽅式,第⼀种是通⽤的,通过⾃⼰⼿动加载数据集然后做预测,具体操作如下:测试集存放的⽬录如下图:第⼀步 定义类别,这个类别的顺序和训练时的类别顺序对应,⼀定不要改变顺序第⼆步 定义transforms,transforms和验证集的transforms⼀样即可,别做数据增强。第三步 加载model,并将模型放在DEVICE⾥,第四步 读取图⽚并预测图⽚的类别,在这⾥注意,读取图⽚⽤PIL库的Image。不要⽤cv2,transforms不⽀持。import butedimport orms as transformsfrom PIL import Imagefrom ad import Variableimport osclasses = ('Black-grass', 'Charlock', 'Cleavers', 'Common Chickweed', 'Common wheat','Fat Hen', 'Loose Silky-bent', 'Maize','Scentless Mayweed','Shepherds Purse','Small-flowered Cranesbill','Sugar beet')transform_test = e([ ((224, 224)), or(), ize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
DEVICE = ("cuda:0" if _available() else "cpu")model = ("")()(DEVICE)
path='data/test/'testList=r(path)for file in testList: img=(path+file) img=transform_test(img) eze_(0) img = Variable(img).to(DEVICE) out=model(img) # Predict _, pred = (, 1) print('Image Name:{},predict:{}'.format(file,classes[()]))运⾏结果:第⼆种 使⽤⾃定义的Dataset读取图⽚import butedimport orms as transformsfrom t import SeedlingDatafrom ad import Variable
classes = ('Black-grass', 'Charlock', 'Cleavers', 'Common Chickweed', 'Common wheat','Fat Hen', 'Loose Silky-bent', 'Maize','Scentless Mayweed','Shepherds Purse','Small-flowered Cranesbill','Sugar beet')transform_test = e([ ((224, 224)), or(), ize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])
DEVICE = ("cuda:0" if _available() else "cpu")model = ("")()(DEVICE)
dataset_test =SeedlingData('data/test/', transform_test,test=True)print(len(dataset_test))#
对应⽂件夹的label
for index in range(len(dataset_test)): item = dataset_test[index] img, label = item eze_(0) data = Variable(img).to(DEVICE) output = model(data) _, pred = (, 1) print('Image Name:{},predict:{}'.format(dataset_[index], classes[()])) index += 1
运⾏结果:
发布评论