2023年6月21日发布(作者:)

Pytorch修改Imageloader⽀持划分验证集和训练集"""author:JIN"""from import VisionDatasetfrom _selection import StratifiedShuffleSplitfrom PIL import Imageimport osimport port warnings('ignore')class Custom_split_dataloader(VisionDataset): def __init__(self, root, test_size=None,train=True,transform=None, ): =root classes, class_to_idx = self._find_classes() samples = self._make_dataset(, class_to_idx) = t_loader s = classes _to_idx = class_to_idx s = samples s = [s[1] for s in samples] orm=transform

=train _size=test_size [s_train_index, s_val_index]= list(StratifiedShuffleSplit( n_splits=1, test_size= _size,random_state=724 ).split(s, s))[0] s_train = [s[i] for i in s_train_index] s_val = [s[i] for i in s_val_index] _dir = root orm = transform if ==True: = s_train else: = s_val s= def pil_loader(self,path): # open path as file to avoid ResourceWarning (/python-pillow/Pillow/issues/835) with open(path, 'rb') as f: img = (f) return t('RGB')

def accimage_loader(self,path): import accimage try: return (path) except IOError: # Potentially a decoding problem, fall back to return _loader(path)

def default_loader(self,path): from torchvision import get_image_backend if get_image_backend() == 'accimage': return ge_loader(path) else: return _loader(path)

def _find_classes(self, dir): classes = [ for d in r(dir) if _dir()] () class_to_idx = {classes[i]: i for i in range(len(classes))} return classes, class_to_idx

def _make_dataset(self, directory, class_to_idx): instances = [] directory = user(directory) for target_class in sorted(class_to_()): class_index = class_to_idx[target_class] target_dir = (directory, target_class) if not (target_dir): continue for root, _, fnames in sorted((target_dir, followlinks=True)): for fname in sorted(fnames): path = (root, fname) item = path, class_index (item) return instances

def __getitem__(self, index): path, target = s[index] sample = (path) sample = orm(sample) return sample, target def __len__(self): return len(s)

使⽤⽅法,将训练集按照5:1⽐例划分为训练和验证print("====>load traindatset ")train_data_root = 'dataset/train/'

train_dataset = Custom_split_dataloader(train_data_root, transform=data_transform,test_size=0.2,train=True)train_loader = ader(train_dataset, batch_size=ize, shuffle=True,drop_last=True,num_workers=s)print("====>load valdatset ")val_dataset = Custom_split_dataloader(train_data_root, transform=data_transform,test_size=0.2,train=False)val_loader = ader(val_dataset, batch_size=ize, shuffle=True,drop_last=True,num_workers=s)

2023年6月21日发布(作者:)

Pytorch修改Imageloader⽀持划分验证集和训练集"""author:JIN"""from import VisionDatasetfrom _selection import StratifiedShuffleSplitfrom PIL import Imageimport osimport port warnings('ignore')class Custom_split_dataloader(VisionDataset): def __init__(self, root, test_size=None,train=True,transform=None, ): =root classes, class_to_idx = self._find_classes() samples = self._make_dataset(, class_to_idx) = t_loader s = classes _to_idx = class_to_idx s = samples s = [s[1] for s in samples] orm=transform

=train _size=test_size [s_train_index, s_val_index]= list(StratifiedShuffleSplit( n_splits=1, test_size= _size,random_state=724 ).split(s, s))[0] s_train = [s[i] for i in s_train_index] s_val = [s[i] for i in s_val_index] _dir = root orm = transform if ==True: = s_train else: = s_val s= def pil_loader(self,path): # open path as file to avoid ResourceWarning (/python-pillow/Pillow/issues/835) with open(path, 'rb') as f: img = (f) return t('RGB')

def accimage_loader(self,path): import accimage try: return (path) except IOError: # Potentially a decoding problem, fall back to return _loader(path)

def default_loader(self,path): from torchvision import get_image_backend if get_image_backend() == 'accimage': return ge_loader(path) else: return _loader(path)

def _find_classes(self, dir): classes = [ for d in r(dir) if _dir()] () class_to_idx = {classes[i]: i for i in range(len(classes))} return classes, class_to_idx

def _make_dataset(self, directory, class_to_idx): instances = [] directory = user(directory) for target_class in sorted(class_to_()): class_index = class_to_idx[target_class] target_dir = (directory, target_class) if not (target_dir): continue for root, _, fnames in sorted((target_dir, followlinks=True)): for fname in sorted(fnames): path = (root, fname) item = path, class_index (item) return instances

def __getitem__(self, index): path, target = s[index] sample = (path) sample = orm(sample) return sample, target def __len__(self): return len(s)

使⽤⽅法,将训练集按照5:1⽐例划分为训练和验证print("====>load traindatset ")train_data_root = 'dataset/train/'

train_dataset = Custom_split_dataloader(train_data_root, transform=data_transform,test_size=0.2,train=True)train_loader = ader(train_dataset, batch_size=ize, shuffle=True,drop_last=True,num_workers=s)print("====>load valdatset ")val_dataset = Custom_split_dataloader(train_data_root, transform=data_transform,test_size=0.2,train=False)val_loader = ader(val_dataset, batch_size=ize, shuffle=True,drop_last=True,num_workers=s)