
Transform:数据的预处理/数据增强
发布日期:2021-05-14 14:41:31
浏览次数:18
分类:精选文章
本文共 10516 字,大约阅读时间需要 35 分钟。
Transform:数据的预处理/数据增强
补充:数据增强(对数据集进行变换,丰富数据集,从而使模型更具泛化能力)
数据增强一
1.
2.数据增强二
1.
torchvision.transforms：常用的图像预处理方法
torchvision.datasets：常用数据集的 dataset 实现（MNIST、CIFAR-10、ImageNet 等）
torchvision.models：常用的预训练模型（AlexNet、VGG、ResNet、GoogLeNet 等）
以上三个包是 PyTorch 视觉工具包 torchvision 中的主要工具包，比较重要。
torchvision.transforms :常用的图像预处理方法
- 数据中心化
- 数据标准化
- 缩放
- 裁剪
- 旋转
- 翻转
- 填充
- 噪声添加
- 灰度变换
- 线性变换
- 仿射变换
- 亮度、饱和度及对比度变换
# Logistic regression on two Gaussian clusters, trained with BCE loss.
# Demonstrates why feature standardization matters: the `bias` added below
# shifts the data away from zero mean, which slows convergence of a
# zero-mean-initialized model.
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np

torch.manual_seed(10)

lr = 0.01  # learning rate

# ---- synthetic data ----
sample_nums = 100
mean_value = 1.7
bias = 5  # shifts both clusters away from the origin (slows convergence)
n_data = torch.ones(sample_nums, 2)
x0 = torch.normal(mean_value * n_data, 1) + bias   # class-0 samples, shape (100, 2)
y0 = torch.zeros(sample_nums)                      # class-0 labels,  shape (100,)
x1 = torch.normal(-mean_value * n_data, 1) + bias  # class-1 samples, shape (100, 2)
y1 = torch.ones(sample_nums)                       # class-1 labels,  shape (100,)
train_x = torch.cat((x0, x1), 0)
train_y = torch.cat((y0, y1), 0)


# ---- model ----
class LR(nn.Module):
    """Logistic-regression classifier: Linear(2 -> 1) followed by Sigmoid."""

    def __init__(self):
        super(LR, self).__init__()
        self.features = nn.Linear(2, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        x = self.features(x)
        x = self.sigmoid(x)
        return x


lr_net = LR()

# ---- loss function and optimizer ----
loss_fn = nn.BCELoss()
# Fix: use the `lr` variable instead of a second hard-coded 0.01.
optimizer = torch.optim.SGD(lr_net.parameters(), lr=lr, momentum=0.9)

# ---- training loop ----
for iteration in range(1000):
    # forward pass; output shape is (200, 1)
    y_pred = lr_net(train_x)

    # Fix: squeeze to (200,) so input and target shapes match (nn.BCELoss
    # rejects a (200, 1) vs (200) mismatch). Note this is BCE loss, not MSE
    # as the original comment claimed.
    loss = loss_fn(y_pred.squeeze(1), train_y)

    # backward pass, parameter update, gradient reset
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

    # plot the current decision boundary every 40 iterations
    if iteration % 40 == 0:
        mask = y_pred.ge(0.5).float().squeeze()  # classify with threshold 0.5
        correct = (mask == train_y).sum()        # number of correct predictions
        acc = correct.item() / train_y.size(0)   # accuracy

        plt.scatter(x0.data.numpy()[:, 0], x0.data.numpy()[:, 1], c='r', label='class 0')
        plt.scatter(x1.data.numpy()[:, 0], x1.data.numpy()[:, 1], c='b', label='class 1')

        w0, w1 = lr_net.features.weight[0]
        w0, w1 = float(w0.item()), float(w1.item())
        plot_b = float(lr_net.features.bias[0].item())
        plot_x = np.arange(-6, 6, 0.1)
        # decision boundary: w0*x + w1*y + b = 0  =>  y = (-w0*x - b) / w1
        plot_y = (-w0 * plot_x - plot_b) / w1

        plt.xlim(-5, 10)
        plt.ylim(-7, 10)
        plt.plot(plot_x, plot_y)
        plt.text(-5, 5, 'Loss=%.4f' % loss.data.numpy(),
                 fontdict={'size': 20, 'color': 'red'})
        plt.title("Iteration: {}\nw0:{:.2f} w1:{:.2f} b: {:.2f} accuracy:{:.2%}".format(
            iteration, w0, w1, plot_b, acc))
        plt.legend()
        plt.show()
        plt.pause(0.5)

        # stop once the model is essentially perfect on the training set
        if acc > 0.99:
            break
将数据的均值变为0 ,标准差变为1
为什么进行标准化,为了加快模型的收敛(改逻辑回归的bias可知)
（这一点可以从数学上加以证明。）收敛慢的原因在于：模型参数通常按零均值初始化，如果数据不做标准化、均值偏离零，模型寻找最优分界面就需要更多的迭代。下面介绍 Transform 图像增强。



# Transform demo #1: crop / flip / rotation augmentations on the RMB dataset.
# Loads images through a train-time augmentation pipeline, then inverts the
# pipeline with transform_invert() to visualize each augmented sample.
import os
import random

import numpy as np
import torch
import torchvision.transforms as transforms
from matplotlib import pyplot as plt
from PIL import Image
from torch.utils.data import DataLoader

from tools.my_dataset import RMBDataset


def set_seed(seed=1):
    """Seed all RNGs used here so the augmentations are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)


set_seed(1)  # set random seed

# hyper-parameters / configuration
MAX_EPOCH = 10
BATCH_SIZE = 1
LR = 0.01
log_interval = 10
val_interval = 1
rmb_label = {"1": 0, "100": 1}


def transform_invert(img_, transform_train):
    """Invert normalization / tensor conversion to recover a viewable PIL image.

    :param img_: tensor of shape C*H*W produced by ``transform_train``
    :param transform_train: the torchvision.transforms.Compose that produced it
    :return: PIL Image suitable for plt.imshow
    """
    if 'Normalize' in str(transform_train):
        norm_transform = list(filter(lambda x: isinstance(x, transforms.Normalize),
                                     transform_train.transforms))
        mean = torch.tensor(norm_transform[0].mean, dtype=img_.dtype, device=img_.device)
        std = torch.tensor(norm_transform[0].std, dtype=img_.dtype, device=img_.device)
        # Fix: de-normalize out of place. The original used in-place
        # mul_()/add_(), which silently corrupted the caller's input tensor.
        img_ = img_ * std[:, None, None] + mean[:, None, None]

    img_ = img_.transpose(0, 2).transpose(0, 1)  # C*H*W --> H*W*C
    img_ = np.array(img_) * 255

    if img_.shape[2] == 3:
        img_ = Image.fromarray(img_.astype('uint8')).convert('RGB')
    elif img_.shape[2] == 1:
        img_ = Image.fromarray(img_.astype('uint8').squeeze())
    else:
        raise Exception("Invalid img shape, expected 1 or 3 in axis 2, but got {}!".format(img_.shape[2]))

    return img_


# ============================ step 1/5 data ============================
split_dir = os.path.join("..", "..", "data", "rmb_split")
train_dir = os.path.join(split_dir, "train")
valid_dir = os.path.join(split_dir, "valid")

norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]

train_transform = transforms.Compose([
    transforms.Resize((224, 224)),

    # ---- crop transforms: uncomment ONE to see its effect ----
    # 1 CenterCrop
    # transforms.CenterCrop(512),
    # 2 RandomCrop
    # transforms.RandomCrop(224, padding=16),
    # transforms.RandomCrop(224, padding=(16, 64)),
    # transforms.RandomCrop(224, padding=16, fill=(255, 0, 0)),
    # transforms.RandomCrop(512, pad_if_needed=True),
    # transforms.RandomCrop(224, padding=64, padding_mode='edge'),
    # transforms.RandomCrop(224, padding=64, padding_mode='reflect'),
    # transforms.RandomCrop(1024, padding=1024, padding_mode='symmetric'),
    # 3 RandomResizedCrop
    # transforms.RandomResizedCrop(size=224, scale=(0.5, 0.5)),
    # 4 FiveCrop (needs the Lambda below instead of the plain ToTensor)
    # transforms.FiveCrop(112),
    # transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),
    # 5 TenCrop
    # transforms.TenCrop(112, vertical_flip=False),
    # transforms.Lambda(lambda crops: torch.stack([transforms.ToTensor()(crop) for crop in crops])),

    # ---- flip / rotation transforms ----
    # 1 Horizontal Flip
    # transforms.RandomHorizontalFlip(p=1),
    # 2 Vertical Flip
    # transforms.RandomVerticalFlip(p=0.5),
    # 3 RandomRotation
    # transforms.RandomRotation(90),
    # transforms.RandomRotation((90), expand=True),
    # transforms.RandomRotation(30, center=(0, 0)),
    # transforms.RandomRotation(30, center=(0, 0), expand=True),  # expand only for center rotation

    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

valid_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std)
])

# build Dataset instances
train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

# build DataLoaders
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 5/5 visualize ============================
for epoch in range(MAX_EPOCH):
    for i, data in enumerate(train_loader):
        inputs, labels = data  # B C H W

        img_tensor = inputs[0, ...]  # C H W
        img = transform_invert(img_tensor, train_transform)
        plt.imshow(img)
        plt.show()
        plt.pause(0.5)
        plt.close()

        # For FiveCrop / TenCrop the batch carries an extra crops dimension:
        # bs, ncrops, c, h, w = inputs.shape
        # for n in range(ncrops):
        #     img_tensor = inputs[0, n, ...]  # C H W
        #     img = transform_invert(img_tensor, train_transform)
        #     plt.imshow(img)
        #     plt.show()
        #     plt.pause(1)
# Transform demo #2: pad / color-jitter / grayscale / affine / erasing
# augmentations, plus transform-combining operations (RandomChoice,
# RandomApply, RandomOrder) on the RMB dataset. transform_invert() is
# reused from tools.common_tools here instead of being redefined.
import os
import random

import numpy as np
import torch
import torchvision.transforms as transforms
from matplotlib import pyplot as plt
from torch.utils.data import DataLoader

from tools.common_tools import transform_invert
from tools.my_dataset import RMBDataset


def set_seed(seed=1):
    """Seed all RNGs used here so the augmentations are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)


set_seed(1)  # set random seed

# hyper-parameters / configuration
MAX_EPOCH = 10
BATCH_SIZE = 1
LR = 0.01
log_interval = 10
val_interval = 1
rmb_label = {"1": 0, "100": 1}

# ============================ step 1/5 data ============================
split_dir = os.path.join("..", "..", "data", "rmb_split")
train_dir = os.path.join(split_dir, "train")
valid_dir = os.path.join(split_dir, "valid")

norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]

train_transform = transforms.Compose([
    transforms.Resize((224, 224)),

    # ---- single transforms: uncomment ONE to see its effect ----
    # 1 Pad
    # transforms.Pad(padding=32, fill=(255, 0, 0), padding_mode='constant'),
    # transforms.Pad(padding=(8, 64), fill=(255, 0, 0), padding_mode='constant'),
    # transforms.Pad(padding=(8, 16, 32, 64), fill=(255, 0, 0), padding_mode='constant'),
    # transforms.Pad(padding=(8, 16, 32, 64), fill=(255, 0, 0), padding_mode='symmetric'),
    # 2 ColorJitter
    # transforms.ColorJitter(brightness=0.5),
    # transforms.ColorJitter(contrast=0.5),
    # transforms.ColorJitter(saturation=0.5),
    # transforms.ColorJitter(hue=0.3),
    # 3 Grayscale
    # transforms.Grayscale(num_output_channels=3),
    # 4 Affine
    # transforms.RandomAffine(degrees=30),
    # transforms.RandomAffine(degrees=0, translate=(0.2, 0.2), fillcolor=(255, 0, 0)),
    # transforms.RandomAffine(degrees=0, scale=(0.7, 0.7)),
    # transforms.RandomAffine(degrees=0, shear=(0, 0, 0, 45)),
    # transforms.RandomAffine(degrees=0, shear=90, fillcolor=(255, 0, 0)),
    # 5 Erasing (operates on tensors, hence the ToTensor before it)
    # transforms.ToTensor(),
    # transforms.RandomErasing(p=1, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=(254/255, 0, 0)),
    # transforms.RandomErasing(p=1, scale=(0.02, 0.33), ratio=(0.3, 3.3), value='1234'),

    # ---- transform-combining operations ----
    # 1 RandomChoice: apply exactly one of the listed transforms
    transforms.RandomChoice([transforms.RandomVerticalFlip(p=1),
                             transforms.RandomHorizontalFlip(p=1)]),
    # 2 RandomApply: apply the whole list with probability p
    # transforms.RandomApply([transforms.RandomAffine(degrees=0, shear=45, fillcolor=(255, 0, 0)),
    #                         transforms.Grayscale(num_output_channels=3)], p=0.5),
    # 3 RandomOrder: apply the list in a random order
    # transforms.RandomOrder([transforms.RandomRotation(15),
    #                         transforms.Pad(padding=32),
    #                         transforms.RandomAffine(degrees=0, translate=(0.01, 0.1), scale=(0.9, 1.1))]),

    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std),
])

valid_transform = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(norm_mean, norm_std)
])

# build Dataset instances
train_data = RMBDataset(data_dir=train_dir, transform=train_transform)
valid_data = RMBDataset(data_dir=valid_dir, transform=valid_transform)

# build DataLoaders
train_loader = DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True)
valid_loader = DataLoader(dataset=valid_data, batch_size=BATCH_SIZE)

# ============================ step 5/5 visualize ============================
for epoch in range(MAX_EPOCH):
    for i, data in enumerate(train_loader):
        inputs, labels = data  # B C H W

        img_tensor = inputs[0, ...]  # C H W
        img = transform_invert(img_tensor, train_transform)
        plt.imshow(img)
        plt.show()
        plt.pause(0.5)
        plt.close()
发表评论
最新留言
能坚持,总会有不一样的收获!
[***.219.124.196]2025年04月22日 03时42分42秒
关于作者

喝酒易醉,品茶养心,人生如梦,品茶悟道,何以解忧?唯有杜康!
-- 愿君每日到此一游!
推荐文章
Problem G. The Stones Game【取石子博弈 & 思维】
2021-05-10
Unable to execute dex: Multiple dex files
2021-05-10
Java多线程
2021-05-10
Unity监听日记
2021-05-10
openssl服务器证书操作
2021-05-10
expect 模拟交互 ftp 上传文件到指定目录下
2021-05-10
linux系统下双屏显示
2021-05-10
PDF.js —— vue项目中使用pdf.js显示pdf文件(流)
2021-05-10
我用wxPython搭建GUI量化系统之最小架构的运行
2021-05-10
我用wxPython搭建GUI量化系统之多只股票走势对比界面
2021-05-10
selenium+python之切换窗口
2021-05-10
重载和重写的区别:
2021-05-10
搭建Vue项目步骤
2021-05-10
账号转账演示事务
2021-05-10
idea创建工程时错误提醒的是architectCatalog=internal
2021-05-10
SpringBoot找不到@EnableRety注解
2021-05-10
简易计算器案例
2021-05-10
在Vue中使用样式——使用内联样式
2021-05-10
Explore Optimization
2021-05-10