1
0

Update exp2 to reflect recent changes and add exp3

This commit is contained in:
2025-11-30 16:24:32 +08:00
parent af890d899e
commit 48fcdfcc80
17 changed files with 859 additions and 124 deletions

114
exp2/modified/mnist.py Normal file
View File

@@ -0,0 +1,114 @@
from pathlib import Path
import typing

import numpy
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets
from torchvision.transforms import v2 as tvtrans
class CNN(torch.nn.Module):
    """Convolutional network for 28x28 grayscale MNIST digits.

    Three conv stages feed two fully connected layers; the forward pass
    returns per-class probabilities (softmax over the 10 digits).
    """

    def __init__(self):
        super().__init__()
        # Feature extractor. Spatial sizes: 28x28 -> conv(3x3) -> 26x26
        # -> pool(2x2) -> 13x13 -> conv(3x3) -> 11x11 -> pool(2x2) -> 5x5
        # -> conv(3x3) -> 3x3, ending with 64 feature maps.
        self.conv1 = torch.nn.Conv2d(1, 32, kernel_size=(3, 3))
        self.pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2))
        self.conv2 = torch.nn.Conv2d(32, 64, kernel_size=(3, 3))
        self.pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2))
        self.conv3 = torch.nn.Conv2d(64, 64, kernel_size=(3, 3))
        self.flatten = torch.nn.Flatten()
        # Classifier head: 64 * 3 * 3 flattened features -> 64 -> 10,
        # with Xavier-normal weights and zeroed biases.
        self.fc1 = torch.nn.Linear(64 * 3 * 3, 64)
        torch.nn.init.xavier_normal_(self.fc1.weight)
        torch.nn.init.zeros_(self.fc1.bias)
        self.fc2 = torch.nn.Linear(64, 10)
        torch.nn.init.xavier_normal_(self.fc2.weight)
        torch.nn.init.zeros_(self.fc2.bias)

    def forward(self, batch):
        """Map a (N, 1, 28, 28) batch to (N, 10) class probabilities."""
        features = self.pool1(F.relu(self.conv1(batch)))
        features = self.pool2(F.relu(self.conv2(features)))
        features = F.relu(self.conv3(features))
        hidden = F.relu(self.fc1(self.flatten(features)))
        # NOTE(review): returning softmax here while training uses
        # CrossEntropyLoss (which applies log-softmax itself) double-applies
        # softmax — confirm this is intentional.
        return F.softmax(self.fc2(hidden), dim=1)
class MnistDataset(Dataset):
    """Dataset wrapping in-memory MNIST image/label arrays.

    Images stay as a numpy array and are converted lazily by *transform*
    on every access; labels are converted to a tensor once up front.
    """

    # Number of samples (first dimension of the image array).
    shape: int
    # Callable applied to a single HxW image on each __getitem__.
    transform: typing.Callable
    images_data: numpy.ndarray
    labels_data: torch.Tensor

    def __init__(self, images: numpy.ndarray, labels: numpy.ndarray, transform: typing.Callable):
        """Store the arrays and validate that images and labels align.

        Args:
            images: (N, 28, 28) image array, typically uint8.
            labels: (N,) label array.
            transform: callable converting one image to the model input.

        Raises:
            ValueError: if the image and label counts differ.
        """
        images_len: int = images.shape[0]
        labels_len: int = labels.shape[0]
        # Raise instead of assert: asserts are stripped under `python -O`,
        # which would silently admit misaligned data.
        if images_len != labels_len:
            raise ValueError(
                f'images/labels length mismatch: {images_len} != {labels_len}')
        self.shape = images_len
        self.images_data = images
        self.labels_data = torch.from_numpy(labels)
        self.transform = transform

    def __getitem__(self, index):
        """Return (transformed image, label tensor) for *index*."""
        return self.transform(self.images_data[index]), self.labels_data[index]

    def __len__(self):
        """Return the number of samples."""
        return self.shape
class MnistDataSource:
    """Builds train/test DataLoaders from the bundled MNIST .npz archive."""

    # Loader over the 60000-sample training split.
    train_loader: DataLoader
    # Loader over the 10000-sample test split.
    test_loader: DataLoader

    def __init__(self, batch_size: int):
        """Load ../datasets/mnist.npz and wrap both splits in DataLoaders.

        Args:
            batch_size: batch size used for both loaders.
        """
        archive_path = Path(__file__).resolve().parent.parent / 'datasets' / 'mnist.npz'
        archive = numpy.load(archive_path)
        # Per-image pipeline: uint8 HxW -> float32 image tensor scaled
        # into [0, 1]. (ToImage + ToDtype(scale=True) replaces the older
        # ToTensor call.) All images are white digits on black background.
        pipeline = tvtrans.Compose([
            tvtrans.ToImage(),
            tvtrans.ToDtype(torch.float32, scale=True),
            # NOTE(review): the MNIST-standard Normalize((0.1307,), (0.3081,))
            # step is deliberately omitted here — confirm downstream code
            # expects un-normalized inputs.
        ])
        # Training split: 60000 x 28 x 28 images with a 1-D label array;
        # test split: 10000 x 28 x 28 likewise.
        split_train = MnistDataset(archive['x_train'], archive['y_train'], transform=pipeline)
        split_test = MnistDataset(archive['x_test'], archive['y_test'], transform=pipeline)
        # NOTE(review): shuffle=False on the *training* loader — confirm
        # this is intentional (e.g. for reproducibility comparisons).
        self.train_loader = DataLoader(dataset=split_train,
                                       batch_size=batch_size,
                                       shuffle=False)
        self.test_loader = DataLoader(dataset=split_test,
                                      batch_size=batch_size,
                                      shuffle=False)

View File

@@ -1,7 +1,10 @@
from pathlib import Path
import sys
import torch
from train import CNN
import numpy
from PIL import Image, ImageFile
import matplotlib.pyplot as plt
from mnist import CNN
sys.path.append(str(Path(__file__).resolve().parent.parent.parent))
import gpu_utils
@@ -36,7 +39,7 @@ class Predictor:
file_path = Path(__file__).resolve().parent.parent / 'models' / 'cnn.pth'
self.cnn.load_state_dict(torch.load(file_path))
def predict(self, image: list[list[bool]]) -> PredictResult:
def predict_sketchpad(self, image: list[list[bool]]) -> PredictResult:
input = torch.Tensor(image).float().to(self.device)
assert(input.dim() == 2)
assert(input.size(0) == 28)
@@ -51,4 +54,42 @@ class Predictor:
with torch.no_grad():
output = self.cnn(input)
return PredictResult(output)
def predict_image(self, image: ImageFile.ImageFile) -> PredictResult:
# 确保图像为灰度图像然后转换为numpy数组。
# 注意这里的numpy数组是只读的所以要先拷贝一份
grayscale_image = image.convert('L')
numpy_data = numpy.reshape(grayscale_image, (28, 28), copy=True)
# 转换到Tensor设置dtype并传到GPU上
data = torch.from_numpy(numpy_data).float().to(self.device)
# 归一化到255又因为图像输入是白底黑字需要做转换。
data.div_(255.0).sub_(1).mul_(-1)
# 同理,挤出维度并预测
input = data.unsqueeze(0).unsqueeze(0)
with torch.no_grad():
output = self.cnn(input)
return PredictResult(output)
def main():
predictor = Predictor()
# 遍历测试目录中的所有图片,并处理。
test_dir = Path(__file__).resolve().parent.parent / 'test_images'
for image_path in test_dir.glob('*.png'):
if image_path.is_file():
print(f'Predicting {image_path} ...')
image = Image.open(image_path)
rv = predictor.predict_image(image)
print(f'Predict digit: {rv.chosen_number()}')
plt.figure(f'Image - {image_path}')
plt.imshow(image)
plt.axis('on')
plt.title(f'Predict digit: {rv.chosen_number()}')
plt.show()
if __name__ == "__main__":
main()

View File

@@ -169,7 +169,7 @@ class SketchpadApp:
def execute(self):
"""执行按钮功能 - 将画板数据传递给后端"""
prediction = self.predictor.predict(self.canvas_data)
prediction = self.predictor.predict_sketchpad(self.canvas_data)
self.show_in_table(prediction)
def reset(self):

View File

@@ -1,145 +1,57 @@
from pathlib import Path
import sys
import typing
import numpy
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import v2 as tvtrans
import matplotlib.pyplot as plt
import torch.nn.functional as F
import torchinfo
import ignite.engine
import ignite.metrics
from ignite.engine import Engine, Events
from ignite.handlers.tqdm_logger import ProgressBar
from mnist import CNN, MnistDataSource
sys.path.append(str(Path(__file__).resolve().parent.parent.parent))
import gpu_utils
class CNN(torch.nn.Module):
"""卷积神经网络模型"""
def __init__(self):
super(CNN, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 32, kernel_size=(3, 3))
self.pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2))
self.conv2 = torch.nn.Conv2d(32, 64, kernel_size=(3, 3))
self.pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2))
self.conv3 = torch.nn.Conv2d(64, 64, kernel_size=(3, 3))
self.flatten = torch.nn.Flatten()
# 28x28过第一轮卷积后变为26x26过第一轮池化后变为13x13
# 过第二轮卷积后变为11x11过第二轮池化后变为5x5
# 过第三轮卷积后变为3x3。
# 最后一轮卷积核个数为64。
self.fc1 = torch.nn.Linear(64 * 3 * 3, 64)
self.fc2 = torch.nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool1(x)
x = F.relu(self.conv2(x))
x = self.pool2(x)
x = F.relu(self.conv3(x))
x = self.flatten(x)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
class MnistDataset(Dataset):
"""用于加载Mnist的自定义数据集"""
shape: int
transform: tvtrans.Transform
images_data: numpy.ndarray
labels_data: torch.Tensor
def __init__(self, images: numpy.ndarray, labels: numpy.ndarray, transform: tvtrans.Transform):
images_len: int = images.size(0)
labels_len: int = labels.size(0)
assert (images_len == labels_len)
self.shape = images_len
self.images_data = images
self.labels_data = torch.from_numpy(labels)
self.transform = transform
def __getitem__(self, index):
return self.transform(self.images_data[index]), self.labels_data[index]
def __len__(self):
return self.shape
class DataSource:
"""用于读取MNIST数据的数据读取器"""
train_data: DataLoader
test_data: DataLoader
def __init__(self, batch_size: int):
datasets_path = Path(__file__).resolve().parent.parent / 'datasets' / 'mnist.npz'
datasets = numpy.load(datasets_path)
# 所有图片均为黑底白字
# 6万张训练图片60000x28x28。标签只有第一维。
train_images = datasets['x_train']
train_labels = datasets['y_train']
# 1万张测试图片10000x28x28。标签只有第一维。
test_images = datasets['x_test']
test_labels = datasets['y_test']
# 定义数据转换器
trans = tvtrans.Compose([
# 从uint8转换为float32并自动归一化到0-1区间
# tvtrans.ToTensor(),
tvtrans.ToImage(),
tvtrans.ToDtype(torch.float32, scale=True),
# 为了符合后面图像的输入颜色通道条件,要在最后挤出一个新的维度
#tvtrans.Lambda(lambda x: x.unsqueeze(-1))
])
# 创建数据集
train_dataset = MnistDataset(train_images,
train_labels,
transform=trans)
test_dataset = MnistDataset(test_images, test_labels, transform=trans)
# 赋值到自身
self.train_data = DataLoader(dataset=train_dataset,
batch_size=batch_size,
shuffle=False)
self.test_data = DataLoader(dataset=test_dataset,
batch_size=batch_size,
shuffle=False)
class Trainer:
N_EPOCH: typing.ClassVar[int] = 5
N_BATCH_SIZE: typing.ClassVar[int] = 1000
device: torch.device
data_source: DataSource
cnn: CNN
data_source: MnistDataSource
model: CNN
def __init__(self):
self.device = gpu_utils.get_gpu_device()
self.data_source = DataSource(Trainer.N_BATCH_SIZE)
self.cnn = CNN().to(self.device)
self.data_source = MnistDataSource(Trainer.N_BATCH_SIZE)
self.model = CNN().to(self.device)
# 展示模型结构。批次为指定批次数量通道只有一个灰度通道大小28x28。
torchinfo.summary(self.model, (Trainer.N_BATCH_SIZE, 1, 28, 28))
def train(self):
optimizer = torch.optim.Adam(self.cnn.parameters())
optimizer = torch.optim.Adam(self.model.parameters(), eps=1e-7)
# optimizer = torch.optim.AdamW(
# self.model.parameters(),
# lr=0.001, # 两者默认学习率都是 0.001
# betas=(0.9, 0.999), # 两者默认值相同
# eps=1e-07, # 【关键】匹配 TensorFlow 的默认 epsilon
# weight_decay=0.0, # 两者默认都是 0
# amsgrad=False # 两者默认都是 False
# )
loss_func = torch.nn.CrossEntropyLoss()
for epoch in range(Trainer.N_EPOCH):
self.cnn.train()
self.model.train()
batch_images: torch.Tensor
batch_labels: torch.Tensor
for batch_index, (batch_images, batch_labels) in enumerate(self.data_source.train_data):
for batch_index, (batch_images, batch_labels) in enumerate(self.data_source.train_loader):
gpu_images = batch_images.to(self.device)
gpu_labels = batch_labels.to(self.device)
optimizer.zero_grad()
prediction: torch.Tensor = self.cnn(gpu_images)
prediction: torch.Tensor = self.model(gpu_images)
loss: torch.Tensor = loss_func(prediction, gpu_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
@@ -151,20 +63,20 @@ class Trainer:
file_dir_path = Path(__file__).resolve().parent.parent / 'models'
file_dir_path.mkdir(parents=True, exist_ok=True)
file_path = file_dir_path / 'cnn.pth'
torch.save(self.cnn.state_dict(), file_path)
torch.save(self.model.state_dict(), file_path)
print(f'模型已保存至:{file_path}')
def test(self):
self.cnn.eval()
self.model.eval()
correct_sum = 0
total_sum = 0
with torch.no_grad():
for batch_images, batch_labels in self.data_source.test_data:
for batch_images, batch_labels in self.data_source.test_loader:
gpu_images = batch_images.to(self.device)
gpu_labels = batch_labels.to(self.device)
possibilities: torch.Tensor = self.cnn(gpu_images)
possibilities: torch.Tensor = self.model(gpu_images)
# 输出出来是10个数字各自的可能性所以要选取最高可能性的那个对比
# 在dim=1上找最大的那个就选那个。dim=0是批次所以不管他。
_, prediction = possibilities.max(1)
@@ -175,13 +87,105 @@ class Trainer:
test_acc = 100. * correct_sum / total_sum
print(f"准确率: {test_acc:.4f}%,共测试了{total_sum}张图片")
def main():
trainer = Trainer()
trainer.train()
trainer.save()
trainer.test()
# N_EPOCH: int = 5
# N_BATCH_SIZE: int = 1000
# N_LOG_INTERVAL: int = 10
# class Trainer:
# device: torch.device
# data_source: MnistDataSource
# model: CNN
# trainer: Engine
# train_evaluator: Engine
# test_evaluator: Engine
# def __init__(self):
# self.device = gpu_utils.get_gpu_device()
# self.model = CNN().to(self.device)
# self.data_source = MnistDataSource(batch_size=N_BATCH_SIZE)
# # 展示模型结构。批次为指定批次数量通道只有一个灰度通道大小28x28。
# torchinfo.summary(self.model, (N_BATCH_SIZE, 1, 28, 28))
# #optimizer = torch.optim.Adam(self.model.parameters(), eps=1e-7)
# optimizer = torch.optim.AdamW(
# self.model.parameters(),
# lr=0.001, # 两者默认学习率都是 0.001
# betas=(0.9, 0.999), # 两者默认值相同
# eps=1e-07, # 【关键】匹配 TensorFlow 的默认 epsilon
# weight_decay=0.0, # 两者默认都是 0
# amsgrad=False # 两者默认都是 False
# )
# criterion = torch.nn.CrossEntropyLoss()
# self.trainer = ignite.engine.create_supervised_trainer(
# self.model, optimizer, criterion, self.device
# )
# eval_metrics = {
# "accuracy": ignite.metrics.Accuracy(device=self.device),
# "loss": ignite.metrics.Loss(criterion, device=self.device)
# }
# self.train_evaluator = ignite.engine.create_supervised_evaluator(
# self.model, metrics=eval_metrics, device=self.device)
# self.test_evaluator = ignite.engine.create_supervised_evaluator(
# self.model, metrics=eval_metrics, device=self.device)
# self.trainer.add_event_handler(
# Events.ITERATION_COMPLETED(every=N_LOG_INTERVAL),
# lambda engine: self.log_intrain_loss(engine)
# )
# self.trainer.add_event_handler(
# Events.EPOCH_COMPLETED,
# lambda trainer: self.log_train_results(trainer)
# )
# self.trainer.add_event_handler(
# Events.COMPLETED,
# lambda _: self.log_test_results()
# )
# self.trainer.add_event_handler(
# Events.COMPLETED,
# lambda _: self.save_model()
# )
# progressbar = ProgressBar()
# progressbar.attach(self.trainer)
# def run(self):
# self.trainer.run(self.data_source.train_loader, max_epochs=N_EPOCH)
# def log_intrain_loss(self, engine: Engine):
# print(f"Epoch: {engine.state.epoch}, Loss: {engine.state.output:.4f}\r", end="")
# def log_train_results(self, trainer: Engine):
# self.train_evaluator.run(self.data_source.train_loader)
# metrics = self.train_evaluator.state.metrics
# print()
# print(f"Training - Epoch: {trainer.state.epoch}, Avg Accuracy: {metrics['accuracy']:.4f}, Avg Loss: {metrics['loss']:.4f}")
# def log_test_results(self):
# self.test_evaluator.run(self.data_source.test_loader)
# metrics = self.test_evaluator.state.metrics
# print(f"Test - Avg Accuracy: {metrics['accuracy']:.4f} Avg Loss: {metrics['loss']:.4f}")
# def save_model(self):
# file_dir_path = Path(__file__).resolve().parent.parent / 'models'
# file_dir_path.mkdir(parents=True, exist_ok=True)
# file_path = file_dir_path / 'cnn.pth'
# torch.save(self.model.state_dict(), file_path)
# print(f'Model was saved into: {file_path}')
# def main():
# trainer = Trainer()
# trainer.run()
if __name__ == "__main__":
gpu_utils.print_gpu_availability()

2
exp2/test_images/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
# Ignore all test images
*.png