1
0

first commit

This commit is contained in:
2025-11-24 14:20:38 +08:00
commit 936f852466
15 changed files with 2657 additions and 0 deletions

2
exp2/datasets/.gitignore vendored Normal file
View File

@@ -0,0 +1,2 @@
# Ignore datasets file
mnist.npz

0
exp2/modified/predict.py Normal file
View File

139
exp2/modified/train.py Normal file
View File

@@ -0,0 +1,139 @@
from pathlib import Path
import sys
import numpy
import torch
from torch.utils.data import DataLoader, Dataset
import matplotlib.pyplot as plt
import torch.nn.functional as F
sys.path.append(str(Path(__file__).resolve().parent.parent.parent))
import gpu_utils
class CNN(torch.nn.Module):
"""卷积神经网络模型"""
def __init__(self):
super(CNN, self).__init__()
# 使用Ceil模式设置MaxPooling因为tensorflow默认是这个模式。
self.conv1 = torch.nn.Conv2d(1, 32, kernel_size=(3, 3))
self.pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2), ceil_mode=True)
self.conv2 = torch.nn.Conv2d(32, 64, kernel_size=(3, 3))
self.pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2), ceil_mode=True)
self.conv3 = torch.nn.Conv2d(64, 64, kernel_size=(3, 3))
self.flatten = torch.nn.Flatten()
# 28x28过第一轮卷积后变为26x26过第一轮池化后变为13x13
# 过第二轮卷积后变为11x11过第二轮池化后变为5x5
# 过第三轮卷积后变为3x3。
# 最后一轮卷积核个数为64。
self.fc1 = torch.nn.Linear(64 * 3 * 3, 64)
self.fc2 = torch.nn.Linear(64, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool1(x)
x = F.relu(self.conv2(x))
x = self.pool2(x)
x = F.relu(self.conv3(x))
x = self.flatten(x)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return F.softmax(x, dim=1)
class MnistDataset(Dataset):
    """Map-style Dataset serving pre-loaded MNIST tensors by index."""

    shape: int
    x_data: torch.Tensor
    y_data: torch.Tensor

    def __init__(self, x_data: torch.Tensor, y_data: torch.Tensor):
        # Images and labels must pair up one-to-one.
        n_samples = x_data.shape[0]
        assert (n_samples == y_data.shape[0])
        self.shape = n_samples
        self.x_data = x_data
        self.y_data = y_data

    def __getitem__(self, index):
        # One (image, label) pair per index.
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        return self.shape
class DataSource:
    """Loads the local MNIST .npz archive and exposes train/test DataLoaders."""

    train_data: DataLoader
    test_data: DataLoader

    def __init__(self, batch_size: int):
        datasets_path = Path(
            __file__).resolve().parent.parent / 'datasets' / 'mnist.npz'
        datasets = numpy.load(datasets_path)
        # 60000 training images (60000x28x28); labels are 1-D.
        # BUGFIX: convert images to float32 — the original kept uint8 and the
        # in-place `/= 255.0` below would raise a dtype error. Labels become
        # int64 because CrossEntropyLoss requires long targets.
        train_images = torch.from_numpy(datasets['x_train']).float()
        train_label = torch.from_numpy(datasets['y_train']).long()
        # 10000 test images (10000x28x28); labels are 1-D.
        test_images = torch.from_numpy(datasets['x_test']).float()
        test_label = torch.from_numpy(datasets['y_test']).long()
        # BUGFIX: the original called `train_images.unsqueeze(-1)` and
        # discarded the result (unsqueeze is out-of-place), and the last axis
        # is wrong for PyTorch anyway: Conv2d expects NCHW, so the channel
        # dimension goes at position 1.
        train_images = train_images.unsqueeze(1)
        test_images = test_images.unsqueeze(1)
        # Normalize pixel values to [0, 1].
        train_images /= 255.0
        test_images /= 255.0
        # Build the datasets.
        train_dataset = MnistDataset(train_images, train_label)
        test_dataset = MnistDataset(test_images, test_label)
        # Train loader reshuffles each epoch; test loader keeps file order.
        self.train_data = DataLoader(dataset=train_dataset,
                                     batch_size=batch_size,
                                     shuffle=True)
        self.test_data = DataLoader(dataset=test_dataset,
                                    batch_size=batch_size,
                                    shuffle=False)
def main():
    """Train the CNN on MNIST for a fixed number of epochs, logging per-batch loss."""
    n_epoch = 5
    n_batch_size = 25
    device = gpu_utils.get_gpu_device()
    data_source = DataSource(n_batch_size)
    cnn = CNN().to(device)
    # Adam with default hyper-parameters, matching the TF reference
    # (optimizer='adam' in exp2/source/train.py).
    optimizer = torch.optim.Adam(cnn.parameters())
    loss_func = torch.nn.CrossEntropyLoss()
    for epoch in range(n_epoch):
        cnn.train()
        batch_images: torch.Tensor
        batch_labels: torch.Tensor
        for batch_index, (batch_images, batch_labels) in enumerate(data_source.train_data):
            # Move the batch to the training device.
            gpu_images = batch_images.to(device)
            gpu_labels = batch_labels.to(device)
            optimizer.zero_grad()
            prediction: torch.Tensor = cnn(gpu_images)
            loss: torch.Tensor = loss_func(prediction, gpu_labels)
            loss.backward()
            optimizer.step()
            # Removed the unused `loss_showcase` local; the f-string reads
            # loss.item() directly.
            print(f'Epoch: {epoch+1}, Batch: {batch_index}, Loss: {loss.item():.4f}')
if __name__ == "__main__":
    # Report GPU availability before training starts.
    gpu_utils.print_gpu_availability()
    main()

45
exp2/source/predict.py Normal file
View File

@@ -0,0 +1,45 @@
import tensorflow as tf
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from train import CNN
'''
python 3.9
tensorflow 2.0.0b0
pillow(PIL) 4.3.0
'''
class Predict(object):
    """Restores the trained CNN from the latest checkpoint and classifies image files."""

    def __init__(self):
        # Find the most recent checkpoint under ./ckpt and restore its weights.
        checkpoint = tf.train.latest_checkpoint('./ckpt')
        self.cnn = CNN()
        self.cnn.model.load_weights(checkpoint)

    def predict(self, image_path):
        """Classify one image file, print the result, and display the image."""
        # Read the image as 8-bit grayscale.
        grayscale = Image.open(image_path).convert('L')
        img = np.reshape(grayscale, (28, 28, 1)) / 255.
        # Invert the pixels and add a batch axis of size 1.
        x = np.array([1 - img])
        y = self.cnn.model.predict(x)
        # Only one image was passed in, so y[0] holds its scores;
        # np.argmax over them yields the predicted digit.
        digit = np.argmax(y[0])
        print(image_path)
        print(' -> Predict digit', digit)
        plt.figure("Image")  # window title
        plt.imshow(img)
        plt.axis('on')  # show the axes ('off' would hide them)
        plt.title(digit)  # title is required for the digit to be shown
        plt.show()
if __name__ == "__main__":
    # Classify the bundled sample digit images.
    app = Predict()
    app.predict('./test_images/0.png')
    app.predict('./test_images/1.png')
    app.predict('./test_images/4.png')

61
exp2/source/train.py Normal file
View File

@@ -0,0 +1,61 @@
import os
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
'''
python 3.9
tensorflow 2.0.0b0
'''
class CNN(object):
    """Keras CNN for MNIST: three 3x3 conv layers with pooling, then two dense layers."""

    def __init__(self):
        # Build the whole stack in one Sequential constructor call:
        # conv 1: 32 filters of 3x3; input is one 28x28 grayscale image.
        # conv 2 and 3: 64 filters of 3x3 each.
        # head: flatten, 64-unit hidden layer, 10-way softmax output.
        net = models.Sequential([
            layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
            layers.MaxPooling2D((2, 2)),
            layers.Conv2D(64, (3, 3), activation='relu'),
            layers.MaxPooling2D((2, 2)),
            layers.Conv2D(64, (3, 3), activation='relu'),
            layers.Flatten(),
            layers.Dense(64, activation='relu'),
            layers.Dense(10, activation='softmax'),
        ])
        net.summary()
        self.model = net
class DataSource(object):
    """Loads the MNIST train/test arrays via keras.datasets."""

    def __init__(self):
        # Location of the mnist dataset; downloaded automatically if absent.
        # NOTE(review): appending '.' to the module directory looks wrong —
        # load_data(path=...) expects a cache file path; confirm this is the
        # intended location (possibly a truncated '../datasets/mnist.npz').
        data_path = os.path.abspath(os.path.dirname(
            __file__)) + '.'
        (train_images, train_labels), (test_images,
                                       test_labels) = datasets.mnist.load_data(path=data_path)
        # 60000 training images, 10000 test images.
        train_images = train_images.reshape((60000, 28, 28, 1))
        test_images = test_images.reshape((10000, 28, 28, 1))
        # Map pixel values into [0, 1].
        train_images, test_images = train_images / 255.0, test_images / 255.0
        self.train_images, self.train_labels = train_images, train_labels
        self.test_images, self.test_labels = test_images, test_labels
class Train:
    """Drives training and evaluation of the Keras CNN on MNIST."""

    def __init__(self):
        self.cnn = CNN()
        self.data = DataSource()

    def train(self):
        """Compile the model, fit for 5 epochs with periodic checkpoints, then evaluate."""
        check_path = './ckpt/cp-{epoch:04d}.ckpt'
        # period: save a checkpoint every 5 epochs.
        save_model_cb = tf.keras.callbacks.ModelCheckpoint(
            check_path, save_weights_only=True, verbose=1, period=5)
        model = self.cnn.model
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.fit(self.data.train_images, self.data.train_labels,
                  epochs=5, callbacks=[save_model_cb])
        test_loss, test_acc = model.evaluate(
            self.data.test_images, self.data.test_labels)
        print("准确率: %.4f,共测试了%d张图片 " % (test_acc, len(self.data.test_labels)))
if __name__ == "__main__":
    # Train and evaluate when run as a script.
    app = Train()
    app.train()