first commit
This commit is contained in:
10
.gitignore
vendored
Normal file
10
.gitignore
vendored
Normal file
@@ -0,0 +1,10 @@
|
||||
# Python-generated files
|
||||
__pycache__/
|
||||
*.py[oc]
|
||||
build/
|
||||
dist/
|
||||
wheels/
|
||||
*.egg-info
|
||||
|
||||
# Virtual environments
|
||||
.venv
|
||||
1
.python-version
Normal file
1
.python-version
Normal file
@@ -0,0 +1 @@
|
||||
3.11
|
||||
0
__init__.py
Normal file
0
__init__.py
Normal file
0
exp1/__init__.py
Normal file
0
exp1/__init__.py
Normal file
90
exp1/modified.py
Normal file
90
exp1/modified.py
Normal file
@@ -0,0 +1,90 @@
|
||||
from enum import IntEnum, auto
|
||||
from pathlib import Path
|
||||
import sys
|
||||
import torch
|
||||
import matplotlib.pyplot as plt
|
||||
import torch.nn.functional as F
|
||||
|
||||
sys.path.append(str(Path(__file__).resolve().parent.parent))
|
||||
import gpu_utils
|
||||
|
||||
|
||||
class CurveKind(IntEnum):
    """Curve family used when generating the synthetic fitting data."""

    Polynomials = 1
    Sine = 2
||||
|
||||
|
||||
class DataSource:
    """Randomly generated synthetic data used for curve fitting."""

    # Inputs and noisy targets, already moved to the requested device.
    x: torch.Tensor
    y: torch.Tensor

    def __init__(self, device: torch.device, curve_kind: CurveKind):
        if curve_kind == CurveKind.Polynomials:
            xs = torch.linspace(-1, 1, steps=100).reshape(-1, 1)
            ys = -xs.pow(3) + 2 * xs.pow(2) + 0.2 * torch.rand(xs.size())
        elif curve_kind == CurveKind.Sine:
            # Sample over [0, 2] so the sine arc is visibly non-linear.
            xs = torch.linspace(0, 2, steps=100).reshape(-1, 1)
            ys = xs.sin() + 0.2 * torch.rand(xs.size())

        self.x = xs.to(device)
        self.y = ys.to(device)
||||
|
||||
|
||||
class Net(torch.nn.Module):
    """Small fully-connected regression network: three ReLU hidden layers
    followed by a linear output layer."""

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        # Layer shapes: n_feature -> n_hidden -> n_hidden -> n_hidden -> n_output.
        self.hidden1 = torch.nn.Linear(n_feature, n_hidden)
        self.hidden2 = torch.nn.Linear(n_hidden, n_hidden)
        self.hidden3 = torch.nn.Linear(n_hidden, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Forward pass: ReLU after each hidden layer, linear output."""
        hidden = F.relu(self.hidden1(x))
        hidden = F.relu(self.hidden2(hidden))
        hidden = F.relu(self.hidden3(hidden))
        return self.predict(hidden)
||||
|
||||
|
||||
def main():
    """Fit the polynomial test curve on the GPU and plot the final result."""
    device = gpu_utils.get_gpu_device()
    test_data = DataSource(device, CurveKind.Polynomials)
    net = Net(n_feature=1, n_hidden=20, n_output=1).to(device)

    # SGD over all network parameters; MSE as the regression loss.
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
    loss_func = torch.nn.MSELoss()

    for _step in range(2000):
        # Clear leftover gradients before each backward pass.
        optimizer.zero_grad()
        prediction: torch.Tensor = net(test_data.x)
        loss: torch.Tensor = loss_func(prediction, test_data.y)
        loss.backward()
        optimizer.step()

    # Plot the learned fit against the noisy data (tensors copied to CPU).
    plt.cla()
    plt.scatter(test_data.x.cpu().data.numpy(), test_data.y.cpu().data.numpy())
    plt.scatter(test_data.x.cpu().data.numpy(), prediction.cpu().data.numpy())
    plt.text(0.5,
             0,
             'Loss=%.4f' % loss.cpu().data.numpy(),
             fontdict={
                 'size': 20,
                 'color': 'red'
             })
    plt.show()


if __name__ == "__main__":
    gpu_utils.print_gpu_availability()
    main()
|
||||
61
exp1/source.py
Normal file
61
exp1/source.py
Normal file
@@ -0,0 +1,61 @@
|
||||
import torch
|
||||
import matplotlib.pyplot as plt
|
||||
import torch.nn.functional as F
|
||||
|
||||
|
||||
class Net(torch.nn.Module):
    """Small fully-connected regression network: three ReLU hidden layers
    followed by a linear output layer."""

    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        # Layer shapes: n_feature -> n_hidden -> n_hidden -> n_hidden -> n_output.
        self.hidden1 = torch.nn.Linear(n_feature, n_hidden)
        self.hidden2 = torch.nn.Linear(n_hidden, n_hidden)
        self.hidden3 = torch.nn.Linear(n_hidden, n_hidden)
        self.predict = torch.nn.Linear(n_hidden, n_output)

    def forward(self, x):
        """Forward pass: ReLU after each hidden layer, linear output."""
        hidden = F.relu(self.hidden1(x))
        hidden = F.relu(self.hidden2(hidden))
        hidden = F.relu(self.hidden3(hidden))
        return self.predict(hidden)
||||
|
||||
|
||||
def main():
    """Fit a noisy cubic with a small MLP, plotting progress every 5 steps."""
    # 100 samples in [-1, 1], shaped (100, 1) for the linear layers.
    x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)
    y = -x.pow(3) + 2 * x.pow(2) + 0.2 * torch.rand(x.size())
    # Alternative target (kept from the original, commented out):
    # y=math.sin(x)+o.2*torch.rand(x.size())

    net = Net(n_feature=1, n_hidden=20, n_output=1)

    # SGD over all network parameters; MSE as the regression loss.
    optimizer = torch.optim.SGD(net.parameters(), lr=0.01)
    loss_func = torch.nn.MSELoss()

    for step in range(2000):
        # Clear leftover gradients, then forward / loss / backward / update.
        optimizer.zero_grad()
        prediction = net(x)
        loss = loss_func(prediction, y)
        loss.backward()
        optimizer.step()

        if step % 5 == 0:
            # Redraw the scatter of data vs. current predictions.
            plt.cla()
            plt.scatter(x.data.numpy(), y.data.numpy())
            plt.scatter(x.data.numpy(), prediction.data.numpy())
            plt.text(0.5,
                     0,
                     'Loss=%.4f' % loss.data.numpy(),
                     fontdict={
                         'size': 20,
                         'color': 'red'
                     })

    plt.show()


if __name__ == "__main__":
    main()
|
||||
2
exp2/datasets/.gitignore
vendored
Normal file
2
exp2/datasets/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
# Ignore datasets file
|
||||
mnist.npz
|
||||
0
exp2/modified/predict.py
Normal file
0
exp2/modified/predict.py
Normal file
139
exp2/modified/train.py
Normal file
139
exp2/modified/train.py
Normal file
@@ -0,0 +1,139 @@
|
||||
from pathlib import Path
|
||||
import sys
|
||||
import numpy
|
||||
import torch
|
||||
from torch.utils.data import DataLoader, Dataset
|
||||
import matplotlib.pyplot as plt
|
||||
import torch.nn.functional as F
|
||||
|
||||
sys.path.append(str(Path(__file__).resolve().parent.parent.parent))
|
||||
import gpu_utils
|
||||
|
||||
|
||||
class CNN(torch.nn.Module):
    """Convolutional network for 28x28 single-channel MNIST digits.

    forward() takes a float tensor of shape (N, 1, 28, 28) and returns
    per-class logits of shape (N, 10).
    """

    def __init__(self):
        super(CNN, self).__init__()

        # Use default (floor) pooling: TensorFlow's MaxPooling2D with
        # padding='valid' floors the output size. The original ceil_mode=True
        # made pool2 emit 6x6 maps (ceil((11-2)/2)+1 = 6), so conv3 produced
        # 4x4 and Flatten yielded 64*4*4 = 1024 features — mismatching fc1's
        # expected 576 and crashing forward() with a shape error.
        self.conv1 = torch.nn.Conv2d(1, 32, kernel_size=(3, 3))
        self.pool1 = torch.nn.MaxPool2d(kernel_size=(2, 2))
        self.conv2 = torch.nn.Conv2d(32, 64, kernel_size=(3, 3))
        self.pool2 = torch.nn.MaxPool2d(kernel_size=(2, 2))
        self.conv3 = torch.nn.Conv2d(64, 64, kernel_size=(3, 3))
        self.flatten = torch.nn.Flatten()
        # Spatial sizes: 28x28 -> conv1 -> 26x26 -> pool1 -> 13x13
        #   -> conv2 -> 11x11 -> pool2 -> 5x5 -> conv3 -> 3x3.
        # Last conv stage has 64 channels, hence 64*3*3 flattened features.
        self.fc1 = torch.nn.Linear(64 * 3 * 3, 64)
        self.fc2 = torch.nn.Linear(64, 10)

    def forward(self, x):
        """Run the conv/pool stack and the classifier head; return logits."""
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = F.relu(self.conv2(x))
        x = self.pool2(x)
        x = F.relu(self.conv3(x))
        x = self.flatten(x)
        x = F.relu(self.fc1(x))
        # Return raw logits: torch.nn.CrossEntropyLoss applies log-softmax
        # internally, so the original F.softmax here double-normalized the
        # outputs and crippled the gradients.
        return self.fc2(x)
||||
|
||||
|
||||
class MnistDataset(Dataset):
    """Custom Dataset wrapping pre-loaded MNIST image/label tensors."""

    # Number of samples; the image and label tensors must agree on it.
    shape: int
    x_data: torch.Tensor
    y_data: torch.Tensor

    def __init__(self, x_data: torch.Tensor, y_data: torch.Tensor):
        n_images = x_data.shape[0]
        n_labels = y_data.shape[0]
        assert (n_images == n_labels)
        self.shape = n_images

        self.x_data = x_data
        self.y_data = y_data

    def __getitem__(self, index):
        """Return the (image, label) pair at *index*."""
        return self.x_data[index], self.y_data[index]

    def __len__(self):
        """Return the number of samples."""
        return self.shape
||||
|
||||
|
||||
class DataSource:
    """Loads MNIST from the local ``datasets/mnist.npz`` archive and exposes
    shuffled train / ordered test ``DataLoader``s."""

    train_data: DataLoader
    test_data: DataLoader

    def __init__(self, batch_size: int):
        datasets_path = Path(
            __file__).resolve().parent.parent / 'datasets' / 'mnist.npz'
        datasets = numpy.load(datasets_path)

        # 60k training images (60000x28x28); labels are 1-D.
        train_images = torch.from_numpy(datasets['x_train'])
        train_label = torch.from_numpy(datasets['y_train'])
        # 10k test images (10000x28x28); labels are 1-D.
        test_images = torch.from_numpy(datasets['x_test'])
        test_label = torch.from_numpy(datasets['y_test'])

        # Conv2d expects (N, C, H, W): insert the channel axis at position 1.
        # The original called `unsqueeze(-1)` and discarded the result (it is
        # not in-place), and even if assigned it would have targeted the
        # wrong, TensorFlow-style last axis. Convert to float before
        # normalizing: in-place `/= 255.0` on the raw uint8 tensors raises
        # a RuntimeError (float result cannot be cast back to Byte).
        train_images = train_images.unsqueeze(1).float() / 255.0
        test_images = test_images.unsqueeze(1).float() / 255.0
        # CrossEntropyLoss requires int64 class labels, not uint8.
        train_label = train_label.long()
        test_label = test_label.long()

        # Build the datasets.
        train_dataset = MnistDataset(train_images, train_label)
        test_dataset = MnistDataset(test_images, test_label)

        # Shuffle only the training stream.
        self.train_data = DataLoader(dataset=train_dataset,
                                     batch_size=batch_size,
                                     shuffle=True)
        self.test_data = DataLoader(dataset=test_dataset,
                                    batch_size=batch_size,
                                    shuffle=False)
||||
|
||||
|
||||
def main():
    """Train the CNN on MNIST for a few epochs, printing per-batch losses."""
    n_epoch = 5
    n_batch_size = 25

    device = gpu_utils.get_gpu_device()
    data_source = DataSource(n_batch_size)
    cnn = CNN().to(device)

    # Adam with default hyper-parameters; cross-entropy for 10-way digits.
    optimizer = torch.optim.Adam(cnn.parameters())
    loss_func = torch.nn.CrossEntropyLoss()

    for epoch in range(n_epoch):
        cnn.train()

        batch_images: torch.Tensor
        batch_labels: torch.Tensor
        for batch_index, (batch_images, batch_labels) in enumerate(data_source.train_data):
            gpu_images = batch_images.to(device)
            gpu_labels = batch_labels.to(device)

            optimizer.zero_grad()
            prediction: torch.Tensor = cnn(gpu_images)
            loss: torch.Tensor = loss_func(prediction, gpu_labels)
            loss.backward()
            optimizer.step()

            # The original bound loss.item() to an unused local
            # (loss_showcase); the value is only needed for this print.
            print(f'Epoch: {epoch+1}, Batch: {batch_index}, Loss: {loss.item():.4f}')


if __name__ == "__main__":
    gpu_utils.print_gpu_availability()
    main()
|
||||
45
exp2/source/predict.py
Normal file
45
exp2/source/predict.py
Normal file
@@ -0,0 +1,45 @@
|
||||
import tensorflow as tf
|
||||
from PIL import Image
|
||||
import numpy as np
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
from train import CNN
|
||||
|
||||
'''
|
||||
python 3.9
|
||||
tensorflow 2.0.0b0
|
||||
pillow(PIL) 4.3.0
|
||||
'''
|
||||
|
||||
|
||||
class Predict(object):
    """Restores the trained CNN weights and classifies digit images."""

    def __init__(self):
        # Restore the network weights from the most recent checkpoint.
        latest = tf.train.latest_checkpoint('./ckpt')
        self.cnn = CNN()
        self.cnn.model.load_weights(latest)

    def predict(self, image_path):
        # Read the image in grayscale ('L' mode).
        img = Image.open(image_path).convert('L')
        img = np.reshape(img, (28, 28, 1)) / 255.
        # Invert intensities before building the single-image batch.
        x = np.array([1 - img])
        y = self.cnn.model.predict(x)

        # Only one image was passed in, so y[0] holds its class scores;
        # np.argmax picks the index of the highest score, i.e. the digit.
        print(image_path)
        # print(y[0])
        print(' -> Predict digit', np.argmax(y[0]))
        plt.figure("Image")  # window title
        plt.imshow(img)
        plt.axis('on')  # keep the axes visible
        plt.title(np.argmax(y[0]))  # required — nothing renders without it
        plt.show()


if __name__ == "__main__":
    app = Predict()
    app.predict('./test_images/0.png')
    app.predict('./test_images/1.png')
    app.predict('./test_images/4.png')
||||
61
exp2/source/train.py
Normal file
61
exp2/source/train.py
Normal file
@@ -0,0 +1,61 @@
|
||||
import os
|
||||
import tensorflow as tf
|
||||
from tensorflow.keras import datasets, layers, models
|
||||
|
||||
'''
|
||||
python 3.9
|
||||
tensorflow 2.0.0b0
|
||||
'''
|
||||
class CNN(object):
    """Builds the Keras CNN for 28x28 grayscale digit classification."""

    def __init__(self):
        # Stack: 3x3 convs (32, 64, 64 filters) with 2x2 max-pooling between
        # the first two, then a dense head ending in a 10-way softmax.
        model = models.Sequential([
            layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
            layers.MaxPooling2D((2, 2)),
            layers.Conv2D(64, (3, 3), activation='relu'),
            layers.MaxPooling2D((2, 2)),
            layers.Conv2D(64, (3, 3), activation='relu'),
            layers.Flatten(),
            layers.Dense(64, activation='relu'),
            layers.Dense(10, activation='softmax'),
        ])
        model.summary()
        self.model = model
||||
|
||||
class DataSource(object):
    """Loads the MNIST arrays, reshapes them to NHWC and scales to [0, 1]."""

    def __init__(self):
        # Cache location for the dataset; downloaded automatically if absent.
        # NOTE(review): this resolves to "<script dir>." — the trailing '.'
        # looks like a truncated filename; confirm Keras accepts it here.
        data_path = os.path.abspath(os.path.dirname(
            __file__)) + '.'
        (train_images, train_labels), (test_images,
                                       test_labels) = datasets.mnist.load_data(path=data_path)
        # 60k training and 10k test images, given an explicit channel axis.
        train_images = train_images.reshape((60000, 28, 28, 1))
        test_images = test_images.reshape((10000, 28, 28, 1))
        # Map pixel values into [0, 1].
        train_images = train_images / 255.0
        test_images = test_images / 255.0
        self.train_images, self.train_labels = train_images, train_labels
        self.test_images, self.test_labels = test_images, test_labels
||||
|
||||
class Train:
    """Compiles, fits and evaluates the CNN on MNIST."""

    def __init__(self):
        self.cnn = CNN()
        self.data = DataSource()

    def train(self):
        check_path = './ckpt/cp-{epoch:04d}.ckpt'
        # Checkpoint weights every 5 epochs. NOTE(review): `period` is
        # deprecated in newer Keras, and with epochs=5 below this saves
        # exactly once, at the final epoch — confirm that is intended.
        save_model_cb = tf.keras.callbacks.ModelCheckpoint(
            check_path, save_weights_only=True, verbose=1, period=5)

        model = self.cnn.model
        model.compile(optimizer='adam',
                      loss='sparse_categorical_crossentropy',
                      metrics=['accuracy'])
        model.fit(self.data.train_images,
                  self.data.train_labels,
                  epochs=5,
                  callbacks=[save_model_cb])
        test_loss, test_acc = model.evaluate(self.data.test_images,
                                             self.data.test_labels)
        print("准确率: %.4f,共测试了%d张图片 " % (test_acc, len(self.data.test_labels)))


if __name__ == "__main__":
    app = Train()
    app.train()
||||
17
gpu_utils.py
Normal file
17
gpu_utils.py
Normal file
@@ -0,0 +1,17 @@
|
||||
import torch
|
||||
|
||||
|
||||
def print_gpu_availability():
    """Print whether a CUDA-capable GPU is visible to PyTorch."""
    if not torch.cuda.is_available():
        print("GPU不可用")
        return
    print(f"GPU可用:{torch.cuda.get_device_name(0)}")
||||
|
||||
|
||||
def get_gpu_device() -> torch.device:
    """Return the CUDA device PyTorch should use.

    Raises:
        RuntimeError: if no CUDA device is available.
    """
    if torch.cuda.is_available():
        return torch.device("cuda")
    # RuntimeError is narrower than the bare Exception the original raised;
    # any caller catching Exception still catches it (backward compatible).
    raise RuntimeError("找不到CUDA!")
||||
26
pyproject.toml
Normal file
26
pyproject.toml
Normal file
@@ -0,0 +1,26 @@
|
||||
[project]
|
||||
name = "experiment"
|
||||
version = "0.1.0"
|
||||
description = "Add your description here"
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.11"
|
||||
dependencies = [
|
||||
"datasets>=4.3.0",
|
||||
"matplotlib>=3.10.7",
|
||||
"numpy>=2.3.4",
|
||||
"torch>=2.9.0",
|
||||
"torchvision>=0.24.0",
|
||||
]
|
||||
|
||||
[tool.uv.sources]
|
||||
torch = [
|
||||
{ index = "pytorch-cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
|
||||
]
|
||||
torchvision = [
|
||||
{ index = "pytorch-cu126", marker = "sys_platform == 'linux' or sys_platform == 'win32'" },
|
||||
]
|
||||
|
||||
[[tool.uv.index]]
|
||||
name = "pytorch-cu126"
|
||||
url = "https://download.pytorch.org/whl/cu126"
|
||||
explicit = true
|
||||
Reference in New Issue
Block a user