1. 导入函数包
import torch
from torch.autograd import Variable
import torch.optim as optim
import paddle
from paddle.autograd import backward
import paddle.optimizer as optim
导入数据和对数据进行预处理的方法一致,两个框架的程序代码相同。
2. 张量的定义
# Build one mini-batch of inputs/targets as float32 tensors (PyTorch).
# torch.tensor(...) replaces the deprecated Variable(torch.FloatTensor(...))
# idiom — Variable was merged into Tensor in PyTorch 0.4 and is a no-op wrapper.
xx = torch.tensor(X[start:end], dtype=torch.float32)
yy = torch.tensor(Y[start:end], dtype=torch.float32)
# PaddlePaddle equivalent of the same batch construction.
xx = paddle.to_tensor(X[start:end], dtype='float32')
yy = paddle.to_tensor(Y[start:end], dtype='float32')
3. 定义神经网络架构
# Network architecture (PyTorch): features.shape[1] input units, 10 hidden
# units, 1 output unit, trained in mini-batches of 128 samples.
input_size = features.shape[1]
hidden_size = 10
output_size = 1
batch_size = 128
# Build the layers individually, then chain them with Sequential.
hidden_layer = torch.nn.Linear(input_size, hidden_size)
activation = torch.nn.Sigmoid()
output_layer = torch.nn.Linear(hidden_size, output_size)
neu = torch.nn.Sequential(hidden_layer, activation, output_layer)
# Mean-squared-error loss provided by PyTorch.
cost = torch.nn.MSELoss()
# Plain stochastic gradient descent over all model parameters, lr = 0.01.
optimizer = torch.optim.SGD(neu.parameters(), lr=0.01)
# Same architecture with PaddlePaddle: features.shape[1] input units,
# 10 hidden units, 1 output unit, mini-batches of 128 samples.
input_size = features.shape[1]
hidden_size = 10
output_size = 1
batch_size = 128
# Build the layers individually, then chain them with Sequential.
fc1 = paddle.nn.Linear(input_size, hidden_size)
act = paddle.nn.Sigmoid()
fc2 = paddle.nn.Linear(hidden_size, output_size)
neu = paddle.nn.Sequential(fc1, act, fc2)
# Mean-squared-error loss provided by PaddlePaddle.
cost = paddle.nn.MSELoss()
# Plain stochastic gradient descent over all model parameters, lr = 0.01.
optimizer = paddle.optimizer.SGD(parameters=neu.parameters(), learning_rate=0.01)
4. 分批训练的梯度下降算法
# Mini-batch SGD training loop (PyTorch): 1000 passes over the dataset.
# NOTE(review): indentation was lost in the source; reconstructed here.
for i in range(1000):
    # Per-batch losses collected over this epoch.
    batch_loss = []
    # start/end delimit one batch of at most batch_size samples.
    for start in range(0, len(X), batch_size):
        end = min(start + batch_size, len(X))
        # torch.tensor replaces the deprecated Variable(FloatTensor(...)) wrapper
        # (Variable merged into Tensor in PyTorch 0.4).
        xx = torch.tensor(X[start:end], dtype=torch.float32)
        yy = torch.tensor(Y[start:end], dtype=torch.float32)
        predict = neu(xx)
        loss = cost(predict, yy)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # loss.item() replaces deprecated loss.data.numpy(): extracts the
        # scalar without touching the autograd-tracked .data attribute.
        batch_loss.append(loss.item())
    # Report the mean batch loss every 100 epochs.
    if i % 100 == 0:
        losses.append(np.mean(batch_loss))
        print(i, np.mean(batch_loss))
# Mini-batch SGD training loop (PaddlePaddle): 1000 passes over the dataset.
# NOTE(review): indentation was lost in the source; reconstructed here.
for i in range(1000):
    batch_loss = []  # per-batch losses collected over this epoch
    # start/end delimit one batch of at most batch_size samples.
    for start in range(0, len(X), batch_size):
        end = min(start + batch_size, len(X))
        batch_x = paddle.to_tensor(X[start:end], dtype='float32')
        batch_y = paddle.to_tensor(Y[start:end], dtype='float32')
        # Forward pass, loss, backward pass, parameter update, gradient reset.
        prediction = neu(batch_x)
        loss = cost(prediction, batch_y)
        loss.backward()
        optimizer.step()
        optimizer.clear_grad()
        batch_loss.append(loss.numpy())
    # Report the mean batch loss every 100 epochs.
    if i % 100 == 0:
        losses.append(np.mean(batch_loss))
        print(i, np.mean(batch_loss))