# We use PyTorch's data API to read the data: we pass the features and labels as
# arguments and specify batch_size when constructing the data iterator.
# is_train indicates whether the data should be shuffled in every epoch.
def load_array(data_arrays, batch_size, is_train=True):  #@save
    """Construct a PyTorch data iterator."""
    dataset = data.TensorDataset(*data_arrays)
    return data.DataLoader(dataset, batch_size, shuffle=is_train)
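As a quick usage sketch (assuming the features and labels tensors from the earlier data-generation step, and that data was imported via from torch.utils import data):

batch_size = 10
data_iter = load_array((features, labels), batch_size)  # build the iterator
next(iter(data_iter))  # fetch one minibatch (X, y) to verify it works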
# First we need to initialize the model parameters, i.e. the weight and the bias.
# Here we draw every weight from a normal distribution with mean 0 and
# standard deviation 0.01, and set the bias to 0.
net[0].weight.data.normal_(0, 0.01)
net[0].bias.data.fill_(0)
# finished setting the parameters
tensor([0.])
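For context, the indexing net[0] above assumes net was defined earlier as a single-layer network; a minimal sketch consistent with that usage:

# assumption: net as built in the preceding part of this section
from torch import nn
net = nn.Sequential(nn.Linear(2, 1))  # 2 input features, 1 output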
3.3.5 Define the loss function
# We apply the MSELoss class to compute the mean squared error, which is also
# known as the squared L2 norm. By default, it returns the average loss over all samples.
loss = nn.MSELoss()
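A small sanity check of the default averaging behavior (hypothetical tensors, assuming torch is imported):

y_hat = torch.tensor([1.0, 2.0, 3.0])
y = torch.tensor([1.0, 2.0, 5.0])
loss(y_hat, y)  # (0^2 + 0^2 + 2^2) / 3 = tensor(1.3333)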
3.3.6 Define the optimization method
# The optim module of PyTorch implements many variants of SGD (minibatch
# stochastic gradient descent). Here we use plain SGD with learning rate 0.03.
trainer = torch.optim.SGD(net.parameters(), lr=0.03)
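For intuition, the update that trainer.step() performs is equivalent in effect to the following manual sketch (illustrative only, not how optim is implemented internally):

# manual SGD update with lr = 0.03, assuming gradients were already computed by backward()
with torch.no_grad():
    for param in net.parameters():
        param -= 0.03 * param.grad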
3.3.7 Train
num_epochs = 3
for epoch in range(num_epochs):
    for X, y in data_iter:
        l = loss(net(X), y)  # net(X) generates predictions; compute the loss l
        trainer.zero_grad()  # clear the gradients from the previous step
        l.backward()  # backpropagate: differentiate l w.r.t. both parameters so the error is eventually minimized
        trainer.step()  # update the parameters
    l = loss(net(features), labels)
    print(f'epoch {epoch + 1}, loss {l:f}')  # print the loss after each epoch
epoch 1, loss 0.000159
epoch 2, loss 0.000099
epoch 3, loss 0.000101
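As an optional refinement (a sketch), the epoch-end evaluation is used only for reporting, so it can skip gradient tracking:

with torch.no_grad():
    l = loss(net(features), labels)
print(f'epoch {epoch + 1}, loss {l:f}')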
# Below we compare the true parameters used to generate the dataset with the
# model parameters learned from finite data. To access the parameters, we first
# grab the desired layer from net, then read that layer's weight and bias.
w = net[0].weight.data
print("estimation error of w:", true_w - w.reshape(true_w.shape))  # true value minus the learned result
b = net[0].bias.data
print("estimation error of b: ", true_b - b)
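Note that true_w and true_b come from the data-generation step earlier in this chapter; in the book's code they are set as:

true_w = torch.tensor([2, -3.4])
true_b = 4.2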