import torch
import torch.nn as nn


class MyModel(nn.Module):
    """Simple feed-forward regressor: Linear(10->32) -> Sigmoid -> Linear(32->1)."""

    def __init__(self):
        super(MyModel, self).__init__()
        # Define the neural network layers.
        self.net = nn.Sequential(
            nn.Linear(10, 32),
            nn.Sigmoid(),
            nn.Linear(32, 1),
        )

    def forward(self, x):
        # Forward pass: maps a (batch, 10) input to a (batch, 1) output.
        return self.net(x)


# Bug fix: `device` was used below but never defined, and `torch` itself was
# never imported even though `torch.optim` (and later torch.save/torch.load)
# are used. Pick GPU when available, otherwise CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

# Construct model and move it to the chosen device (cpu/cuda).
model = MyModel().to(device)
# Loss function: mean squared error (regression target).
criterion = nn.MSELoss()
# Optimizer: plain SGD over all model parameters, learning rate 0.1.
optimizer = torch.optim.SGD(model.parameters(), 0.1)
# Training loop: run `n_epoch` full passes over the training dataloader.
for epoch in range(n_epoch):
    model.train()                          # switch model to training mode
    for x, y in tr_set:                    # iterate mini-batches from the dataloader
        optimizer.zero_grad()              # clear gradients from the previous step
        x, y = x.to(device), y.to(device)  # move batch to the compute device
        pred = model(x)                    # forward pass (compute output)
        loss = criterion(pred, y)          # compute training loss
        loss.backward()                    # backpropagation (compute gradients)
        optimizer.step()                   # update parameters with the optimizer
# Validation: average the per-sample loss over the validation dataloader.
model.eval()                                   # switch model to evaluation mode
total_loss = 0
for x, y in dv_set:                            # iterate validation batches
    x, y = x.to(device), y.to(device)          # move batch to the compute device
    with torch.no_grad():                      # no gradients needed for evaluation
        pred = model(x)                        # forward pass (compute output)
        loss = criterion(pred, y)              # batch-mean loss
    # Weight by batch size so the final average is per-sample, not per-batch.
    total_loss += loss.cpu().item() * len(x)
avg_loss = total_loss / len(dv_set.dataset)    # mean loss over the whole dataset
# Inference pass over the dataloader.
# NOTE(review): this looks like a truncated copy of the validation loop —
# the comment says "Collect prediction" but no predictions are accumulated
# and `total_loss` is never used. Confirm against the original tutorial.
model.eval()                           # switch model to evaluation mode
total_loss = 0
for x, y in dv_set:                    # iterate batches from the dataloader
    x, y = x.to(device), y.to(device)  # move batch to the compute device
    with torch.no_grad():              # disable gradient calculation
        pred = model(x)                # forward pass (compute output)
        loss = criterion(pred, y)      # collect prediction
存储/载入模型
存储模型
1
# Save the model: persist only the learned parameters (state_dict) to disk.
torch.save(model.state_dict(), path)  # `path` must be specified by the caller
载入模型
1
2
# Load the model: read a saved state_dict and restore it into the model.
ckpt = torch.load(path)      # `path` must be specified by the caller
model.load_state_dict(ckpt)  # copy saved weights into the live model