# (removed extraction residue: a run of leaked line numbers, not program text)
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
# Preprocessing: convert PIL images to float tensors in [0, 1], then scale
# to [-1, 1].  MNIST is single-channel, so Normalize takes ONE mean/std
# value per channel — the original 3-tuple (0.5, 0.5, 0.5) would fail to
# broadcast against a [1, 28, 28] tensor at transform time.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])

# MNIST train/test splits; the training split is downloaded on first run.
train_dataset = datasets.MNIST(root='./data', train=True, transform=transform, download=True)
test_dataset = datasets.MNIST(root='./data', train=False, transform=transform)

# Mini-batch loaders: shuffle only the training data; keep test order fixed.
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=False)
class SimplecNN(nn.Module):
    """Small two-convolution CNN for 10-class classification of 1x28x28 images.

    Architecture: conv1 -> relu -> maxpool(2) -> conv2 -> relu -> maxpool(2)
    -> flatten -> fc1 -> relu -> fc2.  Returns raw logits of shape
    (batch, 10); no softmax is applied, so pair with nn.CrossEntropyLoss.
    """

    def __init__(self):
        # BUG FIX: the original spelled this `_init_` (single underscores),
        # so the constructor was never invoked and no layers existed —
        # the first forward() would raise AttributeError.
        super().__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        # Two 2x2 max-poolings reduce 28x28 to 7x7, hence 64*7*7 flat features.
        self.fc1 = nn.Linear(64 * 7 * 7, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Map a (batch, 1, 28, 28) input batch to (batch, 10) logits."""
        x = F.relu(self.conv1(x))
        x = F.max_pool2d(x, 2)
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)
        x = x.view(-1, 64 * 7 * 7)  # flatten; -1 preserves the batch dim
        x = F.relu(self.fc1(x))
        return self.fc2(x)


model = SimplecNN()
criterion = nn.CrossEntropyLoss()
# CONSISTENCY: use the `optim` alias imported at the top of the file
# instead of the fully-qualified torch.optim.SGD.
optimizer = optim.SGD(model.parameters(), lr=0.01)

# --- Training loop ---
num_epochs = 5
model.train()
for epoch in range(num_epochs):
    total_loss = 0.0
    for images, labels in train_loader:
        outputs = model(images)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print(f"Epoch [{epoch+1}/{num_epochs}], Loss: {total_loss:.4f}")

# --- Evaluation ---
model.eval()
correct = 0
total = 0
with torch.no_grad():  # no gradients needed for inference
    for images, labels in test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)  # class with highest logit
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
# BUG FIX: the original accumulated correct/total but never reported them.
print(f"Test Accuracy: {100.0 * correct / total:.2f}%")
# (removed stray "|" extraction residue)