# alexnet.py — small AlexNet-style CNN (pasted source; web line-number gutter removed)
  1. import torch
  2. import torch.nn as nn
  3. # 定义修改后的AlexNet模型
  4. class AlexNet(nn.Module):
  5. def __init__(self):
  6. super(AlexNet, self).__init__()
  7. # 定义每一个就卷积层
  8. self.layer1 = nn.Sequential(
  9. # 卷积层 #输入图像为1*28*28
  10. nn.Conv2d(3, 32, kernel_size=3, padding=1),
  11. # 池化层
  12. nn.MaxPool2d(kernel_size=2, stride=2), # 池化层特征图通道数不改变,每个特征图的分辨率变小
  13. # 激活函数Relu
  14. nn.ReLU(inplace=True),
  15. )
  16. self.layer2 = nn.Sequential(
  17. nn.Conv2d(32, 64, kernel_size=3, padding=1),
  18. nn.MaxPool2d(kernel_size=2, stride=2),
  19. nn.ReLU(inplace=True),
  20. )
  21. self.layer3 = nn.Sequential(
  22. nn.Conv2d(64, 128, kernel_size=3, padding=1),
  23. )
  24. self.layer4 = nn.Sequential(
  25. nn.Conv2d(128, 256, kernel_size=3, padding=1),
  26. )
  27. self.layer5 = nn.Sequential(
  28. nn.Conv2d(256, 256, kernel_size=3, padding=1),
  29. nn.MaxPool2d(kernel_size=3, stride=2),
  30. nn.ReLU(inplace=True),
  31. )
  32. # 定义全连接层
  33. self.fc1 = nn.Linear(256 * 3 * 3, 1024)
  34. self.fc2 = nn.Linear(1024, 512)
  35. self.fc3 = nn.Linear(512, 10)
  36. # 对应十个类别的输出
  37. def forward(self, x):
  38. x = self.layer1(x)
  39. x = self.layer2(x)
  40. x = self.layer3(x)
  41. x = self.layer4(x)
  42. x = self.layer5(x)
  43. # print(x.shape)
  44. x = x.view(-1, 256 * 3 * 3)
  45. x = self.fc1(x)
  46. x = self.fc2(x)
  47. x = self.fc3(x)
  48. return x