import torch
import torch.nn as nn

# Define the modified AlexNet model
class AlexNet(nn.Module):
    def __init__(self):
        super(AlexNet, self).__init__()
        # Define each convolutional block
        self.layer1 = nn.Sequential(
            # Convolution; the input is a 3 x 32 x 32 image
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            # Pooling keeps the number of channels and halves the spatial resolution
            nn.MaxPool2d(kernel_size=2, stride=2),
            # ReLU activation
            nn.ReLU(inplace=True),
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.ReLU(inplace=True),
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
        )
        self.layer4 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
        )
        self.layer5 = nn.Sequential(
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.ReLU(inplace=True),
        )
        # Define the fully connected layers
        self.fc1 = nn.Linear(256 * 3 * 3, 1024)
        self.fc2 = nn.Linear(1024, 512)
        self.fc3 = nn.Linear(512, 10)  # outputs for the ten classes

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.layer5(x)
        # print(x.shape)
        x = x.view(-1, 256 * 3 * 3)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        return x
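As a quick sanity check (a minimal sketch added here, not part of the original listing), the snippet below instantiates the model and pushes a dummy batch of 3 x 32 x 32 images through it, confirming that the flattened feature map is 256 * 3 * 3 and that the output has ten logits per image. The batch size of 8 is arbitrary. Since forward() returns raw logits with no softmax, a loss such as nn.CrossEntropyLoss would typically be applied during training.

import torch

model = AlexNet()
dummy = torch.randn(8, 3, 32, 32)  # a hypothetical batch of 8 RGB 32x32 images
logits = model(dummy)
print(logits.shape)                # expected: torch.Size([8, 10])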