Added a description of the model

古代中二龙 2019-11-19 16:07:27 +08:00 committed by GitHub
parent ce7d56b487
commit c431d75e60

@@ -133,23 +133,25 @@
"class ConvNet(nn.Module):\n",
" def __init__(self):\n",
" super().__init__()\n",
" # 1,28x28\n",
" self.conv1=nn.Conv2d(1,10,5) # 10, 24x24\n",
" self.conv2=nn.Conv2d(10,20,3) # 128, 10x10\n",
" self.fc1 = nn.Linear(20*10*10,500)\n",
" self.fc2 = nn.Linear(500,10)\n",
" # batch*1*28*28每次会送入batch个样本输入通道数1黑白图像图像分辨率是28x28\n",
" # 下面的卷积层Conv2d的第一个参数指输入通道数第二个参数指输出通道数第三个参数指卷积核的大小\n",
" self.conv1 = nn.Conv2d(1, 10, 5) # 输入通道数1输出通道数10核的大小5\n",
" self.conv2 = nn.Conv2d(10, 20, 3) # 输入通道数10输出通道数20核的大小3\n",
" # 下面的全连接层Linear的第一个参数指输入通道数第二个参数指输出通道数\n",
" self.fc1 = nn.Linear(20*10*10, 500) # 输入通道数是2000输出通道数是500\n",
" self.fc2 = nn.Linear(500, 10) # 输入通道数是500输出通道数是10即10分类\n",
" def forward(self,x):\n",
" in_size = x.size(0)\n",
" out = self.conv1(x) #24\n",
" out = F.relu(out)\n",
" out = F.max_pool2d(out, 2, 2) #12\n",
" out = self.conv2(out) #10\n",
" out = F.relu(out)\n",
" out = out.view(in_size,-1)\n",
" out = self.fc1(out)\n",
" out = F.relu(out)\n",
" out = self.fc2(out)\n",
" out = F.log_softmax(out,dim=1)\n",
" in_size = x.size(0) # 在本例中in_size=512也就是BATCH_SIZE的值。输入的x可以看成是512*1*28*28的张量。\n",
" out = self.conv1(x) # batch*1*28*28 -> batch*10*24*2428x28的图像经过一次核为5x5的卷积输出变为24x24\n",
" out = F.relu(out) # batch*10*24*24激活函数ReLU不改变形状\n",
" out = F.max_pool2d(out, 2, 2) # batch*10*24*24 -> batch*10*12*122*2的池化层会减半\n",
" out = self.conv2(out) # batch*10*12*12 -> batch*20*10*10再卷积一次核的大小是3\n",
" out = F.relu(out) # batch*20*10*10\n",
" out = out.view(in_size, -1) # batch*20*10*10 -> batch*2000out的第二维是-1说明是自动推算本例中第二维是20*10*10\n",
" out = self.fc1(out) # batch*2000 -> batch*500\n",
" out = F.relu(out) # batch*500\n",
" out = self.fc2(out) # batch*500 -> batch*10\n",
" out = F.log_softmax(out, dim=1) # 计算log(softmax(x))\n",
" return out"
]
},
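
The shape arithmetic documented in the new comments can be double-checked outside the notebook. The snippet below is a minimal standalone sketch, not part of the commit: it assumes only that PyTorch is installed, builds the same layer configuration with freshly initialized weights, pushes a dummy batch of 512 grayscale 28x28 images through it, and prints the final shape.

import torch
import torch.nn as nn
import torch.nn.functional as F

x = torch.randn(512, 1, 28, 28)       # batch*1*28*28, with batch = BATCH_SIZE = 512
x = F.relu(nn.Conv2d(1, 10, 5)(x))    # -> 512*10*24*24  (28 - 5 + 1 = 24)
x = F.max_pool2d(x, 2, 2)             # -> 512*10*12*12  (2x2 pooling halves height and width)
x = F.relu(nn.Conv2d(10, 20, 3)(x))   # -> 512*20*10*10  (12 - 3 + 1 = 10)
x = x.view(x.size(0), -1)             # -> 512*2000      (20 * 10 * 10 = 2000)
x = nn.Linear(20*10*10, 500)(x)       # -> 512*500
x = nn.Linear(500, 10)(x)             # -> 512*10
print(F.log_softmax(x, dim=1).shape)  # torch.Size([512, 10])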