1. 神经网络中的 forward 必须重写
2. 自己想要尝试nn.Conv2d()的功能的话,设置的tensor数组一定是小数(浮点型),因为数据集ToTensor变换之后就全都是小数
# 例子:
# Example: a 5x5 tensor built directly from float literals.
x = torch.tensor(
    [
        [1.0, 2.0, 1.0, 2.0, 2.0],
        [0.0, 1.0, 2.0, 1.0, 3.0],
        [3.0, 2.0, 2.0, 0.0, 4.0],
        [4.0, 0.0, 2.0, 1.0, 1.0],
        [2.0, 5.0, 1.0, 2.0, 2.0],
    ]
)
# 或者这样:
# Alternative: integer literals with an explicit float dtype — same result.
x = torch.tensor(
    [
        [1, 2, 1, 2, 2],
        [0, 1, 2, 1, 3],
        [3, 2, 2, 0, 4],
        [4, 0, 2, 1, 1],
        [2, 5, 1, 2, 2],
    ],
    dtype=torch.float32,
)
# ToTensor之后就全都是小数
tensor([[[0.6196, 0.6235, 0.6471, ..., 0.5373, 0.4941, 0.4549],
[0.5961, 0.5922, 0.6235, ..., 0.5333, 0.4902, 0.4667],
[0.5922, 0.5922, 0.6196, ..., 0.5451, 0.5098, 0.4706],
...,
[0.2667, 0.1647, 0.1216, ..., 0.1490, 0.0510, 0.1569],
[0.2392, 0.1922, 0.1373, ..., 0.1020, 0.1137, 0.0784],
[0.2118, 0.2196, 0.1765, ..., 0.0941, 0.1333, 0.0824]],
[[0.4392, 0.4353, 0.4549, ..., 0.3725, 0.3569, 0.3333],
[0.4392, 0.4314, 0.4471, ..., 0.3725, 0.3569, 0.3451],
[0.4314, 0.4275, 0.4353, ..., 0.3843, 0.3725, 0.3490],
...,
[0.4863, 0.3922, 0.3451, ..., 0.3804, 0.2510, 0.3333],
[0.4549, 0.4000, 0.3333, ..., 0.3216, 0.3216, 0.2510],
[0.4196, 0.4118, 0.3490, ..., 0.3020, 0.3294, 0.2627]],
[[0.1922, 0.1843, 0.2000, ..., 0.1412, 0.1412, 0.1294],
[0.2000, 0.1569, 0.1765, ..., 0.1216, 0.1255, 0.1333],
[0.1843, 0.1294, 0.1412, ..., 0.1333, 0.1333, 0.1294],
...,
[0.6941, 0.5804, 0.5373, ..., 0.5725, 0.4235, 0.4980],
[0.6588, 0.5804, 0.5176, ..., 0.5098, 0.4941, 0.4196],
[0.6275, 0.5843, 0.5176, ..., 0.4863, 0.5059, 0.4314]]])
3. 池化操作中的 ceil_mode 参数可以在池化核和输入图像尺寸不完全匹配时选择是否保留超出的部分,
步长 stride 默认为 kernel_size 的大小
4. 这样的继承写法是python2.7 的写法
class my_net(nn.Module):
    """Tiny demo CNN: one conv layer followed by a max-pooling layer."""

    def __init__(self):
        # Python 3 form; the original used the Python 2.7 style
        # ``super(my_net, self).__init__()`` — equivalent behavior in Py3.
        super().__init__()
        # 1 input channel -> 6 output channels, 3x3 kernel; padding=1 keeps H/W.
        self.conv1 = nn.Conv2d(1, 6, 3, padding=1)
        # 3x3 max pool; stride defaults to the kernel size (3).
        # ceil_mode=False drops border regions that don't fill a full window.
        self.pooling = nn.MaxPool2d(3, ceil_mode=False)

    def forward(self, x):
        """Conv then pool — nn.Module subclasses must override forward (note 1)."""
        return self.pooling(self.conv1(x))



