```lua
require 'nn';

mlp = nn.Sequential()
mlp:add(nn.Linear(10, 25)) -- Linear module (10 inputs, 25 hidden units)
mlp:add(nn.Tanh())         -- apply the hyperbolic tangent transfer function to each hidden unit
mlp:add(nn.Linear(25, 1))  -- Linear module (25 inputs, 1 output)

> mlp
nn.Sequential {
  [input -> (1) -> (2) -> (3) -> output]
  (1): nn.Linear(10 -> 25)
  (2): nn.Tanh
  (3): nn.Linear(25 -> 1)
}

> print(mlp:forward(torch.randn(10)))
-0.1815
[torch.Tensor of dimension 1]
```
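To train such a network you also need a loss and a gradient step. The following is a minimal sketch of one manual training step; the `nn.MSECriterion` loss, the random input/target pair, and the 0.01 learning rate are illustrative assumptions, not part of the original example.

```lua
criterion = nn.MSECriterion()           -- mean squared error loss (illustrative choice)
x, y = torch.randn(10), torch.randn(1)  -- random input/target pair (assumed for the sketch)

mlp:zeroGradParameters()                -- clear any previously accumulated gradients
local out = mlp:forward(x)              -- forward pass through the whole Sequential
local err = criterion:forward(out, y)   -- loss value
mlp:backward(x, criterion:backward(out, y)) -- backpropagate the loss gradient
mlp:updateParameters(0.01)              -- vanilla gradient step with learning rate 0.01
```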
Inserting a layer

```lua
model = nn.Sequential()
model:add(nn.Linear(10, 20))
model:add(nn.Linear(20, 30))
model:insert(nn.Linear(20, 20), 2) -- insert a new layer at position 2

> model
nn.Sequential {
  [input -> (1) -> (2) -> (3) -> output]
  (1): nn.Linear(10 -> 20)
  (2): nn.Linear(20 -> 20) -- the inserted layer
  (3): nn.Linear(20 -> 30)
}
```
Removing a layer

```lua
model = nn.Sequential()
model:add(nn.Linear(10, 20))
model:add(nn.Linear(20, 20))
model:add(nn.Linear(20, 30))
model:remove(2) -- remove the layer at position 2

> model
nn.Sequential {
  [input -> (1) -> (2) -> output]
  (1): nn.Linear(10 -> 20)
  (2): nn.Linear(20 -> 30)
}
```
module = nn.Parallel(inputDimension, outputDimension)

```lua
mlp = nn.Parallel(2, 1);  -- Parallel associates one module with each slice of dimension 2
                          -- (column space) and concatenates the outputs along dimension 1.
mlp:add(nn.Linear(10, 3)) -- Linear module (input 10, output 3), applied to the 1st slice of dimension 2
mlp:add(nn.Linear(10, 2)) -- Linear module (input 10, output 2), applied to the 2nd slice of dimension 2

-- After passing through the Linear modules, the outputs are concatenated
-- along dimension 1, forming a 1D Tensor of size 5.
> mlp:forward(torch.randn(10, 2))
-0.5300
-1.1015
 0.7764
 0.2819
-0.6026
[torch.Tensor of dimension 5]
```
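To make the slicing behaviour concrete, this sketch recomputes the same result by hand; the `input` tensor and the use of the container's `get` method to fetch the wrapped modules are assumptions made for the illustration.

```lua
input = torch.randn(10, 2)
out = mlp:forward(input)  -- Parallel's result: a 1D tensor of size 5

manual = torch.cat(
   mlp:get(1):forward(input:select(2, 1)), -- 1st column through the first Linear (size 3)
   mlp:get(2):forward(input:select(2, 2)), -- 2nd column through the second Linear (size 2)
   1)                                      -- concatenate along dimension 1

print(torch.dist(out, manual))             -- should print 0: the computations are identical
```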
module = nn.Concat(dim)

```lua
mlp = nn.Concat(1);
mlp:add(nn.Linear(5, 3))
mlp:add(nn.Linear(5, 7))

> print(mlp:forward(torch.randn(5)))
 0.7486
 0.1349
 0.7924
-0.0371
-0.4794
 0.3044
-0.0835
-0.7928
 0.7856
-0.1815
[torch.Tensor of dimension 10]
```

A dataset must satisfy two requirements: `dataset[index]` returns the index-th example, and `dataset:size()` returns the total number of examples in the dataset.
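The following is a minimal sketch of a dataset meeting both requirements, trained with the built-in `nn.StochasticGradient` trainer; the network architecture, the `nn.MSECriterion` loss, and all numeric values are illustrative assumptions.

```lua
-- A toy dataset satisfying both requirements (random values, for illustration only).
dataset = {}
function dataset:size() return 100 end            -- dataset:size() reports 100 examples
for i = 1, dataset:size() do
   dataset[i] = {torch.randn(10), torch.randn(1)} -- dataset[index] is an {input, target} pair
end

model = nn.Sequential()                           -- a small network matching the toy data
model:add(nn.Linear(10, 25))
model:add(nn.Tanh())
model:add(nn.Linear(25, 1))

criterion = nn.MSECriterion()                     -- mean squared error loss (illustrative)
trainer = nn.StochasticGradient(model, criterion)
trainer.learningRate = 0.01
trainer:train(dataset)                            -- iterates over dataset[1] .. dataset[dataset:size()]
```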
The above is a simple way to train a model; for more optimization methods, see the Optimization package.
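For example, the same training loop can be driven by `optim.sgd`; this sketch reuses the `model`, `criterion`, and `dataset` names assumed in the previous sketch and is likewise only an illustration.

```lua
require 'optim'

params, gradParams = model:getParameters() -- flatten all parameters into single tensors
sgdConfig = {learningRate = 0.01}

for i = 1, dataset:size() do
   local input, target = dataset[i][1], dataset[i][2]
   local function feval(p)                 -- returns the loss and its gradient at p
      if p ~= params then params:copy(p) end
      gradParams:zero()
      local output = model:forward(input)
      local loss = criterion:forward(output, target)
      model:backward(input, criterion:backward(output, target))
      return loss, gradParams
   end
   optim.sgd(feval, params, sgdConfig)     -- one SGD step on this example
end
```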