siamese_model2.lua
require 'nn'
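-- build_network_model(gpu) constructs a siamese network for pairs of 3x240x320
-- images: two weight-sharing convolutional branches encode the two images, their
-- feature maps are concatenated along the channel dimension and passed through a
-- deeper convolutional stack, and a zero-initialised 1x1 convolution produces a
-- 3-value prediction. If gpu > 0 the model is moved to the GPU, converted to
-- cudnn and memory-optimised with optnet.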
function build_network_model(gpu)
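-- Base encoder applied to each image of the pair: three strided convolutions,
-- each followed by a PReLU activation.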
local base_encoder = nn.Sequential()
base_encoder:add(nn.SpatialConvolution( 3, 64, 11, 11, 4, 4, 5, 5))
base_encoder:add(nn.PReLU())
base_encoder:add(nn.SpatialConvolution(64, 64, 5, 5, 2, 2, 2, 2))
base_encoder:add(nn.PReLU())
base_encoder:add(nn.SpatialConvolution(64, 128, 3, 3, 2, 2, 1, 1))
base_encoder:add(nn.PReLU())
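-- Initialise the encoder with the external 'weight-init' module ('MSRinit'
-- scheme), then clone it and share weights, biases and their gradients so both
-- branches of the siamese network stay identical during training.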
local base_encoder_init = require('weight-init')(base_encoder, 'MSRinit')
local base_encoder_clone = base_encoder_init:clone()
base_encoder_clone:share(base_encoder_init, 'weight', 'bias', 'gradWeight', 'gradBias')
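-- The siamese encoder applies one branch to each element of the input table
-- {image1, image2}.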
local siamese_encoder = nn.ParallelTable()
siamese_encoder:add(base_encoder_init)
siamese_encoder:add(base_encoder_clone)
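-- Joint encoder operating on the concatenated features of the two branches
-- (128 + 128 = 256 input channels); the two 4096-channel layers are
-- regularised with dropout.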
local top_encoder = nn.Sequential()
top_encoder:add(nn.SpatialConvolution(256, 256, 3, 3, 2, 2, 1, 1))
top_encoder:add(nn.PReLU())
top_encoder:add(nn.SpatialConvolution(256, 256, 3, 3, 2, 2, 1, 1))
top_encoder:add(nn.PReLU())
top_encoder:add(nn.SpatialConvolution(256, 512, 3, 3, 2, 2, 1, 1))
top_encoder:add(nn.PReLU())
top_encoder:add(nn.SpatialConvolution(512, 1024, 3, 3, 2, 2, 1, 1))
top_encoder:add(nn.PReLU())
top_encoder:add(nn.SpatialConvolution(1024, 4096, 3, 3, 2, 2, 1, 1))
top_encoder:add(nn.Dropout(0.5))
top_encoder:add(nn.PReLU())
top_encoder:add(nn.SpatialConvolution(4096, 4096, 1, 1, 1, 1, 0, 0))
top_encoder:add(nn.Dropout(0.5))
top_encoder:add(nn.PReLU())
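-- Prediction layer: a 1x1 convolution with 3 output channels whose weights and
-- biases are zeroed so the untrained network predicts zero.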
local pred_layer = nn.SpatialConvolution(4096, 3, 1, 1, 1, 1, 0, 0)
pred_layer.weight:zero()
pred_layer.bias:zero()
local top_encoder_init = require('weight-init')(top_encoder, 'MSRinit')
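-- Assemble the full model: siamese branches -> channel-wise concatenation
-- (JoinTable over dim 2 of the batched 4D tensors) -> joint encoder ->
-- prediction layer -> reshape the prediction into rows of 3 values.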
local model = nn.Sequential()
model:add(siamese_encoder)
model:add(nn.JoinTable(2))
model:add(top_encoder_init)
model:add(pred_layer)
model:add(nn.View(-1,3))
-- Dummy input (batch of two 3x240x320 images) used by optnet to trace memory usage.
local input = torch.Tensor(2, 3, 240, 320)
if gpu > 0 then
-- Move the model to the GPU, swap nn modules for their cudnn counterparts and
-- let optnet reuse buffers in place to reduce training memory.
require 'cutorch'
require 'cunn'
require 'cudnn'
model = model:cuda()
input = input:cuda()
cudnn.convert(model, cudnn)
local optnet = require 'optnet'
optnet.optimizeMemory(model, {input, input}, {inplace=true, mode='training'})
end
return model
end
--[[ Quick smoke test: build the model on the CPU, run a forward pass on a dummy
image pair and print the output size and the number of parameters.
local model = build_network_model(0) -- pass 1 to build the cudnn/GPU version
local input = torch.Tensor(2, 3, 240, 320)
--input = input:cuda() -- needed when the model was built with gpu > 0
local output = model:forward({input, input})
print(model)
print(output:size())
local params, grad_params = model:getParameters()
print('Number of parameters: ' .. params:nElement())
--]]