-- Attention.lua (forked from jfsantos/seq2seq)
require 'nn';
require 'nngraph';
require 'AddBias';
require 'RNNAttention';
require 'Recurrent';
require 'MonotonicAlignment';
require 'Squeeze';
require 'ExpandAs';
require 'AddDim';
require 'TemporalConvolutionZeroBias';
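-- This module appears to implement the hybrid (content + location) attention
-- decoder in the style of Chorowski et al. (2015), "Attention-Based Models for
-- Speech Recognition". At each output step t, given encoder annotations h[1..L]:
--   f[t]     = F conv alpha[t-1]                  -- location features
--   e[t][l]  = w^T tanh(W*s[t-1] + V*h[l] + U*f[t][l])
--   alpha[t] = softmax(e[t])                      -- attention weights
--   c[t]     = sum_l alpha[t][l] * h[l]           -- context vector
-- and {c[t], y[t-1]} drives the recurrent decoder f and the readout MLP g below.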
--nngraph.setDebug(true)
local Attention, parent = torch.class('nn.Attention','nn.Module')
function Attention:__init(decoder_recurrent, -- recurrent part of the decoder ~ f
decoder_mlp, -- MLP that converts state to y ~ g
scoreDepth, -- length of input vector to e
hybridAttendFilterSize, -- size of conv kernel on prev alpha
hybridAttendFeatureMaps, -- # kernels in conv on prev alpha
stateDepth, -- size of hidden state
annotationDepth, -- size of annotations
outputDepth, -- size of output layer
monoAlignPenalty, -- monotonic alignment penalty
penaltyLambda) -- strength of penalty
--T -- length of output sequence
--)
parent.__init(self)
self.decoder_recurrent = decoder_recurrent
self.scoreDepth = scoreDepth
self.hybridAttendFilterSize = hybridAttendFilterSize
self.hybridAttendFeatureMaps = hybridAttendFeatureMaps
self.stateDepth = stateDepth
self.annotationDepth = annotationDepth
self.outputDepth = outputDepth -- stored for use in BeamSearch
self.MonotonicAlignmentPenalty = monoAlignPenalty or false
self.lambda = penaltyLambda or 0.0
--self.T = T
------------------ construct attentional decoder ------------------
-- First, construct Vh separately: h is fixed over the output sequence, so
-- computing Vh once is cheaper than recomputing it inside the RNN at every step
------------------ Vh ------------------
local h = nn.Identity()()
local _Vh = nn.TemporalConvolutionZeroBias(annotationDepth,scoreDepth,1)(h)
local Vh = nn.gModule({h},{_Vh})
Vh.name = "Vh"
self.Vh = Vh
-- Next, construct the decoder
------------------ inputs ------------------
local input = nn.Identity()()
local prev_hidden = nn.Identity()()
local nonrecurrent, prev_y = input:split(2)
local prev_alpha,prev_s,prev_mem = prev_hidden:split(3)
local prev_s = nn.Identity()(prev_s)
self.prev_s = prev_s
prev_s.name = 'prev_s'
-- prev_alpha ~ L
-- prev_s ~ stateDepth
-- prev_mem ~ stateDepth
local Vh_inp, h = nonrecurrent:split(2)
------------------ Ws ------------------
local prev_s_reshaped = nn.View(stateDepth,1)(prev_s)
local ws = nn.TemporalConvolution(1,scoreDepth,stateDepth)(prev_s_reshaped)
local Ws = nn.ExpandAs(1,2)({ws,Vh_inp})
--local Ws = nn.Reshape(L,scoreDepth)(nn.Replicate(L,1,2)(ws))
self.ws = ws
Ws.name = 'Ws'
-- L x scoreDepth
------------------ UF ------------------
local UF, Z
if hybridAttendFeatureMaps > 0 then
local pad_left,pad_right
if hybridAttendFilterSize % 2 == 1 then
-- odd
pad_left = math.floor((hybridAttendFilterSize-1)/2)
pad_right = pad_left
else
-- even
pad_left = hybridAttendFilterSize/2
pad_right = pad_left-1
end
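-- this padding makes the convolution length-preserving: output length equals L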
local prev_alpha_reshaped = nn.AddDim(1,1)(prev_alpha)
-- alphaReshaped ~ L x 1
local padded_alpha = nn.Padding(1,pad_right,2)(nn.Padding(1,-pad_left,2)(prev_alpha_reshaped))
padded_alpha.name = "padded alpha"
local F = nn.TemporalConvolution(1,hybridAttendFeatureMaps,hybridAttendFilterSize)(padded_alpha)
UF = nn.TemporalConvolutionZeroBias(hybridAttendFeatureMaps,scoreDepth,1)(F)
UF.name = 'UF'
self.UF = UF
-- L x scoreDepth
Z = nn.CAddTable()({Ws,Vh_inp,UF})
-- L x scoreDepth
else
Z = nn.CAddTable()({Ws,Vh_inp})
end
self.Z = Z
Z.name = 'Z'
------------------ tanh ------------------
local tanh = nn.Tanh()(Z)
tanh.name = 'tanh'
self.tanh = tanh
-- L x scoreDepth
------------------ e_t ------------------
local e = nn.TemporalConvolutionZeroBias(scoreDepth,1,1)(tanh)
e.name = 'e'
self.e = e
-- L x 1
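-- i.e. e[l] = w^T tanh( Ws + Vh[l] + UF[l] ), one scalar score per position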
------------------ alpha_t ------------------
--local alpha = nn.SoftMax()(nn.View(L)(e))
local alpha = nn.SoftMax()(nn.Squeeze(2,2)(e))
alpha.name = 'alpha'
-- L
------------------ monotonic alignment penalty ------------------
if self.MonotonicAlignmentPenalty then
print('using monotonic alignment penalty ' .. self.lambda)
alpha = nn.MonotonicAlignment(self.lambda)({alpha,prev_alpha})
-- name only the penalty node 'penalty'; renaming unconditionally would clobber
-- the node named 'alpha' above and break getRNNlayer('alpha')
alpha.name = 'penalty'
end
-- L
------------------ c_t ------------------
-- alpha ~ L
-- h ~ L x annotationDepth
local alpha_view = nn.Replicate(1,1,1)(alpha)
-- 1 x L
local c = nn.View(-1):setNumInputDims(2)(nn.MM()({alpha_view,h}))
self.c = c
-- annotationDepth
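-- i.e. c = sum_l alpha[l] * h[l], the expected annotation under alpha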
------------------ decoder_recurrent ------------------
-- inputs:
-- c_t ~ input ~ annotationDepth
-- y_{t-1} ~ input ~ outputDepth
-- s_{t-1} ~ prev_output ~ stateDepth
-- mem_{t-1} ~ prev_memory ~ stateDepth
-- outputs:
-- s_t ~ output ~ stateDepth
-- mem_t ~ memory ~ stateDepth
--local y_in = nn.Tanh()(nn.Linear(outputDepth,stateDepth)(prev_y))
--local c_in = nn.Tanh()(nn.Linear(annotationDepth,stateDepth)(c))
local y_in = nn.Linear(outputDepth,stateDepth)(prev_y)
local c_in = nn.Linear(annotationDepth,stateDepth)(c)
local dec_rec_inp = nn.Linear(2*stateDepth,stateDepth)(nn.JoinTable(1,1)({c_in,y_in}))
local s,mem = decoder_recurrent({dec_rec_inp,prev_s,prev_mem}):split(2)
self.s = s
s.name = 's'
self.mem = mem
mem.name = 'mem'
decoder_recurrent.name = 'decoder_recurrent'
------------------ decoder_mlp ------------------
-- inputs:
-- s_t ~ input ~ stateDepth
-- c_t ~ input ~ annotationDepth
-- outputs:
-- y_t ~ output ~ outputDepth
local y = decoder_mlp({s,c})
------------------ decoder_base ------------------
-- decoder_base = attention + recurrent + mlp
--
-- inputs:
-- nonrecurrent ~ input ~ {Vh(h),h}
-- y_{t-1} ~ output ~ outputDepth
-- alpha_{t-1} ~ hidden ~ L (encoder length)
-- s_{t-1} ~ hidden ~ stateDepth
-- mem_{t-1} ~ hidden ~ stateDepth
-- outputs:
-- alpha_t ~ hidden ~ L (encoder length)
-- s_t ~ hidden ~ stateDepth
-- mem_t ~ hidden ~ stateDepth
-- y_t ~ output ~ outputDepth
local hidden = nn.Identity()({alpha, s, mem})
local decoder_base_ = nn.gModule({input,prev_hidden},{y,hidden})
decoder_base_.name = "decoder_base_"
self.decoder_base_ = decoder_base_
local dimhidden = {0,stateDepth,stateDepth}
local dimoutput = outputDepth
local decoder_base = nn.Recurrent(decoder_base_,dimhidden,dimoutput)
self.decoder_base = decoder_base
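-- dimhidden[1] is 0 here because alpha's size is the encoder length L, known
-- only at runtime; updateOutput below patches it to {L,stateDepth,stateDepth}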
------------------ encoder output + decoder base ------------------
-- inputs:
-- h ~ input ~ annotationDepth
-- Vh(h) ~ input ~ scoreDepth
-- outputs:
-- y_{1:T} ~ output ~ T x outputDepth
local numRecurrentInputs = 0
local numNonRecurrentInputs = 2
local h = nn.Identity()()
local y = nn.Identity()()
local rnn_inp = {nn.Identity()({Vh(h),h}),y}
local rnn = nn.RNNAttention(decoder_base,outputDepth,false)
self.rnn = rnn
--nngraph.annotateNodes()
local decoder = nn.gModule({h,y},{rnn(rnn_inp)})
decoder.name = "decoder"
self.decoder = decoder
self.modules = {decoder}
end
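--[[ Usage sketch with hypothetical plug-in modules (not from this repo, untested):
decoder_recurrent must map {x_t, s_prev, mem_prev} -> {s_t, mem_t}, and
decoder_mlp must map {s_t, c_t} -> log-probabilities over outputDepth classes
(BeamSearch below sums the outputs as log-probs).

local stateDepth, annotationDepth, outputDepth, scoreDepth = 256, 128, 30, 128

-- a single vanilla-RNN step; mem is passed through unused
local x, ps, pm = nn.Identity()(), nn.Identity()(), nn.Identity()()
local s = nn.Tanh()(nn.CAddTable()({x, nn.Linear(stateDepth,stateDepth)(ps)}))
local decoder_recurrent = nn.gModule({x,ps,pm},{s,nn.Identity()(pm)})

-- readout: log-softmax over the output classes
local si, ci = nn.Identity()(), nn.Identity()()
local joined = nn.JoinTable(1,1)({si,ci})
local y = nn.LogSoftMax()(nn.Linear(stateDepth+annotationDepth,outputDepth)(joined))
local decoder_mlp = nn.gModule({si,ci},{y})

local att = nn.Attention(decoder_recurrent, decoder_mlp, scoreDepth,
                         3, 16, stateDepth, annotationDepth, outputDepth)
--]]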
function Attention:getRNNlayer(layername)
local rnn = self.rnn
local layer = {}
local sequence_dim = rnn.sequence_dim
local batchSize = rnn.batchSize
assert(batchSize ~= nil, 'forward must be run at least once to recover ' .. layername)
for t = 1, rnn.T do
local nodes = rnn.rnn[t].recurrent.backwardnodes
for i = 1, #nodes do
local f = nodes[i]
if f.name == layername then
local output = f.data.module.output
local size = output:size():totable()
if batchSize == 0 then
layer[t] = output:contiguous():view(1,unpack(size))
else
local b = table.remove(size,1)
assert(b == batchSize, 'inconsistent tensor sizes')
layer[t] = output:contiguous():view(batchSize,1,unpack(size))
end
end
end
end
return nn.JoinTable(sequence_dim):type(layer[1]:type()):forward(layer)
end
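-- e.g. after a forward pass, att:alpha() returns the per-step attention weights
-- joined along the sequence dimension (exact shape depends on rnn.sequence_dim)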
function Attention:alpha()
return self:getRNNlayer('alpha')
end
function Attention:penalty()
return self:getRNNlayer('penalty')
end
function Attention:Ws()
return self:getRNNlayer('Ws')
end
function Attention:setpenalty(penalty)
local rnn = self.rnn
local nodes = rnn.recurrent.recurrent.backwardnodes
local foundpenalty = false
for i = 1, #nodes do
local f = nodes[i]
if f.name == 'penalty' then
f.data.module.lambda = penalty
print('setting penalty to ' .. penalty)
foundpenalty = true
end
end
assert(foundpenalty == true, 'could not find penalty node')
for t = 1, rnn.T do
local nodes = rnn.rnn[t].recurrent.backwardnodes
for i =1, #nodes do
local f = nodes[i]
if f.name == 'penalty' then
f.data.module.lambda = penalty
end
end
end
end
function Attention:parameters()
return self.decoder:parameters()
end
function Attention:training()
self.decoder:training()
end
function Attention:evaluate()
self.decoder:evaluate()
end
function Attention:double()
self.decoder = self.decoder:double()
return self:type('torch.DoubleTensor')
end
function Attention:float()
self.decoder = self.decoder:float()
return self:type('torch.FloatTensor')
end
function Attention:cuda()
self.decoder = self.decoder:cuda()
return self:type('torch.CudaTensor')
end
function Attention:setT(T)
self.rnn:setT(T)
end
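-- forward input is {x, y}: x ~ L x annotationDepth (or batch x L x annotationDepth)
-- encoder annotations, y ~ T x outputDepth (or batch x T x outputDepth) target
-- sequence; the alpha hidden state is sized by L, so dimhidden is patched per call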
function Attention:updateOutput(input)
local x,y = unpack(input)
local L,T
if x:nDimension() == 2 then
-- nonbatch mode
L = x:size(1)
T = y:size(1)
elseif x:nDimension() == 3 then
L = x:size(2)
T = y:size(2)
else
error('x must be 2d or 3d')
end
self.rnn:apply2clones(function(x) x.dimhidden = {L,self.stateDepth,self.stateDepth} end)
self:setT(T)
self.output = self.decoder:forward(input)
return self.output
end
function Attention:updateGradInput(input, gradOutput)
self.gradInput = self.decoder:backward(input, gradOutput)
return self.gradInput
end
function Attention:BeamSearch(annotations,eos,K,maxseqlength)
assert(eos ~= nil, 'must specify eos index')
assert(type(eos) == 'number', 'eos index must be a number')
local K = K or 5
local maxseqlength = maxseqlength or annotations:size(1)
local deepcopy
deepcopy = function(x)
local y
if type(x) == 'table' then
y = {}
for k,v in pairs(x) do
y[k] = deepcopy(v)
end
else
y = x:clone()
end
return y
end
local Vh = self.Vh
local vh = Vh:forward(annotations)
local decoder_base = self.decoder_base
local y_prev = self.rnn.zeros_y
local nonrecurrent = {vh,annotations}
local hidden = nil
local dec_base_inp = {{nonrecurrent,y_prev},hidden}
local L = annotations:size(1)
decoder_base.dimhidden = {L,self.stateDepth,self.stateDepth}
local output = decoder_base:forward(dec_base_inp)
local logprobs, hidden = unpack(output)
local N = logprobs:size(1)
local val, ind = torch.topk(logprobs:float(),K,1,true)
local p_beam = {}
local y_beam = {}
local h_beam = {}
local finished = 0
local y_finished = {}
local p_finished = {}
for k = 1,K do
if ind[k] == eos then
table.insert(y_finished,ind[{{k}}])
table.insert(p_finished,val[k])
finished = finished + 1
else
table.insert(y_beam, ind[{{k}}])
table.insert(h_beam, hidden)
table.insert(p_beam, val[k])
end
end
local count = 0
while finished < K and count < maxseqlength do
count = count + 1
local p_next = {}
local h_next = {}
for k = 1, K - finished do
y_prev = y_beam[k][y_beam[k]:size(1)]
local h_prev = h_beam[k]
local labelmask = torch.zeros(self.outputDepth):cuda()
labelmask[y_prev] = 1
local hidden = deepcopy(h_prev)
local dec_base_inp = {{nonrecurrent,labelmask},hidden}
local output = decoder_base:forward(dec_base_inp)
p_next[k], h_next[k] = unpack(deepcopy(output))
p_next[k] = p_next[k] + p_beam[k]
end
local val, ind = torch.topk(nn.JoinTable(1):cuda():forward(p_next):float(),K,1,true)
local I = torch.floor((ind:double()-1)/N) + 1
local J = ind:double() - (I-1)*N
local y_beam_next = {}
local h_beam_next = {}
local p_beam_next = {}
for k = 1, K - finished do
local i = I[k]
local j = J[k]
local y_next = y_beam[i].new():resize(1):fill(j)
local y_next = torch.cat(y_beam[i],y_next)
if j == eos or count == maxseqlength then
table.insert(y_finished,y_next)
table.insert(p_finished,p_next[i][j])
finished = finished + 1
else
table.insert(y_beam_next,y_next)
table.insert(h_beam_next,h_next[i])
table.insert(p_beam_next,p_next[i][j])
end
end
y_beam = y_beam_next
h_beam = h_beam_next
p_beam = p_beam_next
end
--if count == maxseqlength then
-- print 'beam search reached max sequence length'
--end
local pval, best = torch.Tensor(p_finished):max(1)
local prediction = y_finished[best[1]]
return prediction
end
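-- Usage sketch (hypothetical values): given unbatched encoder annotations h
-- (L x annotationDepth), eos the end-of-sequence label index, beam width K:
--   local pred = att:BeamSearch(h, eosIndex, 5, 200)
-- returns the highest-scoring finished label sequence as a 1-d tensor.
-- Note: as written, BeamSearch assumes CUDA tensors (the :cuda() calls above).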