# NOTE(review): these lines are the interior of SiameseNet.__init__ — the
# `def __init__(self, args, embed):` header lies above the visible region.
# The original text had every `=` stripped (extraction damage); restored here.
super(SiameseNet, self).__init__()
# Character-level embedding table, initialized from a pretrained numpy matrix
# (assumed shape (args.char_size, args.embedding_size) — TODO confirm).
self.embed = nn.Embedding(args.char_size, args.embedding_size)
self.embed.weight.data.copy_(torch.from_numpy(embed))
# Shared 2-layer bidirectional LSTM encoder used by both Siamese branches.
self.lstm = nn.LSTM(args.embedding_size, args.lstm_hidden_size,
                    num_layers=2, dropout=0.2,
                    bidirectional=True, batch_first=True)
# Bidirectional LSTM emits 2 * hidden_size features per timestep, hence the *2.
self.dense = nn.Linear(args.lstm_hidden_size * 2, args.linear_hidden_size)
self.dropout = nn.Dropout(p=0.3)
def forward(self, a, b):
    """Encode two character-id sequences and return their cosine similarity.

    Both inputs pass through the same (shared-weight) pipeline:
    embedding -> BiLSTM -> mean-pool over time -> dense+tanh -> dropout,
    which is what makes the network Siamese.

    Args:
        a: LongTensor of token ids, shape (batch, seq_len_a) — assumed from
           the batch_first LSTM usage; TODO confirm against callers.
        b: LongTensor of token ids, shape (batch, seq_len_b).

    Returns:
        1-D tensor of shape (batch,) with cosine similarities in [-1, 1].
    """
    # NOTE(review): the original text had every `=` stripped (extraction
    # damage); assignments and keyword arguments are restored here.
    emb_a = self.embed(a)
    emb_b = self.embed(b)
    # nn.LSTM returns (output, (h_n, c_n)); only the per-step outputs are used.
    lstm_a, _ = self.lstm(emb_a)
    lstm_b, _ = self.lstm(emb_b)
    # Mean-pool across the time dimension to get fixed-size sentence vectors.
    avg_a = torch.mean(lstm_a, dim=1)
    avg_b = torch.mean(lstm_b, dim=1)
    out_a = self.dropout(torch.tanh(self.dense(avg_a)))
    out_b = self.dropout(torch.tanh(self.dense(avg_b)))
    return torch.cosine_similarity(out_a, out_b, dim=1, eps=1e-8)