HariSekhar committed
Commit f08f01e
1 Parent(s): c8a81ea

Upload 6 files

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ eng_marathi/train.en filter=lfs diff=lfs merge=lfs -text
+ eng_marathi/train.mr filter=lfs diff=lfs merge=lfs -text
eng_marathi/.idea/.gitignore ADDED
@@ -0,0 +1,3 @@
+ # Default ignored files
+ /shelf/
+ /workspace.xml
eng_marathi/__pycache__/transformer.cpython-311.pyc ADDED
Binary file (22.8 kB).
 
eng_marathi/main.py ADDED
@@ -0,0 +1,241 @@
+ from transformer import Transformer  # this is the transformer.py file
+ import torch
+ import numpy as np
+ import chardet
+ import matplotlib.pyplot as plt
+ from torch import nn
+ english_file = r'C:\Users\haris\Downloads\eng_marathi\train.en'  # only the first TOTAL_SENTENCES pairs are used (see below)
+ marathi_file = r'C:\Users\haris\Downloads\eng_marathi\train.mr'  # only the first TOTAL_SENTENCES pairs are used (see below)
+
+ # Character-level vocabularies, generated by filtering the characters that occur in the data
+
+ START_TOKEN = '<START>'
+ PADDING_TOKEN = '<PADDING>'
+ END_TOKEN = '<END>'
+
+ marathi_vocabulary = [START_TOKEN, ' ', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/',
+                       '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', '<', '=', '>', '?', 'ˌ',
+                       'ँ', 'ఆ', 'ఇ', 'ా', 'ి', 'ీ', 'ు', 'ూ',
+                       'अ', 'आ', 'इ', 'ई', 'उ', 'ऊ', 'ऋ', 'ॠ', 'ऌ', 'ऎ', 'ए', 'ऐ', 'ऒ', 'ओ', 'औ',
+                       'क', 'ख', 'ग', 'घ', 'ङ',
+                       'च', 'छ', 'ज', 'झ', 'ञ',
+                       'ट', 'ठ', 'ड', 'ढ', 'ण',
+                       'त', 'थ', 'द', 'ध', 'न',
+                       'प', 'फ', 'ब', 'भ', 'म',
+                       'य', 'र', 'ऱ', 'ल', 'ळ', 'व', 'श', 'ष', 'स', 'ह',
+                       '़', 'ऽ', 'ा', 'ि', 'ी', 'ु', 'ू', 'ृ', 'ॄ', 'ॅ', 'े', 'ै', 'ॉ', 'ो', 'ौ', '्', 'ॐ', '।', '॥', '॰', 'ॱ', PADDING_TOKEN, END_TOKEN]
+
+ english_vocabulary = [START_TOKEN, ' ', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/',
+                       '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+                       ':', '<', '=', '>', '?', '@',
+                       '[', '\\', ']', '^', '_', '`',
+                       'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',
+                       'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x',
+                       'y', 'z',
+                       '{', '|', '}', '~', PADDING_TOKEN, END_TOKEN]
+ index_to_marathi = {k: v for k, v in enumerate(marathi_vocabulary)}
+ marathi_to_index = {v: k for k, v in enumerate(marathi_vocabulary)}
+ index_to_english = {k: v for k, v in enumerate(english_vocabulary)}
+ english_to_index = {v: k for k, v in enumerate(english_vocabulary)}
+
+ # Open the file in binary mode to detect its encoding
+ with open(marathi_file, 'rb') as file:
+     raw_data = file.read(10000)  # read some bytes to check the encoding
+     result = chardet.detect(raw_data)
+     encoding = result['encoding']
+     print(f"Detected encoding: {encoding}")
+ # Open the Marathi file with the detected encoding
+ with open(marathi_file, 'r', encoding=encoding) as file:
+     marathi_sentences = file.readlines()
+
+ # The English file is assumed to be UTF-8
+ with open(english_file, 'r', encoding='utf-8') as file:
+     english_sentences = file.readlines()
+
+ # Keep only the first TOTAL_SENTENCES pairs and strip trailing newlines
+ TOTAL_SENTENCES = 20000
+ english_sentences = english_sentences[:TOTAL_SENTENCES]
+ marathi_sentences = marathi_sentences[:TOTAL_SENTENCES]
+ english_sentences = [sentence.rstrip('\n').lower() for sentence in english_sentences]
+ marathi_sentences = [sentence.rstrip('\n') for sentence in marathi_sentences]
+
+
+ max_sequence_length = 200
+
+ def is_valid_tokens(sentence, vocab):
+     for token in list(set(sentence)):
+         if token not in vocab:
+             return False
+     return True
+
+ def is_valid_length(sentence, max_sequence_length):
+     return len(list(sentence)) < (max_sequence_length - 1)  # leave room for the end token
+
+ valid_sentence_indices = []
+ for index in range(len(marathi_sentences)):
+     marathi_sentence, english_sentence = marathi_sentences[index], english_sentences[index]
+     if is_valid_length(marathi_sentence, max_sequence_length) \
+             and is_valid_length(english_sentence, max_sequence_length) \
+             and is_valid_tokens(marathi_sentence, marathi_vocabulary) \
+             and is_valid_tokens(english_sentence, english_vocabulary):  # also drop English sentences with out-of-vocabulary characters
+         valid_sentence_indices.append(index)
+
+ print(f"Number of sentences: {len(marathi_sentences)}")
+ print(f"Number of valid sentences: {len(valid_sentence_indices)}")
+
+ marathi_sentences = [marathi_sentences[i] for i in valid_sentence_indices]
+ english_sentences = [english_sentences[i] for i in valid_sentence_indices]
+
+
+ d_model = 512
+ batch_size = 64
+ ffn_hidden = 2048
+ num_heads = 8
+ drop_prob = 0.1
+ num_layers = 4
+ max_sequence_length = 200
+ mr_vocab_size = len(marathi_vocabulary)
+
+ transformer = Transformer(d_model,
+                           ffn_hidden,
+                           num_heads,
+                           drop_prob,
+                           num_layers,
+                           max_sequence_length,
+                           mr_vocab_size,
+                           english_to_index,
+                           marathi_to_index,
+                           START_TOKEN,
+                           END_TOKEN,
+                           PADDING_TOKEN)
+
+ from torch.utils.data import Dataset, DataLoader
+
+ class TextDataset(Dataset):
+
+     def __init__(self, english_sentences, marathi_sentences):
+         self.english_sentences = english_sentences
+         self.marathi_sentences = marathi_sentences
+
+     def __len__(self):
+         return len(self.english_sentences)
+
+     def __getitem__(self, idx):
+         return self.english_sentences[idx], self.marathi_sentences[idx]
+
+
+ dataset = TextDataset(english_sentences, marathi_sentences)
+ train_loader = DataLoader(dataset, batch_size)
+ iterator = iter(train_loader)
+
+ criterion = nn.CrossEntropyLoss(ignore_index=marathi_to_index[PADDING_TOKEN],
+                                 reduction='none')
+
+ # When computing the loss, positions where the label is the padding token are ignored
+ for params in transformer.parameters():
+     if params.dim() > 1:
+         nn.init.xavier_uniform_(params)
+
+ optim = torch.optim.Adam(transformer.parameters(), lr=1e-4)
+ device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+ NEG_INFTY = -1e9
+
+ def create_masks(eng_batch, mr_batch):
+     num_sentences = len(eng_batch)
+     look_ahead_mask = torch.full([max_sequence_length, max_sequence_length], True)
+     look_ahead_mask = torch.triu(look_ahead_mask, diagonal=1)
+     encoder_padding_mask = torch.full([num_sentences, max_sequence_length, max_sequence_length], False)
+     decoder_padding_mask_self_attention = torch.full([num_sentences, max_sequence_length, max_sequence_length], False)
+     decoder_padding_mask_cross_attention = torch.full([num_sentences, max_sequence_length, max_sequence_length], False)
+
+     for idx in range(num_sentences):
+         eng_sentence_length, mr_sentence_length = len(eng_batch[idx]), len(mr_batch[idx])
+         eng_chars_to_padding_mask = np.arange(eng_sentence_length + 1, max_sequence_length)
+         mr_chars_to_padding_mask = np.arange(mr_sentence_length + 1, max_sequence_length)
+         encoder_padding_mask[idx, :, eng_chars_to_padding_mask] = True
+         encoder_padding_mask[idx, eng_chars_to_padding_mask, :] = True
+         decoder_padding_mask_self_attention[idx, :, mr_chars_to_padding_mask] = True
+         decoder_padding_mask_self_attention[idx, mr_chars_to_padding_mask, :] = True
+         decoder_padding_mask_cross_attention[idx, :, eng_chars_to_padding_mask] = True
+         decoder_padding_mask_cross_attention[idx, mr_chars_to_padding_mask, :] = True
+
+     encoder_self_attention_mask = torch.where(encoder_padding_mask, NEG_INFTY, 0)
+     decoder_self_attention_mask = torch.where(look_ahead_mask + decoder_padding_mask_self_attention, NEG_INFTY, 0)
+     decoder_cross_attention_mask = torch.where(decoder_padding_mask_cross_attention, NEG_INFTY, 0)
+     return encoder_self_attention_mask, decoder_self_attention_mask, decoder_cross_attention_mask
+
+ transformer.train()
+ transformer.to(device)
+ num_epochs = 100
+ epoch_losses = []
+
+ for epoch in range(num_epochs):
+     print(f"Epoch {epoch}")
+     total_loss = 0
+     count_batches = 0
+     iterator = iter(train_loader)
+     for batch_num, batch in enumerate(iterator):
+         transformer.train()
+         eng_batch, mr_batch = batch
+         encoder_self_attention_mask, decoder_self_attention_mask, decoder_cross_attention_mask = create_masks(eng_batch, mr_batch)
+         optim.zero_grad()
+         mr_predictions = transformer(eng_batch,
+                                      mr_batch,
+                                      encoder_self_attention_mask.to(device),
+                                      decoder_self_attention_mask.to(device),
+                                      decoder_cross_attention_mask.to(device),
+                                      enc_start_token=False,
+                                      enc_end_token=False,
+                                      dec_start_token=True,
+                                      dec_end_token=True)
+         labels = transformer.decoder.sentence_embedding.batch_tokenize(mr_batch, start_token=False, end_token=True)
+         loss = criterion(
+             mr_predictions.view(-1, mr_vocab_size).to(device),
+             labels.view(-1).to(device)
+         ).to(device)
+         valid_indices = torch.where(labels.view(-1) == marathi_to_index[PADDING_TOKEN], False, True)
+         loss = loss.sum() / valid_indices.sum()
+         loss.backward()
+         optim.step()
+         total_loss += loss.item()
+         count_batches += 1
+         if batch_num % 100 == 0:
+             print(f"Iteration {batch_num} : {loss.item()}")
+             print(f"English: {eng_batch[0]}")
+             print(f"Marathi Translation: {mr_batch[0]}")
+             mr_sentence_predicted = torch.argmax(mr_predictions[0], axis=1)
+             predicted_sentence = ""
+             for idx in mr_sentence_predicted:
+                 if idx == marathi_to_index[END_TOKEN]:
+                     break
+                 predicted_sentence += index_to_marathi[idx.item()]
+             print(f"Marathi Prediction: {predicted_sentence}")
+     average_loss = total_loss / count_batches
+     epoch_losses.append(average_loss)
+     print(f"Average Loss for Epoch {epoch}: {average_loss}")
+
+ transformer.eval()
+ mr_sentence = ("",)
+ eng_sentence = ("should we go to the mall?",)
+ for word_counter in range(max_sequence_length):
+     encoder_self_attention_mask, decoder_self_attention_mask, decoder_cross_attention_mask = create_masks(eng_sentence, mr_sentence)
+     predictions = transformer(eng_sentence,
+                               mr_sentence,
+                               encoder_self_attention_mask.to(device),
+                               decoder_self_attention_mask.to(device),
+                               decoder_cross_attention_mask.to(device),
+                               enc_start_token=False,
+                               enc_end_token=False,
+                               dec_start_token=True,
+                               dec_end_token=False)
+     next_token_prob_distribution = predictions[0][word_counter]  # raw logits, not normalised probabilities
+     next_token_index = torch.argmax(next_token_prob_distribution).item()
+     next_token = index_to_marathi[next_token_index]
+     mr_sentence = (mr_sentence[0] + next_token,)
+     if next_token == END_TOKEN:
+         break
+
+ print(f"Evaluation translation (should we go to the mall?) : {mr_sentence}")
+ print("-------------------------------------------")
+
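The least obvious part of main.py is create_masks: each mask is first built as a boolean tensor (True means "hide this position") and then converted with torch.where into an additive mask of zeros and large negative values that is simply added to the attention scores before the softmax. The standalone sketch below reproduces that idea at toy scale; max_len, sentence, and the variable names are illustrative only and are not part of the uploaded file.

import torch

NEG_INFTY = -1e9
max_len = 5          # stand-in for max_sequence_length = 200
sentence = "abc"     # a 3-character "sentence"; positions 3 and 4 are padding

# causal (look-ahead) mask: True above the diagonal, so future positions are hidden
look_ahead = torch.triu(torch.full([max_len, max_len], True), diagonal=1)

# padding mask: True wherever the query or the key falls in the padded tail
padding = torch.full([max_len, max_len], False)
pad_positions = torch.arange(len(sentence), max_len)
padding[:, pad_positions] = True
padding[pad_positions, :] = True

# combine and convert to additive form, as create_masks does with torch.where
additive = torch.where(look_ahead | padding, NEG_INFTY, 0.0)
print(additive)      # 0.0 where attention is allowed, -1e9 everywhere else

In the actual script the decoder self-attention mask combines both parts, while the encoder self-attention and cross-attention masks use only the padding part.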
eng_marathi/train.en ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2282d06b06b233b698aff839399116c845396734412b57d6be5e94c5aa7b590c
+ size 242539039
eng_marathi/train.mr ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2492cbc8a21b71c09e3fc4af8f16051d8ea5a27ef91c3566906db9c3a50b5552
+ size 636142032
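train.en and train.mr are stored as Git LFS pointer files: the commit records only the LFS spec version, the sha256 object id, and the byte size, while the actual text is fetched by LFS. As a quick sanity check after `git lfs pull`, a downloaded copy can be compared against the recorded oid and size. This is a minimal sketch using only the Python standard library; the local path is an assumption.

import hashlib
import os

path = "eng_marathi/train.en"  # assumed local path after `git lfs pull`
expected_oid = "2282d06b06b233b698aff839399116c845396734412b57d6be5e94c5aa7b590c"
expected_size = 242539039

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha.update(chunk)

print("size matches:  ", os.path.getsize(path) == expected_size)
print("sha256 matches:", sha.hexdigest() == expected_oid)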
eng_marathi/transformer.py ADDED
@@ -0,0 +1,304 @@
+ import numpy as np
+ import torch
+ import math
+ from torch import nn
+ import torch.nn.functional as F
+
+ def get_device():
+     return torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+
+ def scaled_dot_product(q, k, v, mask=None):
+     d_k = q.size()[-1]
+     scaled = torch.matmul(q, k.transpose(-1, -2)) / math.sqrt(d_k)
+     if mask is not None:
+         # permute so a (batch, seq, seq) mask broadcasts across the heads dimension
+         scaled = scaled.permute(1, 0, 2, 3) + mask
+         scaled = scaled.permute(1, 0, 2, 3)
+     attention = F.softmax(scaled, dim=-1)
+     values = torch.matmul(attention, v)
+     return values, attention
+
+ class PositionalEncoding(nn.Module):
+     def __init__(self, d_model, max_sequence_length):
+         super().__init__()
+         self.max_sequence_length = max_sequence_length
+         self.d_model = d_model
+
+     def forward(self):
+         even_i = torch.arange(0, self.d_model, 2).float()
+         denominator = torch.pow(10000, even_i / self.d_model)
+         position = (torch.arange(self.max_sequence_length)
+                     .reshape(self.max_sequence_length, 1))
+         even_PE = torch.sin(position / denominator)
+         odd_PE = torch.cos(position / denominator)
+         stacked = torch.stack([even_PE, odd_PE], dim=2)
+         PE = torch.flatten(stacked, start_dim=1, end_dim=2)
+         return PE
+
+ class SentenceEmbedding(nn.Module):
+     """For a given sentence, create an embedding."""
+     def __init__(self, max_sequence_length, d_model, language_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN):
+         super().__init__()
+         self.vocab_size = len(language_to_index)
+         self.max_sequence_length = max_sequence_length
+         self.embedding = nn.Embedding(self.vocab_size, d_model)
+         self.language_to_index = language_to_index
+         self.position_encoder = PositionalEncoding(d_model, max_sequence_length)
+         self.dropout = nn.Dropout(p=0.1)
+         self.START_TOKEN = START_TOKEN
+         self.END_TOKEN = END_TOKEN
+         self.PADDING_TOKEN = PADDING_TOKEN
+
+     def batch_tokenize(self, batch, start_token, end_token):
+
+         def tokenize(sentence, start_token, end_token):
+             sentence_word_indices = [self.language_to_index[token] for token in list(sentence)]
+             if start_token:
+                 sentence_word_indices.insert(0, self.language_to_index[self.START_TOKEN])
+             if end_token:
+                 sentence_word_indices.append(self.language_to_index[self.END_TOKEN])
+             for _ in range(len(sentence_word_indices), self.max_sequence_length):
+                 sentence_word_indices.append(self.language_to_index[self.PADDING_TOKEN])
+             return torch.tensor(sentence_word_indices)
+
+         tokenized = []
+         for sentence_num in range(len(batch)):
+             tokenized.append(tokenize(batch[sentence_num], start_token, end_token))
+         tokenized = torch.stack(tokenized)
+         return tokenized.to(get_device())
+
+     def forward(self, x, start_token, end_token):  # x is a batch of raw sentences
+         x = self.batch_tokenize(x, start_token, end_token)
+         x = self.embedding(x)
+         pos = self.position_encoder().to(get_device())
+         x = self.dropout(x + pos)
+         return x
+
+
+ class MultiHeadAttention(nn.Module):
+     def __init__(self, d_model, num_heads):
+         super().__init__()
+         self.d_model = d_model
+         self.num_heads = num_heads
+         self.head_dim = d_model // num_heads
+         self.qkv_layer = nn.Linear(d_model, 3 * d_model)
+         self.linear_layer = nn.Linear(d_model, d_model)
+
+     def forward(self, x, mask):
+         batch_size, sequence_length, d_model = x.size()
+         qkv = self.qkv_layer(x)
+         qkv = qkv.reshape(batch_size, sequence_length, self.num_heads, 3 * self.head_dim)
+         qkv = qkv.permute(0, 2, 1, 3)
+         q, k, v = qkv.chunk(3, dim=-1)
+         values, attention = scaled_dot_product(q, k, v, mask)
+         values = values.permute(0, 2, 1, 3).reshape(batch_size, sequence_length, self.num_heads * self.head_dim)
+         out = self.linear_layer(values)
+         return out
+
+
+ class LayerNormalization(nn.Module):
+     def __init__(self, parameters_shape, eps=1e-5):
+         super().__init__()
+         self.parameters_shape = parameters_shape
+         self.eps = eps
+         self.gamma = nn.Parameter(torch.ones(parameters_shape))
+         self.beta = nn.Parameter(torch.zeros(parameters_shape))
+
+     def forward(self, inputs):
+         dims = [-(i + 1) for i in range(len(self.parameters_shape))]
+         mean = inputs.mean(dim=dims, keepdim=True)
+         var = ((inputs - mean) ** 2).mean(dim=dims, keepdim=True)
+         std = (var + self.eps).sqrt()
+         y = (inputs - mean) / std
+         out = self.gamma * y + self.beta
+         return out
+
+
+ class PositionwiseFeedForward(nn.Module):
+     def __init__(self, d_model, hidden, drop_prob=0.1):
+         super(PositionwiseFeedForward, self).__init__()
+         self.linear1 = nn.Linear(d_model, hidden)
+         self.linear2 = nn.Linear(hidden, d_model)
+         self.relu = nn.ReLU()
+         self.dropout = nn.Dropout(p=drop_prob)
+
+     def forward(self, x):
+         x = self.linear1(x)
+         x = self.relu(x)
+         x = self.dropout(x)
+         x = self.linear2(x)
+         return x
+
+
+ class EncoderLayer(nn.Module):
+     def __init__(self, d_model, ffn_hidden, num_heads, drop_prob):
+         super(EncoderLayer, self).__init__()
+         self.attention = MultiHeadAttention(d_model=d_model, num_heads=num_heads)
+         self.norm1 = LayerNormalization(parameters_shape=[d_model])
+         self.dropout1 = nn.Dropout(p=drop_prob)
+         self.ffn = PositionwiseFeedForward(d_model=d_model, hidden=ffn_hidden, drop_prob=drop_prob)
+         self.norm2 = LayerNormalization(parameters_shape=[d_model])
+         self.dropout2 = nn.Dropout(p=drop_prob)
+
+     def forward(self, x, self_attention_mask):
+         residual_x = x.clone()
+         x = self.attention(x, mask=self_attention_mask)
+         x = self.dropout1(x)
+         x = self.norm1(x + residual_x)
+         residual_x = x.clone()
+         x = self.ffn(x)
+         x = self.dropout2(x)
+         x = self.norm2(x + residual_x)
+         return x
+
+ class SequentialEncoder(nn.Sequential):
+     def forward(self, *inputs):
+         x, self_attention_mask = inputs
+         for module in self._modules.values():
+             x = module(x, self_attention_mask)
+         return x
+
+ class Encoder(nn.Module):
+     def __init__(self,
+                  d_model,
+                  ffn_hidden,
+                  num_heads,
+                  drop_prob,
+                  num_layers,
+                  max_sequence_length,
+                  language_to_index,
+                  START_TOKEN,
+                  END_TOKEN,
+                  PADDING_TOKEN):
+         super().__init__()
+         self.sentence_embedding = SentenceEmbedding(max_sequence_length, d_model, language_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
+         self.layers = SequentialEncoder(*[EncoderLayer(d_model, ffn_hidden, num_heads, drop_prob)
+                                           for _ in range(num_layers)])
+
+     def forward(self, x, self_attention_mask, start_token, end_token):
+         x = self.sentence_embedding(x, start_token, end_token)
+         x = self.layers(x, self_attention_mask)
+         return x
+
+
+ class MultiHeadCrossAttention(nn.Module):
+     def __init__(self, d_model, num_heads):
+         super().__init__()
+         self.d_model = d_model
+         self.num_heads = num_heads
+         self.head_dim = d_model // num_heads
+         self.kv_layer = nn.Linear(d_model, 2 * d_model)
+         self.q_layer = nn.Linear(d_model, d_model)
+         self.linear_layer = nn.Linear(d_model, d_model)
+
+     def forward(self, x, y, mask):
+         batch_size, sequence_length, d_model = x.size()  # in practice the sequence length is the same for both languages, so the shape logic matches self-attention
+         kv = self.kv_layer(x)
+         q = self.q_layer(y)
+         kv = kv.reshape(batch_size, sequence_length, self.num_heads, 2 * self.head_dim)
+         q = q.reshape(batch_size, sequence_length, self.num_heads, self.head_dim)
+         kv = kv.permute(0, 2, 1, 3)
+         q = q.permute(0, 2, 1, 3)
+         k, v = kv.chunk(2, dim=-1)
+         values, attention = scaled_dot_product(q, k, v, mask)  # the mask passed in from the caller hides padded positions
+         values = values.permute(0, 2, 1, 3).reshape(batch_size, sequence_length, d_model)
+         out = self.linear_layer(values)
+         return out
+
+
+ class DecoderLayer(nn.Module):
+     def __init__(self, d_model, ffn_hidden, num_heads, drop_prob):
+         super(DecoderLayer, self).__init__()
+         self.self_attention = MultiHeadAttention(d_model=d_model, num_heads=num_heads)
+         self.layer_norm1 = LayerNormalization(parameters_shape=[d_model])
+         self.dropout1 = nn.Dropout(p=drop_prob)
+
+         self.encoder_decoder_attention = MultiHeadCrossAttention(d_model=d_model, num_heads=num_heads)
+         self.layer_norm2 = LayerNormalization(parameters_shape=[d_model])
+         self.dropout2 = nn.Dropout(p=drop_prob)
+
+         self.ffn = PositionwiseFeedForward(d_model=d_model, hidden=ffn_hidden, drop_prob=drop_prob)
+         self.layer_norm3 = LayerNormalization(parameters_shape=[d_model])
+         self.dropout3 = nn.Dropout(p=drop_prob)
+
+     def forward(self, x, y, self_attention_mask, cross_attention_mask):
+         _y = y.clone()
+         y = self.self_attention(y, mask=self_attention_mask)
+         y = self.dropout1(y)
+         y = self.layer_norm1(y + _y)
+
+         _y = y.clone()
+         y = self.encoder_decoder_attention(x, y, mask=cross_attention_mask)
+         y = self.dropout2(y)
+         y = self.layer_norm2(y + _y)
+
+         _y = y.clone()
+         y = self.ffn(y)
+         y = self.dropout3(y)
+         y = self.layer_norm3(y + _y)
+         return y
+
+
+ class SequentialDecoder(nn.Sequential):
+     def forward(self, *inputs):
+         x, y, self_attention_mask, cross_attention_mask = inputs
+         for module in self._modules.values():
+             y = module(x, y, self_attention_mask, cross_attention_mask)
+         return y
+
+ class Decoder(nn.Module):
+     def __init__(self,
+                  d_model,
+                  ffn_hidden,
+                  num_heads,
+                  drop_prob,
+                  num_layers,
+                  max_sequence_length,
+                  language_to_index,
+                  START_TOKEN,
+                  END_TOKEN,
+                  PADDING_TOKEN):
+         super().__init__()
+         self.sentence_embedding = SentenceEmbedding(max_sequence_length, d_model, language_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
+         self.layers = SequentialDecoder(*[DecoderLayer(d_model, ffn_hidden, num_heads, drop_prob) for _ in range(num_layers)])
+
+     def forward(self, x, y, self_attention_mask, cross_attention_mask, start_token, end_token):
+         y = self.sentence_embedding(y, start_token, end_token)
+         y = self.layers(x, y, self_attention_mask, cross_attention_mask)
+         return y
+
+
+ class Transformer(nn.Module):
+     def __init__(self,
+                  d_model,
+                  ffn_hidden,
+                  num_heads,
+                  drop_prob,
+                  num_layers,
+                  max_sequence_length,
+                  mr_vocab_size,       # size of the target (Marathi) vocabulary
+                  english_to_index,
+                  marathi_to_index,
+                  START_TOKEN,
+                  END_TOKEN,
+                  PADDING_TOKEN
+                  ):
+         super().__init__()
+         self.encoder = Encoder(d_model, ffn_hidden, num_heads, drop_prob, num_layers, max_sequence_length, english_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
+         self.decoder = Decoder(d_model, ffn_hidden, num_heads, drop_prob, num_layers, max_sequence_length, marathi_to_index, START_TOKEN, END_TOKEN, PADDING_TOKEN)
+         self.linear = nn.Linear(d_model, mr_vocab_size)
+         self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
+
+     def forward(self,
+                 x,
+                 y,
+                 encoder_self_attention_mask=None,
+                 decoder_self_attention_mask=None,
+                 decoder_cross_attention_mask=None,
+                 enc_start_token=False,
+                 enc_end_token=False,
+                 dec_start_token=False,  # set to True during training (see main.py)
+                 dec_end_token=False):   # x, y are batches of sentences
+         x = self.encoder(x, encoder_self_attention_mask, start_token=enc_start_token, end_token=enc_end_token)
+         out = self.decoder(x, y, decoder_self_attention_mask, decoder_cross_attention_mask, start_token=dec_start_token, end_token=dec_end_token)
+         out = self.linear(out)
+         return out
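As a rough smoke test of transformer.py, the sketch below builds a tiny model over a toy character vocabulary and runs a single forward pass; every name, size, and sentence here is made up for illustration and is not part of the uploaded code. With the mask arguments left at their None defaults, every position simply attends everywhere.

import torch
from transformer import Transformer, get_device

START, PAD, END = '<START>', '<PADDING>', '<END>'
chars = list(" abcdefghijklmnopqrstuvwxyz?")
src_vocab = [START] + chars + [PAD, END]
tgt_vocab = [START] + chars + [PAD, END]
src_to_index = {ch: i for i, ch in enumerate(src_vocab)}
tgt_to_index = {ch: i for i, ch in enumerate(tgt_vocab)}

max_len = 16
# positional arguments mirror the constructor above:
# d_model, ffn_hidden, num_heads, drop_prob, num_layers, max_sequence_length,
# target vocab size, source mapping, target mapping, special tokens
model = Transformer(64, 128, 4, 0.1, 1, max_len,
                    len(tgt_vocab), src_to_index, tgt_to_index,
                    START, END, PAD).to(get_device())

src_batch = ("hello", "how are you?")
tgt_batch = ("namaskar", "kasa ahes?")   # romanised placeholders for the toy run
logits = model(src_batch, tgt_batch, dec_start_token=True, dec_end_token=True)
print(logits.shape)   # (batch, max_sequence_length, target vocab size) -> torch.Size([2, 16, 31])

main.py drives the same class with the real Marathi vocabulary, additive masks from create_masks, and character-level batches from a DataLoader.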