File size: 861 Bytes
8682de4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
#!/usr/bin/env python
from gensim.models import Word2Vec
import gensim 

def tokenize_sentence(sentence):
    """Split a pre-tokenized sentence string on whitespace into a token list."""
    tokens = sentence.split()
    return tokens

# Load the tokenized Maltese corpus: one whitespace-tokenized sentence per line.
with open('korpus_malti_tok.txt', 'r', encoding='utf-8') as file:
    sentences = file.read().splitlines()

data = [tokenize_sentence(sentence) for sentence in sentences]

# Construct the model WITHOUT a corpus argument. Passing `data` to the
# constructor makes gensim build the vocabulary and run a full training pass
# immediately, so the explicit build_vocab() + train() calls below would
# repeat all of that work (corpus scanned twice, model trained twice, and
# part of the alpha decay consumed by the hidden first pass).
model = Word2Vec(vector_size=300,   # embedding dimensionality
                 window=10,         # context window on each side
                 min_count=20,      # drop tokens rarer than this
                 workers=16,        # training threads
                 sample=1e-5,       # aggressive downsampling of frequent words
                 alpha=0.03,        # initial learning rate
                 min_alpha=0.0007,  # final learning rate after decay
                 negative=20)       # negative samples per positive example

# Single explicit vocabulary scan, then a single training run.
model.build_vocab(data, progress_per=1000)
print(model.corpus_count)

model.train(data, total_examples=model.corpus_count, epochs=15)

# Save in the plain-text word2vec format (one word + 300 floats per line).
model.wv.save_word2vec_format('mt_word2vec_2.txt', binary=False)

print('Embeddings successfully trained!')  # fixed mojibake: 'Émbeddings' -> 'Embeddings'