maltese_embeddings / mt_word2vec.py
#!/usr/bin/env python
from gensim.models import Word2Vec
# Whitespace tokenization; the corpus file is assumed to be pre-tokenized.
def tokenize_sentence(sentence):
    return sentence.split()
# Read the tokenized Maltese corpus, one sentence per line.
with open('korpus_malti_tok.txt', 'r', encoding='utf-8') as file:
    sentences = file.read().splitlines()
data = [tokenize_sentence(sentence) for sentence in sentences]
# Initialise the model with hyperparameters only; passing the corpus to the
# constructor would build the vocabulary and train immediately, duplicating
# the explicit build_vocab/train calls below.
model = Word2Vec(vector_size=300,
                 window=10,
                 min_count=20,
                 workers=16,
                 sample=1e-5,
                 alpha=0.03,
                 min_alpha=0.0007,
                 negative=20)
model.build_vocab(data, progress_per=1000)
print(model.corpus_count)
model.train(data, total_examples=model.corpus_count, epochs=15)
# Export the trained vectors in plain-text word2vec format.
model.wv.save_word2vec_format('mt_word2vec_2.txt', binary=False)
print('Embeddings successfully trained!')
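As a quick sanity check, the exported file can be reloaded with gensim's KeyedVectors and queried for nearest neighbours. A minimal sketch, assuming the query word 'malta' survives the min_count=20 threshold:

from gensim.models import KeyedVectors

# Load the plain-text vectors written by save_word2vec_format above.
mt_vectors = KeyedVectors.load_word2vec_format('mt_word2vec_2.txt', binary=False)

# Nearest neighbours by cosine similarity; 'malta' is only an illustrative query.
print(mt_vectors.most_similar('malta', topn=5))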