import transformers
import os
import torch
# Training hyperparameters
MAX_LEN = 150  # 256
TRAIN_BATCH_SIZE = 8
VALID_BATCH_SIZE = 4
EPOCHS = 5
# Folder to contain all the datasets
DATASET_LOCATION = ""
# Path for the saved model weights
MODEL_PATH = "model.bin"
# Use the GPU if one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Pre-trained Latvian Twitter BERT with emoji support (7 EPOCH version)
BERT_PATH = "FFZG-cleopatra/bert-emoji-latvian-twitter"

# BertTokenizer for the model above
# TODO: check whether lower casing is required
TOKENIZER = transformers.BertTokenizer.from_pretrained(
    BERT_PATH,
    do_lower_case=True
)
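
# --- Usage sketch (illustrative): encoding a single tweet with the config above.
# This block is an assumption about typical use, not part of the original code;
# the function name and the sample text are hypothetical.
def encode_example(text: str):
    encoded = TOKENIZER.encode_plus(
        text,
        add_special_tokens=True,    # add [CLS] / [SEP]
        max_length=MAX_LEN,         # truncate or pad to the configured length
        padding="max_length",
        truncation=True,
        return_attention_mask=True,
        return_tensors="pt",        # return PyTorch tensors
    )
    # Move the tensors to the configured device (GPU if available)
    return {k: v.to(device) for k, v in encoded.items()}

# Example call (hypothetical input):
# batch = encode_example("Šodien ir lieliska diena! 😀")
# batch["input_ids"].shape -> torch.Size([1, MAX_LEN])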
####################################################################################################################################