Snizhanna committed
Commit 4031604 · verified · 1 Parent(s): 3c9ab88

Upload 6 files

lr_classifier_default.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9afdd0d293944860b9b83ce7667c220312e48269d2c123882f734545bb0a565d
+ size 1607150
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ streamlit
+ pandas
+ scikit-learn==1.2.2
+ stanza
+ nltk
+ transformers
+ torch
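Setup note (not part of the commit): beyond pip install -r requirements.txt, text_preprocessing.py builds a Ukrainian Stanza pipeline, so the 'uk' models have to be downloaded once beforehand. A minimal sketch, assuming a standard Stanza installation:

# one-off setup script (hypothetical, not part of this upload)
import stanza

# text_preprocessing.py calls stanza.Pipeline(lang='uk'); the Ukrainian models
# must already be present locally, so fetch them once before the first run.
stanza.download('uk')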
rf_classifier_param.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:10f6b3cfbd97c6867ac246e12c7ced59074b4a73bef553303839f3ed6019ee39
+ size 9135666
text_preprocessing.py ADDED
@@ -0,0 +1,55 @@
+ from nltk.tokenize import TweetTokenizer
+ import stanza
+ import re
+
+ tk = TweetTokenizer()
+ uk_nlp = stanza.Pipeline(lang='uk', verbose=False)
+
+ def substitute_user_mentions_and_links(text):
+     # Regular expression to match user mentions (e.g., @username)
+     user_mention_pattern = r'@\w+'
+
+     # Regular expression to match links (e.g., http://example.com)
+     link_pattern = r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
+
+     # Substitute user mentions
+     text = re.sub(user_mention_pattern, '', text)
+
+     # Substitute links
+     text = re.sub(link_pattern, '', text)
+
+     # Substitute Latin chars
+     text = re.sub(r'[a-zA-Z]+', '', text)
+
+     return text.lower()
+
+ def remove_some_punc_numbers(text):
+     chars_to_remove = r'[\#\$\%\&\*\+\,\-\/\:\;\<\=\>\@\[\\\]\^\_\{\|\}\~\d\.\–]'
+
+     result = re.sub(chars_to_remove, '', ' '.join(text))
+
+     return result.lower()
+
+ pattern = r'\b(\w+)\s*\'\s*(\w+)\b'
+
+ # Define a function to join words separated by single quotes
+ def join_words(match):
+     return match.group(1) + "'" + match.group(2)
+
+ def lemmatize(text):
+     lemmas_st = []
+     for sent in uk_nlp(text).sentences:
+         for word in sent.words:
+             lemmas_st.append(word.lemma)
+     return lemmas_st
+
+ def preprocess_text(input_text):
+
+     text_mod = substitute_user_mentions_and_links(input_text)
+     tokenized = tk.tokenize(text_mod)
+     spec_char_remv = remove_some_punc_numbers(tokenized)
+     apostrophe_fixed = re.sub(pattern, join_words, spec_char_remv)
+     spaces_fixed = re.sub(r'\s+', ' ', apostrophe_fixed)
+     lemmatized = lemmatize(spaces_fixed)
+
+     return text_mod, lemmatized
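Usage sketch (not part of the commit), assuming the Stanza 'uk' models are already installed; preprocess_text returns both the cleaned text and the list of lemmas:

# hypothetical example; the sample tweet is illustrative only
from text_preprocessing import preprocess_text

# "Very 'interesting' advice, thanks!!!"
cleaned, lemmas = preprocess_text("@user Дуже 'цікава' порада, дякую!!! http://example.com")
print(cleaned)  # mention, link and Latin characters stripped, lowercased
print(lemmas)   # lemmas produced by the Stanza pipeline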
tfidf_vectorizer.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:311d11b3cebc097ade6884e27c9e9841e68edfad6294610c52816677ed4173df
+ size 9719891
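The three .pkl entries above are Git LFS pointer files (spec v1): the repository tracks only the object id and size, and the binary blobs are fetched from LFS storage on clone or pull. A minimal sketch for fetching one artifact directly, assuming the repository is hosted on the Hugging Face Hub (the repo_id below is a placeholder):

# hypothetical download helper; replace the repo_id placeholder with the actual repository
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(repo_id="<user>/<repo>", filename="tfidf_vectorizer.pkl")
print(local_path)  # path to the cached LFS-backed pickle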
utils_models.py ADDED
@@ -0,0 +1,27 @@
+ import torch
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification
+
+ def map_num_to_label(num):
+     return "сарказм" if num == 1 else "не сарказм"  # "sarcasm" / "not sarcasm"
+
+ def load_roberta():
+     model_ckpt = "ukr-roberta-base-finetuned-sarc"
+     tokenizer = AutoTokenizer.from_pretrained(model_ckpt)
+     id2label = {1: "sarcastic", 0: "not_sarcastic"}
+     label2id = {"sarcastic": 1, "not_sarcastic": 0}
+     hf_model = AutoModelForSequenceClassification.from_pretrained(model_ckpt, num_labels=2, label2id=label2id, id2label=id2label)
+     return hf_model, tokenizer
+
+ def predict_roberta(model, tokenizer, text):
+     tokenized_input = tokenizer(text, return_tensors="pt")
+     predictions = model(**tokenized_input)
+     prediction = predictions.logits.argmax().item()
+     return map_num_to_label(prediction)
+
+ def identity_tokenizer(text):
+     return text
+
+ def predict_lr_rf(model, vectorizer, text):
+     prediction = model.predict(vectorizer.transform([text]))[0]  # predict() returns an array; take the single label
+     return map_num_to_label(prediction)
+
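End-to-end sketch (not part of the commit). It assumes the pickled TF-IDF vectorizer was fitted with identity_tokenizer on lemma lists and saved with scikit-learn 1.2.2, and that the fine-tuned checkpoint "ukr-roberta-base-finetuned-sarc" is available locally; passing the lemma list to predict_lr_rf is also an assumption about how the vectorizer was fitted.

# hypothetical usage; file names come from this upload, everything else is assumed
import pickle
from text_preprocessing import preprocess_text
from utils_models import identity_tokenizer, load_roberta, predict_roberta, predict_lr_rf

# identity_tokenizer is imported so pickle can resolve the vectorizer's tokenizer,
# assuming it was pickled with a reference to this module.
with open("tfidf_vectorizer.pkl", "rb") as f:
    vectorizer = pickle.load(f)
with open("lr_classifier_default.pkl", "rb") as f:
    lr_model = pickle.load(f)

text = "Ну звісно, найкращий день у моєму житті..."  # "Oh sure, the best day of my life..."
cleaned, lemmas = preprocess_text(text)

print(predict_lr_rf(lr_model, vectorizer, lemmas))  # classical model on TF-IDF features

hf_model, hf_tokenizer = load_roberta()
print(predict_roberta(hf_model, hf_tokenizer, text))  # fine-tuned RoBERTa prediction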