Spaces:
Running
Running
Commit
·
5792077
1
Parent(s):
4d5674d
removed sarcasm
Browse files — will later be added into its own space
- app.py +0 -1
- backend/home.py +0 -1
- backend/services.py +2 -2
app.py
CHANGED
@@ -8,7 +8,6 @@ import backend.home
|
|
8 |
import backend.processor
|
9 |
import backend.sa
|
10 |
import backend.qa
|
11 |
-
import backend.sarcasm
|
12 |
|
13 |
st.set_page_config(
|
14 |
page_title="TEST", page_icon="📖", initial_sidebar_state="expanded", layout="wide"
|
|
|
8 |
import backend.processor
|
9 |
import backend.sa
|
10 |
import backend.qa
|
|
|
11 |
|
12 |
st.set_page_config(
|
13 |
page_title="TEST", page_icon="📖", initial_sidebar_state="expanded", layout="wide"
|
backend/home.py
CHANGED
@@ -16,7 +16,6 @@ def write():
|
|
16 |
- Arabic Text Preprocessor: Test how text input is treated by our preprocessor
|
17 |
- Arabic Language Generation: Generate Arabic text using our AraGPT2 language models
|
18 |
- Arabic Sentiment Analysis: Test the sentiment analysis model that won the [Arabic Sentiment Analysis competition @ KAUST](https://www.kaggle.com/c/arabic-sentiment-analysis-2021-kaust)
|
19 |
-
- Arabic Sarcasm Detection: Test MARBERT trained for sarcasm detection
|
20 |
- Arabic Question Answering: Test our AraELECTRA QA capabilities
|
21 |
"""
|
22 |
)
|
|
|
16 |
- Arabic Text Preprocessor: Test how text input is treated by our preprocessor
|
17 |
- Arabic Language Generation: Generate Arabic text using our AraGPT2 language models
|
18 |
- Arabic Sentiment Analysis: Test the sentiment analysis model that won the [Arabic Sentiment Analysis competition @ KAUST](https://www.kaggle.com/c/arabic-sentiment-analysis-2021-kaust)
|
|
|
19 |
- Arabic Question Answering: Test our AraELECTRA QA capabilities
|
20 |
"""
|
21 |
)
|
backend/services.py
CHANGED
@@ -216,7 +216,7 @@ class SentimentAnalyzer:
|
|
216 |
# "sa_no_aoa_in_neutral": NewArabicPreprocessorBalanced(model_name='UBC-NLP/MARBERT'),
|
217 |
# "sa_cnnbert": CNNMarbertArabicPreprocessor(model_name='UBC-NLP/MARBERT'),
|
218 |
# "sa_sarcasm": SarcasmArabicPreprocessor(model_name='UBC-NLP/MARBERT'),
|
219 |
-
"sar_trial10": SarcasmArabicPreprocessor(model_name='UBC-NLP/MARBERT'),
|
220 |
# "sa_no_AOA": NewArabicPreprocessorBalanced(model_name='UBC-NLP/MARBERT'),
|
221 |
}
|
222 |
|
@@ -224,7 +224,7 @@ class SentimentAnalyzer:
|
|
224 |
"sa_trial5_1": [pipeline("sentiment-analysis", model="{}/train_{}/best_model".format("sa_trial5_1",i), device=-1,return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sa_trial5_1")],
|
225 |
# "sa_no_aoa_in_neutral": [pipeline("sentiment-analysis", model="{}/train_{}/best_model".format("sa_no_aoa_in_neutral",i), device=-1,return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sa_no_aoa_in_neutral")],
|
226 |
# "sa_cnnbert": [CNNTextClassificationPipeline("{}/train_{}/best_model".format("sa_cnnbert",i), device=-1, return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sa_cnnbert")],
|
227 |
-
"sa_sarcasm": [pipeline("sentiment-analysis", model="{}/train_{}/best_model".format("sa_sarcasm",i), device=-1,return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sa_sarcasm")],
|
228 |
# "sar_trial10": [pipeline("sentiment-analysis", model="{}/train_{}/best_model".format("sar_trial10",i), device=-1,return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sar_trial10")],
|
229 |
# "sa_no_AOA": [pipeline("sentiment-analysis", model="{}/train_{}/best_model".format("sa_no_AOA",i), device=-1,return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sa_no_AOA")],
|
230 |
}
|
|
|
216 |
# "sa_no_aoa_in_neutral": NewArabicPreprocessorBalanced(model_name='UBC-NLP/MARBERT'),
|
217 |
# "sa_cnnbert": CNNMarbertArabicPreprocessor(model_name='UBC-NLP/MARBERT'),
|
218 |
# "sa_sarcasm": SarcasmArabicPreprocessor(model_name='UBC-NLP/MARBERT'),
|
219 |
+
# "sar_trial10": SarcasmArabicPreprocessor(model_name='UBC-NLP/MARBERT'),
|
220 |
# "sa_no_AOA": NewArabicPreprocessorBalanced(model_name='UBC-NLP/MARBERT'),
|
221 |
}
|
222 |
|
|
|
224 |
"sa_trial5_1": [pipeline("sentiment-analysis", model="{}/train_{}/best_model".format("sa_trial5_1",i), device=-1,return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sa_trial5_1")],
|
225 |
# "sa_no_aoa_in_neutral": [pipeline("sentiment-analysis", model="{}/train_{}/best_model".format("sa_no_aoa_in_neutral",i), device=-1,return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sa_no_aoa_in_neutral")],
|
226 |
# "sa_cnnbert": [CNNTextClassificationPipeline("{}/train_{}/best_model".format("sa_cnnbert",i), device=-1, return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sa_cnnbert")],
|
227 |
+
# "sa_sarcasm": [pipeline("sentiment-analysis", model="{}/train_{}/best_model".format("sa_sarcasm",i), device=-1,return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sa_sarcasm")],
|
228 |
# "sar_trial10": [pipeline("sentiment-analysis", model="{}/train_{}/best_model".format("sar_trial10",i), device=-1,return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sar_trial10")],
|
229 |
# "sa_no_AOA": [pipeline("sentiment-analysis", model="{}/train_{}/best_model".format("sa_no_AOA",i), device=-1,return_all_scores =True) for i in tqdm(range(0,5), desc=f"Loading pipeline for model: sa_no_AOA")],
|
230 |
}
|