dingusagar committed • 742920d
Parent(s): edb9001

Update app.py

app.py CHANGED
@@ -7,7 +7,7 @@ from ollama import ChatResponse
 # Default model
 OLLAMA_MODEL = "llama3.2:3b"
 # OLLAMA_MODEL = "llama3.2:1b"
-OLLAMA_MODEL = "llama3.2:3b-instruct-q2_K"
+# OLLAMA_MODEL = "llama3.2:3b-instruct-q2_K"

 # Load BERT MODEL
 from transformers import pipeline, DistilBertTokenizerFast
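This hunk reverts the active default from the 2-bit quantized build back to the full llama3.2:3b tag, keeping the quantized variant as a comment for quick switching. Whichever tag is active must be present in the local Ollama store before chat calls succeed; a minimal boot-time guard (an illustration, not part of the Space's code) could look like this:

    import ollama

    OLLAMA_MODEL = "llama3.2:3b"

    # ollama.pull() is effectively idempotent: it downloads the tag when it
    # is missing and is a fast no-op when the layers are already local.
    ollama.pull(OLLAMA_MODEL)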
@@ -29,10 +29,15 @@ bert_label_map = {
     'LABEL_1': 'NTA',
 }

+bert_label_map_formatted = {
+    'LABEL_0': 'You are the A**hole (YTA)',
+    'LABEL_1': 'Not the A**hole (NTA)',
+}
+
 def ask_bert(prompt):
     print(f"Getting response from Fine-tuned BERT")
     result = classifier([prompt])[0]
-    label =
+    label = result['label']
     confidence = f"{result['score']*100:.2f}"
     return label, confidence
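The fix in this hunk follows from the pipeline's return shape: a transformers text-classification pipeline yields one dict per input with 'label' and 'score' keys, so result['label'] is the raw class id that bert_label_map / bert_label_map_formatted translate for display. A minimal sketch of that contract, using a public checkpoint as a stand-in for the Space's fine-tuned DistilBERT:

    from transformers import pipeline

    # Stand-in checkpoint; the Space loads its own fine-tuned model.
    classifier = pipeline(
        "text-classification",
        model="distilbert-base-uncased-finetuned-sst-2-english",
    )

    result = classifier(["AITA for returning a broken gift?"])[0]
    # result is a dict such as {'label': 'POSITIVE', 'score': 0.99}; the
    # label strings come from the model's id2label config, which is why a
    # model fine-tuned without one reports generic 'LABEL_0' / 'LABEL_1'.
    label = result['label']
    confidence = f"{result['score']*100:.2f}"
    print(label, confidence)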
@@ -99,9 +104,6 @@ Use second person terms like you in the explanation.
         response += chunk['message']['content']
         yield response

-def gradio_bert_interface(prompt):
-    response, confidence = ask_bert(prompt)
-    return f"{response} with confidence {confidence}%"

 # Separate function for Ollama response
 def gradio_ollama_interface(prompt, bert_class=""):
@@ -111,14 +113,15 @@ def gradio_interface(prompt, selected_model):
         for chunk in ask_ollama(prompt):
             yield chunk
     elif selected_model == MODEL_CHOICE_BERT:
-
-
+        label, confidence = ask_bert(prompt)
+        label = bert_label_map_formatted[label]
+        response = f"{label} with confidence {confidence}"
         return response
     elif selected_model == MODEL_CHOICE_BERT_LLAMA:
         label, confidence = ask_bert(prompt)
-        initial_response = f"BERT model
+        initial_response = f"Response from BERT model: {bert_label_map_formatted[label]} with confidence {confidence}%\n\nGenerating explanation using Llama model...\n"
         yield initial_response
-        for chunk in ask_ollama(prompt, expected_class=label):
+        for chunk in ask_ollama(prompt, expected_class=bert_label_map[label]):
             yield initial_response + "\n" + chunk
     else:
         return "Something went wrong. Select the correct model configuration from settings. "
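For the streaming branches above, ask_ollama (not shown in this diff) evidently wraps ollama.chat(stream=True), since the earlier hunk accumulates chunk['message']['content']. A sketch of that shape, where the prompt wording and the way expected_class is injected are assumptions rather than the Space's actual code:

    import ollama

    OLLAMA_MODEL = "llama3.2:3b"

    def ask_ollama(prompt, expected_class=""):
        # Hypothetical system prompt; the real one lives elsewhere in app.py.
        system = "Classify the AITA post and explain the verdict."
        if expected_class:
            system += f" A fine-tuned classifier already chose {expected_class}."
        response = ""
        # stream=True makes ollama.chat() return an iterator of partial
        # messages, matching the chunk['message']['content'] access above.
        for chunk in ollama.chat(
            model=OLLAMA_MODEL,
            messages=[
                {"role": "system", "content": system},
                {"role": "user", "content": prompt},
            ],
            stream=True,
        ):
            response += chunk['message']['content']
            yield response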
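Finally, one thing the diff relies on but does not show: gradio_interface is a generator, and Gradio streams generator functions by re-rendering the output component on every yield. A minimal wiring sketch under that assumption (component labels and the choice constants are illustrative; the Space's actual UI may differ):

    import gradio as gr

    MODEL_CHOICE_BERT = "Fine-tuned BERT"
    MODEL_CHOICE_BERT_LLAMA = "BERT + Llama explanation"

    def gradio_interface(prompt, selected_model):
        # Stand-in for the diff's gradio_interface generator.
        yield f"[{selected_model}] verdict for: {prompt[:40]}..."

    demo = gr.Interface(
        fn=gradio_interface,
        inputs=[
            gr.Textbox(label="Describe the situation"),
            gr.Radio([MODEL_CHOICE_BERT, MODEL_CHOICE_BERT_LLAMA],
                     label="Model"),
        ],
        outputs=gr.Textbox(label="Verdict"),
    )

    if __name__ == "__main__":
        demo.launch()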