Spaces: Sleeping
Rama-Alyoubi committed
Upload 5 files
Files changed:
- .gitattributes +3 -0
- app.py +432 -0
- background_image.png +3 -0
- chat_page.png +3 -0
- cleaned_fixed_text.txt +0 -0
- human_rate.png +3 -0
.gitattributes
CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+background_image.png filter=lfs diff=lfs merge=lfs -text
+chat_page.png filter=lfs diff=lfs merge=lfs -text
+human_rate.png filter=lfs diff=lfs merge=lfs -text
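For reference, each new line follows the pattern that git lfs track writes into .gitattributes (for example, running git lfs track "background_image.png" appends exactly such an entry), so the three PNGs are stored as LFS pointers rather than raw blobs.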
app.py
ADDED
@@ -0,0 +1,432 @@
import os

import streamlit as st
import base64
import torch
from transformers import AutoTokenizer, AutoModel
import requests
from huggingface_hub import login

# Get Hugging Face token from environment variable
hf_token = os.getenv("HF_API_KEY")
if hf_token is None:
    st.error("API Key not found. Please set the HF_API_KEY secret in your Space.")
    st.stop()  # without a token the login below would fail anyway

# Set up Hugging Face login using the token from the environment
login(hf_token)

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained("aubmindlab/bert-base-arabertv02")
model = AutoModel.from_pretrained("aubmindlab/bert-base-arabertv02")

# Helper functions
def get_base64(bin_file):
    """Read a binary file and return its base64-encoded contents as a string."""
    with open(bin_file, 'rb') as f:
        data = f.read()
    return base64.b64encode(data).decode()


def set_background(png_file):
    """Inject CSS that uses the given PNG as a full-page background."""
    try:
        bin_str = get_base64(png_file)
        page_bg_img = f'''
        <style>
        .stApp {{
            background-image: url("data:image/png;base64,{bin_str}");
            background-size: contain;
            background-position: center;
            background-attachment: fixed;
            background-repeat: no-repeat;
            filter: brightness(0.9) saturate(1.2);
        }}
        .stApp::before {{
            content: "";
            position: absolute;
            top: 0;
            left: 0;
            right: 0;
            bottom: 0;
            background: url("data:image/png;base64,{bin_str}") center center / cover;
            filter: brightness(0.6);
            z-index: -1;
        }}
        </style>
        '''
        st.markdown(page_bg_img, unsafe_allow_html=True)
    except FileNotFoundError:
        st.error("Background image file not found.")


st.set_page_config(page_title="رحلات الزمان والمكان مع علّام", page_icon=":books:", layout="wide")
set_background('background_image.png')

class CustomRetriever:
    def __init__(self, tokenizer, model, max_length=512):
        self.tokenizer = tokenizer
        self.model = model
        self.max_length = max_length
        self.embeddings = None

    def tokenize_and_embed(self, text):
        """Mean-pool the last hidden state of AraBERT into a single embedding vector."""
        inputs = self.tokenizer(text, return_tensors="pt", truncation=True, max_length=self.max_length, padding=True)
        with torch.no_grad():
            outputs = self.model(**inputs)
        return outputs.last_hidden_state.mean(dim=1)

    def set_corpus_embeddings(self, corpus):
        """Precomputes and stores embeddings for a corpus."""
        self.embeddings = []
        for passage in corpus:
            self.embeddings.append(self.tokenize_and_embed(passage))
        self.embeddings = torch.vstack(self.embeddings)

    def retrieve(self, question):
        """Generates query embeddings and retrieves top matching passages."""
        query_embedding = self.tokenize_and_embed(question)
        similarities = torch.nn.functional.cosine_similarity(query_embedding, self.embeddings)

        # Adjust top_k to handle fewer passages than k
        top_k_count = min(3, len(self.embeddings))  # Use min to avoid index out of range
        top_k = similarities.topk(top_k_count).indices
        return top_k

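As an aside, the retriever can be sanity-checked on its own before wiring it into the full app. The passages and question below are made up for illustration and are not part of the uploaded corpus:

sample_corpus = ["الاسم يدل على شيء.", "الفعل يدل على حدث.", "الحرف يربط الكلمات."]  # illustrative passages
retriever = CustomRetriever(tokenizer, model, max_length=512)
retriever.set_corpus_embeddings(sample_corpus)
indices = retriever.retrieve("ما هو الفعل؟")        # tensor of top-k passage indices
print([sample_corpus[i] for i in indices])          # passages ranked by cosine similarity
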
# IAM Token generation for IBM Cloud API
def get_iam_token(api_key):
    auth_url = "https://iam.cloud.ibm.com/identity/token"
    response = requests.post(auth_url, data={
        'apikey': api_key,
        'grant_type': 'urn:ibm:params:oauth:grant-type:apikey'
    }, headers={
        'Content-Type': 'application/x-www-form-urlencoded',
        'Accept': 'application/json'
    })
    if response.status_code == 200:
        return response.json()["access_token"]
    else:
        raise Exception(f"Error: {response.status_code} - {response.text}")

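IBM Cloud IAM tokens expire (the token endpoint also returns an expires_in field, typically 3600 seconds), so a long-running Space would eventually get 401 responses with the single token fetched at startup. A minimal caching wrapper, sketched here rather than taken from the commit, could look like:

import time

_token_cache = {"token": None, "expires_at": 0.0}

def get_cached_iam_token(api_key, margin=60):
    # Reuse the IAM token until shortly before its assumed ~3600 s lifetime ends.
    now = time.time()
    if _token_cache["token"] is None or now >= _token_cache["expires_at"]:
        _token_cache["token"] = get_iam_token(api_key)
        _token_cache["expires_at"] = now + 3600 - margin
    return _token_cache["token"]
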
# Answer generation (calls the watsonx.ai text generation endpoint)
def generate_answer(context, question, iam_token):
    # Examples to demonstrate the structure the model should follow
    examples = """

Q: ماهي اقسام الكلام؟

A: كان علاء الدين شاباً يحب المغامرات والقصص، وفي أحد الأيام، أمسك بمصباحه السحري وتمنى أن يطير به إلى عالم الكلمات والحروف. فإذا به يرتفع في الهواء، حاملاً علاء الدين في رحلة عبر السماء.
وصل علاء الدين إلى قصرٍ بديع، كان مبنيّاً من كلمات متلألئة. وعندما دخل، وجد ثلاثة أصدقاء يعيشون فيه: الأول كان اسمه "الاسم"، والثاني "الفعل"، والثالث "الحرف".
قال "الاسم": "أنا المسؤول عن كل الأسماء التي تراها حولك، من إنسان إلى شجرة، إلى كتاب، كل شيء له اسم أعطيه إياه." أما "الفعل"، فقال: "وأنا أعطي الأشياء القدرة على الحركة والعمل، فأقول إن أحمد يقرأ، أو الطائرة تطير." وبينما كانا يتحدثان، قال "الحرف": "أنا أربط الكلمات ببعضها لتصبح جملة ذات معنى، فأنا كالغراء الذي يجمع الحروف معاً."
جلس علاء الدين مع أصدقائه الجدد، وتحدثوا طويلاً عن الكلمات والجمل. فهم علاء الدين أن الكلمات تشبه قطع الليجو، وعندما نركبها معاً نحصل على صورة كاملة. وأن "الاسم" هو الأساس، و"الفعل" هو الحركة، و"الحرف" هو الرابط بينهما.
عاد علاء الدين إلى أرضه وهو يحمل في قلبه الكثير من المعرفة عن الكلمات. وتذكر دائماً أن الكلمات هي أقوى سلاح لدينا، وأن علينا أن نستخدمها بحكمة وبناء.
هل أعجبتك القصة يا صغيري؟ تذكر دائماً أن تقرأ الكثير من الكتب، لأن القراءة توسع مداركك وتزيد.

"""

    # Characters list for storytelling context
    characters_list = """
- "شهرزاد": الحكواتية الشهيرة التي تروي القصص بمهارة لتسحر الملك شهريار، يمكنها أن تروي للأطفال قصة جديدة لتوضيح المفهوم.
- "شهريار": الملك الذي يستمع للقصص، ويمكن أن يمثل الطفل الذي يريد معرفة المزيد ويطرح الأسئلة.
- "السندباد البحري": البحار الذي يسافر إلى أماكن بعيدة ويواجه تحديات مختلفة، يمكنه أخذ الأطفال في مغامرات لفهم المفاهيم الصعبة.
- "علاء الدين": الشاب الذي يملك المصباح السحري، يمكنه استخدام الجني لمساعدة الأطفال على فهم الأمور السحرية أو الغامضة.
- "مرجانة": الفتاة الذكية التي تساعد علي بابا، يمكنها أن تقدم حلولًا سريعة وأفكارًا مفيدة.
"""

    # Instructions for how the response should be generated
    instructions = f"""
أنت معلم للأطفال وتريد أن تجعل عملية التعلم مشوقة ومليئة بالخيال. أجب على السؤال باستخدام أسلوب سرد يشبه قصص ألف ليلة وليلة.
اختر من الشخصيات التالية لتساعدك في الشرح:
{characters_list}
احرص على:
- استخدام شخصية أو أكثر لجعل الشرح ممتعًا.
- تقديم مغامرة بسيطة مع وصف المكان والأحداث لجعل المشهد حيويًا وشيقًا.
- التأكد من أن الشرح مناسب للأطفال بين 6 و12 سنة باستخدام جمل قصيرة وكلمات بسيطة.
- إشراك الطفل في القصة عبر طرح أسئلة، مثل: "هل ترى الفرق الآن؟" أو "هل ترغب في استكشاف المزيد؟".
- إنهاء القصة بنبرة إيجابية ومشجعة.

هذا مثال يجب أن تقتدي به عند الإجابة:
{examples}
"""

    # Construct the prompt with the retrieved context, question, and instructions
    input_text = f"""
السؤال: {question}
\nالمعلومات المرجعية:\n{context}
\nالتعليمات:\n{instructions}
\nالإجابة:"""

    # API endpoint and model settings
    url = "https://eu-de.ml.cloud.ibm.com/ml/v1/text/generation?version=2023-05-29"
    model_id = "sdaia/allam-1-13b-instruct"
    project_id = "ed6b7fdf-5e8e-4bbd-8f93-356d126fc962"
    headers = {
        "Accept": "application/json",
        "Content-Type": "application/json",
        "Authorization": f"Bearer {iam_token}"
    }

    body = {
        "input": input_text,
        "parameters": {
            "decoding_method": "sample",
            "max_new_tokens": 900,
            "temperature": 0.7,
            "top_k": 50,
            "top_p": 0.9,
            "repetition_penalty": 1.15
        },
        "model_id": model_id,
        "project_id": project_id
    }

    response = requests.post(url, headers=headers, json=body)

    if response.status_code == 200:
        data = response.json()
        if "results" in data and len(data["results"]) > 0:
            generated_text = data["results"][0].get("generated_text", "No generated text found")
            return generated_text
        else:
            return "No generated text found."
    else:
        return f"Error: {response.status_code} - {response.text}"

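The parsing above assumes the watsonx.ai generation endpoint returns a JSON body roughly like the following (values are illustrative; only the structure matters to the code):

example_response = {
    "model_id": "sdaia/allam-1-13b-instruct",
    "results": [
        {
            "generated_text": "كان يا ما كان ...",   # illustrative value
            "generated_token_count": 412,
            "stop_reason": "eos_token",
        }
    ],
}
print(example_response["results"][0].get("generated_text"))
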
class ArabicRAG:
    def __init__(self, corpus, num_passages=3):
        # Set corpus as an attribute for easy access
        self.corpus = corpus
        self.num_passages = num_passages

        # Initialize the retriever with AraBERT
        max_length = 512
        self.retriever = CustomRetriever(tokenizer, model, max_length)
        self.retriever.set_corpus_embeddings(self.corpus)  # Precompute embeddings for the corpus

    def forward(self, question, iam_token):
        """
        Retrieve relevant context based on the user question and generate an answer.
        """
        relevant_indices = self.retriever.retrieve(question)
        context = "\n".join([self.corpus[i] for i in relevant_indices])
        prediction = generate_answer(context=context, question=question, iam_token=iam_token)
        return prediction

# API Key and IAM Token setup
api_key = 'l9cHEdqwQcXTGQ5toy6w02ogU8KR89g3w94ojrI8mgN1'  # NOTE: better kept as a Space secret than hard-coded
iam_token = get_iam_token(api_key)

# Split passages and initialize ArabicRAG
with open('cleaned_fixed_text.txt', 'r', encoding='utf-8') as f:
    cleaned_fixed_text = f.read()

passages = cleaned_fixed_text.split(". ")  # Split into sentence-sized passages on ". "
compiled_rag = ArabicRAG(corpus=passages, num_passages=3)

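Splitting on ". " yields sentence-sized passages, and embedding every sentence at startup can dominate the Space's boot time. If cleaned_fixed_text.txt is paragraph-separated (an assumption, not something visible in this commit), a coarser split would be a possible alternative:

# Sketch: paragraph-level chunks instead of sentence-level ones (assumes blank-line separators).
paragraphs = [p.strip() for p in cleaned_fixed_text.split("\n\n") if p.strip()]
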
# Define the main page layout
def main_page():
    # CSS Styling for the button
    st.markdown('''
    <style>
    .start-button {
        display: flex;
        justify-content: center;
        align-items: center;
        position: fixed;
        bottom: 170px;
        left: 50%;
        transform: translateX(-50%);
        width: auto; max-width: 100%;
    }
    .start-button button {
        background-color: rgba(150, 98, 179, 0.8); /* Add some transparency but not fully transparent */
        color: white;
        border: none;
        padding: 15px 100px;
        font-size: 30px;
        border-radius: 30px;
        box-shadow: inset 4px 4px 8px rgba(0, 0, 0, 0.3),
                    4px 4px 8px rgba(0, 0, 0, 0.2);
        cursor: pointer;
    }
    .start-button button:hover {
        background-color: #7A5FA9;
    }
    </style>
    ''', unsafe_allow_html=True)

    # Add a button in the center of the screen with styling and make it functional
    st.markdown('<div class="start-button">', unsafe_allow_html=True)
    if st.button("ابدأ المغامرة", key="start_button_key"):
        st.session_state['page'] = 'chatbot'
    st.markdown('</div>', unsafe_allow_html=True)

# Define the chatbot page layout
def chatbot_page():
    # Background for the chatbot page
    set_background('chat_page.png')
    # CSS for fixed chat display area and input area
    st.markdown('''
    <style>
    /* Chat display area */
    .chat-display {
        position: fixed;
        top: 30px;
        right: 20px;
        width: 70%; /* Adjust width as desired */
        height: 80vh;
        padding: 15px;
        background-color: #f9f9f9;
        border: 1px solid #ddd;
        border-radius: 10px;
        overflow-y: auto;
        box-shadow: 2px 2px 10px rgba(0, 0, 0, 0.1);
    }
    /* User and bot message styles */
    .chat-bubble-user {
        background-color: #d1e7dd;
        padding: 10px;
        margin: 5px;
        border-radius: 10px;
        text-align: left;
    }
    .chat-bubble-bot {
        background-color: #f8d7da;
        color: white;
        padding: 10px;
        margin: 5px;
        border-radius: 10px;
        text-align: left;
    }
    /* Fixed input area at the right side */
    .input-container {
        position: fixed;
        bottom: 20px;
        right: 20px;
        width: 30%;
        display: flex;
        flex-direction: column;
        gap: 10px;
    }
    .chat-input {
        width: 100%;
        padding: 15px;
        font-size: 18px;
        border-radius: 10px;
        border: 1px solid #ccc;
        box-shadow: 2px 2px 8px rgba(0, 0, 0, 0.1);
    }
    .chat-button {
        padding: 10px;
        font-size: 18px;
        border-radius: 10px;
        border: none;
        cursor: pointer;
        background-color: #7A5FA9;
        color: white;
    }
    .chat-button:hover {
        background-color: #5a3f73;
    }
    </style>
    ''', unsafe_allow_html=True)

    # Initialize conversation history
    if 'conversation' not in st.session_state:
        st.session_state['conversation'] = []

    # Capture and process input
    user_question = st.text_input("اكتب رسالتك هنا...", key="user_question_input")

    # Display two buttons: one for generating the answer, and one for evaluation
    st.markdown('<div class="input-container">', unsafe_allow_html=True)
    if st.button("إرسال السؤال", key="send_button_key"):
        if user_question:
            response = compiled_rag.forward(user_question, iam_token)
            if len(response.strip()) < 20:  # Assuming a good answer should be at least 20 characters
                st.write("The generated answer was too short, retrying...")
                response = compiled_rag.forward(user_question, iam_token)

            st.session_state['conversation'].append({"role": "user", "content": user_question})
            st.session_state['conversation'].append({"role": "bot", "content": response})
            st.experimental_rerun()

    if st.button("تقييم الاستجابة", key="evaluation_button_key"):
        st.session_state['page'] = 'evaluation'

    st.markdown('</div>', unsafe_allow_html=True)

    # Display conversation history (st.markdown does not execute <script> tags,
    # so each bubble is rendered directly as HTML instead of injected via JS)
    for message in st.session_state['conversation']:
        if message['role'] == 'user':
            st.markdown(f'<div class="chat-bubble-user">{message["content"]}</div>', unsafe_allow_html=True)
        else:
            st.markdown(f'<div class="chat-bubble-bot">{message["content"]}</div>', unsafe_allow_html=True)

# Define the evaluation page layout
def evaluation_page():
    set_background('human_rate.png')
    st.markdown('''
    <style>
    .star-rating {
        display: flex;
        flex-direction: row-reverse;
        justify-content: center;
        position: fixed;
        bottom: 100px;
        left: 50%;
        transform: translateX(-50%);
    }

    .star-rating input[type="radio"] {
        display: none;
    }

    .star-rating label {
        font-size: 2em;
        color: #ccc;
        cursor: pointer;
        transition: color 0.2s;
    }

    .star-rating input[type="radio"]:checked ~ label {
        color: #FFD700;
    }

    .star-rating label:hover,
    .star-rating label:hover ~ label {
        color: #FFD700;
    }
    </style>

    <div class="star-rating">
        <input type="radio" id="star5" name="rating" value="5"><label for="star5">★</label>
        <input type="radio" id="star4" name="rating" value="4"><label for="star4">★</label>
        <input type="radio" id="star3" name="rating" value="3"><label for="star3">★</label>
        <input type="radio" id="star2" name="rating" value="2"><label for="star2">★</label>
        <input type="radio" id="star1" name="rating" value="1"><label for="star1">★</label>
    </div>

    <script>
    // Note: Streamlit does not execute <script> tags passed through st.markdown,
    // so this handler never runs and the selected rating is not sent back to the app.
    const stars = document.querySelectorAll('.star-rating input');
    stars.forEach(star => {
        star.addEventListener('change', (event) => {
            const rating = event.target.value;
            window.parent.postMessage({type: 'rating', value: rating}, '*');
        });
    });
    </script>
    ''', unsafe_allow_html=True)

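Because the injected JavaScript above never runs, the star rating is purely visual and the chosen value never reaches the app. A minimal way to actually record a rating with Streamlit's own widgets could look like this (labels and session keys are illustrative, not from the committed file):

rating = st.radio("قيّم إجابة علّام", options=[1, 2, 3, 4, 5], index=4, horizontal=True)
if st.button("إرسال التقييم", key="submit_rating_key"):
    st.session_state['last_rating'] = rating
    st.success("شكراً لتقييمك!")
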
# Main loop
if 'page' not in st.session_state:
    st.session_state['page'] = 'main'

if st.session_state['page'] == 'main':
    main_page()
elif st.session_state['page'] == 'chatbot':
    chatbot_page()
elif st.session_state['page'] == 'evaluation':
    evaluation_page()
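The commit does not include a requirements.txt; judging only from the imports in app.py, the Space presumably needs at least the following packages (versions unpinned, since none are recorded here):

streamlit
torch
transformers
huggingface_hub
requests
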
background_image.png
ADDED (Git LFS Details)

chat_page.png
ADDED (Git LFS Details)

cleaned_fixed_text.txt
ADDED (The diff for this file is too large to render; see raw diff.)

human_rate.png
ADDED (Git LFS Details)