|
import gradio as gr |
|
from transformers import AutoModelForCausalLM, AutoTokenizer |
|
|
|
|
|
# Hugging Face model id: Arabic GPT-2 (base size) from AUB MIND Lab.
model_name = "aubmindlab/aragpt2-base"

# Downloads (or loads from cache) the tokenizer and causal-LM weights at
# import time — the first run requires network access to the HF Hub.
tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForCausalLM.from_pretrained(model_name)
|
|
|
|
|
def generate_response(user_input):
    """Generate a supportive reply to *user_input* using the Arabic GPT-2 model.

    The user message is wrapped in a dialogue-style prompt; the model's
    continuation after the assistant marker is returned as the reply.

    Args:
        user_input: Free-form Arabic text typed by the user.

    Returns:
        The model-generated reply as a plain string.
    """
    prompt = f"مستخدم: {user_input}\nنظام الدعم النفسي:"

    # Use tokenizer.__call__ (not .encode) so we also get the attention mask;
    # passing it to generate() avoids ambiguity since pad_token_id == eos_token_id.
    encoded = tokenizer(prompt, return_tensors="pt")

    # max_new_tokens bounds only the generated continuation. The original
    # max_length=100 counted the prompt too, so long inputs left little or
    # no room for a reply.
    outputs = model.generate(
        encoded["input_ids"],
        attention_mask=encoded["attention_mask"],
        max_new_tokens=100,
        num_return_sequences=1,
        pad_token_id=tokenizer.eos_token_id,
    )

    response = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Keep only the text after the assistant marker (the actual reply).
    response = response.split("نظام الدعم النفسي:")[-1].strip()

    # Fallback so the UI never shows an empty box when the model emits
    # nothing after the marker.
    return response if response else "عذراً، لم أتمكن من توليد رد. حاول مرة أخرى."
|
|
|
|
|
# Build the Gradio UI: a heading, an input/output row, and a submit button
# wired to generate_response.
with gr.Blocks() as app:
    gr.Markdown("# تطبيق الدعم النفسي")
    gr.Markdown("اكتب مشكلتك هنا وسنساعدك بأفضل طريقة ممكنة.")

    # Input and output textboxes side by side.
    with gr.Row():
        user_box = gr.Textbox(
            label="اكتب مشكلتك هنا",
            placeholder="مثال: أشعر بالقلق الشديد...",
        )
        reply_box = gr.Textbox(label="الرد")

    # Clicking the button runs the model and fills the reply box.
    send_btn = gr.Button("أرسل")
    send_btn.click(generate_response, inputs=user_box, outputs=reply_box)

app.launch()