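# Gradio demo for the Learning Content Generation (LCG) theory-question service:
# it collects generation settings from the UI and forwards them to the LCG HTTP API.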
import gradio as gr
import requests
API_KEY = "one23@123"
LCG_SERVICE_URL_v1 = "http://bore.testsprep.online:8081/v1/theory_lcg"
LCG_SERVICE_URL_v2 = "http://bore.testsprep.online:8082/v1/theory_lcg"
LCG_SERVICE_URL_v3 = "http://bore.testsprep.online:8083/v1/theory_lcg"
LCG_SERVICE_URL_v4 = "http://bore.testsprep.online:8084/v1/theory_lcg"
MODEL2SERVICE = {
    'llama-3.1-sft-awq-v6.3': LCG_SERVICE_URL_v2,
    'storm-llama-3.1-sft-awq-v6.3': LCG_SERVICE_URL_v3
}
weights_mapping = {
    'beginner': {
        'easy': 0.6,
        'medium': 0.2,
        'hard': 0.2
    },
    'intermediate': {
        'easy': 0.2,
        'medium': 0.6,
        'hard': 0.2
    },
    'advanced': {
        'easy': 0.2,
        'medium': 0.2,
        'hard': 0.6
    }
}
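# Illustrative lookup: an "intermediate" learner gets mostly medium questions,
# e.g. weights_mapping["intermediate"] -> {"easy": 0.2, "medium": 0.6, "hard": 0.2}.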
LIST_USER_LEVEL = ["beginner", "intermediate", "advanced"]
LIST_MODELS = list(MODEL2SERVICE.keys())
template_los = """0. Identify the challenges in Matching sentence endings: More endings than questions, Some endings may be grammatically correct but not connected to the main ideas in the text, Information for possible endings is placed randomly in the passage
1. Understand and apply the steps for answering Matching sentence endings questions effectively: Read and underline keywords in incomplete sentences and endings, Scan using keywords in incomplete sentences to locate the information area, and Match incomplete sentences with endings and compare to the information area"""
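# Each line of the Learning Outcomes textbox becomes one entry in the request's
# "learning_outcomes" list (see get_response_message below).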
def get_response_message(config):
    """Build the LCG request payload from `config`, call the matching service, and return its `data` field."""
    headers = {
        'accept': 'application/json',
        'Authorization': f'Bearer {API_KEY}',
        'Content-Type': 'application/json'
    }
    data = {
        "model": config["model_name"],
        "input_data": {
            "user_level": config["user_level"],
            "num_questions": config["num_questions"],
            "question_type": config["question_type"],
            "language": config["language"],
            "explanation_language": config["explanation_language"],
            "context": config["context"],
            # One learning outcome per non-empty line of the textbox input.
            "learning_outcomes": [lo.strip() for lo in config['learning_outcomes'].split('\n') if lo.strip()],
            "mode": config["mode"],
            # Difficulty weights follow the selected user level (see weights_mapping above).
            "weights": weights_mapping[config["user_level"]]
        },
        "do_sample": True,
        "temperature": 0.7,
        "top_p": 0.9,
        "n": 1,
        "max_tokens": 8192,
        "stop": "string",
        "stream": False
    }
    try:
        response = requests.post(MODEL2SERVICE[config["model_name"]], headers=headers, json=data)
        return response.json()["data"]
    except Exception as e:
        return {"error_message": f"{e}"}
def generate_questions(model_name, user_level, num_questions, question_type, language, explanation_language, context,
                       learning_outcomes, mode):
    """Validate the UI inputs, assemble the request config, and return the generated questions as JSON."""
    if mode == "revision" and not context.strip():
        return {"error_message": "In `revision` mode, `context` must not be empty."}
    config = {
        "model_name": model_name,
        "user_level": user_level,
        "num_questions": int(num_questions),
        "question_type": question_type,
        "language": language,
        "explanation_language": explanation_language,
        "context": context,
        "learning_outcomes": learning_outcomes,
        "mode": mode
    }
    return get_response_message(config)
iface = gr.Interface(
    fn=generate_questions,
    inputs=[
        gr.Dropdown(LIST_MODELS, label="Model Usage", value=LIST_MODELS[0]),
        gr.Dropdown(LIST_USER_LEVEL, label="User Level", value=LIST_USER_LEVEL[0]),
        gr.Number(value=5, label="Number of Questions"),
        gr.Dropdown(["short_answer", "single_choice", "multiple_choice"], label="Question Type", value="single_choice"),
        gr.Dropdown(["en", "vi"], label="Language", value="en"),
        gr.Dropdown(["en", "vi"], label="Explanation Language", value="en"),
        gr.Textbox(lines=5, placeholder="Enter context here...",
                   label="Context (Lesson content or Reading comprehension passage)"),
        gr.Textbox(lines=5, value=template_los, label="Learning Outcomes"),
        gr.Dropdown(["revision", "practice"], label="Mode", value="practice")
    ],
    outputs=gr.JSON(label="Generated Questions"),
    title="Learning Content Generation",
    description="Generate questions based on user input and learning outcomes."
)
iface.launch()
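# Illustrative smoke test (not part of the app): calling generate_questions directly
# with names defined above; assumes the LCG endpoint is reachable with API_KEY.
# result = generate_questions(
#     model_name=LIST_MODELS[0],
#     user_level="beginner",
#     num_questions=3,
#     question_type="single_choice",
#     language="en",
#     explanation_language="en",
#     context="",
#     learning_outcomes=template_los,
#     mode="practice",
# )
# print(result)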