phucpx247 committed on
Commit 2b78cfb · 1 Parent(s): 7992bbe
Files changed (1)
  1. app.py +21 -7
app.py CHANGED
@@ -2,7 +2,17 @@ import gradio as gr
 import requests

 API_KEY = "Prep@123"
-LCG_SERVICE_URL = "http://bore.testsprep.online:8082/v1/theory_lcg"
+LCG_SERVICE_URL_v1 = "http://bore.testsprep.online:8082/v1/theory_lcg"
+LCG_SERVICE_URL_v2 = "http://bore.testsprep.online:8081/v1/theory_lcg"
+LCG_SERVICE_URL_v3 = "http://bore.testsprep.online:8083/v1/theory_lcg"
+
+
+MODEL2SERVICE = {
+    'llama-3.1-sft-awq': LCG_SERVICE_URL_v1,
+    'hermes-3-llama3.1-sft-lora': LCG_SERVICE_URL_v2,
+    'qwen2-1.5b-full-sft': LCG_SERVICE_URL_v3
+}
+

 weights_mapping = {
     'beginner': {
@@ -33,7 +43,7 @@ def get_response_message(config):
         'Content-Type': 'application/json'
     }
     data = {
-        "model": "lcg_sft",
+        "model": config["model_name"],
         "input_data": {
             "user_level": config["user_level"],
             "num_questions": config["num_questions"],
@@ -57,16 +67,19 @@ def get_response_message(config):
         "stop": "string",
         "stream": False
     }
+    try:
+        response = requests.post(MODEL2SERVICE[config["model_name"]], headers=headers, json=data)
+        return response.json()["data"]
+
+    except:
+        return {"message": f"Hiện tại chúng tôi chưa hỗ trợ mô hình {config["model_name"]}"}

-    response = requests.post(LCG_SERVICE_URL, headers=headers, json=data)
-    return response.json()["data"]
-
-
-def generate_questions(user_level, num_questions, question_type, language, explanation_language, context, learning_outcomes, mode):
+def generate_questions(model_name, user_level, num_questions, question_type, language, explanation_language, context, learning_outcomes, mode):
     if mode == "Reviewing" and not context.strip():
         return {"error": "Với chế độ Reviewing, Context không được để trống."}

     config = {
+        "model_name": model_name,
         "user_level": user_level,
         "num_questions": int(num_questions),
         "question_type": question_type,
@@ -82,6 +95,7 @@ def generate_questions(user_level, num_questions, question_type, language, expla
 iface = gr.Interface(
     fn=generate_questions,
     inputs=[
+        gr.Dropdown(list(MODEL2SERVICE.keys()), label="Model Usage"),
         gr.Dropdown(["beginner", "intermediate", "advanced"], label="User Level"),
         gr.Number(value=5, label="Number of Questions"),
         gr.Dropdown(["Short answer", "Single choice", "Multiple choice"], label="Question Type"),