AIRider committed
Commit 6c72519 · verified · 1 Parent(s): a78ec29

Update app.py

Files changed (1):
  1. app.py +24 -47
app.py CHANGED
@@ -1,15 +1,11 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
-import traceback
 import os
 
 hf_token = os.getenv("HF_TOKEN")
 
-def get_model_response(client, messages, max_tokens, temperature, top_p, model_name):
-    prompt = "\n".join([f"{m['role']}: {m['content']}" for m in messages])
-
+def get_model_response(client, messages, max_tokens, temperature, top_p):
     try:
-        # Try chat_completion first
         response = client.chat_completion(
             messages,
             max_tokens=max_tokens,
@@ -18,72 +14,53 @@ def get_model_response(client, messages, max_tokens, temperature, top_p, model_name):
             stream=True
         )
         for message in response:
-            if hasattr(message.choices[0], 'delta'):
-                token = message.choices[0].delta.content
-            else:
-                token = message.choices[0].text
+            token = message.choices[0].delta.content if hasattr(message.choices[0], 'delta') else message.choices[0].text
             if token:
                 yield token
-    except Exception as chat_error:
-        try:
-            # If chat_completion fails, fall back to text_generation
-            response = client.text_generation(
-                prompt,
-                max_new_tokens=max_tokens,
-                temperature=temperature,
-                top_p=top_p,
-                stream=True
-            )
-            for token in response:
-                yield token
-        except Exception as text_error:
-            # If both methods fail, return the error messages
-            yield f"모델 {model_name}에 대한 추론 실패:\n"  # "Inference failed for model {model_name}:"
-            yield f"Chat 오류: {str(chat_error)}\n"  # "Chat error: ..."
-            yield f"Text 오류: {str(text_error)}"  # "Text error: ..."
+    except Exception as e:
+        yield f"모델 추론 실패: {str(e)}"  # "Model inference failed: ..."
 
 def respond(message, history, system_message, max_tokens, temperature, top_p, selected_model):
     try:
         client = InferenceClient(model=selected_model, token=hf_token)
 
         messages = [{"role": "system", "content": system_message}]
-        for val in history:
-            if val[0]:
-                messages.append({"role": "user", "content": val[0]})
-            if val[1]:
-                messages.append({"role": "assistant", "content": val[1]})
+        messages.extend([{"role": "user" if i % 2 == 0 else "assistant", "content": m} for h in history for i, m in enumerate(h) if m])
         messages.append({"role": "user", "content": message})
 
         response = ""
-        for token in get_model_response(client, messages, max_tokens, temperature, top_p, selected_model):
+        for token in get_model_response(client, messages, max_tokens, temperature, top_p):
             response += token
             yield response
 
         if not response:
             yield "모델이 응답을 생성하지 못했습니다. 다른 입력이나 모델을 시도해보세요."  # "The model produced no response. Try a different input or model."
     except Exception as e:
-        error_msg = f"오류 발생: {str(e)}\n\n상세 오류:\n{traceback.format_exc()}"
-        yield error_msg
+        yield f"오류 발생: {str(e)}"  # "An error occurred: ..."
 
-# Restore the original model list
 models = {
     "deepseek-ai/DeepSeek-Coder-V2-Instruct": "DeepSeek-Coder-V2-Instruct",
     "CohereForAI/c4ai-command-r-plus": "Cohere Command-R Plus",
     "meta-llama/Meta-Llama-3.1-8B-Instruct": "Meta-Llama-3.1-8B-Instruct"
 }
 
-demo = gr.ChatInterface(
-    respond,
-    additional_inputs=[
-        gr.Textbox(value="""너는 나의 최고의 비서이다.
-내가 요구하는것들을 최대한 자세하고 정확하게 답변하라.
-반드시 한글로 답변할것.""", label="시스템 메시지"),
-        gr.Slider(minimum=1, maximum=2000, value=500, step=100, label="최대 새 토큰 수"),
-        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="온도"),
-        gr.Slider(minimum=0.1, maximum=1.0, value=0.90, step=0.05, label="Top-p (핵 샘플링)"),
-        gr.Radio(list(models.keys()), value=list(models.keys())[0], label="언어 모델 선택", info="사용할 언어 모델을 선택하세요")
-    ],
-)
+with gr.Blocks() as demo:
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox()
+    clear = gr.ClearButton([msg, chatbot])
+
+    with gr.Accordion("Additional Inputs", open=True):
+        system_message = gr.Textbox(
+            value="너는 나의 최고의 비서이다.\n내가 요구하는것들을 최대한 자세하고 정확하게 답변하라.\n반드시 한글로 답변할것.",  # "You are my finest assistant. Answer my requests as thoroughly and accurately as possible. Always answer in Korean."
+            label="시스템 메시지",  # "System message"
+            lines=20
+        )
+        max_tokens = gr.Slider(minimum=1, maximum=2000, value=500, step=100, label="최대 새 토큰 수")  # "Max new tokens"
+        temperature = gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="온도")  # "Temperature"
+        top_p = gr.Slider(minimum=0.1, maximum=1.0, value=0.90, step=0.05, label="Top-p (핵 샘플링)")  # "Top-p (nucleus sampling)"
+        model = gr.Radio(list(models.keys()), value=list(models.keys())[0], label="언어 모델 선택", info="사용할 언어 모델을 선택하세요")  # "Select language model" / "Choose the model to use"
+
+    msg.submit(respond, [msg, chatbot, system_message, max_tokens, temperature, top_p, model], chatbot)
 
 if __name__ == "__main__":
     if not hf_token:
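
For reference, the retained code path can be exercised in isolation. Below is a minimal sketch of the chat_completion streaming call that get_model_response now relies on exclusively, assuming HF_TOKEN is exported and the chosen model is reachable through the serverless Inference API; the script is illustrative and not part of this commit.

# Standalone sketch: stream one reply through huggingface_hub's InferenceClient.
# Assumes HF_TOKEN is set in the environment and the model is served by the Inference API.
import os

from huggingface_hub import InferenceClient

client = InferenceClient(
    model="meta-llama/Meta-Llama-3.1-8B-Instruct",
    token=os.getenv("HF_TOKEN"),
)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Explain nucleus sampling in one sentence."},
]

# stream=True yields chunks whose delta carries the newly generated text,
# mirroring the token handling in get_model_response above.
for chunk in client.chat_completion(
    messages,
    max_tokens=500,
    temperature=0.7,
    top_p=0.9,
    stream=True,
):
    token = chunk.choices[0].delta.content
    if token:
        print(token, end="", flush=True)
print()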
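
A behavioral note on the new Blocks wiring: msg.submit streams respond's accumulated string directly into chatbot, while gr.Chatbot in Gradio's tuple format expects a list of [user, assistant] pairs, so the streamed text may not render as a chat turn. Below is a minimal sketch of an adapter under that assumption; user_submit and bot_stream are illustrative names, not part of this commit.

# Sketch: adapt respond()'s streamed string to gr.Chatbot's [user, assistant] pairs.
# user_submit and bot_stream are hypothetical helpers, not part of this commit.
def user_submit(message, history):
    # Record the user turn with an empty assistant slot and clear the textbox.
    return "", history + [[message, ""]]

def bot_stream(history, system_message, max_tokens, temperature, top_p, selected_model):
    message = history[-1][0]
    # respond() yields the accumulated reply; mirror each update into the last pair.
    for partial in respond(message, history[:-1], system_message,
                           max_tokens, temperature, top_p, selected_model):
        history[-1][1] = partial
        yield history

# Inside the `with gr.Blocks() as demo:` block, replacing the single msg.submit call:
msg.submit(user_submit, [msg, chatbot], [msg, chatbot]).then(
    bot_stream,
    [chatbot, system_message, max_tokens, temperature, top_p, model],
    chatbot,
)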