AIRider committed
Commit 8ae421b · verified · 1 Parent(s): 87dda7a

Update app.py

Files changed (1)
  1. app.py +15 -25
app.py CHANGED
@@ -16,14 +16,9 @@ models = {
 def get_client(model):
     return InferenceClient(model=model, token=hf_token)
 
-MAX_HISTORY_LENGTH = 5  # 히스토리에 유지할 최대 대화 수
-
-def truncate_history(history):
-    return history[-MAX_HISTORY_LENGTH:] if len(history) > MAX_HISTORY_LENGTH else history
-
 def respond(message, system_message, max_tokens, temperature, top_p, selected_model):
     stop_event.clear()
-    client = InferenceClient(model=selected_model, token=hf_token)
+    client = get_client(selected_model)
 
     messages = [
         {"role": "system", "content": system_message},
@@ -48,23 +43,15 @@ def respond(message, system_message, max_tokens, temperature, top_p, selected_model):
     except Exception as e:
         yield [(message, f"오류 발생: {str(e)}")]
 
-def stop_generation():
-    stop_event.set()
-    return "생성이 중단되었습니다."
-
-def stop_generation():
-    stop_event.set()
-    return "생성이 중단되었습니다."
-
-def regenerate(chat_history, system_message, max_tokens, temperature, top_p, selected_model):
-    if not chat_history:
-        return "대화 내역이 없습니다."
-    last_user_message = chat_history[-1][0]
-    return respond(last_user_message, chat_history[:-1], system_message, max_tokens, temperature, top_p, selected_model)
+def get_last_response(chatbot):
+    if chatbot and len(chatbot) > 0:
+        return chatbot[-1][1]
+    return ""
 
-def continue_writing(last_response, system_message, max_tokens, temperature, top_p, selected_model):
+def continue_writing(chatbot, system_message, max_tokens, temperature, top_p, selected_model):
+    last_response = get_last_response(chatbot)
     stop_event.clear()
-    client = InferenceClient(model=selected_model, token=hf_token)
+    client = get_client(selected_model)
 
     prompt = f"이전 응답을 이어서 작성해주세요. 이전 응답: {last_response}"
     messages = [
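Here the duplicated stop_generation() definitions and the unused regenerate() helper are removed, continue_writing() now receives the Chatbot history directly, and a small get_last_response() helper pulls the assistant half of the most recent (user, assistant) pair, falling back to an empty string. A self-contained check of that helper's behavior, with made-up history:

```python
def get_last_response(chatbot):
    if chatbot and len(chatbot) > 0:
        return chatbot[-1][1]
    return ""


# Hypothetical Gradio-style history: a list of (user, assistant) tuples.
history = [
    ("Hello", "Hi! How can I help you?"),
    ("Explain generators", "A generator yields values lazily..."),
]

assert get_last_response(history) == "A generator yields values lazily..."
assert get_last_response([]) == ""      # empty history -> empty string
assert get_last_response(None) == ""    # missing history is also handled
```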
@@ -85,12 +72,15 @@ def continue_writing(last_response, system_message, max_tokens, temperature, top_p, selected_model):
             break
         if chunk:
             response += chunk
-            yield [("계속 작성", response)]
+            yield chatbot + [("계속 작성", response)]
 
     except Exception as e:
-        yield [("계속 작성", f"오류 발생: {str(e)}")]
+        yield chatbot + [("계속 작성", f"오류 발생: {str(e)}")]
+
+def stop_generation():
+    stop_event.set()
+    return "생성이 중단되었습니다."
 
-# Gradio 인터페이스 수정
 with gr.Blocks() as demo:
     chatbot = gr.Chatbot()
     msg = gr.Textbox(label="메시지 입력")
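Two things change in this hunk: continue_writing() now yields the incoming history plus the new ("계속 작성", …) pair, so the earlier conversation stays in the Chatbot instead of being replaced by a single pair, and one stop_generation() definition takes the place of the duplicates deleted above. A small illustration of the history-preserving yield (sample data is hypothetical):

```python
history = [("hi", "hello"), ("continue", "first part of the answer")]
new_pair = ("계속 작성", "...and the continuation")

replaced = [new_pair]            # old behavior: the Chatbot keeps only this pair
appended = history + [new_pair]  # new behavior: previous turns stay visible

assert len(replaced) == 1
assert len(appended) == 3
assert appended[:2] == history   # earlier conversation is untouched
```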
@@ -116,7 +106,7 @@ with gr.Blocks() as demo:
     send.click(respond, inputs=[msg, system_message, max_tokens, temperature, top_p, model], outputs=[chatbot])
     msg.submit(respond, inputs=[msg, system_message, max_tokens, temperature, top_p, model], outputs=[chatbot])
     continue_btn.click(continue_writing,
-                       inputs=[lambda: chatbot[-1][1] if chatbot else "", system_message, max_tokens, temperature, top_p, model],
+                       inputs=[chatbot, system_message, max_tokens, temperature, top_p, model],
                        outputs=[chatbot])
     stop.click(stop_generation, outputs=[msg])
     clear.click(lambda: None, outputs=[chatbot])
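Gradio's inputs= list must contain components, not callables, so the lambda that tried to read chatbot[-1][1] at wiring time is replaced by the chatbot component itself; Gradio then passes its current value (the list of message pairs) as the first argument of continue_writing(). A minimal, runnable sketch of that wiring; the component values, model names, and the stub handler below are placeholders, not the app's real configuration:

```python
import gradio as gr


def continue_writing(chatbot, system_message, max_tokens, temperature, top_p, selected_model):
    # Stand-in for the real streaming handler: just append one pair to the history.
    chatbot = chatbot or []
    return chatbot + [("계속 작성", f"received {len(chatbot)} previous turns")]


with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="메시지 입력")
    system_message = gr.Textbox(value="You are a helpful assistant.", label="System message")
    max_tokens = gr.Slider(1, 2048, value=512, label="Max tokens")
    temperature = gr.Slider(0.0, 2.0, value=0.7, label="Temperature")
    top_p = gr.Slider(0.0, 1.0, value=0.9, label="Top-p")
    model = gr.Dropdown(choices=["model-a", "model-b"], value="model-a", label="Model")
    continue_btn = gr.Button("계속 작성")

    continue_btn.click(
        continue_writing,
        # The component goes here; its value is delivered to the handler at click time.
        inputs=[chatbot, system_message, max_tokens, temperature, top_p, model],
        outputs=[chatbot],
    )

if __name__ == "__main__":
    demo.launch()
```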
 