SG34 committed on
Commit
35ee52f
·
verified ·
1 Parent(s): 4077845

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +28 -2
app.py CHANGED
@@ -1,6 +1,7 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
 
4
 
5
  MODELS = {
6
  "google/gemma-2-9b": "google/gemma-2-9b-it",
@@ -12,16 +13,35 @@ MODELS = {
12
  "Mixtral 8x7B": "mistralai/Mistral-7B-Instruct-v0.3",
13
  "Mixtral Nous-Hermes": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
14
  "Cohere Command R+": "CohereForAI/c4ai-command-r-plus",
15
- "Cohere Aya-23-35B": "CohereForAI/aya-23-35B"
 
16
  }
17
 
 
 
 
18
  def get_client(model_name):
 
 
19
  model_id = MODELS[model_name]
20
  hf_token = os.getenv("HF_TOKEN")
21
  if not hf_token:
22
  raise ValueError("HF_TOKEN environment variable is required")
23
  return InferenceClient(model_id, token=hf_token)
24
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
  def respond(
26
  message,
27
  chat_history,
@@ -32,6 +52,12 @@ def respond(
32
  system_message,
33
  ):
34
  try:
 
 
 
 
 
 
35
  client = get_client(model_name)
36
  except ValueError as e:
37
  chat_history.append((message, str(e)))
@@ -116,4 +142,4 @@ with gr.Blocks() as demo:
116
  clear_button.click(clear_conversation, outputs=chatbot, queue=False)
117
 
118
  if __name__ == "__main__":
119
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
  import os
4
+ import openai # OpenAI API를 사용하기 위해 추가
5
 
6
  MODELS = {
7
  "google/gemma-2-9b": "google/gemma-2-9b-it",
 
13
  "Mixtral 8x7B": "mistralai/Mistral-7B-Instruct-v0.3",
14
  "Mixtral Nous-Hermes": "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
15
  "Cohere Command R+": "CohereForAI/c4ai-command-r-plus",
16
+ "Cohere Aya-23-35B": "CohereForAI/aya-23-35B",
17
+ "OpenAI GPT-4o Mini": "openai/gpt-4o-mini" # 새로운 모델 추가
18
  }
19
 
20
# OpenAI API client configuration.
# SECURITY FIX: the original hard-coded a secret API key in source — and
# passed the key itself as the *name* argument to os.getenv, which would
# always return None anyway. Read the key from the OPENAI_API_KEY
# environment variable instead; the leaked key must be revoked.
openai.api_key = os.getenv("OPENAI_API_KEY")
22
+
23
def get_client(model_name):
    """Return an InferenceClient for *model_name*, or None for the OpenAI model.

    The "OpenAI GPT-4o Mini" entry is served directly through the OpenAI
    API, so no HuggingFace client is created for it.

    Raises:
        ValueError: if the HF_TOKEN environment variable is not set.
    """
    if model_name == "OpenAI GPT-4o Mini":
        # Caller talks to the OpenAI API directly; no HF client needed.
        return None
    model_id = MODELS[model_name]
    token = os.getenv("HF_TOKEN")
    if not token:
        raise ValueError("HF_TOKEN environment variable is required")
    return InferenceClient(model_id, token=token)
31
 
32
def call_openai_api(content, system_message, max_tokens, temperature, top_p, model="gpt-4o-mini"):
    """Send a single-turn chat-completion request to the OpenAI API.

    Args:
        content: The user's message text.
        system_message: System prompt placed before the user message.
        max_tokens: Maximum number of tokens to generate.
        temperature: Sampling temperature, forwarded as-is.
        top_p: Nucleus-sampling parameter, forwarded as-is.
        model: OpenAI model id; defaults to "gpt-4o-mini" to match the
            original hard-coded value (backward compatible).

    Returns:
        The assistant's reply text.
    """
    # `openai.ChatCompletion.create` was removed in openai>=1.0; use the
    # v1 module-level chat-completions endpoint instead. v1 responses are
    # typed objects, so the reply is read via attribute access rather than
    # the old dict-style `message['content']`.
    response = openai.chat.completions.create(
        model=model,
        messages=[
            {"role": "system", "content": system_message},
            {"role": "user", "content": content},
        ],
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return response.choices[0].message.content
44
+
45
  def respond(
46
  message,
47
  chat_history,
 
52
  system_message,
53
  ):
54
  try:
55
+ if model_name == "OpenAI GPT-4o Mini":
56
+ assistant_message = call_openai_api(message, system_message, max_tokens, temperature, top_p)
57
+ chat_history.append((message, assistant_message))
58
+ yield chat_history
59
+ return
60
+
61
  client = get_client(model_name)
62
  except ValueError as e:
63
  chat_history.append((message, str(e)))
 
142
  clear_button.click(clear_conversation, outputs=chatbot, queue=False)
143
 
144
  if __name__ == "__main__":
145
+ demo.launch()