burman-ai committed on
Commit
0f1304a
·
verified ·
1 Parent(s): 89ccc9a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -8
app.py CHANGED
@@ -13,12 +13,23 @@ print("OpenAI client initialized.")
13
 
14
  def respond(
15
  message,
16
- history: list[tuple[str, str]]
 
 
 
 
 
 
 
17
  ):
18
  print(f"Received message: {message}")
19
  print(f"History: {history}")
 
20
 
21
- messages = []
 
 
 
22
 
23
  for val in history:
24
  if val[0]:
@@ -28,18 +39,18 @@ def respond(
28
 
29
  messages.append({"role": "user", "content": message})
30
 
31
- model_to_use = "meta-llama/Llama-3.1-8B-Instruct"
32
 
33
  response = ""
34
 
35
  for message_chunk in client.chat.completions.create(
36
  model=model_to_use,
37
- max_tokens=2048,
38
  stream=True,
39
- temperature=0.7,
40
- top_p=0.95,
41
- frequency_penalty=0.0,
42
- seed=None,
43
  messages=messages,
44
  ):
45
  token_text = message_chunk.choices[0].delta.content
@@ -48,8 +59,27 @@ def respond(
48
 
49
  chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="ChatGPT is initializing...", likeable=True, layout="panel")
50
 
 
 
 
 
 
 
 
 
 
 
51
  demo = gr.ChatInterface(
52
  fn=respond,
 
 
 
 
 
 
 
 
 
53
  fill_height=True,
54
  chatbot=chatbot,
55
  theme="Nymbo/Nymbo_Theme",
@@ -58,3 +88,4 @@ demo = gr.ChatInterface(
58
  if __name__ == "__main__":
59
  print("Launching the ChatGPT-Llama...")
60
  demo.launch()
 
 
13
 
14
  def respond(
15
  message,
16
+ history: list[tuple[str, str]],
17
+ system_message,
18
+ max_tokens,
19
+ temperature,
20
+ top_p,
21
+ frequency_penalty,
22
+ seed,
23
+ custom_model
24
  ):
25
  print(f"Received message: {message}")
26
  print(f"History: {history}")
27
+ print(f"System message: {system_message}")
28
 
29
+ if seed == -1:
30
+ seed = None
31
+
32
+ messages = [{"role": "system", "content": system_message}]
33
 
34
  for val in history:
35
  if val[0]:
 
39
 
40
  messages.append({"role": "user", "content": message})
41
 
42
+ model_to_use = custom_model.strip() if custom_model.strip() != "" else "meta-llama/Llama-3.1-8B-Instruct"
43
 
44
  response = ""
45
 
46
  for message_chunk in client.chat.completions.create(
47
  model=model_to_use,
48
+ max_tokens=max_tokens,
49
  stream=True,
50
+ temperature=temperature,
51
+ top_p=top_p,
52
+ frequency_penalty=frequency_penalty,
53
+ seed=seed,
54
  messages=messages,
55
  ):
56
  token_text = message_chunk.choices[0].delta.content
 
59
 
60
  chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="ChatGPT is initializing...", likeable=True, layout="panel")
61
 
62
+ system_message_box = gr.Label(value="You can select Max Tokens, Temperature, Top-P, Seed")
63
+
64
+ max_tokens_slider = gr.Slider(1024, 2048, value=1024, step=100, label="Max new tokens")
65
+ temperature_slider = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature")
66
+ top_p_slider = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-P")
67
+ frequency_penalty_slider = gr.Slider(-2.0, 2.0, value=0.0, step=0.1, label="Frequency Penalty")
68
+ seed_slider = gr.Slider(-1, 65535, value=-1, step=1, label="Seed (-1 for random)")
69
+
70
+ custom_model_box = gr.Textbox(value="meta-llama/Llama-3.2-3B-Instruct", label="AI Mode is ")
71
+
72
  demo = gr.ChatInterface(
73
  fn=respond,
74
+ additional_inputs=[
75
+ system_message_box,
76
+ max_tokens_slider,
77
+ temperature_slider,
78
+ top_p_slider,
79
+ frequency_penalty_slider,
80
+ seed_slider,
81
+ custom_model_box,
82
+ ],
83
  fill_height=True,
84
  chatbot=chatbot,
85
  theme="Nymbo/Nymbo_Theme",
 
88
  if __name__ == "__main__":
89
  print("Launching the ChatGPT-Llama...")
90
  demo.launch()
91
+