burman-ai committed on
Commit
b633776
·
verified ·
1 Parent(s): db8b55b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +29 -6
app.py CHANGED
@@ -15,12 +15,20 @@ def respond(
15
  message,
16
  history: list[tuple[str, str]],
17
  system_message,
 
 
 
 
 
18
  custom_model
19
  ):
20
  print(f"Received message: {message}")
21
  print(f"History: {history}")
22
  print(f"System message: {system_message}")
23
 
 
 
 
24
  messages = [{"role": "system", "content": system_message}]
25
 
26
  for val in history:
@@ -37,11 +45,12 @@ def respond(
37
 
38
  for message_chunk in client.chat.completions.create(
39
  model=model_to_use,
40
- max_tokens=2048,
41
- temperature=0.7,
42
- top_p=0.95,
43
- frequency_penalty=0.0,
44
- seed=None,
 
45
  messages=messages,
46
  ):
47
  token_text = message_chunk.choices[0].delta.content
@@ -50,13 +59,25 @@ def respond(
50
 
51
  chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="ChatGPT is initializing...", likeable=True, layout="panel")
52
 
53
- system_message_box = gr.Label(value="System message")
 
 
 
 
 
 
 
54
  custom_model_box = gr.Textbox(value="meta-llama/Llama-3.2-3B-Instruct", label="AI Mode is ")
55
 
56
  demo = gr.ChatInterface(
57
  fn=respond,
58
  additional_inputs=[
59
  system_message_box,
 
 
 
 
 
60
  custom_model_box,
61
  ],
62
  fill_height=True,
@@ -67,3 +88,5 @@ demo = gr.ChatInterface(
67
  if __name__ == "__main__":
68
  print("Launching the ChatGPT-Llama...")
69
  demo.launch()
 
 
 
15
  message,
16
  history: list[tuple[str, str]],
17
  system_message,
18
+ max_tokens,
19
+ temperature,
20
+ top_p,
21
+ frequency_penalty,
22
+ seed,
23
  custom_model
24
  ):
25
  print(f"Received message: {message}")
26
  print(f"History: {history}")
27
  print(f"System message: {system_message}")
28
 
29
+ if seed == -1:
30
+ seed = None
31
+
32
  messages = [{"role": "system", "content": system_message}]
33
 
34
  for val in history:
 
45
 
46
  for message_chunk in client.chat.completions.create(
47
  model=model_to_use,
48
+ max_tokens=max_tokens,
49
+ stream=True,
50
+ temperature=temperature,
51
+ top_p=top_p,
52
+ frequency_penalty=frequency_penalty,
53
+ seed=seed,
54
  messages=messages,
55
  ):
56
  token_text = message_chunk.choices[0].delta.content
 
59
 
60
  chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="ChatGPT is initializing...", likeable=True, layout="panel")
61
 
62
+ system_message_box = gr.Label(value="You can select Max Tokens, Temperature, Top-P, Seed")
63
+
64
+ max_tokens_slider = gr.Slider(1024, 2048, value=1024, step=100, label="Max new tokens")
65
+ temperature_slider = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature")
66
+ top_p_slider = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-P")
67
+ frequency_penalty_slider = gr.Slider(-2.0, 2.0, value=0.0, step=0.1, label="Frequency Penalty")
68
+ seed_slider = gr.Slider(-1, 65535, value=-1, step=1, label="Seed (-1 for random)")
69
+
70
  custom_model_box = gr.Textbox(value="meta-llama/Llama-3.2-3B-Instruct", label="AI Mode is ")
71
 
72
  demo = gr.ChatInterface(
73
  fn=respond,
74
  additional_inputs=[
75
  system_message_box,
76
+ max_tokens_slider,
77
+ temperature_slider,
78
+ top_p_slider,
79
+ frequency_penalty_slider,
80
+ seed_slider,
81
  custom_model_box,
82
  ],
83
  fill_height=True,
 
88
  if __name__ == "__main__":
89
  print("Launching the ChatGPT-Llama...")
90
  demo.launch()
91
+
92
+