burman-ai committed on
Commit
5272ce5
·
verified ·
1 Parent(s): 19532c8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -29
app.py CHANGED
@@ -15,20 +15,12 @@ def respond(
15
  message,
16
  history: list[tuple[str, str]],
17
  system_message,
18
- max_tokens,
19
- temperature,
20
- top_p,
21
- frequency_penalty,
22
- seed,
23
  custom_model
24
  ):
25
  print(f"Received message: {message}")
26
  print(f"History: {history}")
27
  print(f"System message: {system_message}")
28
 
29
- if seed == -1:
30
- seed = None
31
-
32
  messages = [{"role": "system", "content": system_message}]
33
 
34
  for val in history:
@@ -45,12 +37,6 @@ def respond(
45
 
46
  for message_chunk in client.chat.completions.create(
47
  model=model_to_use,
48
- max_tokens=max_tokens,
49
- stream=True,
50
- temperature=temperature,
51
- top_p=top_p,
52
- frequency_penalty=frequency_penalty,
53
- seed=seed,
54
  messages=messages,
55
  ):
56
  token_text = message_chunk.choices[0].delta.content
@@ -59,25 +45,13 @@ def respond(
59
 
60
  chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="ChatGPT is initializing...", likeable=True, layout="panel")
61
 
62
- system_message_box = gr.Label(value="You can select Max Tokens, Temperature, Top-P, Seed")
63
-
64
- max_tokens_slider = gr.Slider(1024, 2048, value=1024, step=100, label="Max new tokens")
65
- temperature_slider = gr.Slider(0.1, 1.0, value=0.7, step=0.1, label="Temperature")
66
- top_p_slider = gr.Slider(0.1, 1.0, value=0.95, step=0.05, label="Top-P")
67
- frequency_penalty_slider = gr.Slider(-2.0, 2.0, value=0.0, step=0.1, label="Frequency Penalty")
68
- seed_slider = gr.Slider(-1, 65535, value=-1, step=1, label="Seed (-1 for random)")
69
-
70
  custom_model_box = gr.Textbox(value="meta-llama/Llama-3.2-3B-Instruct", label="AI Mode is ")
71
 
72
  demo = gr.ChatInterface(
73
  fn=respond,
74
  additional_inputs=[
75
  system_message_box,
76
- max_tokens_slider,
77
- temperature_slider,
78
- top_p_slider,
79
- frequency_penalty_slider,
80
- seed_slider,
81
  custom_model_box,
82
  ],
83
  fill_height=True,
@@ -87,5 +61,4 @@ demo = gr.ChatInterface(
87
 
88
  if __name__ == "__main__":
89
  print("Launching the ChatGPT-Llama...")
90
- demo.launch()
91
-
 
15
  message,
16
  history: list[tuple[str, str]],
17
  system_message,
 
 
 
 
 
18
  custom_model
19
  ):
20
  print(f"Received message: {message}")
21
  print(f"History: {history}")
22
  print(f"System message: {system_message}")
23
 
 
 
 
24
  messages = [{"role": "system", "content": system_message}]
25
 
26
  for val in history:
 
37
 
38
  for message_chunk in client.chat.completions.create(
39
  model=model_to_use,
 
 
 
 
 
 
40
  messages=messages,
41
  ):
42
  token_text = message_chunk.choices[0].delta.content
 
45
 
46
  chatbot = gr.Chatbot(height=600, show_copy_button=True, placeholder="ChatGPT is initializing...", likeable=True, layout="panel")
47
 
48
+ system_message_box = gr.Label(value="System message")
 
 
 
 
 
 
 
49
  custom_model_box = gr.Textbox(value="meta-llama/Llama-3.2-3B-Instruct", label="AI Mode is ")
50
 
51
  demo = gr.ChatInterface(
52
  fn=respond,
53
  additional_inputs=[
54
  system_message_box,
 
 
 
 
 
55
  custom_model_box,
56
  ],
57
  fill_height=True,
 
61
 
62
  if __name__ == "__main__":
63
  print("Launching the ChatGPT-Llama...")
64
+ demo.launch()