artificialguybr committed
Commit e5b736f
1 Parent(s): d09c482

Update app.py

Files changed (1):
  1. app.py  +47 -46
app.py CHANGED
@@ -63,20 +63,7 @@ def chat(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
     print("Starting chat...")
     updated_history = call_nvidia_api(history, max_tokens, temperature, top_p)
     return updated_history, ""
-
-def update_chatbot(message, chat_history, system_message, max_tokens, temperature, top_p):
-    if not chat_history or (chat_history and chat_history[-1]["role"] != "user"):
-        chat_history = user(message, chat_history, system_message if not chat_history else None)
-    else:
-        chat_history = user(message, chat_history)
-    chat_history = call_nvidia_api(chat_history, max_tokens, temperature, top_p)
-
-    formatted_chat_history = []
-    for msg in chat_history:
-        if msg["role"] == "user" or msg["role"] == "assistant":
-            formatted_chat_history.append([msg["content"].strip()])
-
-    return formatted_chat_history, chat_history
+
 # Gradio interface setup
 with gr.Blocks() as demo:
     with gr.Row():
@@ -91,42 +78,56 @@ with gr.Blocks() as demo:
     <p> <strong>How to Use:</strong></p>
     <ol>
     <li>Enter your <strong>message</strong> in the textbox to start a conversation or ask a question.</li>
-    <li>Adjust the <strong>Temperature</strong> and <strong>Top P</strong> sliders to control the creativity and diversity of the responses.</li>
-    <li>Set the <strong>Max Tokens</strong> slider to determine the length of the response.</li>
-    <li>Use the <strong>System Message</strong> textbox if you wish to provide a specific context or instruction for the AI.</li>
-    <li>Click <strong>Send message</strong> to submit your query and receive a response from LLAMA 2 70B.</li>
-    <li>Press <strong>New topic</strong> to clear the chat history and start a new conversation thread.</li>
+    <li>Adjust the parameters in the "Additional Inputs" accordion to control the model's behavior.</li>
+    <li>Use the buttons below the chatbot to submit your query, clear the chat history, or perform other actions.</li>
     </ol>
     <p> <strong>Powered by NVIDIA's cutting-edge AI API, LLAMA 2 70B offers an unparalleled opportunity to interact with an AI model of exceptional conversational ability, accessible to everyone at no cost.</strong></p>
     <p> <strong>HF Created by:</strong> @artificialguybr (<a href="https://twitter.com/artificialguybr">Twitter</a>)</p>
     <p> <strong>Discover more:</strong> <a href="https://artificialguy.com">artificialguy.com</a></p>
     """
-    gr.Markdown(description)
-    chatbot = gr.Chatbot()
-    message = gr.Textbox(label="What do you want to chat about?", placeholder="Ask me anything.", lines=3)
-    system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5, visible=False)
-    max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024, interactive=True, visible=False)
-    temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2, interactive=True, visible=False)
-    top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7, interactive=True, visible=False)
-    chat_history_state = gr.State([])
-
-    submit = gr.Button(value="Send message")
-    clear = gr.Button(value="New topic")
-
-    additional_inputs = gr.Accordion("Additional Inputs", open=False)
-    with additional_inputs:
-        gr.Row([system_msg, max_tokens, temperature, top_p])
-
-    submit.click(
-        fn=update_chatbot,
-        inputs=[message, chat_history_state, system_msg, max_tokens, temperature, top_p],
-        outputs=[chatbot, chat_history_state]
-    )
-
-    clear.click(
-        fn=clear_chat,
-        inputs=[chat_history_state],
-        outputs=[chatbot, chat_history_state]
-    )
+    gr.Markdown(description)
+    chatbot = gr.ChatInterface(
+        fn=chat,
+        title="LLAMA 2 70B Chatbot",
+        submit_btn="Submit",
+        stop_btn="Stop",
+        retry_btn="🔄 Retry",
+        undo_btn="↩️ Undo",
+        clear_btn="🗑️ Clear"
+    )
+    message = chatbot.chatbox
+    system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
+    max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024, interactive=True)
+    temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2, interactive=True)
+    top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7, interactive=True)
+    chat_history_state = gr.State([])
+
+    with gr.Accordion("Additional Inputs"):
+        gr.Markdown("Customize the model's behavior using the inputs below.")
+        system_msg_container = gr.Container(system_msg, label="System Message")
+        max_tokens_container = gr.Container(max_tokens, label="Max Tokens")
+        temperature_container = gr.Container(temperature, label="Temperature")
+        top_p_container = gr.Container(top_p, label="Top P")
+
+    def update_chatbot(message, chat_history, system_message, max_tokens, temperature, top_p):
+        print("Updating chatbot...")
+        if not chat_history or (chat_history and chat_history[-1]["role"] != "user"):
+            chat_history = user(message, chat_history, system_message if not chat_history else None)
+        else:
+            chat_history = user(message, chat_history)
+        chat_history, _ = chat(chat_history, system_message, max_tokens, temperature, top_p, 40, 1.1)
+        return chat_history
+
+    chatbot.submit(
+        fn=update_chatbot,
+        inputs=[message, chat_history_state, system_msg, max_tokens, temperature, top_p],
+        outputs=chat_history_state
+    )
+
+    chatbot.clear(
+        fn=clear_chat,
+        inputs=[chat_history_state, message],
+        outputs=[chat_history_state, message]
+    )

 demo.launch()
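
Note: the new wiring keeps chat history as a list of {"role": ..., "content": ...} dicts and only seeds the system message on a fresh conversation. The standalone sketch below is not part of the commit; user() and call_nvidia_api() are simplified stand-ins for the functions defined elsewhere in app.py, and the chat() indirection (which forwards the fixed top_k=40 and repetition_penalty=1.1) is collapsed into a single step.

def user(message, history, system_message=None):
    # Stand-in for app.py's user(): optionally seed a system turn, then append the user turn.
    history = history or []
    if system_message:
        history.append({"role": "system", "content": system_message})
    history.append({"role": "user", "content": message})
    return history

def call_nvidia_api(history, max_tokens, temperature, top_p):
    # Stand-in for the NVIDIA API call: append a canned assistant reply instead of a real response.
    history.append({"role": "assistant", "content": "(stub reply)"})
    return history

def update_chatbot(message, chat_history, system_message, max_tokens, temperature, top_p):
    # Same branching as the committed code: seed the system message only when history is empty.
    if not chat_history or chat_history[-1]["role"] != "user":
        chat_history = user(message, chat_history, system_message if not chat_history else None)
    else:
        chat_history = user(message, chat_history)
    return call_nvidia_api(chat_history, max_tokens, temperature, top_p)

print(update_chatbot("Hello!", [], "You are a helpful assistant.", 1024, 0.2, 0.7))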