artificialguybr committed on
Commit
0c5bb4b
1 Parent(s): 58bcb60

Update app.py

Files changed (1)
  1. app.py +27 -44
app.py CHANGED
@@ -3,10 +3,9 @@ import requests
import json
import os

-API_KEY = os.getenv('API_KEY')
+API_KEY = os.getenv('API_KEY')
INVOKE_URL = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/0e349b44-440a-44e1-93e9-abe8dcb27158"
FETCH_URL_FORMAT = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/status/"
-
headers = {
    "Authorization": f"Bearer {API_KEY}",
    "Accept": "application/json",
@@ -15,16 +14,10 @@ headers = {

BASE_SYSTEM_MESSAGE = "I carefully provide accurate, factual, thoughtful, nuanced answers and am brilliant at reasoning."

-def clear_chat(chat_history_state, chat_message):
-    print("Clearing chat...")
-    chat_history_state = []
-    chat_message = ''
-    return chat_history_state, chat_message
-
def user(message, history, system_message=None):
    print(f"User message: {message}")
    history = history or []
-    if system_message: # Check if a system message is provided and should be added
+    if system_message:
        history.append({"role": "system", "content": system_message})
    history.append({"role": "user", "content": message})
    return history
@@ -37,8 +30,7 @@ def call_nvidia_api(history, max_tokens, temperature, top_p):
        "max_tokens": max_tokens,
        "stream": False
    }
-
-    print(f"Payload enviado: {payload}")  # Prints the sent payload
+    print(f"Payload enviado: {payload}")

    session = requests.Session()
    response = session.post(INVOKE_URL, headers=headers, json=payload)
@@ -47,61 +39,55 @@ def call_nvidia_api(history, max_tokens, temperature, top_p):
    request_id = response.headers.get("NVCF-REQID")
    fetch_url = FETCH_URL_FORMAT + request_id
    response = session.get(fetch_url, headers=headers)
-
-    response.raise_for_status()
-    response_body = response.json()
+    response.raise_for_status()
+    response_body = response.json()
+    print(f"Payload recebido: {response_body}")

-    print(f"Payload recebido: {response_body}")  # Prints the received payload
-
-    if response_body["choices"]:
-        assistant_message = response_body["choices"][0]["message"]["content"]
-        history.append({"role": "assistant", "content": assistant_message})
-
-    return history
+    if response_body["choices"]:
+        assistant_message = response_body["choices"][0]["message"]["content"]
+        history.append({"role": "assistant", "content": assistant_message})
+    return history

def chat(history, system_message, max_tokens, temperature, top_p, top_k, repetition_penalty):
    print("Starting chat...")
-    updated_history = call_nvidia_api(history, max_tokens, temperature, top_p)
+    updated_history = user(None, history, system_message)
+    updated_history = call_nvidia_api(updated_history, max_tokens, temperature, top_p)
    return updated_history, ""

def update_chatbot(message, chat_history, system_message, max_tokens, temperature, top_p):
    print("Updating chatbot...")
-    if not chat_history or (chat_history and chat_history[-1]["role"] != "user"):
-        chat_history = user(message, chat_history, system_message if not chat_history else None)
-    else:
-        chat_history = user(message, chat_history)
-    chat_history, _ = call_nvidia_api(chat_history, max_tokens, temperature, top_p)
+    chat_history = user(message, chat_history, system_message if not chat_history else None)
+    chat_history = call_nvidia_api(chat_history, max_tokens, temperature, top_p)
    return chat_history

-system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
-max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
-temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
-top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
-
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column():
            gr.Markdown("LLAMA 2 70B Free Demo")
-            description="""
+            description = """
            <div style="text-align: center; font-size: 1.5em; margin-bottom: 20px;">
            <strong>Explore the Capabilities of LLAMA 2 70B</strong>
            </div>
-            <p>Llama 2 is a large language AI model capable of generating text and code in response to prompts.
-            </p>
-            <p> <strong>How to Use:</strong></p>
+            <p>Llama 2 is a large language AI model capable of generating text and code in response to prompts.</p>
+            <p><strong>How to Use:</strong></p>
            <ol>
            <li>Enter your <strong>message</strong> in the textbox to start a conversation or ask a question.</li>
            <li>Adjust the parameters in the "Additional Inputs" accordion to control the model's behavior.</li>
            <li>Use the buttons below the chatbot to submit your query, clear the chat history, or perform other actions.</li>
            </ol>
-            <p> <strong>Powered by NVIDIA's cutting-edge AI API, LLAMA 2 70B offers an unparalleled opportunity to interact with an AI model of exceptional conversational ability, accessible to everyone at no cost.</strong></p>
-            <p> <strong>HF Created by:</strong> @artificialguybr (<a href="https://twitter.com/artificialguybr">Twitter</a>)</p>
-            <p> <strong>Discover more:</strong> <a href="https://artificialguy.com">artificialguy.com</a></p>
+            <p><strong>Powered by NVIDIA's cutting-edge AI API, LLAMA 2 70B offers an unparalleled opportunity to interact with an AI model of exceptional conversational ability, accessible to everyone at no cost.</strong></p>
+            <p><strong>HF Created by:</strong> @artificialguybr (<a href="https://twitter.com/artificialguybr">Twitter</a>)</p>
+            <p><strong>Discover more:</strong> <a href="https://artificialguy.com">artificialguy.com</a></p>
            """
-
            gr.Markdown(description)
+
            chat_history_state = gr.State([])

+            system_msg = gr.Textbox(BASE_SYSTEM_MESSAGE, label="System Message", placeholder="System prompt.", lines=5)
+            max_tokens = gr.Slider(20, 1024, label="Max Tokens", step=20, value=1024)
+            temperature = gr.Slider(0.0, 1.0, label="Temperature", step=0.1, value=0.2)
+            top_p = gr.Slider(0.0, 1.0, label="Top P", step=0.05, value=0.7)
+
            chatbot = gr.ChatInterface(
                fn=lambda message, history: update_chatbot(message, history, system_msg.value, max_tokens.value, temperature.value, top_p.value),
                additional_inputs=[system_msg, max_tokens, temperature, top_p],
@@ -114,9 +100,6 @@ with gr.Blocks() as demo:
            chat_history_state.value = []
            chatbot.textbox.value = ""

-    chatbot.clear(
-        fn=clear_chat,
-        outputs=[chat_history_state, chatbot.textbox]
-    )
+    chatbot.clear(fn=clear_chat, outputs=[chat_history_state, chatbot.textbox])

    demo.launch()
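
For reference, the reworked call_nvidia_api follows NVCF's two-step flow: POST to the function endpoint, read the NVCF-REQID response header, then fetch the result from the status endpoint. The snippet below is not part of the commit; it is a minimal standalone sketch of that flow reusing the same INVOKE_URL, FETCH_URL_FORMAT, and API_KEY environment variable as app.py. The helper name invoke_and_poll, the payload keys other than max_tokens and stream, and the assumption that NVCF answers HTTP 202 while a request is still pending are all illustrative guesses, not taken from the diff.

# Hypothetical sketch of the NVCF invoke/poll pattern used by call_nvidia_api.
# Assumes the same endpoints and API_KEY env var as app.py; the payload schema
# beyond max_tokens/stream and the 202-while-pending behavior are assumptions.
import os
import requests

INVOKE_URL = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/functions/0e349b44-440a-44e1-93e9-abe8dcb27158"
FETCH_URL_FORMAT = "https://api.nvcf.nvidia.com/v2/nvcf/pexec/status/"
HEADERS = {
    "Authorization": f"Bearer {os.getenv('API_KEY')}",
    "Accept": "application/json",
}

def invoke_and_poll(messages, max_tokens=1024, temperature=0.2, top_p=0.7):
    # Build an OpenAI-style chat payload (assumed field names).
    payload = {
        "messages": messages,
        "temperature": temperature,
        "top_p": top_p,
        "max_tokens": max_tokens,
        "stream": False,
    }
    session = requests.Session()
    response = session.post(INVOKE_URL, headers=HEADERS, json=payload)
    # While the request is pending, NVCF is assumed to return 202 plus an
    # NVCF-REQID header; poll the status endpoint until a final result arrives.
    while response.status_code == 202:
        request_id = response.headers.get("NVCF-REQID")
        response = session.get(FETCH_URL_FORMAT + request_id, headers=HEADERS)
    response.raise_for_status()
    body = response.json()
    return body["choices"][0]["message"]["content"]

if __name__ == "__main__":
    print(invoke_and_poll([{"role": "user", "content": "Hello!"}]))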