CryptoScoutv1 committed
Commit c082f30 (verified) · Parent(s): 5064e05

Update app.py

Files changed (1): app.py (+39 −6)
app.py CHANGED
@@ -4,6 +4,16 @@ from llama_index.llms.base import ChatMessage
 from llama_index.llms import Perplexity
 from openai import OpenAI
 
+
+MODEL_CHOICES_LLM1 = [
+    "pplx-7b-online", "pplx-70b-online", "codellama-34b-instruct", "llama-2-70b-chat", "mistral-7b-instruct",
+    "mixtral-8x7b-instruct", "pplx-7b-chat", "pplx-70b-chat"  # Add actual model names here
+]
+
+MODEL_CHOICES_LLM2 = [
+    "gpt-4", "gpt-4-1106-preview", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", "gpt-3.5-turbo-0613", "dall-e-3"  # Add actual model names here
+]
+
 # Initialize default variables and models for both LLMs
 DEFAULT_LLM1_MODEL = "pplx-70b-online"
 DEFAULT_LLM2_MODEL = "gpt-4"
@@ -87,12 +97,14 @@ def chat_with_llms(message):
 def chat_with_llms(message):
     if not message.strip():
         return "No input provided. Please enter a message."
+
     # First LLM Chain
     messages_1 = []
     if SYSTEM_MESSAGE_1:  # Add system message only if it's not empty
         messages_1.append(ChatMessage(role="system", content=SYSTEM_MESSAGE_1))
     messages_1.append(ChatMessage(role="user", content=(PREP_PROMPT_1 if PREP_PROMPT_1 else "") + message + (AFTER_PROMPT_1 if AFTER_PROMPT_1 else "")))
     response_1 = llm1.chat(messages_1).message.content
+
     # Second LLM Chain
     messages_2 = []
     if SYSTEM_MESSAGE_2:  # Add system message only if it's not empty
@@ -100,22 +112,27 @@ def chat_with_llms(message):
     messages_2.append({"role": "user", "content": (PREP_PROMPT_2 if PREP_PROMPT_2 else "") + response_1})
     completion = client.chat.completions.create(model="gpt-4", temperature=OpenAItemp, messages=messages_2)
     response_2 = completion.choices[0].message.content
+
     return response_2  # Return only the response from LLM 2
+
 ##SHOW FULL INPUTS and OUTPUTS FOR LLM1 AND 2 and exclude any empty values from inputs##
 def chat_with_llms(message):
     if not message.strip():
         return "No input provided. Please enter a message."
+
     # First LLM Chain
     messages_1 = []
     if SYSTEM_MESSAGE_1:  # Add system message only if it's not empty
         messages_1.append(ChatMessage(role="system", content=SYSTEM_MESSAGE_1))
     messages_1.append(ChatMessage(role="user", content=(PREP_PROMPT_1 if PREP_PROMPT_1 else "") + message + (AFTER_PROMPT_1 if AFTER_PROMPT_1 else "")))
     response_1 = llm1.chat(messages_1).message.content
+
     # Full message chain for LLM 1
     full_message_chain_llm1 = 'LLM 1 Conversation:\n'
     for msg in messages_1:
         full_message_chain_llm1 += f"{msg.role.title()}: {msg.content}\n"
     full_message_chain_llm1 += f"Assistant: {response_1}\n\n"
+
     # Second LLM Chain
     messages_2 = []
     if SYSTEM_MESSAGE_2:  # Add system message only if it's not empty
@@ -123,25 +140,31 @@ def chat_with_llms(message):
     messages_2.append({"role": "user", "content": (PREP_PROMPT_2 if PREP_PROMPT_2 else "") + response_1})
     completion = client.chat.completions.create(model="gpt-4", temperature=OpenAItemp, messages=messages_2)
     response_2 = completion.choices[0].message.content
+
     # Full message chain for LLM 2
     full_message_chain_llm2 = 'LLM 2 Conversation:\n'
     for msg in messages_2:
         full_message_chain_llm2 += f"{msg['role'].title()}: {msg['content']}\n"
     full_message_chain_llm2 += f"Assistant: {response_2}\n"
+
     return full_message_chain_llm1 + full_message_chain_llm2
+
 ### SHOW FULL LLMS INPUTS ##
 def chat_with_llms(message):
     # Check if the input message is empty or only contains whitespace
     if not message.strip():
         return "No input provided. Please enter a message."
+
     # First LLM Chain
     messages_1 = [ChatMessage(role="system", content=SYSTEM_MESSAGE_1), ChatMessage(role="user", content=PREP_PROMPT_1 + message + AFTER_PROMPT_1)]
     response_1 = llm1.chat(messages_1).message.content
+
     # Build the full message chain for LLM 1
     full_message_chain_llm1 = 'LLM 1 Conversation:\n'
     for msg in messages_1:
         full_message_chain_llm1 += f"{msg.role.title()}: {msg.content}\n"
     full_message_chain_llm1 += f"Assistant: {response_1}\n\n"
+
     # Second LLM Chain
     messages_2 = [
         {"role": "system", "content": SYSTEM_MESSAGE_2},
@@ -149,18 +172,25 @@ def chat_with_llms(message):
     ]
     completion = client.chat.completions.create(model="gpt-4", temperature=OpenAItemp, messages=messages_2)
     response_2 = completion.choices[0].message.content
+
     # Build the full message chain for LLM 2
     full_message_chain_llm2 = 'LLM 2 Conversation:\n'
     for msg in messages_2:
         full_message_chain_llm2 += f"{msg['role'].title()}: {msg['content']}\n"
     full_message_chain_llm2 += f"Assistant: {response_2}\n"
+
     # Return the full conversation histories of both LLMs
     return full_message_chain_llm1 + full_message_chain_llm2
+
 '''
+
 # Gradio interface for updating LLM 1 variables and model
 llm1_interface = gr.Interface(
     fn=update_variables_and_model_llm1,
-    inputs=["text", "text", "text", "text"],  # Added model name input
+    inputs=[
+        gr.Dropdown(choices=MODEL_CHOICES_LLM1, label="LLM 1 - PPX - Models", value=DEFAULT_LLM1_MODEL),  # Dropdown for LLM1
+        "text", "text", "text"
+    ],
     outputs="text",
     title="Update Variables and Model for LLM 1"
 )
@@ -170,10 +200,15 @@ reset_llm1_interface = gr.Interface(
     outputs="text",
     title="Reset Variables for LLM 1"
 )
+
+
 # Gradio interface for updating LLM 2 variables and model
 llm2_interface = gr.Interface(
     fn=update_variables_and_model_llm2,
-    inputs=["text", "text", "text"],  # Added model name input
+    inputs=[
+        gr.Dropdown(choices=MODEL_CHOICES_LLM2, label="LLM 2 - OpenAI - Models", value=DEFAULT_LLM2_MODEL),  # Dropdown for LLM2
+        "text", "text"
+    ],
     outputs="text",
     title="Update Variables and Model for LLM 2"
 )
@@ -184,8 +219,6 @@ reset_llm2_interface = gr.Interface(
     title="Reset Variables for LLM 2"
 )
 
-# Interfaces for resetting variables remain the same
-
 # Update Gradio chat interface
 chat_interface = gr.Interface(
     fn=chat_with_llms,
@@ -197,5 +230,5 @@ chat_interface = gr.Interface(
 # Tabbed interface
 gr.TabbedInterface(
     [chat_interface, llm1_interface, reset_llm1_interface, llm2_interface, reset_llm2_interface],
-    ["Chat", "Update LLM 1", "Reset LLM 1", "Update LLM 2", "Reset LLM 2"]
-).launch()
+    ["Chat", "LLM 1 - PPX", "Reset LLM 1", "LLM 2 - OpenAI", "Reset LLM 2"]
+).launch()
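For reference, every chat_with_llms variant in this file follows the same two-stage pattern: Perplexity (LLM 1) answers the user's message, and its reply becomes the user prompt for OpenAI (LLM 2). A minimal runnable sketch of that chain, assuming the legacy llama_index.llms import path this Space uses, PPLX_API_KEY/OPENAI_API_KEY environment variables, and illustrative values for the SYSTEM message globals (the PREP/AFTER prompt wrappers are omitted for brevity):

import os

from llama_index.llms import Perplexity
from llama_index.llms.base import ChatMessage
from openai import OpenAI

# Illustrative stand-ins for the Space's configurable globals.
SYSTEM_MESSAGE_1 = "You are a crypto research assistant."
SYSTEM_MESSAGE_2 = "Rewrite the research below as a short briefing."
OpenAItemp = 0.7

llm1 = Perplexity(api_key=os.environ["PPLX_API_KEY"], model="pplx-70b-online")
client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])

def chat_with_llms(message: str) -> str:
    if not message.strip():
        return "No input provided. Please enter a message."
    # Stage 1: Perplexity answers the raw user message.
    messages_1 = [
        ChatMessage(role="system", content=SYSTEM_MESSAGE_1),
        ChatMessage(role="user", content=message),
    ]
    response_1 = llm1.chat(messages_1).message.content
    # Stage 2: OpenAI post-processes LLM 1's answer.
    messages_2 = [
        {"role": "system", "content": SYSTEM_MESSAGE_2},
        {"role": "user", "content": response_1},
    ]
    completion = client.chat.completions.create(
        model="gpt-4", temperature=OpenAItemp, messages=messages_2
    )
    return completion.choices[0].message.content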
 
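One detail of the gr.Dropdown change is easy to miss: Gradio passes each input component's value to fn positionally, in the order of the inputs list, so update_variables_and_model_llm1 must take the selected model name as its first parameter. A minimal sketch of that wiring with a hypothetical updater body (the real function is outside this diff):

import gradio as gr

MODEL_CHOICES_LLM1 = ["pplx-7b-online", "pplx-70b-online", "mistral-7b-instruct"]
DEFAULT_LLM1_MODEL = "pplx-70b-online"

# Hypothetical stand-in for the Space's update_variables_and_model_llm1.
def update_variables_and_model_llm1(model, system_message, prep_prompt, after_prompt):
    # Values arrive in the same order as the inputs list below:
    # the dropdown selection first, then the three text boxes.
    return f"LLM 1 model set to {model!r}; system message set to {system_message!r}"

llm1_interface = gr.Interface(
    fn=update_variables_and_model_llm1,
    inputs=[
        gr.Dropdown(choices=MODEL_CHOICES_LLM1, label="LLM 1 - PPX - Models", value=DEFAULT_LLM1_MODEL),
        "text", "text", "text",
    ],
    outputs="text",
    title="Update Variables and Model for LLM 1",
)

if __name__ == "__main__":
    llm1_interface.launch()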