CryptoScoutv1 committed on
Commit
ee38f27
·
verified ·
1 Parent(s): c082f30

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +90 -228
app.py CHANGED
@@ -1,234 +1,96 @@
1
  import os
 
 
 
2
  import gradio as gr
3
- from llama_index.llms.base import ChatMessage
4
- from llama_index.llms import Perplexity
5
- from openai import OpenAI
6
 
7
-
8
- MODEL_CHOICES_LLM1 = [
9
- "pplx-7b-online", "pplx-70b-online", "codellama-34b-instruct", "llama-2-70b-chat", "mistral-7b-instruct",
10
- "mixtral-8x7b-instruct", "pplx-7b-chat", "pplx-70b-chat" # Add actual model names here
11
- ]
12
-
13
- MODEL_CHOICES_LLM2 = [
14
- "gpt-4", "gpt-4-1106-preview", "gpt-3.5-turbo", "gpt-3.5-turbo-1106", " gpt-3.5-turbo-0613", "dall-e-3" # Add actual model names here
15
- ]
16
-
17
- # Initialize default variables and models for both LLMs
18
- DEFAULT_LLM1_MODEL = "pplx-70b-online"
19
- DEFAULT_LLM2_MODEL = "gpt-4"
20
- SYSTEM_MESSAGE_1, PREP_PROMPT_1, AFTER_PROMPT_1, LLM1_MODEL = "", "", "", DEFAULT_LLM1_MODEL
21
- SYSTEM_MESSAGE_2, PREP_PROMPT_2, LLM2_MODEL = "", "", DEFAULT_LLM2_MODEL
22
-
23
- # Initialize Perplexity model (LLM 1)
24
- pplx_api_key = os.getenv('PPX_API_KEY')
25
- llm1 = Perplexity(api_key=pplx_api_key, model=LLM1_MODEL, temperature=0.5)
26
-
27
- # Initialize OpenAI GPT model (LLM 2)
28
- openai_api_key = os.getenv('OPENAI_API_KEY')
29
- client = OpenAI(api_key=openai_api_key)
30
- OpenAItemp = 1.0
31
-
32
- ## LLM Chain Functions ##
33
- def update_variables_and_model_llm1(model_name, system_message_1, prep_prompt_1, after_prompt_1):
34
- global SYSTEM_MESSAGE_1, PREP_PROMPT_1, AFTER_PROMPT_1, LLM1_MODEL, llm1
35
- SYSTEM_MESSAGE_1, PREP_PROMPT_1, AFTER_PROMPT_1 = system_message_1.strip(), prep_prompt_1.strip(), after_prompt_1.strip()
36
- LLM1_MODEL = model_name.strip()
37
- llm1 = Perplexity(api_key=pplx_api_key, model=LLM1_MODEL, temperature=0.5)
38
- return "Variables and model updated for LLM 1!"
39
-
40
- def update_variables_and_model_llm2(model_name, system_message_2, prep_prompt_2):
41
- global SYSTEM_MESSAGE_2, PREP_PROMPT_2, LLM2_MODEL
42
- SYSTEM_MESSAGE_2, PREP_PROMPT_2 = system_message_2.strip(), prep_prompt_2.strip()
43
- LLM2_MODEL = model_name.strip()
44
- return "Variables and model updated for LLM 2!"
45
-
46
- def reset_variables_llm1():
47
- global SYSTEM_MESSAGE_1, PREP_PROMPT_1, AFTER_PROMPT_1, LLM1_MODEL, llm1
48
- SYSTEM_MESSAGE_1, PREP_PROMPT_1, AFTER_PROMPT_1 = "", "", ""
49
- LLM1_MODEL = DEFAULT_LLM1_MODEL
50
- llm1 = Perplexity(api_key=pplx_api_key, model=LLM1_MODEL, temperature=0.5)
51
- return "Variables and model reset for LLM 1!"
52
-
53
- def reset_variables_llm2():
54
- global SYSTEM_MESSAGE_2, PREP_PROMPT_2, LLM2_MODEL
55
- SYSTEM_MESSAGE_2, PREP_PROMPT_2 = "", ""
56
- LLM2_MODEL = DEFAULT_LLM2_MODEL
57
- return "Variables and model reset for LLM 2!"
58
-
59
-
60
- ## FINAL OUTPUT with only send pre,after,system if filled in and not an empty space##
61
- def chat_with_llms(message):
62
- if not message.strip():
63
- return "No input provided. Please enter a message."
64
-
65
- # First LLM Chain
66
- messages_1 = []
67
- if SYSTEM_MESSAGE_1: # Add system message only if it's not empty
68
- messages_1.append(ChatMessage(role="system", content=SYSTEM_MESSAGE_1))
69
- # Prepare the user message for LLM 1
70
- user_message_1 = ""
71
- if PREP_PROMPT_1:
72
- user_message_1 += PREP_PROMPT_1
73
- user_message_1 += message
74
- if AFTER_PROMPT_1:
75
- user_message_1 += AFTER_PROMPT_1
76
- messages_1.append(ChatMessage(role="user", content=user_message_1))
77
- response_1 = llm1.chat(messages_1).message.content
78
-
79
- # Second LLM Chain
80
- messages_2 = []
81
- if SYSTEM_MESSAGE_2: # Add system message only if it's not empty
82
- messages_2.append({"role": "system", "content": SYSTEM_MESSAGE_2})
83
- # Prepare the user message for LLM 2
84
- user_message_2 = ""
85
- if PREP_PROMPT_2:
86
- user_message_2 += PREP_PROMPT_2
87
- user_message_2 += response_1
88
- messages_2.append({"role": "user", "content": user_message_2})
89
- completion = client.chat.completions.create(model=LLM2_MODEL, temperature=OpenAItemp, messages=messages_2)
90
- response_2 = completion.choices[0].message.content
91
-
92
- return response_2 # Return only the response from LLM 2
93
-
94
-
95
- ### FINAL CONTENT + DO NOT SEND EMPTY VALUES ###
96
- '''
97
- def chat_with_llms(message):
98
- if not message.strip():
99
- return "No input provided. Please enter a message."
100
-
101
- # First LLM Chain
102
- messages_1 = []
103
- if SYSTEM_MESSAGE_1: # Add system message only if it's not empty
104
- messages_1.append(ChatMessage(role="system", content=SYSTEM_MESSAGE_1))
105
- messages_1.append(ChatMessage(role="user", content=(PREP_PROMPT_1 if PREP_PROMPT_1 else "") + message + (AFTER_PROMPT_1 if AFTER_PROMPT_1 else "")))
106
- response_1 = llm1.chat(messages_1).message.content
107
-
108
- # Second LLM Chain
109
- messages_2 = []
110
- if SYSTEM_MESSAGE_2: # Add system message only if it's not empty
111
- messages_2.append({"role": "system", "content": SYSTEM_MESSAGE_2})
112
- messages_2.append({"role": "user", "content": (PREP_PROMPT_2 if PREP_PROMPT_2 else "") + response_1})
113
- completion = client.chat.completions.create(model="gpt-4", temperature=OpenAItemp, messages=messages_2)
114
- response_2 = completion.choices[0].message.content
115
-
116
- return response_2 # Return only the response from LLM 2
117
-
118
- ##SHOW FULL INPUTS and OUTPUTS FOR LLM1 AND 2 and exclude any empty values from inputs##
119
- def chat_with_llms(message):
120
- if not message.strip():
121
- return "No input provided. Please enter a message."
122
-
123
- # First LLM Chain
124
- messages_1 = []
125
- if SYSTEM_MESSAGE_1: # Add system message only if it's not empty
126
- messages_1.append(ChatMessage(role="system", content=SYSTEM_MESSAGE_1))
127
- messages_1.append(ChatMessage(role="user", content=(PREP_PROMPT_1 if PREP_PROMPT_1 else "") + message + (AFTER_PROMPT_1 if AFTER_PROMPT_1 else "")))
128
- response_1 = llm1.chat(messages_1).message.content
129
-
130
- # Full message chain for LLM 1
131
- full_message_chain_llm1 = 'LLM 1 Conversation:\n'
132
- for msg in messages_1:
133
- full_message_chain_llm1 += f"{msg.role.title()}: {msg.content}\n"
134
- full_message_chain_llm1 += f"Assistant: {response_1}\n\n"
135
-
136
- # Second LLM Chain
137
- messages_2 = []
138
- if SYSTEM_MESSAGE_2: # Add system message only if it's not empty
139
- messages_2.append({"role": "system", "content": SYSTEM_MESSAGE_2})
140
- messages_2.append({"role": "user", "content": (PREP_PROMPT_2 if PREP_PROMPT_2 else "") + response_1})
141
- completion = client.chat.completions.create(model="gpt-4", temperature=OpenAItemp, messages=messages_2)
142
- response_2 = completion.choices[0].message.content
143
-
144
- # Full message chain for LLM 2
145
- full_message_chain_llm2 = 'LLM 2 Conversation:\n'
146
- for msg in messages_2:
147
- full_message_chain_llm2 += f"{msg['role'].title()}: {msg['content']}\n"
148
- full_message_chain_llm2 += f"Assistant: {response_2}\n"
149
-
150
- return full_message_chain_llm1 + full_message_chain_llm2
151
-
152
- ### SHOW FULL LLMS INPUTS ##
153
- def chat_with_llms(message):
154
- # Check if the input message is empty or only contains whitespace
155
- if not message.strip():
156
- return "No input provided. Please enter a message."
157
-
158
- # First LLM Chain
159
- messages_1 = [ChatMessage(role="system", content=SYSTEM_MESSAGE_1), ChatMessage(role="user", content=PREP_PROMPT_1 + message + AFTER_PROMPT_1)]
160
- response_1 = llm1.chat(messages_1).message.content
161
-
162
- # Build the full message chain for LLM 1
163
- full_message_chain_llm1 = 'LLM 1 Conversation:\n'
164
- for msg in messages_1:
165
- full_message_chain_llm1 += f"{msg.role.title()}: {msg.content}\n"
166
- full_message_chain_llm1 += f"Assistant: {response_1}\n\n"
167
-
168
- # Second LLM Chain
169
- messages_2 = [
170
- {"role": "system", "content": SYSTEM_MESSAGE_2},
171
- {"role": "user", "content": PREP_PROMPT_2 + response_1}
172
- ]
173
- completion = client.chat.completions.create(model="gpt-4", temperature=OpenAItemp, messages=messages_2)
174
- response_2 = completion.choices[0].message.content
175
-
176
- # Build the full message chain for LLM 2
177
- full_message_chain_llm2 = 'LLM 2 Conversation:\n'
178
- for msg in messages_2:
179
- full_message_chain_llm2 += f"{msg['role'].title()}: {msg['content']}\n"
180
- full_message_chain_llm2 += f"Assistant: {response_2}\n"
181
-
182
- # Return the full conversation histories of both LLMs
183
- return full_message_chain_llm1 + full_message_chain_llm2
184
-
185
- '''
186
-
187
- # Gradio interface for updating LLM 1 variables and model
188
- llm1_interface = gr.Interface(
189
- fn=update_variables_and_model_llm1,
190
- inputs=[
191
- gr.Dropdown(choices=MODEL_CHOICES_LLM1, label="LLM 1 - PPX - Models", value=DEFAULT_LLM1_MODEL), # Dropdown for LLM1
192
- "text", "text", "text"
193
- ],
194
- outputs="text",
195
- title="Update Variables and Model for LLM 1"
196
- )
197
- reset_llm1_interface = gr.Interface(
198
- fn=reset_variables_llm1,
199
- inputs=[],
200
- outputs="text",
201
- title="Reset Variables for LLM 1"
202
- )
203
-
204
-
205
- # Gradio interface for updating LLM 2 variables and model
206
- llm2_interface = gr.Interface(
207
- fn=update_variables_and_model_llm2,
208
- inputs=[
209
- gr.Dropdown(choices=MODEL_CHOICES_LLM2, label="LLM 2 - OpenAI - Models", value=DEFAULT_LLM2_MODEL), # Dropdown for LLM2
210
- "text", "text"
211
- ],
212
  outputs="text",
213
- title="Update Variables and Model for LLM 2"
 
214
  )
215
- reset_llm2_interface = gr.Interface(
216
- fn=reset_variables_llm2,
217
- inputs=[],
218
- outputs="text",
219
- title="Reset Variables for LLM 2"
220
- )
221
-
222
- # Update Gradio chat interface
223
- chat_interface = gr.Interface(
224
- fn=chat_with_llms,
225
- inputs=["text"],
226
- outputs="text",
227
- examples=[["Hello!"]]
228
- ).queue()
229
 
230
- # Tabbed interface
231
- gr.TabbedInterface(
232
- [chat_interface, llm1_interface, reset_llm1_interface, llm2_interface, reset_llm2_interface],
233
- ["Chat", "LLM 1 - PPX", "Reset LLM 1", "LLM 2 - OpenAI", "Reset LLM 2"]
234
- ).launch()
 
1
  import os
2
+ from crewai import Agent, Task, Crew, Process
3
+ from langchain.tools import DuckDuckGoSearchRun
4
+ from langchain.agents import Tool
5
  import gradio as gr
 
 
 
6
 
7
+ # Initialize CoinGecko API for cryptocurrency data
8
+ duckduckgo_search_tool = DuckDuckGoSearchRun()
9
+
10
+ from langchain_google_genai import ChatGoogleGenerativeAI
11
+ # Set up API keys and environment variables
12
+ api_gemini = 'AIzaSyB4feCeVxvjfd6a6L1jtaV_NHUMSh0MGQk' # Replace with your actual API key
13
+ os.environ["api_gemini"] = api_gemini
14
+ # Define the Large Language Model (LLM)
15
+ llm = ChatGoogleGenerativeAI(model="gemini-pro", verbose=True, temperature=0.1, google_api_key=api_gemini)
16
+
17
+ def create_crewai_crypto_setup(crypto_symbol):
18
+ # Define the Main Research Agent for cryptocurrency analysis
19
+ research_agent = Agent(
20
+ role="Main Research Agent",
21
+ goal=f"Conduct thorough research on {crypto_symbol}, covering its market performance, technical analysis, and recent developments.",
22
+ backstory="An expert in cryptocurrency analysis, skilled in evaluating digital currencies based on technical indicators, market trends, and blockchain developments.",
23
+ verbose=True,
24
+ allow_delegation=False,
25
+ tools=[duckduckgo_search_tool],
26
+ llm=llm,
27
+ )
28
+
29
+ # Define the Report Formatting Agent for creating investment plans
30
+ report_formatting_agent = Agent(
31
+ role="Investment Plan Creator",
32
+ goal="Compile and format the researched information into a structured and actionable investment plan for the cryptocurrency.",
33
+ backstory="Specializes in synthesizing market analysis into clear, actionable investment strategies for cryptocurrency investors.",
34
+ verbose=True,
35
+ allow_delegation=False,
36
+ llm=llm,
37
+ )
38
+
39
+ # Define Tasks for the Main Research Agent focused on cryptocurrency
40
+ task1 = Task(
41
+ description=f"Analyze the price history and current market trends of {crypto_symbol}.",
42
+ agent=research_agent,
43
+ )
44
+
45
+ task2 = Task(
46
+ description=f"Conduct technical analysis on {crypto_symbol}, focusing on key indicators like RSI, MACD, and support/resistance levels.",
47
+ agent=research_agent,
48
+ )
49
+
50
+ task3 = Task(
51
+ description=f"Review recent news, developments, and community sentiment around {crypto_symbol}.",
52
+ agent=research_agent,
53
+ )
54
+
55
+ task4 = Task(
56
+ description=f"Identify potential investment opportunities and risks for {crypto_symbol}.",
57
+ agent=research_agent,
58
+ )
59
+
60
+ # Define Task for the Report Formatting Agent to create a comprehensive investment plan
61
+ task5 = Task(
62
+ description=f"""Create a detailed investment plan for {crypto_symbol}, incorporating price targets, technical analysis, investment strategies, and timelines.
63
+ Ensure the plan includes actionable advice on when to buy, sell, and hold, along with risk management strategies.""",
64
+ agent=report_formatting_agent,
65
+ )
66
+ task6 = Task(
67
+ description=f"""Condense the detailed investment plan for {crypto_symbol} into a concise, note-like format. Ensure the output highlights key price targets, technical indicators, and strategic advice within a 2000-character limit. Focus on succinctness and clarity, preserving the most critical investment insights.""",
68
+ agent=report_formatting_agent,
69
+ )
70
+
71
+ # Update the Crew to include the new condensing task
72
+ crypto_crew = Crew(
73
+ agents=[research_agent, report_formatting_agent],
74
+ tasks=[task1, task2, task3, task4, task5, task6], # Include the new task in the task list
75
+ verbose=2,
76
+ process=Process.sequential,
77
+ )
78
+
79
+
80
+ crew_result = crypto_crew.kickoff()
81
+ return crew_result
82
+
83
+ # Gradio Interface for the cryptocurrency investment plan app
84
+ def run_crewai_crypto_app(crypto_symbol):
85
+ crew_result = create_crewai_crypto_setup(crypto_symbol)
86
+ return crew_result
87
+
88
+ iface = gr.Interface(
89
+ fn=run_crewai_crypto_app,
90
+ inputs="text",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
91
  outputs="text",
92
+ title="CrewAI Cryptocurrency Investment Plan",
93
+ description="Enter a cryptocurrency symbol to analyze and generate a comprehensive investment plan."
94
  )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
95
 
96
+ iface.launch()