diegocp01 committed (verified)
Commit 9a1e263 · Parent(s): c28dccc

Update app.py

Files changed (1): app.py (+70 -19)
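
Both tools introduced by this commit follow the same fan-out, collect, and synthesize pattern over openai.chat.completions.create: send one request per perspective or persona, gather the partial answers, then ask the model to rank or merge them. As a rough orientation before the diff, here is a condensed sketch of that pattern; the helper fan_out_and_synthesize is purely illustrative and does not appear in app.py.

import os
import openai

openai.api_key = os.getenv("OPENAI_API_KEY")

def fan_out_and_synthesize(prompts: list[str], synthesis_instruction: str) -> str:
    """Illustrative helper (not part of the commit): query the model once per
    prompt, then ask it to merge the partial answers into a single response."""
    partials = []
    for prompt in prompts:
        # One request per perspective/persona
        response = openai.chat.completions.create(
            model="gpt-4o-mini",
            messages=[{"role": "user", "content": prompt}],
        )
        partials.append(response.choices[0].message.content)
    # Second pass: ask the model to rank or synthesize the collected answers
    merged = "\n".join(partials)
    final = openai.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": f"{synthesis_instruction}\n{merged}"}],
    )
    return final.choices[0].message.content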
app.py CHANGED
@@ -20,37 +20,88 @@ openai.api_key = os.getenv('OPENAI_API_KEY')
 # Below is the new deadline calculator tool
 
 @tool
-def chatgpt_conversation(prompt: str) -> str:
-    """A tool that interacts with the OpenAI API to simulate a conversation with a dynamic prompt.
+def multi_perspective_brainstorming(query: str) -> str:
+    """A tool that generates and ranks creative ideas by simulating a brainstorming swarm of AI instances.
 
     Args:
-        prompt: The message to ChatGPT.
+        query: An open-ended query to brainstorm (e.g., 'Generate marketing ideas for a coffee shop').
     Returns:
-        The assistant's response to the prompt.
+        A prioritized list of the top ideas synthesized from multiple perspectives.
     """
-    response = openai.chat.completions.create(
+    # Define brainstorming perspectives
+    perspectives = [
+        {"focus": "Social Media", "prompt": f"Generate 3 creative marketing ideas for a coffee shop focused on social media: {query}"},
+        {"focus": "Loyalty Programs", "prompt": f"Generate 3 creative marketing ideas for a coffee shop focused on loyalty programs: {query}"},
+        {"focus": "Sustainability", "prompt": f"Generate 3 creative marketing ideas for a coffee shop focused on sustainability: {query}"},
+    ]
+
+    # Collect ideas from each perspective
+    all_ideas = []
+    for perspective in perspectives:
+        response = openai.chat.completions.create(
+            model="gpt-4o-mini",
+            messages=[
+                {"role": "user", "content": perspective["prompt"]}
+            ],
+        )
+        ideas = response.choices[0].message.content.split("\n")  # Assume ideas are newline-separated
+        all_ideas.extend([f"{perspective['focus']}: {idea.strip()}" for idea in ideas if idea.strip()])
+
+    # Rank the ideas by simulating a consensus
+    ranking_prompt = (
+        f"From the following list of ideas, rank the top 5 based on creativity, feasibility, and impact for '{query}':\n"
+        f"{'\n'.join(all_ideas)}"
+    )
+    ranked_response = openai.chat.completions.create(
         model="gpt-4o-mini",
         messages=[
-            {"role": "user", "content": prompt}
+            {"role": "user", "content": ranking_prompt}
         ],
     )
-    return response.choices[0].message.content
+
+    return ranked_response.choices[0].message.content
 
 
 @tool
-def get_current_time_in_timezone(timezone: str) -> str:
-    """A tool that fetches the current local time in a specified timezone.
+def realtime_collaborative_assistant(query: str) -> str:
+    """A tool that simulates a roundtable discussion with AI experts to provide a well-rounded response.
+
     Args:
-        timezone: A string representing a valid timezone (e.g., 'America/New_York').
+        query: The user’s question or topic to discuss (e.g., 'How can I improve my website’s UX?').
+    Returns:
+        A synthesized response combining insights from multiple AI perspectives.
     """
-    try:
-        # Create timezone object
-        tz = pytz.timezone(timezone)
-        # Get current time in that timezone
-        local_time = datetime.datetime.now(tz).strftime("%Y-%m-%d %H:%M:%S")
-        return f"The current local time in {timezone} is: {local_time}"
-    except Exception as e:
-        return f"Error fetching time for timezone '{timezone}': {str(e)}"
+    # Define expert personas with distinct roles
+    experts = [
+        {"role": "UX Designer", "prompt": f"As a UX designer, provide practical suggestions for: {query}"},
+        {"role": "Marketing Strategist", "prompt": f"As a marketing strategist, suggest how to approach: {query}"},
+        {"role": "Tech Analyst", "prompt": f"As a tech analyst, offer technical insights on: {query}"},
+    ]
+
+    # Collect responses from each AI expert
+    expert_opinions = []
+    for expert in experts:
+        response = openai.chat.completions.create(
+            model="gpt-4o-mini",
+            messages=[
+                {"role": "user", "content": expert["prompt"]}
+            ],
+        )
+        expert_opinions.append(f"{expert['role']}: {response.choices[0].message.content}")
+
+    # Synthesize the responses into a cohesive answer
+    synthesis_prompt = (
+        f"Synthesize the following expert opinions into a concise, well-rounded response to the query '{query}':\n"
+        f"{'\n'.join(expert_opinions)}"
+    )
+    final_response = openai.chat.completions.create(
+        model="gpt-4o-mini",
+        messages=[
+            {"role": "user", "content": synthesis_prompt}
+        ],
+    )
+
+    return final_response.choices[0].message.content
 
 
  final_answer = FinalAnswerTool()
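
The two tool definitions above can be smoke-tested outside the agent loop by calling them directly. The snippet below is a suggestion rather than part of the commit; it assumes OPENAI_API_KEY is set and relies on smolagents @tool objects being directly callable. Note that the f-string expressions embedding '\n'.join(...) require Python 3.12 or newer, since earlier interpreters reject backslashes inside f-string replacement fields.

if __name__ == "__main__":
    # Hypothetical smoke test (not in the commit): run each tool once and
    # print the raw model output for manual inspection.
    print(multi_perspective_brainstorming("Generate marketing ideas for a coffee shop"))
    print(realtime_collaborative_assistant("How can I improve my website's UX?"))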
 
@@ -74,7 +125,7 @@ with open("prompts.yaml", 'r') as stream:
 
 agent = CodeAgent(
     model=model,
-    tools=[final_answer, get_current_time_in_timezone, chatgpt_conversation], ## add your tools here (don't remove final answer)
+    tools=[final_answer, multi_perspective_brainstorming, realtime_collaborative_assistant], ## add your tools here (don't remove final answer)
     max_steps=6,
     verbosity_level=1,
     grammar=None,
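
The rest of the template is unchanged, so the updated agent is driven the same way as before. A hedged usage sketch, assuming the surrounding app.py still builds model, loads prompts.yaml, and constructs the CodeAgent exactly as shown; agent.run is the usual smolagents entry point.

# Hypothetical invocation (not part of the commit): the CodeAgent decides when to
# call multi_perspective_brainstorming or realtime_collaborative_assistant and
# routes its result through the final_answer tool.
result = agent.run("Brainstorm and rank marketing ideas for a new coffee shop")
print(result)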