qqwjq1981 commited on
Commit
f70cfde
·
verified ·
1 Parent(s): 8022cc9

Upload 4 files

Browse files
Files changed (4) hide show
  1. action_map.json +16 -0
  2. app.py +918 -154
  3. curify_ideas_reasoning.json +70 -0
  4. qr.svg +1 -0
action_map.json ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "add_to_evernote": "add_to_evernote",
3
+ "find_reference": "find_reference",
4
+ "generate_summary": "generate_summary",
5
+ "suggest_relevance": "suggest_relevance",
6
+ "tool_research": "tool_research",
7
+ "generate_comparison_table": "generate_comparison_table",
8
+ "generate_integration_memo": "generate_integration_memo",
9
+ "analyze_issue": "analyze_issue",
10
+ "generate_issue_memo": "generate_issue_memo",
11
+ "list_ideas": "list_ideas",
12
+ "construct_matrix": "construct_matrix",
13
+ "prioritize_ideas": "prioritize_ideas",
14
+ "setup_action_plan": "setup_action_plan",
15
+ "unsupported_task": "unsupported_task"
16
+ }
app.py CHANGED
@@ -1,16 +1,23 @@
1
  #!/usr/bin/env python
2
  # coding: utf-8
3
 
4
- # In[14]:
5
 
6
 
7
- import subprocess
8
- import sys
9
 
10
- # Install dependencies if not already installed
11
- subprocess.check_call([sys.executable, "-m", "pip", "install", "gradio", "transformers", "sentence-transformers", "openai"])
12
 
13
- # In[16]:
 
 
 
 
 
 
 
 
 
 
14
 
15
  import os
16
  import yaml
@@ -28,8 +35,48 @@ import gradio as gr
28
 
29
  import json
30
 
 
 
 
 
 
 
 
31
 
32
- # In[17]:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
33
 
34
 
35
  # Read the YAML file
@@ -40,7 +87,383 @@ with open('./curify_api.yaml', 'r') as yaml_file:
40
  openai_api_key = data.get('openai').get('api_key')
41
  os.environ["OPENAI_API_KEY"] = openai_api_key
42
 
43
- # In[18]:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
 
45
 
46
  def convert_to_listed_json(input_string):
@@ -61,11 +484,9 @@ def convert_to_listed_json(input_string):
61
  return json_object
62
  except json.JSONDecodeError as e:
63
  return None
64
-
65
  return None
66
  #raise ValueError(f"Invalid JSON format: {e}")
67
 
68
-
69
  def validate_and_extract_json(json_string):
70
  """
71
  Validates the JSON string, extracts fields with possible variants using fuzzy matching.
@@ -99,92 +520,150 @@ def json_to_pandas(dat_json, dat_schema = {'name':"", 'description':""}):
99
  return dat_df
100
 
101
 
102
- # In[19]:
103
-
104
-
105
- from transformers import pipeline
106
-
107
- summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
108
-
109
- def summarize_content(text):
110
- summary = summarizer(text, max_length=350, min_length=40, do_sample=False)
111
- return summary[0]['summary_text']
112
-
113
-
114
- # In[20]:
115
 
116
 
117
  client = OpenAI(
118
  api_key= os.environ.get("OPENAI_API_KEY"), # This is the default and can be omitted
119
  )
120
 
121
- # Send the prompt to the OpenAI API
122
- def call_openai_api(prompt):
123
- response = client.chat.completions.create(
124
- model="gpt-4o",
125
- messages=[{"role": "system", "content": "You are a helpful assistant."},
126
- {"role": "user", "content": prompt}],
127
- max_tokens=5000
128
- )
129
 
130
- return response.choices[0].message.content.strip()
 
 
 
 
 
131
 
132
- def fn_task_analysis(project_context, task_description):
133
- prompt = (
134
- f"You are working in the context of {project_context}. "
135
- f"Your task is to analyze the task and break down into reasoning steps: {task_description}"
136
- "For analyzer, please analyze 1) which project this item belongs to. It's possible that the idea may be a personal reflection or random thoughts, not in an existing project."
137
- "2) whether this idea is concrete todo or vague."
138
- "3) what is the category of the task."
139
- "Please output in JSON with description, project_association, is_concrete, task_category as keys."
140
- )
141
- return call_openai_api(prompt)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
142
 
143
- # Function to break down a task (e.g., Research Paper Review) and create a reasoning path
144
- def generate_reasoning_path(project_context, task_description):
145
- res_task_analysis = fn_task_analysis(project_context, task_description)
146
  prompt = (
147
  f"You are working in the context of {project_context}. "
148
- f"Your task is to analyze the task and break down into reasoning steps: {task_description}"
149
- f"Please use the results of task analysis: {res_task_analysis}. "
150
- "Guideline for breaking down the task: "
151
- "If the task is to build some features, I would like to receive a prioritized design doc and Gradio-based feature demo as deliverable."
152
- "If the task is related to some blogs, papers, talks, I would like you to find the exact reference, generate a summary and convert it to a podcast."
153
- "If the message has some reflections about feelings or opinions, please translate to English, polish it and publish it onto substack."
154
- "If the task is tool research, reason about if it is a competitive tool or an integration tool. For competitive tools, generate a table to compare the tool and our relevant tool. For integration tools, decide on possible integration."
155
- "If the task is questionnaire or interview, please deliver a questionnaire design."
156
- "If the message is pointing to some personal or project issues, please use the framework and write a brief memo: a) What Happened? — Understanding the Problem. b) What Can We Do About It? — Generating Solutions c) So What? — Evaluating the Impact and Moving Forward. "
157
- "For idea brainstorming, I expect you to list potential ideas, construct the Feasibility Matrix or Impact/Effort Matrix, prioritize these ideas, setup an action plan with todos, build the prototype."
158
- "Please output the action and priority of each step, you do not need to give explanation."
159
- "Please ignore the low priority steps in the output."
160
- "Please output the reasoning steps in JSON with reasoning_steps as key."
 
 
 
161
  )
162
-
163
- res_steps = call_openai_api(prompt)
164
- #return res_task_analysis, res_steps
165
 
166
  try:
167
  json_task_analysis = validate_and_extract_json(res_task_analysis)
168
- json_steps = validate_and_extract_json(res_steps)
169
 
170
- return json_task_analysis, json_steps
171
  except ValueError as e:
172
- return None, None
 
173
 
174
- # Function to store the reasoning path as JSON and use it for task execution
175
- def store_and_execute_task(task_description, reasoning_path, json_key = 'reasoning_steps'):
176
- if reasoning_path is not None and isinstance(reasoning_path, dict) and json_key in reasoning_path:
177
 
178
- reasoning_steps = reasoning_path[json_key]
179
- # Example logic to simulate execution (this is just a placeholder)
180
- # for step in task_steps:
181
- # step["status"] = "completed" # Mark as completed after execution
182
-
183
- return reasoning_steps
184
- return None
185
 
186
 
187
- # In[21]:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
188
 
189
 
190
  # Initialize dataframes for the schema
@@ -238,112 +717,397 @@ def df_to_string(df, empty_message = ''):
238
  return df.to_string(index=False)
239
 
240
 
241
- # In[22]:
 
242
 
 
 
243
 
244
- def curify_ideas(project_description, task_description):
 
 
 
 
 
245
 
246
- # May need a task split step that semantically splits the task.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
247
 
248
  str_projects = extract_ideas('AI-powered tools for productivity', project_description)
249
  json_projects = convert_to_listed_json(str_projects)
250
 
251
- # Generate reasoning path
252
- task_analysis, reasoning_path = generate_reasoning_path(project_description, task_description)
253
-
254
- # Store and simulate execution of task
255
- task_data = store_and_execute_task(task_description, reasoning_path)
256
 
257
- return json_to_pandas(json_projects), json_to_pandas(task_data), task_analysis
258
 
 
259
 
260
- # In[23]:
261
 
 
262
 
263
- project_description = 'work on a number of projects including curify (digest, ideas, careers, projects etc), and writing a book on LLM for recommendation system, educating my 3.5-year-old boy and working on a paper for LLM reasoning.'
264
 
265
- # convert_to_listed_json(extract_ideas('AI-powered tools for productivity', project_description))
 
266
 
267
- task_description = 'Build an interview bot for the curify digest project.'
268
- task_analysis, reasoning_path = generate_reasoning_path(project_description, task_description)
 
 
 
 
269
 
270
- store_and_execute_task(task_description, reasoning_path)
 
 
 
 
 
271
 
 
 
 
 
 
 
 
 
 
 
272
 
273
- # In[ ]:
274
 
 
 
275
 
276
- # Gradio Demo
277
- with gr.Blocks(
278
- css="""
279
- .gradio-table td {
280
- white-space: normal !important;
281
- word-wrap: break-word !important;
282
- }
283
- .gradio-table {
284
- width: 100% !important; /* Adjust to 100% to fit the container */
285
- table-layout: fixed !important; /* Fixed column widths */
286
- overflow-x: hidden !important; /* Disable horizontal scrolling */
287
- }
288
- .gradio-container {
289
- overflow-x: hidden !important; /* Disable horizontal scroll for entire container */
290
- padding: 0 !important; /* Remove any default padding */
291
- }
292
- .gradio-column {
293
- max-width: 100% !important; /* Ensure columns take up full width */
294
- overflow: hidden !important; /* Hide overflow to prevent horizontal scroll */
295
- }
296
- .gradio-row {
297
- overflow-x: hidden !important; /* Prevent horizontal scroll on rows */
298
- }
299
- """
300
- ) as demo:
301
-
302
- gr.Markdown("## Curify: Unified AI Tools for Productivity")
 
 
 
 
 
 
 
 
 
 
303
 
304
- with gr.Tab("Curify Idea"):
305
- with gr.Row():
306
- # Column 1: Webpage rendering
307
- with gr.Column():
308
- gr.Markdown("## Enter project descriptions.")
309
-
310
- project_input = gr.Textbox(
311
- placeholder="Describe your project...",
312
- label=None,
313
- lines=5)
314
-
315
- gr.Markdown("## Enter task message.")
316
- idea_input = gr.Textbox(
317
- label=None,
318
- placeholder="Describe the task you want to execute (e.g., Research Paper Review)")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
319
 
320
- task_btn = gr.Button("Generating task steps...")
 
 
 
 
 
 
 
321
 
322
- gr.Markdown("## Projects Overview")
323
- project_list = gr.DataFrame(
324
- type="pandas"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
325
  )
326
 
327
- # Column 2: Summary and Perspectives
328
- with gr.Column():
329
- gr.Markdown("## Task analysis")
330
- task_analysis_txt = gr.Textbox(
331
- label=None,
332
- placeholder="Here is an analysis of your task...",
333
- lines=3)
334
-
335
- gr.Markdown("## Execution path")
336
- task_steps = gr.DataFrame(
337
- type="pandas"
338
  )
339
 
340
- task_btn.click(
341
- curify_ideas,
342
- inputs=[project_input, idea_input],
343
- outputs=[project_list, task_steps, task_analysis_txt]
344
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
345
 
346
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
347
 
348
 
349
  # In[ ]:
 
1
  #!/usr/bin/env python
2
  # coding: utf-8
3
 
4
+ # In[1]:
5
 
6
 
7
+ pip install gradio newspaper3k transformers sentence-transformers openai todoist-api-python flask twilio fastapi uvicorn ffmpy google-cloud-storage fpdf
 
8
 
 
 
9
 
10
+ # In[2]:
11
+
12
+
13
+ #pip install evernote-sdk-python3
14
+ # import evernote.edam.notestore.NoteStore as NoteStore
15
+ # import evernote.edam.type.ttypes as Types
16
+ # from evernote.api.client import EvernoteClient
17
+
18
+
19
+ # In[3]:
20
+
21
 
22
  import os
23
  import yaml
 
35
 
36
  import json
37
 
38
+ import sqlite3
39
+ import uuid
40
+ import socket
41
+ import difflib
42
+ import time
43
+ import shutil
44
+ import requests
45
 
46
+ import json
47
+ import markdown
48
+ from fpdf import FPDF
49
+ import hashlib
50
+
51
+ from transformers import pipeline
52
+ from transformers.pipelines.audio_utils import ffmpeg_read
53
+
54
+ from todoist_api_python.api import TodoistAPI
55
+
56
+ # from flask import Flask, request, jsonify
57
+ from twilio.rest import Client
58
+
59
+ import asyncio
60
+ import uvicorn
61
+ from fastapi import FastAPI, Request, HTTPException
62
+ from fastapi.responses import HTMLResponse, JSONResponse
63
+ import nest_asyncio
64
+ from twilio.twiml.messaging_response import MessagingResponse
65
+
66
+ from requests.auth import HTTPBasicAuth
67
+
68
+ from google.cloud import storage, exceptions # Import exceptions for error handling
69
+ from google.cloud.exceptions import NotFound
70
+
71
+
72
+ import logging
73
+
74
+ # Configure logging
75
+ logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
76
+ logger = logging.getLogger(__name__)
77
+
78
+
79
+ # In[4]:
80
 
81
 
82
  # Read the YAML file
 
87
  openai_api_key = data.get('openai').get('api_key')
88
  os.environ["OPENAI_API_KEY"] = openai_api_key
89
 
90
+ # Access the API keys and other configuration data
91
+ todoist_api_key = data.get('todoist').get('api_key')
92
+
93
+ EVERNOTE_API_TOKEN = data.get('evernote').get('api_key')
94
+
95
+ account_sid = data.get('twilio').get('account_sid')
96
+ auth_token = data.get('twilio').get('auth_token')
97
+ twilio_phone_number = data.get('twilio').get('phone_number')
98
+
99
+ twillo_client = Client(account_sid, auth_token)
100
+
101
+ # Set the GOOGLE_APPLICATION_CREDENTIALS environment variable
102
+ os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = data.get('google_gcs').get('key_file_path')
103
+
104
+ # Load Reasoning Graph JSON File
105
+ def load_reasoning_json(filepath):
106
+ """Load JSON file and return the dictionary."""
107
+ with open(filepath, "r") as file:
108
+ data = json.load(file)
109
+ return data
110
+
111
# Load Action Map
def load_action_map(filepath):
    """Read the action-map JSON file and resolve each function name to a callable.

    Every value in the JSON must be the name of a function defined at module
    level, since resolution goes through globals().
    """
    with open(filepath, "r") as fh:
        raw_map = json.load(fh)
    resolved = {}
    for action, func_name in raw_map.items():
        resolved[action] = globals()[func_name]
    return resolved
118
+
119
+
120
+ # In[5]:
121
+
122
+
123
# Define all actions as functions

def find_reference(task_topic):
    """Mock action: look up a reference for *task_topic*."""
    print(f"Finding reference for topic: {task_topic}")
    return f"Reference found for topic: {task_topic}"


def generate_summary(reference):
    """Mock action: produce a summary of *reference*."""
    print(f"Generating summary for reference: {reference}")
    return f"Summary of {reference}"


def suggest_relevance(summary):
    """Mock action: relate *summary* back to the project."""
    print(f"Suggesting relevance of summary: {summary}")
    return f"Relevance of {summary} suggested"


def tool_research(task_topic):
    """Mock action: research a tool and return canned analysis data."""
    print("Performing tool research")
    return "Tool analysis data"


def generate_comparison_table(tool_analysis):
    """Mock action: build a comparison table for a competitive tool."""
    print(f"Generating comparison table for analysis: {tool_analysis}")
    return f"Comparison table for {tool_analysis}"


def generate_integration_memo(tool_analysis):
    """Mock action: draft an integration memo for a tool."""
    print(f"Generating integration memo for analysis: {tool_analysis}")
    return f"Integration memo for {tool_analysis}"


def analyze_issue(task_topic):
    """Mock action: analyze an issue and return canned analysis data."""
    print("Analyzing issue")
    return "Issue analysis data"


def generate_issue_memo(issue_analysis):
    """Mock action: write a memo from *issue_analysis*."""
    print(f"Generating issue memo for analysis: {issue_analysis}")
    return f"Issue memo for {issue_analysis}"


def list_ideas(task_topic):
    """Mock action: brainstorm a fixed list of ideas."""
    print("Listing ideas")
    return ["Idea 1", "Idea 2", "Idea 3"]


def construct_matrix(ideas):
    """Mock action: place *ideas* on an impact/effort matrix."""
    print(f"Constructing matrix for ideas: {ideas}")
    return {"Idea 1": "High Impact/Low Effort", "Idea 2": "Low Impact/High Effort", "Idea 3": "High Impact/High Effort"}


def prioritize_ideas(matrix):
    """Mock action: rank the ideas described by *matrix*."""
    print(f"Prioritizing ideas based on matrix: {matrix}")
    return ["Idea 3", "Idea 1", "Idea 2"]


def setup_action_plan(prioritized_ideas):
    """Mock action: turn *prioritized_ideas* into an action plan."""
    print(f"Setting up action plan for ideas: {prioritized_ideas}")
    return f"Action plan created for {prioritized_ideas}"


def unsupported_task(task_topic):
    """Fallback action for tasks no workflow branch supports."""
    print("Task not supported")
    return "Unsupported task"
189
+
190
+
191
+ # In[6]:
192
+
193
+
194
+ todoist_api = TodoistAPI(todoist_api_key)
195
+
196
# Fetch recent Todoist task
def fetch_todoist_task():
    """Return a one-line description of the most recent Todoist task.

    Returns an error string (never raises) if the API call fails.
    """
    try:
        all_tasks = todoist_api.get_tasks()
        if not all_tasks:
            return "No tasks found in Todoist."
        # The first entry is the most recent task.
        newest = all_tasks[0]
        return f"Recent Task: {newest.content}"
    except Exception as e:
        return f"Error fetching tasks: {str(e)}"
206
+
207
def add_to_todoist(task_topic, todoist_priority = 3):
    """Create a Todoist task for *task_topic* at the given priority.

    Returns a human-readable status message; failures are reported as a
    message rather than raised, so the calling workflow can continue.
    """
    try:
        # Hand the task off to the Todoist API client.
        todoist_api.add_task(
            content=task_topic,
            priority=todoist_priority
        )
        msg = f"Task added: {task_topic} with priority {todoist_priority}"
        logger.debug(msg)
        return msg
    except Exception as e:
        return f"An error occurred: {e}"
222
+
223
+ # def save_todo(reasoning_steps):
224
+ # """
225
+ # Save reasoning steps to Todoist as tasks.
226
+
227
+ # Args:
228
+ # reasoning_steps (list of dict): A list of steps with "step" and "priority" keys.
229
+ # """
230
+ # try:
231
+ # # Validate that reasoning_steps is a list
232
+ # if not isinstance(reasoning_steps, list):
233
+ # raise ValueError("The input reasoning_steps must be a list.")
234
+
235
+ # # Iterate over the reasoning steps
236
+ # for step in reasoning_steps:
237
+ # # Ensure each step is a dictionary and contains required keys
238
+ # if not isinstance(step, dict) or "step" not in step or "priority" not in step:
239
+ # logger.error(f"Invalid step data: {step}, skipping.")
240
+ # continue
241
+
242
+ # task_content = step["step"]
243
+ # priority_level = step["priority"]
244
+
245
+ # # Map priority to Todoist's priority levels (1 - low, 4 - high)
246
+ # priority_mapping = {"Low": 1, "Medium": 2, "High": 4}
247
+ # todoist_priority = priority_mapping.get(priority_level, 1) # Default to low if not found
248
+
249
+ # # Create a task in Todoist using the Todoist API
250
+ # # Assuming you have a function `todoist_api.add_task()` that handles the API request
251
+ # todoist_api.add_task(
252
+ # content=task_content,
253
+ # priority=todoist_priority
254
+ # )
255
+
256
+ # logger.debug(f"Task added: {task_content} with priority {priority_level}")
257
+
258
+ # return "All tasks processed."
259
+ # except Exception as e:
260
+ # # Return an error message if something goes wrong
261
+ # return f"An error occurred: {e}"
262
+
263
+
264
+ # In[7]:
265
+
266
+
267
+ # evernote_client = EvernoteClient(token=EVERNOTE_API_TOKEN, sandbox=False)
268
+ # note_store = evernote_client.get_note_store()
269
+
270
+ # def add_to_evernote(task_topic, notebook_title="Inspirations"):
271
+ # """
272
+ # Add a task topic to the 'Inspirations' notebook in Evernote. If the notebook doesn't exist, create it.
273
+
274
+ # Args:
275
+ # task_topic (str): The content of the task to be added.
276
+ # notebook_title (str): The title of the Evernote notebook. Default is 'Inspirations'.
277
+ # """
278
+ # try:
279
+ # # Check if the notebook exists
280
+ # notebooks = note_store.listNotebooks()
281
+ # notebook = next((nb for nb in notebooks if nb.name == notebook_title), None)
282
+
283
+ # # If the notebook doesn't exist, create it
284
+ # if not notebook:
285
+ # notebook = Types.Notebook()
286
+ # notebook.name = notebook_title
287
+ # notebook = note_store.createNotebook(notebook)
288
+
289
+ # # Search for an existing note with the same title
290
+ # filter = NoteStore.NoteFilter()
291
+ # filter.notebookGuid = notebook.guid
292
+ # filter.words = notebook_title
293
+ # notes_metadata_result = note_store.findNotesMetadata(filter, 0, 1, NoteStore.NotesMetadataResultSpec(includeTitle=True))
294
+
295
+ # # If a note with the title exists, append to it; otherwise, create a new note
296
+ # if notes_metadata_result.notes:
297
+ # note_guid = notes_metadata_result.notes[0].guid
298
+ # existing_note = note_store.getNote(note_guid, True, False, False, False)
299
+ # existing_note.content = existing_note.content.replace("</en-note>", f"<div>{task_topic}</div></en-note>")
300
+ # note_store.updateNote(existing_note)
301
+ # else:
302
+ # # Create a new note
303
+ # note = Types.Note()
304
+ # note.title = notebook_title
305
+ # note.notebookGuid = notebook.guid
306
+ # note.content = f'<?xml version="1.0" encoding="UTF-8"?>' \
307
+ # f'<!DOCTYPE en-note SYSTEM "http://xml.evernote.com/pub/enml2.dtd">' \
308
+ # f'<en-note><div>{task_topic}</div></en-note>'
309
+ # note_store.createNote(note)
310
+
311
+ # print(f"Task '{task_topic}' successfully added to Evernote under '{notebook_title}'.")
312
+ # except Exception as e:
313
+ # print(f"Error adding task to Evernote: {e}")
314
+
315
# Mock Functions for Task Actions
def add_to_evernote(task_topic):
    """Mock: pretend *task_topic* was saved to Evernote and return a confirmation."""
    return f"Task added to Evernote with title '{task_topic}'."
318
+
319
+
320
+ # In[8]:
321
+
322
+
323
+ # Access the API keys and other configuration data
324
+ TASK_WORKFLOW_TREE = load_reasoning_json(data.get('curify_ideas').get('reasoning_path'))
325
+ action_map = load_action_map(data.get('curify_ideas').get('action_map'))
326
+
327
+
328
+ # In[9]:
329
+
330
+
331
def generate_task_hash(task_description):
    """Return a stable MD5 hex digest of *task_description*.

    Non-string inputs are coerced to str first. On any unexpected failure the
    literal 'output' is returned so downstream file naming still works.
    """
    try:
        if not isinstance(task_description, str):
            logger.warning("task_description is not a string, attempting conversion.")
            task_description = str(task_description)

        # Encode defensively: silently drop characters that cannot be
        # represented in UTF-8.
        digest = hashlib.md5(task_description.encode("utf-8", errors="ignore")).hexdigest()
        logger.debug(f"Generated task hash: {digest}")
        return digest
    except Exception as e:
        # Fall back to a fixed name rather than propagating the error.
        logger.error(f"Error generating task hash: {e}", exc_info=True)
        return 'output'
348
+
349
def save_to_google_storage(bucket_name, file_path, destination_blob_name, expiration_minutes = 1440):
    """Upload *file_path* to a GCS bucket and return a time-limited signed URL.

    Creates the bucket if it does not exist; any other error is re-raised.
    """
    gcs_client = storage.Client()  # Initialize Google Cloud Storage client

    # Resolve the bucket, creating it on a NotFound miss.
    try:
        target_bucket = gcs_client.get_bucket(bucket_name)
    except NotFound:
        print(f"❌ Bucket '{bucket_name}' not found. Please check the bucket name.")
        target_bucket = gcs_client.create_bucket(bucket_name)
        print(f"✅ Bucket '{bucket_name}' created.")
    except Exception as e:
        print(f"❌ An unexpected error occurred: {e}")
        raise

    # Upload the local file to the destination blob.
    blob = target_bucket.blob(destination_blob_name)
    blob.upload_from_filename(file_path)

    # Produce a V4 signed URL so the file can be fetched without credentials.
    signed_url = blob.generate_signed_url(
        version="v4",
        expiration=timedelta(minutes=expiration_minutes),
        method="GET"
    )
    print(f"✅ File uploaded to Google Cloud Storage. Signed URL: {signed_url}")
    return signed_url
376
+
377
# Function to generate and save a document
def generate_document(task_description, md_content, user_name = 'jayw', bucket_name='curify'):
    """Render *md_content* (dict of section -> text or list) to a PDF and upload it.

    The file name is derived from an MD5 hash of *task_description*; the PDF is
    stored under a per-user folder in the bucket. Returns the signed URL.
    """
    logger.debug("starting to generate document")

    # Hash the task description so the file name is unique and reproducible.
    task_hash = generate_task_hash(task_description)
    truncated_hash = task_hash[:64]  # 64 characters is sufficient for uniqueness

    # Build the PDF locally first.
    local_filename = f"{truncated_hash}.pdf"
    pdf = FPDF()
    pdf.add_page()
    pdf.set_font("Arial", size=12)

    # Each dict entry becomes a bold header followed by its content.
    for key, value in md_content.items():
        pdf.set_font("Arial", style='B', size=12)
        pdf.multi_cell(0, 10, f"# {key}")

        pdf.set_font("Arial", size=12)
        if isinstance(value, list):
            for item in value:
                pdf.multi_cell(0, 10, f"- {item}")
        else:
            pdf.multi_cell(0, 10, value)

    pdf.output(local_filename)

    # Organize files into user-specific folders in the bucket.
    destination_blob_name = f"{user_name}/{truncated_hash}.pdf"

    # Upload to Google Cloud Storage and hand back the signed URL.
    public_url = save_to_google_storage(bucket_name, local_filename, destination_blob_name)
    logger.debug("finished generating document")
    return public_url
416
+
417
+
418
+ # In[10]:
419
+
420
+
421
def execute_with_retry(sql, params=(), attempts=5, delay=1, db_name = 'curify_ideas.db'):
    """Execute a single SQL statement, retrying while SQLite reports a lock.

    Retries up to *attempts* times, sleeping *delay* seconds between tries.
    Any OperationalError other than 'database is locked' — or a lock on the
    final attempt — is re-raised.
    """
    for attempt in range(attempts):
        try:
            with sqlite3.connect(db_name) as conn:
                conn.cursor().execute(sql, params)
                conn.commit()
            return
        except sqlite3.OperationalError as e:
            final_attempt = attempt == attempts - 1
            if "database is locked" in str(e) and not final_attempt:
                # Give the writer holding the lock a chance to finish.
                time.sleep(delay)
            else:
                raise e
434
+
435
def enable_wal_mode(db_name = 'curify_ideas.db'):
    """Switch the database to write-ahead logging for better concurrent access."""
    with sqlite3.connect(db_name) as conn:
        conn.cursor().execute("PRAGMA journal_mode=WAL;")
        conn.commit()
440
+
441
# Create SQLite DB and table
def create_db(db_name = 'curify_ideas.db'):
    """Create the sessions table (keyed by session_id + timestamp) if missing."""
    ddl = '''CREATE TABLE IF NOT EXISTS sessions (
                session_id TEXT,
                ip_address TEXT,
                project_desc TEXT,
                idea_desc TEXT,
                idea_analysis TEXT,
                prioritization_steps TEXT,
                timestamp DATETIME,
                PRIMARY KEY (session_id, timestamp)
            )
            '''
    # A generous timeout avoids spurious 'database is locked' failures.
    with sqlite3.connect(db_name, timeout=30) as conn:
        conn.cursor().execute(ddl)
        conn.commit()
457
+
458
# Function to insert session data into the SQLite database
def insert_session_data(session_id, ip_address, project_desc, idea_desc, idea_analysis, prioritization_steps, db_name = 'curify_ideas.db'):
    """Persist one session row; dict/list fields are JSON-serialized first."""
    row = (
        session_id,
        ip_address,
        project_desc,
        idea_desc,
        json.dumps(idea_analysis),
        json.dumps(prioritization_steps),
        datetime.now(),
    )
    execute_with_retry('''
    INSERT INTO sessions (session_id, ip_address, project_desc, idea_desc, idea_analysis, prioritization_steps, timestamp)
    VALUES (?, ?, ?, ?, ?, ?, ?)
    ''', row, db_name)
464
+
465
+
466
+ # In[11]:
467
 
468
 
469
  def convert_to_listed_json(input_string):
 
484
  return json_object
485
  except json.JSONDecodeError as e:
486
  return None
 
487
  return None
488
  #raise ValueError(f"Invalid JSON format: {e}")
489
 
 
490
  def validate_and_extract_json(json_string):
491
  """
492
  Validates the JSON string, extracts fields with possible variants using fuzzy matching.
 
520
  return dat_df
521
 
522
 
523
+ # In[12]:
 
 
 
 
 
 
 
 
 
 
 
 
524
 
525
 
526
  client = OpenAI(
527
  api_key= os.environ.get("OPENAI_API_KEY"), # This is the default and can be omitted
528
  )
529
 
530
# Function to call OpenAI API with compact error handling
def call_openai_api(prompt, model="gpt-4o", max_tokens=5000, retries=3, backoff_factor=2):
    """
    Send a prompt to the OpenAI API and handle potential errors robustly.

    Parameters:
    prompt (str): The user input or task prompt to send to the model.
    model (str): The OpenAI model to use (default is "gpt-4o").
    max_tokens (int): The maximum number of tokens in the response.
    retries (int): Number of retry attempts in case of transient errors.
    backoff_factor (int): Backoff time multiplier for retries.

    Returns:
    str: The model's response content if successful.

    Raises:
    RuntimeError: If no response could be obtained after `retries` attempts.
    """
    # NOTE(review): the exception classes below are referenced as `openai.*`,
    # which requires `import openai` at module level — verify it is present.
    for attempt in range(1, retries + 1):
        try:
            response = client.chat.completions.create(
                # BUG FIX: previously hard-coded "gpt-4o" and 5000 here,
                # silently ignoring the `model` and `max_tokens` parameters.
                model=model,
                messages=[{"role": "user", "content": prompt}],
                max_tokens=max_tokens,
            )
            return response.choices[0].message.content.strip()

        except (openai.RateLimitError, openai.APIConnectionError) as e:
            # Transient failure: log and retry with backoff.
            logging.warning(f"Transient error: {e}. Attempt {attempt} of {retries}. Retrying...")
        except (openai.BadRequestError, openai.AuthenticationError) as e:
            # Unrecoverable failure: stop retrying immediately.
            logging.error(f"Unrecoverable error: {e}. Check your inputs or API key.")
            break
        except Exception as e:
            logging.error(f"Unexpected error: {e}. Attempt {attempt} of {retries}. Retrying...")

        # Exponential backoff before retrying
        if attempt < retries:
            time.sleep(backoff_factor * attempt)

    raise RuntimeError(f"Failed to fetch response from OpenAI API after {retries} attempts.")
567
 
568
def fn_analyze_task(project_context, task_description):
    """Classify a task with the LLM and return the parsed analysis dict.

    Asks for project association, task-ness, document-ness, category and
    topic; returns None if the response cannot be validated as JSON.
    """
    analysis_prompt = (
        f"You are working in the context of {project_context}. "
        f"Your task is to analyze the task: {task_description} "
        "Please analyze the following aspects: "
        "1) Determine which project this item belongs to. If the idea does not belong to any existing project, categorize it under 'Other'. "
        "2) Assess whether this idea can be treated as a concrete task. "
        "3) Evaluate whether a document can be generated as an intermediate result. "
        "4) Identify the appropriate category of the task. Possible categories are: 'Blogs/Papers', 'Tools', 'Brainstorming', 'Issues', and 'Others'. "
        "5) Extract the topic of the task. "
        "Please provide the output in JSON format using the structure below: "
        "{"
        " \"description\": \"\", "
        " \"project_association\": \"\", "
        " \"is_task\": \"Yes/No\", "
        " \"is_document\": \"Yes/No\", "
        " \"task_category\": \"\", "
        " \"task_topic\": \"\" "
        "}"
    )
    raw_analysis = call_openai_api(analysis_prompt)

    try:
        return validate_and_extract_json(raw_analysis)
    except ValueError as e:
        logger.debug("ValueError occurred: %s", str(e), exc_info=True)  # Log the exception details
        return None
597
 
 
 
 
598
 
599
+ # In[13]:
 
 
 
 
 
 
600
 
601
 
602
# Recursive Task Executor
def fn_process_task(project_desc_table, task_description, bucket_name='curify'):
    """Analyze a task and execute the matching workflow tree.

    Args:
        project_desc_table: pandas DataFrame of known projects, rendered to
            text and used as LLM context.
        task_description: free-text task message from the user.
        bucket_name: storage bucket for generated artifacts (unused in this
            function; kept for interface compatibility).

    Returns:
        Tuple of (task_analysis dict, execution-status DataFrame,
        execution-results dict). Empty equivalents are returned when the
        analysis is empty or the traversal fails.
    """
    project_context = project_desc_table.to_string(index=False)
    task_analysis = fn_analyze_task(project_context, task_description)

    if not task_analysis:
        logger.error("Empty task analysis.")
        return {}, pd.DataFrame(), {}

    execution_status = []
    execution_results = task_analysis.copy()
    execution_results['deliverables'] = ''

    def traverse(node, previous_output=None):
        """Depth-first walk of one workflow node; mutates execution state."""
        if not node:  # None or empty node ends this branch
            return

        # Branch node: route on a value from the accumulated results.
        if "check" in node:
            # BUG FIX: use .get() — the original indexed
            # execution_results[node["check"]], which raises KeyError when
            # a check key (e.g. 'tool_type') is absent from the analysis,
            # aborting the whole traversal. A missing key now falls
            # through to the node's "default" branch.
            value = execution_results.get(node["check"])
            traverse(node.get(value, node.get("default")), previous_output)

        # Action node: run one step of the workflow.
        elif "action" in node:
            action_name = node["action"]
            input_key = node.get("input", 'task_topic')

            if input_key not in execution_results:
                logger.error(f"Workflow action {action_name} input key {input_key} not in execution_results.")
                return
            inputs = {input_key: execution_results[input_key]}

            logger.debug(f"Executing: {action_name} with inputs: {inputs}")

            # Execute the action function (unknown names fall back to
            # the unsupported_task handler).
            action_func = action_map.get(action_name, unsupported_task)
            output = action_func(**inputs)

            execution_status.append({"action": action_name, "output": output})

            if 'output' in node:
                # Named intermediate result: stored so downstream nodes
                # can consume it as their input.
                execution_results[node['output']] = output
            else:
                # Terminal output: accumulate into the deliverables text.
                execution_results['deliverables'] += output

            if "next" in node and node["next"]:
                traverse(node["next"], previous_output)

    try:
        traverse(TASK_WORKFLOW_TREE["start"])
        execution_results['doc_url'] = generate_document(task_description, execution_results)
        return task_analysis, pd.DataFrame(execution_status), execution_results
    except Exception as e:
        logger.error(f"Traverse Error: {e}")
        return task_analysis, pd.DataFrame(), {}
664
+
665
+
666
+ # In[14]:
667
 
668
 
669
  # Initialize dataframes for the schema
 
717
  return df.to_string(index=False)
718
 
719
 
720
+ # In[15]:
721
+
722
 
723
# Shared state variables exchanged between the FastAPI webhook and the
# Gradio UI.
shared_state = {"project_desc_table": pd.DataFrame(), "task_analysis_txt": "", "execution_status": pd.DataFrame(), "execution_results": {}}

# Button Action: Fetch State
def fetch_updated_state():
    """Fetch the updated shared state from the local FastAPI service.

    Returns:
        Tuple of (project_desc_table DataFrame, task_analysis_txt str,
        execution_status DataFrame, execution_results dict).

    Raises:
        requests.HTTPError: when the /state endpoint responds with an error
            status (previously this surfaced as an opaque KeyError on the
            404 body).
    """
    # BUG FIX: add a timeout so the UI cannot hang forever, and fail fast
    # on non-2xx responses before attempting to decode the payload.
    response = requests.get("http://localhost:5000/state", timeout=10)
    response.raise_for_status()
    state = response.json()
    return (
        pd.DataFrame(state["project_desc_table"]),
        state["task_analysis_txt"],
        pd.DataFrame(state["execution_status"]),
        state["execution_results"],
    )
732
 
733
def update_gradio_state(task_analysis_txt, execution_status, execution_results):
    """Store the latest task outputs in the module-level shared state.

    Always returns True so it can double as a success flag.
    """
    shared_state.update(
        {
            "task_analysis_txt": task_analysis_txt,
            "execution_status": execution_status,
            "execution_results": execution_results,
        }
    )
    return True
739
+
740
+
741
+ # In[16]:
742
+
743
+
744
# Initialize the database
new_db = 'curify.db'  # working copy the app operates on

# Copy the old database to a new one so the original stays untouched
shutil.copy("curify_idea.db", new_db)

# Schema setup / WAL mode are left disabled — presumably curify_idea.db is
# already initialized; confirm before re-enabling these.
#create_db(new_db)
#enable_wal_mode(new_db)
752
def project_extraction(project_description):
    """Turn a free-text project description into a projects DataFrame.

    Pipeline: LLM idea extraction -> listed-JSON normalization -> pandas.
    """
    raw_ideas = extract_ideas('AI-powered tools for productivity', project_description)
    return json_to_pandas(convert_to_listed_json(raw_ideas))
 
 
 
 
758
 
 
759
 
760
+ # In[17]:
761
 
 
762
 
763
+ # project_description = 'work on a number of projects including curify (digest, ideas, careers, projects etc), and writing a book on LLM for recommendation system, educating my 3.5-year-old boy and working on a paper for LLM reasoning.'
764
 
765
+ # # convert_to_listed_json(extract_ideas('AI-powered tools for productivity', project_description))
766
 
767
+ # task_description = 'Build an interview bot for the curify digest project.'
768
+ # task_analysis, reasoning_path = generate_reasoning_path(project_description, task_description)
769
 
770
+ # steps = store_and_execute_task(task_description, reasoning_path)
771
def message_back(task_message, execution_status, doc_url, from_whatsapp):
    """Reply on WhatsApp with the task message, step status, and doc link.

    Raises:
        HTTPException: 500 when the Twilio send fails.
    """
    # Render the executed steps as a numbered list, one step per line.
    step_records = execution_status.to_dict(orient="records")
    numbered_steps = []
    for idx, step in enumerate(step_records):
        numbered_steps.append(f"{idx + 1}. {step['action']} - {step.get('output', '')}")
    task_steps_list = "\n".join(numbered_steps)

    # Assemble the WhatsApp message body (Twilio renders *...* as bold).
    body_message = (
        f"*Task Message:*\n{task_message}\n\n"
        f"*Execution Status:*\n{task_steps_list}\n\n"
        f"*Doc URL:*\n{doc_url}\n\n"
    )

    try:
        twillo_client.messages.create(
            from_=twilio_phone_number,
            to=from_whatsapp,
            body=body_message
        )
    except Exception as e:
        logger.error(f"Twilio Error: {e}")
        raise HTTPException(status_code=500, detail=f"Error sending WhatsApp message: {str(e)}")

    return {"status": "success"}
796
 
797
# Initialize the Whisper pipeline
whisper_pipeline = pipeline("automatic-speech-recognition", model="openai/whisper-medium")

# Function to transcribe audio from a media URL
def transcribe_audio_from_media_url(media_url):
    """Download a Twilio-hosted media file and transcribe it with Whisper.

    Args:
        media_url: Twilio media URL (requires basic auth with the account
            SID and auth token).

    Returns:
        Transcribed text, or None on any download/transcription failure.
    """
    import tempfile  # local import: only needed by this helper

    audio_file_path = None
    try:
        # BUG FIX: add a timeout so a dead media URL cannot hang the webhook.
        media_response = requests.get(
            media_url,
            auth=HTTPBasicAuth(account_sid, auth_token),
            timeout=30,
        )
        media_response.raise_for_status()

        # BUG FIX: the original wrote to a fixed "temp_audio_file.mp3",
        # which races when two requests arrive at once and was never
        # deleted. Use a unique temp file and clean it up in `finally`.
        with tempfile.NamedTemporaryFile(suffix=".mp3", delete=False) as audio_file:
            audio_file.write(media_response.content)
            audio_file_path = audio_file.name

        # Transcribe the audio using Whisper
        transcription = whisper_pipeline(audio_file_path, return_timestamps=True)
        logger.debug(f"Transcription: {transcription['text']}")
        return transcription["text"]

    except Exception as e:
        logger.error(f"An error occurred: {e}")
        return None
    finally:
        if audio_file_path:
            try:
                os.remove(audio_file_path)
            except OSError:
                pass  # best-effort cleanup; never mask the real result
821
+
822
+
823
+ # In[18]:
824
+
825
+
826
app = FastAPI()

# @app.get("/state")
# async def fetch_state():
#     return shared_state

# BUG FIX: use FastAPI's @app.post instead of the deprecated Starlette
# @app.route(...) form; the endpoint path and method are unchanged.
@app.post("/whatsapp-webhook/")
async def whatsapp_webhook(request: Request):
    """Handle an inbound Twilio WhatsApp message.

    Accepts text or audio (audio is transcribed with Whisper), runs the
    task-processing workflow against the current project table, and replies
    to the sender via Twilio.
    """
    form_data = await request.form()
    # Log the form data to debug
    logger.debug("Received data: %s", form_data)

    # Extract message and user information
    incoming_msg = form_data.get("Body", "").strip()
    from_number = form_data.get("From", "")
    media_url = form_data.get("MediaUrl0", "")
    media_type = form_data.get("MediaContentType0", "")

    # Initialize response variables
    transcription = None

    if media_type.startswith("audio"):
        # If the media is an audio file, transcribe it first.
        try:
            transcription = transcribe_audio_from_media_url(media_url)
        except Exception as e:
            return JSONResponse(
                {"error": f"Failed to process voice input: {str(e)}"}, status_code=500
            )

    # Use the transcription if available, otherwise the text message.
    processed_input = transcription if transcription else incoming_msg

    logger.debug(f"Processed input: {processed_input}")
    try:
        # BUG FIX: fetch_updated_state() returns four values; the original
        # `project_desc_table, _ = fetch_updated_state()` raised
        # "too many values to unpack" on every request.
        project_desc_table, _, _, _ = fetch_updated_state()

        # BUG FIX: the original had no else-branch for an empty table, so
        # the variables below were undefined (NameError). Fail explicitly.
        if project_desc_table.empty:
            return JSONResponse(
                {"error": "No projects available; complete onboarding first."},
                status_code=400,
            )

        task_analysis_txt, execution_status, execution_results = fn_process_task(project_desc_table, processed_input)
        update_gradio_state(task_analysis_txt, execution_status, execution_results)

        doc_url = execution_results.get('doc_url', 'Fail to generate doc')

        # Respond to the user on WhatsApp with the processed idea
        response = message_back(processed_input, execution_status, doc_url, from_number)
        logger.debug(response)

        return JSONResponse(content=str(response))
    except Exception as e:
        logger.error(f"Error during task processing: {e}")
        # BUG FIX: surface failures with an explicit 500 instead of a bare
        # dict (which FastAPI would return as a 200).
        return JSONResponse({"error": str(e)}, status_code=500)
878
+
879
+
880
+ # In[19]:
881
+
882
+
883
# Mock Gmail Login Function
def mock_login(email):
    """Mock Gmail auth: any @gmail.com address is accepted.

    Returns a status string plus two gr.update objects that hide the login
    page and reveal the onboarding page on success (no-ops on failure).
    """
    if not email.endswith("@gmail.com"):
        # Invalid address: stay on the login page, leave visibility as-is.
        return "❌ Invalid Gmail address. Please try again.", gr.update(), gr.update()
    # Valid address: advance from login to onboarding.
    return f"✅ Logged in as {email}", gr.update(visible=False), gr.update(visible=True)
889
+
890
# User Onboarding Function
def onboarding_survey(role, industry, project_description):
    """Process the survey: extract projects, then advance to integrations.

    role and industry are collected but not used by the extraction yet.
    """
    projects_df = project_extraction(project_description)
    return projects_df, gr.update(visible=False), gr.update(visible=True)
894
+
895
# Mock Integration Functions
def integrate_todoist():
    """Mock integration hook: report a successful Todoist connection."""
    status_message = "✅ Successfully connected to Todoist!"
    return status_message
898
+
899
def integrate_evernote():
    """Mock integration hook: report a successful Evernote connection."""
    status_message = "✅ Successfully connected to Evernote!"
    return status_message
901
+
902
def integrate_calendar():
    """Mock integration hook: report a successful Google Calendar connection."""
    status_message = "✅ Successfully connected to Google Calendar!"
    return status_message
904
+
905
def load_svg_with_size(file_path, width="600px", height="400px"):
    """Return the SVG at *file_path* wrapped in a fixed-size scrollable div.

    Args:
        file_path: path to an SVG file on disk.
        width, height: CSS sizes applied to the wrapping <div>.
    """
    with open(file_path, "r", encoding="utf-8") as svg_file:
        svg_markup = svg_file.read()

    # Inline style pins the rendered size; overflow keeps large art scrollable.
    return f"""
    <div style="width: {width}; height: {height}; overflow: auto;">
        {svg_markup}
    </div>
    """
917
+
918
 
919
+ # In[20]:
920
+
921
+
922
# Gradio Demo
def create_gradio_interface(state=None):
    """Build the four-page Gradio UI: login -> onboarding -> integrations -> QR/tasks.

    The `state` parameter is currently unused; it is kept for a planned
    demo.load() initialization (see commented block at the bottom).
    Returns the assembled gr.Blocks demo (not yet launched).
    """
    with gr.Blocks(
        # CSS keeps tables wrapping inside fixed-width columns and disables
        # horizontal scrolling across the whole app.
        css="""
        .gradio-table td {
            white-space: normal !important;
            word-wrap: break-word !important;
        }
        .gradio-table {
            width: 100% !important; /* Adjust to 100% to fit the container */
            table-layout: fixed !important; /* Fixed column widths */
            overflow-x: hidden !important; /* Disable horizontal scrolling */
        }
        .gradio-container {
            overflow-x: hidden !important; /* Disable horizontal scroll for entire container */
            padding: 0 !important; /* Remove any default padding */
        }
        .gradio-column {
            max-width: 100% !important; /* Ensure columns take up full width */
            overflow: hidden !important; /* Hide overflow to prevent horizontal scroll */
        }
        .gradio-row {
            overflow-x: hidden !important; /* Prevent horizontal scroll on rows */
        }
        """) as demo:

        # Page 1: Mock Gmail Login
        with gr.Group(visible=True) as login_page:
            gr.Markdown("### **1️⃣ Login with Gmail**")
            email_input = gr.Textbox(label="Enter your Gmail Address", placeholder="[email protected]")
            login_button = gr.Button("Login")
            login_result = gr.Textbox(label="Login Status", interactive=False, visible=False)
        # Page 2: User Onboarding
        with gr.Group(visible=False) as onboarding_page:
            gr.Markdown("### **2️⃣ Tell Us About Yourself**")
            role = gr.Textbox(label="What is your role?", placeholder="e.g. Developer, Designer")
            industry = gr.Textbox(label="Which industry are you in?", placeholder="e.g. Software, Finance")
            project_description = gr.Textbox(label="Describe your project", placeholder="e.g. A task management app")
            submit_survey = gr.Button("Submit")

        # Page 3: Mock Integrations with Separate Buttons
        with gr.Group(visible=False) as integrations_page:
            gr.Markdown("### **3️⃣ Connect Integrations**")
            gr.Markdown("Click on the buttons below to connect each tool:")

            # Separate Buttons and Results for Each Integration
            todoist_button = gr.Button("Connect to Todoist")
            todoist_result = gr.Textbox(label="Todoist Status", interactive=False, visible=False)

            evernote_button = gr.Button("Connect to Evernote")
            evernote_result = gr.Textbox(label="Evernote Status", interactive=False, visible=False)

            calendar_button = gr.Button("Connect to Google Calendar")
            calendar_result = gr.Textbox(label="Google Calendar Status", interactive=False, visible=False)

            # Skip Button to proceed directly to next page
            skip_integrations = gr.Button("Skip ➡️")
            # NOTE(review): next_button has no click handler wired below —
            # only skip_integrations advances the page. Confirm intent.
            next_button = gr.Button("Proceed to QR Code")

        with gr.Group(visible=False) as qr_code_page:
            # Page 4: QR Code and Curify Ideas
            gr.Markdown("## Curify: Unified AI Tools for Productivity")

            with gr.Tab("Curify Idea"):
                with gr.Row():
                    with gr.Column():
                        gr.Markdown("#### ** QR Code**")
                        # Path to your local SVG file
                        svg_file_path = "qr.svg"
                        # Load the SVG content
                        svg_content = load_svg_with_size(svg_file_path, width="200px", height="200px")
                        gr.HTML(svg_content)

                    # Column 1: Webpage rendering
                    with gr.Column():

                        gr.Markdown("## Projects Overview")
                        project_desc_table = gr.DataFrame(
                            type="pandas"
                        )

                        gr.Markdown("## Enter task message.")
                        idea_input = gr.Textbox(
                            label=None,
                            placeholder="Describe the task you want to execute (e.g., Research Paper Review)")

                        task_btn = gr.Button("Generate Task Steps")
                        fetch_state_btn = gr.Button("Fetch Updated State")

                    with gr.Column():
                        gr.Markdown("## Task analysis")
                        task_analysis_txt = gr.Textbox(
                            label=None,
                            placeholder="Here is the execution status of your task...")

                        gr.Markdown("## Execution status")
                        execution_status = gr.DataFrame(
                            type="pandas"
                        )
                        gr.Markdown("## Execution output")
                        execution_results = gr.JSON(
                            label=None
                        )
                        state_output = gr.State()  # Add a state output to hold the state

            # Run the workflow against the current project table.
            task_btn.click(
                fn_process_task,
                inputs=[project_desc_table, idea_input],
                outputs=[task_analysis_txt, execution_status, execution_results]
            )

            # Pull the shared state from the FastAPI side into the UI.
            fetch_state_btn.click(
                fetch_updated_state,
                inputs=None,
                outputs=[project_desc_table, task_analysis_txt, execution_status, execution_results]
            )

        # Page 1 -> Page 2 Transition
        login_button.click(
            mock_login,
            inputs=email_input,
            outputs=[login_result, login_page, onboarding_page]
        )

        # Page 2 -> Page 3 Transition (Submit and Skip)
        submit_survey.click(
            onboarding_survey,
            inputs=[role, industry, project_description],
            outputs=[project_desc_table, onboarding_page, integrations_page]
        )

        # Integration Buttons
        todoist_button.click(integrate_todoist, outputs=todoist_result)
        evernote_button.click(integrate_evernote, outputs=evernote_result)
        calendar_button.click(integrate_calendar, outputs=calendar_result)

        # Skip Integrations and Proceed
        skip_integrations.click(
            lambda: (gr.update(visible=False), gr.update(visible=True)),
            outputs=[integrations_page, qr_code_page]
        )

        # # Set the load_fn to initialize the state when the page is loaded
        # demo.load(
        #     curify_ideas,
        #     inputs=[project_input, idea_input],
        #     outputs=[task_steps, task_analysis_txt, state_output]
        # )
    return demo
1071
+ # Load function to initialize the state
1072
+ # demo.load(load_fn, inputs=None, outputs=[state]) # Initialize the state when the page is loaded
1073
 
1074
+
1075
+ # In[21]:
1076
+
1077
+
1078
+ # Function to launch Gradio
1079
+ def launch_gradio():
1080
+ demo = create_gradio_interface()
1081
+ demo.launch(share=True, inline=False) # Gradio in the foreground
1082
+
1083
+ # Function to run FastAPI server using uvicorn in the background
1084
+ async def run_fastapi():
1085
+ config = uvicorn.Config(app, host="0.0.0.0", port=5000, reload=True, log_level="debug")
1086
+ server = uvicorn.Server(config)
1087
+ await server.serve()
1088
+
1089
+ # FastAPI endpoint to display a message
1090
+ @app.get("/", response_class=HTMLResponse)
1091
+ async def index():
1092
+ return "FastAPI is running. Visit Gradio at the provided public URL."
1093
+
1094
+ # Main entry point for the asynchronous execution
1095
+ async def main():
1096
+ # Run Gradio in the foreground and FastAPI in the background
1097
+ loop = asyncio.get_event_loop()
1098
+
1099
+ # Run Gradio in a separate thread (non-blocking)
1100
+ loop.run_in_executor(None, launch_gradio)
1101
+
1102
+ # Run FastAPI in the background (asynchronous)
1103
+ await run_fastapi()
1104
+
1105
+ if __name__ == "__main__":
1106
+ import nest_asyncio
1107
+ nest_asyncio.apply() # Allow nested use of asyncio event loops in Jupyter notebooks
1108
+
1109
+ # Run the main function to launch both services concurrently
1110
+ asyncio.run(main())
1111
 
1112
 
1113
  # In[ ]:
curify_ideas_reasoning.json ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "start": {
3
+ "check": "is_task",
4
+ "No": {
5
+ "action": "add_to_evernote",
6
+ "input": "task_topic"
7
+ },
8
+ "Yes": {
9
+ "check": "task_category",
10
+ "Blogs/Papers": {
11
+ "action": "find_reference",
12
+ "input": "task_topic",
13
+ "output": "reference",
14
+ "next": {
15
+ "action": "generate_summary",
16
+ "input": "reference",
17
+ "output": "summary",
18
+ "next": {
19
+ "action": "suggest_relevance",
20
+ "input": "summary"
21
+ }
22
+ }
23
+ },
24
+ "Tools": {
25
+ "action": "tool_research",
26
+ "output": "tool_analysis",
27
+ "next": {
28
+ "check": "tool_type",
29
+ "Competitive": {
30
+ "action": "generate_comparison_table",
31
+ "input": "tool_analysis"
32
+ },
33
+ "Integration": {
34
+ "action": "generate_integration_memo",
35
+ "input": "tool_analysis"
36
+ }
37
+ }
38
+ },
39
+ "Issues": {
40
+ "action": "analyze_issue",
41
+ "output": "issue_analysis",
42
+ "next": {
43
+ "action": "generate_issue_memo",
44
+ "input": "issue_analysis"
45
+ }
46
+ },
47
+ "Brainstorming": {
48
+ "action": "list_ideas",
49
+ "output": "ideas",
50
+ "next": {
51
+ "action": "construct_matrix",
52
+ "input": "ideas",
53
+ "output": "matrix",
54
+ "next": {
55
+ "action": "prioritize_ideas",
56
+ "input": "matrix",
57
+ "output": "prioritized_ideas",
58
+ "next": {
59
+ "action": "setup_action_plan",
60
+ "input": "prioritized_ideas"
61
+ }
62
+ }
63
+ }
64
+ },
65
+ "Others": {
66
+ "action": "unsupported_task"
67
+ }
68
+ }
69
+ }
70
+ }
qr.svg ADDED