Aravind366 committed on
Commit
7d5594f
·
verified ·
1 Parent(s): f2b1867

Update app.py

Files changed (1)
  1. app.py +139 -900
app.py CHANGED
@@ -1,925 +1,164 @@
- # -*- coding: utf-8 -*-
  import streamlit as st
- import requests
  from openai import OpenAI
- import google.generativeai as genai
- import anthropic
- import streamlit.components.v1 as components
- import re
- # from supabase import create_client  # Keep commented if not using history/auth
- import base64
- import time
-
- # ---------- Helper: Safe Rerun ----------
- def safe_rerun():
-     """Safely trigger a Streamlit rerun if the function exists."""
-     if hasattr(st, "experimental_rerun"):
-         st.experimental_rerun()
-     elif hasattr(st, "rerun"):
-         st.rerun()
-     else:
-         st.warning("Rerun function not available. Please update Streamlit.")
-
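A small note on the deleted helper above: newer Streamlit releases deprecate `st.experimental_rerun` in favor of `st.rerun`, so probing the legacy name first can hit the deprecated path on modern versions. A sketch that prefers the modern API (same fallback idea, not part of the commit):

```python
import streamlit as st

# Sketch: check st.rerun first (Streamlit >= 1.27) so the deprecated
# st.experimental_rerun is only used on old versions.
def safe_rerun():
    if hasattr(st, "rerun"):
        st.rerun()
    elif hasattr(st, "experimental_rerun"):
        st.experimental_rerun()
    else:
        st.warning("Rerun function not available. Please update Streamlit.")
```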
- # ---------- Setup & API Client Initialization ----------
- # (Keep your existing API key loading logic here - unchanged)
- openai_client = None
- genai_client = None
- deepseek_api_key = None
- claude_client = None
- secrets_available = {"openai": False, "gemini": False, "deepseek": False, "claude": False}
- secret_errors = []
-
- # OpenAI API Key
- try:
-     openai_api_key = st.secrets.get("OPENAI_API_KEY")
-     if openai_api_key:
-         openai_client = OpenAI(api_key=openai_api_key)
-         secrets_available["openai"] = True
-     else:
-         secret_errors.append("Streamlit Secret `OPENAI_API_KEY` not found.")
- except KeyError:
-     secret_errors.append("Streamlit Secret `OPENAI_API_KEY` not found.")
- except Exception as e:
-     secret_errors.append(f"Error initializing OpenAI client: {e}")
-
- # Gemini API Key (Google GenAI)
- try:
-     gemini_api_key = st.secrets.get("GEMINI_API_KEY")
-     if gemini_api_key:
-         genai.configure(api_key=gemini_api_key)
-         genai_client = genai
-         secrets_available["gemini"] = True
-     else:
-         secret_errors.append("Streamlit Secret `GEMINI_API_KEY` not found.")
- except KeyError:
-     secret_errors.append("Streamlit Secret `GEMINI_API_KEY` not found.")
- except Exception as e:
-     secret_errors.append(f"Error initializing Google GenAI client: {e}")
-
- # DeepSeek API Key
- try:
-     deepseek_api_key = st.secrets.get("DEEPSEEK_API_KEY")
-     if deepseek_api_key:
-         secrets_available["deepseek"] = True
-     else:
-         secret_errors.append("Streamlit Secret `DEEPSEEK_API_KEY` not found.")
- except KeyError:
-     secret_errors.append("Streamlit Secret `DEEPSEEK_API_KEY` not found.")
- except Exception as e:
-     secret_errors.append(f"Error reading DeepSeek API key: {e}")
-
- # Claude API Key and Client Initialization
- try:
-     claude_api_key = st.secrets.get("CLAUDE_API_KEY")
-     if claude_api_key:
-         claude_client = anthropic.Anthropic(api_key=claude_api_key)
-         secrets_available["claude"] = True
-     else:
-         secret_errors.append("Streamlit Secret `CLAUDE_API_KEY` not found.")
- except KeyError:
-     secret_errors.append("Streamlit Secret `CLAUDE_API_KEY` not found.")
- except Exception as e:
-     secret_errors.append(f"Error initializing Claude client: {e}")
-
- any_secret_loaded = any(secrets_available.values())
-
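The four key-loading blocks above differ only in the secret name and the client constructor. A loop-based sketch that could collapse them, assuming the same `st.secrets` keys and the `secrets_available`/`secret_errors` structures defined above (illustrative, not the committed code):

```python
# Sketch: one generic loader instead of four near-identical try/except blocks.
def load_provider(name, secret_key, init=None):
    try:
        value = st.secrets.get(secret_key)
        if not value:
            secret_errors.append(f"Streamlit Secret `{secret_key}` not found.")
            return None
        client = init(value) if init else value  # build a client, or keep the raw key
        secrets_available[name] = True
        return client
    except Exception as e:
        secret_errors.append(f"Error initializing {name} client: {e}")
        return None

openai_client = load_provider("openai", "OPENAI_API_KEY", lambda k: OpenAI(api_key=k))
claude_client = load_provider("claude", "CLAUDE_API_KEY", lambda k: anthropic.Anthropic(api_key=k))
deepseek_api_key = load_provider("deepseek", "DEEPSEEK_API_KEY")  # raw key; no client object
# genai.configure returns None, so `or genai` yields the configured module itself.
genai_client = load_provider("gemini", "GEMINI_API_KEY", lambda k: genai.configure(api_key=k) or genai)
```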
- # ---------- Model Configuration ----------
- # (Keep your existing SUPPORTED_MODELS dictionary population logic here - unchanged)
- SUPPORTED_MODELS = {}
-
- # OpenAI Models
- if secrets_available["openai"] and openai_client:
-     SUPPORTED_MODELS.update({
-         "GPT-4o (OpenAI)": {"id": "gpt-4o", "provider": "openai", "client": openai_client},
-         "GPT-4o Mini (OpenAI)": {"id": "gpt-4o-mini", "provider": "openai", "client": openai_client},
-         "GPT-4 Turbo (OpenAI)": {"id": "gpt-4-turbo", "provider": "openai", "client": openai_client},
-         "GPT-4 (OpenAI)": {"id": "gpt-4", "provider": "openai", "client": openai_client},
-         "GPT-3.5 Turbo (OpenAI)": {"id": "gpt-3.5-turbo", "provider": "openai", "client": openai_client},
-     })
-
- # Gemini Models
- if secrets_available["gemini"] and genai_client:
-     SUPPORTED_MODELS.update({
-         "Gemini 1.5 Pro (Google)": {"id": "gemini-1.5-pro-latest", "provider": "gemini", "client": genai_client},
-         "Gemini 1.5 Flash (Google)": {"id": "gemini-1.5-flash-latest", "provider": "gemini", "client": genai_client},
-         "Gemini 1.0 Pro (Google)": {"id": "gemini-1.0-pro", "provider": "gemini", "client": genai_client},
-     })
-
- # DeepSeek Models
- if secrets_available["deepseek"] and deepseek_api_key:
-     SUPPORTED_MODELS.update({
-         "DeepSeek Chat": {"id": "deepseek-chat", "provider": "deepseek", "client": None},
-         "DeepSeek Coder": {"id": "deepseek-coder", "provider": "deepseek", "client": None},
-     })
-
- # Claude Models
- if secrets_available["claude"] and claude_client:
-     SUPPORTED_MODELS.update({
-         "Claude 3.5 Sonnet (Anthropic)": {"id": "claude-3-5-sonnet-20240620", "provider": "claude", "client": claude_client},
-         "Claude 3 Haiku (Anthropic)": {"id": "claude-3-haiku-20240307", "provider": "claude", "client": claude_client},
-         "Claude 3 Opus (Anthropic)": {"id": "claude-3-opus-20240229", "provider": "claude", "client": claude_client},
-     })
-
- # Determine the default model based on preference and availability.
- DEFAULT_MODEL_PREFERENCE = [
-     "GPT-4o Mini (OpenAI)",
-     "Gemini 1.5 Flash (Google)",
-     "Claude 3 Haiku (Anthropic)",
-     "DeepSeek Chat",
-     "GPT-3.5 Turbo (OpenAI)",
- ]
- DEFAULT_MODEL = next((m for m in DEFAULT_MODEL_PREFERENCE if m in SUPPORTED_MODELS), None)
- if not DEFAULT_MODEL and SUPPORTED_MODELS:
-     DEFAULT_MODEL = next(iter(SUPPORTED_MODELS))  # Fall back to the first available model
-
- # ---------- Helper Functions for Generation ----------
- # (Keep your existing _generate_with_... provider functions here - unchanged)
- def _generate_with_openai_provider(client, model_id, prompt, max_tokens, system_message=None):
-     messages = []
-     if system_message:
-         messages.append({"role": "system", "content": system_message})
-     messages.append({"role": "user", "content": prompt})
-     try:
-         response = client.chat.completions.create(
-             model=model_id,
-             messages=messages,
-             temperature=0.6,
-             max_tokens=max_tokens
-         )
-         return response.choices[0].message.content
-     except Exception as e:
-         st.error(f"❌ OpenAI API Error ({model_id}): {e}")
-         return f"Error: OpenAI API call failed for {model_id}. Details: {e}"
-
- def _generate_with_gemini_provider(client, model_id, prompt, max_tokens, system_message=None):
-     # Note: the Gemini v1 API has no explicit system_message parameter like OpenAI's;
-     # a system message is usually prepended to the user prompt or handled via model tuning.
-     # We prepend it here for basic compatibility.
-     full_prompt = f"{system_message}\n\n{prompt}" if system_message else prompt
-     try:
-         model = client.GenerativeModel(
-             model_id,
-             safety_settings={
-                 'HARM_CATEGORY_HARASSMENT': 'block_none',  # More permissive for testing
-                 'HARM_CATEGORY_HATE_SPEECH': 'block_none',
-                 'HARM_CATEGORY_SEXUALLY_EXPLICIT': 'block_none',
-                 'HARM_CATEGORY_DANGEROUS_CONTENT': 'block_none',
-             },
-             generation_config=client.types.GenerationConfig(temperature=0.7)  # max_output_tokens isn't directly controllable here for safety reasons
-         )
-         # Gemini requires specific handling for potentially blocked content.
-         response = model.generate_content(full_prompt)
-
-         if response.parts:
-             return "".join(part.text for part in response.parts if hasattr(part, 'text'))
-         elif hasattr(response, 'text') and response.text:  # Handle simpler text responses if applicable
-             return response.text
-         elif response.prompt_feedback.block_reason:
-             reason = response.prompt_feedback.block_reason
-             st.warning(f"Gemini response blocked ({model_id}). Reason: {reason}")
-             return f"Error: Response blocked by API safety filters ({model_id}): {reason}"
-         else:
-             # Handle cases where the response is empty but not explicitly blocked;
-             # check candidate information if available.
-             if response.candidates and response.candidates[0].finish_reason != "STOP":
-                 st.warning(f"Gemini generation stopped unexpectedly ({model_id}). Reason: {response.candidates[0].finish_reason}")
-                 return f"Error: Generation stopped unexpectedly ({model_id}). Reason: {response.candidates[0].finish_reason}"
-             else:
-                 st.warning(f"Gemini returned an empty or unexpected response ({model_id}).")
-                 return f"Error: Gemini returned an empty response for {model_id}."
-
-     except Exception as e:
-         st.error(f"❌ Gemini SDK error ({model_id}): {e}")
-         error_detail = getattr(e, 'message', str(e))
-         # Attempt to recognize specific Gemini API errors where possible.
-         if "API key not valid" in error_detail:
-             return f"Error: Invalid Gemini API Key ({model_id}). Please check your Streamlit secrets."
-         # Add more specific error handling if needed.
-         return f"Error: Gemini SDK call failed for {model_id}. Details: {error_detail}"
-
-
- def _generate_with_deepseek_provider(api_key, model_id, prompt, max_tokens, system_message=None):
-     headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
-     messages = []
-     if system_message:
-         messages.append({"role": "system", "content": system_message})
-     messages.append({"role": "user", "content": prompt})
-     payload = {
-         "model": model_id,
-         "messages": messages,
-         "temperature": 0.6,
-         "max_tokens": max_tokens
-     }
-     try:
-         response = requests.post("https://api.deepseek.com/chat/completions", headers=headers, json=payload, timeout=90)
-         response.raise_for_status()
-         response_data = response.json()
-         if ("choices" in response_data and response_data["choices"] and
-                 "message" in response_data["choices"][0] and
-                 "content" in response_data["choices"][0]["message"]):
-             return response_data["choices"][0]["message"]["content"]
-         else:
-             st.warning(f"DeepSeek returned an unexpected response structure ({model_id}): {response_data}")
-             return f"Error: DeepSeek returned an unexpected structure for {model_id}."
-     except requests.exceptions.RequestException as e:
-         st.error(f"❌ DeepSeek API Request Error ({model_id}): {e}")
-         return f"Error: DeepSeek API request failed for {model_id}. Details: {e}"
-     except Exception as e:
-         st.error(f"❌ DeepSeek Error processing response ({model_id}): {e}")
-         return f"Error: DeepSeek processing failed for {model_id}. Details: {e}"
-
- def _generate_with_claude_provider(client, model_id, prompt, max_tokens, system_message=None):
-     try:
-         # The Anthropic Messages API takes the system prompt as a top-level parameter.
-         message = client.messages.create(
-             model=model_id,
-             max_tokens=max_tokens,
-             system=system_message if system_message else None,  # Add the system message here
-             messages=[
-                 {"role": "user", "content": prompt}
-             ]
-         )
-         # The response content is a list of blocks.
-         content = ""
-         if message.content:
-             content = "\n".join([block.text for block in message.content if hasattr(block, "text")])
-         return content
-     except Exception as e:
-         st.error(f"❌ Claude API Error ({model_id}): {e}")
-         # Provide more specific feedback if it is an authentication error.
-         if isinstance(e, anthropic.AuthenticationError):
-             return f"Error: Claude authentication failed ({model_id}). Check your API key."
-         return f"Error: Claude API call failed for {model_id}. Details: {e}"
-
-
- def generate_with_selected_model(selected_model_name, prompt, max_tokens=2000, system_message=None):
-     """Generate text using the chosen model, handling provider specifics."""
-     if not any_secret_loaded or not SUPPORTED_MODELS:
-         st.error("Error: No API keys loaded or models available. Configure secrets.")
-         return None  # Return None on critical failure
-
-     if selected_model_name not in SUPPORTED_MODELS:
-         st.error(f"Selected model '{selected_model_name}' is not configured or unavailable.")
-         original_choice = selected_model_name
-         selected_model_name = DEFAULT_MODEL
-         if not selected_model_name:
-             st.error("Fatal: Default model is also unavailable. Cannot proceed.")
-             return None  # Return None on critical failure
-         st.warning(f"Falling back from '{original_choice}' to default model: {DEFAULT_MODEL}")
-         # Update session state if a fallback occurs.
-         st.session_state.model_choice = DEFAULT_MODEL
-
-     model_config = SUPPORTED_MODELS[selected_model_name]
-     provider = model_config["provider"]
-     model_id = model_config["id"]
-     client = model_config.get("client")  # May be None for DeepSeek
-
-     st.info(f"Generating with: **{selected_model_name}**")
-     start_time = time.time()
-     result = f"Error: Provider '{provider}' not implemented."  # Default error
-
-     try:
-         if provider == "openai":
-             if not client: result = f"Error: OpenAI client not initialized for {selected_model_name}."
-             else: result = _generate_with_openai_provider(client, model_id, prompt, max_tokens, system_message)
-         elif provider == "gemini":
-             if not client: result = f"Error: Gemini client not initialized for {selected_model_name}."
-             else: result = _generate_with_gemini_provider(client, model_id, prompt, max_tokens, system_message)
-         elif provider == "deepseek":
-             if not deepseek_api_key: result = f"Error: DeepSeek API key not available for {selected_model_name}."
-             else: result = _generate_with_deepseek_provider(deepseek_api_key, model_id, prompt, max_tokens, system_message)
-         elif provider == "claude":
-             if not client: result = f"Error: Claude client not initialized for {selected_model_name}."
-             else: result = _generate_with_claude_provider(client, model_id, prompt, max_tokens, system_message)
-     except Exception as e:
-         st.error(f"❌ Unexpected error during generation with {selected_model_name}: {e}")
-         result = f"Error: Unexpected failure during {provider} generation. Details: {e}"
-
-     end_time = time.time()
-     duration = end_time - start_time
-     st.caption(f"Generation took {duration:.2f} seconds.")
-
-     # Check whether the result indicates an error before returning.
-     if isinstance(result, str) and result.startswith("Error:"):
-         st.error(f"Generation failed for {selected_model_name}. Check logs or API status.")
-         return None  # Explicitly return None for errors
-     return result
-
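Since every `_generate_with_*` helper above shares the same tail signature, the `if/elif` chain in `generate_with_selected_model` could be table-driven. A sketch (the `PROVIDER_DISPATCH` name is hypothetical):

```python
# Sketch: dispatch table over the provider helpers defined above.
PROVIDER_DISPATCH = {
    "openai":   lambda cfg, *a: _generate_with_openai_provider(cfg["client"], cfg["id"], *a),
    "gemini":   lambda cfg, *a: _generate_with_gemini_provider(cfg["client"], cfg["id"], *a),
    "deepseek": lambda cfg, *a: _generate_with_deepseek_provider(deepseek_api_key, cfg["id"], *a),
    "claude":   lambda cfg, *a: _generate_with_claude_provider(cfg["client"], cfg["id"], *a),
}

# Inside generate_with_selected_model, the chain would reduce to:
#     result = PROVIDER_DISPATCH[provider](model_config, prompt, max_tokens, system_message)
```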
- # --- Mermaid Diagram Helper ---
- def is_valid_mermaid(code):
-     if not isinstance(code, str): return False
-     code_lower = code.strip().lower()
-     # Simplified check for common diagram types. The input is lowercased above,
-     # so the keywords must be lowercase too (camelCase like "sequenceDiagram"
-     # would never match).
-     return bool(re.search(r"^\s*(%%.*?\n)*\s*(graph|flowchart|sequencediagram|classdiagram|statediagram|erdiagram|gantt|pie|gitgraph)", code_lower, re.MULTILINE))
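For reference, the kind of inputs this check accepts and rejects (illustrative, using the lowercased keyword pattern above):

```python
# Illustrative checks against is_valid_mermaid as defined above.
assert is_valid_mermaid("graph TD\n  A[Start] --> B[End]")
assert is_valid_mermaid("%% comment\nsequenceDiagram\n  Alice->>Bob: hi")
assert not is_valid_mermaid("just some prose")
```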
 
- def render_mermaid_diagram(mermaid_code, key):
-     if not isinstance(mermaid_code, str) or not mermaid_code.strip():
-         st.warning(f"Mermaid code is empty or invalid (Key: {key}).")
-         return
-
-     # Strip a potential markdown code fence around the diagram.
-     cleaned_code = re.sub(r"^```mermaid\s*\n?", "", mermaid_code, flags=re.IGNORECASE | re.MULTILINE).strip()
-     cleaned_code = re.sub(r"\n?```\s*$", "", cleaned_code).strip()
-
-     if not is_valid_mermaid(cleaned_code):
-         st.warning(f"⚠️ Mermaid diagram might not render correctly (Key: {key}). Check syntax. Displaying raw code.")
-         st.code(cleaned_code, language="mermaid")
-         return
-
-     # Ensure unique IDs when rendering multiple diagrams.
-     container_id = f"mermaid-container-{key}"
-     mermaid_id = f"mermaid-{key}"
-
-     components.html(
-         f"""
-         <div id="{container_id}" style="background-color: white; padding: 10px; border-radius: 5px; overflow: auto;">
-             <pre class="mermaid" id="{mermaid_id}">
- {cleaned_code}
-             </pre>
-         </div>
-         <script type="module">
-             try {{
-                 // Ensure Mermaid is loaded (ESM import for modern browsers).
-                 const mermaid = (await import('https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs')).default;
-                 mermaid.initialize({{ startOnLoad: false, theme: 'default' }});
-                 // Wait for the element to be ready in the DOM.
-                 const checkElement = setInterval(() => {{
-                     const el = document.getElementById('{mermaid_id}');
-                     if (el) {{
-                         clearInterval(checkElement);
-                         mermaid.run({{ nodes: [el] }});
-                     }}
-                 }}, 100);
-                 // Timeout to prevent an infinite loop.
-                 setTimeout(() => clearInterval(checkElement), 5000);
-             }} catch (e) {{
-                 console.error("Mermaid rendering error (Key: {key}):", e);
-                 const container = document.getElementById('{container_id}');
-                 if (container) container.innerHTML = "<p style='color:red;'>Error rendering Mermaid diagram. Check browser console.</p>";
-             }}
-         </script>
-         """,
-         height=500, scrolling=True,
      )
-
- # ---------- Initialize Session State (Provibe Workflow) ----------
- if 'current_step' not in st.session_state:
-     st.session_state.current_step = "input_idea"  # input_idea -> refine_idea -> review_idea -> generate_docs -> display_docs
- if 'processing' not in st.session_state:  # General flag to disable buttons during AI calls
-     st.session_state.processing = False
- if 'initial_product_idea' not in st.session_state:
-     st.session_state.initial_product_idea = ""
- if 'tech_stack_hint' not in st.session_state:  # Keep the tech hint if useful for prompts
-     st.session_state.tech_stack_hint = ""
- if 'model_choice' not in st.session_state:
-     st.session_state.model_choice = DEFAULT_MODEL
- if 'refined_idea_content' not in st.session_state:  # Stores the AI-refined idea
-     st.session_state.refined_idea_content = None
- if 'confirmed_idea_content' not in st.session_state:  # Stores the user-confirmed/edited idea
-     st.session_state.confirmed_idea_content = None
- if 'prd_content' not in st.session_state:  # Stores the generated PRD
-     st.session_state.prd_content = None
- if 'selected_docs_to_generate' not in st.session_state:  # Stores the user's selection of optional docs
-     st.session_state.selected_docs_to_generate = {}
- if 'generated_docs' not in st.session_state:  # Stores the content of generated optional docs
-     st.session_state.generated_docs = {}
-
-
- # ---------- Define Document Options (Align with Provibe Output) ----------
- # Key: unique identifier, used in session state.
- # label: display name for the checkbox.
- # prompt_func: lambda that builds the doc-specific prompt from (confirmed_idea, tech_hint).
- # display_func: optional function to display content (defaults to st.markdown).
- # download_filename: filename for the download button.
- # mime: MIME type for the download.
- # render_func: optional function for special rendering (e.g., Mermaid).
- # code_language: optional language hint for st.code display.
- # system_message: optional system message specific to this document type.
- # max_tokens: optional per-document token budget.
-
- doc_options = {
-     "prd": {
-         "label": "Product Requirements Document (PRD)",
-         "prompt_func": lambda idea, hint: f"""
- # --- PROMPT: Insert your specific PRD generation prompt here ---
- # Example: Write a comprehensive Product Requirements Document (PRD) based strictly on the following confirmed product description. Include sections like Introduction, Goals, Target Audience, Features (with details), User Stories, Design Considerations, Non-Functional Requirements, Open Issues, and Future Considerations. Ensure the PRD is detailed, clear, and actionable for a development team.
- # --- End PRD Prompt ---
-
- **Confirmed Product Description:**
- ---
- {idea}
- ---
- **Optional Preferences/Hints (Consider if relevant):**
- {hint if hint else "None provided"}
- """,
-         "system_message": "You are an expert Product Manager tasked with writing a detailed and professional PRD.",
-         "max_tokens": 3500,  # Allow more tokens for the PRD
-         "display_func": lambda content, key: st.markdown(content),
-         "download_filename": "prd.md",
-         "mime": "text/markdown",
-     },
-     "user_flow_text": {
-         "label": "User Flow (Text Description)",
-         "prompt_func": lambda idea, hint: f"""
- # --- PROMPT: Insert your specific User Flow (Text) generation prompt here ---
- # Example: Based on the product description below, outline the primary user flow step-by-step, from initial interaction to achieving the core goal. Describe each step clearly.
- # --- End User Flow (Text) Prompt ---
-
- **Product Description:**
- ---
- {idea}
- ---
- **Preferences/Hints:** {hint if hint else "None provided"}
- """,
-         "system_message": "You are a UX designer describing a key user journey.",
-         "max_tokens": 1000,
-         "display_func": lambda content, key: st.markdown(content),
-         "download_filename": "user_flow.md",
-         "mime": "text/markdown",
-     },
-     "user_flow_mermaid": {
-         "label": "User Flow Diagram (Mermaid)",
-         "prompt_func": lambda idea, hint: f"""
- # --- PROMPT: Insert your specific User Flow (Mermaid) generation prompt here ---
- # Example: Generate a Mermaid flowchart diagram representing the primary user flow for the product described below. Use standard flowchart syntax (graph TD, nodes, arrows). Ensure the diagram is clear and accurately reflects the user journey. Start the code block with ```mermaid and end it with ```. Do not include any other text before or after the code block.
- # --- End User Flow (Mermaid) Prompt ---
-
- **Product Description:**
- ---
- {idea}
- ---
- **Preferences/Hints:** {hint if hint else "None provided"}
- """,
-         "system_message": "You are an expert in creating Mermaid diagrams, specifically flowcharts for user journeys.",
-         "max_tokens": 1000,
-         "render_func": render_mermaid_diagram,  # Special rendering
-         "code_language": "mermaid",
-         "download_filename": "user_flow_diagram.mmd",
-         "mime": "text/plain",
-     },
-     "frontend_arch": {
-         "label": "Frontend Architecture Notes",
-         "prompt_func": lambda idea, hint: f"""
- # --- PROMPT: Insert your Frontend Architecture prompt here ---
- # Example: Based on the product description and hints, suggest a suitable frontend architecture. Describe key components, recommended libraries/frameworks (consider hints like 'React Native'), state management approach, and potential component breakdown.
- # --- End Frontend Architecture Prompt ---
-
- **Product Description:**
- ---
- {idea}
- ---
- **Preferences/Hints:** {hint if hint else "None provided"}
- """,
-         "system_message": "You are a frontend architect designing a web/mobile application.",
-         "max_tokens": 1500,
-         "display_func": lambda content, key: st.markdown(content),
-         "download_filename": "frontend_architecture.md",
-         "mime": "text/markdown",
-     },
-     "backend_arch": {
-         "label": "Backend Architecture Notes",
-         "prompt_func": lambda idea, hint: f"""
- # --- PROMPT: Insert your Backend Architecture prompt here ---
- # Example: Based on the product description and hints, propose a backend architecture. Discuss potential API design (e.g., RESTful), choice of language/framework, database considerations (type, scaling), authentication/authorization strategy, and key microservices or modules if applicable.
- # --- End Backend Architecture Prompt ---
-
- **Product Description:**
- ---
- {idea}
- ---
- **Preferences/Hints:** {hint if hint else "None provided"}
- """,
-         "system_message": "You are a backend/systems architect designing the server-side logic and infrastructure.",
-         "max_tokens": 1500,
-         "display_func": lambda content, key: st.markdown(content),
-         "download_filename": "backend_architecture.md",
-         "mime": "text/markdown",
-     },
-     "system_arch_mermaid": {
-         "label": "System Architecture Diagram (Mermaid)",
-         "prompt_func": lambda idea, hint: f"""
- # --- PROMPT: Insert your System Architecture (Mermaid) prompt here ---
- # Example: Generate a Mermaid diagram illustrating the high-level system architecture for the product described below. Include key components like the frontend client, backend API, database, authentication service, and any major third-party integrations mentioned or implied. Use appropriate Mermaid diagram syntax (e.g., graph TD, or C4 model elements if suitable). Start the code block with ```mermaid and end it with ```. Do not include any other text before or after the code block.
- # --- End System Architecture (Mermaid) Prompt ---
-
- **Product Description:**
- ---
- {idea}
- ---
- **Preferences/Hints:** {hint if hint else "None provided"}
- """,
-         "system_message": "You create system architecture diagrams using Mermaid syntax.",
-         "max_tokens": 1000,
-         "render_func": render_mermaid_diagram,
-         "code_language": "mermaid",
-         "download_filename": "system_architecture.mmd",
-         "mime": "text/plain",
-     },
-     "db_schema": {
-         "label": "Database Schema (SQL)",
-         "prompt_func": lambda idea, hint: f"""
- # --- PROMPT: Insert your Database Schema (SQL) prompt here ---
- # Example: Based on the product description, design a preliminary relational database schema. Provide SQL `CREATE TABLE` statements for the primary entities, including relevant columns, data types, primary keys, and foreign key relationships. Assume PostgreSQL syntax unless hints suggest otherwise.
- # --- End Database Schema (SQL) Prompt ---
-
- **Product Description:**
- ---
- {idea}
- ---
- **Preferences/Hints:** {hint if hint else "None provided"}
- """,
-         "system_message": "You are a database administrator designing a schema.",
-         "max_tokens": 1500,
-         "display_func": lambda content, key: st.code(content, language='sql'),  # Use a code block for SQL
-         "code_language": "sql",
-         "download_filename": "database_schema.sql",
-         "mime": "text/x-sql",
-     },
-     "project_structure": {
-         "label": "Project Folder Structure",
-         "prompt_func": lambda idea, hint: f"""
- # --- PROMPT: Insert your Project Structure prompt here ---
- # Example: Suggest a logical file and folder structure for a project implementing the described product. Consider frontend, backend, shared components, tests, etc., based on the description and any tech stack hints. Present it as a simple tree structure.
- # --- End Project Structure Prompt ---
-
- **Product Description:**
- ---
- {idea}
- ---
- **Preferences/Hints:** {hint if hint else "None provided"}
- """,
-         "system_message": "You are suggesting a clean project layout for a software development team.",
-         "max_tokens": 800,
-         "display_func": lambda content, key: st.code(content, language='bash'),  # Use a code block for the tree
-         "code_language": "bash",
-         "download_filename": "project_structure.txt",
-         "mime": "text/plain",
-     },
-     # Add other document types as needed, following the same structure.
  }
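To make the schema concrete, this is roughly how one entry flows through Step 4 below (illustrative, mirroring the generation loop; not a separate code path in the app):

```python
# Illustrative: how a single doc_options entry drives generation (see Step 4).
cfg = doc_options["db_schema"]
prompt = cfg["prompt_func"](st.session_state.confirmed_idea_content,
                            st.session_state.tech_stack_hint)
content = generate_with_selected_model(
    st.session_state.model_choice,
    prompt,
    max_tokens=cfg.get("max_tokens", 2000),
    system_message=cfg.get("system_message"),
)
cfg["display_func"](content, key="display_db_schema")  # e.g. st.code(..., language='sql')
```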
- # ---------- UI Layout (Provibe Workflow) ----------
- st.set_page_config(layout="wide", page_title="Provibe Prompt Tester")
- st.title("🧪 Provibe Prompt Tester")
- st.caption("Test and refine prompts for the Provibe document generation workflow.")
-
- # Display API key errors
- if secret_errors:
-     st.error("API Key Configuration Issues:")
-     for error in secret_errors:
-         st.error(f"- {error}")
- if not any_secret_loaded or not SUPPORTED_MODELS:
-     st.error("No API keys loaded or LLM models available. Please configure the necessary Streamlit secrets (e.g., OPENAI_API_KEY, GEMINI_API_KEY). Cannot proceed.")
-     st.stop()  # Stop execution if no models are usable
-
- # --- Workflow Steps ---
-
- # ---------- Step 1: Input Initial Idea ----------
- if st.session_state.current_step == "input_idea":
-     st.header("Step 1: Input Product Idea")
-     with st.form(key="idea_form"):
-         initial_idea_input = st.text_area(
-             "💡 Enter the initial product idea:", height=150,
-             value=st.session_state.initial_product_idea,
-             help="The raw concept or description."
-         )
-         tech_hint_input = st.text_input(
-             "⚙️ Optional: Tech Stack Hints or Constraints",
-             placeholder="e.g., Use React, target mobile, needs offline support",
-             value=st.session_state.tech_stack_hint,
-             help="Any preferences to guide AI generation."
-         )
-         available_model_names = list(SUPPORTED_MODELS.keys())
-         default_model_key = st.session_state.get('model_choice', DEFAULT_MODEL)
-         default_index = available_model_names.index(default_model_key) if default_model_key in available_model_names else 0
-         model_choice_input = st.selectbox(
-             "🧠 Choose AI model for all steps:",
-             options=available_model_names,
-             index=default_index,
-             key="model_select",  # Ensure a unique key
-             help="This model will be used for refinement and document generation."
-         )
-         submit_idea_button = st.form_submit_button(
-             label="➡️ Refine Idea with AI",
-             use_container_width=True,
-             disabled=st.session_state.processing
-         )
-
-     if submit_idea_button and initial_idea_input:
-         st.session_state.initial_product_idea = initial_idea_input
-         st.session_state.tech_stack_hint = tech_hint_input
-         st.session_state.model_choice = model_choice_input
-         st.session_state.refined_idea_content = None  # Clear the previous refinement
-         st.session_state.confirmed_idea_content = None  # Clear the previous confirmation
-         st.session_state.prd_content = None
-         st.session_state.generated_docs = {}
-         st.session_state.current_step = "refine_idea"
-         st.session_state.processing = True  # Start processing
-         safe_rerun()
-     elif submit_idea_button:
-         st.warning("Please enter a product idea.")
- # ---------- Step 2: AI Refinement ----------
- if st.session_state.current_step == "refine_idea":
-     st.header("Step 2: Refining Idea...")
-     st.info(f"Using **{st.session_state.model_choice}** to refine the idea. Please wait.")
-     with st.spinner("AI is thinking..."):
-
-         # --- PROMPT: Define the Idea Refinement Prompt ---
-         refinement_prompt = f"""
- # --- PROMPT: Insert your specific Idea Refinement prompt here ---
- # Example: Analyze the following product idea and technical hints. Generate a concise, well-structured 'Refined Product Description' that clarifies the core value proposition, key features, and target audience. This description will be the basis for generating all other documents. Focus on clarity and completeness for a development team.
- # --- End Idea Refinement Prompt ---
-
- **Initial Product Idea:**
- ---
- {st.session_state.initial_product_idea}
- ---
- **Optional Preferences/Hints Provided:**
- {st.session_state.tech_stack_hint if st.session_state.tech_stack_hint else "None provided"}
- """
-         # --- End Refinement Prompt ---
-
-         system_message_refine = "You are an AI assistant helping to refine a product idea into a clear specification."
-         max_tokens_refine = 1200
-
-         refined_content = generate_with_selected_model(
-             st.session_state.model_choice,
-             refinement_prompt,
-             max_tokens=max_tokens_refine,
-             system_message=system_message_refine
-         )
-
-         if refined_content:
-             st.session_state.refined_idea_content = refined_content
-             st.session_state.current_step = "review_idea"
          else:
-             st.error("Failed to refine the idea. Check API errors or model selection.")
-             # An option to go back or retry could be added here.
-             st.session_state.current_step = "input_idea"  # Go back if refinement failed
-
-         st.session_state.processing = False  # End processing
-         safe_rerun()
-
- # ---------- Step 3: Review and Confirm Idea ----------
- if st.session_state.current_step == "review_idea":
-     st.header("Step 3: Review and Confirm Refined Idea")
-     if st.session_state.refined_idea_content:
-         st.info("Review the AI-refined description below. Edit it as needed. This **final text** will be used to generate all documents.")
-         edited_idea = st.text_area(
-             "✏️ **Edit Refined Description:**",
-             value=st.session_state.refined_idea_content,
-             height=350,
-             key="refined_idea_edit_area",
-             help="Make any necessary corrections or additions."
-         )
-
-         button_col1, button_col2 = st.columns(2)
-         with button_col1:
-             confirm_button = st.button(
-                 "✅ Confirm & Proceed to Generate Docs",
-                 key="confirm_idea_button",
-                 use_container_width=True,
-                 disabled=st.session_state.processing
-             )
-         with button_col2:
-             back_button = st.button(
-                 "⬅️ Back to Idea Input",
-                 key="back_to_input_button",
-                 use_container_width=True,
-                 disabled=st.session_state.processing
-             )
-
-         if confirm_button:
-             if not edited_idea.strip():
-                 st.warning("The refined description cannot be empty.")
-             else:
-                 st.session_state.confirmed_idea_content = edited_idea
-                 # Reset generation states
-                 st.session_state.prd_content = None
-                 st.session_state.generated_docs = {}
-                 st.session_state.selected_docs_to_generate = {k: False for k in doc_options}  # Reset selections
-                 st.session_state.current_step = "generate_docs"
-                 safe_rerun()
-         if back_button:
-             st.session_state.current_step = "input_idea"
-             st.session_state.refined_idea_content = None  # Clear the refinement
-             safe_rerun()
-
-     else:
-         st.error("No refined idea content found. Please go back to Step 1.")
-         if st.button("⬅️ Back to Idea Input"):
-             st.session_state.current_step = "input_idea"
-             safe_rerun()
-
- # ---------- Step 4: Select and Generate Documents ----------
- if st.session_state.current_step == "generate_docs":
-     st.header("Step 4: Generate Product Documents")
-     if st.session_state.confirmed_idea_content:
-         st.markdown("**Based on this confirmed description:**")
-         with st.expander("View Confirmed Description", expanded=False):
-             st.markdown(f"> {st.session_state.confirmed_idea_content}")  # Display as a quote
-
-         st.subheader("Select Documents to Generate:")
-         # Use columns for a better checkbox layout.
-         num_doc_options = len(doc_options)
-         cols = st.columns(min(num_doc_options, 3))  # At most 3 columns
-         doc_keys = list(doc_options.keys())
-
-         for i, key in enumerate(doc_keys):
-             config = doc_options[key]
-             with cols[i % 3]:
-                 # Ensure state exists for each checkbox.
-                 if key not in st.session_state.selected_docs_to_generate:
-                     st.session_state.selected_docs_to_generate[key] = False
-                 st.session_state.selected_docs_to_generate[key] = st.checkbox(
-                     config["label"],
-                     value=st.session_state.selected_docs_to_generate.get(key, False),
-                     key=f"checkbox_{key}",
-                     disabled=st.session_state.processing
-                 )
-
-         generate_button = st.button(
-             "🚀 Generate Selected Documents",
-             key="generate_docs_button",
-             use_container_width=True,
-             disabled=st.session_state.processing
-         )
-         back_to_review_button = st.button(
-             "⬅️ Back to Review Idea",
-             key="back_to_review_button",
-             use_container_width=True,
-             disabled=st.session_state.processing
-         )
-
-         if generate_button:
-             selected_keys = [k for k, v in st.session_state.selected_docs_to_generate.items() if v]
-             if not selected_keys:
-                 st.warning("Please select at least one document type to generate.")
-             else:
-                 st.session_state.processing = True
-                 st.session_state.generated_docs = {}  # Clear previous results before generating new ones
-                 st.info(f"⏳ Generating {len(selected_keys)} selected document(s) using {st.session_state.model_choice}...")
-                 progress_bar = st.progress(0)
-                 generation_successful = True
-
-                 for i, key in enumerate(selected_keys):
-                     config = doc_options[key]
-                     st.write(f" - Generating {config['label']}...")
-                     with st.spinner(f"AI processing {config['label']}..."):
-                         # Build the prompt via the entry's lambda.
-                         prompt = config["prompt_func"](
-                             st.session_state.confirmed_idea_content,
-                             st.session_state.tech_stack_hint
-                         )
-                         system_msg = config.get("system_message")
-                         max_tok = config.get("max_tokens", 2000)  # Use the per-doc or default max_tokens
-
-                         content = generate_with_selected_model(
-                             st.session_state.model_choice,
-                             prompt,
-                             max_tokens=max_tok,
-                             system_message=system_msg
-                         )
-
-                         if content:
-                             st.session_state.generated_docs[key] = content
-                         else:
-                             # Store an error message if generation failed.
-                             st.session_state.generated_docs[key] = f"Error: Failed to generate {config['label']}."
-                             generation_successful = False
-                             st.error(f" - Failed to generate {config['label']}. See logs above.")
-                     progress_bar.progress((i + 1) / len(selected_keys))
-                     time.sleep(0.1)  # Small delay for the UI to update
-
-                 progress_bar.empty()
-                 st.session_state.processing = False
-
-                 if generation_successful:
-                     st.success("✅ Document generation complete!")
-                 else:
-                     st.warning("⚠️ Some documents could not be generated.")
-
-                 st.session_state.current_step = "display_docs"
-                 safe_rerun()  # Rerun to display results
-
-         if back_to_review_button:
-             st.session_state.current_step = "review_idea"
-             # Keep the confirmed idea, but allow editing again.
-             st.session_state.refined_idea_content = st.session_state.confirmed_idea_content
-             safe_rerun()
-
-     else:
-         st.error("Confirmed idea content is missing. Please restart the process from Step 1.")
-         if st.button("⬅️ Restart Process"):
-             # Reset key states
-             st.session_state.current_step = "input_idea"
-             st.session_state.initial_product_idea = ""
-             st.session_state.tech_stack_hint = ""
-             st.session_state.refined_idea_content = None
-             st.session_state.confirmed_idea_content = None
-             st.session_state.prd_content = None
-             st.session_state.generated_docs = {}
-             st.session_state.selected_docs_to_generate = {}
-             safe_rerun()
-
-
- # ---------- Step 5: Display Generated Documents ----------
- if st.session_state.current_step == "display_docs":
-     st.header("Step 5: Generated Documents")
-
-     if not st.session_state.generated_docs:
-         st.info("No documents were generated in the previous step.")
-     else:
-         st.markdown("**Review the generated documents below:**")
-         # Sort keys by their order in doc_options for consistent display.
-         display_order = [key for key in doc_options if key in st.session_state.generated_docs]
-
-         for key in display_order:
-             content = st.session_state.generated_docs.get(key)
-             if content:  # Should always be true if in display_order, but check anyway
-                 config = doc_options[key]
-                 st.subheader(f"📄 {config['label']}")
-                 is_error = isinstance(content, str) and content.startswith("Error:")
-
-                 if is_error:
-                     st.error(content)
-                 else:
-                     # Use specific render/display functions if available.
-                     if config.get("render_func"):
-                         try:
-                             config["render_func"](content, key=f"render_{key}")
-                         except Exception as render_e:
-                             st.error(f"Error rendering {config['label']}: {render_e}")
-                             st.code(content, language=config.get("code_language", None))  # Fall back to a code view
-                     elif config.get("display_func"):
-                         try:
-                             config["display_func"](content, key=f"display_{key}")
-                         except Exception as display_e:
-                             st.error(f"Error displaying {config['label']}: {display_e}")
-                             st.text(content)  # Fall back to a text view
-                     else:  # Default to markdown
-                         st.markdown(content)
-
-                     # Add a download button for non-error content.
-                     try:
-                         download_data = content.encode('utf-8') if isinstance(content, str) else str(content).encode('utf-8')
-                         st.download_button(
-                             label=f"📥 Download {config['label']}",
-                             data=download_data,
-                             file_name=config["download_filename"],
-                             mime=config.get("mime", "text/plain"),
-                             key=f"download_{key}"
-                         )
-                     except Exception as download_e:
-                         st.warning(f"Could not prepare download for {config['label']}: {download_e}")
-
-                     # Optional: show the raw code for rendered/code types.
-                     if config.get("render_func") or config.get("code_language"):
-                         if st.checkbox(f"🔍 Show raw content for {config['label']}", key=f"show_raw_{key}", value=False):
-                             st.code(content, language=config.get("code_language", None))
-
-             st.markdown("---")  # Separator between documents
-
-         # Navigation buttons at the end
-         button_col1, button_col2 = st.columns(2)
-         with button_col1:
-             generate_more_button = st.button(
-                 "🔄 Generate Different Documents",
-                 key="generate_more_button",
-                 use_container_width=True
-             )
-         with button_col2:
-             restart_all_button = st.button(
-                 "⏪ Start New Idea",
-                 key="restart_all_button",
-                 use_container_width=True
              )
-
-         if generate_more_button:
-             st.session_state.current_step = "generate_docs"  # Go back to selection
-             st.session_state.generated_docs = {}  # Clear displayed docs
-             safe_rerun()
-
-         if restart_all_button:
-             # Full reset
-             st.session_state.current_step = "input_idea"
-             st.session_state.initial_product_idea = ""
-             st.session_state.tech_stack_hint = ""
-             st.session_state.refined_idea_content = None
-             st.session_state.confirmed_idea_content = None
-             st.session_state.prd_content = None
-             st.session_state.generated_docs = {}
-             st.session_state.selected_docs_to_generate = {}
-             safe_rerun()
-
- # ---------- Footer ----------
- st.markdown("---")
- footer_model_choice = st.session_state.get('model_choice', 'N/A')
- st.caption(f"Using model: **{footer_model_choice}**. | Workflow Step: **{st.session_state.get('current_step', 'N/A')}**")
+ # Provibe App (v1.1) - Streamlit Implementation
+
  import streamlit as st
  from openai import OpenAI

+ # Initialize OpenAI client
+ openai_api_key = st.secrets.get("OPENAI_API_KEY")
+ openai_client = OpenAI(api_key=openai_api_key) if openai_api_key else None
+
+ # ---------- Helper Functions ----------
+ def generate_ai_response(prompt, max_tokens=2000):
+     if not openai_client:
+         st.error("OpenAI API key not configured.")
+         return None
+     response = openai_client.chat.completions.create(
+         model="gpt-4-turbo",
+         messages=[{"role": "user", "content": prompt}],
+         max_tokens=max_tokens
      )
+     return response.choices[0].message.content
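Worth noting about this simplification: the deleted version wrapped every provider call in try/except, while this helper lets any OpenAI API error crash the script run. A guarded variant in the same spirit (sketch, not part of the commit):

```python
# Sketch: generate_ai_response with the error handling the old file had.
def generate_ai_response_guarded(prompt, max_tokens=2000):
    if not openai_client:
        st.error("OpenAI API key not configured.")
        return None
    try:
        response = openai_client.chat.completions.create(
            model="gpt-4-turbo",
            messages=[{"role": "user", "content": prompt}],
            max_tokens=max_tokens,
        )
        return response.choices[0].message.content
    except Exception as e:  # surface the failure in the UI instead of a stack trace
        st.error(f"OpenAI API error: {e}")
        return None
```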
+
+ # ---------- Session State Initialization ----------
+ initial_state = {
+     'step': 1,
+     'idea': '',
+     'refined_idea': '',
+     'questions': [],
+     'answers': {},
+     'documents': [],
+     'docs_ready': {}
  }
+
+ for key, default in initial_state.items():
+     if key not in st.session_state:
+         st.session_state[key] = default
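One caveat with this pattern: `initial_state` holds mutable defaults (lists and dicts), and both this loop and the "Start New Idea" reset at the end assign those objects into `st.session_state` directly. In-place updates such as `st.session_state.answers[idx] = ...` therefore also mutate `initial_state`, so answers can leak across resets. Copying sidesteps the aliasing (sketch):

```python
# Sketch: copy mutable defaults so session state never aliases initial_state.
import copy

for key, default in initial_state.items():
    if key not in st.session_state:
        st.session_state[key] = copy.deepcopy(default)
```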
+
+ # ---------- UI ----------
+ st.title("🚀 Provibe - AI-Powered Product Documentation Generator")
+
+ # Step-by-step Wizard
+
+ # Step 1: Idea Input
+ if st.session_state.step == 1:
+     st.header("Step 1: Enter Your Product Idea")
+     with st.form("idea_form"):
+         idea_input = st.text_area("Describe your initial product idea:", height=150)
+         submit_idea = st.form_submit_button("Refine Idea")
+
+     if submit_idea and idea_input.strip():
+         st.session_state.idea = idea_input
+         st.session_state.step = 2
+         st.rerun()
+
+ # Step 2: Idea Refinement
+ elif st.session_state.step == 2:
+     st.header("Step 2: AI-Refined Idea")
+     if not st.session_state.refined_idea:
+         with st.spinner("Refining your idea..."):
+             refined_prompt = f"Refine and clarify this product idea:\n\n{st.session_state.idea}"
+             st.session_state.refined_idea = generate_ai_response(refined_prompt, 1000)
+
+     with st.form("refine_form"):
+         st.session_state.refined_idea = st.text_area("Refined Idea:", st.session_state.refined_idea, height=200)
+         submit_refined = st.form_submit_button("Proceed to Detailed Questions")
+
+     if submit_refined and st.session_state.refined_idea.strip():
+         st.session_state.step = 3
+         st.rerun()
+
+ # Step 3: AI-Generated Questions
+ elif st.session_state.step == 3:
+     st.header("Step 3: Product Details")
+
+     if not st.session_state.questions:
+         with st.spinner("Generating critical questions..."):
+             questions_prompt = f"Generate 5 critical questions to clarify details for this product:\n\n{st.session_state.refined_idea}"
+             raw_questions = generate_ai_response(questions_prompt, 500)
+             st.session_state.questions = [q.strip("- ").strip() for q in raw_questions.strip().split("\n") if q.strip()]
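The split above keeps leading list markers like `1.` and raises if the helper returned None (for example, when the API key is missing). A slightly more defensive parse, as a sketch (assumes `import re`; the `parse_questions` name is hypothetical):

```python
import re

# Sketch: strip common list markers ("1.", "1)", "-", "*") and tolerate None.
def parse_questions(raw):
    if not raw:
        return []
    cleaned = (re.sub(r"^\s*(?:\d+[.)]|[-*])\s*", "", line).strip() for line in raw.splitlines())
    return [q for q in cleaned if q]
```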
+
+     with st.form("questions_form"):
+         all_answered = True
+         for idx, question in enumerate(st.session_state.questions):
+             answer = st.text_input(f"Q{idx+1}: {question}", key=f"question_{idx}")
+             st.session_state.answers[idx] = answer
+             if not answer.strip():
+                 all_answered = False
+         submit_answers = st.form_submit_button("Generate Development Plan")
+
+     if submit_answers:
+         if all_answered:
+             st.session_state.step = 4
+             st.rerun()
          else:
+             st.warning("Please answer all questions before proceeding.")
+
+ # Step 4: Development Plan Generation
+ elif st.session_state.step == 4:
+     st.header("Step 4: AI-Generated Development Plan")
+     answers_formatted = "\n".join(f"{st.session_state.questions[i]}: {a}" for i, a in st.session_state.answers.items())
+     if 'development_plan' not in st.session_state:
+         with st.spinner("Creating your development plan..."):
+             detail_prompt = f"""
+ Create a concise, clear development plan for this product:
+
+ Refined Idea:
+ {st.session_state.refined_idea}
+
+ Detailed Answers:
+ {answers_formatted}
+ """
+             st.session_state.development_plan = generate_ai_response(detail_prompt, 1500)
+
+     st.markdown(st.session_state.development_plan)
+
+     if st.button("Proceed to Document Generation"):
+         st.session_state.step = 5
+         st.rerun()
+
+ # Step 5: Document Generation
+ elif st.session_state.step == 5:
+     st.header("Step 5: Generate Detailed Documentation")
+     doc_types = ["Product Requirements Document", "User Flow", "System Architecture", "Database Schema"]
+     selected_docs = st.multiselect("Select documents to generate:", doc_types, default=doc_types)
+
+     if st.button("Generate Documents"):
+         st.session_state.documents = selected_docs
+         for doc in selected_docs:
+             with st.spinner(f"Generating {doc}..."):
+                 doc_prompt = f"""
+ Generate a detailed {doc.lower()} for this product:
+
+ Idea:
+ {st.session_state.refined_idea}
+
+ Details:
+ {"; ".join(st.session_state.answers.values())}
+ """
+                 content = generate_ai_response(doc_prompt, 2000)
+                 st.session_state.docs_ready[doc] = content
+         st.success("All selected documents generated!")
+         st.session_state.step = 6
+         st.rerun()
+
+ # Display generated documents
+ if st.session_state.step == 6 and st.session_state.docs_ready:
+     st.header("📄 Generated Documents")
+     for doc, content in st.session_state.docs_ready.items():
+         st.subheader(doc)
+         st.markdown(content)
+         st.download_button(
+             f"Download {doc}",
+             data=content,
+             file_name=f"{doc.replace(' ', '_').lower()}.md",
+             mime="text/markdown",
+             key=f"download_{doc}"
          )
+
+     if st.button("🔄 Start New Idea"):
+         for key in initial_state:
+             st.session_state[key] = initial_state[key]
+         st.session_state.step = 1
+         st.rerun()
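A related detail: `development_plan` is written in Step 4 but never appears in `initial_state`, so the reset above leaves the previous plan in place for the next idea. One small addition would fix it (sketch):

```python
# Sketch: also drop keys that are created outside initial_state on reset.
st.session_state.pop('development_plan', None)
```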
+
+ st.caption("Powered by Provibe AI 🚀")