Aravind366 committed
Commit f57f681 · verified · 1 Parent(s): 7d5594f

Update app.py

Files changed (1)
  1. app.py +997 -138
app.py CHANGED
@@ -1,164 +1,1023 @@
-# Provibe App (v1.1) - Streamlit Implementation
-
 import streamlit as st
 from openai import OpenAI
-
-# Initialize OpenAI client
-openai_api_key = st.secrets.get("OPENAI_API_KEY")
-openai_client = OpenAI(api_key=openai_api_key) if openai_api_key else None
-
-# ---------- Helper Functions ----------
-def generate_ai_response(prompt, max_tokens=2000):
-    if not openai_client:
-        st.error("OpenAI API key not configured.")
         return None
-    response = openai_client.chat.completions.create(
-        model="gpt-4-turbo",
-        messages=[{"role": "user", "content": prompt}],
-        max_tokens=max_tokens
     )
-    return response.choices[0].message.content
-
-# ---------- Session State Initialization ----------
-initial_state = {
-    'step': 1,
-    'idea': '',
-    'refined_idea': '',
-    'questions': [],
-    'answers': {},
-    'documents': [],
-    'docs_ready': {}
 }
-
-for key, default in initial_state.items():
-    if key not in st.session_state:
-        st.session_state[key] = default
-
-# ---------- UI ----------
-st.title("🚀 Provibe - AI-Powered Product Documentation Generator")
-
-# Step-by-step Wizard
-
-# Step 1: Idea Input
-if st.session_state.step == 1:
-    st.header("Step 1: Enter Your Product Idea")
-    with st.form("idea_form"):
-        idea_input = st.text_area("Describe your initial product idea:", height=150)
-        submit_idea = st.form_submit_button("Refine Idea")
-
-    if submit_idea and idea_input.strip():
-        st.session_state.idea = idea_input
-        st.session_state.step = 2
-        st.rerun()
-
-# Step 2: Idea Refinement
-elif st.session_state.step == 2:
-    st.header("Step 2: AI-Refined Idea")
-    if not st.session_state.refined_idea:
-        with st.spinner("Refining your idea..."):
-            refined_prompt = f"Refine and clarify this product idea:\n\n{st.session_state.idea}"
-            st.session_state.refined_idea = generate_ai_response(refined_prompt, 1000)
-
-    with st.form("refine_form"):
-        st.session_state.refined_idea = st.text_area("Refined Idea:", st.session_state.refined_idea, height=200)
-        submit_refined = st.form_submit_button("Proceed to Detailed Questions")
-
-    if submit_refined and st.session_state.refined_idea.strip():
-        st.session_state.step = 3
-        st.rerun()
-
-# Step 3: AI-Generated Questions
-elif st.session_state.step == 3:
-    st.header("Step 3: Product Details")
-
-    if not st.session_state.questions:
-        with st.spinner("Generating critical questions..."):
-            questions_prompt = f"Generate 5 critical questions to clarify details for this product:\n\n{st.session_state.refined_idea}"
-            raw_questions = generate_ai_response(questions_prompt, 500)
-            st.session_state.questions = [q.strip("- ").strip() for q in raw_questions.strip().split("\n") if q.strip()]
-
-    with st.form("questions_form"):
-        all_answered = True
-        for idx, question in enumerate(st.session_state.questions):
-            answer = st.text_input(f"Q{idx+1}: {question}", key=f"question_{idx}")
-            st.session_state.answers[idx] = answer
-            if not answer.strip():
-                all_answered = False
-        submit_answers = st.form_submit_button("Generate Development Plan")
-
-    if submit_answers:
-        if all_answered:
-            st.session_state.step = 4
-            st.rerun()
-        else:
-            st.warning("Please answer all questions before proceeding.")
-
-# Step 4: Development Plan Generation
-elif st.session_state.step == 4:
-    st.header("Step 4: AI-Generated Development Plan")
-    answers_formatted = "\n".join(f"{st.session_state.questions[i]}: {a}" for i, a in st.session_state.answers.items())
-    if 'development_plan' not in st.session_state:
-        with st.spinner("Creating your development plan..."):
-            detail_prompt = f"""
-Create a concise, clear development plan for this product:
-
-Refined Idea:
-{st.session_state.refined_idea}
-
-Detailed Answers:
-{answers_formatted}
-"""
-            st.session_state.development_plan = generate_ai_response(detail_prompt, 1500)
-
-    st.markdown(st.session_state.development_plan)
-
-    if st.button("Proceed to Document Generation"):
-        st.session_state.step = 5
-        st.rerun()
-
-# Step 5: Document Generation
-elif st.session_state.step == 5:
-    st.header("Step 5: Generate Detailed Documentation")
-    doc_types = ["Product Requirements Document", "User Flow", "System Architecture", "Database Schema"]
-    selected_docs = st.multiselect("Select documents to generate:", doc_types, default=doc_types)
-
-    if st.button("Generate Documents"):
-        st.session_state.documents = selected_docs
-        for doc in selected_docs:
-            with st.spinner(f"Generating {doc}..."):
-                doc_prompt = f"""
-Generate a detailed {doc.lower()} for this product:
-
-Idea:
-{st.session_state.refined_idea}
-
-Details:
-{"; ".join(st.session_state.answers.values())}
-"""
-                content = generate_ai_response(doc_prompt, 2000)
-                st.session_state.docs_ready[doc] = content
-        st.success("All selected documents generated!")
-        st.session_state.step = 6
-        st.rerun()
-
-# Display generated documents
-if st.session_state.step == 6 and st.session_state.docs_ready:
-    st.header("📄 Generated Documents")
-    for doc, content in st.session_state.docs_ready.items():
-        st.subheader(doc)
-        st.markdown(content)
-        st.download_button(
-            f"Download {doc}",
-            data=content,
-            file_name=f"{doc.replace(' ', '_').lower()}.md",
-            mime="text/markdown",
-            key=f"download_{doc}"
         )
-
-    if st.button("🔄 Start New Idea"):
-        for key in initial_state:
-            st.session_state[key] = initial_state[key]
-        st.session_state.step = 1
-        st.rerun()
-
-st.caption("Powered by Provibe AI 🚀")
+# -*- coding: utf-8 -*-
 import streamlit as st
+import requests
 from openai import OpenAI
+import google.generativeai as genai
+import anthropic
+import streamlit.components.v1 as components
+import re
+# from supabase import create_client  # Keep commented if not using history/auth
+import base64
+import time
+import json  # For potentially structured Q&A
+
+# ---------- Helper: Safe Rerun ----------
+def safe_rerun():
+    """Safely trigger a Streamlit rerun if the function exists."""
+    if hasattr(st, "experimental_rerun"):
+        st.experimental_rerun()
+    elif hasattr(st, "rerun"):
+        st.rerun()
+    else:
+        st.warning("Rerun function not available. Please update Streamlit.")
+
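+# Note: safe_rerun() is used throughout instead of calling st.rerun() directly,
+# so the app also runs on older Streamlit builds that only expose
+# st.experimental_rerun().
+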
+# ---------- Setup & API Client Initialization ----------
+openai_client = None
+genai_client = None
+deepseek_api_key = None
+claude_client = None
+secrets_available = {"openai": False, "gemini": False, "deepseek": False, "claude": False}
+secret_errors = []
+
+# OpenAI API Key
+try:
+    openai_api_key = st.secrets.get("OPENAI_API_KEY")
+    if openai_api_key:
+        openai_client = OpenAI(api_key=openai_api_key)
+        secrets_available["openai"] = True
+    else:
+        secret_errors.append("Streamlit Secret `OPENAI_API_KEY` not found.")
+except KeyError:
+    secret_errors.append("Streamlit Secret `OPENAI_API_KEY` not found.")
+except Exception as e:
+    secret_errors.append(f"Error initializing OpenAI client: {e}")
+
+# Gemini API Key (Google GenAI)
+try:
+    gemini_api_key = st.secrets.get("GEMINI_API_KEY")
+    if gemini_api_key:
+        genai.configure(api_key=gemini_api_key)
+        genai_client = genai
+        secrets_available["gemini"] = True
+    else:
+        secret_errors.append("Streamlit Secret `GEMINI_API_KEY` not found.")
+except KeyError:
+    secret_errors.append("Streamlit Secret `GEMINI_API_KEY` not found.")
+except Exception as e:
+    secret_errors.append(f"Error initializing Google GenAI client: {e}")
+
+# DeepSeek API Key
+try:
+    deepseek_api_key = st.secrets.get("DEEPSEEK_API_KEY")
+    if deepseek_api_key:
+        secrets_available["deepseek"] = True
+    else:
+        secret_errors.append("Streamlit Secret `DEEPSEEK_API_KEY` not found.")
+except KeyError:
+    secret_errors.append("Streamlit Secret `DEEPSEEK_API_KEY` not found.")
+except Exception as e:
+    secret_errors.append(f"Error reading DeepSeek API key: {e}")
+
+# Claude API Key and Client Initialization
+try:
+    claude_api_key = st.secrets.get("CLAUDE_API_KEY")
+    if claude_api_key:
+        claude_client = anthropic.Anthropic(api_key=claude_api_key)
+        secrets_available["claude"] = True
+    else:
+        secret_errors.append("Streamlit Secret `CLAUDE_API_KEY` not found.")
+except KeyError:
+    secret_errors.append("Streamlit Secret `CLAUDE_API_KEY` not found.")
+except Exception as e:
+    secret_errors.append(f"Error initializing Claude client: {e}")
+
+any_secret_loaded = any(secrets_available.values())
+
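+# Keys are read from `.streamlit/secrets.toml`. Illustrative entries (placeholder
+# values, not real credentials):
+#   OPENAI_API_KEY   = "sk-..."
+#   GEMINI_API_KEY   = "AIza..."
+#   DEEPSEEK_API_KEY = "sk-..."
+#   CLAUDE_API_KEY   = "sk-ant-..."
+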
+# ---------- Model Configuration ----------
+SUPPORTED_MODELS = {}
+
+# OpenAI Models
+if secrets_available["openai"] and openai_client:
+    SUPPORTED_MODELS.update({
+        "GPT-4o (OpenAI)": {"id": "gpt-4o", "provider": "openai", "client": openai_client},
+        "GPT-4o Mini (OpenAI)": {"id": "gpt-4o-mini", "provider": "openai", "client": openai_client},
+        "GPT-4 Turbo (OpenAI)": {"id": "gpt-4-turbo", "provider": "openai", "client": openai_client},
+        "GPT-4 (OpenAI)": {"id": "gpt-4", "provider": "openai", "client": openai_client},
+        "GPT-3.5 Turbo (OpenAI)": {"id": "gpt-3.5-turbo", "provider": "openai", "client": openai_client},
+    })
+
+# Gemini Models
+if secrets_available["gemini"] and genai_client:
+    SUPPORTED_MODELS.update({
+        "Gemini 1.5 Pro (Google)": {"id": "gemini-1.5-pro-latest", "provider": "gemini", "client": genai_client},
+        "Gemini 1.5 Flash (Google)": {"id": "gemini-1.5-flash-latest", "provider": "gemini", "client": genai_client},
+        "Gemini 1.0 Pro (Google)": {"id": "gemini-1.0-pro", "provider": "gemini", "client": genai_client},
+    })
+
+# DeepSeek Models
+if secrets_available["deepseek"] and deepseek_api_key:
+    SUPPORTED_MODELS.update({
+        "DeepSeek Chat": {"id": "deepseek-chat", "provider": "deepseek", "client": None},
+        "DeepSeek Coder": {"id": "deepseek-coder", "provider": "deepseek", "client": None},
+    })
+
+# Claude Models
+if secrets_available["claude"] and claude_client:
+    SUPPORTED_MODELS.update({
+        "Claude 3.5 Sonnet (Anthropic)": {"id": "claude-3-5-sonnet-20240620", "provider": "claude", "client": claude_client},
+        "Claude 3 Haiku (Anthropic)": {"id": "claude-3-haiku-20240307", "provider": "claude", "client": claude_client},
+        "Claude 3 Opus (Anthropic)": {"id": "claude-3-opus-20240229", "provider": "claude", "client": claude_client},
+    })
+
+# Determine the default model based on preference and availability
+DEFAULT_MODEL_PREFERENCE = [
+    "GPT-4o Mini (OpenAI)",
+    "Gemini 1.5 Flash (Google)",
+    "Claude 3 Haiku (Anthropic)",
+    "DeepSeek Chat",
+    "GPT-3.5 Turbo (OpenAI)",
+]
+DEFAULT_MODEL = next((m for m in DEFAULT_MODEL_PREFERENCE if m in SUPPORTED_MODELS), None)
+if not DEFAULT_MODEL and SUPPORTED_MODELS:
+    DEFAULT_MODEL = next(iter(SUPPORTED_MODELS))  # Fall back to the first available model
+
+
+# ---------- Helper Functions for Generation ----------
+def _generate_with_openai_provider(client, model_id, prompt, max_tokens, system_message=None):
+    messages = []
+    if system_message:
+        messages.append({"role": "system", "content": system_message})
+    messages.append({"role": "user", "content": prompt})
+    try:
+        response = client.chat.completions.create(
+            model=model_id,
+            messages=messages,
+            temperature=0.6,
+            max_tokens=max_tokens
+        )
+        return response.choices[0].message.content
+    except Exception as e:
+        st.error(f"❌ OpenAI API Error ({model_id}): {e}")
+        return f"Error: OpenAI API call failed for {model_id}. Details: {e}"
+
+def _generate_with_gemini_provider(client, model_id, prompt, max_tokens, system_message=None):
+    full_prompt = f"{system_message}\n\n{prompt}" if system_message else prompt
+    try:
+        model = client.GenerativeModel(
+            model_id,
+            safety_settings={
+                'HARM_CATEGORY_HARASSMENT': 'block_none',
+                'HARM_CATEGORY_HATE_SPEECH': 'block_none',
+                'HARM_CATEGORY_SEXUALLY_EXPLICIT': 'block_none',
+                'HARM_CATEGORY_DANGEROUS_CONTENT': 'block_none',
+            },
+            generation_config=client.types.GenerationConfig(temperature=0.7)
+        )
+        response = model.generate_content(full_prompt)
+
+        if response.parts:
+            return "".join(part.text for part in response.parts if hasattr(part, 'text'))
+        elif hasattr(response, 'text') and response.text:
+            return response.text
+        elif response.prompt_feedback.block_reason:
+            reason = response.prompt_feedback.block_reason
+            st.warning(f"Gemini response blocked ({model_id}). Reason: {reason}")
+            return f"Error: Response blocked by API safety filters ({model_id}): {reason}"
+        else:
+            if response.candidates and response.candidates[0].finish_reason != "STOP":
+                st.warning(f"Gemini generation stopped unexpectedly ({model_id}). Reason: {response.candidates[0].finish_reason}")
+                return f"Error: Generation stopped unexpectedly ({model_id}). Reason: {response.candidates[0].finish_reason}"
+            else:
+                st.warning(f"Gemini returned an empty or unexpected response ({model_id}).")
+                return f"Error: Gemini returned an empty response for {model_id}."
+
+    except Exception as e:
+        st.error(f"❌ Gemini SDK error ({model_id}): {e}")
+        error_detail = getattr(e, 'message', str(e))
+        if "API key not valid" in error_detail:
+            return f"Error: Invalid Gemini API Key ({model_id}). Please check your Streamlit secrets."
+        return f"Error: Gemini SDK call failed for {model_id}. Details: {error_detail}"
+
+
+def _generate_with_deepseek_provider(api_key, model_id, prompt, max_tokens, system_message=None):
+    headers = {"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}
+    messages = []
+    if system_message:
+        messages.append({"role": "system", "content": system_message})
+    messages.append({"role": "user", "content": prompt})
+    payload = {
+        "model": model_id,
+        "messages": messages,
+        "temperature": 0.6,
+        "max_tokens": max_tokens
+    }
+    try:
+        response = requests.post("https://api.deepseek.com/chat/completions", headers=headers, json=payload, timeout=90)
+        response.raise_for_status()
+        response_data = response.json()
+        if ("choices" in response_data and response_data["choices"] and
+                "message" in response_data["choices"][0] and
+                "content" in response_data["choices"][0]["message"]):
+            return response_data["choices"][0]["message"]["content"]
+        else:
+            st.warning(f"DeepSeek returned an unexpected response structure ({model_id}): {response_data}")
+            return f"Error: DeepSeek returned an unexpected structure for {model_id}."
+    except requests.exceptions.RequestException as e:
+        st.error(f"❌ DeepSeek API Request Error ({model_id}): {e}")
+        return f"Error: DeepSeek API request failed for {model_id}. Details: {e}"
+    except Exception as e:
+        st.error(f"❌ DeepSeek Error processing response ({model_id}): {e}")
+        return f"Error: DeepSeek processing failed for {model_id}. Details: {e}"
+
+def _generate_with_claude_provider(client, model_id, prompt, max_tokens, system_message=None):
+    try:
+        # Pass `system` only when a system message exists; some SDK/API versions
+        # reject an explicit None for this field.
+        kwargs = {
+            "model": model_id,
+            "max_tokens": max_tokens,
+            "messages": [{"role": "user", "content": prompt}],
+        }
+        if system_message:
+            kwargs["system"] = system_message
+        message = client.messages.create(**kwargs)
+        content = ""
+        if message.content:
+            content = "\n".join([block.text for block in message.content if hasattr(block, "text")])
+        return content
+    except Exception as e:
+        st.error(f"❌ Claude API Error ({model_id}): {e}")
+        if isinstance(e, anthropic.AuthenticationError):
+            return f"Error: Claude authentication failed ({model_id}). Check your API key."
+        return f"Error: Claude API call failed for {model_id}. Details: {e}"
 
+def generate_with_selected_model(selected_model_name, prompt, max_tokens=2000, system_message=None):
+    """Generates text using the chosen model, handling provider specifics."""
+    if not any_secret_loaded or not SUPPORTED_MODELS:
+        st.error("Error: No API keys loaded or models available. Configure secrets.")
+        return None
+
+    if selected_model_name not in SUPPORTED_MODELS:
+        st.error(f"Selected model '{selected_model_name}' is not configured or unavailable.")
+        original_choice = selected_model_name
+        selected_model_name = DEFAULT_MODEL
+        if not selected_model_name:
+            st.error("Fatal: Default model is also unavailable. Cannot proceed.")
+            return None
+        st.warning(f"Falling back from '{original_choice}' to default model: {DEFAULT_MODEL}")
+        st.session_state.model_choice = DEFAULT_MODEL  # Update state on fallback
+
+    model_config = SUPPORTED_MODELS[selected_model_name]
+    provider = model_config["provider"]
+    model_id = model_config["id"]
+    client = model_config.get("client")
+
+    st.info(f"Generating with: **{selected_model_name}**")
+    start_time = time.time()
+    result = f"Error: Provider '{provider}' not implemented."
+
+    try:
+        if provider == "openai":
+            if not client: result = f"Error: OpenAI client not initialized for {selected_model_name}."
+            else: result = _generate_with_openai_provider(client, model_id, prompt, max_tokens, system_message)
+        elif provider == "gemini":
+            if not client: result = f"Error: Gemini client not initialized for {selected_model_name}."
+            else: result = _generate_with_gemini_provider(client, model_id, prompt, max_tokens, system_message)
+        elif provider == "deepseek":
+            if not deepseek_api_key: result = f"Error: DeepSeek API key not available for {selected_model_name}."
+            else: result = _generate_with_deepseek_provider(deepseek_api_key, model_id, prompt, max_tokens, system_message)
+        elif provider == "claude":
+            if not client: result = f"Error: Claude client not initialized for {selected_model_name}."
+            else: result = _generate_with_claude_provider(client, model_id, prompt, max_tokens, system_message)
+    except Exception as e:
+        st.error(f"❌ Unexpected error during generation with {selected_model_name}: {e}")
+        result = f"Error: Unexpected failure during {provider} generation. Details: {e}"
+
+    end_time = time.time()
+    duration = end_time - start_time
+    # st.caption(f"Generation took {duration:.2f} seconds.")  # Less verbose
+
+    if isinstance(result, str) and result.startswith("Error:"):
+        # Error already logged by the provider function
         return None
+    return result
+
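+# Usage sketch (the model name must be a key of SUPPORTED_MODELS; this call is illustrative):
+#   text = generate_with_selected_model("GPT-4o Mini (OpenAI)", "Summarize this idea...", max_tokens=200)
+#   if text is None:
+#       ...  # provider errors were already surfaced via st.error
+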
+# --- Mermaid Diagram Helper ---
+def is_valid_mermaid(code):
+    if not isinstance(code, str): return False
+    # Case-insensitive match so camelCase headers like "sequenceDiagram" are
+    # recognized (lowercasing the input first would make them unmatchable).
+    return bool(re.search(r"^\s*(%%.*?\n)*\s*(graph|flowchart|sequenceDiagram|classDiagram|stateDiagram|erDiagram|gantt|pie|gitGraph)", code.strip(), re.IGNORECASE | re.MULTILINE))
+
+def render_mermaid_diagram(mermaid_code, key):
+    if not isinstance(mermaid_code, str) or not mermaid_code.strip():
+        st.warning(f"Mermaid code is empty or invalid (Key: {key}).")
+        return
+
+    cleaned_code = re.sub(r"^```mermaid\s*\n?", "", mermaid_code, flags=re.IGNORECASE | re.MULTILINE).strip()
+    cleaned_code = re.sub(r"\n?```\s*$", "", cleaned_code).strip()
+
+    if not is_valid_mermaid(cleaned_code):
+        st.warning(f"⚠️ Mermaid diagram might not render correctly (Key: {key}). Check syntax. Displaying raw code.")
+        st.code(cleaned_code, language="mermaid")
+        return
+
+    container_id = f"mermaid-container-{key}"
+    mermaid_id = f"mermaid-{key}"
+
+    components.html(
+        f"""
+        <div id="{container_id}" style="background-color: white; padding: 10px; border-radius: 5px; overflow: auto;">
+            <pre class="mermaid" id="{mermaid_id}">
+{cleaned_code}
+            </pre>
+        </div>
+        <script type="module">
+            try {{
+                const mermaid = (await import('https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs')).default;
+                mermaid.initialize({{ startOnLoad: false, theme: 'default' }});
+                const checkElement = setInterval(() => {{
+                    const el = document.getElementById('{mermaid_id}');
+                    if (el) {{
+                        clearInterval(checkElement);
+                        mermaid.run({{ nodes: [el] }});
+                    }}
+                }}, 100);
+                setTimeout(() => clearInterval(checkElement), 5000);
+            }} catch (e) {{
+                console.error("Mermaid rendering error (Key: {key}):", e);
+                const container = document.getElementById('{container_id}');
+                if(container) container.innerHTML = "<p style='color:red;'>Error rendering Mermaid diagram. Check browser console.</p>";
+            }}
+        </script>
+        """,
+        height=500, scrolling=True,
     )
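+# Illustrative call: render_mermaid_diagram("graph TD\n    A[Start] --> B[Done]", key="demo")
+# renders an inline flowchart; invalid input falls back to the raw st.code display above.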
+
+# ---------- Initialize Session State (Provibe Workflow) ----------
+# Core workflow steps
+if 'current_step' not in st.session_state:
+    st.session_state.current_step = "input_idea"  # input_idea -> refine_idea -> review_idea -> generate_docs -> display_docs
+if 'processing' not in st.session_state:  # General flag for disabling buttons
+    st.session_state.processing = False
+
+# Input data
+if 'initial_product_idea' not in st.session_state:
+    st.session_state.initial_product_idea = ""
+if 'tech_stack_hint' not in st.session_state:
+    st.session_state.tech_stack_hint = ""
+if 'model_choice' not in st.session_state:
+    st.session_state.model_choice = DEFAULT_MODEL
+
+# Refinement stage data (NEW states added here)
+if 'refinement_sub_step' not in st.session_state:
+    # Tracks progress within the 'refine_idea' step.
+    # Possible values: 'generate_questions', 'await_answers', 'generate_final_refinement'
+    st.session_state.refinement_sub_step = 'generate_questions'
+if 'clarifying_questions' not in st.session_state:
+    # Stores the list of questions generated by the AI
+    st.session_state.clarifying_questions = []
+if 'user_answers' not in st.session_state:
+    # Stores user answers as a dictionary mapping question index to answer string
+    st.session_state.user_answers = {}
+
+# Output/Generated data
+if 'refined_idea_content' not in st.session_state:  # Stores the final AI-refined idea (after Q&A)
+    st.session_state.refined_idea_content = None
+if 'confirmed_idea_content' not in st.session_state:  # Stores the user-confirmed/edited idea
+    st.session_state.confirmed_idea_content = None
+if 'selected_docs_to_generate' not in st.session_state:  # Stores user selection for optional docs
+    st.session_state.selected_docs_to_generate = {}
+if 'generated_docs' not in st.session_state:  # Stores content of generated optional docs
+    st.session_state.generated_docs = {}
+
+
+# ---------- Define Document Options (Align with Provibe Output) ----------
+doc_options = {
+    "prd": {
+        "label": "Product Requirements Document (PRD)",
+        "prompt_func": lambda idea, hint: f"""
+# --- PROMPT: Insert your specific PRD generation prompt here ---
+# Example: Write a comprehensive Product Requirements Document (PRD) based strictly on the following confirmed product description. Include sections like Introduction, Goals, Target Audience, Features (with details), User Stories, Design Considerations, Non-Functional Requirements, Open Issues, and Future Considerations. Ensure the PRD is detailed, clear, and actionable for a development team.
+# --- End PRD Prompt ---
+
+**Confirmed Product Description:**
+---
+{idea}
+---
+**Optional Preferences/Hints (Consider if relevant):**
+{hint if hint else "None provided"}
+""",
+        "system_message": "You are an expert Product Manager tasked with writing a detailed and professional PRD.",
+        "max_tokens": 3500,  # Allow more tokens for the PRD
+        "display_func": lambda content, key: st.markdown(content),
+        "download_filename": "prd.md",
+        "mime": "text/markdown",
+    },
+    "user_flow_text": {
+        "label": "User Flow (Text Description)",
+        "prompt_func": lambda idea, hint: f"""
+# --- PROMPT: Insert your specific User Flow (Text) generation prompt here ---
+# Example: Based on the product description below, outline the primary user flow step-by-step, from initial interaction to achieving the core goal. Describe each step clearly.
+# --- End User Flow (Text) Prompt ---
+
+**Product Description:**
+---
+{idea}
+---
+**Preferences/Hints:** {hint if hint else "None provided"}
+""",
+        "system_message": "You are a UX designer describing a key user journey.",
+        "max_tokens": 1000,
+        "display_func": lambda content, key: st.markdown(content),
+        "download_filename": "user_flow.md",
+        "mime": "text/markdown",
+    },
+    "user_flow_mermaid": {
+        "label": "User Flow Diagram (Mermaid)",
+        "prompt_func": lambda idea, hint: f"""
+# --- PROMPT: Insert your specific User Flow (Mermaid) generation prompt here ---
+# Example: Generate a Mermaid flowchart diagram representing the primary user flow for the product described below. Use standard flowchart syntax (graph TD, nodes, arrows). Ensure the diagram is clear and accurately reflects the user journey. Start the code block with ```mermaid and end it with ```. Do not include any other text before or after the code block.
+# --- End User Flow (Mermaid) Prompt ---
+
+**Product Description:**
+---
+{idea}
+---
+**Preferences/Hints:** {hint if hint else "None provided"}
+""",
+        "system_message": "You are an expert in creating Mermaid diagrams, specifically flowcharts for user journeys.",
+        "max_tokens": 1000,
+        "render_func": render_mermaid_diagram,  # Special rendering
+        "code_language": "mermaid",
+        "download_filename": "user_flow_diagram.mmd",
+        "mime": "text/plain",
+    },
+    "frontend_arch": {
+        "label": "Frontend Architecture Notes",
+        "prompt_func": lambda idea, hint: f"""
+# --- PROMPT: Insert your Frontend Architecture prompt here ---
+# Example: Based on the product description and hints, suggest a suitable frontend architecture. Describe key components, recommended libraries/frameworks (consider hints like 'React Native'), state management approach, and potential component breakdown.
+# --- End Frontend Architecture Prompt ---
+
+**Product Description:**
+---
+{idea}
+---
+**Preferences/Hints:** {hint if hint else "None provided"}
+""",
+        "system_message": "You are a frontend architect designing a web/mobile application.",
+        "max_tokens": 1500,
+        "display_func": lambda content, key: st.markdown(content),
+        "download_filename": "frontend_architecture.md",
+        "mime": "text/markdown",
+    },
+    "backend_arch": {
+        "label": "Backend Architecture Notes",
+        "prompt_func": lambda idea, hint: f"""
+# --- PROMPT: Insert your Backend Architecture prompt here ---
+# Example: Based on the product description and hints, propose a backend architecture. Discuss potential API design (e.g., RESTful), choice of language/framework, database considerations (type, scaling), authentication/authorization strategy, and key microservices or modules if applicable.
+# --- End Backend Architecture Prompt ---
+
+**Product Description:**
+---
+{idea}
+---
+**Preferences/Hints:** {hint if hint else "None provided"}
+""",
+        "system_message": "You are a backend/systems architect designing the server-side logic and infrastructure.",
+        "max_tokens": 1500,
+        "display_func": lambda content, key: st.markdown(content),
+        "download_filename": "backend_architecture.md",
+        "mime": "text/markdown",
+    },
+    "system_arch_mermaid": {
+        "label": "System Architecture Diagram (Mermaid)",
+        "prompt_func": lambda idea, hint: f"""
+# --- PROMPT: Insert your System Architecture (Mermaid) prompt here ---
+# Example: Generate a Mermaid diagram illustrating the high-level system architecture for the product described below. Include key components like frontend client, backend API, database, authentication service, and any major third-party integrations mentioned or implied. Use appropriate Mermaid diagram syntax (e.g., graph TD or C4 model elements if suitable). Start the code block with ```mermaid and end it with ```. Do not include any other text before or after the code block.
+# --- End System Architecture (Mermaid) Prompt ---
+
+**Product Description:**
+---
+{idea}
+---
+**Preferences/Hints:** {hint if hint else "None provided"}
+""",
+        "system_message": "You create system architecture diagrams using Mermaid syntax.",
+        "max_tokens": 1000,
+        "render_func": render_mermaid_diagram,
+        "code_language": "mermaid",
+        "download_filename": "system_architecture.mmd",
+        "mime": "text/plain",
+    },
+    "db_schema": {
+        "label": "Database Schema (SQL)",
+        "prompt_func": lambda idea, hint: f"""
+# --- PROMPT: Insert your Database Schema (SQL) prompt here ---
+# Example: Based on the product description, design a preliminary relational database schema. Provide SQL `CREATE TABLE` statements for the primary entities, including relevant columns, data types, primary keys, and foreign key relationships. Assume PostgreSQL syntax unless hints suggest otherwise.
+# --- End Database Schema (SQL) Prompt ---
+
+**Product Description:**
+---
+{idea}
+---
+**Preferences/Hints:** {hint if hint else "None provided"}
+""",
+        "system_message": "You are a database administrator designing a schema.",
+        "max_tokens": 1500,
+        "display_func": lambda content, key: st.code(content, language='sql'),  # Use a code block for SQL
+        "code_language": "sql",
+        "download_filename": "database_schema.sql",
+        "mime": "text/x-sql",
+    },
+    "project_structure": {
+        "label": "Project Folder Structure",
+        "prompt_func": lambda idea, hint: f"""
+# --- PROMPT: Insert your Project Structure prompt here ---
+# Example: Suggest a logical file and folder structure for a project implementing the described product. Consider frontend, backend, shared components, tests, etc., based on the description and any tech stack hints. Present it as a simple tree structure.
+# --- End Project Structure Prompt ---
+
+**Product Description:**
+---
+{idea}
+---
+**Preferences/Hints:** {hint if hint else "None provided"}
+""",
+        "system_message": "You are suggesting a clean project layout for a software development team.",
+        "max_tokens": 800,
+        "display_func": lambda content, key: st.code(content, language='bash'),  # Use a code block for the tree
+        "code_language": "bash",
+        "download_filename": "project_structure.txt",
+        "mime": "text/plain",
+    },
 }
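+# Each entry bundles a prompt builder with display/download config; a sketch of
+# how the generation step below consumes it (arguments are illustrative):
+#   cfg = doc_options["prd"]
+#   prompt = cfg["prompt_func"](confirmed_idea, tech_hint)
+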
+
+# ---------- UI Layout (Provibe Workflow) ----------
+st.set_page_config(layout="wide", page_title="Provibe Prompt Tester")
+st.title("🧪 Provibe Prompt Tester (with Q&A Refinement)")
+st.caption("Test and refine prompts for the Provibe document generation workflow, including interactive Q&A.")
+
+# Display API key errors
+if secret_errors:
+    st.error("API Key Configuration Issues:")
+    for error in secret_errors:
+        st.error(f"- {error}")
+if not any_secret_loaded or not SUPPORTED_MODELS:
+    st.error("No API keys loaded or LLM models available. Configure secrets.")
+    st.stop()
+
+# --- Workflow Steps ---
+
+# ---------- Step 1: Input Initial Idea ----------
+if st.session_state.current_step == "input_idea":
+    st.header("Step 1: Input Product Idea")
+    with st.form(key="idea_form"):
+        initial_idea_input = st.text_area(
+            "💡 Enter the initial product idea:", height=150,
+            value=st.session_state.initial_product_idea,
+            help="The raw concept or description."
+        )
+        tech_hint_input = st.text_input(
+            "⚙️ Optional: Tech Stack Hints or Constraints",
+            placeholder="e.g., Use React, target mobile, needs offline support",
+            value=st.session_state.tech_stack_hint,
+            help="Any preferences to guide AI generation."
+        )
+        available_model_names = list(SUPPORTED_MODELS.keys())
+        default_model_key = st.session_state.get('model_choice', DEFAULT_MODEL)
+        default_index = available_model_names.index(default_model_key) if default_model_key in available_model_names else 0
+        model_choice_input = st.selectbox(
+            "🧠 Choose AI model for all steps:",
+            options=available_model_names,
+            index=default_index,
+            key="model_select",
+            help="This model will be used for refinement and document generation."
+        )
+        submit_idea_button = st.form_submit_button(
+            label="➡️ Start Interactive Refinement",
+            use_container_width=True,
+            disabled=st.session_state.processing
+        )
+
+    if submit_idea_button and initial_idea_input:
+        st.session_state.initial_product_idea = initial_idea_input
+        st.session_state.tech_stack_hint = tech_hint_input
+        st.session_state.model_choice = model_choice_input
+        # Reset states for the refinement process
+        st.session_state.clarifying_questions = []
+        st.session_state.user_answers = {}
+        st.session_state.refined_idea_content = None
+        st.session_state.confirmed_idea_content = None
+        st.session_state.generated_docs = {}
+        st.session_state.selected_docs_to_generate = {}
+        # Set the next step and the initial sub-step for refinement
+        st.session_state.current_step = "refine_idea"
+        st.session_state.refinement_sub_step = "generate_questions"
+        st.session_state.processing = True  # Start processing
+        safe_rerun()
+    elif submit_idea_button:
+        st.warning("Please enter a product idea.")
+
+# ---------- Step 2: Interactive Refinement (Q&A) ----------
+if st.session_state.current_step == "refine_idea":
+    st.header("Step 2: Interactive Idea Refinement")
+
+    # --- Sub-Step 2a: Generate Clarifying Questions ---
+    if st.session_state.refinement_sub_step == "generate_questions":
+        st.info(f"Using **{st.session_state.model_choice}** to generate clarifying questions. Please wait.")
+        with st.spinner("AI is preparing questions..."):
+
+            # --- PROMPT: Define the Question Generation Prompt ---
+            question_gen_prompt = f"""
+# --- PROMPT: Insert your Question Generation prompt here ---
+# Example: Based on the initial product idea and hints below, generate 3-5 specific clarifying questions for the user. These questions should help elicit more detail about key features, target audience, technical constraints, or core functionality needed to write a better product specification. Output *only* the questions, each on a new line, starting with '- '. Do not include numbering or any other text.
+# --- End Question Generation Prompt ---
+
+**Initial Product Idea:**
+---
+{st.session_state.initial_product_idea}
+---
+**Optional Preferences/Hints Provided:**
+{st.session_state.tech_stack_hint if st.session_state.tech_stack_hint else "None provided"}
+"""
+            # --- End Question Generation Prompt ---
+
+            system_message_qa = "You are an AI assistant helping to clarify a product idea by asking relevant questions."
+            max_tokens_qa = 300
+
+            questions_raw = generate_with_selected_model(
+                st.session_state.model_choice,
+                question_gen_prompt,
+                max_tokens=max_tokens_qa,
+                system_message=system_message_qa
+            )
+
+            if questions_raw and not questions_raw.startswith("Error:"):
+                # Parse the questions (assuming one question per line, possibly starting with '- ')
+                st.session_state.clarifying_questions = [
+                    q.strip('- ') for q in questions_raw.strip().split('\n') if q.strip() and q.strip() != '-'
+                ]
+                if st.session_state.clarifying_questions:
+                    st.session_state.user_answers = {i: "" for i in range(len(st.session_state.clarifying_questions))}  # Initialize empty answers
+                    st.session_state.refinement_sub_step = "await_answers"
+                else:
+                    st.warning("The AI generated questions, but they appear empty or incorrectly formatted. Proceeding without Q&A.")
+                    # Fallback: skip Q&A and go directly to final refinement based only on the initial idea
+                    st.session_state.refinement_sub_step = "generate_final_refinement"
+                    st.session_state.clarifying_questions = []  # Ensure it's empty
+                    st.session_state.user_answers = {}
+
+            else:
+                st.error("Failed to generate clarifying questions. Check API errors or model selection.")
+                st.warning("Proceeding to refine based only on the initial idea (skipping Q&A).")
+                st.session_state.refinement_sub_step = "generate_final_refinement"  # Skip Q&A on failure
+                st.session_state.clarifying_questions = []  # Ensure it's empty
+                st.session_state.user_answers = {}
+
+        st.session_state.processing = False  # Done generating questions (or failed)
+        safe_rerun()
+
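+        # Expected raw format from the prompt above (illustrative):
+        #   - Who is the primary target user?
+        #   - Which platforms must be supported at launch?
+        # The parser keeps non-empty lines and strips any leading "- ".
+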
+    # --- Sub-Step 2b: Display Questions and Collect Answers ---
+    elif st.session_state.refinement_sub_step == "await_answers":
+        st.info("Please answer the following questions to help refine the product idea:")
+        with st.form("answers_form"):
+            # Display the generated questions with input fields for answers
+            for i, question in enumerate(st.session_state.clarifying_questions):
+                st.session_state.user_answers[i] = st.text_area(
+                    f"❓ {question}",
+                    key=f"answer_{i}",
+                    value=st.session_state.user_answers.get(i, ""),  # Preserve answers on rerun
+                    height=100
+                )
+
+            submit_answers_button = st.form_submit_button(
+                "➡️ Submit Answers & Generate Refined Description",
+                use_container_width=True,
+                disabled=st.session_state.processing
+            )
+
+        if submit_answers_button:
+            # Basic check that answers are provided (optional)
+            # if not all(st.session_state.user_answers.values()):
+            #     st.warning("Please try to answer all questions for the best result.")
+            # else:
+            st.session_state.refinement_sub_step = "generate_final_refinement"
+            st.session_state.processing = True  # Start final refinement generation
+            safe_rerun()
+
+        # Option to go back
+        if st.button("⬅️ Back to Idea Input (Discard Q&A)", disabled=st.session_state.processing):
+            st.session_state.current_step = "input_idea"
+            # Clear Q&A state
+            st.session_state.clarifying_questions = []
+            st.session_state.user_answers = {}
+            safe_rerun()
+
+
+    # --- Sub-Step 2c: Generate Final Refined Description (using Q&A) ---
+    elif st.session_state.refinement_sub_step == "generate_final_refinement":
+        st.info(f"Using **{st.session_state.model_choice}** to generate the final refined description based on the idea and your answers. Please wait.")
+        with st.spinner("AI is synthesizing the refined description..."):
+
+            # Prepare the Q&A string for the prompt
+            qa_summary = "\n".join([
+                f"Q: {st.session_state.clarifying_questions[i]}\nA: {answer}"
+                for i, answer in st.session_state.user_answers.items() if answer  # Include only answered questions
+            ]) if st.session_state.user_answers else "No questions were answered."
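+            # qa_summary becomes newline-separated "Q: .../A: ..." pairs, e.g.:
+            #   Q: Who is the primary target user?
+            #   A: Indie developers shipping side projects.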
+
+            # --- PROMPT: Define the Final Refinement Prompt (using Q&A) ---
+            final_refinement_prompt = f"""
+# --- PROMPT: Insert your Final Refinement prompt (using Q&A) here ---
+# Example: Based on the initial product idea, user preferences, and the following question-answer pairs, generate a concise yet comprehensive 'Refined Product Description'. Synthesize all the information into a well-structured description covering the core value proposition, key features, target audience, and any clarified technical aspects. This description will be the basis for generating all subsequent documents.
+# --- End Final Refinement Prompt ---
+
+**Initial Product Idea:**
+---
+{st.session_state.initial_product_idea}
+---
+**Optional Preferences/Hints Provided:**
+{st.session_state.tech_stack_hint if st.session_state.tech_stack_hint else "None provided"}
+---
+**Clarifying Questions & User Answers:**
+---
+{qa_summary}
+---
+"""
+            # --- End Final Refinement Prompt ---
+
+            system_message_final_refine = "You are an AI assistant synthesizing information into a final product specification."
+            max_tokens_final_refine = 1500  # Allow slightly more tokens for synthesis
+
+            final_refined_content = generate_with_selected_model(
+                st.session_state.model_choice,
+                final_refinement_prompt,
+                max_tokens=max_tokens_final_refine,
+                system_message=system_message_final_refine
+            )
+
+            if final_refined_content and not final_refined_content.startswith("Error:"):
+                st.session_state.refined_idea_content = final_refined_content
+                st.session_state.current_step = "review_idea"  # Move to the next main step
+            else:
+                st.error("Failed to generate the final refined description.")
+                # An option to retry or go back could be added here
+                st.session_state.current_step = "input_idea"  # Go back on failure
+
+        st.session_state.processing = False  # End processing
+        safe_rerun()
+
+
+# ---------- Step 3: Review and Confirm Final Idea ----------
+if st.session_state.current_step == "review_idea":
+    st.header("Step 3: Review and Confirm Final Refined Idea")
+    if st.session_state.refined_idea_content:
+        st.info("Review the AI's final refined description below (generated using your answers). Edit it as needed. This **final text** will be used to generate all documents.")
+
+        # Display the Q&A for context, if available
+        if st.session_state.clarifying_questions and st.session_state.user_answers:
+            with st.expander("View Q&A used for this refinement"):
+                for i, q in enumerate(st.session_state.clarifying_questions):
+                    st.markdown(f"**Q:** {q}")
+                    st.markdown(f"**A:** {st.session_state.user_answers.get(i, '_No answer_')}")
+                    st.markdown("---")
+
+        edited_idea = st.text_area(
+            "✏️ **Edit Final Refined Description:**",
+            value=st.session_state.refined_idea_content,
+            height=350,
+            key="final_refined_idea_edit_area",
+            help="Make any necessary corrections or additions."
         )
+
+        button_col1, button_col2 = st.columns(2)
+        with button_col1:
+            confirm_button = st.button(
+                "✅ Confirm & Proceed to Generate Docs",
+                key="confirm_final_idea_button",
+                use_container_width=True,
+                disabled=st.session_state.processing
+            )
+        with button_col2:
+            back_button = st.button(
+                "⬅️ Back to Idea Input (Start Over)",
+                key="back_to_input_final_button",
+                use_container_width=True,
+                disabled=st.session_state.processing
+            )
+
+        if confirm_button:
+            if not edited_idea.strip():
+                st.warning("The refined description cannot be empty.")
+            else:
+                st.session_state.confirmed_idea_content = edited_idea
+                # Reset generation states
+                st.session_state.generated_docs = {}
+                st.session_state.selected_docs_to_generate = {k: False for k in doc_options}  # Reset selections
+                st.session_state.current_step = "generate_docs"
+                safe_rerun()
+        if back_button:
+            st.session_state.current_step = "input_idea"
+            # Clear refinement & Q&A state
+            st.session_state.refined_idea_content = None
+            st.session_state.clarifying_questions = []
+            st.session_state.user_answers = {}
+            safe_rerun()
+
+    else:
+        st.error("No refined idea content found. Please go back to Step 1.")
+        if st.button("⬅️ Back to Idea Input"):
+            st.session_state.current_step = "input_idea"
+            safe_rerun()
+
+
+# ---------- Step 4: Select and Generate Documents ----------
+if st.session_state.current_step == "generate_docs":
+    st.header("Step 4: Generate Product Documents")
+    if st.session_state.confirmed_idea_content:
+        st.markdown("**Based on this confirmed final description:**")
+        with st.expander("View Confirmed Description", expanded=False):
+            st.markdown(f"> {st.session_state.confirmed_idea_content}")
+
+        st.subheader("Select Documents to Generate:")
+        num_doc_options = len(doc_options)
+        cols = st.columns(min(num_doc_options, 3))
+        doc_keys = list(doc_options.keys())
+
+        for i, key in enumerate(doc_keys):
+            config = doc_options[key]
+            with cols[i % len(cols)]:  # Cycle over however many columns exist (avoids an IndexError with fewer than 3 options)
+                if key not in st.session_state.selected_docs_to_generate:
+                    st.session_state.selected_docs_to_generate[key] = False
+                st.session_state.selected_docs_to_generate[key] = st.checkbox(
+                    config["label"],
+                    value=st.session_state.selected_docs_to_generate.get(key, False),
+                    key=f"checkbox_{key}",
+                    disabled=st.session_state.processing
+                )
+
+        generate_button = st.button(
+            "🚀 Generate Selected Documents",
+            key="generate_docs_button",
+            use_container_width=True,
+            disabled=st.session_state.processing
+        )
+        back_to_review_button = st.button(
+            "⬅️ Back to Review Final Idea",
+            key="back_to_review_final_button",
+            use_container_width=True,
+            disabled=st.session_state.processing
+        )
+
+        if generate_button:
+            selected_keys = [k for k, v in st.session_state.selected_docs_to_generate.items() if v]
+            if not selected_keys:
+                st.warning("Please select at least one document type to generate.")
+            else:
+                st.session_state.processing = True
+                st.session_state.generated_docs = {}
+                st.info(f"⏳ Generating {len(selected_keys)} selected document(s) using {st.session_state.model_choice}...")
+                progress_bar = st.progress(0)
+                generation_successful = True
+
+                for i, key in enumerate(selected_keys):
+                    config = doc_options[key]
+                    st.write(f" - Generating {config['label']}...")
+                    with st.spinner(f"AI processing {config['label']}..."):
+                        prompt = config["prompt_func"](
+                            st.session_state.confirmed_idea_content,
+                            st.session_state.tech_stack_hint
+                        )
+                        system_msg = config.get("system_message")
+                        max_tok = config.get("max_tokens", 2000)
+
+                        content = generate_with_selected_model(
+                            st.session_state.model_choice,
+                            prompt,
+                            max_tokens=max_tok,
+                            system_message=system_msg
+                        )
+
+                        if content and not content.startswith("Error:"):
+                            st.session_state.generated_docs[key] = content
+                        else:
+                            st.session_state.generated_docs[key] = f"Error: Failed to generate {config['label']}."
+                            generation_successful = False
+                            st.error(f" - Failed to generate {config['label']}. See logs above.")
+                    progress_bar.progress((i + 1) / len(selected_keys))
+                    time.sleep(0.1)
+
+                progress_bar.empty()
+                st.session_state.processing = False
+
+                if generation_successful:
+                    st.success("✅ Document generation complete!")
+                else:
+                    st.warning("⚠️ Some documents could not be generated.")
+
+                st.session_state.current_step = "display_docs"
+                safe_rerun()
+
+        if back_to_review_button:
+            st.session_state.current_step = "review_idea"
+            # Keep the confirmed idea but allow editing again;
+            # refined_idea_content still holds the pre-edit content.
+            safe_rerun()
+
+    else:
+        st.error("Confirmed idea content is missing. Please restart the process from Step 1.")
+        if st.button("⬅️ Restart Process"):
+            # Reset key states
+            st.session_state.current_step = "input_idea"
+            st.session_state.initial_product_idea = ""
+            st.session_state.tech_stack_hint = ""
+            st.session_state.refined_idea_content = None
+            st.session_state.confirmed_idea_content = None
+            st.session_state.clarifying_questions = []
+            st.session_state.user_answers = {}
+            st.session_state.generated_docs = {}
+            st.session_state.selected_docs_to_generate = {}
+            safe_rerun()
+
+
+# ---------- Step 5: Display Generated Documents ----------
+if st.session_state.current_step == "display_docs":
+    st.header("Step 5: Generated Documents")
+
+    if not st.session_state.generated_docs:
+        st.info("No documents were generated in the previous step.")
+    else:
+        st.markdown("**Review the generated documents below:**")
+        display_order = [key for key in doc_options if key in st.session_state.generated_docs]
+
+        for key in display_order:
+            content = st.session_state.generated_docs.get(key)
+            if content:
+                config = doc_options[key]
+                st.subheader(f"📄 {config['label']}")
+                is_error = isinstance(content, str) and content.startswith("Error:")
+
+                if is_error:
+                    st.error(content)
+                else:
+                    # Display/render the content
+                    if config.get("render_func"):
+                        try:
+                            config["render_func"](content, key=f"render_{key}")
+                        except Exception as e:
+                            st.error(f"Render Error: {e}")
+                            st.code(content)
+                    elif config.get("display_func"):
+                        try:
+                            config["display_func"](content, key=f"display_{key}")
+                        except Exception as e:
+                            st.error(f"Display Error: {e}")
+                            st.text(content)
+                    else:
+                        st.markdown(content)
+
+                # Download button
+                try:
+                    download_data = content.encode('utf-8') if isinstance(content, str) else str(content).encode('utf-8')
+                    st.download_button(
+                        label=f"📥 Download {config['label']}", data=download_data,
+                        file_name=config["download_filename"], mime=config.get("mime", "text/plain"),
+                        key=f"download_{key}"
+                    )
+                except Exception as e:
+                    st.warning(f"Download Error: {e}")
+
+                # Option to show raw content
+                if config.get("render_func") or config.get("code_language"):
+                    if st.checkbox(f"🔍 Show raw content for {config['label']}", key=f"show_raw_{key}", value=False):
+                        st.code(content, language=config.get("code_language", None))
+
+                st.markdown("---")
+
+        # Navigation buttons
+        button_col1, button_col2 = st.columns(2)
+        with button_col1:
+            generate_more_button = st.button("🔄 Generate Different Documents", key="generate_more_button", use_container_width=True)
+        with button_col2:
+            restart_all_button = st.button("⏪ Start New Idea", key="restart_all_button", use_container_width=True)
+
+        if generate_more_button:
+            st.session_state.current_step = "generate_docs"
+            st.session_state.generated_docs = {}
+            safe_rerun()
+
+        if restart_all_button:
+            st.session_state.current_step = "input_idea"
+            st.session_state.initial_product_idea = ""
+            st.session_state.tech_stack_hint = ""
+            st.session_state.refined_idea_content = None
+            st.session_state.confirmed_idea_content = None
+            st.session_state.clarifying_questions = []
+            st.session_state.user_answers = {}
+            st.session_state.generated_docs = {}
+            st.session_state.selected_docs_to_generate = {}
+            safe_rerun()
+
+
+# ---------- Footer ----------
+st.markdown("---")
+footer_model_choice = st.session_state.get('model_choice', 'N/A')
+st.caption(f"Using model: **{footer_model_choice}** | Workflow Step: **{st.session_state.get('current_step', 'N/A')}**"
+           f"{' (Sub-step: ' + st.session_state.get('refinement_sub_step', 'N/A') + ')' if st.session_state.get('current_step') == 'refine_idea' else ''}")
1023