Ashhar committed
Commit · c1f566c · 1 Parent: 7c9530c
"story db + prompt changes"

Files changed:
- .streamlit/config.toml    +2   -0
- app.py                    +60  -302
- constants.py              +157 -0
- data/storiesDb.py         +4   -1
- pages/popular-stories.py  +61  -0
- utils.py                  +62  -0
.streamlit/config.toml  ADDED

@@ -0,0 +1,2 @@
+[client]
+showSidebarNavigation = false
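Note: `showSidebarNavigation = false` hides Streamlit's auto-generated multipage sidebar, so moving between `app.py` and `pages/popular-stories.py` has to be driven from code. A minimal sketch of that pattern (illustrative only, assuming a Streamlit version that provides `st.switch_page`, i.e. 1.30+; the button labels are hypothetical):

```python
# Minimal sketch: with showSidebarNavigation = false, navigation is programmatic.
import streamlit as st

if st.button("Browse popular stories"):
    # Page paths are given relative to the entry file, matching this repo's layout.
    st.switch_page("pages/popular-stories.py")

if st.button("Back to the story creator"):
    st.switch_page("app.py")
```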
app.py  CHANGED

@@ -1,14 +1,13 @@
 import streamlit as st
 import os
-import datetime as DT
-import pytz
 import time
 import json
 import re
 from typing import List, Literal, TypedDict, Tuple
 from transformers import AutoTokenizer
 from gradio_client import Client
-
+import constants as C
+import utils as U
 
 from openai import OpenAI
 import anthropic
@@ -25,151 +24,7 @@ ModelConfig = TypedDict("ModelConfig", {
     "tokenizer": AutoTokenizer
 })
 
-modelType: ModelType = os.environ.get("MODEL_TYPE") or "
-modelType: ModelType = "CLAUDE"
-
-JSON_SEPARATOR = ">>>>"
-EXCEPTION_KEYWORD = "<<EXCEPTION>>"
-
-SYSTEM_MSG = f"""
-=> Context:
-You're an storytelling assistant who guides users through four phases of narrative development, helping them craft compelling personal or professional stories.
-The story created should be in simple language, yet evoke great emotions.
-
------
-=> Key Points:
-Ask one question at a time, give the options in a numbered and well formatted manner in different lines.
-Summarise options chosen so far in each step.
-Every response should have a question unless it's the end of flow.
-
------
-=> Format & Syntax:
-Whenever any of the below rules are satisfied, then append your FINAL response with this exact keyword "{JSON_SEPARATOR}", and only AFTER this, append with a JSON described in the matching rule below.
-Apply at most one rule at a time, the most relevant one.
-Do not write anything after the JSON
-
-- Rule 1: If your response has multiple numbered options to choose from, append JSON in this format (alway check for this rule):
-```
-{{
-    "options": [{{ "id": "1", "label": "Option 1"}}, {{ "id": "2", "label": "Option 2"}}]
-}}
-```
-Do not write "Choose one of the options below:"
-Keep options to less than 9.
-
-- Rule 2: If the USER has chosen to adapt a well known story, append this JSON:
-```
-{{
-    "action": "SHOW_STORY_DATABASE"
-}}
-```
-------
-=> Task Definition:
-You take the user through a flow of questions as defined below. You'll navigate the user through three tiers, moving closer to the final story.
-Before giving any response, make sure to evaluate the "Format" rules described above.
-
-## Tier 1: Story Creation
-You initiate the storytelling process through a series of engaging prompts:
-
-#### Story Origin:
-- Asks users to choose between personal anecdotes or adapting a well-known real story
-- If they choose to choose to adapt from a well known story, show them a database of the stories to choose from
-
-#### Story Use Case:
-Asks users to define the purpose of building a story. It can be one of the following (provide very short description for each):
-- Personal Branding: To create a narrative that highlights an individual's unique experiences, skills, and values for use in professional networking, job applications, or personal websites.
-- Company Origin: To craft a compelling narrative about how a company or organization was founded, its mission, and key milestones for use in marketing materials or investor presentations.
-- Product Launch: To develop an engaging narrative around a new product or service, focusing on the problem it solves and its unique value proposition for use in marketing campaigns or sales pitches.
-- Customer Success / Testimonials: To showcase how a product or service has positively impacted a customer's life or business, creating a relatable narrative for potential customers.
-- Team Building: To create a shared narrative that reinforces company values, promotes team cohesion, or introduces new team members, for use in internal communications or team-building exercises.
-
-#### Story Time Frame:
-Allows story selection from various life stages (childhood, mid-career, recent experiences).
-Or Age-wise (below 8, 8-13, 13-15 and so on).
-
-#### Story Focus:
-Prompts users to select behaviours or leadership qualities to highlight in the story. Allow users to choose upto 3-5 qualities.
-- Resourcefulness (ability to find creative solutions)
-- Sincerity (genuine and honest in intentions and words)
-- Decisiveness (ability to make firm and timely decisions)
-- Kindness (concern and compassion for others' well-being)
-- Ambition (drive to achieve goals and succeed)
-- Patience (ability to endure difficult situations calmly)
-- Boldness (willingness to take risks and speak up)
-- Fairness (commitment to justice and equal treatment)
-- Proactive (taking initiative and anticipating challenges)
-
-#### Story Type:
-Prompts users to select the kind of story they want to tell:
-- Where we came from: A founding Story
-- Why we can't stay here: A case-for-change story
-- Where we're going: A vision story
-- How we're going to get there: A strategy story
-- Why I lead the way I do: Leadership philosophy story
-- Why you should want to work here: A rallying story
-- Personal stories: Who you are, what you do, how you do it, and who you do it for
-- What we believe: A story about values
-- Who we serve: A customer story
-- What we do for our customers: A sales story
-- How we're different: A marketing story
-
-#### Guided Storytelling Framework:
-You then lead users through a structured narrative development via the following prompts:
-- Describe the day it happened
-- What was the Call to Action / Invitation
-- Describing the obstacles (up to three) in 4 lines
-- Exploring emotions/fears experienced during the incident
-- Recognize the helpers / any objects of help in the incident
-- Detailing the resolution / Reaching the final goal
-- Reflecting on personal growth or lessons learned (What did you do that changed your life forever?)
-
-Now, show the story created so far using Story-Spine structure as the default style, and then ask for confirmation before proceeding to the next tier.
-If the user has any suggestions, incorporate them and then show the story again.
-
-
-## Tier 2: Story Enhancement
-#### After initial story creation, you offer congratulations on completing the first draft and gives 2 options:
-Option 1 - Provides option for one-on-one sessions with expert storytelling coaches - the booking can be done that at https://calendly.com/
-Options 2 - Provides further options for introducing users to more sophisticated narratives.
-
-#### If Option 2 chosen, show these options with simple explanation and chose one.
-You take the story and integrates it into different options of storytelling narrative structure:
-The Story Hanger
-The Story Spine
-Hero's Journey
-Beginning to End / Beginning to End
-In Media Res (Start the story in the middle)
-Nested Loops
-The Cliffhanger
-
-After taking user's preference, you show the final story and ask for confirmation before moving to the next tier.
-Allow them to iterate over different narratives to see what fits best for them.
-
-
-## Tier 3: Story Polishing
-The final phase focuses on refining the narrative further:
-- You add suggestions to the story:
-- Impactful quotes/poems / similes/comparisons
-
-#### Creative enhancements:
-- Some lines or descriptions for inspiration
-- Tips for maximising emotional resonance and memorability
-
-By guiding users through these three tiers, you aim to cater to novice storytellers, offering a comprehensive platform for narrative skill development through its adaptive approach.
-You end it with the final story and seek any suggestions from the user to refine the story further.
-
-Once the user confirms, you congratulate them with emojis on completing the story and provide the final story in a beatifully formatted manner.
-Note that the final story should include twist, turns and events that make it really engaging and enjoyable to read.
-
-"""
-
-USER_ICON = "icons/man.png"
-AI_ICON = "icons/Kommuneity.png"
-IMAGE_LOADER = "icons/Wedges.svg"
-TEXT_LOADER = "icons/balls.svg"
-DB_LOADER = "icons/db_loader.svg"
-START_MSG = "I want to create a story 😊"
-
+modelType: ModelType = os.environ.get("MODEL_TYPE") or "CLAUDE"
 
 MODEL_CONFIG: dict[ModelType, ModelConfig] = {
     "GPT4": {
@@ -208,60 +63,12 @@ def __countTokens(text):
 
 st.set_page_config(
     page_title="Kommuneity Story Creator",
-    page_icon=AI_ICON,
+    page_icon=C.AI_ICON,
     # menu_items={"About": None}
 )
-ipAddress = st.context.headers.get("x-forwarded-for")
-
-
-def __nowInIST() -> DT.datetime:
-    return DT.datetime.now(pytz.timezone("Asia/Kolkata"))
-
-
-def pprint(log: str):
-    now = __nowInIST()
-    now = now.strftime("%Y-%m-%d %H:%M:%S")
-    print(f"[{now}] [{ipAddress}] {log}")
 
-
-pprint("\n")
-pprint("\n")
-
-st.markdown(
-    """
-    <style>
-    @keyframes blinker {
-        0% {
-            opacity: 1;
-        }
-        50% {
-            opacity: 0.2;
-        }
-        100% {
-            opacity: 1;
-        }
-    }
-
-    .blinking {
-        animation: blinker 3s ease-out infinite;
-    }
-
-    .code {
-        color: green;
-        border-radius: 3px;
-        padding: 2px 4px; /* Padding around the text */
-        font-family: 'Courier New', Courier, monospace; /* Monospace font */
-    }
-
-    div[aria-label="dialog"] {
-        width: 80vw;
-        height: 600px;
-    }
-
-    </style>
-    """,
-    unsafe_allow_html=True
-)
+U.pprint("\n")
+U.pprint("\n")
 
 
 def __isInvalidResponse(response: str):
@@ -278,17 +85,17 @@ def __isInvalidResponse(response: str):
         return True
 
     # LLM API threw exception
-    if EXCEPTION_KEYWORD in response:
+    if C.EXCEPTION_KEYWORD in response:
         return True
 
     # json response without json separator
-    if ('{\n "options"' in response) and (JSON_SEPARATOR not in response):
+    if ('{\n "options"' in response) and (C.JSON_SEPARATOR not in response):
         return True
-    if ('{\n "action"' in response) and (JSON_SEPARATOR not in response):
+    if ('{\n "action"' in response) and (C.JSON_SEPARATOR not in response):
         return True
 
     # only options with no text
-    if response.startswith(JSON_SEPARATOR):
+    if response.startswith(C.JSON_SEPARATOR):
         return True
 
 
@@ -299,22 +106,14 @@ def __matchingKeywordsCount(keywords: List[str], text: str):
     ])
 
 
-def __isStringNumber(s: str) -> bool:
-    try:
-        float(s)
-        return True
-    except ValueError:
-        return False
-
-
 def __getRawImagePromptDetails(prompt: str, response: str) -> Tuple[str, str, str]:
     regex = r'[^a-z0-9 \n\.\-]|((the) +)'
 
     cleanedResponse = re.sub(regex, '', response.lower())
-    pprint(f"{cleanedResponse=}")
+    U.pprint(f"{cleanedResponse=}")
 
     cleanedPrompt = re.sub(regex, '', prompt.lower())
-    pprint(f"{cleanedPrompt=}")
+    U.pprint(f"{cleanedPrompt=}")
 
     if (
         __matchingKeywordsCount(
@@ -325,7 +124,7 @@ def __getRawImagePromptDetails(prompt: str, response: str) -> Tuple[str, str, st
     ):
         return (
            f"Extract the name of selected story from this text and add few more details about this story:\n{response}",
-            "Effect: bokeh",
+            "Effect: dramatic, bokeh",
            "Painting your character ...",
         )
@@ -354,7 +153,7 @@ def __getRawImagePromptDetails(prompt: str, response: str) -> Tuple[str, str, st
     possibleStoryEndIdx = [response.find("tier 2"), response.find("tier-2")]
     storyEndIdx = max(possibleStoryEndIdx)
     relevantResponse = response[:storyEndIdx]
-    pprint(f"{relevantResponse=}")
+    U.pprint(f"{relevantResponse=}")
     return (
         "Extract the story plot from this text:\n{response}",
         """
@@ -373,10 +172,10 @@ def __getImagePromptDetails(prompt: str, response: str):
     (enhancePrompt, imagePrompt, loaderText) = __getRawImagePromptDetails(prompt, response)
 
     if imagePrompt or enhancePrompt:
-        pprint(f"[Raw] {enhancePrompt=} | {imagePrompt=}")
+        U.pprint(f"[Raw] {enhancePrompt=} | {imagePrompt=}")
 
         promptEnhanceModelType: ModelType = "LLAMA"
-        pprint(f"{promptEnhanceModelType=}")
+        U.pprint(f"{promptEnhanceModelType=}")
 
         modelConfig = MODEL_CONFIG[promptEnhanceModelType]
         client = modelConfig["client"]
@@ -392,7 +191,7 @@ def __getImagePromptDetails(prompt: str, response: str):
                 "role": "user",
                 "content": f"{promptPrefix} create a prompt for image generation (limit to less than 500 words)\n\n{imagePrompt}"
             }],
-            "temperature":
+            "temperature": 1,
             "max_tokens": 2000
         }
 
@@ -409,19 +208,19 @@ def __getImagePromptDetails(prompt: str, response: str):
         responseMessage = response.choices[0].message
         imagePrompt = responseMessage.content
 
-        pprint(f"[Enhanced] {imagePrompt=}")
+        U.pprint(f"[Enhanced] {imagePrompt=}")
 
     return (imagePrompt, loaderText)
 
 
 def __getMessages():
     def getContextSize():
-        currContextSize = __countTokens(SYSTEM_MSG) + __countTokens(st.session_state.messages) + 100
-        pprint(f"{currContextSize=}")
+        currContextSize = __countTokens(C.SYSTEM_MSG) + __countTokens(st.session_state.messages) + 100
+        U.pprint(f"{currContextSize=}")
         return currContextSize
 
     while getContextSize() > MAX_CONTEXT:
-        pprint("Context size exceeded, removing first message")
+        U.pprint("Context size exceeded, removing first message")
         st.session_state.messages.pop(0)
 
     return st.session_state.messages
@@ -429,8 +228,8 @@ def __getMessages():
 
 def __logLlmRequest(messagesFormatted: list):
     contextSize = __countTokens(messagesFormatted)
-    pprint(f"{contextSize=} | {MODEL}")
-    # pprint(f"{messagesFormatted=}")
+    U.pprint(f"{contextSize=} | {MODEL}")
+    # U.pprint(f"{messagesFormatted=}")
 
 
 def predict():
@@ -444,14 +243,15 @@ def predict():
         with client.messages.stream(
             model=MODEL,
             messages=messagesFormatted,
-
+            temperature=0.6,
+            system=C.SYSTEM_MSG,
             max_tokens=4000,
         ) as stream:
             for text in stream.text_stream:
                 yield text
     else:
         messagesFormatted.append(
-            {"role": "system", "content": SYSTEM_MSG}
+            {"role": "system", "content": C.SYSTEM_MSG}
         )
         messagesFormatted.extend(__getMessages())
         __logLlmRequest(messagesFormatted)
@@ -467,14 +267,14 @@ def predict():
            for chunk in response:
                choices = chunk.choices
                if not choices:
-                    pprint("Empty chunk")
+                    U.pprint("Empty chunk")
                    continue
                chunkContent = chunk.choices[0].delta.content
                if chunkContent:
                    yield chunkContent
        except Exception as e:
-            pprint(f"LLM API Error: {e}")
-            yield EXCEPTION_KEYWORD
+            U.pprint(f"LLM API Error: {e}")
+            yield C.EXCEPTION_KEYWORD
 
 
 def __generateImage(prompt: str):
@@ -488,10 +288,11 @@ def __generateImage(prompt: str):
         num_inference_steps=4,
         api_name="/infer"
     )
-    pprint(f"imageResult={result}")
+    U.pprint(f"imageResult={result}")
     return result
 
 
+U.applyCommonStyles()
 st.title("Kommuneity Story Creator 🪄")
 
 
@@ -500,7 +301,7 @@ def __resetButtonState():
 
 
 def __resetSelectedStory():
-    st.session_state.selectedStory =
+    st.session_state.selectedStory = {}
 
 
 def __setStartMsg(msg):
@@ -517,121 +318,78 @@ if "buttonValue" not in st.session_state:
     __resetButtonState()
 
 if "selectedStory" not in st.session_state:
-
-
-if "startMsg" not in st.session_state:
-    st.session_state.startMsg = ""
-st.button(START_MSG, on_click=lambda: __setStartMsg(START_MSG))
-
-if "showStoryDbDialog" not in st.session_state:
-    st.session_state.showStoryDbDialog = False
-
-
-def __disableStoryDbDialog():
-    if st.session_state.showStoryDbDialog:
-        st.session_state.showStoryDbDialog = False
-        st.rerun()
-
-
-def __enableStoryDbDialog():
-    if not st.session_state.showStoryDbDialog:
-        st.session_state.showStoryDbDialog = True
-        st.rerun()
+    __resetSelectedStory()
 
+if "storyChosen" not in st.session_state:
+    st.session_state.storyChosen = False
 
-if st.session_state
-
-
-    storyPlaceholder = st.empty()
-    col1, col2, col3 = storyPlaceholder.columns([1, 1, 1])
-    col2.image(DB_LOADER)
-    col2.write(
-        """
-        <div class='blinking code'>
-            Loading from database ...
-        </div>
-        """,
-        unsafe_allow_html=True
-    )
+if "startMsg" not in st.session_state:
+    __setStartMsg("")
+st.button(C.START_MSG, on_click=lambda: __setStartMsg(C.START_MSG))
 
-    stories = storiesDb.getAllStories()
-    with storyPlaceholder.container(border=False, height=500):
-        for idx, story in enumerate(stories):
-            storyTitle = story['Story Title']
-            storyDetails = story['Story Text']
-            with st.expander(storyTitle):
-                st.markdown(storyDetails)
-                if st.button(
-                    "Select",
-                    key=f"select_{idx}",
-                    type="primary",
-                    use_container_width=True
-                ):
-                    st.session_state.selectedStory = storyTitle
-                    __disableStoryDbDialog()
-
-__openStoryDbDialog()
 
 for chat in st.session_state.chatHistory:
     role = chat["role"]
     content = chat["content"]
     imagePath = chat.get("image")
-    avatar = AI_ICON if role == "assistant" else USER_ICON
+    avatar = C.AI_ICON if role == "assistant" else C.USER_ICON
     with st.chat_message(role, avatar=avatar):
         st.markdown(content)
         if imagePath:
            st.image(imagePath)
 
+# U.pprint(f"{st.session_state.buttonValue=}")
+# U.pprint(f"{st.session_state.selectedStory=}")
+# U.pprint(f"{st.session_state.startMsg=}")
 
 if prompt := (
     st.chat_input()
     or st.session_state["buttonValue"]
-    or st.session_state["selectedStory"]
+    or st.session_state["selectedStory"].get("title")
     or st.session_state["startMsg"]
 ):
     __resetButtonState()
     __resetSelectedStory()
     __setStartMsg("")
-    __disableStoryDbDialog()
 
-    with st.chat_message("user", avatar=USER_ICON):
+    with st.chat_message("user", avatar=C.USER_ICON):
         st.markdown(prompt)
-        pprint(f"{prompt=}")
+        U.pprint(f"{prompt=}")
     st.session_state.chatHistory.append({"role": "user", "content": prompt })
     st.session_state.messages.append({"role": "user", "content": prompt})
 
-    with st.chat_message("assistant", avatar=AI_ICON):
+    with st.chat_message("assistant", avatar=C.AI_ICON):
         responseContainer = st.empty()
 
         def __printAndGetResponse():
            response = ""
-            responseContainer.image(TEXT_LOADER)
+            responseContainer.image(C.TEXT_LOADER)
            responseGenerator = predict()
 
            for chunk in responseGenerator:
                response += chunk
                if __isInvalidResponse(response):
-                    pprint(f"InvalidResponse={response}")
+                    U.pprint(f"InvalidResponse={response}")
                    return
 
-                if JSON_SEPARATOR not in response:
+                if C.JSON_SEPARATOR not in response:
                    responseContainer.markdown(response)
 
            return response
 
        response = __printAndGetResponse()
        while not response:
-            pprint("Empty response. Retrying..")
-            time.sleep(0.
+            U.pprint("Empty response. Retrying..")
+            time.sleep(0.7)
            response = __printAndGetResponse()
 
-        pprint(f"{response=}")
+        U.pprint(f"{response=}")
 
        def selectButton(optionLabel):
            st.session_state["buttonValue"] = optionLabel
-            pprint(f"Selected: {optionLabel}")
+            U.pprint(f"Selected: {optionLabel}")
 
-        responseParts = response.split(JSON_SEPARATOR)
+        responseParts = response.split(C.JSON_SEPARATOR)
 
        jsonStr = None
        if len(responseParts) > 1:
@@ -652,11 +410,11 @@ if prompt := (
                        unsafe_allow_html=True
                    )
                    # imgContainer.markdown(f"`{loaderText}`")
-                    imgContainer.image(IMAGE_LOADER)
+                    imgContainer.image(C.IMAGE_LOADER)
                    (imagePath, seed) = __generateImage(imagePrompt)
                    imageContainer.image(imagePath)
                except Exception as e:
-                    pprint(e)
+                    U.pprint(e)
                    imageContainer.empty()
 
            st.session_state.chatHistory.append({
@@ -684,9 +442,9 @@ if prompt := (
                        on_click=lambda label=option["label"]: selectButton(label)
                    )
                elif action:
-
-
+                    U.pprint(f"{action=}")
+                    if action == "SHOW_STORY_DATABASE" and not st.session_state.storyChosen:
+                        st.switch_page("pages/popular-stories.py")
                    # st.code(jsonStr, language="json")
        except Exception as e:
-            pprint(e)
-
+            U.pprint(e)
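Note on the response contract: the system prompt tells the model to append the literal keyword `">>>>"` (`C.JSON_SEPARATOR`) followed by a JSON payload whenever it offers options or triggers the story database, and `app.py` splits on that keyword (`response.split(C.JSON_SEPARATOR)`). A standalone sketch of that parsing step, for illustration only (the repo's actual handling sits inside the `if len(responseParts) > 1:` branch above):

```python
# Illustrative sketch of the JSON_SEPARATOR contract, not the repo's exact code.
import json

JSON_SEPARATOR = ">>>>"

def splitResponse(response: str):
    """Split an LLM reply into display text and an optional trailing JSON payload."""
    parts = response.split(JSON_SEPARATOR)
    text = parts[0].strip()
    payload = None
    if len(parts) > 1:
        try:
            payload = json.loads(parts[1])
        except json.JSONDecodeError:
            payload = None  # malformed JSON is treated as "no payload"
    return text, payload

text, payload = splitResponse(
    'Pick one:\n1. A\n2. B\n>>>>\n{"options": [{"id": "1", "label": "A"}]}'
)
# text -> the visible markdown; payload -> {"options": [...]} or {"action": "SHOW_STORY_DATABASE"}
```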
constants.py  ADDED

@@ -0,0 +1,157 @@
+JSON_SEPARATOR = ">>>>"
+EXCEPTION_KEYWORD = "<<EXCEPTION>>"
+
+SYSTEM_MSG = f"""
+=> Context:
+You're an storytelling assistant who guides users through four phases of narrative development, helping them craft compelling personal or professional stories.
+The story created should be in simple language, yet evoke great emotions.
+
+-----
+=> Key Points:
+Ask one question at a time, give the options in a numbered and well formatted manner in different lines.
+Summarise options chosen so far in each step.
+Every response should have a question unless it's the end of flow.
+Don't warn about copyright and intellectual property rights.
+Feel free to use emojis if it helps.
+
+-----
+=> Format & Syntax:
+- Rule 1:
+If your response has multiple numbered options to choose from, then append this exact keyword "{JSON_SEPARATOR}" to your FINAL response, and only AFTER this, append a JSON in this format (alway check for this rule):
+Keep options to less than 9.
+```
+{{
+    "options": [{{ "id": "1", "label": "Option 1"}}, {{ "id": "2", "label": "Option 2"}}]
+}}
+```
+Do not write anything after the JSON.
+Keep options to less than 9.
+
+- Rule 2: If the USER has chosen the option to adapt a well known story in the MOST RECENT message, then append this exact keyword "{JSON_SEPARATOR}" at the end of your FINAL answer.
+And only AFTER this, append a JSON in this format (apply this rule only once in the full conversation):
+```
+{{
+    "action": "SHOW_STORY_DATABASE"
+}}
+```
+Do not write anything after the JSON.
+
+------
+=> Task Definition:
+You take the user through a flow of questions as defined below. You'll navigate the user through three tiers, moving closer to the final story.
+Before giving any response, make sure to evaluate the "Format" rules described above.
+
+## Tier 1: Story Creation
+You initiate the storytelling process through a series of engaging prompts:
+
+#### Story Origin:
+- Asks users to choose between personal anecdotes or adapting a well-known real-life story
+- If they choose to choose to adapt from a well known story, ask the user which story they would want to adapt
+
+#### Story Use Case:
+Asks users to define the purpose of building a story. It can be one of the following (provide very short description for each):
+- Personal Branding: To create a narrative that highlights an individual's unique experiences, skills, and values for use in professional networking, job applications, or personal websites.
+- Company Origin: To craft a compelling narrative about how a company or organization was founded, its mission, and key milestones for use in marketing materials or investor presentations.
+- Product Launch: To develop an engaging narrative around a new product or service, focusing on the problem it solves and its unique value proposition for use in marketing campaigns or sales pitches.
+- Customer Success / Testimonials: To showcase how a product or service has positively impacted a customer's life or business, creating a relatable narrative for potential customers.
+- Team Building: To create a shared narrative that reinforces company values, promotes team cohesion, or introduces new team members, for use in internal communications or team-building exercises.
+
+#### Story Time Frame:
+Allows story selection from various life stages (childhood, mid-career, recent experiences).
+Or Age-wise (below 8, 8-13, 13-15 and so on).
+
+#### Story Focus:
+Prompts users to select behaviours or leadership qualities to highlight in the story. Allow users to choose upto 3 qualities.
+- Resourcefulness (ability to find creative solutions)
+- Sincerity (genuine and honest in intentions and words)
+- Decisiveness (ability to make firm and timely decisions)
+- Kindness (concern and compassion for others' well-being)
+- Ambition (drive to achieve goals and succeed)
+- Patience (ability to endure difficult situations calmly)
+- Boldness (willingness to take risks and speak up)
+- Fairness (commitment to justice and equal treatment)
+- Proactive (taking initiative and anticipating challenges)
+
+#### Story Type:
+Prompts users to select the kind of story they want to tell:
+- Where we came from: A founding Story
+- Why we can't stay here: A case-for-change story
+- Where we're going: A vision story
+- How we're going to get there: A strategy story
+- Why I lead the way I do: Leadership philosophy story
+- Why you should want to work here: A rallying story
+- Personal stories: Who you are, what you do, how you do it, and who you do it for
+- What we believe: A story about values
+- Who we serve: A customer story
+- What we do for our customers: A sales story
+- How we're different: A marketing story
+
+#### Guided Storytelling Framework:
+You then lead users through a structured narrative development via the following prompts, one by one:
+- Describe the day it happened
+- What was the Call to Action / Invitation
+- Describing the obstacles (up to three) in 4 lines
+- Exploring emotions/fears experienced during the incident
+- Recognize the helpers / any objects of help in the incident
+- Detailing the resolution / Reaching the final goal
+- Reflecting on personal growth or lessons learned (What did you do that changed your life forever?)
+
+#### Now, show the story created so far using Story-Spine structure as the default style, and then ask for confirmation before proceeding to the next tier.
+If the user has any suggestions, incorporate them and then show the story again.
+
+
+## Tier 2: Story Enhancement
+#### After initial story creation, you offer congratulations on completing the first draft and gives 2 options:
+Option 1 - Provides option for one-on-one sessions with expert storytelling coaches - the booking can be done that at https://calendly.com/
+Options 2 - Provides further options for introducing users to more sophisticated narratives.
+
+#### If Option 2 chosen, show these options with simple explanation and chose one.
+You take the story and integrates it into different options of storytelling narrative structure:
+- The Story Hanger
+- The Story Spine
+- Hero's Journey
+- Beginning to End / Beginning to End
+- In Media Res (Start the story in the middle)
+- Nested Loops
+- The Cliffhanger
+Users won't be aware of what different structures mean. To ease out the process, we can give a by-line explaining each narrative structure while giving them the option to choose amongst them.
+
+Once they have selected a narrative structure, another option can be added to choose the theme for the story. This can add help in customising the story further. Given below is the exhaustive list of themes for the users to choose from (again with an attached byline, if needed):
+1. Overcoming the Monster
+2. Rags to Riches
+3. The Quest
+4. Voyage and Return
+5. Rebirth
+6. Comedy
+7. Tragedy
+
+After taking user's preference, you show two versions of the final story and ask for their preference.
+Allow them to iterate over different narratives to see what fits best for them.
+Repeat this process until they are satisfied with the story
+
+After they're satisfied, move to the next tier
+
+
+## Tier 3: Story Polishing
+The final phase focuses on refining the narrative further:
+- You add suggestions to the story:
+- Impactful quotes/poems / similes/comparisons
+
+#### Creative enhancements:
+- Some lines or descriptions for inspiration
+- Tips for maximising emotional resonance and memorability
+
+By guiding users through these three tiers, you aim to cater to novice storytellers, offering a comprehensive platform for narrative skill development through its adaptive approach.
+You end it with the final story and seek any suggestions from the user to refine the story further. Give them meaningful suggestions to create a more immersive story.
+
+Once the user confirms, you congratulate them with emojis on completing the story and provide the final story in a beatifully formatted manner.
+Note that the final story should include twist, turns and events that make it really engaging and enjoyable to read.
+
+"""
+
+USER_ICON = "icons/man.png"
+AI_ICON = "icons/Kommuneity.png"
+IMAGE_LOADER = "icons/Wedges.svg"
+TEXT_LOADER = "icons/balls.svg"
+DB_LOADER = "icons/db_loader.svg"
+START_MSG = "I want to create a story 😊"
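Note on the f-string in SYSTEM_MSG: single braces such as `{JSON_SEPARATOR}` are interpolated when the module loads, while the doubled braces `{{` / `}}` render as literal braces, so the JSON templates reach the model intact. A tiny check of that behaviour (illustrative only):

```python
# Doubled braces stay literal in an f-string; single braces interpolate.
JSON_SEPARATOR = ">>>>"
template = f'Append "{JSON_SEPARATOR}" and then {{ "action": "SHOW_STORY_DATABASE" }}'
print(template)
# Append ">>>>" and then { "action": "SHOW_STORY_DATABASE" }
```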
data/storiesDb.py  CHANGED

@@ -1,4 +1,5 @@
 import os
+import random
 from supabase import create_client, Client
 
 from dotenv import load_dotenv
@@ -11,4 +12,6 @@ supabase: Client = create_client(url, key)
 
 def getAllStories():
     response = supabase.table("existing_stories").select("*").execute()
-
+    stories = response.data
+    # return stories
+    return random.sample(stories, len(stories))
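Note: `random.sample(stories, len(stories))` returns a new, shuffled list and leaves the source list untouched, unlike `random.shuffle`, which reorders in place and returns `None`. A short check of that difference (illustrative, with made-up rows):

```python
# Why random.sample(stories, len(stories)) rather than random.shuffle(stories):
import random

stories = [{"Story Title": "A"}, {"Story Title": "B"}, {"Story Title": "C"}]
shuffled = random.sample(stories, len(stories))  # new list in random order

assert sorted(s["Story Title"] for s in shuffled) == ["A", "B", "C"]  # same items
assert stories[0]["Story Title"] == "A"  # original ordering is preserved
```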
pages/popular-stories.py  ADDED

@@ -0,0 +1,61 @@
+import streamlit as st
+from data import storiesDb
+import constants as C
+import utils as U
+
+st.set_page_config(
+    page_title="Popular Stories Databse",
+    page_icon=C.AI_ICON,
+    # menu_items={"About": None}
+)
+
+U.applyCommonStyles()
+st.markdown(
+    """
+    <style>
+
+    div[data-testid="stAppViewBlockContainer"] {
+        margin-top: -75px;
+    }
+
+    </style>
+    """,
+    unsafe_allow_html=True
+)
+
+st.subheader("Choose a popular story")
+
+storyPlaceholder = st.empty()
+col1, col2, col3 = storyPlaceholder.columns([1.5, 2.2, 1.5])
+col2.image(C.DB_LOADER)
+col2.write(
+    """
+    <div class='blinking code large bold'>
+        Loading from database ...
+    </div>
+    """,
+    unsafe_allow_html=True
+)
+
+if "dbStories" not in st.session_state:
+    st.session_state.dbStories = storiesDb.getAllStories()
+
+with storyPlaceholder.container(border=False, height=500):
+    for idx, story in enumerate(st.session_state.dbStories):
+        storyTitle = story['Story Title']
+        storyDetails = story['Story Text']
+        with st.expander(storyTitle):
+            st.markdown(storyDetails)
+            if st.button(
+                "Select",
+                key=f"select_{idx}",
+                type="primary",
+                use_container_width=True
+            ):
+                U.pprint(f"Selected story: {storyTitle}")
+                st.session_state.storyChosen = True
+                st.session_state.selectedStory = {
+                    "title": storyTitle,
+                    "text": storyDetails
+                }
+                st.switch_page("app.py")
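Note on the handoff back to `app.py`: `st.session_state` survives `st.switch_page`, so the chosen story stored in `selectedStory` (and the `storyChosen` flag) is still available when `app.py` reruns and reads `st.session_state["selectedStory"].get("title")` in its walrus expression. A condensed sketch of just that handoff, using a hypothetical helper name that is not part of this commit:

```python
# Condensed sketch of the state handoff shown above (hypothetical helper, illustration only).
import streamlit as st

def handOffSelectedStory(title: str, text: str) -> None:
    st.session_state.storyChosen = True
    st.session_state.selectedStory = {"title": title, "text": text}
    st.switch_page("app.py")  # app.py then picks up selectedStory["title"] as the next prompt
```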
utils.py  ADDED

@@ -0,0 +1,62 @@
+import streamlit as st
+import datetime as DT
+import pytz
+
+
+def applyCommonStyles():
+    st.markdown(
+        """
+        <style>
+        @keyframes blinker {
+            0% {
+                opacity: 1;
+            }
+            50% {
+                opacity: 0.2;
+            }
+            100% {
+                opacity: 1;
+            }
+        }
+
+        .blinking {
+            animation: blinker 3s ease-out infinite;
+        }
+
+        .code {
+            color: green;
+            border-radius: 3px;
+            padding: 2px 4px; /* Padding around the text */
+            font-family: 'Courier New', Courier, monospace; /* Monospace font */
+        }
+
+        .large {
+            font-size: 15px;
+        }
+
+        .bold {
+            font-weight: bold;
+        }
+
+        div[aria-label="dialog"] {
+            width: 80vw;
+            height: 620px;
+        }
+
+        </style>
+        """,
+        unsafe_allow_html=True
+    )
+
+
+ipAddress = st.context.headers.get("x-forwarded-for")
+
+
+def __nowInIST() -> DT.datetime:
+    return DT.datetime.now(pytz.timezone("Asia/Kolkata"))
+
+
+def pprint(log: str):
+    now = __nowInIST()
+    now = now.strftime("%Y-%m-%d %H:%M:%S")
+    print(f"[{now}] [{ipAddress}] {log}")