import streamlit as st
import os
import datetime as DT
import pytz
import time
import json
import re
from transformers import AutoTokenizer

from dotenv import load_dotenv
load_dotenv()
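
# Groq chat-completions client; GROQ_API_KEY comes from the environment
# (loaded above from the local .env file).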
from groq import Groq
client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)

MODEL = "llama-3.1-70b-versatile"
# Marker the model is asked to emit before its JSON block of options; the UI
# splits each response on this string to render the options as buttons.
JSON_SEPARATOR = ">>>>"
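

# Llama 3.1 tokenizer (Hugging Face export) used only to estimate how many
# tokens the accumulated chat history will occupy before each request.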
tokenizer = AutoTokenizer.from_pretrained("Xenova/Meta-Llama-3.1-Tokenizer")


def countTokens(text):
    tokens = tokenizer.encode(text, add_special_tokens=False)
    return len(tokens)
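

# System prompt: defines the tiered story-creation flow and the JSON_SEPARATOR
# protocol the UI relies on to render option buttons.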
SYSTEM_MSG = f"""
You're a storytelling assistant who guides users through four phases of narrative development, helping them craft compelling personal or professional stories. The story created should be in simple language, yet evoke strong emotions.
Ask one question at a time, and present any options in a well-formatted manner on separate lines.
Only if your response contains a number of options to choose from, append your final response with this exact keyword "{JSON_SEPARATOR}", and only after this, append the JSON of options to choose from. The JSON should be of the format:
{{
"options": [
{{ "id": "1", "label": "Option 1" }},
{{ "id": "2", "label": "Option 2" }}
]
}}
Do not write "Choose one of the options below:"
Keep the number of options to fewer than 9.

# Tier 1: Story Creation
You initiate the storytelling process through a series of engaging prompts:
Story Origin:
Ask users to choose between a personal anecdote and adapting a well-known tale (offer a list of well-known stories to choose from).

Story Use Case:
Ask users to define the purpose of building the story (e.g., a profile story, content for social media).

Story Time Frame:
Allow story selection from various life stages (childhood, mid-career, recent experiences).
Or age-wise (below 8, 8-13, 13-15, and so on).

Story Focus:
Prompt users to select behaviours or leadership qualities to highlight in the story.
Provide a list of options based on common leadership traits:
(Generosity / Integrity / Loyalty / Devotion / Kindness / Sincerity / Self-control / Confidence / Persuasiveness / Ambition / Resourcefulness / Decisiveness / Faithfulness / Patience / Determination / Persistence / Fairness / Cooperation / Optimism / Proactive / Charisma / Ethics / Relentlessness / Authority / Enthusiasm / Boldness)

Story Type:
Prompt users to select the kind of story they want to tell:
Where we came from: A founding story
Why we can't stay here: A case-for-change story
Where we're going: A vision story
How we're going to get there: A strategy story
Why I lead the way I do: A leadership philosophy story
Why you should want to work here: A rallying story
Personal stories: Who you are, what you do, how you do it, and who you do it for
What we believe: A story about values
Who we serve: A customer story
What we do for our customers: A sales story
How we're different: A marketing story

Guided Storytelling Framework:
You then lead users through structured narrative development via the following prompts:
- Describe the day it happened
- What was the call to action / invitation?
- Describe the obstacles (up to three) in 4 lines
- Explore the emotions/fears experienced during the incident
- Recognize the helpers / any objects of help in the incident
- Detail the resolution / reaching the final goal
- Reflect on personal growth or lessons learned (What did you do that changed your life forever?)

Now, show the story created so far, and ask for confirmation before proceeding to the next tier.

# Tier 2: Story Enhancement
After the initial story creation, congratulate the user on completing the first draft and give 2 options:
Option 1 - Offer a one-on-one session with an expert storytelling coach - the booking can be done at https://calendly.com/
Option 2 - Offer further options that introduce the user to more sophisticated narrative structures.

If Option 2 is chosen, show these options with a simple explanation of each and ask the user to choose one.
You take the story and integrate it into the chosen storytelling narrative structure:
The Story Hanger
The Story Spine
Hero's Journey
Beginning to End
In Media Res (start the story in the middle)
Nested Loops
The Cliffhanger

After taking the user's preference, you show the final story and ask for confirmation before moving to the next tier.
Allow them to iterate over different narrative structures to see which fits best for them.

# Tier 3: Story Polishing
The final phase focuses on refining the narrative further:
You add suggestions to the story:
Impactful quotes / poems / similes / comparisons
Creative enhancements:
Some lines or descriptions for inspiration
Tips for maximising emotional resonance and memorability
By guiding users through these three tiers, you aim to cater to novice storytellers, offering a comprehensive platform for narrative skill development through an adaptive approach.
You end with the final story and ask for any suggestions from the user to refine it further.
Once the user confirms, you congratulate them with emojis on completing the story and present the final story in a beautifully formatted manner.
Note that the final story should include twists, turns, and events that make it engaging and enjoyable to read.

"""
USER_ICON = "man.png"
AI_ICON = "Kommuneity.png"
START_MSG = "I want to create a story 😊"

st.set_page_config(
    page_title="Kommuneity Story Creator",
    page_icon=AI_ICON,
)
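
# Best-effort client IP from the request headers; "x-forwarded-for" is usually
# set when the app runs behind a reverse proxy and is None otherwise.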
ipAddress = st.context.headers.get("x-forwarded-for")
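

# Console-logging helpers: IST timestamps tagged with the client IP.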
def __nowInIST() -> DT.datetime:
    return DT.datetime.now(pytz.timezone("Asia/Kolkata"))


def pprint(log: str):
    now = __nowInIST()
    now = now.strftime("%Y-%m-%d %H:%M:%S")
    print(f"[{now}] [{ipAddress}] {log}")


pprint("\n")
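

# Heuristic checks on the (partially) streamed response. It is treated as
# invalid when many lines start with a lowercase letter (usually a mangled
# list), or when the options JSON appears without the JSON_SEPARATOR marker
# preceding it.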
def __isInvalidResponse(response: str):
    if len(re.findall(r'\n[a-z]', response)) > 3:
        return True

    if ('\n{\n "options"' in response) and (JSON_SEPARATOR not in response):
        return True


def __resetButtonState():
    st.session_state["buttonValue"] = ""


def __setStartMsg(msg):
    st.session_state.startMsg = msg
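

# One-time session-state initialisation: chat history, the most recently
# clicked option button, and the start-button message.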
if "messages" not in st.session_state:
    st.session_state.messages = []

if "buttonValue" not in st.session_state:
    __resetButtonState()

if "startMsg" not in st.session_state:
    st.session_state.startMsg = ""
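

# Build the full message list (system prompt + chat history + new user turn),
# log its approximate token count, and stream the model's reply chunk by chunk.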
def predict(prompt):
    historyFormatted = [{"role": "system", "content": SYSTEM_MSG}]
    historyFormatted.extend(st.session_state.messages)
    historyFormatted.append({"role": "user", "content": prompt})
    contextSize = countTokens(str(historyFormatted))
    pprint(f"{contextSize=}")

    response = client.chat.completions.create(
        model=MODEL,
        messages=historyFormatted,
        temperature=0.8,
        max_tokens=4000,
        stream=True
    )

    chunkCount = 0
    for chunk in response:
        chunkContent = chunk.choices[0].delta.content
        if chunkContent:
            chunkCount += 1
            yield chunkContent
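

# Page title, a one-time start button, and a re-render of the chat history
# (Streamlit reruns the whole script on every interaction).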
st.title("Kommuneity Story Creator 📖")
if not st.session_state.startMsg:
    st.button(START_MSG, on_click=lambda: __setStartMsg(START_MSG))

for message in st.session_state.messages:
    role = message["role"]
    content = message["content"]
    avatar = AI_ICON if role == "assistant" else USER_ICON
    with st.chat_message(role, avatar=avatar):
        st.markdown(content)
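
# The next user turn can come from the chat input box, a previously clicked
# option button, or the start button.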
if prompt := (st.chat_input() or st.session_state["buttonValue"] or st.session_state.startMsg):
    __resetButtonState()
    __setStartMsg("")

    with st.chat_message("user", avatar=USER_ICON):
        st.markdown(prompt)
    pprint(f"{prompt=}")
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant", avatar=AI_ICON):
        placeholder = st.empty()
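
        # Stream the reply into the placeholder, accumulating chunks as they
        # arrive. Bail out (returning None) if the partial response looks
        # invalid, and stop live-rendering once JSON_SEPARATOR appears so the
        # raw options JSON never flashes on screen.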
        def getResponse():
            response = ""
            responseGenerator = predict(prompt)

            for chunk in responseGenerator:
                response += chunk
                if __isInvalidResponse(response):
                    return

                if JSON_SEPARATOR not in response:
                    placeholder.markdown(response)

            return response
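
        # Retry until a usable, non-empty response is produced (getResponse
        # returns None when the stream is rejected as invalid).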
        response = getResponse()
        while not response:
            pprint("Empty response. Retrying..")
            time.sleep(0.5)
            response = getResponse()

        pprint(f"{response=}")
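
        # If the response carries an options payload, split it off at
        # JSON_SEPARATOR and render each option as a button; clicking one feeds
        # its label back in as the next user prompt on the rerun.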
        def selectButton(optionLabel):
            st.session_state["buttonValue"] = optionLabel
            pprint(f"Selected: {optionLabel}")

        responseParts = response.split(JSON_SEPARATOR)
        if len(responseParts) > 1:
            [response, jsonStr] = responseParts

            try:
                jsonObj = json.loads(jsonStr)
                options = jsonObj["options"]

                for option in options:
                    st.button(
                        option["label"],
                        key=option["id"],
                        on_click=lambda label=option["label"]: selectButton(label)
                    )
            except Exception as e:
                pprint(e)

    st.session_state.messages.append({"role": "assistant", "content": response})