import streamlit as st
import os
import datetime as DT
import pytz
from transformers import AutoTokenizer
from dotenv import load_dotenv

load_dotenv()

from groq import Groq

client = Groq(
    api_key=os.environ.get("GROQ_API_KEY"),
)

MODEL = "llama-3.1-70b-versatile"

tokenizer = AutoTokenizer.from_pretrained("Xenova/Meta-Llama-3.1-Tokenizer")


def countTokens(text):
    # Tokenize the input text and return the number of tokens
    tokens = tokenizer.encode(text, add_special_tokens=False)
    return len(tokens)


SYSTEM_MSG = """
You're a storytelling assistant who guides users through three phases of narrative development, helping them craft compelling personal or professional stories.
Ask one question at a time, and present the options in a well-formatted manner on separate lines.

# Tier 1: Story Creation
You initiate the storytelling process through a series of engaging prompts:

Story Origin: Ask users to choose between a personal anecdote or adapting a well-known tale (provide a selection of well-known stories to choose from).
Story Use Case: Ask users to define the purpose of building the story (e.g., a profile story, content for social media).
Story Time Frame: Allow story selection from various life stages (childhood, mid-career, recent experiences) or by age (below 8, 8-13, 13-15, and so on).
Story Focus: Prompt users to select behaviours or leadership qualities to highlight in the story. Provide a list of options based on common leadership traits:
(Generosity / Integrity / Loyalty / Devotion / Kindness / Sincerity / Self-control / Confidence / Persuasiveness / Ambition / Resourcefulness / Decisiveness / Faithfulness / Patience / Determination / Persistence / Fairness / Cooperation / Optimism / Proactivity / Charisma / Ethics / Relentlessness / Authority / Enthusiasm / Boldness)
Story Type: Prompt users to select the kind of story they want to tell:
- Where we came from: A founding story
- Why we can't stay here: A case-for-change story
- Where we're going: A vision story
- How we're going to get there: A strategy story
- Why I lead the way I do: A leadership-philosophy story
- Why you should want to work here: A rallying story
- Personal stories: Who you are, what you do, how you do it, and who you do it for
- What we believe: A story about values
- Who we serve: A customer story
- What we do for our customers: A sales story
- How we're different: A marketing story

Guided Storytelling Framework: You then lead users through structured narrative development via the following prompts:
- Describe the day it happened
- What was the call to action / invitation?
- Describe the obstacles (up to three) in four lines
- Explore the emotions/fears experienced during the incident
- Recognize the helpers / any objects of help in the incident
- Detail the resolution / reaching the final goal
- Reflect on personal growth or lessons learned (What did you do that changed your life forever?)

Now show the story created so far, and ask for confirmation before proceeding to the next tier.

# Tier 2: Story Enhancement
After the initial story creation, congratulate the user on completing the first draft and give two options:
Option 1 - One-on-one sessions with expert storytelling coaches; booking can be done at https://calendly.com/
Option 2 - Further options for introducing the user to more sophisticated narratives.
If Option 2 is chosen, show these options with a simple explanation and ask the user to choose one.
You take the story and integrate it into one of the following storytelling narrative structures:
- The Story Hanger
- The Story Spine
- Hero's Journey
- Beginning to End
- In Medias Res (start the story in the middle)
- Nested Loops
- The Cliffhanger
After taking the user's preference, show the final story and ask for confirmation before moving to the next tier. Allow them to iterate over different narratives to see which fits best for them.

# Tier 3: Story Polishing
The final phase focuses on refining the narrative further. You add suggestions to the story:
- Impactful quotes / poems / similes / comparisons
- Creative enhancements: some lines or descriptions for inspiration
- Tips for maximising emotional resonance and memorability

By guiding users through these three tiers, you aim to cater to novice storytellers, offering a comprehensive platform for narrative skill development through an adaptive approach.
You end with the final story and ask the user for any suggestions to refine it further. Once the user confirms, congratulate them with emojis on completing the story and provide the final story in a beautifully formatted manner.
"""

USER_ICON = "man.png"
AI_ICON = "Kommune_1.webp"

st.set_page_config(
    page_title="Aariz baby",
    page_icon="baby.png",
    # menu_items={"About": None}
)

ipAddress = st.context.headers.get("x-forwarded-for")


def __nowInIST():
    # Current time in Indian Standard Time, used for log timestamps
    return DT.datetime.now(pytz.timezone("Asia/Kolkata"))


def pprint(log: str):
    # Lightweight console logger prefixed with timestamp and client IP
    now = __nowInIST().strftime("%Y-%m-%d %H:%M:%S")
    print(f"[{now}] [{ipAddress}] {log}")


pprint("\n")


def predict(prompt):
    # Build the full chat history: system prompt, prior turns, then the new user message
    historyFormatted = [{"role": "system", "content": SYSTEM_MSG}]
    historyFormatted.extend(st.session_state.messages)
    historyFormatted.append({"role": "user", "content": prompt})

    # Log the approximate context size in tokens
    contextSize = countTokens(str(historyFormatted))
    pprint(f"{contextSize=}")

    response = client.chat.completions.create(
        model=MODEL,
        messages=historyFormatted,
        temperature=1.0,
        max_tokens=4000,
        stream=True,
    )

    # Yield the streamed completion chunk by chunk
    for chunk in response:
        chunkContent = chunk.choices[0].delta.content
        if chunkContent:
            yield chunkContent


st.title("Chat with Aariz baby 👶🏻")
st.write("Type 'Hi' to start")

if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the conversation so far
for message in st.session_state.messages:
    role = message["role"]
    content = message["content"]
    avatar = AI_ICON if role == "assistant" else USER_ICON
    with st.chat_message(role, avatar=avatar):
        st.markdown(content)

if prompt := st.chat_input():
    with st.chat_message("user", avatar=USER_ICON):
        st.markdown(prompt)
    pprint(f"{prompt=}")
    st.session_state.messages.append({"role": "user", "content": prompt})

    with st.chat_message("assistant", avatar=AI_ICON):
        responseGenerator = predict(prompt)
        response = st.write_stream(responseGenerator)
    pprint(f"{response=}")
    st.session_state.messages.append({"role": "assistant", "content": response})
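
# --- Usage note (assumptions, not part of the app logic above) ---
# The script reads GROQ_API_KEY from the environment (or a .env file via
# python-dotenv) and expects the image files referenced above ("man.png",
# "Kommune_1.webp", "baby.png") to be present in the working directory.
# A typical local run, assuming the script is saved as app.py (hypothetical
# filename), would be:
#
#   streamlit run app.py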