Ashhar
committed on
Commit
•
28f5d18
1
Parent(s):
98cfaf2
first draft
Browse files- Kommune.png +0 -0
- Kommune_1.webp +0 -0
- app.py +112 -43
- baby.png +0 -0
Kommune.png
ADDED
Kommune_1.webp
ADDED
app.py
CHANGED
@@ -2,6 +2,7 @@ import streamlit as st
|
|
2 |
import os
|
3 |
import datetime as DT
|
4 |
import pytz
|
|
|
5 |
|
6 |
from dotenv import load_dotenv
|
7 |
load_dotenv()
|
@@ -13,48 +14,97 @@ client = Groq(
|
|
13 |
)
|
14 |
MODEL = "llama-3.1-70b-versatile"
|
15 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
SYSTEM_MSG = """
|
17 |
-
You're
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
You
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
Use
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
-
|
30 |
-
-
|
31 |
-
|
32 |
-
|
33 |
-
|
34 |
-
|
35 |
-
-
|
36 |
-
|
37 |
-
|
38 |
-
|
39 |
-
|
40 |
-
|
41 |
-
|
42 |
-
|
43 |
-
|
44 |
-
|
45 |
-
|
46 |
-
|
47 |
-
|
48 |
-
|
49 |
-
|
50 |
-
|
51 |
-
|
52 |
-
|
53 |
-
|
54 |
-
|
55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
56 |
"""
|
57 |
|
|
|
|
|
|
|
58 |
st.set_page_config(
|
59 |
page_title="Aariz baby",
|
60 |
page_icon="baby.png",
|
@@ -73,10 +123,15 @@ def pprint(log: str):
|
|
73 |
print(f"[{now}] [{ipAddress}] {log}")
|
74 |
|
75 |
|
|
|
|
|
|
|
76 |
def predict(prompt):
|
77 |
historyFormatted = [{"role": "system", "content": SYSTEM_MSG}]
|
78 |
historyFormatted.extend(st.session_state.messages)
|
79 |
historyFormatted.append({"role": "user", "content": prompt })
|
|
|
|
|
80 |
|
81 |
response = client.chat.completions.create(
|
82 |
model="llama-3.1-70b-versatile",
|
@@ -95,6 +150,20 @@ def predict(prompt):
|
|
95 |
|
96 |
|
97 |
st.title("Chat with Aariz baby 👶🏻")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
98 |
|
99 |
if "messages" not in st.session_state:
|
100 |
st.session_state.messages = []
|
@@ -103,17 +172,17 @@ if "messages" not in st.session_state:
|
|
103 |
for message in st.session_state.messages:
|
104 |
role = message["role"]
|
105 |
content = message["content"]
|
106 |
-
avatar =
|
107 |
with st.chat_message(role, avatar=avatar):
|
108 |
st.markdown(content)
|
109 |
|
110 |
-
if prompt := st.chat_input(
|
111 |
-
with st.chat_message("user", avatar=
|
112 |
st.markdown(prompt)
|
113 |
pprint(f"{prompt=}")
|
114 |
st.session_state.messages.append({"role": "user", "content": prompt })
|
115 |
|
116 |
-
with st.chat_message("assistant", avatar=
|
117 |
responseGenerator = predict(prompt)
|
118 |
response = st.write_stream(responseGenerator)
|
119 |
pprint(f"{response=}")
|
|
|
2 |
import os
|
3 |
import datetime as DT
|
4 |
import pytz
|
5 |
+
from transformers import AutoTokenizer
|
6 |
|
7 |
from dotenv import load_dotenv
|
8 |
load_dotenv()
|
|
|
14 |
)
|
15 |
MODEL = "llama-3.1-70b-versatile"
|
16 |
|
17 |
+
tokenizer = AutoTokenizer.from_pretrained("Xenova/Meta-Llama-3.1-Tokenizer")
|
18 |
+
|
19 |
+
|
20 |
+
def countTokens(text):
    """Return the number of tokens *text* occupies under the module's Llama 3.1 tokenizer."""
    # add_special_tokens=False: count only the raw text tokens (no BOS/EOS markers),
    # since this is used to measure chat-history context size, not to build model input.
    return len(tokenizer.encode(text, add_special_tokens=False))
|
25 |
+
|
26 |
+
|
27 |
SYSTEM_MSG = """
|
28 |
+
You're an storytelling assistant who guides users through four phases of narrative development, helping them craft compelling personal or professional stories.
|
29 |
+
Ask one question at a time, give the options in a well formatted manner in different lines
|
30 |
+
|
31 |
+
# Tier 1: Story Creation
|
32 |
+
You initiate the storytelling process through a series of engaging prompts:
|
33 |
+
Story Origin:
|
34 |
+
Asks users to choose between personal anecdotes or adapting a well-known tale (creating a story database here of well-known stories to choose from).
|
35 |
+
|
36 |
+
Story Use Case:
|
37 |
+
Asks users to define the purpose of building a story (e.g., profile story, for social media content).
|
38 |
+
|
39 |
+
Story Time Frame:
|
40 |
+
Allows story selection from various life stages (childhood, mid-career, recent experiences).
|
41 |
+
Or Age-wise (below 8, 8-13, 13-15 and so on).
|
42 |
+
|
43 |
+
Story Focus:
|
44 |
+
Prompts users to select behaviours or leadership qualities to highlight in the story.
|
45 |
+
Provides a list of options based on common leadership traits:
|
46 |
+
(Generosity / Integrity / Loyalty / Devotion / Kindness / Sincerity / Self-control / Confidence / Persuasiveness / Ambition / Resourcefulness / Decisiveness / Faithfulness / Patience / Determination / Persistence / Fairness / Cooperation / Optimism / Proactive / Charisma / Ethics / Relentlessness / Authority / Enthusiasm / Boldness)
|
47 |
+
|
48 |
+
Story Type:
|
49 |
+
Prompts users to select the kind of story they want to tell:
|
50 |
+
Where we came from: A founding Story
|
51 |
+
Why we can't stay here: A case-for-change story
|
52 |
+
Where we're going: A vision story
|
53 |
+
How we're going to get there: A strategy story
|
54 |
+
Why I lead the way I do: Leadership philosophy story
|
55 |
+
Why you should want to work here: A rallying story
|
56 |
+
Personal stories: Who you are, what you do, how you do it, and who you do it for
|
57 |
+
What we believe: A story about values
|
58 |
+
Who we serve: A customer story
|
59 |
+
What we do for our customers: A sales story
|
60 |
+
How we're different: A marketing story
|
61 |
+
|
62 |
+
Guided Storytelling Framework:
|
63 |
+
You then lead users through a structured narrative development via the following prompts:
|
64 |
+
Describe the day it happened
|
65 |
+
What was the Call to Action / Invitation
|
66 |
+
Describing the obstacles (up to three) in 4 lines
|
67 |
+
Exploring emotions/fears experienced during the incident
|
68 |
+
Recognize the helpers / any objects of help in the incident
|
69 |
+
Detailing the resolution / Reaching the final goal
|
70 |
+
Reflecting on personal growth or lessons learned (What did you do that changed your life forever?)
|
71 |
+
|
72 |
+
Now, show the story created so far, and ask for confirmation before proceeding to the next tier.
|
73 |
+
|
74 |
+
# Tier 2: Story Enhancement
|
75 |
+
After initial story creation, you offer congratulations on completing the first draft and gives 2 options:
|
76 |
+
Option 1 - Provides option for one-on-one sessions with expert storytelling coaches - the booking can be done that at https://calendly.com/
|
77 |
+
Options 2 - Provides further options for introducing users to more sophisticated narratives.
|
78 |
+
|
79 |
+
If Option 2 chosen, show these options with simple explanation and chose one.
|
80 |
+
You take the story and integrates it into different options of storytelling narrative structure:
|
81 |
+
The Story Hanger
|
82 |
+
The Story Spine
|
83 |
+
Hero's Journey
|
84 |
+
Beginning to End / Beginning to End
|
85 |
+
In Media Res (Start the story in the middle)
|
86 |
+
Nested Loops
|
87 |
+
The Cliffhanger
|
88 |
+
|
89 |
+
After taking user's preference, you show the final story and ask for confirmation before moving to the next tier.
|
90 |
+
Allow them to iterate over different narratives to see what fits best for them.
|
91 |
+
|
92 |
+
# Tier 3: Story Polishing
|
93 |
+
The final phase focuses on refining the narrative further:
|
94 |
+
You add suggestions to the story:
|
95 |
+
Impactful quotes/poems / similes/comparisons
|
96 |
+
Creative enhancements:
|
97 |
+
Some lines or descriptions for inspiration
|
98 |
+
Tips for maximising emotional resonance and memorability
|
99 |
+
By guiding users through these three tiers, you aim to cater to novice storytellers, offering a comprehensive platform for narrative skill development through its adaptive approach.
|
100 |
+
You end it with the final story and seeking any suggestions from the user to refine the story further.
|
101 |
+
Once the user confirms, you congratulate them with emojis on completing the story and provide the final story in a beatifully formatted manner.
|
102 |
+
|
103 |
"""
|
104 |
|
105 |
+
USER_ICON = "man.png"
|
106 |
+
AI_ICON = "Kommune_1.webp"
|
107 |
+
|
108 |
st.set_page_config(
|
109 |
page_title="Aariz baby",
|
110 |
page_icon="baby.png",
|
|
|
123 |
print(f"[{now}] [{ipAddress}] {log}")
|
124 |
|
125 |
|
126 |
+
pprint("\n")
|
127 |
+
|
128 |
+
|
129 |
def predict(prompt):
|
130 |
historyFormatted = [{"role": "system", "content": SYSTEM_MSG}]
|
131 |
historyFormatted.extend(st.session_state.messages)
|
132 |
historyFormatted.append({"role": "user", "content": prompt })
|
133 |
+
contextSize = countTokens(str(historyFormatted))
|
134 |
+
pprint(f"{contextSize=}")
|
135 |
|
136 |
response = client.chat.completions.create(
|
137 |
model="llama-3.1-70b-versatile",
|
|
|
150 |
|
151 |
|
152 |
st.title("Chat with Aariz baby 👶🏻")
|
153 |
+
st.write("Type 'Hi' to start")
|
154 |
+
# st.markdown(
|
155 |
+
# """
|
156 |
+
# <style>
|
157 |
+
# .appview-container {
|
158 |
+
# background-color: white;
|
159 |
+
# }
|
160 |
+
# .st-chat-message {
|
161 |
+
# color: black; /* Ensure chat messages are also in black */
|
162 |
+
# }
|
163 |
+
# </style>
|
164 |
+
# """,
|
165 |
+
# unsafe_allow_html=True
|
166 |
+
# )
|
167 |
|
168 |
if "messages" not in st.session_state:
|
169 |
st.session_state.messages = []
|
|
|
172 |
for message in st.session_state.messages:
|
173 |
role = message["role"]
|
174 |
content = message["content"]
|
175 |
+
avatar = AI_ICON if role == "assistant" else USER_ICON
|
176 |
with st.chat_message(role, avatar=avatar):
|
177 |
st.markdown(content)
|
178 |
|
179 |
+
if prompt := st.chat_input():
|
180 |
+
with st.chat_message("user", avatar=USER_ICON):
|
181 |
st.markdown(prompt)
|
182 |
pprint(f"{prompt=}")
|
183 |
st.session_state.messages.append({"role": "user", "content": prompt })
|
184 |
|
185 |
+
with st.chat_message("assistant", avatar=AI_ICON):
|
186 |
responseGenerator = predict(prompt)
|
187 |
response = st.write_stream(responseGenerator)
|
188 |
pprint(f"{response=}")
|
baby.png
DELETED
Binary file (8.21 kB)
|
|