Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files- .gitignore +3 -0
- README.md +30 -8
- chatgpt.py +337 -0
- gradio_app.py +38 -0
- interview_protocol.py +21 -0
- requirements.txt +106 -1
.gitignore
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
.idea/
|
3 |
+
__pycache__/
|
README.md
CHANGED
@@ -1,12 +1,34 @@
|
|
1 |
---
|
2 |
-
title:
|
3 |
-
|
4 |
-
colorFrom: yellow
|
5 |
-
colorTo: purple
|
6 |
sdk: gradio
|
7 |
-
sdk_version:
|
8 |
-
app_file: app.py
|
9 |
-
pinned: false
|
10 |
---
|
|
|
11 |
|
12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
+
title: llm-autobiography
|
3 |
+
app_file: chatgpt.py
|
|
|
|
|
4 |
sdk: gradio
|
5 |
+
sdk_version: 3.50.2
|
|
|
|
|
6 |
---
|
7 |
+
# Chatbot Frontend
|
8 |
|
9 |
+
## Prerequisites
|
10 |
+
- Python 3.x
|
11 |
+
- pip
|
12 |
+
|
13 |
+
## Installation
|
14 |
+
1. Clone the repository:
|
15 |
+
```
|
16 |
+
git clone https://github.com/Zhuoxuan-Zhang/chatbot-ui.git
|
17 |
+
```
|
18 |
+
2. Navigate to the project directory:
|
19 |
+
```
|
20 |
+
cd chatbot-ui
|
21 |
+
```
|
22 |
+
3. Install the required packages:
|
23 |
+
```
|
24 |
+
pip install -r requirements.txt
|
25 |
+
```
|
26 |
+
|
27 |
+
## Running the Application
|
28 |
+
Start the chatbot by running:
|
29 |
+
```
|
30 |
+
python chatgpt.py
|
31 |
+
```
|
32 |
+
|
33 |
+
## Usage
|
34 |
+
- Open a web browser and visit `http://localhost:7860` to interact with the chatbot.
|
chatgpt.py
ADDED
@@ -0,0 +1,337 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Frontend for the LLM-autobiography chatbot: records speech, transcribes it
# locally with Whisper, and relays chat turns to a backend HTTP service.
import gradio as gr
import whisper
import asyncio
import httpx
import tempfile
import aiohttp  # NOTE(review): imported but not referenced in this file — confirm before removing
import os

from interview_protocol import protocols as interview_protocols

# Load the Whisper model for transcription.
# This runs at import time, so app startup blocks until the weights are loaded.
model = whisper.load_model("base")

# Your backend base URL (all API calls below are relative to this host).
base_url = "http://localhost:8080"
# Function to get the method index based on selection
def get_method_index(chapter, method):
    """Return *method*'s position in the flattened list of all topics.

    The backend addresses interview protocols by a single global index, so
    the per-chapter topic lists are concatenated in dict insertion order and
    the topic's index in that flat list is returned. ``chapter`` is accepted
    for interface compatibility but is not needed for the lookup.

    Raises:
        ValueError: if *method* is not present in any chapter.
    """
    flattened = [topic for topics in interview_protocols.values() for topic in topics]
    return flattened.index(method)
# Function to set the API key and settings on your backend
async def set_api_key(api_key, chapter_name, topic_name, username):
    """Send the API key and interview settings to the backend.

    Args:
        api_key: the user's API key for the backend LLM.
        chapter_name: selected interview chapter.
        topic_name: selected topic within the chapter.
        username: identifier the backend uses to store this user's data.

    Returns:
        A human-readable status string for display in the UI.
    """
    url = f"{base_url}/api/set-key"
    headers = {'Content-Type': 'application/json'}
    data = {'api_key': api_key, 'chapter_name': chapter_name, 'topic_name': topic_name, 'username': username}
    async with httpx.AsyncClient(timeout=20) as client:
        try:
            response = await client.post(url, json=data, headers=headers)
            if response.status_code == 200:
                return "API key and Interview Protocol set successfully."
            else:
                return f"Failed to set API key and Interview Protocol: {response.text}"
        # BUG FIX: httpx raises its own httpx.TimeoutException hierarchy, not
        # asyncio.TimeoutError, so the original `except asyncio.TimeoutError`
        # branch was dead and timeouts fell into the generic handler below.
        except httpx.TimeoutException:
            print("The request timed out")
            return "Request timed out while setting API key."
        except Exception as e:
            return f"Error setting API key and Interview Protocol: {str(e)}"
# Get a response from the backend
async def get_backend_response(api_key, patient_prompt, protocol_index):
    """Forward the user's prompt to the backend and return its JSON payload.

    ``api_key`` is accepted for interface compatibility; the request body
    carries only the prompt and the protocol index.

    Returns:
        The decoded JSON dict on success, or an error string on failure
        (callers distinguish the two with ``isinstance(result, str)``).
    """
    endpoint = f"{base_url}/responses/doctor"
    request_headers = {'Content-Type': 'application/json'}
    payload = {
        'patient_prompt': patient_prompt,
        'protocol_index': protocol_index  # Use the calculated index
    }
    async with httpx.AsyncClient(timeout=20) as client:
        try:
            reply = await client.post(endpoint, json=payload, headers=request_headers)
            if reply.status_code == 200:
                return reply.json()
            return f"Failed to fetch response from backend: {reply.text}"
        except Exception as e:
            return f"Error contacting backend service: {str(e)}"
async def save_conversation_and_memory():
    """Ask the backend to persist the current conversation and memory graph.

    Returns:
        The backend's 'response' field on success, otherwise an error string.
    """
    endpoint = f"{base_url}/save/end_and_save"
    request_headers = {'Content-Type': 'application/json'}
    async with httpx.AsyncClient(timeout=20) as client:
        try:
            reply = await client.post(endpoint, json={}, headers=request_headers)
            if reply.status_code == 200:
                return reply.json().get('response', 'Saving Error!.')
            return f"Failed to save conversations and memory graph: {reply.text}"
        except Exception as e:
            return f"Error contacting backend service: {str(e)}"
# Function to get conversation histories from the backend
async def get_conversation_histories(username):
    """Fetch all saved conversation histories for *username*.

    Args:
        username: identifier the backend stores conversations under.

    Returns:
        The backend's JSON list of conversation entries, or an empty list
        on any failure — callers iterate the result directly, so this is a
        deliberate best-effort contract.
    """
    url = f"{base_url}/save/download_conversations"
    headers = {'Content-Type': 'application/json'}
    data = {'username': username}
    async with httpx.AsyncClient(timeout=20) as client:
        try:
            response = await client.post(url, json=data, headers=headers)
            if response.status_code == 200:
                return response.json()
            # Keep returning [] (best effort) but surface the failure
            # instead of swallowing it silently as the original did.
            print(f"Failed to fetch conversation histories: {response.text}")
            return []
        except Exception as e:
            print(f"Error fetching conversation histories: {e}")
            return []
# Function to download conversation histories as separate text files
def download_conversations(username):
    """Write each saved conversation to its own text file and return the paths.

    Args:
        username: user whose histories are fetched from the backend.

    Returns:
        A list of file paths (inside a fresh temp directory) suitable for a
        Gradio Files output. Empty when the backend returned no histories.
    """
    conversation_histories = asyncio.run(get_conversation_histories(username))
    files = []
    temp_dir = tempfile.mkdtemp()
    for conversation_entry in conversation_histories:
        file_name = conversation_entry.get('file_name', f"Conversation_{len(files)+1}.txt")
        conversation = conversation_entry.get('conversation', [])
        conversation_text = ""
        for message_pair in conversation:
            if isinstance(message_pair, list) and len(message_pair) == 2:
                speaker, message = message_pair
                conversation_text += f"{speaker.capitalize()}: {message}\n\n"
            else:
                # Handle unexpected data format defensively rather than crashing.
                conversation_text += f"Unknown format: {message_pair}\n\n"
        # Write each conversation to a file with the same file name as backend.
        temp_file_path = os.path.join(temp_dir, file_name)
        # BUG FIX: write UTF-8 explicitly — the platform default encoding
        # (e.g. cp1252 on Windows) can raise on non-ASCII transcript text.
        with open(temp_file_path, 'w', encoding='utf-8') as temp_file:
            temp_file.write(conversation_text)
        files.append(temp_file_path)
    return files
async def get_biography():
    """Request the generated autobiography text from the backend.

    Returns:
        The biography text on success (may be an empty string if the
        backend omitted the field), or an error string on failure.
    """
    endpoint = f"{base_url}/save/generate_autobiography"
    request_headers = {'Content-Type': 'application/json'}
    async with httpx.AsyncClient(timeout=20) as client:
        try:
            reply = await client.get(endpoint, headers=request_headers)
            if reply.status_code != 200:
                return "Failed to generate biography."
            return reply.json().get('biography', '')
        except Exception as e:
            return f"Error contacting backend service: {str(e)}"
# Function to download biography as a text file
def download_biography():
    """Generate the biography and stage it as a downloadable text file.

    Returns:
        A 2-tuple matching the two Gradio outputs wired to this callback:
        (file path, success message) on success, or
        (hidden-file update, error message) on failure.
    """
    biography_text = asyncio.run(get_biography())
    # NOTE(review): detecting errors by substring is fragile — a biography
    # containing the word "Failed" or "Error" would be treated as an error.
    # Kept for behavioral compatibility with get_biography's string contract.
    if not biography_text or "Failed" in biography_text or "Error" in biography_text:
        # Handle error messages
        return gr.update(value=None, visible=False), biography_text  # Return the error message
    # Write the biography to a temporary text file.
    # BUG FIX: write UTF-8 explicitly so non-ASCII biography text cannot
    # crash on platforms whose default encoding is not UTF-8.
    with tempfile.NamedTemporaryFile(delete=False, mode='w', suffix='.txt', encoding='utf-8') as temp_file:
        temp_file.write(biography_text)
        temp_file_path = temp_file.name
    return temp_file_path, "Biography generated successfully."
def transcribe_audio(audio_file):
    """Run the local Whisper model on *audio_file* and return the text."""
    result = model.transcribe(audio_file)
    return result["text"]
def submit_text_and_respond(edited_text, api_key, chapter, method, history):
    """Submit-button handler: send the (possibly edited) transcription to the backend.

    Args:
        edited_text: text from the transcription box; the user may have edited it.
        api_key: value held in Gradio state, passed through to the backend call.
        chapter: currently selected chapter name.
        method: currently selected topic name.
        history: chat history as a list of (user, bot) tuples; mutated in place.

    Returns:
        (history, "", memory_rows) — the updated chat history, an empty
        string that clears the transcription box, and rows for the
        memory-events table (empty list when the backend returned an error).
    """
    # Get the protocol index based on selection
    protocol_index = get_method_index(chapter, method)
    response = asyncio.run(get_backend_response(api_key, edited_text, protocol_index))
    print('------')
    print(response)
    if isinstance(response, str):
        # get_backend_response signals failure by returning a plain string;
        # show it in the chat and clear the memory table.
        history.append((edited_text, response))
        return history, "", []
    # NOTE(review): assumes the backend JSON has the shape
    # {'doctor_response': {'response': str}, 'memory_events': [...]} — confirm
    # against the backend; a missing key raises KeyError here.
    doctor_response = response['doctor_response']['response']
    memory_event = response['memory_events']
    history.append((edited_text, doctor_response))
    # Update memory graph
    memory_graph = update_memory_graph(memory_event)
    return history, "", memory_graph  # Return memory_graph as output
def set_api_key_button(api_key_input, chapter_name, topic_name, username_input):
    """Click handler for the Initialize button.

    Pushes the key and interview settings to the backend, then returns the
    status message plus the key itself so it can be kept in Gradio state.
    """
    status = asyncio.run(set_api_key(api_key_input, chapter_name, topic_name, username_input))
    print(status)
    return status, api_key_input  # Store the API key in state
def save_conversation(state):
    """Click handler for "End and Save Conversation".

    ``state`` is unused but kept because it is wired as a Gradio input.
    Returns the backend's status string.
    """
    return asyncio.run(save_conversation_and_memory())
def start_recording(audio_file):
    """Audio-change handler: transcribe a new recording.

    Returns the transcription text, an empty string when no file was
    recorded, or an error string when transcription fails.
    """
    if audio_file:
        try:
            return transcribe_audio(audio_file)
        except Exception as e:
            return f"Failed to transcribe: {str(e)}"
    return ""
def update_methods(chapter):
    """Refresh the topic dropdown with the topics of the chosen *chapter*,
    preselecting the first one."""
    topics = interview_protocols[chapter]
    return gr.update(choices=topics, value=topics[0])
def update_memory_graph(memory_data):
    """Convert backend memory events into rows for the Memory Events table.

    Each event dict yields one [date, topic, description, people] row;
    missing keys become empty strings.
    """
    return [
        [
            node.get('date', ''),
            node.get('topic', ''),
            node.get('event_description', ''),
            node.get('people_involved', ''),
        ]
        for node in memory_data
    ]
# Gradio interface setup: a two-column layout — settings sidebar on the left,
# chat + memory table on the right — with all event wiring at the bottom.
with gr.Blocks() as app:
    with gr.Row():
        # Sidebar Column
        with gr.Column(scale=1, min_width=250):
            gr.Markdown("## Settings")

            with gr.Box():
                gr.Markdown("### Interview Protocol")
                # Chapter Selection — defaults to the first chapter key.
                chapter_dropdown = gr.Dropdown(
                    label="Select Chapter",
                    choices=list(interview_protocols.keys()),
                    value=list(interview_protocols.keys())[0],
                )
                # Topic Selection — choices depend on the selected chapter.
                method_dropdown = gr.Dropdown(
                    label="Select Topic",
                    choices=interview_protocols[chapter_dropdown.value],
                    value=interview_protocols[chapter_dropdown.value][0],
                )

                # Update methods when chapter changes
                chapter_dropdown.change(
                    fn=update_methods,
                    inputs=[chapter_dropdown],
                    outputs=[method_dropdown]
                )

                # Update states when selections change.
                # NOTE(review): chapter_state/method_state are written here but
                # the submit handler reads the dropdowns directly — these states
                # appear redundant; confirm before removing.
                def update_chapter(chapter):
                    return chapter

                def update_method(method):
                    return method

                chapter_state = gr.State()
                method_state = gr.State()

                chapter_dropdown.change(
                    fn=update_chapter,
                    inputs=[chapter_dropdown],
                    outputs=[chapter_state]
                )

                method_dropdown.change(
                    fn=update_method,
                    inputs=[method_dropdown],
                    outputs=[method_state]
                )

            with gr.Box():
                gr.Markdown("### User Information")
                # Username input — used by the backend to key saved conversations.
                username_input = gr.Textbox(
                    label="Username", placeholder="Enter your username"
                )

                # API Key Input (masked)
                api_key_input = gr.Textbox(
                    label="API Key", placeholder="Enter your API Key", type="password"
                )

                # Initialize Button — pushes key + settings to the backend.
                api_key_button = gr.Button("Initialize", variant="primary", size="large")
                api_key_status = gr.Textbox(
                    label="Status", interactive=False, placeholder="Initialization status will appear here."
                )

                # Holds the key after initialization so later submits can use it.
                api_key_state = gr.State()

                # Set API key and settings when the button is clicked
                api_key_button.click(
                    fn=set_api_key_button,
                    inputs=[api_key_input, chapter_dropdown, method_dropdown, username_input],
                    outputs=[api_key_status, api_key_state]
                )

        # Main Content Column
        with gr.Column(scale=3):
            # Only the Chatbot in the first row to keep it larger
            chatbot = gr.Chatbot(label="Chat with GPT", height=500)

            with gr.Row():
                # Editable transcription so the user can fix Whisper mistakes
                # before submitting.
                transcription_box = gr.Textbox(
                    label="Transcription (You can edit this)", lines=3
                )
                audio_input = gr.Audio(
                    source="microphone", type="filepath", label="🎤 Record Audio"
                )

            with gr.Row():
                submit_button = gr.Button("Submit", variant="primary", size="large")
                save_conversation_button = gr.Button("End and Save Conversation", variant="secondary")
                download_button = gr.Button("Download Conversations", variant="secondary")
                download_biography_button = gr.Button("Download Biography", variant="secondary")  # New button

            # Move the Memory Graph Table below the chatbot
            memory_graph_table = gr.Dataframe(
                headers=["Date", "Topic", "Description", "People Involved"],
                datatype=["str", "str", "str", "str"],
                interactive=False,
                label="Memory Events",
                max_rows=5
            )

    # Set up interactions.
    # Recording finishes -> transcribe into the editable textbox.
    audio_input.change(
        fn=start_recording,
        inputs=[audio_input],
        outputs=[transcription_box]
    )

    # Chat history lives in session state as a list of (user, bot) tuples.
    state = gr.State([])

    submit_button.click(
        submit_text_and_respond,
        inputs=[transcription_box, api_key_state, chapter_dropdown, method_dropdown, state],
        outputs=[chatbot, transcription_box, memory_graph_table]
    )

    # NOTE(review): the output components below (gr.Files / gr.File /
    # gr.Textbox) are created inline inside .click() rather than laid out
    # above — confirm this renders as intended on gradio 3.50.
    download_button.click(
        fn=download_conversations,
        inputs=[username_input],
        outputs=gr.Files()
    )

    # Set up the download biography button
    download_biography_button.click(
        fn=download_biography,
        inputs=None,
        outputs=[gr.File(label="Biography.txt"), gr.Textbox(visible=False)]
    )

    save_conversation_button.click(
        fn=save_conversation,
        inputs=[state],
        outputs=None
    )

# share=True creates a public Gradio link in addition to localhost:7860.
app.launch(share=True)
gradio_app.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Standalone demo app: records audio in the browser and transcribes it with
# OpenAI's hosted Whisper API. Separate from chatgpt.py (the Space's app_file).
import openai
import whisper  # NOTE(review): unused here — transcription goes through the OpenAI API
from getpass import getpass  # NOTE(review): unused import
from elevenlabs.client import ElevenLabs
from elevenlabs import play  # NOTE(review): unused import

import gradio as gr

from typing import Optional  # NOTE(review): unused import

# SECURITY: keys are hard-coded empty strings — supply real keys via
# environment variables rather than committing them to the repo.
openai_api_key = ''

client = ElevenLabs(
    api_key=""  # Defaults to ELEVEN_API_KEY
)
def transcribe(audio):
    """Transcribe an audio file with OpenAI's hosted Whisper API.

    Args:
        audio: path to the recorded audio file; Gradio passes None when
            nothing was recorded.

    Returns:
        The transcription text, or "" when no audio was provided.
    """
    # BUG FIX: the original opened the file first and then checked the file
    # handle for None — open() never returns None, and a None path would
    # already have raised inside open(). Guard on the input instead.
    if audio is None:
        return ""

    with open(audio, "rb") as f:
        t_text = openai.Audio.transcribe(
            model="whisper-1",
            file=f,
            api_key=openai_api_key
        )
    return t_text["text"]
# Build and launch the Gradio UI.
# BUG FIX: the original did `demo = gr.Interface(...).launch()` followed by
# `demo.launch(debug=True)` — Interface.launch() does not return the
# Interface, so the second launch() call was broken (and launching twice is
# redundant anyway). Construct the Interface first, then launch once.
demo = gr.Interface(
    title='Medical Scriber',
    fn=transcribe,
    inputs=[gr.Audio(source="microphone", type="filepath")],
    outputs=["text"]
)

demo.launch(debug=True)
interview_protocol.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Interview protocol topics, grouped by chapter.
# NOTE: chatgpt.py's get_method_index() flattens these lists in dict
# insertion order and sends a topic's GLOBAL index to the backend, so the
# order of chapters and of topics within each chapter is significant —
# do not reorder without updating the backend.
protocols = {
    "Life Chapters": ["Life As A Book"],
    "Key Scenes in the Life Story": [
        "High Point",
        "Low Point",
        "Turning Point",
        "Positive Childhood Memory",
        "Negative Childhood Memory",
        "Vivid Adult Memory",
        "Religious, Spiritual, or Mystical Experience",
        "Wisdom Event"
    ],
    "Future": ["Next Chapter", "Dreams and Plans", "Life Project"],
    "Challenges": ["Life Challenge", "Health", "Loss", "Failure or Regret"],
    "Personal Ideology": [
        "Religious and Ethical Values",
        "Political and Social Values",
        "Change, Development Of Religious And Political Views",
        "Key Value"
    ]
}
requirements.txt
CHANGED
@@ -1 +1,106 @@
|
|
1 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
aiofiles==23.2.1
|
2 |
+
aiohappyeyeballs==2.4.0
|
3 |
+
aiohttp==3.10.5
|
4 |
+
aiosignal==1.3.1
|
5 |
+
altair==5.4.1
|
6 |
+
annotated-types==0.7.0
|
7 |
+
anyio==4.4.0
|
8 |
+
async-timeout==4.0.3
|
9 |
+
attrs==24.2.0
|
10 |
+
blinker==1.8.2
|
11 |
+
cachetools==5.5.0
|
12 |
+
certifi==2024.8.30
|
13 |
+
charset-normalizer==3.3.2
|
14 |
+
click==8.1.7
|
15 |
+
contourpy==1.3.0
|
16 |
+
cycler==0.12.1
|
17 |
+
distro==1.9.0
|
18 |
+
elevenlabs==1.8.0
|
19 |
+
exceptiongroup==1.2.2
|
20 |
+
fastapi==0.114.1
|
21 |
+
ffmpeg==1.4
|
22 |
+
ffmpy==0.4.0
|
23 |
+
filelock==3.16.0
|
24 |
+
fonttools==4.53.1
|
25 |
+
frozenlist==1.4.1
|
26 |
+
fsspec==2024.9.0
|
27 |
+
gitdb==4.0.11
|
28 |
+
GitPython==3.1.43
|
29 |
+
gradio==3.50.2
|
30 |
+
gradio_client==0.6.1
|
31 |
+
h11==0.14.0
|
32 |
+
httpcore==1.0.5
|
33 |
+
httpx==0.27.2
|
34 |
+
huggingface-hub==0.24.6
|
35 |
+
idna==3.8
|
36 |
+
importlib_resources==6.4.5
|
37 |
+
Jinja2==3.1.4
|
38 |
+
jiter==0.5.0
|
39 |
+
jsonschema==4.23.0
|
40 |
+
jsonschema-specifications==2023.12.1
|
41 |
+
kiwisolver==1.4.7
|
42 |
+
llvmlite==0.43.0
|
43 |
+
loguru==0.7.2
|
44 |
+
markdown-it-py==3.0.0
|
45 |
+
MarkupSafe==2.1.5
|
46 |
+
matplotlib==3.9.2
|
47 |
+
mdurl==0.1.2
|
48 |
+
more-itertools==10.5.0
|
49 |
+
mpmath==1.3.0
|
50 |
+
multidict==6.1.0
|
51 |
+
narwhals==1.7.0
|
52 |
+
networkx==3.3
|
53 |
+
numba==0.60.0
|
54 |
+
numpy==1.26.4
|
55 |
+
openai==0.28.0
|
56 |
+
openai-whisper==20231117
|
57 |
+
orjson==3.10.7
|
58 |
+
packaging==24.1
|
59 |
+
pandas==2.2.2
|
60 |
+
pillow==10.4.0
|
61 |
+
protobuf==5.28.1
|
62 |
+
pyarrow==17.0.0
|
63 |
+
PyAudio==0.2.14
|
64 |
+
pydantic==2.9.1
|
65 |
+
pydantic_core==2.23.3
|
66 |
+
pydeck==0.9.1
|
67 |
+
pydub==0.25.1
|
68 |
+
Pygments==2.18.0
|
69 |
+
pyparsing==3.1.4
|
70 |
+
python-dateutil==2.9.0.post0
|
71 |
+
python-multipart==0.0.9
|
72 |
+
pytz==2024.2
|
73 |
+
PyYAML==6.0.2
|
74 |
+
referencing==0.35.1
|
75 |
+
regex==2024.9.11
|
76 |
+
requests==2.32.3
|
77 |
+
rich==13.8.1
|
78 |
+
rpds-py==0.20.0
|
79 |
+
ruff==0.6.4
|
80 |
+
safetensors==0.4.5
|
81 |
+
semantic-version==2.10.0
|
82 |
+
shellingham==1.5.4
|
83 |
+
six==1.16.0
|
84 |
+
smmap==5.0.1
|
85 |
+
sniffio==1.3.1
|
86 |
+
starlette==0.38.5
|
87 |
+
streamlit==1.38.0
|
88 |
+
sympy==1.13.2
|
89 |
+
tenacity==8.5.0
|
90 |
+
tiktoken==0.7.0
|
91 |
+
tokenizers==0.19.1
|
92 |
+
toml==0.10.2
|
93 |
+
tomlkit==0.12.0
|
94 |
+
torch==2.4.1
|
95 |
+
tornado==6.4.1
|
96 |
+
tqdm==4.66.5
|
97 |
+
transformers==4.44.2
|
98 |
+
transforms==0.2.1
|
99 |
+
typer==0.12.5
|
100 |
+
typing_extensions==4.12.2
|
101 |
+
tzdata==2024.1
|
102 |
+
urllib3==2.2.2
|
103 |
+
uvicorn==0.30.6
|
104 |
+
webrtcvad==2.0.10
|
105 |
+
websockets==11.0.3
|
106 |
+
yarl==1.11.1
|