Create app.py #1
by dschandra - opened
app.py ADDED
@@ -0,0 +1,86 @@
import assemblyai as aai
from elevenlabs import generate, stream
from openai import OpenAI


class AI_Assistant:
    def __init__(self):
        # API credentials (placeholders; replace with real keys)
        aai.settings.api_key = "ASSEMBLYAI-API-KEY"
        self.openai_client = OpenAI(api_key="OPENAI-API-KEY")
        self.elevenlabs_api_key = "ELEVENLABS-API-KEY"

        self.transcriber = None

        # Context for food ordering in a restaurant
        self.full_transcript = [
            {"role": "system", "content": "You are a virtual assistant for a restaurant. Help customers with food ordering, menu inquiries, and table reservations."},
        ]

    def start_transcription(self):
        # Open a real-time transcription session and stream microphone audio into it
        self.transcriber = aai.RealtimeTranscriber(
            sample_rate=16000,
            on_data=self.on_data,
            on_error=self.on_error,
            on_open=self.on_open,
            on_close=self.on_close,
            end_utterance_silence_threshold=1000,
        )
        self.transcriber.connect()
        microphone_stream = aai.extras.MicrophoneStream(sample_rate=16000)
        self.transcriber.stream(microphone_stream)

    def stop_transcription(self):
        if self.transcriber:
            self.transcriber.close()
            self.transcriber = None

    def on_open(self, session_opened: aai.RealtimeSessionOpened):
        print("Session ID:", session_opened.session_id)

    def on_data(self, transcript: aai.RealtimeTranscript):
        if not transcript.text:
            return

        if isinstance(transcript, aai.RealtimeFinalTranscript):
            # A complete utterance: hand it to the LLM
            self.generate_ai_response(transcript)
        else:
            # Partial transcript: overwrite the current console line
            print(transcript.text, end="\r")

    def on_error(self, error: aai.RealtimeError):
        print("An error occurred:", error)

    def on_close(self):
        print("Session closed.")

    def generate_ai_response(self, transcript):
        # Pause listening while the assistant formulates and speaks its reply
        self.stop_transcription()
        self.full_transcript.append({"role": "user", "content": transcript.text})
        print(f"\nCustomer: {transcript.text}\n")

        response = self.openai_client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=self.full_transcript
        )

        ai_response = response.choices[0].message.content
        self.generate_audio(ai_response)
        self.start_transcription()
        print("\nListening for the next input...\n")

    def generate_audio(self, text):
        # Speak the reply with ElevenLabs and keep it in the conversation history
        self.full_transcript.append({"role": "assistant", "content": text})
        print(f"\nAI Assistant: {text}")

        audio_stream = generate(
            api_key=self.elevenlabs_api_key,
            text=text,
            voice="Rachel",
            stream=True
        )
        stream(audio_stream)


if __name__ == "__main__":
    greeting = "Welcome to Gourmet Bistro! My name is Sandy. How may I assist you today?"
    ai_assistant = AI_Assistant()
    ai_assistant.generate_audio(greeting)
    ai_assistant.start_transcription()
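
A note on dependencies (inferred from the imports, not part of this diff): the script appears to need the AssemblyAI SDK with its microphone extras for aai.extras.MicrophoneStream, an elevenlabs release older than 1.0 (the module-level generate/stream helpers used here were removed in the 1.x client), and the openai 1.x client for the OpenAI class. Assuming PortAudio is available for microphone capture, something like the following should get it running once the three placeholder API keys in __init__ are filled in:

pip install "assemblyai[extras]" "elevenlabs<1.0" "openai>=1.0"
python app.py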