SajidMajeed committed
Commit 83bdf6a · verified · 1 Parent(s): e485b49

Create app.py

Files changed (1)
  1. app.py +50 -0
app.py ADDED
@@ -0,0 +1,50 @@
+ import gradio as gr
+ import os
+ import whisper
+ from groq import Groq
+ from gtts import gTTS
+
+ # Initialize the Whisper model used for speech-to-text transcription
+ model = whisper.load_model("base")
+
+ # Set up the Groq client; the API key is read from the environment
+ GROQ_API_KEY = os.environ.get("GROQ_API_KEY")
+ client = Groq(api_key=GROQ_API_KEY)
+
+
+ # Query the LLM through the Groq chat-completions API
+ def get_llm_response(input_text):
+     chat_completion = client.chat.completions.create(
+         messages=[{
+             "role": "user",
+             "content": input_text,
+         }],
+         model="llama3-8b-8192",
+     )
+     return chat_completion.choices[0].message.content
+
+
+ # Convert text to speech with gTTS and save it as an MP3 file
+ def text_to_speech(text, output_audio="output_audio.mp3"):
+     tts = gTTS(text)
+     tts.save(output_audio)
+     return output_audio
+
+
+ # Full pipeline: transcribe the recording, query the LLM, voice the reply
+ def chatbot(audio):
+     result = model.transcribe(audio)
+     user_text = result["text"]
+     response_text = get_llm_response(user_text)
+     output_audio = text_to_speech(response_text)
+     return response_text, output_audio
+
+
+ # Gradio interface: microphone audio in, response text and audio out
+ iface = gr.Interface(
+     fn=chatbot,
+     inputs=gr.Audio(type="filepath"),  # recorded audio arrives as a file path
+     outputs=[gr.Textbox(), gr.Audio(type="filepath")],  # reply text and audio file path
+     live=True,
+ )
+
+ iface.launch()
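
A small follow-up note, not part of the commit: the Groq and gTTS steps can be smoke-tested in isolation with a short standalone script along these lines. This is a minimal sketch, assuming GROQ_API_KEY is exported in the environment and the required packages are installed; the prompt text and the output filename are placeholders, and the Whisper transcription step is skipped because it needs a recorded audio file.

import os

from groq import Groq
from gtts import gTTS

# Same calls app.py makes, exercised outside the Gradio interface.
client = Groq(api_key=os.environ["GROQ_API_KEY"])

reply = client.chat.completions.create(
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
    model="llama3-8b-8192",
).choices[0].message.content
print(reply)

# Synthesize the reply to an MP3, mirroring text_to_speech() in app.py.
gTTS(reply).save("smoke_test.mp3")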