# sAIvahini / 2app.py
# (Hugging Face page residue below — kept as comments so the file parses)
# TruthLens's picture
# Rename app.py to 2app.py
# 58e550e verified
import streamlit as st
import requests
import io
import base64
# βœ… Set Streamlit Page Config — must be the first Streamlit call in the script,
# otherwise Streamlit raises a StreamlitAPIException at runtime.
st.set_page_config(page_title="Sai Vahini AI Assistant", layout="centered")
# βœ… Render API URL (Ensure this matches your deployed API on Render)
# The endpoint is expected to accept a multipart "file" upload and return JSON
# containing "transcription", "response" and "audio" keys (see the handler
# further down in this file).
RENDER_API_URL = "https://saivahini.onrender.com/process_audio"
# βœ… UI Header
# unsafe_allow_html is needed for the inline-styled <h1>; the markup is a
# static literal, so no untrusted HTML is injected here.
st.markdown("<h1 style='text-align: center; color: #ff5733;'>Sai Vahini AI Voice Assistant πŸ•‰οΈ</h1>", unsafe_allow_html=True)
# βœ… HTML5 Audio Recorder (JavaScript + Streamlit)
# NOTE(review): this JS records via MediaRecorder, then POSTs the base64 audio
# to a relative "/upload_audio" endpoint — but a plain Streamlit app serves no
# such endpoint, and the hidden <input id="audio_url"> lives only inside the
# component's sandboxed iframe DOM, so its value is never propagated into
# st.session_state. The "Process" handler below therefore cannot see the
# recording. Wiring this up needs a bidirectional custom Streamlit component
# (or a package such as streamlit-webrtc / audio-recorder-streamlit).
# TODO confirm whether a separate server is meant to provide /upload_audio.
audio_recorder_html = """
<script>
let mediaRecorder;
let audioChunks = [];
function startRecording() {
navigator.mediaDevices.getUserMedia({ audio: true }).then(stream => {
mediaRecorder = new MediaRecorder(stream);
mediaRecorder.start();
mediaRecorder.ondataavailable = event => {
audioChunks.push(event.data);
};
mediaRecorder.onstop = () => {
const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
const reader = new FileReader();
reader.readAsDataURL(audioBlob);
reader.onloadend = () => {
const base64Audio = reader.result.split(',')[1];
fetch("/upload_audio", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify({ audio: base64Audio })
}).then(response => response.json()).then(data => {
document.getElementById("audio_url").value = data.audio_url;
});
};
};
});
}
function stopRecording() {
mediaRecorder.stop();
}
</script>
<button onclick="startRecording()">🎀 Start Recording</button>
<button onclick="stopRecording()">⏹ Stop Recording</button>
<input type="hidden" id="audio_url">
"""
# βœ… Display HTML5 Recorder — rendered inside a sandboxed iframe; the fixed
# height just needs to fit the two buttons.
st.components.v1.html(audio_recorder_html, height=150)
# βœ… Process Button — sends the recorded audio to the Render API and renders
# the transcription, text answer, and synthesized voice reply.
if st.button("βœ… Process Recorded Audio"):
    with st.spinner("πŸ”„ Sending audio to AI model..."):
        # NOTE(review): nothing in this script ever writes "audio_url" into
        # st.session_state — the JS recorder stores it only in a hidden input
        # inside the component iframe — so this lookup stays None until the
        # recorder is wired to session state. TODO confirm intended data flow.
        audio_url = st.session_state.get("audio_url", None)
        if audio_url:
            # The recorder supplies base64-encoded audio; decode to raw bytes
            # for the multipart upload. b64decode raises binascii.Error (a
            # ValueError subclass) on malformed input — surface that as a UI
            # error instead of an unhandled traceback.
            try:
                audio_bytes = io.BytesIO(base64.b64decode(audio_url))
            except ValueError:
                st.error("❌ Recorded audio is not valid base64 data.")
            else:
                try:
                    # βœ… Send recorded audio to Render API. The timeout keeps
                    # the script from hanging forever while a free Render dyno
                    # cold-starts or the network drops.
                    response = requests.post(
                        RENDER_API_URL,
                        files={"file": ("audio.wav", audio_bytes, "audio/wav")},
                        timeout=120,
                    )
                except requests.RequestException as exc:
                    st.error(f"❌ Could not reach the API: {exc}")
                else:
                    # βœ… Handle API response
                    if response.status_code == 200:
                        result = response.json()
                        st.success("βœ… AI Response:")
                        st.write("πŸ“ **Transcription:**", result.get("transcription", "No transcription"))
                        st.write("πŸ€– **Answer:**", result.get("response", "No response found."))
                        # βœ… Fetch and play AI-generated voice response
                        audio_response_url = result.get("audio")
                        if audio_response_url:
                            st.write("πŸ”Š **AI-generated voice response:**")
                            try:
                                audio_response = requests.get(audio_response_url, timeout=60)
                            except requests.RequestException as exc:
                                st.error(f"❌ Failed to load AI audio ({exc})")
                            else:
                                if audio_response.status_code == 200:
                                    st.audio(audio_response.content, format="audio/wav")
                                else:
                                    st.error(f"❌ Failed to load AI audio ({audio_response.status_code})")
                        else:
                            st.warning("⚠️ No audio response received from API.")
                    else:
                        st.error(f"❌ API Error: {response.status_code} - {response.text}")
        else:
            st.error("⚠️ No audio recorded. Please record first!")