import streamlit as st
import streamlit.components.v1 as components
import datetime
import os
import base64

# Initialize session state variables
if 'transcript_history' not in st.session_state:
    st.session_state.transcript_history = []

# Function to create a download link for a string
def get_download_link(text, filename):
    b64 = base64.b64encode(text.encode()).decode()
    return f'<a href="data:text/plain;base64,{b64}" download="{filename}">Download Transcript</a>'
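
# Alternative (sketch, not used above): newer Streamlit releases also ship
# st.download_button, which avoids hand-building a base64 data URL, e.g.:
#   st.download_button("Download Transcript", data=text, file_name=filename, mime="text/plain")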
# Create the main layout
st.title("Speech Recognition with Transcript History")
col1, col2 = st.columns([2, 1])
with col1:
html = """
<!DOCTYPE html>
<html>
<head>
<title>Continuous Speech Demo</title>
<style>
body {
font-family: sans-serif;
padding: 20px;
max-width: 800px;
margin: 0 auto;
}
button {
padding: 10px 20px;
margin: 10px 5px;
font-size: 16px;
}
#status {
margin: 10px 0;
padding: 10px;
background: #e8f5e9;
border-radius: 4px;
}
#output {
white-space: pre-wrap;
padding: 15px;
background: #f5f5f5;
border-radius: 4px;
margin: 10px 0;
min-height: 100px;
max-height: 400px;
overflow-y: auto;
}
.controls {
margin: 10px 0;
}
</style>
</head>
<body>
<div class="controls">
<button id="start">Start Listening</button>
<button id="stop" disabled>Stop Listening</button>
<button id="clear">Clear Text</button>
</div>
<div id="status">Ready</div>
<div id="output"></div>
        <script>
            if (!('webkitSpeechRecognition' in window)) {
                alert('Speech recognition not supported');
            } else {
                const recognition = new webkitSpeechRecognition();
                const startButton = document.getElementById('start');
                const stopButton = document.getElementById('stop');
                const clearButton = document.getElementById('clear');
                const status = document.getElementById('status');
                const output = document.getElementById('output');

                let fullTranscript = '';
                let lastUpdateTime = Date.now();

                // Configure recognition
                recognition.continuous = true;
                recognition.interimResults = true;
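                // Optional (assumption, not in the original): recognition.lang accepts a
                // BCP-47 tag to force a locale instead of the browser default, e.g.:
                // recognition.lang = 'en-US';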

                // Function to start recognition
                const startRecognition = () => {
                    try {
                        recognition.start();
                        status.textContent = 'Listening...';
                        startButton.disabled = true;
                        stopButton.disabled = false;
                    } catch (e) {
                        console.error(e);
                        status.textContent = 'Error: ' + e.message;
                    }
                };

                // Auto-start on load
                window.addEventListener('load', () => {
                    setTimeout(startRecognition, 1000); // Delay start by 1 second to ensure everything is loaded
                });

                startButton.onclick = startRecognition;

                stopButton.onclick = () => {
                    recognition.stop();
                    status.textContent = 'Stopped';
                    startButton.disabled = false;
                    stopButton.disabled = true;
                };

                clearButton.onclick = () => {
                    fullTranscript = '';
                    output.textContent = '';
                    // Send clear signal to Streamlit
                    window.parent.postMessage({type: 'clear'}, '*');
                };

                recognition.onresult = (event) => {
                    let interimTranscript = '';
                    let finalTranscript = '';
                    for (let i = event.resultIndex; i < event.results.length; i++) {
                        const transcript = event.results[i][0].transcript;
                        if (event.results[i].isFinal) {
                            finalTranscript += transcript + '\\n';
                        } else {
                            interimTranscript += transcript;
                        }
                    }
                    if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
                        if (finalTranscript) {
                            fullTranscript += finalTranscript;
                            // Send to Streamlit
                            window.parent.postMessage({
                                type: 'transcript',
                                text: finalTranscript
                            }, '*');
                        }
                        lastUpdateTime = Date.now();
                    }
                    output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
                    output.scrollTop = output.scrollHeight;
                };

                recognition.onend = () => {
                    // Auto-restart unless the user pressed Stop
                    if (!stopButton.disabled) {
                        try {
                            recognition.start();
                            console.log('Restarted recognition');
                        } catch (e) {
                            console.error('Failed to restart recognition:', e);
                            status.textContent = 'Error restarting: ' + e.message;
                            startButton.disabled = false;
                            stopButton.disabled = true;
                        }
                    }
                };

                recognition.onerror = (event) => {
                    console.error('Recognition error:', event.error);
                    status.textContent = 'Error: ' + event.error;
                    if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
                        startButton.disabled = false;
                        stopButton.disabled = true;
                    }
                };

                // Listen for messages from Streamlit
                window.addEventListener('message', (event) => {
                    if (event.data.type === 'clear') {
                        fullTranscript = '';
                        output.textContent = '';
                    }
                });
            }
        </script>
    </body>
    </html>
    """
    # Display the HTML component
    component = components.html(html, height=400)

with col2:
    # Display transcript history
    st.subheader("Transcript History")

    # Function to save transcript
    def save_transcript(text):
        if not os.path.exists('transcripts'):
            os.makedirs('transcripts')
        timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = f"transcripts/transcript_{timestamp}.md"
        with open(filename, 'w', encoding='utf-8') as f:
            f.write(text)
        return filename
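
    # Note: os.makedirs('transcripts', exist_ok=True) would fold the existence
    # check above into a single call.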

    # Display transcript
    if st.session_state.transcript_history:
        full_transcript = "\n".join(st.session_state.transcript_history)
        st.text_area("Full Transcript", value=full_transcript, height=300)

        # Build a timestamped filename for the download link
        timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
        filename = f"transcript_{timestamp}.md"

        # Create download link
        st.markdown(get_download_link(full_transcript, filename), unsafe_allow_html=True)

        # Save to file system
        if st.button("Save to File"):
            saved_file = save_transcript(full_transcript)
            st.success(f"Saved to {saved_file}")
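
# Note: components.html renders a static component and returns None; for the
# handler below to actually receive the postMessage payloads, the HTML would
# need to be wrapped in a bi-directional custom component
# (components.declare_component) that returns those messages as its value.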
# Handle transcript updates from JavaScript
if component:
    try:
        data = component
        if isinstance(data, dict) and data.get('type') == 'transcript':
            st.session_state.transcript_history.append(data['text'])
            st.experimental_rerun()
        elif isinstance(data, dict) and data.get('type') == 'clear':
            st.session_state.transcript_history = []
            st.experimental_rerun()
    except Exception as e:
        st.error(f"Error processing transcript: {e}")
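
# Usage note: run with `streamlit run app.py`; speech capture requires a browser
# that exposes webkitSpeechRecognition (e.g. Chromium-based browsers).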