# RealTimeAsyncASR / backup2.app.py
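# Streamlit app that embeds a browser-side speech recognizer (Web Speech API) and keeps a
# running transcript history in session state, with per-snippet saves and a download button.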
import streamlit as st
import datetime
import os
# Initialize session state for the transcript history if it does not exist yet
if 'transcript_history' not in st.session_state:
    st.session_state.transcript_history = ""
# Create a container for the transcript history
history_container = st.empty()
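# Placeholder for the editable full-transcript text area rendered further below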
text_area = st.empty()
html = """
<!DOCTYPE html>
<html>
<head>
    <title>Continuous Speech Demo</title>
    <style>
        body {
            font-family: sans-serif;
            padding: 20px;
            max-width: 800px;
            margin: 0 auto;
        }
        button {
            padding: 10px 20px;
            margin: 10px 5px;
            font-size: 16px;
        }
        #status {
            margin: 10px 0;
            padding: 10px;
            background: #e8f5e9;
            border-radius: 4px;
        }
        #output {
            white-space: pre-wrap;
            padding: 15px;
            background: #f5f5f5;
            border-radius: 4px;
            margin: 10px 0;
            min-height: 100px;
            max-height: 400px;
            overflow-y: auto;
        }
        .controls {
            margin: 10px 0;
        }
    </style>
</head>
<body>
    <h1>Continuous Speech Recognition</h1>
    <div class="controls">
        <button id="start">Start Listening</button>
        <button id="stop" disabled>Stop Listening</button>
        <button id="clear">Clear Text</button>
    </div>
    <div id="status">Ready</div>
    <div id="output"></div>
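    <!-- Hidden form holding the latest transcript; note that st.components.v1.html renders
         this page in a static iframe, so the POST in the submit handler below is not read
         by the Streamlit server on its own. -->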
    <form id="transcriptForm" style="display:none;">
        <input type="hidden" id="transcriptData" name="transcript" value="">
    </form>
    <script>
        if (!('webkitSpeechRecognition' in window)) {
            alert('Speech recognition not supported');
        } else {
            const recognition = new webkitSpeechRecognition();
            const startButton = document.getElementById('start');
            const stopButton = document.getElementById('stop');
            const clearButton = document.getElementById('clear');
            const status = document.getElementById('status');
            const output = document.getElementById('output');
            const transcriptForm = document.getElementById('transcriptForm');
            const transcriptData = document.getElementById('transcriptData');
            let fullTranscript = '';
            let lastUpdateTime = Date.now();

            // Function to update Streamlit
            function updateStreamlit(text) {
                transcriptData.value = text;
                const event = new Event('submit');
                transcriptForm.dispatchEvent(event);
            }

            // Configure recognition
            recognition.continuous = true;
            recognition.interimResults = true;
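            // continuous keeps the session open across pauses, and interimResults streams
            // partial hypotheses so the display can update while the user is still speaking.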
            startButton.onclick = () => {
                try {
                    recognition.start();
                    status.textContent = 'Listening...';
                    startButton.disabled = true;
                    stopButton.disabled = false;
                } catch (e) {
                    console.error(e);
                    status.textContent = 'Error: ' + e.message;
                }
            };

            stopButton.onclick = () => {
                recognition.stop();
                status.textContent = 'Stopped';
                startButton.disabled = false;
                stopButton.disabled = true;
            };

            clearButton.onclick = () => {
                fullTranscript = '';
                output.textContent = '';
                updateStreamlit('');
            };

            recognition.onresult = (event) => {
                let interimTranscript = '';
                let finalTranscript = '';

                // Process results
                for (let i = event.resultIndex; i < event.results.length; i++) {
                    const transcript = event.results[i][0].transcript;
                    if (event.results[i].isFinal) {
                        finalTranscript += transcript + '\\n';
                    } else {
                        interimTranscript += transcript;
                    }
                }

                // Update if we have final results or it's been 5 seconds
                if (finalTranscript || (Date.now() - lastUpdateTime > 5000)) {
                    if (finalTranscript) {
                        fullTranscript += finalTranscript;
                        updateStreamlit(finalTranscript);
                    }
                    lastUpdateTime = Date.now();
                }

                // Display results
                output.textContent = fullTranscript + (interimTranscript ? '... ' + interimTranscript : '');
                // Auto-scroll to bottom
                output.scrollTop = output.scrollHeight;
            };
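            // Browsers end recognition after a stretch of silence or a transient error;
            // restart it automatically unless the user pressed Stop (stop button disabled).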
            recognition.onend = () => {
                if (!stopButton.disabled) {
                    try {
                        recognition.start();
                        console.log('Restarted recognition');
                    } catch (e) {
                        console.error('Failed to restart recognition:', e);
                        status.textContent = 'Error restarting: ' + e.message;
                        startButton.disabled = false;
                        stopButton.disabled = true;
                    }
                }
            };

            recognition.onerror = (event) => {
                console.error('Recognition error:', event.error);
                status.textContent = 'Error: ' + event.error;
                if (event.error === 'not-allowed' || event.error === 'service-not-allowed') {
                    startButton.disabled = false;
                    stopButton.disabled = true;
                }
            };
            // Handle form submission
            transcriptForm.onsubmit = (e) => {
                e.preventDefault();
                const formData = new FormData(transcriptForm);
                fetch('', {
                    method: 'POST',
                    body: formData
                });
            };
        }
    </script>
</body>
</html>
"""
# Function to save transcript to file
def save_transcript(text):
    if not os.path.exists('transcripts'):
        os.makedirs('transcripts')
    timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
    filename = f"transcripts/transcript_{timestamp}.md"
    with open(filename, 'a', encoding='utf-8') as f:
        f.write(text + '\n')
# Main app
st.title("Speech Recognition with Transcript History")
# Embed the speech-recognition page (static HTML rendered in an iframe)
st.components.v1.html(html, height=600)
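# A possible extension (sketch, not part of the original app): st.components.v1.html embeds
# the page above as a static iframe, so the hidden-form POST never reaches Python directly.
# One way to close the loop would be a bidirectional component, assuming the HTML is saved
# as index.html in a local "speech_component" folder and loads streamlit-component-lib so
# its JavaScript can call Streamlit.setComponentValue(fullTranscript):
#
#     speech_recognizer = st.components.v1.declare_component(
#         "speech_recognizer", path="./speech_component"
#     )
#     transcript = speech_recognizer(default="")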
# Handle transcript data posted back via the hidden form
if st.session_state.get('form_submitted', False):
    transcript = st.session_state.get('transcript', '')
    if transcript:
        # Append the new text to the transcript history
        st.session_state.transcript_history += transcript + '\n'
        # Save to file
        save_transcript(transcript)

# Update the display with the accumulated history
history_container.markdown(st.session_state.transcript_history)
text_area.text_area("Full Transcript", st.session_state.transcript_history, height=200)

# Add a download button for the full transcript
if st.session_state.transcript_history:
    st.download_button(
        label="Download Full Transcript",
        data=st.session_state.transcript_history,
        file_name=f"transcript_{datetime.datetime.now().strftime('%Y%m%d_%H%M%S')}.md",
        mime="text/markdown"
    )

# Reset form_submitted so the same transcript is not appended again on the next rerun
if 'form_submitted' in st.session_state:
    st.session_state.form_submitted = False