Update app.py
Browse files
app.py
CHANGED
@@ -31,7 +31,15 @@ from PyPDF2 import PdfReader
|
|
31 |
from templates import bot_template, css, user_template
|
32 |
from xml.etree import ElementTree as ET
|
33 |
|
34 |
-
# Constants
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
35 |
API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama
|
36 |
API_KEY = os.getenv('API_KEY')
|
37 |
headers = {
|
@@ -507,11 +515,11 @@ def main():
|
|
507 |
# Feedback
|
508 |
# Step: Give User a Way to Upvote or Downvote
|
509 |
feedback = st.radio("Step 8: Give your feedback", ("π Upvote", "π Downvote"))
|
510 |
-
|
511 |
if feedback == "π Upvote":
|
512 |
st.write("You upvoted π. Thank you for your feedback!")
|
513 |
else:
|
514 |
st.write("You downvoted π. Thank you for your feedback!")
|
|
|
515 |
|
516 |
load_dotenv()
|
517 |
st.write(css, unsafe_allow_html=True)
|
@@ -534,4 +542,67 @@ with st.sidebar:
|
|
534 |
create_file(filename, raw, '', should_save)
|
535 |
|
536 |
if __name__ == "__main__":
|
537 |
-
main()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
31 |
from templates import bot_template, css, user_template
|
32 |
from xml.etree import ElementTree as ET
|
33 |
|
34 |
+
# Whisper Constants
# Rolling transcript file that whisper_save_transcription appends to and
# whisper_load_previous_transcriptions reads back.
file_path = 'text_output.txt'
# Hugging Face inference endpoint hosting the Whisper speech-to-text model.
WHISPER_API_URL = 'https://tonpixzfvq3791u9.us-east-1.aws.endpoints.huggingface.cloud'
# Request headers for the Whisper endpoint: body is posted as raw WAV bytes.
# NOTE(review): the bearer token is hardcoded (masked here) — it should come
# from an environment variable, like API_KEY for the Llama endpoint below.
WHISPER_headers = {
    "Authorization": "Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
    "Content-Type": "audio/wav"
}
|
41 |
+
|
42 |
+
# Llama Constants
|
43 |
API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama
|
44 |
API_KEY = os.getenv('API_KEY')
|
45 |
headers = {
|
|
|
515 |
# Feedback
|
516 |
# Step: Give User a Way to Upvote or Downvote
|
517 |
feedback = st.radio("Step 8: Give your feedback", ("π Upvote", "π Downvote"))
|
|
|
518 |
if feedback == "π Upvote":
|
519 |
st.write("You upvoted π. Thank you for your feedback!")
|
520 |
else:
|
521 |
st.write("You downvoted π. Thank you for your feedback!")
|
522 |
+
|
523 |
|
524 |
load_dotenv()
|
525 |
st.write(css, unsafe_allow_html=True)
|
|
|
542 |
create_file(filename, raw, '', should_save)
|
543 |
|
544 |
if __name__ == "__main__":
|
545 |
+
main()
|
546 |
+
|
547 |
+
|
548 |
+
|
549 |
+
def whisper(filename):
    """POST the WAV file at *filename* to the Whisper HF endpoint and return the parsed JSON.

    Bug fix: the original did `data = f.read` (no parentheses), which passed the
    bound method object — not the audio bytes — as the request body.

    NOTE(review): no error handling here; a commented-out try/except hinted the
    endpoint scales from zero (KEDA) and may need ~3 minutes to wake. The caller
    (whisper_main) currently catches failures and shows a "loading" message.
    """
    with open(filename, "rb") as f:
        data = f.read()  # was `f.read` — must call it to get the bytes
    response = requests.post(WHISPER_API_URL, headers=WHISPER_headers, data=data)
    return response.json()
|
557 |
+
|
558 |
+
def whisper_generate_filename(prompt, file_type):
    """Build a timestamped, filesystem-safe filename: "<MMDD_HHMM>_<safe_prompt>.<file_type>".

    prompt: free text; spaces and newlines become underscores, then every
        character that is not alphanumeric or "_" is dropped, truncated to 90 chars.
    file_type: extension without the leading dot (e.g. "wav").

    Improvement: uses the stdlib ``zoneinfo`` (Python 3.9+) instead of the
    third-party ``pytz`` for the US/Central timestamp — identical wall time,
    one less dependency. Imported locally so this function stands alone.
    """
    from zoneinfo import ZoneInfo
    central = ZoneInfo('US/Central')
    safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
    replaced_prompt = prompt.replace(" ", "_").replace("\n", "_")
    safe_prompt = "".join(x for x in replaced_prompt if x.isalnum() or x == "_")[:90]
    return f"{safe_date_time}_{safe_prompt}.{file_type}"
|
564 |
+
|
565 |
+
def whisper_save_and_play_audio(audio_recorder):
    """Record audio via the supplied recorder widget, save it as a WAV file,
    play it back in the app, and return the saved filename.

    Returns None when the recorder produced no audio.
    """
    recorded = audio_recorder()
    if not recorded:
        return None
    wav_name = whisper_generate_filename("Recording", "wav")
    with open(wav_name, 'wb') as out:
        out.write(recorded)
    st.audio(recorded, format="audio/wav")
    return wav_name
|
573 |
+
|
574 |
+
def whisper_transcribe_audio(filename):
    """Thin wrapper: forward *filename* to whisper() and hand back its JSON output."""
    return whisper(filename)
|
577 |
+
|
578 |
+
def whisper_save_transcription(transcription):
    """Append *transcription* as one line to the rolling transcript file (file_path)."""
    with open(file_path, 'a') as log:
        # print() with file= emits str(transcription) + "\n", matching the
        # original f-string write.
        print(transcription, file=log)
|
581 |
+
|
582 |
+
def whisper_load_previous_transcriptions():
    """Return the saved transcript text, or "" when no transcript file exists yet."""
    if not os.path.exists(file_path):
        return ""
    with open(file_path, 'r') as f:
        return f.read()
|
587 |
+
|
588 |
+
def whisper_main():
    """Streamlit page: record speech, transcribe it via the Whisper endpoint,
    show the running transcript, and persist each new transcription to disk.

    Fixes: the bare ``except:`` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to ``except Exception``. Removed the unused ``text_area`` local.
    """
    st.title("Speech to Text π€π")
    st.write("Record your speech and get the text. π¨οΈ")

    previous_transcriptions = whisper_load_previous_transcriptions()
    st.text_area("Transcriptions:", previous_transcriptions, height=400)

    filename = whisper_save_and_play_audio(audio_recorder)
    if filename is not None:
        try:
            transcription = whisper_transcribe_audio(filename)

            # Re-render the text area with the new transcription appended.
            updated_transcriptions = f"{previous_transcriptions}\n{transcription}"
            st.text_area("Transcriptions:", updated_transcriptions, height=400)

            # Persist the new transcription to the rolling transcript file.
            whisper_save_transcription(transcription)
        except Exception:
            # Endpoint likely still scaling up from zero — best-effort notice.
            st.write('Whisperer loading..')
|
608 |
+
|