| | import ffmpeg |
| | import numpy as np |
| | import librosa |
| | import os |
| | import time |
| | from transformers import pipeline |
| | import gradio as gr |
| |
|
| | |
| | |
# Hugging Face pipelines, loaded once at import time:
#  - Whisper (small) for automatic speech recognition
#  - a Spanish BERT2BERT model fine-tuned for summarization
transcriber = pipeline(
    "automatic-speech-recognition",
    model="openai/whisper-small",
)
summarizer = pipeline(
    "summarization",
    model="mrm8488/bert2bert_shared-spanish-finetuned-summarization",
)
| |
|
| | |
| | state = {"status": "Esperando transmisi贸n...", "transcriptions": [], "summary": ""} |
| |
|
| | |
def wait_for_stream(rtmp_url, poll_interval=5):
    """Block until an RTMP stream is being published at *rtmp_url*.

    Probes the URL with ffmpeg; while no stream is up, ``ffmpeg.probe``
    raises ``ffmpeg.Error`` and we sleep *poll_interval* seconds before
    retrying. Progress is reported through the shared ``state`` dict.

    Args:
        rtmp_url: RTMP endpoint to probe (FLV container assumed).
        poll_interval: Seconds between failed probes (previously a
            hard-coded 5; kept as the default for existing callers).
    """
    state["status"] = "Esperando transmisión..."
    print(state["status"])
    while True:
        try:
            # Raises ffmpeg.Error while nothing is being published yet.
            probe = ffmpeg.probe(rtmp_url, format='flv')
        except ffmpeg.Error:
            time.sleep(poll_interval)
            continue
        if probe:
            state["status"] = "¡Transmisión detectada!"
            print(state["status"])
            break
| |
|
| | |
def process_rtmp(rtmp_url):
    """Dump the RTMP stream's audio to a WAV file and transcribe it.

    Launches an ffmpeg child process that demuxes *rtmp_url* into mono
    16 kHz PCM, then periodically loads the (growing) WAV file and runs
    it through the Whisper pipeline, appending each result to the shared
    ``state["transcriptions"]``.

    NOTE(review): each iteration re-transcribes the whole file from the
    start, so successive entries contain overlapping text; a chunked /
    offset reader would avoid the duplication — confirm intended behavior.

    Fixes vs. the original:
      * the loop now exits when ffmpeg terminates (stream ended) instead
        of spinning forever until a KeyboardInterrupt;
      * the ffmpeg child is always released via try/finally, even on
        unexpected errors.

    Args:
        rtmp_url: RTMP endpoint to capture.

    Returns:
        All partial transcriptions joined with single spaces.
    """
    audio_output = "stream_audio.wav"
    transcription = []

    state["status"] = "Transcribiendo en tiempo real..."
    print(state["status"])

    process = (
        ffmpeg
        .input(rtmp_url, format='flv')
        .output(audio_output, format='wav', acodec='pcm_s16le', ac=1, ar=16000)
        .overwrite_output()
        .run_async(pipe_stdout=True, pipe_stderr=True)
    )

    try:
        while True:
            if os.path.exists(audio_output):
                audio_data, _ = librosa.load(audio_output, sr=16000)
                if len(audio_data) > 0:
                    text = transcriber(np.array(audio_data))["text"]
                    transcription.append(text)
                    state["transcriptions"].append(text)
                    print(f"Transcripción: {text}")
            # Stop once ffmpeg exits (stream over) rather than looping forever.
            if process.poll() is not None:
                break
            time.sleep(2)
    except KeyboardInterrupt:
        pass  # manual stop: fall through to cleanup and return what we have
    finally:
        # Always release the ffmpeg child, whatever ended the loop.
        if process.poll() is None:
            process.terminate()
        process.wait()

    state["status"] = "Transmisión finalizada"
    print(state["status"])
    return " ".join(transcription)
| |
|
| | |
def finalize_summary(transcription):
    """Summarize *transcription* with the Spanish BERT2BERT pipeline.

    Updates the shared ``state`` with progress and stores the result in
    ``state["summary"]``.

    Args:
        transcription: Full transcribed text; may be empty.

    Returns:
        The generated summary, or "" when there is nothing to summarize
        (the pipeline raises on empty input — guard added).
    """
    state["status"] = "Generando resumen..."
    print(state["status"])
    # Robustness: a silent stream yields an empty transcription; skip the
    # model call instead of crashing on it.
    if not transcription or not transcription.strip():
        summary = ""
    else:
        summary = summarizer(
            transcription, max_length=100, min_length=30, do_sample=False
        )[0]["summary_text"]
    state["summary"] = summary
    state["status"] = "Resumen listo"
    print(state["status"])
    return summary
| |
|
| | |
def process_and_finalize(rtmp_url="rtmp://37.27.213.138/live/stream"):
    """Run the full pipeline: wait for a stream, transcribe it, summarize it.

    Args:
        rtmp_url: RTMP endpoint to watch. Previously hard-coded inside the
            function body; kept as the default so existing zero-argument
            callers are unaffected.

    Returns:
        The final summary text produced by ``finalize_summary``.
    """
    # 1. Block until something is actually being published.
    wait_for_stream(rtmp_url)
    # 2. Capture + transcribe until the stream ends.
    transcription = process_rtmp(rtmp_url)
    # 3. Condense the accumulated text.
    summary = finalize_summary(transcription)
    return summary
| |
|
| | |
def display_status():
    """Render the shared pipeline state as a single plain-text report."""
    sections = [
        f"Estado: {state['status']}",
        "",
        "Transcripciones:",
        "\n".join(state["transcriptions"]),
        "",
        f"Resumen final:\n{state['summary']}",
    ]
    return "\n".join(sections)
| |
|
# Read-only Gradio view: re-renders the shared state on each refresh.
# Fix: title/description were UTF-8 mojibake ("Transmisi贸n" -> "Transmisión").
demo = gr.Interface(
    fn=display_status,
    inputs=None,
    outputs="text",
    title="Estado de Transmisión y Resumen",
    description=(
        "Muestra el estado de la transmisión, transcripciones en tiempo "
        "real y el resumen generado."
    ),
)
| |
|
# Script entry point: start the Gradio UI only when executed directly.
if __name__ == "__main__":
    demo.launch()
| |
|
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | |
| | |
| | |