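# Gradio app that turns a Spanish YouTube video into a short illustrated summary video:
# the audio is extracted, transcribed with Whisper, summarized into scenes with an
# OpenAI model, illustrated with Stable Diffusion, and narrated with Coqui TTS.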
import gradio as gr
from typing import Any
import torch
from transformers import pipeline
from diffusers import StableDiffusionPipeline
from TTS.api import TTS
import utils
from youtubeaudioextractor import PytubeAudioExtractor
from transcriber import Transcriber
from textprocessor import TextProcessor
from videocreator import VideoCreator
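# Transcription model: Whisper medium fine-tuned for Spanish. The whole app assumes Spanish input.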
TRANSCRIBER_MODEL_NAME = "juancopi81/whisper-medium-es"
lang = "es"
device = "cuda" if torch.cuda.is_available() else "cpu"
device_dict = {"cuda": 0, "cpu": -1}
dtype = torch.float16 if device == "cuda" else torch.float32
# Detect if code is running in Colab
is_colab = utils.is_google_colab()
colab_instruction = "" if is_colab else """
<p>You can skip the queue using Colab:
<a href="">
<img data-canonical-src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" src="https://colab.research.google.com/assets/colab-badge.svg"></a></p>"""
device_print = "GPU 🔥" if torch.cuda.is_available() else "CPU 🥶"
# Initialize components
audio_extractor = PytubeAudioExtractor()
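# Chunked ASR pipeline: audio is transcribed in 30-second windows, so long videos are handled piecewise.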
transcription_pipe = pipeline(
    task="automatic-speech-recognition",
    model=TRANSCRIBER_MODEL_NAME,
    chunk_length_s=30,
    device=device_dict[device],
)
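# Pin the decoder prompt so Whisper always transcribes in Spanish instead of auto-detecting or translating.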
transcription_pipe.model.config.forced_decoder_ids = transcription_pipe.tokenizer.get_decoder_prompt_ids(
    language=lang, task="transcribe"
)
audio_transcriber = Transcriber(transcription_pipe)
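# The OpenAI completion model turns the transcript into a JSON list of scenes.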
openai_model = "text-davinci-003"
text_processor = TextProcessor(openai_model)
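# Stable Diffusion v1.5 renders the scene illustrations; fp16 weights are used when a GPU is available.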
image_model_id = "runwayml/stable-diffusion-v1-5"
image_pipeline = StableDiffusionPipeline.from_pretrained(
    image_model_id,
    torch_dtype=dtype,
    revision="fp16",
)
image_pipeline = image_pipeline.to(device)
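# NOTE: selecting the voice model by list index is fragile; the ordering of
# TTS.list_models() can change between Coqui TTS releases.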
vo_model_name = TTS.list_models()[22]
# Init TTS
tts = TTS(vo_model_name)
video_creator = VideoCreator(tts, image_pipeline)
def datapipeline(url: str) -> Any:
    """Download the audio, transcribe it, summarize it into scenes, and render the video."""
    audio_path_file = audio_extractor.extract(url)
    print(f"Audio file created at: {audio_path_file}")
    transcribed_text = audio_transcriber.transcribe(audio_path_file)
    print("Audio transcription ready!")
    json_scenes = text_processor.get_json_scenes(transcribed_text)
    print("Scenes ready!")
    video = video_creator.create_video(json_scenes)
    print(f"Video created at: {video}")
    # The path is returned twice: once for the video player and once for the file download.
    return video, video
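# Custom CSS for the Gradio interface.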
css = """
a {
color: inherit;
text-decoration: underline;
}
.gradio-container {
font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
color: white;
border-color: #000000;
background: #000000;
}
input[type='range'] {
accent-color: #000000;
}
.dark input[type='range'] {
accent-color: #dfdfdf;
}
.container {
max-width: 730px;
margin: auto;
padding-top: 1.5rem;
}
#gallery {
min-height: 22rem;
margin-bottom: 15px;
margin-left: auto;
margin-right: auto;
border-bottom-right-radius: .5rem !important;
border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
min-height: 20rem;
}
.details:hover {
text-decoration: underline;
}
.gr-button {
white-space: nowrap;
}
.gr-button:focus {
border-color: rgb(147 197 253 / var(--tw-border-opacity));
outline: none;
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
--tw-border-opacity: 1;
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
--tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
--tw-ring-opacity: .5;
}
#advanced-btn {
font-size: .7rem !important;
line-height: 19px;
margin-top: 12px;
margin-bottom: 12px;
padding: 2px 8px;
border-radius: 14px !important;
}
#advanced-options {
margin-bottom: 20px;
}
.footer {
margin-bottom: 45px;
margin-top: 35px;
text-align: center;
border-bottom: 1px solid #e5e5e5;
}
.footer>p {
font-size: .8rem;
display: inline-block;
padding: 0 10px;
transform: translateY(10px);
background: white;
}
.dark .footer {
border-color: #303030;
}
.dark .footer>p {
background: #0b0f19;
}
.acknowledgments h4{
margin: 1.25em 0 .25em 0;
font-weight: bold;
font-size: 115%;
}
#container-advanced-btns{
display: flex;
flex-wrap: wrap;
justify-content: space-between;
align-items: center;
}
.animate-spin {
animation: spin 1s linear infinite;
}
@keyframes spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
#share-btn-container {
display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem;
}
#share-btn {
all: initial; color: #ffffff;font-weight: 600; cursor:pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important;
}
#share-btn * {
all: unset;
}
.gr-form{
flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0;
}
#prompt-container{
gap: 0;
}
#generated_id{
min-height: 700px;
}
"""
block = gr.Blocks(css=css)
with block as demo:
    gr.HTML(
        f"""
            <div style="text-align: center; max-width: 650px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
                  align-items: center;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                "
              >
                <h1 style="font-weight: 900; margin-bottom: 7px;">
                  YouTube to Illustrated Summary
                </h1>
              </div>
              <p style="margin-bottom: 10px; font-size: 94%">
                Enter the URL of a YouTube video (in Spanish) and you'll receive a video with an illustrated summary.
                It works for audiobooks, history lessons, etc. Try it out with a short video (less than 4 minutes).
              </p>
              <p style="margin-bottom: 10px; font-size: 94%">
                Running on <b>{device_print}</b>
              </p>
              <p>
                Some sample videos you can try:
                <ul>
                  <li>https://www.youtube.com/watch?v=sRmmQBBln9Q (Cooking recipe. Inference time: ca. 200 seconds)</li>
                  <li>https://www.youtube.com/watch?v=qz4Wc48KITA (Poem by Edgar Allan Poe. Inference time: ca. 200 seconds)</li>
                  <li>https://www.youtube.com/watch?v=2D8CaoIY7Lk (The history of Christmas trees. Inference time: ca. 130 seconds)</li>
                  <li>https://www.youtube.com/watch?v=uhmRR-Ir7Bk (Dec. 20 news. Inference time: ca. 230 seconds)</li>
                  <li>https://www.youtube.com/watch?v=CT9T7Dp63x4 (Presentation of the movie Lady Chatterley's Lover. Inference time: ca. 277 seconds)</li>
                </ul>
              </p>
            </div>
        """
    )
    with gr.Group():
        with gr.Box():
            with gr.Row().style(mobile_collapse=False, equal_height=True):
                url = gr.Textbox(
                    label="Enter the URL of the YouTube video", show_label=False, max_lines=1
                ).style(
                    border=(True, False, True, True),
                    rounded=(True, False, False, True),
                    container=False,
                )
                btn = gr.Button("Run").style(
                    margin=False,
                    rounded=(False, True, True, False),
                )
        video_output = gr.Video()
        file_output = gr.File()
    btn.click(datapipeline,
              inputs=[url],
              outputs=[video_output, file_output])
    gr.HTML(
        """
            <div class="footer">
                <p>This demo is part of the Whisper Sprint (Dec. 2022).</p>
            </div>
        """
    )
    gr.Markdown('''
    [![Twitter Follow](https://img.shields.io/twitter/follow/juancopi81?style=social)](https://twitter.com/juancopi81)
    ![visitors](https://visitor-badge.glitch.me/badge?page_id=Juancopi81.yt-illustraded-summary)
    ''')
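# Outside Colab (e.g., on Spaces), queue requests one at a time; in Colab, launch with a public share link.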
if not is_colab:
demo.queue(concurrency_count=1)
demo.launch(debug=is_colab, share=is_colab)