Update app.py
app.py CHANGED
@@ -1,65 +1,96 @@
import gradio as gr
import torch
import soundfile as sf
-from diffusers import StableVideoDiffusionPipeline
-from transformers import MusicgenForConditionalGeneration, MusicgenProcessor

-# Determine the device (GPU, if available)
device = "cuda" if torch.cuda.is_available() else "cpu"

video_pipe = StableVideoDiffusionPipeline.from_pretrained(
-    "stabilityai/stable-video-diffusion-img2vid",
-    torch_dtype=torch.float16 if device == "cuda" else torch.float32
).to(device)

-# Load the music generation model (MusicGen)
music_model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
music_processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")

-def
    video_path = "generated_video.mp4"
-    video_pipe.save_video(video_frames, video_path, fps=
    return video_path

def generate_music(prompt):
-    audio_array = audio_output[0].cpu().detach().numpy().squeeze()
-    # Save the audio to a file
    audio_path = "generated_audio.wav"
-    sf.write(audio_path, audio_array, samplerate=16000)
    return audio_path

-    music_output = gr.Audio(label="Generated music")
import gradio as gr
import torch
+import subprocess
+from diffusers import StableDiffusionPipeline, StableVideoDiffusionPipeline
+from diffusers.utils import export_to_video
+from transformers import MusicgenForConditionalGeneration, MusicgenProcessor, pipeline
import soundfile as sf
+from PIL import Image

device = "cuda" if torch.cuda.is_available() else "cpu"

+# Russian-to-English translation model for the prompts
+translator = pipeline("translation_ru_to_en", model="Helsinki-NLP/opus-mt-ru-en")
+
+# Load the models
+image_pipe = StableDiffusionPipeline.from_pretrained(
+    "CompVis/stable-diffusion-v1-4", torch_dtype=torch.float32
+).to(device)
+
video_pipe = StableVideoDiffusionPipeline.from_pretrained(
+    "stabilityai/stable-video-diffusion-img2vid", torch_dtype=torch.float32
).to(device)

music_model = MusicgenForConditionalGeneration.from_pretrained("facebook/musicgen-small")
music_processor = MusicgenProcessor.from_pretrained("facebook/musicgen-small")

+# Translate the prompt
+def translate_prompt(prompt):
+    translation = translator(prompt)
+    return translation[0]['translation_text']
+
+# Generate an image
+def generate_image(prompt):
+    prompt_en = translate_prompt(prompt)
+    image = image_pipe(prompt_en, num_inference_steps=10).images[0]
+    image_path = "generated_image.png"
+    image.save(image_path)
+    return image_path
+
+# Generate a video
+def generate_video(image_path):
+    image = Image.open(image_path)
+    # .frames is a batch of frame lists; take the first clip
+    video_frames = video_pipe(image, num_inference_steps=10, num_frames=16).frames[0]
    video_path = "generated_video.mp4"
+    # Diffusers pipelines have no save_video method; export_to_video writes the frames
+    export_to_video(video_frames, video_path, fps=8)
    return video_path

+# Generate music
def generate_music(prompt):
+    prompt_en = translate_prompt(prompt)
+    inputs = music_processor(text=[prompt_en], return_tensors="pt")
+    audio_output = music_model.generate(**inputs, max_new_tokens=512)
+    audio_array = audio_output[0].cpu().numpy().squeeze()
    audio_path = "generated_audio.wav"
+    # musicgen-small decodes at 32 kHz; a hard-coded 16000 would play at half speed
+    sampling_rate = music_model.config.audio_encoder.sampling_rate
+    sf.write(audio_path, audio_array, samplerate=sampling_rate)
    return audio_path

+# Merge the video and the music
+def merge_video_audio(video_path, audio_path):
+    output_path = "final_video.mp4"
+    # Argument list instead of a shell string: safe with spaces in paths;
+    # -shortest stops at the shorter stream, check=True raises if ffmpeg fails
+    subprocess.run(
+        ["ffmpeg", "-y", "-i", video_path, "-i", audio_path,
+         "-c:v", "copy", "-c:a", "aac", "-shortest", output_path],
+        check=True
+    )
+    return output_path

+# Main function: generate each asset, then the combined clip
+def create_all(prompt):
+    image_path = generate_image(prompt)
+    video_path = generate_video(image_path)
+    audio_path = generate_music(prompt)
+    final_video = merge_video_audio(video_path, audio_path)

+    return image_path, video_path, audio_path, final_video

+# Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("# OnlyReels.AI 🎥🎵 - Image, video and music generation")
+
+    prompt_input = gr.Textbox(label="Description (in Russian)",
+                              placeholder="Example: a cyberpunk city at night, synth music")
+    btn_generate = gr.Button("✨ Generate everything")
+
+    image_output = gr.Image(label="Image")
+    video_output = gr.Video(label="Video")
+    audio_output = gr.Audio(label="Music")
+    final_video_output = gr.Video(label="Final video with music")
+
+    btn_generate.click(
+        fn=create_all,
+        inputs=prompt_input,
+        outputs=[image_output, video_output, audio_output, final_video_output]
+    )
+
+if __name__ == "__main__":
+    demo.launch()
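
Because demo.launch() is guarded by the __main__ check, the generation functions can also be driven directly for a quick end-to-end check. A minimal smoke-test sketch, not part of the commit: it assumes the file above is saved as app.py, ffmpeg is on the PATH, and the model weights can be downloaded on first use (CPU inference will take several minutes per step).

# smoke_test.py - hypothetical driver, not part of the Space
from app import generate_image, generate_video, generate_music, merge_video_audio

prompt = "Киберпанк-город ночью, синтезаторная музыка"  # Russian input, as the UI expects
image_path = generate_image(prompt)               # -> generated_image.png
video_path = generate_video(image_path)           # -> generated_video.mp4
audio_path = generate_music(prompt)               # -> generated_audio.wav
print(merge_video_audio(video_path, audio_path))  # -> final_video.mp4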