'''import sys
import asyncio
from aiohttp import web, WSMsgType
import json
from json import JSONEncoder
import numpy as np
import uuid
import logging
import os
import signal
from typing import Dict, Any, List, Optional
import base64
import io
from PIL import Image
import gradio as gr
import cv2
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def SIGSEGV_signal_arises(signalNum, stack):
logger.critical(f"{signalNum} : SIGSEGV arises")
logger.critical(f"Stack trace: {stack}")
signal.signal(signal.SIGSEGV, SIGSEGV_signal_arises)
from loader import initialize_models
from engine import Engine, base64_data_uri_to_PIL_Image
from pathlib import Path
import cv2
# Global constants
DATA_ROOT = os.environ.get('DATA_ROOT', '/tmp/data')
MODELS_DIR = os.path.join(DATA_ROOT, "models")
async def setup():
    live_portrait = await initialize_models()
    engine = Engine(live_portrait=live_portrait)

    def get_all_frames(video_path):
        cap = cv2.VideoCapture(video_path)
        frames = []
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frames.append(frame)
        cap.release()
        return frames

    async def return_image(image):
        binary_data = Path(image).read_bytes()
        res = await engine.load_image(binary_data)
        id = res['u']
        _, image = await engine.transform_image(id, {
            "aaa": -10,
            "eee": -10,
            "woo": -12
        })
        return image

    async def return_video(video):
        print(video)
        gr.Info("Extracting frames..")
        frames = get_all_frames(video)
        gr.Info("Loading frames..")
        res = await engine.load_frames(frames)
        id = res['u']
        height, width, _ = frames[0].shape
        output_file = "output_video.mp4"
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_writer = cv2.VideoWriter(output_file, fourcc, 24.0, (width, height))
        gr.Info("Processing..")
        async for image in engine.transform_video(id, {
            "aaa": -10,
            "eee": -10,
            "woo": -12
        }):
            # Assuming the engine yields RGB (PIL) frames, convert once to BGR for cv2.VideoWriter
            bgr_frame = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
            video_writer.write(bgr_frame)
        video_writer.release()
        return output_file

    with gr.Blocks(title="Image Return") as interface:
        gr.Markdown("## 📼 Video to Image Converter")
        with gr.Row():  # Arrange the components in a single row
            video_input = gr.Video(label="Upload your video")
            image_output = gr.Video(label="Processed Image")
        submit_btn = gr.Button("🔁 Process", variant="primary")  # Primary button style
        submit_btn.click(
            fn=return_video,  # Your processing function
            inputs=video_input,
            outputs=image_output,
        )
    interface.launch(share=True)

if __name__ == "__main__":
    asyncio.run(setup())'''
import sys
import asyncio
from aiohttp import web, WSMsgType
import json
from json import JSONEncoder
import numpy as np
import uuid
import logging
import os
import signal
from typing import Dict, Any, List, Optional
import base64
import io
from PIL import Image
import gradio as gr
import cv2
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
def SIGSEGV_signal_arises(signalNum, stack):
logger.critical(f"{signalNum} : SIGSEGV arises")
logger.critical(f"Stack trace: {stack}")
signal.signal(signal.SIGSEGV, SIGSEGV_signal_arises)
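# Note: the SIGSEGV handler above is registered before the model-related imports below,
# presumably so that crashes inside native dependencies are at least logged before the process dies.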
from loader import initialize_models
from engine import Engine, base64_data_uri_to_PIL_Image
from pathlib import Path
# Global constants
DATA_ROOT = os.environ.get('DATA_ROOT', '/tmp/data')
MODELS_DIR = os.path.join(DATA_ROOT, "models")
async def setup():
    live_portrait = await initialize_models()
    engine = Engine(live_portrait=live_portrait)

    async def return_video(video):
        gr.Info("Processing video..")
        # "aaa", "eee", "woo": expression-style offsets passed through to the engine
        output = await engine.process_video(video, {
            "aaa": -10,
            "eee": -10,
            "woo": -12
        })
        return output

    interface = gr.Interface(
        fn=return_video,  # Your function to process video
        inputs=gr.Video(label="Upload your video"),
        outputs=gr.Video(label="Processed Image"),
        title="Image Return",
        description="📼 Video to Image Converter"
    )
    interface.launch(share=True)

if __name__ == "__main__":
    asyncio.run(setup())
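# Minimal local-run sketch (the filename app.py is an assumption; DATA_ROOT defaults to /tmp/data above):
#   DATA_ROOT=/tmp/data python app.py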