import logging
import os

import gradio as gr
import numpy as np
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image

from inference.face_detector import StatRetinaFaceDetector
from inference.model_pipeline import VSNetModelPipeline
from inference.onnx_model import ONNXModel
# Timestamped log lines, e.g. "2023-01-01 12:00:00 INFO     message".
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')

# Size passed to the face detector — presumably the generator's square
# input resolution (256x256); confirm against StatRetinaFaceDetector.
MODEL_IMG_SIZE = 256
def load_model():
    """Download the ONNX generator from the Hugging Face Hub and build the pipeline.

    Side effects:
        Sets the module-level ``model`` and ``pipeline`` globals used by
        :func:`inference`.

    Returns:
        The loaded :class:`ONNXModel`.
    """
    REPO_ID = "Podtekatel/ARCNEGAN"
    FILENAME = "arcane_exp_203_ep_281.onnx"

    global model
    global pipeline
    # hf_hub_download replaces the deprecated hf_hub_url + cached_download
    # pair, which was removed from recent huggingface_hub releases.
    model_path = hf_hub_download(
        REPO_ID, FILENAME, token=os.getenv('HF_TOKEN')
    )
    model = ONNXModel(model_path)
    pipeline = VSNetModelPipeline(
        model,
        StatRetinaFaceDetector(MODEL_IMG_SIZE),
        background_resize=1024,
        no_detected_resize=1024,
    )
    return model


load_model()
def inference(img):
    """Run the Arcane style-transfer pipeline on a PIL image.

    Converts the input to a numpy array, feeds it through the global
    ``pipeline``, and wraps the stylized array back into a PIL image.
    """
    frame = np.array(img)
    stylized = pipeline(frame)
    return Image.fromarray(stylized)
# Text shown on the Gradio page; <br> tags in `article` are rendered as HTML.
# Dead "" continuation lines removed — the concatenated value is unchanged.
title = "ARCNStyleTransfer"
description = "Gradio Demo for Arcane Season 1 style transfer. To use it, simply upload your image, or click one of the examples to load them."
article = "This is one of my successful experiments on style transfer. I've built my own pipeline, generator model and private dataset to train this model<br>" \
          "Model pipeline which used in project is improved CartoonGAN.<br>" \
          "This model was trained on RTX 2080 Ti 1.5 days with batch size 7.<br>" \
          "Model weights 64 MB in ONNX fp32 format, infers 25 ms on GPU and 150 ms on CPU at 256x256 resolution.<br>" \
          "If you want to use this app or integrate this model into yours, please contact me at email '[email protected]'."
# Collect demo images from the local 'demo' folder, sorted for a stable
# ordering. Each example is a one-element list because the interface has a
# single image input.
imgs_folder = 'demo'
examples = [[os.path.join(imgs_folder, img_filename)] for img_filename in sorted(os.listdir(imgs_folder))]
# Build and serve the Gradio UI. gr.inputs.Image / gr.outputs.Image were
# deprecated in Gradio 2.x and removed in 3.x+; the top-level gr.Image
# component replaces both.
demo = gr.Interface(
    fn=inference,
    inputs=[gr.Image(type="pil")],
    outputs=gr.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples,
)
demo.launch()