import logging
import os

import gradio as gr
import numpy as np
from PIL import Image
from huggingface_hub import hf_hub_url, cached_download

from inference.face_detector import StatRetinaFaceDetector
from inference.model_pipeline import VSNetModelPipeline
from inference.onnx_model import ONNXModel

logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')

MODEL_IMG_SIZE = 256
usage_count = 0

def load_model():
    REPO_ID = "Podtekatel/ARCNEGAN"
    FILENAME_OLD = "arcane_exp_203_ep_399.onnx"
    FILENAME_NEW = "arcane_exp_206_ep_138.onnx"

    global model_old
    global model_new
    global pipeline_old
    global pipeline_new

    # Old model
    model_path = cached_download(
        hf_hub_url(REPO_ID, FILENAME_OLD), use_auth_token=os.getenv('HF_TOKEN')
    )
    model_old = ONNXModel(model_path)
    pipeline_old = VSNetModelPipeline(model_old, StatRetinaFaceDetector(MODEL_IMG_SIZE),
                                      background_resize=1024, no_detected_resize=1024)

    # New model
    model_path = cached_download(
        hf_hub_url(REPO_ID, FILENAME_NEW), use_auth_token=os.getenv('HF_TOKEN')
    )
    model_new = ONNXModel(model_path)
    pipeline_new = VSNetModelPipeline(model_new, StatRetinaFaceDetector(MODEL_IMG_SIZE),
                                      background_resize=1024, no_detected_resize=1024)
    return model_old, model_new


load_model()
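# Note: hf_hub_url + cached_download are deprecated in newer huggingface_hub releases.
# A hedged sketch of an equivalent download (assuming a recent huggingface_hub is installed;
# not part of the original Space):
#   from huggingface_hub import hf_hub_download
#   model_path = hf_hub_download(REPO_ID, FILENAME_NEW, token=os.getenv('HF_TOKEN'))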


def inference(img, ver):
    # Convert the PIL image to a numpy array and run it through the selected pipeline
    img = np.array(img)
    if ver == 'version 2':
        out_img = pipeline_new(img)
    else:
        out_img = pipeline_old(img)
    out_img = Image.fromarray(out_img)

    # Track how many times the demo has been used
    global usage_count
    usage_count += 1
    logging.info(f'Usage count is {usage_count}')
    return out_img
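# Quick local smoke test (a sketch; 'demo/example.jpg' is a hypothetical file name,
# any image in the demo/ folder used for the examples below would work):
#   out = inference(Image.open('demo/example.jpg'), 'version 2')
#   out.save('stylized.jpg')
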
title = "ARCNStyleTransfer"
description = "Gradio demo for Arcane Season 1 style transfer. To use it, simply upload your image or click one of the examples to load it."
article = "This is one of my successful experiments with style transfer. I built my own pipeline, generator model and private dataset to train this model.<br>" \
          "The model pipeline used in this project is an improved CartoonGAN.<br>" \
          "The model was trained on an RTX 2080 Ti for 1.5 days with a batch size of 7.<br>" \
          "The model weights are 64 MB in ONNX fp32 format; inference takes 25 ms on GPU and 150 ms on CPU at 256x256 resolution.<br>" \
          "If you want to use this app or integrate this model into yours, please contact me by email at '[email protected]'."
imgs_folder = 'demo'
examples = [[os.path.join(imgs_folder, img_filename), version] for img_filename in sorted(os.listdir(imgs_folder)) for version in ['version 2']]
demo = gr.Interface(
    fn=inference,
    inputs=[gr.inputs.Image(type="pil"),
            gr.inputs.Radio(['version 1', 'version 2'], type="value", default='version 2', label='version')],
    outputs=gr.outputs.Image(type="pil"),
    title=title,
    description=description,
    article=article,
    examples=examples)
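# Note: gr.inputs.Image, gr.inputs.Radio and gr.outputs.Image are the legacy Gradio API
# (removed in Gradio 4). A hedged equivalent for newer Gradio versions (an assumption,
# not part of the original Space) would be:
#   inputs=[gr.Image(type="pil"),
#           gr.Radio(['version 1', 'version 2'], value='version 2', label='version')],
#   outputs=gr.Image(type="pil"),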
demo.queue(concurrency_count=1)
demo.launch()