Podtekatel committed on
Commit de7b3c0 • 1 Parent(s): 9cc5e70

Added new model

Files changed (1)
  1. app.py +28 -11
app.py CHANGED
@@ -18,24 +18,41 @@ logging.basicConfig(
 MODEL_IMG_SIZE = 256
 def load_model():
     REPO_ID = "Podtekatel/ARCNEGAN"
-    FILENAME = "arcane_exp_203_ep_399.onnx"
+    FILENAME_OLD = "arcane_exp_203_ep_399.onnx"
+    FILENAME_NEW = "arcane_exp_206_ep_138.onnx"
 
-    global model
-    global pipeline
+    global model_old
+    global model_new
+    global pipeline_old
+    global pipeline_new
 
+    # Old model
     model_path = cached_download(
-        hf_hub_url(REPO_ID, FILENAME), use_auth_token=os.getenv('HF_TOKEN')
+        hf_hub_url(REPO_ID, FILENAME_OLD), use_auth_token=os.getenv('HF_TOKEN')
     )
-    model = ONNXModel(model_path)
+    model_old = ONNXModel(model_path)
 
-    pipeline = VSNetModelPipeline(model, StatRetinaFaceDetector(MODEL_IMG_SIZE), background_resize=1024, no_detected_resize=1024)
-    return model
+    pipeline_old = VSNetModelPipeline(model_old, StatRetinaFaceDetector(MODEL_IMG_SIZE), background_resize=1024, no_detected_resize=1024)
 
+    # New model
+    model_path = cached_download(
+        hf_hub_url(REPO_ID, FILENAME_NEW), use_auth_token=os.getenv('HF_TOKEN')
+    )
+    model_new = ONNXModel(model_path)
+
+    pipeline_new = VSNetModelPipeline(model_new, StatRetinaFaceDetector(MODEL_IMG_SIZE), background_resize=1024,
+                                      no_detected_resize=1024)
+
+    return model_old, model_new
 load_model()
 
-def inference(img):
+def inference(img, ver):
     img = np.array(img)
-    out_img = pipeline(img)
+    if ver == 'version 2':
+        out_img = pipeline_new(img)
+    else:
+        out_img = pipeline_old(img)
+
     out_img = Image.fromarray(out_img)
     return out_img
 
@@ -52,11 +69,11 @@ article = "This is one of my successful experiments on style transfer. I've buil
           "If you want to use this app or integrate this model into yours, please contact me at email '[email protected]'."
 
 imgs_folder = 'demo'
-examples = [[os.path.join(imgs_folder, img_filename)] for img_filename in sorted(os.listdir(imgs_folder))]
+examples = [[os.path.join(imgs_folder, img_filename), version] for img_filename in sorted(os.listdir(imgs_folder)) for version in ['version 2']]
 
 demo = gr.Interface(
     fn=inference,
-    inputs=[gr.inputs.Image(type="pil")],
+    inputs=[gr.inputs.Image(type="pil"), gr.inputs.Radio(['version 1', 'version 2'], type="value", default='version 2', label='version')],
     outputs=gr.outputs.Image(type="pil"),
     title=title,
     description=description,
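
For reference, the new `ver` argument added to `inference` simply routes each request to one of the two preloaded pipelines, with anything other than 'version 2' falling back to the old model. Below is a minimal, standalone sketch of that dispatch pattern; the stub functions stand in for the project-specific VSNetModelPipeline objects and are not part of the commit.

from typing import Callable, Dict

import numpy as np

# Stubs standing in for pipeline_old / pipeline_new (ONNX pipelines in the app).
def stub_pipeline_old(img: np.ndarray) -> np.ndarray:
    return img

def stub_pipeline_new(img: np.ndarray) -> np.ndarray:
    return img

# Map the radio-button labels used in the Gradio interface to pipelines.
PIPELINES: Dict[str, Callable[[np.ndarray], np.ndarray]] = {
    'version 1': stub_pipeline_old,
    'version 2': stub_pipeline_new,
}

def inference(img, ver: str) -> np.ndarray:
    img = np.array(img)
    # Any label other than 'version 2' falls back to the old pipeline,
    # mirroring the else branch in the committed code.
    pipeline = PIPELINES.get(ver, stub_pipeline_old)
    return pipeline(img)

if __name__ == '__main__':
    dummy = np.zeros((8, 8, 3), dtype=np.uint8)
    print(inference(dummy, 'version 2').shape)  # (8, 8, 3)

A dictionary lookup like this scales more cleanly if further model versions are added later; for two versions it is equivalent to the if/else used in the commit.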