Commit · 9d0b2aa
1 Parent(s): f049523
update

Files changed: hugging_face/app.py (+9 -8)

hugging_face/app.py CHANGED
@@ -97,9 +97,8 @@ def get_frames_from_image(image_input, image_state):
                            gr.update(visible=True), gr.update(visible=True), \
                            gr.update(visible=True), gr.update(visible=True),\
                            gr.update(visible=True), gr.update(visible=True), \
-                           gr.update(visible=True), gr.update(visible=
-                           gr.update(visible=
-                           gr.update(visible=True), gr.update(visible=True, value=[]), \
+                           gr.update(visible=True), gr.update(visible=False), \
+                           gr.update(visible=False), gr.update(visible=True), \
                            gr.update(visible=True)
 
 # extract frames from upload video
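For readers tracing this change: in Gradio apps of this style, an event handler returns one value per component listed in `outputs=`, and a `gr.update(...)` at a given position patches the component at that same position, so swapping `visible=True` for `visible=False` in the return tuple is what hides or reveals a control. A minimal, self-contained sketch of that mechanism (the component names are illustrative, not the ones in `app.py`):

```python
import gradio as gr

def toggle_controls(frames_ready):
    # One gr.update(...) per output component, applied positionally:
    # the two buttons follow the checkbox state, the hint does the opposite.
    return (
        gr.update(visible=frames_ready),      # illustrative "add point" button
        gr.update(visible=frames_ready),      # illustrative "clear clicks" button
        gr.update(visible=not frames_ready),  # illustrative "upload first" hint
    )

with gr.Blocks() as demo:
    frames_ready = gr.Checkbox(label="Frames extracted")   # illustrative trigger
    point_prompt = gr.Button("Add point", visible=False)
    clear_clicks = gr.Button("Clear clicks", visible=False)
    upload_hint = gr.Markdown("Upload an image or video first.")
    frames_ready.change(toggle_controls, inputs=frames_ready,
                        outputs=[point_prompt, clear_clicks, upload_hint])

demo.launch()
```

Read against that convention, the edited return tuple in this hunk simply changes which controls are shown immediately after frames are extracted from an image.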
@@ -166,7 +165,8 @@ def get_frames_from_video(video_input, video_state):
     video_info = "Video Name: {},\nFPS: {},\nTotal Frames: {},\nImage Size:{}".format(video_state["video_name"], round(video_state["fps"], 0), len(frames), image_size)
     model.samcontroler.sam_controler.reset_image()
     model.samcontroler.sam_controler.set_image(video_state["origin_images"][0])
-    return video_state, video_info, video_state["origin_images"][0],
+    return video_state, video_info, video_state["origin_images"][0], \
+           gr.update(visible=True, maximum=len(frames), value=1), gr.update(visible=False, maximum=len(frames), value=len(frames)), \
            gr.update(visible=True), gr.update(visible=True), \
            gr.update(visible=True), gr.update(visible=True),\
            gr.update(visible=True), gr.update(visible=True), \
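The two `+` lines in this hunk pass `maximum=len(frames)` and an initial `value` through `gr.update(...)`, the standard way to re-range a Gradio `Slider` once the number of extracted frames is known. A rough sketch of that idea, with hypothetical component names rather than the actual ones wired up in `app.py`:

```python
import gradio as gr

def on_video_loaded(num_frames):
    # Hypothetical stand-in for get_frames_from_video: after decoding the video
    # into num_frames images, re-range the frame sliders to cover 1..num_frames.
    info = f"Total Frames: {int(num_frames)}"
    start_update = gr.update(visible=True, maximum=num_frames, value=1)           # shown right away
    end_update = gr.update(visible=False, maximum=num_frames, value=num_frames)   # revealed later in the flow
    return info, start_update, end_update

with gr.Blocks() as demo:
    num_frames = gr.Number(value=120, label="Frame count (stand-in for a decoded video)")
    load_btn = gr.Button("Load")
    video_info = gr.Textbox(label="Video info")
    start_frame = gr.Slider(minimum=1, maximum=100, step=1, value=1, label="Track start frame")
    end_frame = gr.Slider(minimum=1, maximum=100, step=1, value=100, label="Track end frame", visible=False)
    load_btn.click(on_video_loaded, inputs=num_frames,
                   outputs=[video_info, start_frame, end_frame])

demo.launch()
```

Keeping the second slider hidden at first mirrors the `visible=False` in the added line; the re-ranging itself only needs `maximum` and `value`.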
@@ -445,11 +445,12 @@ description = r"""
 π₯ MatAnyone is a practical human video matting framework supporting target assignment 🎯.<br>
 πͺ Try to drop your video/image, assign the target masks with a few clicks, and get the the matting results 🤡!<br>
 
-*Note: Due to the online GPU memory constraints, any input with too big resolution will be resized to 1080p.<br
-π If you wish to run
+*Note: Due to the online GPU memory constraints, any input with too big resolution will be resized to 1080p.<br>*
+π <b> If you encounter any issue (e.g., frozen video output) or wish to run on higher resolution inputs, please consider <u>duplicating this space</u> or
+<u>launching the <a href='https://github.com/pq-yang/MatAnyone?tab=readme-ov-file#-interactive-demo' target='_blank'>demo</a> locally</u> following the GitHub instructions.</b>
 """
-article = r"""
-<b>If MatAnyone is helpful, please help to π the <a href='https://github.com/pq-yang/MatAnyone' target='_blank'>Github Repo</a>. Thanks!</b>
+article = r"""<h3>
+<b>If MatAnyone is helpful, please help to π the <a href='https://github.com/pq-yang/MatAnyone' target='_blank'>Github Repo</a>. Thanks!</b></h3>
 
 ---
 
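The `description` and `article` strings edited in the last hunk are raw Python strings carrying inline HTML (`<b>`, `<u>`, `<h3>`, anchors). Gradio demos of this kind usually hand such strings to `gr.Markdown` (or `gr.HTML`) inside the `Blocks` layout, which is why markup rather than plain text appears in the diff. A minimal sketch of that assumed wiring, not taken from this commit:

```python
import gradio as gr

# Shortened stand-ins for the strings edited in the hunk above.
description = r"""
MatAnyone is a practical human video matting framework supporting target assignment.<br>
*Note: Due to the online GPU memory constraints, any input with too big resolution will be resized to 1080p.<br>*
"""

article = r"""<h3>
<b>If MatAnyone is helpful, please help to star the <a href='https://github.com/pq-yang/MatAnyone' target='_blank'>Github Repo</a>. Thanks!</b></h3>
"""

with gr.Blocks() as demo:
    gr.Markdown(description)   # header text rendered above the interactive widgets
    # ... upload, click-prompt, and matting components would go here ...
    gr.Markdown(article)       # footer text rendered below them
demo.launch()
```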