add markdown to space
app.py CHANGED
@@ -422,35 +422,6 @@ def get_target_anno(target):
     return pose_img, target_pose, target_cond, target_keypts


-# def draw_grid(ref):
-#     if ref is None or ref["composite"] is None:  # or len(ref["layers"])==0:
-#         return ref
-
-#     # if len(ref["layers"]) == 1:
-#     #     need_draw = True
-#     # # elif ref["composite"].shape[0] != size_memory[0] or ref["composite"].shape[1] != size_memory[1]:
-#     # #     need_draw = True
-#     # else:
-#     #     need_draw = False
-
-#     # size_memory = ref["composite"].shape[0], ref["composite"].shape[1]
-#     # if not need_draw:
-#     #     return size_memory, ref
-
-#     h, w = ref["composite"].shape[:2]
-#     grid_h, grid_w = h // 32, w // 32
-#     # grid = np.zeros((h, w, 4), dtype=np.uint8)
-#     for i in range(1, grid_h):
-#         ref["composite"][i * 32, :, :3] = 255  # 0.5 * ref["composite"][i * 32, :, :3] +
-#     for i in range(1, grid_w):
-#         ref["composite"][:, i * 32, :3] = 255  # 0.5 * ref["composite"][:, i * 32, :3] +
-#     # if len(ref["layers"]) == 1:
-#     #     ref["layers"].append(grid)
-#     # else:
-#     #     ref["layers"][1] = grid
-#     return ref["composite"]
-
-
 def get_mask_inpaint(ref):
     inpaint_mask = np.array(ref["layers"][0])[..., -1]
     inpaint_mask = cv2.resize(
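Note: the surviving context shows `get_mask_inpaint` reading the alpha channel of the editor's first layer and resizing it with cv2. A minimal standalone sketch of that pattern, assuming a `gr.ImageEditor`-style value dict; the helper name and the 256×256 output size are illustrative, not taken from app.py:

```python
import cv2
import numpy as np

def extract_inpaint_mask(editor_value, out_size=(256, 256)):
    """Illustrative helper; the name and out_size are assumptions, not from app.py."""
    # editor_value is the dict a gr.ImageEditor produces:
    # "background", "layers" (RGBA brush strokes), and "composite".
    layer = np.array(editor_value["layers"][0])  # first brush layer, RGBA
    alpha = layer[..., -1]                       # painted pixels have alpha > 0
    mask = (alpha > 0).astype(np.uint8)          # binary inpainting mask
    # Downsample to the model's working resolution.
    return cv2.resize(mask, out_size, interpolation=cv2.INTER_AREA)
```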
@@ -919,31 +890,6 @@ def set_visible(checkbox, kpts, img_clean, img_pose_right, img_pose_left):
     )


-# def parse_fix_example(ex_img, ex_masked):
-#     original_img = ex_img
-#     # ex_img = cv2.resize(ex_img, (LENGTH, LENGTH), interpolation=cv2.INTER_AREA)
-#     # ex_masked = cv2.resize(ex_masked, (LENGTH, LENGTH), interpolation=cv2.INTER_AREA)
-#     inpaint_mask = np.all(ex_masked > 250, axis=-1).astype(np.uint8)
-#     layer = np.ones_like(ex_img) * 255
-#     layer = np.concatenate([layer, np.zeros_like(ex_img[..., 0:1])], axis=-1)
-#     layer[inpaint_mask == 1, 3] = 255
-#     ref_value = {
-#         "composite": ex_masked,
-#         "background": ex_img,
-#         "layers": [layer],
-#     }
-#     inpaint_mask = cv2.resize(
-#         inpaint_mask, opts.image_size, interpolation=cv2.INTER_AREA
-#     )
-#     kp_img = visualize_ref(ref_value)
-#     return (
-#         original_img,
-#         gr.update(value=ref_value),
-#         kp_img,
-#         inpaint_mask,
-#     )
-
-
 LENGTH = 480

 example_imgs = [
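Note: the comment block removed above (`parse_fix_example`) rebuilt a `gr.ImageEditor`-style value from an example image and its white-painted counterpart. A cleaned-up sketch of that idea, with the app-specific `visualize_ref` and `opts.image_size` steps left out and an illustrative function name:

```python
import numpy as np

def build_editor_value(ex_img, ex_masked):
    """Sketch of the deleted parse_fix_example logic; app-specific steps omitted."""
    # Pixels painted (near-)white in the masked copy mark the inpainting region.
    inpaint_mask = np.all(ex_masked > 250, axis=-1).astype(np.uint8)
    # One white RGBA layer whose alpha channel encodes those pixels.
    layer = np.ones_like(ex_img) * 255
    layer = np.concatenate([layer, np.zeros_like(ex_img[..., 0:1])], axis=-1)
    layer[inpaint_mask == 1, 3] = 255
    # The dict shape a gr.ImageEditor component expects as its value.
    return {
        "background": ex_img,
        "layers": [layer],
        "composite": ex_masked,
    }, inpaint_mask
```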
@@ -1012,8 +958,27 @@ custom_css = """
 }
 """

+_HEADER_ = '''
+<h2><b>Official Demo</b></h2>
+<h1><b>✋FoundHand: Large-Scale Domain-Specific Learning for Controllable Hand Image Generation</b></h1>
+📝<a href='https://arxiv.org/abs/2412.02690' target='_blank'>Paper</a>
+📢<a href='https://ivl.cs.brown.edu/research/foundhand.html' target='_blank'>Project</a>
+'''
+
+_CITE_ = """
+<h2>Citation</h2>
+```
+@article{chen2024foundhand,
+  title={FoundHand: Large-Scale Domain-Specific Learning for Controllable Hand Image Generation},
+  author={Chen, Kefan and Min, Chaerin and Zhang, Linguang and Hampali, Shreyas and Keskin, Cem and Sridhar, Srinath},
+  journal={arXiv preprint arXiv:2412.02690},
+  year={2024}
+}
+```
+"""

 with gr.Blocks(css=custom_css) as demo:
+    gr.Markdown(_HEADER_)
     with gr.Tab("Edit Hand Poses"):
         ref_img = gr.State(value=None)
         ref_cond = gr.State(value=None)
@@ -1579,7 +1544,8 @@ with gr.Blocks(css=custom_css) as demo:
             examples_per_page=20,
         )

-
+    gr.Markdown(_CITE_)
+
 # print("Ready to launch..")
 # _, _, shared_url = demo.queue().launch(
 #     share=True, server_name="0.0.0.0", server_port=7739
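Note: together, the two hunks above put a header banner before the tabs and a citation block after the examples. A minimal standalone sketch of that layout, with trimmed stand-in strings and the tab contents omitted:

```python
import gradio as gr

# Trimmed stand-ins for the _HEADER_ / _CITE_ strings added in the commit above.
_HEADER_ = "<h1><b>FoundHand: Controllable Hand Image Generation</b></h1>"
_CITE_ = "<h2>Citation</h2>"

with gr.Blocks() as demo:
    gr.Markdown(_HEADER_)           # banner rendered above the tabs
    with gr.Tab("Edit Hand Poses"):
        pass                        # the demo's actual controls live here
    gr.Markdown(_CITE_)             # citation rendered below the content

if __name__ == "__main__":
    demo.launch()
```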