ngthanhtinqn committed
Commit • 6b44c63
1 Parent(s): c199bab

fix text prompt
app.py CHANGED
@@ -18,7 +18,7 @@ hence you can get better predictions by querying the image with text templates u
 demo = gr.Interface(
     query_image,
     inputs=[gr.Image(), "text"],
-    outputs="image",
+    outputs=["image", "image"],
     title="Zero-Shot Object Detection with OWL-ViT",
     description=description,
     examples=[
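With outputs now a list of two image components, Gradio renders two result panels, so query_image has to return a matching pair of images. A minimal sketch of the resulting wiring, with a stub standing in for the real detection pipeline (the stub just echoes the input; the actual query_image in demo.py returns the segmented image and the boxed image):

import gradio as gr

# Sketch of the two-output interface this commit wires up.
# query_image is a stub here; the real one runs OWL-ViT and returns
# (owlvit_segment_image, image_with_box).
def query_image(img, text_prompt):
    # Two return values to match outputs=["image", "image"].
    return img, img

demo = gr.Interface(
    query_image,
    inputs=[gr.Image(), "text"],
    outputs=["image", "image"],
    title="Zero-Shot Object Detection with OWL-ViT",
)

if __name__ == "__main__":
    demo.launch()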
demo.py CHANGED
@@ -87,7 +87,7 @@ def query_image(img, text_prompt):
     pil_img = Image.fromarray(np.uint8(img)).convert('RGB')
 
     text_prompt = text_prompt
-    texts = text_prompt.split(",")
+    texts = [text_prompt.split(",")]
 
     box_threshold = 0.0
 
@@ -115,11 +115,6 @@ def query_image(img, text_prompt):
     # boxes, scores, labels = results[i]["boxes"], results[i]["scores"], results[i]["labels"]
 
 
-    # Print detected objects and rescaled box coordinates
-    # for box, score, label in zip(boxes, scores, labels):
-    #     box = [round(i, 2) for i in box.tolist()]
-    #     print(f"Detected {text[label]} with confidence {round(score.item(), 3)} at location {box}")
-
     boxes = boxes.cpu().detach().numpy()
     normalized_boxes = copy.deepcopy(boxes)
 
@@ -174,5 +169,5 @@ def query_image(img, text_prompt):
     # grounded results
     image_with_box = plot_boxes_to_image(pil_img, pred_dict)[0]
 
-
-    return owlvit_segment_image
+    return owlvit_segment_image, image_with_box
+    # return owlvit_segment_image
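Wrapping texts in an extra list matches the Hugging Face OWL-ViT API: OwlViTProcessor takes text as a list of query lists, one inner list per image in the batch, so a single comma-separated prompt becomes [["a cat", "a dog"]]. A sketch under that assumption; the checkpoint name below is the stock google/owlvit-base-patch32 and is not necessarily the one this Space loads:

import torch
from PIL import Image
from transformers import OwlViTProcessor, OwlViTForObjectDetection

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
model = OwlViTForObjectDetection.from_pretrained("google/owlvit-base-patch32")

image = Image.new("RGB", (640, 480))   # placeholder for the user-uploaded image
text_prompt = "a cat,a dog"
texts = [text_prompt.split(",")]       # [["a cat", "a dog"]]: one query list per image

inputs = processor(text=texts, images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Rescale predicted boxes back to the original image size (height, width).
target_sizes = torch.tensor([image.size[::-1]])
results = processor.post_process_object_detection(
    outputs=outputs, target_sizes=target_sizes, threshold=0.0
)
# Same access pattern as the line commented out in demo.py:
boxes, scores, labels = results[0]["boxes"], results[0]["scores"], results[0]["labels"]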