Add log messages
- README.md +1 -1
- app.py +5 -1
- assets/room.jpg +0 -0
- assets/{test.png → test1.png} +0 -0
- assets/test2.jpg +0 -0
- client.py +2 -2
README.md
CHANGED
@@ -6,7 +6,7 @@ colorTo: gray
 sdk: docker
 pinned: false
 license: apache-2.0
-short_description: API endpoint for Scene understanding using
+short_description: API endpoint for Scene understanding using Moondream2
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py
CHANGED
@@ -27,18 +27,22 @@ tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)
 
 @app.get("/")
 def read_root():
-    data = {"
+    data = {"Scene": "Understanding", "Status": "Success"}
     return JSONResponse(content=data)
 
 @app.post("/generate-text/")
 async def generate_text(description: str = Form(...), file: UploadFile = File(...)):
+    print("generate_text endpoint called with description:", description)
+
     # Convert uploaded file to PIL image
     image = Image.open(file.file).convert("RGB")
+    print("Image uploaded and converted successfully")
 
     # Encode the image using the model
     enc_image = model.encode_image(image)
 
     # Answer the question using the model and tokenizer
     generated_text = model.answer_question(enc_image, description, tokenizer)
+    print("Text generated successfully")
 
     return {"generated_text": generated_text}
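The hunk starts just below the model setup; only the tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision) context line is visible in the hunk header. For orientation, a minimal sketch of what the setup above the hunk plausibly looks like, assuming the Space loads Moondream2 through transformers with trust_remote_code=True; the exact model_id and revision values are assumptions and are not part of this commit:

# Sketch only: assumed setup above the hunk, not shown in this diff.
from fastapi import FastAPI, File, Form, UploadFile
from fastapi.responses import JSONResponse
from PIL import Image
from transformers import AutoModelForCausalLM, AutoTokenizer

app = FastAPI()

model_id = "vikhyatk/moondream2"  # assumed repo id; the README only says "Moondream2"
revision = "main"                 # assumed; the diff shows only that a `revision` variable exists
model = AutoModelForCausalLM.from_pretrained(model_id, revision=revision, trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained(model_id, revision=revision)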
assets/room.jpg
ADDED
(binary image file)
assets/{test.png → test1.png}
RENAMED
File without changes
assets/test2.jpg
ADDED
(binary image file)
client.py
CHANGED
@@ -1,8 +1,8 @@
 import requests
 
-url = "
+url = "https://lord-reso-scene-understanding.hf.space/generate-text/"
 description = "Describe this image highlighting the positions of the objects. Use simple English words."
-file_path = "assets/
+file_path = "assets/room.jpg"
 
 with open(file_path, "rb") as image_file:
     files = {"file": image_file}
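The hunk ends at the files dict; the rest of client.py is unchanged and not shown here. As a minimal sketch of how such a client typically completes the call, assuming the request also carries the description form field that generate_text() expects (the continuation below is not part of this commit):

    # Assumed continuation inside the `with` block; not shown in this hunk.
    data = {"description": description}  # form field read by generate_text()
    response = requests.post(url, data=data, files=files, timeout=120)
    print(response.status_code, response.json())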