---
base_model:
- Qwen/Qwen2-VL-2B-Instruct
library_name: transformers
model_name: HazardNet-unsloth-v0.4
tags:
- trl
- sft
license: apache-2.0
datasets:
- Tami3/HazardQA
language:
- en
pipeline_tag: visual-question-answering
---

# Model Card for HazardNet-unsloth-v0.4

This model is a fine-tuned version of [Qwen/Qwen2-VL-2B-Instruct](https://huggingface.co/Qwen/Qwen2-VL-2B-Instruct).
It has been trained using [TRL](https://github.com/huggingface/trl).

## Quick start

```python
from transformers import pipeline
from PIL import Image
import requests
from io import BytesIO

# Initialize the Visual Question Answering pipeline with HazardNet
hazard_vqa = pipeline(
    "visual-question-answering",
    model="Tami3/HazardNet",
)

# Function to load an image from a local path or a URL
def load_image(image_path=None, image_url=None):
    if image_path:
        return Image.open(image_path).convert("RGB")
    elif image_url:
        response = requests.get(image_url)
        response.raise_for_status()  # Ensure the request was successful
        return Image.open(BytesIO(response.content)).convert("RGB")
    else:
        raise ValueError("Provide either image_path or image_url.")

# Example 1: Loading an image from a local file
try:
    image_path = "path_to_your_ego_car_image.jpg"  # Replace with your local image path
    image = load_image(image_path=image_path)
except Exception as e:
    print(f"Error loading image from path: {e}")
    # Optionally, handle the error or exit

# Example 2: Loading an image from a URL
# try:
#     image_url = "https://example.com/path_to_image.jpg"  # Replace with your image URL
#     image = load_image(image_url=image_url)
# except Exception as e:
#     print(f"Error loading image from URL: {e}")
#     # Optionally, handle the error or exit

# Define your question about potential hazards
question = "Is there a pedestrian crossing the road ahead?"

# Get the answer from the HazardNet pipeline
try:
    result = hazard_vqa(question=question, image=image)
    # The VQA pipeline returns a list of {"answer", "score"} dicts ranked
    # by confidence, so take the top-ranked candidate.
    top = result[0] if isinstance(result, list) else result
    answer = top.get("answer", "No answer provided.")
    score = top.get("score", 0.0)

    print("Question:", question)
    print("Answer:", answer)
    print("Confidence Score:", score)
except Exception as e:
    print(f"Error during inference: {e}")
    # Optionally, handle the error or exit
```
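Depending on your `transformers` version, the generic `visual-question-answering` pipeline may not dispatch to a generative vision-language checkpoint like this one. As a fallback, here is a minimal sketch of calling the model through the base Qwen2-VL chat interface; it assumes the fine-tune ships the base model's processor and chat template, and the image path is a placeholder:

```python
from PIL import Image
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

# Load the checkpoint with the native Qwen2-VL generation API
model = Qwen2VLForConditionalGeneration.from_pretrained(
    "Tami3/HazardNet", torch_dtype="auto", device_map="auto"
)
processor = AutoProcessor.from_pretrained("Tami3/HazardNet")

# A single user turn: one image placeholder plus the hazard question
messages = [
    {
        "role": "user",
        "content": [
            {"type": "image"},
            {"type": "text", "text": "Is there a pedestrian crossing the road ahead?"},
        ],
    }
]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True)

image = Image.open("path_to_your_ego_car_image.jpg").convert("RGB")
inputs = processor(text=[prompt], images=[image], return_tensors="pt").to(model.device)

# Decode only the newly generated tokens, not the prompt
output_ids = model.generate(**inputs, max_new_tokens=64)
answer = processor.batch_decode(
    output_ids[:, inputs["input_ids"].shape[1]:], skip_special_tokens=True
)[0]
print("Answer:", answer)
```
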
## Training procedure
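
The `trl` and `sft` tags indicate the checkpoint was produced with TRL's `SFTTrainer`. The exact recipe is not recorded on this card, so the sketch below is a hypothetical reconstruction following TRL's vision-language SFT pattern: the HazardQA column names (`image`, `question`, `answer`) and all hyperparameters are assumptions, and the Unsloth acceleration suggested by the model name is omitted.

```python
import torch
from datasets import load_dataset
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration
from trl import SFTConfig, SFTTrainer

base = "Qwen/Qwen2-VL-2B-Instruct"
model = Qwen2VLForConditionalGeneration.from_pretrained(base, torch_dtype=torch.bfloat16)
processor = AutoProcessor.from_pretrained(base)

dataset = load_dataset("Tami3/HazardQA", split="train")

def collate_fn(examples):
    # Render each QA pair with the chat template, then batch-encode with images
    texts, images = [], []
    for ex in examples:
        messages = [
            {"role": "user", "content": [
                {"type": "image"},
                {"type": "text", "text": ex["question"]},  # assumed column name
            ]},
            {"role": "assistant", "content": [{"type": "text", "text": ex["answer"]}]},
        ]
        texts.append(processor.apply_chat_template(messages, tokenize=False))
        images.append(ex["image"])  # assumed column name
    batch = processor(text=texts, images=images, return_tensors="pt", padding=True)
    labels = batch["input_ids"].clone()
    labels[labels == processor.tokenizer.pad_token_id] = -100  # mask padding in the loss
    batch["labels"] = labels
    return batch

training_args = SFTConfig(
    output_dir="HazardNet-unsloth-v0.4",
    per_device_train_batch_size=2,    # assumption
    gradient_accumulation_steps=8,    # assumption
    num_train_epochs=1,               # assumption
    remove_unused_columns=False,      # keep the image column for the collator
    dataset_kwargs={"skip_prepare_dataset": True},
)

trainer = SFTTrainer(
    model=model,
    args=training_args,
    train_dataset=dataset,
    data_collator=collate_fn,
)
trainer.train()
```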