Update README.md
Browse files
README.md
CHANGED
@@ -1,3 +1,51 @@
|
|
1 |
---
|
2 |
-
|
|
|
3 |
---
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
---
|
2 |
+
tags:
|
3 |
+
- depth_anything
|
4 |
---
|
5 |
+
|
6 |
+
# Depth Anything model, large
|
7 |
+
|
8 |
+
## Installation
|
9 |
+
|
10 |
+
First, install the Depth Anything package:
|
11 |
+
```
|
12 |
+
git clone https://github.com/LiheYoung/Depth-Anything
|
13 |
+
cd Depth-Anything
|
14 |
+
pip install -r requirements.txt
|
15 |
+
```
|
16 |
+
|
17 |
+
## Usage
|
18 |
+
|
19 |
+
Here's how to run the model:
|
20 |
+
|
21 |
+
```python
|
22 |
+
import cv2
import numpy as np
|
23 |
+
from PIL import Image
|
24 |
+
import torch
|
25 |
+
|
26 |
+
from depth_anything.dpt import DepthAnything
|
27 |
+
from depth_anything.util.transform import Resize, NormalizeImage, PrepareForNet
|
28 |
+
from torchvision.transforms import Compose
|
29 |
+
|
30 |
+
model = DepthAnything.from_pretrained("LiheYoung/depth_anything_vitl14")
|
31 |
+
|
32 |
+
transform = Compose([
|
33 |
+
Resize(
|
34 |
+
width=518,
|
35 |
+
height=518,
|
36 |
+
resize_target=False,
|
37 |
+
keep_aspect_ratio=True,
|
38 |
+
ensure_multiple_of=14,
|
39 |
+
resize_method='lower_bound',
|
40 |
+
image_interpolation_method=cv2.INTER_CUBIC,
|
41 |
+
),
|
42 |
+
NormalizeImage(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
|
43 |
+
PrepareForNet(),
|
44 |
+
])
|
45 |
+
|
46 |
+
image = Image.open(...)
|
47 |
+
image = transform({'image': np.array(image)})['image']
|
48 |
+
image = torch.from_numpy(image).unsqueeze(0)
|
49 |
+
|
50 |
+
depth = model(image)
|
51 |
+
```
|