sitammeur committed
Commit 4949993 · verified · Parent: 8773959

Deprecation issue is fixed
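Background: recent `transformers` releases deprecate the vision `FeatureExtractor` classes in favour of the equivalent `ImageProcessor` classes, so `DPTFeatureExtractor` now emits a deprecation warning; `DPTImageProcessor` is the drop-in replacement used below. A minimal sketch of the swap (same checkpoint as in app.py, using one of the bundled example images):

    from PIL import Image
    from transformers import DPTImageProcessor

    # New processor: same __call__ signature as the old DPTFeatureExtractor,
    # so the rest of the pipeline is unchanged
    processor = DPTImageProcessor.from_pretrained("Intel/dpt-beit-large-512")
    image = Image.open("cat.jpg").convert("RGB")
    encoding = processor(image, return_tensors="pt")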

Files changed (1): app.py (+78 −78)
app.py CHANGED
@@ -1,78 +1,78 @@
- # Importing the requirements
- import numpy as np
- import gradio as gr
- import torch
- from PIL import Image
- from transformers import DPTFeatureExtractor, DPTForDepthEstimation
-
-
- # Load the model and feature extractor
- feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-beit-large-512")
- model = DPTForDepthEstimation.from_pretrained("Intel/dpt-beit-large-512")
-
-
- def process_image(image):
-     """
-     Preprocesses an image, passes it through a model, and returns the formatted depth map as an image.
-
-     Args:
-         image (PIL.Image.Image): The input image.
-
-     Returns:
-         PIL.Image.Image: The formatted depth map as an image.
-     """
-
-     # Preprocess the image for the model
-     encoding = feature_extractor(image, return_tensors="pt")
-
-     # Forward pass through the model
-     with torch.no_grad():
-         outputs = model(**encoding)
-         predicted_depth = outputs.predicted_depth
-
-     # Interpolate the predicted depth map to the original image size
-     prediction = torch.nn.functional.interpolate(
-         predicted_depth.unsqueeze(1),
-         size=image.size[::-1],
-         mode="bicubic",
-         align_corners=False,
-     ).squeeze()
-     output = prediction.cpu().numpy()
-     formatted = (output * 255 / np.max(output)).astype("uint8")
-
-     # Return the formatted depth map as an image
-     return Image.fromarray(formatted)
-
-
- # Image input for the interface
- image = gr.Image(type="pil", label="Image")
-
- # Output for the interface
- answer = gr.Image(type="pil", label="Depth Map")
-
- # Examples for the interface
- examples = [
-     ["cat.jpg"],
-     ["dog.jpg"],
-     ["bird.jpg"],
- ]
-
- # Title, description, and article for the interface
- title = "Zero Shot Depth Estimation"
- description = "Gradio Demo for the Intel/DPT Beit-Large-512 Depth Estimation model. This model can estimate the depth of objects in images. To use it, simply upload your image and click 'submit', or click one of the examples to load them. Read more at the links below."
- article = "<p style='text-align: center'><a href='https://arxiv.org/pdf/2307.14460' target='_blank'>MiDaS v3.1 – A Model Zoo for Robust Monocular Relative Depth Estimation</a> | <a href='https://huggingface.co/Intel/dpt-beit-large-512' target='_blank'>Model Page</a></p>"
-
-
- # Launch the interface
- interface = gr.Interface(
-     fn=process_image,
-     inputs=[image],
-     outputs=answer,
-     examples=examples,
-     title=title,
-     description=description,
-     article=article,
-     theme="Soft",
-     allow_flagging="never",
- )
- interface.launch(debug=False)
+ # Importing the requirements
+ import numpy as np
+ import gradio as gr
+ import torch
+ from PIL import Image
+ from transformers import DPTImageProcessor, DPTForDepthEstimation
+
+
+ # Load the model and feature extractor
+ feature_extractor = DPTImageProcessor.from_pretrained("Intel/dpt-beit-large-512")
+ model = DPTForDepthEstimation.from_pretrained("Intel/dpt-beit-large-512")
+
+
+ def process_image(image):
+     """
+     Preprocesses an image, passes it through a model, and returns the formatted depth map as an image.
+
+     Args:
+         image (PIL.Image.Image): The input image.
+
+     Returns:
+         PIL.Image.Image: The formatted depth map as an image.
+     """
+
+     # Preprocess the image for the model
+     encoding = feature_extractor(image, return_tensors="pt")
+
+     # Forward pass through the model
+     with torch.no_grad():
+         outputs = model(**encoding)
+         predicted_depth = outputs.predicted_depth
+
+     # Interpolate the predicted depth map to the original image size
+     prediction = torch.nn.functional.interpolate(
+         predicted_depth.unsqueeze(1),
+         size=image.size[::-1],
+         mode="bicubic",
+         align_corners=False,
+     ).squeeze()
+     output = prediction.cpu().numpy()
+     formatted = (output * 255 / np.max(output)).astype("uint8")
+
+     # Return the formatted depth map as an image
+     return Image.fromarray(formatted)
+
+
+ # Image input for the interface
+ image = gr.Image(type="pil", label="Image")
+
+ # Output for the interface
+ answer = gr.Image(type="pil", label="Depth Map")
+
+ # Examples for the interface
+ examples = [
+     ["cat.jpg"],
+     ["dog.jpg"],
+     ["bird.jpg"],
+ ]
+
+ # Title, description, and article for the interface
+ title = "Zero Shot Depth Estimation"
+ description = "Gradio Demo for the Intel/DPT Beit-Large-512 Depth Estimation model. This model can estimate the depth of objects in images. To use it, upload your photo and click 'submit', or click one of the examples to load them. Read more at the links below."
+ article = "<p style='text-align: center'><a href='https://arxiv.org/pdf/2307.14460' target='_blank'>MiDaS v3.1 – A Model Zoo for Robust Monocular Relative Depth Estimation</a> | <a href='https://huggingface.co/Intel/dpt-beit-large-512' target='_blank'>Model Page</a></p>"
+
+
+ # Launch the interface
+ interface = gr.Interface(
+     fn=process_image,
+     inputs=[image],
+     outputs=answer,
+     examples=examples,
+     title=title,
+     description=description,
+     article=article,
+     theme="Soft",
+     allow_flagging="never",
+ )
+ interface.launch(debug=False)
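
As a quick local sanity check of the migrated pipeline, the handler can also be called outside Gradio; a minimal sketch, assuming one of the bundled example images sits next to app.py:

    # Run the depth handler directly to confirm the migration works
    from PIL import Image

    depth_map = process_image(Image.open("dog.jpg").convert("RGB"))
    depth_map.save("dog_depth.png")  # grayscale depth map, normalized to 0-255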