Commit adaa21e
Duplicate from gainforest/tree-crown-delineation
Files changed:
- .gitattributes      +35 -0
- README.md           +14 -0
- app.py              +62 -0
- example1.jpg        +0 -0
- example2.jpg        +0 -0
- example3.jpg        +0 -0
- packages.txt        +1 -0
- requirements.txt    +13 -0
- unet_resnet50.ckpt  +3 -0
.gitattributes ADDED
@@ -0,0 +1,35 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
+example4.jpg filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
+---
+title: Tree Crown Delineation
+emoji: 🌴📊💚
+colorFrom: gray
+colorTo: purple
+sdk: gradio
+sdk_version: 3.24.1
+app_file: app.py
+pinned: false
+license: mit
+duplicated_from: gainforest/tree-crown-delineation
+---
+
+Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,62 @@
+import torch
+import numpy as np
+import torchvision.transforms as T
+from torchgeo.trainers import SemanticSegmentationTask
+import gradio as gr
+from PIL import Image
+import cv2
+
+def load_model(checkpoint_path):
+    model = SemanticSegmentationTask.load_from_checkpoint(checkpoint_path)
+    return model
+
+def preprocess_image(inp):
+    compose = T.Compose([T.Resize((2048, 2048)), T.ToTensor()])
+    inp = compose(inp).unsqueeze(0)
+    return inp
+
+def predict_segmentation(model, inp):
+    with torch.no_grad():
+        y_hat = torch.nn.Softmax2d()(model(inp))
+    return y_hat.squeeze()
+
+def overlay_prediction(input_image, prediction_tensor, alpha=0.5, threshold=0.25):
+    # Convert the prediction tensor to a PIL image and resize it to match the input image size
+    prediction_image = T.ToPILImage()(prediction_tensor[0])
+    prediction_image = prediction_image.resize(input_image.size, resample=Image.NEAREST)
+
+    # Apply the cv2.COLORMAP_INFERNO colormap
+    prediction_image = cv2.applyColorMap(np.array(prediction_image), cv2.COLORMAP_INFERNO)
+    prediction_image = Image.fromarray(prediction_image).convert("RGBA")
+
+    overlay = Image.new("RGBA", prediction_image.size, (0, 0, 0, 0))
+
+    for x in range(prediction_image.width):
+        for y in range(prediction_image.height):
+            r, g, b, a = prediction_image.getpixel((x, y))
+            if a / 255 > threshold:
+                overlay.putpixel((x, y), (r, g, b, int(255 * alpha)))
+
+    combined_image = Image.alpha_composite(input_image.convert("RGBA"), overlay)
+    return combined_image.convert("RGB")
+
+def predict(inp):
+    model = load_model("./unet_resnet50.ckpt")
+    # Check if a GPU is available and move the model to the GPU if possible
+    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    model.to(device)
+    preprocessed_image = preprocess_image(inp)
+    # Move the input tensor to the GPU if available
+    preprocessed_image = preprocessed_image.to(device)
+    segmentation_result = predict_segmentation(model, preprocessed_image)
+    # Move the output tensor back to the CPU for post-processing
+    segmentation_result = segmentation_result.cpu()
+    output_image = overlay_prediction(inp, segmentation_result)
+    return output_image
+
+gr.Interface(
+    fn=predict,
+    inputs=gr.inputs.Image(type="pil"),
+    outputs="image",
+    examples=["./example1.jpg", "./example2.jpg", "./example3.jpg"]
+).launch()
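One quirk worth noting in `overlay_prediction`: `Image.fromarray(...).convert("RGBA")` produces an alpha channel that is 255 for every pixel, so the `a / 255 > threshold` test always passes and the `threshold` parameter has no effect. A minimal sketch of a variant that thresholds the class probabilities directly is below; the function name `overlay_by_probability` is illustrative, not part of the commit, and it also replaces the per-pixel loop with array operations:

+# Sketch only, assuming a CPU tensor of per-class probabilities as in predict():
+def overlay_by_probability(input_image, prediction_tensor, alpha=0.5, threshold=0.25):
+    # Per-pixel probability for channel 0, scaled to uint8 and resized to the input size.
+    probs = np.array(
+        Image.fromarray((prediction_tensor[0].numpy() * 255).astype(np.uint8))
+        .resize(input_image.size, resample=Image.NEAREST)
+    )
+    colored = cv2.applyColorMap(probs, cv2.COLORMAP_INFERNO)
+    colored = cv2.cvtColor(colored, cv2.COLOR_BGR2RGB)
+    # Build the alpha mask from the probabilities, not from a constant alpha channel.
+    mask = (probs / 255.0 > threshold).astype(np.uint8) * int(255 * alpha)
+    overlay = Image.fromarray(np.dstack([colored, mask]), mode="RGBA")
+    return Image.alpha_composite(input_image.convert("RGBA"), overlay).convert("RGB")

Calling `predict` on one of the bundled example images exercises the same path the `gr.Interface` uses, so either overlay variant can be checked locally before launching the app.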
example1.jpg ADDED (binary image)
example2.jpg ADDED (binary image)
example3.jpg ADDED (binary image)
packages.txt ADDED
@@ -0,0 +1 @@
+python3-opencv
requirements.txt ADDED
@@ -0,0 +1,13 @@
+# pip install -r requirements.txt
+
+# Requirements for the Tree Crown Delineation
+# Gradio application
+
+gradio
+torch
+torchvision
+numpy
+opencv-python
+pillow
+segmentation-models-pytorch
+torchgeo
unet_resnet50.ckpt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4245f09404a6f0070222eca9ad2b1e80e92635c3f7ac69eaf2ce6826a9159e7e
+size 390784569
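The three lines above are the Git LFS pointer that stands in for the ~391 MB checkpoint (the `*.ckpt` rule in `.gitattributes` routes it through LFS). A clone made without fetching LFS objects leaves only this small text stub on disk, which `load_from_checkpoint` cannot read. A minimal sketch of a guard for that case; the helper name `assert_lfs_fetched` is an illustrative assumption, not part of the commit:

+# Sketch only: fail fast if the checkpoint is still an unfetched LFS pointer.
+from pathlib import Path
+
+def assert_lfs_fetched(path="./unet_resnet50.ckpt"):
+    head = Path(path).read_bytes()[:64]
+    # Every Git LFS pointer file starts with this version line.
+    if head.startswith(b"version https://git-lfs.github.com/spec/v1"):
+        raise RuntimeError(
+            f"{path} is a Git LFS pointer; run `git lfs pull` to fetch the checkpoint."
+        )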