import os
import random

import numpy as np
import torchvision.transforms as transforms
import gradio as gr
import PIL
from PIL import Image
from pathlib import Path

from fastai.basics import *
from fastai.vision.all import *
from fastai.vision import models
from fastai.metrics import *
from fastai.data.all import *
from fastai.callback.all import *
from huggingface_hub import from_pretrained_fastai
# Install optional dependencies at runtime if they are missing (Spaces workaround).
try:
    import albumentations
except ImportError:
    os.system('pip install albumentations')
    import albumentations
try:
    import toml
except ImportError:
    os.system('pip install toml')
    import toml
os.system('pip install -U gradio')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
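# The helper functions and custom ItemTransforms below are assumed to mirror the ones
# used when the learner was trained; defining them here lets `from_pretrained_fastai`
# unpickle the exported learner.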
def get_y_fn(x):
    return Path(str(x).replace("Images", "Labels").replace("color", "gt").replace(".jpg", ".png"))
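# e.g. get_y_fn("Images/color_184.jpg") -> Path("Labels/gt_184.png")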
def transform_image(image):
    # Normalize with ImageNet statistics and add a batch dimension.
    my_transforms = transforms.Compose([transforms.ToTensor(),
                                        transforms.Normalize(
                                            [0.485, 0.456, 0.406],
                                            [0.229, 0.224, 0.225])])
    image_aux = image
    return my_transforms(image_aux).unsqueeze(0).to(device)
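# The result is a float tensor of shape (1, 3, H, W) on `device`, ready for the model.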
class TargetMaskConvertTransform(ItemTransform):
    def __init__(self):
        pass
    def encodes(self, x):
        img, mask = x
        # Convert to array
        mask = np.array(mask)
        # Map the grayscale values of the label image to class indices.
        mask[mask == 255] = 1
        mask[mask == 150] = 2
        mask[mask == 76] = 3
        mask[mask == 74] = 3
        mask[mask == 29] = 4
        mask[mask == 25] = 4
        # Everything else is treated as background (class 0).
        mask[((mask != 1) & (mask != 2) & (mask != 3) & (mask != 4))] = 0
        # Back to PILMask
        mask = PILMask.create(mask)
        return img, mask
from albumentations import (
    Compose,
    OneOf,
    ElasticTransform,
    GridDistortion,
    OpticalDistortion,
    HorizontalFlip,
    VerticalFlip,
    Rotate,
    Transpose,
    CLAHE,
    ShiftScaleRotate
)
class SegmentationAlbumentationsTransform(ItemTransform):
    split_idx = 0  # apply these augmentations only to the training split
    def __init__(self, aug):
        self.aug = aug
    def encodes(self, x):
        img, mask = x
        aug = self.aug(image=np.array(img), mask=np.array(mask))
        return PILImage.create(aug["image"]), PILMask.create(aug["mask"])
# repo_id = "YOUR_USERNAME/YOUR_LEARNER_NAME"
repo_id = "lauragordo/model3"
# path_images = path/"Images"
# trainDLS = trainDB.dataloaders(path_images, bs=bs)
# learn = unet_learner(trainDLS, resnet50, metrics=[DiceMulti(), JaccardCoeff()]).to_fp16()
learn = from_pretrained_fastai(repo_id)
model = learn.model
model = model.cpu()
# labels = learner.dls.vocab
# classes = learner.dls.vocab[1]
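# Inference on a single uploaded image: resize to 480x640 (the resolution assumed by
# this app), normalize, run a forward pass, and convert the predicted class indices
# back into a grayscale mask image.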
def predict(img):
    img = PILImage.create(img)
    image = transforms.Resize((480, 640))(img)
    tensor = transform_image(image=image)
    model.to(device)
    with torch.no_grad():
        outputs = model(tensor)
    outputs = torch.argmax(outputs, 1)
    # Map the predicted class indices back to the grayscale values of the label images.
    mask = np.array(outputs.cpu())
    mask[mask == 0] = 255
    mask[mask == 1] = 150
    mask[mask == 2] = 76
    mask[mask == 3] = 25
    mask[mask == 4] = 0
    mask = np.reshape(mask, (480, 640))
    return Image.fromarray(mask.astype('uint8'))
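# Quick local sanity check (hypothetical, outside the Gradio UI):
# predict("color_184.jpg").save("pred_184.png")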
# Create the interface and launch it.
gr.Interface(fn=predict, inputs=["image"], outputs=["image"], examples=['color_184.jpg', 'color_154.jpg']).launch(share=True)