Update app.py
app.py CHANGED
@@ -47,20 +47,8 @@ id2label = config["id2label"]
 label2id = config["label2id"]
 image_size = config["image_size"]
 classes_names = list(label2id.keys())
-'''
-# import labels
-classes_names = ["Acropore_branched", "Acropore_digitised", "Acropore_tabular", "Algae_assembly",
-                 "Algae_limestone", "Algae_sodding", "Dead_coral", "Fish", "Human_object",
-                 "Living_coral", "Millepore", "No_acropore_encrusting", "No_acropore_massive",
-                 "No_acropore_sub_massive", "Rock", "Sand",
-                 "Scrap", "Sea_cucumber", "Syringodium_isoetifolium",
-                 "Thalassodendron_ciliatum", "Useless"]
-
-classes_nb = list(np.arange(len(classes_names)))
-id2label = {int(classes_nb[i]): classes_names[i] for i in range(len(classes_nb))}
-label2id = {v: k for k, v in id2label.items()}
-'''
 
+# PREDICTIONS
 def sigmoid(_outputs):
     return 1.0 / (1.0 + np.exp(-_outputs))
 
@@ -85,11 +73,11 @@ def predict(input_image):
 # Define style
 title = "DinoVd'eau image classification"
 model_link = "https://huggingface.co/" + checkpoint_name
-description = f"This
+description = f"This application showcases the capability of artificial intelligence-based systems to identify objects within underwater images. To utilize it, you can either upload your own image or select one of the provided examples for analysis. For predictions, we use this [open-source model]({model_link})"
 
 gr.Interface(
     fn=predict,
-    inputs=gr.Image(shape=(
+    inputs=gr.Image(shape=(512, 512)),
     outputs="label",
     title=title,
     description=description,
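
For context on the first hunk: the hard-coded class list is removed, and the label mappings now come straight from the checkpoint's config, which is what the surviving `config[...]` lookups rely on. A minimal sketch of that pattern, assuming the config is fetched as a plain dict from the Hub; the repo id below is a placeholder and the real app.py may load `config` differently.

import json
from huggingface_hub import hf_hub_download

# Placeholder repo id for illustration only; app.py defines its own checkpoint_name.
checkpoint_name = "your-namespace/your-dinov2-multilabel-checkpoint"

# Download the checkpoint's config.json and read it as a plain dict.
config_path = hf_hub_download(repo_id=checkpoint_name, filename="config.json")
with open(config_path) as f:
    config = json.load(f)

id2label = config["id2label"]            # e.g. {"0": "Acropore_branched", ...}
label2id = config["label2id"]
image_size = config["image_size"]
classes_names = list(label2id.keys())    # class names in the order stored by the checkpoint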
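
The second hunk fills in the interface description and pins the input image size. Below is a self-contained sketch of how the sigmoid, the predict function, and the Gradio interface fit together, assuming a multi-label classification checkpoint on the Hub; the repo id, the predict body, and the example description are illustrative rather than the Space's exact code, and `gr.Image(shape=...)` is a Gradio 3.x argument that later Gradio releases removed.

import numpy as np
import torch
import gradio as gr
from transformers import AutoImageProcessor, AutoModelForImageClassification

# Placeholder repo id; the Space loads its own checkpoint.
checkpoint_name = "your-namespace/your-dinov2-multilabel-checkpoint"
processor = AutoImageProcessor.from_pretrained(checkpoint_name)
model = AutoModelForImageClassification.from_pretrained(checkpoint_name)
id2label = model.config.id2label

def sigmoid(_outputs):
    # Independent per-class probabilities: the multi-label counterpart of softmax.
    return 1.0 / (1.0 + np.exp(-_outputs))

def predict(input_image):
    # Preprocess the uploaded image and run the classifier.
    inputs = processor(images=input_image, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits.numpy()[0]
    probs = sigmoid(logits)
    # outputs="label" renders a {class_name: confidence} dictionary.
    return {id2label[i]: float(p) for i, p in enumerate(probs)}

title = "DinoVd'eau image classification"
model_link = "https://huggingface.co/" + checkpoint_name
description = f"Illustrative multi-label demo; predictions come from this [open-source model]({model_link})"

gr.Interface(
    fn=predict,
    inputs=gr.Image(shape=(512, 512)),  # Gradio 3.x; drop `shape` on Gradio 4+
    outputs="label",
    title=title,
    description=description,
).launch()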