PedroMartelleto committed
Commit 35677f0 • 1 Parent(s): c396e65
Deploying to HF
app.py CHANGED
@@ -62,7 +62,7 @@ class Explainer:
             ["all", "absolute_value"],
             cmap=self.default_cmap,
             show_colorbar=True)
-        fig.suptitle(self.fig_title, fontsize=12)
+        fig.suptitle("SHAP | " + self.fig_title, fontsize=12)
         return self.convert_fig_to_pil(fig)
 
     def occlusion(self):
@@ -82,7 +82,7 @@ class Explainer:
             titles=["Original", "Positive Attribution", "Negative Attribution", "Masked"],
             fig_size=(18, 6)
         )
-        fig.suptitle(self.fig_title, fontsize=12)
+        fig.suptitle("Occlusion | " + self.fig_title, fontsize=12)
         return self.convert_fig_to_pil(fig)
 
     def gradcam(self):
@@ -101,6 +101,7 @@ class Explainer:
             show_colorbar=True,
             titles=["Original", "Positive Attribution", "Masked"],
             fig_size=(18, 6))
+        fig.suptitle("GradCAM layer3[1].conv2 | " + self.fig_title, fontsize=12)
         return self.convert_fig_to_pil(fig)
 
     def integrated_gradients(self):
@@ -114,7 +115,7 @@ class Explainer:
             show_colorbar=True,
             titles=["Original", "Attribution", "Masked"],
             fig_size=(18, 6))
-        fig.suptitle(self.fig_title, fontsize=12)
+        fig.suptitle("Integrated gradients | " + self.fig_title, fontsize=12)
         return self.convert_fig_to_pil(fig)
 
     def create_model_from_checkpoint():
@@ -130,10 +131,10 @@ labels = [ "benign", "malignant", "normal" ]
 
 def predict(img):
     explainer = Explainer(model, img, labels)
-    return [explainer.confidences, explainer.shap(), explainer.occlusion(), explainer.gradcam()
+    return [explainer.confidences, explainer.shap(), explainer.occlusion(), explainer.gradcam()]
 
 ui = gr.Interface(fn=predict,
                   inputs=gr.Image(type="pil"),
-                  outputs=[gr.Label(num_top_classes=3), gr.Image(type="pil"), gr.Image(type="pil"), gr.Image(type="pil")
+                  outputs=[gr.Label(num_top_classes=3), gr.Image(type="pil"), gr.Image(type="pil"), gr.Image(type="pil")],
                   examples=["benign (52).png", "benign (243).png", "malignant (127).png", "malignant (201).png", "normal (81).png", "normal (101).png"]).launch()
 ui.launch(share=True)
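
For reference, a minimal, self-contained sketch of the wiring the last hunk arrives at: predict() returns four values (a confidence mapping plus three PIL images) that line up one-to-one with the four output components declared on the Interface. The real app builds its attribution figures with a checkpointed model inside the Explainer class shown above; the DummyExplainer below is a hypothetical stand-in that simply echoes the input image.

    import gradio as gr
    from PIL import Image

    labels = ["benign", "malignant", "normal"]

    class DummyExplainer:
        # Hypothetical stand-in for the app's Explainer: exposes .confidences and
        # per-method attribution images, but performs no actual attribution.
        def __init__(self, img):
            self.confidences = {label: 1.0 / len(labels) for label in labels}
            self._img = img

        def shap(self):
            return self._img

        def occlusion(self):
            return self._img

        def gradcam(self):
            return self._img

    def predict(img: Image.Image):
        explainer = DummyExplainer(img)
        # One return value per output component declared below.
        return [explainer.confidences, explainer.shap(), explainer.occlusion(), explainer.gradcam()]

    ui = gr.Interface(fn=predict,
                      inputs=gr.Image(type="pil"),
                      outputs=[gr.Label(num_top_classes=3), gr.Image(type="pil"),
                               gr.Image(type="pil"), gr.Image(type="pil")])

    if __name__ == "__main__":
        ui.launch()

Note that the committed file launches the Interface inline via the chained .launch() call, so ui is bound to the return value of launch() rather than to the Interface itself; the sketch launches once, under a main guard.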