Spaces: Build error

Commit ece5c3b by dafajudin
Parent(s): ecbb493

edit app.py

Files changed:
- app.py: +25 −0
- index.html: +0 −85
app.py
CHANGED
@@ -1,6 +1,29 @@
 import gradio as gr
 from transformers import pipeline
 
+# Project description
+description = """
+# Kalbe Farma - Visual Question Answering (VQA) for Medical Imaging
+
+## Overview
+The project addresses the challenge of accurate and efficient medical imaging analysis in healthcare, aiming to reduce human error and workload for radiologists. The proposed solution involves developing advanced AI models for Visual Question Answering (VQA) to assist healthcare professionals in analyzing medical images quickly and accurately. These models will be integrated into a user-friendly web application, providing a practical tool for real-world healthcare settings.
+
+## Dataset
+The model is trained on the [VQA-RAD dataset hosted on Hugging Face](https://huggingface.co/datasets/flaviagiammarino/vqa-rad/viewer).
+
+Reference: [ScienceDirect](https://www.sciencedirect.com/science/article/abs/pii/S0933365723001252)
+
+## Model Architecture
+The model uses a Parameterized Hypercomplex Shared Encoder network (PHYSEnet).
+
+![Model Architecture](path/to/your/image.png)
+
+Reference: [ScienceDirect](https://www.sciencedirect.com/science/article/abs/pii/S0933365723001252)
+
+## Demo
+Please select an example below or upload 4 pairs of mammography exam results.
+"""
+
 # Load the Visual QA model
 generator = pipeline("visual-question-answering", model="jihadzakki/blip1-medvqa")
 
@@ -37,6 +60,8 @@ with gr.Blocks(
         secondary_hue=gr.themes.colors.red,
     )
 ) as VisualQAApp:
+    gr.Markdown(description, elem_classes="description")
+
     gr.Markdown("# Visual Question Answering using BLIP Model", elem_classes="title")
 
     with gr.Row():
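For context on what the loaded pipeline actually does, here is a minimal sketch of calling it directly, outside Gradio. This is not part of the commit: the image filename and question are hypothetical placeholders, and the exact output keys depend on the checkpoint.

```python
# Minimal sketch: invoking the Space's VQA pipeline directly.
# Assumes transformers, torch, and Pillow are installed.
from transformers import pipeline
from PIL import Image

generator = pipeline("visual-question-answering", model="jihadzakki/blip1-medvqa")

image = Image.open("example_xray.png")  # hypothetical local file
result = generator(image=image, question="Is there evidence of pneumonia?")

# The pipeline returns a list of answer dicts; the exact keys
# (e.g. "answer", "score") depend on the model architecture.
print(result)
```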
index.html
DELETED
@@ -1,85 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <meta charset="UTF-8">
-    <meta name="viewport" content="width=device-width, initial-scale=1.0">
-    <title>VQA Kalbe Bangkit</title>
-    <style>
-        body {
-            font-family: Arial, sans-serif;
-            background: linear-gradient(to right, blue, purple);
-            color: white;
-            text-align: center;
-            padding: 20px;
-        }
-        .container {
-            max-width: 800px;
-            margin: 0 auto;
-            background: rgba(255, 255, 255, 0.1);
-            padding: 20px;
-            border-radius: 10px;
-        }
-        .container img {
-            max-width: 100%;
-            height: auto;
-        }
-        pre {
-            text-align: left;
-            background: rgba(0, 0, 0, 0.7);
-            padding: 10px;
-            border-radius: 5px;
-            overflow-x: auto;
-        }
-        a {
-            color: #00e6e6;
-            text-decoration: none;
-        }
-        a:hover {
-            text-decoration: underline;
-        }
-    </style>
-</head>
-<body>
-    <div class="container">
-        <h1>Kalbe Farma - Visual Question Answering (VQA) for Medical Imaging</h1>
-
-        <h2>Overview</h2>
-        <p>
-            The project addresses the challenge of accurate and efficient medical imaging analysis in healthcare, aiming to reduce human error and workload for radiologists.
-            The proposed solution involves developing advanced AI models for Visual Question Answering (VQA) to assist healthcare professionals in analyzing medical images
-            quickly and accurately. These models will be integrated into a user-friendly web application, providing a practical tool for real-world healthcare settings.
-        </p>
-
-        <h2>Dataset</h2>
-        <p>
-            The model is trained using the <a href="https://huggingface.co/datasets/flaviagiammarino/vqa-rad/viewer" target="_blank">Hugging face</a>.
-        </p>
-
-        <p>Reference: <a href="https://www.sciencedirect.com/science/article/abs/pii/S0933365723001252" target="_blank">ScienceDirect</a></p>
-
-        <h2>Model Architecture</h2>
-        <p>
-            The model uses a Parameterized Hypercomplex Shared Encoder network (PHYSEnet).
-        </p>
-        <img src="path/to/your/image.png" alt="Model Architecture">
-        <p>Reference: <a href="https://www.sciencedirect.com/science/article/abs/pii/S0933365723001252" target="_blank">ScienceDirect</a></p>
-
-        <h2>Demo</h2>
-        <p>
-            Please select the example below or upload 4 pairs of mammography exam results.
-        </p>
-
-        <h2>Usage</h2>
-        <pre>
-<code>
-cd src
-
-Run the following command below:
-python app.py
-</code>
-        </pre>
-
-        <p>Check out the configuration reference at <a href="https://huggingface.co/docs/hub/spaces-config-reference" target="_blank">Hugging Face</a></p>
-    </div>
-</body>
-</html>
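With the static index.html removed, the project description now renders inside the Gradio app via `gr.Markdown(description, ...)`. The commit's hunks only show fragments of the Blocks layout, so the following is a hedged sketch of how such a layout is typically wired to the pipeline; the handler name, components, and event wiring are assumptions, not the Space's actual code (`generator` and `description` are as defined in app.py above).

```python
# Hedged sketch of a Blocks layout wired to the VQA pipeline. The handler
# name, components, and click wiring are assumptions; the commit's hunks
# do not show the Space's actual event handlers.
import gradio as gr

def answer_question(image, question):
    # Take the first answer returned by the pipeline (assumed key: "answer").
    return generator(image=image, question=question)[0]["answer"]

with gr.Blocks() as VisualQAApp:
    gr.Markdown(description, elem_classes="description")
    with gr.Row():
        image_input = gr.Image(type="pil", label="Medical image")
        question_input = gr.Textbox(label="Question")
    answer_output = gr.Textbox(label="Answer")
    submit_button = gr.Button("Submit")
    submit_button.click(
        answer_question,
        inputs=[image_input, question_input],
        outputs=answer_output,
    )

VisualQAApp.launch()
```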