alicelouis committed
Commit ad1ff60 · 1 Parent(s): acea17b

Upload 16 files
2ct.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
How_to_use.png ADDED
README.md CHANGED
@@ -1,13 +1 @@
- ---
- title: NSCLC Classification
- emoji: 😻
- colorFrom: purple
- colorTo: purple
- sdk: streamlit
- sdk_version: 1.19.0
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # ViTLungMiNi
app.py ADDED
@@ -0,0 +1,625 @@
+ import numpy as np
+ from transformers import BeitImageProcessor, BeitForImageClassification
+ from PIL import Image
+ import csv
+
+ from streamlit_echarts import st_echarts
+ from st_on_hover_tabs import on_hover_tabs
+ import streamlit as st
+
+ st.set_page_config(layout="wide")
+
+ import warnings
+ warnings.filterwarnings('ignore')
+ from torchvision import transforms
+ from datasets import load_dataset
+ from pytorch_grad_cam import run_dff_on_image, GradCAM
+ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
+ from pytorch_grad_cam.utils.image import show_cam_on_image
+ import cv2
+ import torch
+ from torch import nn
+ from typing import List, Callable, Optional
+ import os
+ import pandas as pd
+ import pydicom
+
+ labels = ["adenocarcinoma", "large.cell", "normal", "squamous.cell"]
+ model_name_or_path = 'alicelouis/BeiT_NSCLC_lr2e-5'
+ st.markdown('''
+ <style>
+ section[data-testid='stSidebar'] {
+     background-color: #111;
+     min-width: unset !important;
+     width: unset !important;
+     flex-shrink: unset !important;
+ }
+
+ button[kind="header"] {
+     background-color: transparent;
+     color: rgb(180, 167, 141);
+ }
+
+ @media (hover) {
+     /* header element to be removed */
+     header[data-testid="stHeader"] {
+         display: none;
+     }
+
+     /* The navigation menu specs and size */
+     section[data-testid='stSidebar'] > div {
+         height: 100%;
+         width: 95px;
+         position: relative;
+         z-index: 1;
+         top: 0;
+         left: 0;
+         background-color: #111;
+         overflow-x: hidden;
+         transition: 0.5s ease;
+         padding-top: 60px;
+         white-space: nowrap;
+     }
+
+     /* The navigation menu open and close on hover and size */
+     /* section[data-testid='stSidebar'] > div {
+         height: 100%;
+         width: 75px; /* Put some width to hover on. */
+     /* }
+
+     /* ON HOVER */
+     section[data-testid='stSidebar'] > div:hover {
+         width: 300px;
+     }
+
+     /* The button on the streamlit navigation menu - hidden */
+     button[kind="header"] {
+         display: none;
+     }
+ }
+
+ @media (max-width: 272px) {
+     section[data-testid='stSidebar'] > div {
+         width: 15rem;
+     }
+ }
+ </style>
+ ''', unsafe_allow_html=True)
+
+ @st.cache_resource(show_spinner=False, ttl=1800, max_entries=2)
+ def FeatureExtractor(model_name_or_path):
+     feature_extractor = BeitImageProcessor.from_pretrained(model_name_or_path)
+     return feature_extractor
+
+
+ @st.cache_resource(show_spinner=False, ttl=1800, max_entries=2)
+ def LoadModel(model_name_or_path):
+     model = BeitForImageClassification.from_pretrained(
+         model_name_or_path,
+         num_labels=len(labels),
+         id2label={int(i): c for i, c in enumerate(labels)},
+         label2id={c: int(i) for i, c in enumerate(labels)},
+         ignore_mismatched_sizes=True)
+     return model
+
+
+ # Model wrapper to return a tensor
+ class HuggingfaceToTensorModelWrapper(torch.nn.Module):
+     def __init__(self, model):
+         super(HuggingfaceToTensorModelWrapper, self).__init__()
+         self.model = model
+
+     def forward(self, x):
+         return self.model(x).logits
+
+ # """ Translate the category name to the category index.
+ #     Some models aren't trained on Imagenet but on even larger datasets,
+ #     so we can't just assume that 761 will always be remote-control.
+ # """
+ def category_name_to_index(model, category_name):
+     name_to_index = dict((v, k) for k, v in model.config.id2label.items())
+     return name_to_index[category_name]
+
+ # """ Return the model's top-k categories and their softmax probabilities
+ #     (in percent), ordered from most to least likely.
+ # """
+ def print_top_categories(model, img_tensor, top_k=5):
+     feature_extractor = FeatureExtractor(model_name_or_path)
+     inputs = feature_extractor(images=img_tensor, return_tensors="pt")
+     outputs = model(**inputs)
+     logits = outputs.logits
+     indices = logits.cpu()[0, :].detach().numpy().argsort()[-top_k:][::-1]
+     probabilities = nn.functional.softmax(logits, dim=-1)
+     topK = dict()
+     for i in indices:
+         topK[model.config.id2label[i]] = probabilities[0][i].item() * 100
+     return topK
+
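+ # The ViT backbone here sees 224x224 inputs cut into 16x16 patches, i.e.
+ # 196 patch tokens plus one CLS token. The transform below drops the CLS
+ # token and reshapes the rest into a 14x14 grid in NCHW order, the 2D
+ # layout that Grad-CAM expects. (Comment assumes the 224px configuration
+ # used throughout this app.)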
+ def reshape_transform_vit_huggingface(x):
+     activations = x[:, 1:, :]
+     activations = activations.view(activations.shape[0],
+                                    14, 14, activations.shape[2])
+     activations = activations.transpose(2, 3).transpose(1, 2)
+     return activations
+
+
+ def count_system():
+     # Count the non-zero lines in count_class.txt; 0 means nothing
+     # has been analyzed yet.
+     count_system = []
+     with open('count_class.txt', 'r') as f:
+         for line in f:
+             if line.strip() == '0':
+                 continue
+             count_system.append(line.strip())
+     return len(count_system)
+
+
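+ # Tally the per-class predictions gathered in the UI and persist the four
+ # counts to count_class.txt, one per line, in the order Adenocarcinoma,
+ # Normal, Large cell, Squamous cell; count_system() above reads them back.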
+ def count_class(count_classes):
+     a = 0
+     b = 0
+     c = 0
+     d = 0
+     for i in range(len(count_classes)):
+         if count_classes[i] == "Adeno":
+             a += 1
+         elif count_classes[i] == "Normal":
+             b += 1
+         elif count_classes[i] == "Large":
+             c += 1
+         elif count_classes[i] == "Squamous":
+             d += 1
+     count_classes = []
+     count_classes.append(str(a))
+     count_classes.append(str(b))
+     count_classes.append(str(c))
+     count_classes.append(str(d))
+     with open("count_class.txt", "w") as f:
+         for count in count_classes:
+             f.write(count + "\n")
+
+ # Define CSS styling for centering
+ centered_style = """
+     display: flex;
+     justify-content: center;
+ """
+
+ st.markdown(
+     """
+     <div style='border: 2px solid green; border-radius: 5px; padding: 10px; background-color: white;'>
+     <h1 style='text-align: center; color: green;'>
+         🏥 Lung Cancer Classification with Vision Transformer : จำแนกมะเร็งปอด 🫁
+     </h1>
+     </div>
+     """, unsafe_allow_html=True)
+
+ with open("assets/css/style.css") as f:
+     st.markdown(f"<style> {f.read()} </style>", unsafe_allow_html=True)
+ with open("assets/webfonts/font.txt") as f:
+     st.markdown(f.read(), unsafe_allow_html=True)
+
+ with st.sidebar:
+     tabs = on_hover_tabs(tabName=['Home', 'Upload', 'Analytics', 'More Information', 'Reset'],
+                          iconName=['home', 'upload', 'analytics', 'informations', 'refresh'],
+                          styles={'navtab': {'background-color': '#111', 'color': '#818181', 'font-size': '18px',
+                                             'transition': '.3s', 'white-space': 'nowrap', 'text-transform': 'uppercase'},
+                                  'tabOptionsStyle':
+                                      {':hover :hover': {'color': 'red', 'cursor': 'pointer'}}, 'iconStyle':
+                                      {'position': 'fixed', 'left': '7.5px', 'text-align': 'left'}, 'tabStyle':
+                                      {'list-style-type': 'none', 'margin-bottom': '30px', 'padding-left': '30px'}},
+                          key="1", default_choice=0)
+     st.markdown(
+         """
+         <div style='border: 2px solid green; padding: 10px; background-color: white; margin-top: 5px; margin-bottom: 5px; margin-right: 20px; bottom: 50px;'>
+         <h1 style='text-align: center; color: green; font-size: 100%'> ได้รับทุนสนับสนุน 2,000 บาท </h1>
+         <h1 style='text-align: center; color: green; font-size: 100%'> National Software Contest ครั้งที่ 25 </h1>
+         <h1 style='text-align: center; color: green; font-size: 100%'> ประจำปีงบประมาณ 2566 </h1>
+         </div>
+         """, unsafe_allow_html=True)
+ data_base = []
+ if tabs == 'Home':
+     st.image('How_to_use.png', use_column_width=True)
+ elif tabs == 'Upload':  # and count_system() != 1:
+     uploaded_file = st.file_uploader("อัปโหลดไฟล์ภาพ", type=["jpg", "jpeg", "png", "dcm"], accept_multiple_files=True)
+     name_of_files = []
+     name_of_files_new = []
+     for n in uploaded_file:
+         file_name = n.name
+         name_of_files.append(file_name)
+     with open("save_name.txt", "w") as f:
+         for name in name_of_files:
+             f.write(name + "\n")
+     for j in range(len(name_of_files)):
+         if name_of_files[j].endswith('.dcm'):
+             name_of_files_new.append(name_of_files[j][:-4] + '.png')
+         else:
+             name_of_files_new.append(name_of_files[j])
+     for i in range(len(uploaded_file)):
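+         # DICOM uploads are converted to 8-bit RGB PNGs first: negative
+         # pixel values are clipped, intensities rescaled to 0-255, and a
+         # copy is saved under dcm_png/ for the classifier to reopen.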
+         if name_of_files[i].endswith('.dcm'):
+             ds = pydicom.dcmread(uploaded_file[i])
+             new_image = ds.pixel_array.astype(float)
+             scaled_image = (np.maximum(new_image, 0) / new_image.max()) * 255.0
+             scaled_image = np.uint8(scaled_image)
+             gray_scale = Image.fromarray(scaled_image)
+             final_image = gray_scale.convert('RGB')
+             final_image = final_image.resize((200, 200))
+             # Forward slashes keep the path portable beyond Windows.
+             final_image.save('dcm_png/{}.png'.format(name_of_files[i]))
+         feature_extractor = FeatureExtractor(model_name_or_path)
+         model = LoadModel(model_name_or_path)
+         if name_of_files[i].endswith('.dcm'):
+             img = Image.open('dcm_png/{}.png'.format(name_of_files[i]))
+         else:
+             img = Image.open(uploaded_file[i])
+         img_out = img.resize((224, 224))
+         img_out = np.array(img_out)
+         # Load the saved model's input image
+         image = img.resize((224, 224))
+         img_tensor = transforms.ToTensor()(image)
+         # Helper function to run GradCAM on an image and create a visualization.
+         # (note to myself: this is probably useful enough to move into the package)
+         # If several targets are passed in targets_for_gradcam, e.g. different
+         # categories, a visualization for each of them will be created.
+         def run_grad_cam_on_image(model: torch.nn.Module,
+                                   target_layer: torch.nn.Module,
+                                   targets_for_gradcam: List[Callable],
+                                   reshape_transform: Optional[Callable],
+                                   input_tensor: torch.Tensor = img_tensor,
+                                   input_image: Image.Image = image,
+                                   method: Callable = GradCAM):
+             with method(model=HuggingfaceToTensorModelWrapper(model),
+                         target_layers=[target_layer],
+                         reshape_transform=reshape_transform) as cam:
+                 # Replicate the tensor for each of the categories we want to create Grad-CAM for:
+                 repeated_tensor = input_tensor[None, :].repeat(len(targets_for_gradcam), 1, 1, 1)
+                 batch_results = cam(input_tensor=repeated_tensor,
+                                     targets=targets_for_gradcam)
+                 results = []
+                 for grayscale_cam in batch_results:
+                     visualization = show_cam_on_image(np.float32(input_image) / 255,
+                                                       grayscale_cam,
+                                                       use_rgb=True)
+                     # Make it weigh less in the notebook:
+                     visualization = cv2.resize(visualization,
+                                                (visualization.shape[1] // 2, visualization.shape[0] // 2))
+                     results.append(visualization)
+                 return np.hstack(results)
+         inputs = feature_extractor(images=image, return_tensors="pt")
+         targets_for_gradcam = [ClassifierOutputTarget(category_name_to_index(model, "adenocarcinoma")),
+                                ClassifierOutputTarget(category_name_to_index(model, "large.cell")),
+                                ClassifierOutputTarget(category_name_to_index(model, "normal")),
+                                ClassifierOutputTarget(category_name_to_index(model, "squamous.cell"))
+                                ]
+         target_layer_dff = model.beit.layernorm
+         target_layer_gradcam = model.beit.encoder.layer[-2].output
+         image_resized = image.resize((224, 224))
+         tensor_resized = transforms.ToTensor()(image_resized)
+         outputs = model(**inputs)
+         logits = outputs.logits
+         # The model predicts one of the 4 classes.
+         predicted_class_idx = logits.argmax(-1).item()
+         className = labels[predicted_class_idx]
+         # Display the images on Streamlit.
+         dff_image = Image.fromarray(run_dff_on_image(model=model,
+                                                      target_layer=target_layer_dff,
+                                                      classifier=model.classifier,
+                                                      img_pil=image_resized,
+                                                      img_tensor=tensor_resized,
+                                                      reshape_transform=reshape_transform_vit_huggingface,
+                                                      n_components=4,
+                                                      top_k=4))
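+         # run_dff_on_image applies Deep Feature Factorization: it decomposes
+         # the backbone's spatial activations into n_components concepts and
+         # overlays each on the CT slice in a single composite panel, which
+         # is cropped to its left two-thirds before saving below.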
+         # dff_image.save("save_images/dff_image.png")
+         # gradcam_image.save("save_images/gradcam_image.png")
+         topK = print_top_categories(model, tensor_resized)
+         df = pd.DataFrame.from_dict(topK, orient='index')
+         list_to_be_sorted = []
+         # topK is already ordered from most to least likely, so the first
+         # entry below is the top prediction.
+         for x, y in topK.items():
+             dic = dict()
+             dic["value"] = y
+             dic["name"] = x
+             list_to_be_sorted.append(dic)
+             data_base.append(y)
+         if list_to_be_sorted[0]['name'] == "adenocarcinoma":
+             dff_image.save("Adenocarcinoma/{}".format(name_of_files_new[i]))
+             image_path = name_of_files_new[i]
+             with Image.open("Adenocarcinoma/{}".format(image_path)) as image:
+                 width, height = image.size
+                 new_width = 2 * width // 3
+                 cropped_image = image.crop((0, 0, new_width, height))
+                 cropped_image.save("Adenocarcinoma/{}".format(image_path))
+         elif list_to_be_sorted[0]['name'] == "large.cell":
+             dff_image.save("Large cell carcinoma/{}".format(name_of_files_new[i]))
+             image_path = name_of_files_new[i]
+             with Image.open("Large cell carcinoma/{}".format(image_path)) as image:
+                 width, height = image.size
+                 new_width = 2 * width // 3
+                 cropped_image = image.crop((0, 0, new_width, height))
+                 cropped_image.save("Large cell carcinoma/{}".format(image_path))
+         elif list_to_be_sorted[0]['name'] == "normal":
+             dff_image.save("Normal/{}".format(name_of_files_new[i]))
+             image_path = name_of_files_new[i]
+             with Image.open("Normal/{}".format(image_path)) as image:
+                 width, height = image.size
+                 new_width = 2 * width // 3
+                 cropped_image = image.crop((0, 0, new_width, height))
+                 cropped_image.save("Normal/{}".format(image_path))
+         elif list_to_be_sorted[0]['name'] == "squamous.cell":
+             dff_image.save("Squamous cell carcinoma/{}".format(name_of_files_new[i]))
+             image_path = name_of_files_new[i]
+             with Image.open("Squamous cell carcinoma/{}".format(image_path)) as image:
+                 width, height = image.size
+                 new_width = 2 * width // 3
+                 cropped_image = image.crop((0, 0, new_width, height))
+                 cropped_image.save("Squamous cell carcinoma/{}".format(image_path))
+     # st.image(dff_image, use_column_width=True)
+     # st.image(gradcam_image, use_column_width=True)
+     st.balloons()
+
+     # Create a container for the two columns
+     container = st.container()
+     # Create two columns within the container
+     col1, col2 = container.columns(2)
+     col3, col4 = container.columns(2)
+     col5, col6 = container.columns(2)
+     # Add the first subheader to the first column
+     count_classes = []  # Adenocarcinoma, Normal, Large cell carcinoma, Squamous cell carcinoma
+     with col1:
+         st.markdown("<h2 style='text-align: center; border: 2px solid #5370c6; border-radius: 5px; padding: 15px; background-color: white; color: black;' > Adenocarcinoma </h2>".format(centered_style), unsafe_allow_html=True)
+         # Add the second subheader to the second column
+         folder_path = "Adenocarcinoma"
+         image_files = [f for f in os.listdir(folder_path) if f.endswith('.png') or f.endswith('.jpg')]
+         # Display the images in a loop
+         for i in range(0, len(image_files), 2):
+             col7, col8 = st.columns([1, 1])
+             with col7:
+                 if i < len(image_files):
+                     image1 = Image.open(os.path.join(folder_path, image_files[i]))
+                     st.image(image1, use_column_width=True)
+                     st.write(f"<p style='text-align: center; color: black; border: 2px solid white; border-radius: 10px; padding: 10px; background-color: #5370c6; font-size: 32px;'>{image_files[i]}</p>", unsafe_allow_html=True)
+                     count_classes.append("Adeno")
+             with col8:
+                 if i+1 < len(image_files):
+                     image2 = Image.open(os.path.join(folder_path, image_files[i+1]))
+                     st.image(image2, use_column_width=True)
+                     st.write(f"<p style='text-align: center; color: black; border: 2px solid white; border-radius: 10px; padding: 10px; background-color: #5370c6; font-size: 32px;'>{image_files[i+1]}</p>", unsafe_allow_html=True)
+                     count_classes.append("Adeno")
+     with col2:
+         st.markdown("<h2 style='text-align: center; border: 2px solid green; border-radius: 5px; padding: 15px; background-color: white; color: black;' > Normal </h2>".format(centered_style), unsafe_allow_html=True)
+         folder_path = "Normal"
+         image_files = [f for f in os.listdir(folder_path) if f.endswith('.png') or f.endswith('.jpg')]
+         # Display the images in a loop
+         for i in range(0, len(image_files), 2):
+             col9, col10 = st.columns([1, 1])
+             with col9:
+                 if i < len(image_files):
+                     image1 = Image.open(os.path.join(folder_path, image_files[i]))
+                     st.image(image1, use_column_width=True)
+                     st.write(f"<p style='text-align: center; color: black; border: 2px solid white; border-radius: 10px; padding: 10px; background-color: green; font-size: 32px;'>{image_files[i]}</p>", unsafe_allow_html=True)
+                     count_classes.append("Normal")
+             with col10:
+                 if i+1 < len(image_files):
+                     image2 = Image.open(os.path.join(folder_path, image_files[i+1]))
+                     st.image(image2, use_column_width=True)
+                     st.write(f"<p style='text-align: center; color: black; border: 2px solid white; border-radius: 10px; padding: 10px; background-color: green; font-size: 32px;'>{image_files[i+1]}</p>", unsafe_allow_html=True)
+                     count_classes.append("Normal")
+     with col3:
+         st.markdown("")
+     with col4:
+         st.markdown("")
+
+     with col5:
+         st.markdown("<h2 style='text-align: center; border: 2px solid orange; border-radius: 5px; padding: 15px; background-color: white; color: black;' > Large cell carcinoma </h2>".format(centered_style), unsafe_allow_html=True)
+         folder_path = "Large cell carcinoma"
+         image_files = [f for f in os.listdir(folder_path) if f.endswith('.png') or f.endswith('.jpg')]
+         # Display the images in a loop
+         for i in range(0, len(image_files), 2):
+             col11, col12 = st.columns([1, 1])
+             with col11:
+                 if i < len(image_files):
+                     image1 = Image.open(os.path.join(folder_path, image_files[i]))
+                     st.image(image1, use_column_width=True)
+                     st.write(f"<p style='text-align: center; color: black; border: 2px solid white; border-radius: 10px; padding: 10px; background-color: orange; font-size: 32px;'>{image_files[i]}</p>", unsafe_allow_html=True)
+                     count_classes.append("Large")
+             with col12:
+                 if i+1 < len(image_files):
+                     image2 = Image.open(os.path.join(folder_path, image_files[i+1]))
+                     st.image(image2, use_column_width=True)
+                     st.write(f"<p style='text-align: center; color: black; border: 2px solid white; border-radius: 10px; padding: 10px; background-color: orange; font-size: 32px;'>{image_files[i+1]}</p>", unsafe_allow_html=True)
+                     count_classes.append("Large")
+     with col6:
+         st.markdown("<h2 style='text-align: center; border: 2px solid #f16565; border-radius: 5px; padding: 15px; background-color: white; color: black;' > Squamous cell carcinoma </h2>".format(centered_style), unsafe_allow_html=True)
+         folder_path = "Squamous cell carcinoma"
+         image_files = [f for f in os.listdir(folder_path) if f.endswith('.png') or f.endswith('.jpg')]
+         # Display the images in a loop
+         for i in range(0, len(image_files), 2):
+             col13, col14 = st.columns([1, 1])
+             with col13:
+                 if i < len(image_files):
+                     image1 = Image.open(os.path.join(folder_path, image_files[i]))
+                     st.image(image1, use_column_width=True)
+                     st.write(f"<p style='text-align: center; color: black; border: 2px solid white; border-radius: 10px; padding: 10px; background-color: #f16565; font-size: 32px;'>{image_files[i]}</p>", unsafe_allow_html=True)
+                     count_classes.append("Squamous")
+             with col14:
+                 if i+1 < len(image_files):
+                     image2 = Image.open(os.path.join(folder_path, image_files[i+1]))
+                     st.image(image2, use_column_width=True)
+                     st.write(f"<p style='text-align: center; color: black; border: 2px solid white; border-radius: 10px; padding: 10px; background-color: #f16565; font-size: 32px;'>{image_files[i+1]}</p>", unsafe_allow_html=True)
+                     count_classes.append("Squamous")
+     count_class(count_classes)
+
+ elif tabs == 'Analytics' and count_system() > 0:
+     data_base = []
+     data_base_max = []
+     # max_value = max(data_base)
+     # max_index = data_base.index(max_value)
+     with open('count_class.txt', 'r') as f:
+         for line in f:
+             data_base.append(line.strip())
+             data_base_max.append(int(line.strip()))
+     max_value = max(data_base_max)  # Find the maximum value in the list
+     max_index = data_base_max.index(max_value)
+     max_indices = [i for i, value in enumerate(data_base_max) if value == max_value]
+     if len(max_indices) > 1:
+         max_index = 4
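+     # A max_index of 4 matches none of the headline branches after the
+     # chart, so a tie between classes renders the chart with no banner.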
+     option = {
+         "tooltip": {
+             "trigger": 'axis',
+             "axisPointer": {
+                 # Use axis to trigger tooltip
+                 "type": 'shadow'  # 'shadow' as default; can also be 'line' or 'cross'
+             }
+         },
+         "legend": {},
+         "grid": {
+             "left": '3%',
+             "right": '4%',
+             "bottom": '3%',
+             "containLabel": True
+         },
+         "xAxis": {
+             "type": 'value'
+         },
+         "yAxis": {
+             "type": 'category',
+             "data": ['Results']
+         },
+         "series": [
+             {
+                 "name": 'Adenocarcinoma',
+                 "type": 'bar',
+                 "stack": 'total',
+                 "label": {
+                     "show": True
+                 },
+                 "emphasis": {
+                     "focus": 'series'
+                 },
+                 "data": [data_base[0]]
+             },
+             {
+                 "name": 'Normal',
+                 "type": 'bar',
+                 "stack": 'total',
+                 "label": {
+                     "show": True
+                 },
+                 "emphasis": {
+                     "focus": 'series'
+                 },
+                 "data": [data_base[1]]
+             },
+             {
+                 "name": 'Large.Cell',
+                 "type": 'bar',
+                 "stack": 'total',
+                 "label": {
+                     "show": True
+                 },
+                 "emphasis": {
+                     "focus": 'series'
+                 },
+                 "data": [data_base[2]]
+             },
+             {
+                 "name": 'Squamous.Cell',
+                 "type": 'bar',
+                 "stack": 'total',
+                 "label": {
+                     "show": True
+                 },
+                 "emphasis": {
+                     "focus": 'series'
+                 },
+                 "data": [data_base[3]]
+             },
+         ]
+     }
+     st_echarts(options=option)
+     if max_index == 0:
+         st.markdown("<h2 style='text-align: center; border: 2px solid #5370c6; border-radius: 5px; padding: 15px; background-color: white; color: black;' > Adenocarcinoma </h2>".format(centered_style), unsafe_allow_html=True)
+     elif max_index == 1:
+         st.markdown("<h2 style='text-align: center; border: 2px solid green; border-radius: 5px; padding: 15px; background-color: white; color: black;' > Normal </h2>".format(centered_style), unsafe_allow_html=True)
+     elif max_index == 2:
+         st.markdown("<h2 style='text-align: center; border: 2px solid orange; border-radius: 5px; padding: 15px; background-color: white; color: black;' > Large cell carcinoma </h2>".format(centered_style), unsafe_allow_html=True)
+     elif max_index == 3:
+         st.markdown("<h2 style='text-align: center; border: 2px solid #f16565; border-radius: 5px; padding: 15px; background-color: white; color: black;' > Squamous cell carcinoma </h2>".format(centered_style), unsafe_allow_html=True)
+
+ elif tabs == 'Analytics' and count_system() == 0:
+     st.markdown(
+         """
+         <div style='border: 2px solid red; border-radius: 5px; padding: 5px; background-color: white;'>
+         <h3 style='text-align: center; color: red; font-size: 180%'> 🖼️ Image Analytics Not Detected ❌ </h3>
+         </div>
+         """, unsafe_allow_html=True)
+
+ elif tabs == 'More Information':
+     st.markdown(
+         """
+         <div style='border: 2px dashed blue; border-radius: 5px; padding: 5px; background-color: white;'>
+         <h3 style='text-align: center; color: black; font-size: 180%'> 💻 Organizers 🖱️ </h3>
+         </div>
+         """, unsafe_allow_html=True)
+     st.markdown(
+         """
+         <div style="display:flex; justify-content:center; align-items:center;">
+         <img src="https://drive.google.com/uc?export=view&id=1xupbYYXQZzjwMQiVGwT636oCXMga2ETF" style="width:300px; height:200px; margin: 10px;">
+         <img src="https://drive.google.com/uc?export=view&id=1evDy9sDtJ1T_WVR1bUnfyZkeSMjT9pfr" style="width:300px; height:200px; margin: 10px;">
+         <img src="https://drive.google.com/uc?export=view&id=1Sebh31aX8vdNe8P7oyBL714J_0qA5WYt" style="width:300px; height:200px; margin: 10px;">
+         </div>
+         """, unsafe_allow_html=True)
+     st.markdown(
+         """
+         <div style="display:flex; justify-content:center; align-items:center;">
+         <h3 style="width:300px; height:200px; margin: 10px; font-size: 50%; text-align: center;"> 👑 Santipab Tongchan\nCall : 090-2471512 \n "[email protected]" </h3>
+         <h3 style="width:300px; height:200px; margin: 10px; font-size: 50%; text-align: center;"> Phakkhaphon Artburai\nCall : 091-0197314 \n "[email protected]" </h3>
+         <h3 style="width:300px; height:200px; margin: 10px; font-size: 50%; text-align: center;"> Natthawee Naewkumpol\nCall : 061-9487722 \n "[email protected]" </h3>
+         </div>
+         """, unsafe_allow_html=True)
+     st.markdown(
+         """
+         <div style='border: 2px solid orange; border-radius: 5px; padding: 5px; background-color: white;'>
+         <h3 style='text-align: center; color: blue; font-size: 200%'> Princess Chulabhorn Science High School Buriram </h3>
+         </div>
+         """, unsafe_allow_html=True)
+
+ elif tabs == 'Reset':
+     def clear_folder(folder_name):
+         # Check if the folder exists
+         if not os.path.exists(folder_name):
+             print(f"{folder_name} does not exist.")
+             return
+         # Get a list of all files in the folder and its subdirectories
+         files = []
+         for dirpath, dirnames, filenames in os.walk(folder_name):
+             for filename in filenames:
+                 files.append(os.path.join(dirpath, filename))
+         # Delete all files in the list
+         for file in files:
+             os.remove(file)
+     clear_folder('Adenocarcinoma')
+     clear_folder('Large cell carcinoma')
+     clear_folder('Normal')
+     clear_folder('Squamous cell carcinoma')
+     clear_folder('dcm_png')
+     # Clear the saved counts in count_class.txt
+     with open('count_class.txt', 'w') as file:
+         file.write('')
+     st.markdown(
+         """
+         <div style='border: 2px solid #00FFFF; border-radius: 5px; padding: 5px; background-color: white;'>
+         <h3 style='text-align: center; color: blue; font-size: 180%'> 🔃 The information has been cleared. ✅ </h3>
+         </div>
+         """, unsafe_allow_html=True)
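Note: the inference path in app.py can be exercised outside Streamlit for quick testing. Below is a minimal sketch using the same checkpoint and label set; the `sample_ct.png` input filename is hypothetical.

```python
from PIL import Image
import torch
from transformers import BeitImageProcessor, BeitForImageClassification

model_name = "alicelouis/BeiT_NSCLC_lr2e-5"
labels = ["adenocarcinoma", "large.cell", "normal", "squamous.cell"]

processor = BeitImageProcessor.from_pretrained(model_name)
model = BeitForImageClassification.from_pretrained(
    model_name,
    num_labels=len(labels),
    id2label={i: c for i, c in enumerate(labels)},
    label2id={c: i for i, c in enumerate(labels)},
    ignore_mismatched_sizes=True,
)

image = Image.open("sample_ct.png").convert("RGB")  # hypothetical input file
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
probs = logits.softmax(dim=-1)[0]
for i in probs.argsort(descending=True):
    print(f"{model.config.id2label[i.item()]}: {probs[i].item():.2%}")
```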
assets/css/style.css ADDED
@@ -0,0 +1,37 @@
+ * {
+     font-family: 'Kanit', sans-serif !important;
+ }
+
+ .stTextArea {
+     height: auto;
+ }
+
+ div[class="css-keje6w e1tzin5v2"] {
+     column-gap: 100px;
+ }
+
+ h2 {
+     color: #5ba56e;
+ }
+
+ h3 {
+     color: #007a7a;
+ }
+
+ label[class="css-16huue1 effi0qh3"] {
+     font-size: 16px;
+ }
+
+ p {
+     color: #78701d;
+     font-size: 16px;
+ }
+
+ textarea {
+     color: #007a7a;
+ }
assets/webfonts/font.txt ADDED
@@ -0,0 +1,3 @@
+ <link rel="preconnect" href="https://fonts.googleapis.com">
+ <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
+ <link href="https://fonts.googleapis.com/css2?family=Kanit:wght@200&display=swap" rel="stylesheet">
className.txt ADDED
@@ -0,0 +1 @@
+ adenocarcinoma
count_class.txt ADDED
File without changes
css/style.css ADDED
@@ -0,0 +1,93 @@
+ section[data-testid='stSidebar'] {
+     background-color: #111;
+     min-width: unset !important;
+     width: unset !important;
+     flex-shrink: unset !important;
+ }
+
+ button[kind="header"] {
+     background-color: transparent;
+     color: rgb(180, 167, 141);
+ }
+
+ @media (hover) {
+     /* header element to be removed */
+     header[data-testid="stHeader"] {
+         display: none;
+     }
+
+     /* The navigation menu specs and size */
+     section[data-testid='stSidebar'] > div {
+         height: 100%;
+         width: 95px;
+         position: relative;
+         z-index: 1;
+         top: 0;
+         left: 0;
+         background-color: #111;
+         overflow-x: hidden;
+         transition: 0.5s ease;
+         padding-top: 60px;
+         white-space: nowrap;
+     }
+
+     /* The navigation menu open and close on hover and size */
+     /* section[data-testid='stSidebar'] > div {
+         height: 100%;
+         width: 75px; /* Put some width to hover on. */
+     /* }
+
+     /* ON HOVER */
+     section[data-testid='stSidebar'] > div:hover {
+         width: 300px;
+     }
+
+     /* The button on the streamlit navigation menu - hidden */
+     button[kind="header"] {
+         display: none;
+     }
+ }
+
+ @media (max-width: 272px) {
+     section[data-testid='stSidebar'] > div {
+         width: 15rem;
+     }
+ }
+
+ * {
+     font-family: 'Kanit', sans-serif !important;
+ }
+
+ .stTextArea {
+     height: auto;
+ }
+
+ div[class="css-keje6w e1tzin5v2"] {
+     column-gap: 100px;
+ }
+
+ h2 {
+     color: #5ba56e;
+ }
+
+ h3 {
+     color: #007a7a;
+ }
+
+ label[class="css-16huue1 effi0qh3"] {
+     font-size: 16px;
+ }
+
+ p {
+     color: #78701d;
+     font-size: 16px;
+ }
+
+ textarea {
+     color: #007a7a;
+ }
packages.txt ADDED
@@ -0,0 +1 @@
+ libgl1
requirements.txt ADDED
@@ -0,0 +1,10 @@
+ datasets
+ huggingface-hub
+ streamlit
+ torch
+ torchaudio
+ torchvision
+ transformers
+ grad-cam
+ streamlit_echarts
+ streamlit-on-Hover-tabs
save_images/dff_image.png ADDED
save_images/gradcam_image.png ADDED
save_images/hi.txt ADDED
File without changes
save_name.txt ADDED
@@ -0,0 +1,24 @@
+ tra_ade_0.png
+ tra_ade_1.png
+ tra_ade_2.png
+ tra_ade_3.png
+ tra_ade_4.png
+ tra_ade_5.png
+ tra_ade_6.png
+ tra_ade_7.png
+ tra_ade_8.png
+ tra_ade_9.png
+ tra_ade_10.png
+ tra_ade_11.png
+ tra_ade_12.png
+ tra_ade_13.png
+ tra_ade_14.png
+ tra_ade_15.png
+ tra_ade_16.png
+ tra_ade_17.png
+ tra_ade_18.png
+ tra_ade_19.png
+ tra_ade_20.png
+ tra_ade_21.png
+ tra_ade_23.png
+ tra_ade_25.png
system.txt ADDED
@@ -0,0 +1 @@
+ {'adenocarcinoma': 99.79050159454346, 'normal': 0.10952663142234087, 'large.cell': 0.05803077365271747, 'squamous.cell': 0.04194618377368897}