Kalbe-x-Bangkit committed on
Commit 4d091d2 · verified · 1 Parent(s): 8b1ee3f

Update app.py

Files changed (1):
  1. app.py +28 -146
app.py CHANGED
@@ -21,7 +21,7 @@ from keras.layers import Dense, GlobalAveragePooling2D
 from keras.models import Model
 from pydicom.pixel_data_handlers.util import apply_voi_lut
 
-# Environment Configuration
+# Environment Configuration ###############################################################
 os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "./da-kalbe-63ee33c9cdbb.json"
 bucket_name = "da-kalbe-ml-result-png"
 storage_client = storage.Client()
@@ -29,20 +29,20 @@ bucket_result = storage_client.bucket(bucket_name)
 bucket_name_load = "da-ml-models"
 bucket_load = storage_client.bucket(bucket_name_load)
 
+# Utility Functions #######################################################################
+
+# Upload Image #
 st.sidebar.title("Configuration")
 uploaded_file = st.sidebar.file_uploader("Upload Original Image", type=["png", "jpg", "jpeg", "dcm"])
 enhancement_type = st.sidebar.selectbox(
-    "Enhancement Type",
+    "Enhancement Type",
     ["Invert", "High Pass Filter", "Unsharp Masking", "Histogram Equalization", "CLAHE"]
 )
 
 st.sidebar.title("Detection")
 uploaded_detection = st.sidebar.file_uploader("Upload image to detect", type=["png", "jpg", "jpeg", "dcm"])
-# enhancement_type = st.sidebar.selectbox(
-#     "Enhancement Type",
-#     ["Invert", "High Pass Filter", "Unsharp Masking", "Histogram Equalization", "CLAHE"]
-# )
 
+# object detection ########################################################################
 H_detection = 224
 W_detection = 224
@@ -84,21 +84,8 @@ def draw_bbox(image, bbox):
     image = cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
     return image
 
+# Upload to GCS ###########################################################################
 
-st.title("AI INTEGRATION FOR CHEST X-RAY")
-
-st.header("IMAGE ENHANCEMENT")
-
-@st.cache_resource
-def load_gradcam_model():
-    model = keras.models.load_model('./model_renamed.h5', compile=False)
-    return model
-
-
-# Utility Functions
-
-# Dictionaries to track InstanceNumbers and StudyInstanceUIDs per filename
-# Initialize session state for instance numbers and study UIDs
 if 'instance_numbers' not in st.session_state:
     st.session_state.instance_numbers = {}
 if 'study_uids' not in st.session_state:
@@ -225,7 +212,12 @@ def upload_folder_images(original_image_path, enhanced_image_path, file_name):
     upload_to_gcs(original_dicom_bytes, folder_name + '/' + 'original_image.dcm', content_type='application/dicom')
     upload_to_gcs(enhanced_dicom_bytes, folder_name + '/' + enhancement_name + '.dcm', content_type='application/dicom')
 
+# Grad cam ################################################################################
 
+@st.cache_resource
+def load_gradcam_model():
+    model = keras.models.load_model('./model_renamed.h5', compile=False)
+    return model
 
 def get_mean_std_per_batch(image_path, H=320, W=320):
     sample_data = []
@@ -277,20 +269,8 @@ def grad_cam(input_model, img_array, cls, layer_name):
 
     return cam
 
-
 # Compute Grad-CAM
 def compute_gradcam(model_gradcam, img_path, layer_name='bn'):
-    # base_model = keras.applications.DenseNet121(weights = './densenet.hdf5', include_top = False)
-    # x = base_model.output
-    # x = keras.layers.GlobalAveragePooling2D()(x)
-    # predictions = keras.layers.Dense(14, activation = "sigmoid")(x)
-    # model_gradcam = keras.Model(inputs=base_model.input, outputs=predictions)
-    # model_gradcam.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
-    #                       loss='sparse_categorical_crossentropy')
-    # model.load_weights('./pretrained_model.h5')
-    # Load the original model
-
-    # Now use this modified model in your application
     model_gradcam = load_gradcam_model()
     preprocessed_input = load_image(img_path)
     predictions = model_gradcam.predict(preprocessed_input)
@@ -298,10 +278,10 @@ def compute_gradcam(model_gradcam, img_path, layer_name='bn'):
     original_image = load_image(img_path, preprocess=False)
 
     # Assuming you have 14 classes as previously mentioned
-    labels = ['Cardiomegaly', 'Emphysema', 'Effusion', 'Hernia', 'Infiltration', 'Mass',
-              'Nodule', 'Atelectasis', 'Pneumothorax', 'Pleural_Thickening',
-              'Pneumonia', 'Fibrosis', 'Edema', 'Consolidation']
-
+    labels = ['Cardiomegaly', 'Emphysema', 'Effusion', 'Hernia', 'Infiltration', 'Mass',
+              'Nodule', 'Atelectasis', 'Pneumothorax', 'Pleural_Thickening',
+              'Pneumonia', 'Fibrosis', 'Edema', 'Consolidation']
+
     for i in range(len(labels)):
         st.write(f"Generating gradcam for class {labels[i]}")
         gradcam = grad_cam(model_gradcam, preprocessed_input, i, layer_name)
@@ -310,6 +290,8 @@ def compute_gradcam(model_gradcam, img_path, layer_name='bn'):
         gradcam = cv2.addWeighted(gradcam, 0.5, original_image.squeeze().astype(np.uint8), 0.5, 0)
         st.image(gradcam, caption=f"{labels[i]}: p={predictions[0][i]:.3f}", use_column_width=True)
 
+# Image enhancement #######################################################################
+
 def calculate_mse(original_image, enhanced_image):
     mse = np.mean((original_image - enhanced_image) ** 2)
     return mse
@@ -383,7 +365,7 @@ def enhance_image(image, enhancement_type):
     else:
        raise ValueError(f"Unknown enhancement type: {enhancement_type}")
 
-# Function to add a button to redirect to the URL
+# Other Utils #############################################################################
 def redirect_button(url):
     button = st.button('Go to OHIF Viewer')
     if button:
@@ -415,6 +397,9 @@ def redirect_button(url):
 ########################### Streamlit Interface ###########################################
 ###########################################################################################
 
+st.title("AI INTEGRATION FOR CHEST X-RAY")
+
+st.header("IMAGE ENHANCEMENT")
 
 # File uploader for DICOM files
 if uploaded_file is not None:
@@ -445,7 +430,7 @@ if uploaded_file is not None:
 
     file_bytes = np.asarray(bytearray(uploaded_detection.read()), dtype=np.uint8)
     image = cv2.imdecode(file_bytes, 1)
-
+
     # st.image(image, caption='Uploaded Image.', use_column_width=True)
 
     col1, col2 = st.columns(2)
@@ -465,7 +450,7 @@ if uploaded_file is not None:
         st.image(img_array, caption="Original Image", use_column_width=True)
     else:
         st.error("Unsupported image dimensions")
-
+
     original_image = img_array
 
     # Example: convert to grayscale if it's a color image
@@ -496,7 +481,6 @@ if uploaded_file is not None:
     enhanced_image_path = "enhanced_image.png"
     cv2.imwrite(enhanced_image_path, enhanced_image)
 
-
    # Save enhanced image to a file
    enhanced_image_path = "enhanced_image.png"
    cv2.imwrite(enhanced_image_path, enhanced_image)
@@ -504,113 +488,15 @@ if uploaded_file is not None:
     # Save original image to a file
     original_image_path = "original_image.png"
     cv2.imwrite(original_image_path, original_image)
-    upload_folder_images(original_image_path, enhanced_image_path, file_name)
-
+
+    if st.button("Send to OHIF"):
+        upload_folder_images(original_image_path, enhanced_image_path, file_name)
+
     # Add the redirect button
     col1, col2, col3 = st.columns(3)
     with col1:
         redirect_button("https://new-ohif-viewer-k7c3gdlxua-et.a.run.app/")
 
-    # with col2:
-    #     model_detection = load_model_detection()
-    #     file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
-    #     image = cv2.imdecode(file_bytes, 1)
-
-    #     st.image(image, caption='Uploaded Image.', use_column_width=True)
-
-    #     st.button('Detect')
-    #     st.write("Processing...")
-    #     input_image = preprocess_image(image)
-    #     pred_bbox, pred_label, pred_label_confidence = predict(model_detection, input_image)
-
-    #     # Updated label mapping based on the dataset
-    #     label_mapping = {
-    #         0: 'Atelectasis',
-    #         1: 'Cardiomegaly',
-    #         2: 'Effusion',
-    #         3: 'Infiltrate',
-    #         4: 'Mass',
-    #         5: 'Nodule',
-    #         6: 'Pneumonia',
-    #         7: 'Pneumothorax'
-    #     }
-
-    #     if pred_label_confidence < 0.2:
-    #         st.write("May not detect a disease.")
-    #     else:
-    #         pred_label_name = label_mapping[pred_label]
-    #         st.write(f"Prediction Label: {pred_label_name}")
-    #         st.write(f"Prediction Bounding Box: {pred_bbox}")
-    #         st.write(f"Prediction Confidence: {pred_label_confidence:.2f}")
-
-    #         output_image = draw_bbox(image.copy(), pred_bbox)
-    #         st.image(output_image, caption='Detected Image.', use_column_width=True)
-
-
-    #     file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
-    #     image = cv2.imdecode(file_bytes, 1)
-    #     st.button('Auto Detect')
-    #     st.write("Processing...")
-    #     input_image = preprocess_image(image)
-    #     pred_bbox, pred_label, pred_label_confidence = predict(model_detection, input_image)
-
-    #     # Updated label mapping based on the dataset
-    #     label_mapping = {
-    #         0: 'Atelectasis',
-    #         1: 'Cardiomegaly',
-    #         2: 'Effusion',
-    #         3: 'Infiltrate',
-    #         4: 'Mass',
-    #         5: 'Nodule',
-    #         6: 'Pneumonia',
-    #         7: 'Pneumothorax'
-    #     }
-
-    #     if pred_label_confidence < 0.2:
-    #         st.write("May not detect a disease.")
-    #     else:
-    #         pred_label_name = label_mapping[pred_label]
-    #         st.write(f"Prediction Label: {pred_label_name}")
-    #         st.write(f"Prediction Bounding Box: {pred_bbox}")
-    #         st.write(f"Prediction Confidence: {pred_label_confidence:.2f}")
-
-    #         output_image = draw_bbox(image.copy(), pred_bbox)
-    #         st.image(output_image, caption='Detected Image.', use_column_width=True)
-
-
-    # if st.button('Auto Detect'):
-    #     st.write("Processing...")
-    #     input_image = image
-    #     # input_image = enhancement_type
-    #     # input_image = cv2.resize(enhanced_image, (W, H)) # Resize the enhanced image to the required input size
-    #     # input_image = (input_image - 127.5) / 127.5 # Normalize to [-1, +1]
-    #     # input_image = np.expand_dims(input_image, axis=0).astype(np.float32) # Expand dimensions and convert to float32
-
-    #     pred_bbox, pred_label, pred_label_confidence = predict(model_detection, input_image)
-
-    #     # Updated label mapping based on the dataset
-    #     label_mapping = {
-    #         0: 'Atelectasis',
-    #         1: 'Cardiomegaly',
-    #         2: 'Effusion',
-    #         3: 'Infiltrate',
-    #         4: 'Mass',
-    #         5: 'Nodule',
-    #         6: 'Pneumonia',
-    #         7: 'Pneumothorax'
-    #     }
-
-    #     if pred_label_confidence < 0.2:
-    #         st.write("May not detect a disease.")
-    #     else:
-    #         pred_label_name = label_mapping[pred_label]
-    #         st.write(f"Prediction Label: {pred_label_name}")
-    #         st.write(f"Prediction Bounding Box: {pred_bbox}")
-    #         st.write(f"Prediction Confidence: {pred_label_confidence:.2f}")
-
-    #         output_image = draw_bbox(image.copy(), pred_bbox)
-    #         st.image(output_image, caption='Detected Image.', use_column_width=True)
-
     with col2:
         if st.button('Generate Grad-CAM'):
             model=load_gradcam_model()
@@ -627,14 +513,10 @@ st.write("Upload a chest X-ray image and click on 'Detect' to find out if there'
 
 model_detection = load_model_detection()
 
-# uploaded_file = st.sidebar.file_uploader("Choose an image...", type=["jpg", "jpeg", "png", "dcm"])
-
 if uploaded_detection is not None:
     file_bytes = np.asarray(bytearray(uploaded_detection.read()), dtype=np.uint8)
     image = cv2.imdecode(file_bytes, 1)
 
-    # st.image(image, caption='Uploaded Image.', use_column_width=True)
-
     if st.button('Detect'):
         st.write("Processing...")
         input_image = preprocess_image(image)
 
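Aside from the new section-divider comments and the relocated load_gradcam_model (now under @st.cache_resource), the main behavioral change in this commit is that the GCS upload of the original/enhanced image pair is gated behind a "Send to OHIF" button instead of running on every Streamlit rerun. A minimal, self-contained sketch of that gating pattern; the stub body and file names below are illustrative assumptions, with upload_folder_images standing in for the app's real GCS helper:

import streamlit as st

def upload_folder_images(original_path, enhanced_path, name):
    # Stand-in for the app's GCS upload helper; only reports what would be uploaded.
    st.write(f"Would upload {original_path} and {enhanced_path} under '{name}'")

original_image_path = "original_image.png"
enhanced_image_path = "enhanced_image.png"
file_name = "chest_xray.png"

# Streamlit reruns the whole script on each widget interaction, so without this
# guard the upload would fire on every rerun; with it, only on an explicit click.
if st.button("Send to OHIF"):
    upload_folder_images(original_image_path, enhanced_image_path, file_name)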