ardavey committed
Commit 9335b96
1 Parent(s): 28a5246

Update app.py

Files changed (1)
  1. app.py +31 -16
app.py CHANGED
@@ -3,6 +3,7 @@ import torch
 from transformers import AutoProcessor, PaliGemmaForConditionalGeneration
 from PIL import Image
 
+# Load model and processor
 model_id = "brucewayne0459/paligemma_derm"
 processor = AutoProcessor.from_pretrained(model_id)
 model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
@@ -11,7 +12,7 @@ model.eval()
 device = "cuda" if torch.cuda.is_available() else "cpu"
 model.to(device)
 
-# logo (use the Hugging Face one for now)
+# Logo (Hugging Face)
 st.markdown(
     """
     <style>
@@ -31,9 +32,11 @@ st.markdown(
     unsafe_allow_html=True,
 )
 
+# App Title
 st.title("VisionDerm")
 st.write("Upload an image or use your camera to identify the skin condition.")
 
+# Layout
 col1, col2 = st.columns([3, 2])
 
 with col1:
@@ -48,19 +51,28 @@ input_image = None
 if camera_photo:
     input_image = Image.open(camera_photo)
 elif uploaded_file:
-    input_image = Image.open(uploaded_file)
-
+    try:
+        # Open and convert uploaded file to RGB
+        input_image = Image.open(uploaded_file).convert("RGB")
+        input_image = input_image.copy()  # Detach from file pointer
+    except Exception as e:
+        st.error(f"Error loading image: {str(e)}")
+        input_image = None
+
+# Display and process the image
 with col2:
     if input_image:
-        resized_image = input_image.resize((300, 300))
-        st.image(resized_image, caption="Selected Image (300x300)", use_container_width=True)
+        try:
+            # Display the uploaded or captured image
+            resized_image = input_image.resize((300, 300))
+            st.image(resized_image, caption="Selected Image (300x300)", use_container_width=True)
 
-        # Resize image for processing (512x512 pixels)
-        max_size = (512, 512)
-        processed_image = input_image.resize(max_size)
+            # Resize the image for processing (512x512 pixels)
+            max_size = (512, 512)
+            processed_image = input_image.resize(max_size)
 
-        with st.spinner("Processing..."):
-            try:
+            with st.spinner("Processing..."):
+                # Prepare inputs for the model
                 inputs = processor(
                     text=prompt,
                     images=processed_image,
@@ -68,28 +80,31 @@ with col2:
                     padding="longest"
                 ).to(device)
 
-                default_max_tokens = 50  # Set a default value for max tokens
+                # Generate output from the model
+                default_max_tokens = 50  # Default value for max tokens
                 with torch.no_grad():
                     outputs = model.generate(**inputs, max_new_tokens=default_max_tokens)
 
+                # Decode and clean the output
                 decoded_output = processor.decode(outputs[0], skip_special_tokens=True)
                 if prompt in decoded_output:
                     decoded_output = decoded_output.replace(prompt, "").strip()
-
                 decoded_output = decoded_output.title()
 
+                # Display the result
                 st.success("Analysis Complete!")
                 st.write("**Model Output:**", decoded_output)
 
-            except Exception as e:
-                st.error(f"Error: {str(e)}")
+        except Exception as e:
+            st.error(f"Error: {str(e)}")
 
 st.markdown("---")
+
+# Team Information
 st.info("""
 ### Team: Mahasigma Berprestasi
 - **Muhammad Karov Ardava Barus** ; 103052300001
 - **Akmal Yaasir Fauzaan** ; 103052300008
 - **Farand Diy Dat Mahazalfaa** ; 103052300050
 - **Hauzan Rafi Attallah**; 103052330011
-    """)
-
+    """)
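For reference, below is a minimal standalone sketch of the inference flow the updated app.py performs, stripped of the Streamlit UI. It is an illustration under stated assumptions, not part of the commit: the prompt string and image path are placeholders (the real prompt is defined in the unchanged with col1: block that the hunks above omit), and return_tensors="pt" is assumed for the processor call because that line falls between the shown hunks.

# Standalone sketch of the app's inference flow; placeholders and assumptions are noted inline.
import torch
from PIL import Image
from transformers import AutoProcessor, PaliGemmaForConditionalGeneration

model_id = "brucewayne0459/paligemma_derm"
processor = AutoProcessor.from_pretrained(model_id)
model = PaliGemmaForConditionalGeneration.from_pretrained(model_id)
model.eval()

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Placeholders: the app gets its image from an upload/camera widget and defines its own prompt.
prompt = "Identify the skin condition."          # placeholder prompt
image = Image.open("sample.jpg").convert("RGB")  # placeholder path; RGB conversion mirrors the commit
image = image.resize((512, 512))                 # same processing size as the app

# return_tensors="pt" is assumed; that context line sits between the shown hunks.
inputs = processor(
    text=prompt,
    images=image,
    return_tensors="pt",
    padding="longest",
).to(device)

with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=50)

decoded = processor.decode(outputs[0], skip_special_tokens=True)
decoded = decoded.replace(prompt, "").strip().title()
print(decoded)

On the upload-handling change itself: Image.open reads lazily, so calling .copy() forces the pixel data to load while the uploaded file is still open, which is what the "# Detach from file pointer" comment in the diff refers to, and .convert("RGB") guards against RGBA or grayscale uploads reaching the processor.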