yahiab committed
Commit fe0c1e0 · Parent: 7da5d38

Track coral images with Git LFS

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+coral_images/*.jpg filter=lfs diff=lfs merge=lfs -text
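
With this rule, every JPEG under `coral_images/` is committed as a small text pointer while the actual bytes live on the LFS server. A minimal sketch (a hypothetical helper, not part of this commit) for checking whether a checked-out file is still an un-smudged pointer, assuming the standard LFS v1 pointer header:

```python
# Sketch: detect a Git LFS pointer file.
# LFS pointers are small text files whose first line names the spec version.
LFS_PREFIX = b"version https://git-lfs.github.com/spec/v1"

def is_lfs_pointer(path: str) -> bool:
    """True if `path` still holds an LFS pointer rather than the real image bytes."""
    with open(path, "rb") as f:
        return f.read(len(LFS_PREFIX)) == LFS_PREFIX

# e.g. is_lfs_pointer("coral_images/Acropora_aculeus.jpg") stays True until
# `git lfs pull` replaces the pointer with the actual JPEG.
```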
app _bk.py CHANGED
@@ -2,54 +2,104 @@ import gradio as gr
 import numpy as np
 from PIL import Image, ImageDraw
 import torch
-from torchvision import transforms
-from transformers import AutoModelForImageClassification, AutoFeatureExtractor
-
-# Define all available models
-MODEL_LIST = {
-    'beit': "microsoft/beit-base-patch16-224-pt22k-ft22k",
-    'vit': "google/vit-base-patch16-224",
-    'convnext': "facebook/convnext-tiny-224",
-}
-
-# Global variables
-current_model = None
-current_preprocessor = None
-device = "cuda" if torch.cuda.is_available() else "cpu"  # Dynamically set device
-
-# Load model and preprocessor
-def load_model_and_preprocessor(model_name):
-    """Load model and preprocessor for a given model name."""
-    global current_model, current_preprocessor
-    print(f"Loading model and preprocessor for: {model_name} on {device}")
-    current_model = AutoModelForImageClassification.from_pretrained(MODEL_LIST[model_name]).to(device).eval()
-    current_preprocessor = AutoFeatureExtractor.from_pretrained(MODEL_LIST[model_name])
-    return f"Model {model_name} loaded successfully on {device}."
-
-# Predict function
-def predict(image, model, preprocessor):
-    """Make a prediction on the given image patch using the loaded model."""
-    if model is None or preprocessor is None:
-        raise ValueError("Model and preprocessor are not loaded.")
-    inputs = preprocessor(images=image, return_tensors="pt").to(device)
+import torchvision.transforms as transforms
+import timm
+
+# URL for the Hugging Face checkpoint
+CHECKPOINT_URL = "https://huggingface.co/ReefNet/beit_global/resolve/main/checkpoint-60.pth"
+
+# Class labels
+all_classes = [
+    'Acanthastrea', 'Acropora', 'Agaricia', 'Alveopora', 'Astrea', 'Astreopora',
+    'Caulastraea', 'Coeloseris', 'Colpophyllia', 'Coscinaraea', 'Ctenactis',
+    'Cycloseris', 'Cyphastrea', 'Dendrogyra', 'Dichocoenia', 'Diploastrea',
+    'Diploria', 'Dipsastraea', 'Echinophyllia', 'Echinopora', 'Euphyllia',
+    'Eusmilia', 'Favia', 'Favites', 'Fungia', 'Galaxea', 'Gardineroseris',
+    'Goniastrea', 'Goniopora', 'Halomitra', 'Herpolitha', 'Hydnophora',
+    'Isophyllia', 'Isopora', 'Leptastrea', 'Leptoria', 'Leptoseris',
+    'Lithophyllon', 'Lobactis', 'Lobophyllia', 'Madracis', 'Meandrina', 'Merulina',
+    'Montastraea', 'Montipora', 'Mussa', 'Mussismilia', 'Mycedium', 'Orbicella',
+    'Oulastrea', 'Oulophyllia', 'Oxypora', 'Pachyseris', 'Pavona', 'Pectinia',
+    'Physogyra', 'Platygyra', 'Plerogyra', 'Plesiastrea', 'Pocillopora',
+    'Podabacia', 'Porites', 'Psammocora', 'Pseudodiploria', 'Sandalolitha',
+    'Scolymia', 'Seriatopora', 'Siderastrea', 'Stephanocoenia', 'Stylocoeniella',
+    'Stylophora', 'Tubastraea', 'Turbinaria'
+]
+
+# Function to load the BeIT model
+def load_model(model_name):
+    print(f"Loading {model_name} model...")
+    if model_name == 'beit':
+        args = type('', (), {})()
+        args.model = 'beitv2_large_patch16_224.in1k_ft_in22k_in1k'
+        args.nb_classes = len(all_classes)
+        args.drop_path = 0.1
+
+        # Create model
+        model = timm.create_model(
+            args.model,
+            pretrained=False,
+            num_classes=args.nb_classes,
+            drop_path_rate=args.drop_path,
+            use_rel_pos_bias=True,
+            use_abs_pos_emb=True,
+        )
+
+        # Load checkpoint from Hugging Face
+        checkpoint = torch.hub.load_state_dict_from_url(CHECKPOINT_URL, map_location="cpu")
+        state_dict = checkpoint.get('model', checkpoint)
+
+        # Filter state dict
+        filtered_state_dict = {k: v for k, v in state_dict.items() if "relative_position_index" not in k}
+        model.load_state_dict(filtered_state_dict, strict=False)
+    else:
+        raise ValueError(f"Model {model_name} not implemented!")
+
+    # Move model to CUDA if available
+    model.eval()
+    if torch.cuda.is_available():
+        model.cuda()
+    return model
+
+# Preprocessing transforms
+preprocess = transforms.Compose([
+    transforms.Resize((224, 224)),
+    transforms.ToTensor(),
+    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
+])
+
+# Initialize selected model
+selected_model_name = 'beit'
+model = load_model(selected_model_name)
+
+def predict_label(image):
+    """Predict the label for the given image."""
+    # Ensure the image is a PIL Image
+    if isinstance(image, np.ndarray):
+        image = Image.fromarray(image)
+    elif not isinstance(image, Image.Image):
+        raise TypeError(f"Unexpected type {type(image)}, expected PIL.Image or numpy.ndarray.")
+
+    input_tensor = preprocess(image).unsqueeze(0)
+    if torch.cuda.is_available():
+        input_tensor = input_tensor.cuda()
+
     with torch.no_grad():
-        outputs = model(**inputs)
-    predicted_class = torch.argmax(outputs.logits, dim=1).item()
-    return model.config.id2label[predicted_class]
+        outputs = model(input_tensor)
+        predicted_class = torch.argmax(outputs, dim=1).item()
+
+    return all_classes[predicted_class]
+
 
 # Function to draw a rectangle on the image
 def draw_rectangle(image, x, y, size=224):
-    """Draw a rectangle on the image."""
-    image_pil = image.copy()  # Create a copy to avoid modifying the original image
+    image_pil = image.copy()
     draw = ImageDraw.Draw(image_pil)
-    x1, y1 = x, y
-    x2, y2 = x + size, y + size
-    draw.rectangle([x1, y1, x2, y2], outline="red", width=5)
+    draw.rectangle([x, y, x + size, y + size], outline="red", width=3)
     return image_pil
 
-# Function to crop the image
+# Crop a region of interest
 def crop_image(image, x, y, size=224):
-    """Crop a region from the image."""
     image_np = np.array(image)
     h, w, _ = image_np.shape
     x = min(max(x, 0), w - size)
@@ -57,55 +107,40 @@ def crop_image(image, x, y, size=224):
     cropped = image_np[y:y+size, x:x+size]
     return Image.fromarray(cropped)
 
-# Gradio Interface
+# Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("## Test Public Models for Coral Classification")
-
+    gr.Markdown("## Coral Classification with BeIT Model")
     with gr.Row():
         with gr.Column():
-            model_selector = gr.Dropdown(choices=list(MODEL_LIST.keys()), value='beit', label="Select Model")
             image_input = gr.Image(type="pil", label="Upload Image", interactive=True)
-            x_slider = gr.Slider(minimum=0, maximum=1000, step=1, value=0, label="X Coordinate")
-            y_slider = gr.Slider(minimum=0, maximum=1000, step=1, value=0, label="Y Coordinate")
+            x_slider = gr.Slider(0, 1000, step=1, value=0, label="X Coordinate")
+            y_slider = gr.Slider(0, 1000, step=1, value=0, label="Y Coordinate")
         with gr.Column():
-            interactive_image = gr.Image(label="Interactive Image with Selection")
+            interactive_image = gr.Image(label="Interactive Image")
            cropped_image = gr.Image(label="Cropped Patch")
            label_output = gr.Textbox(label="Predicted Label")
-
-    # Update the model and preprocessor
-    def update_model(model_name):
-        return load_model_and_preprocessor(model_name)
-
-    # Update the rectangle and crop the patch
+
+    # Interactions
     def update_selection(image, x, y):
         overlay_image = draw_rectangle(image, x, y)
         cropped = crop_image(image, x, y)
         return overlay_image, cropped
 
-    # Predict the label from the cropped patch
     def predict_from_cropped(cropped):
-        print(f"Type of cropped_image before prediction: {type(cropped)}")
-        return predict(cropped, current_model, current_preprocessor)
+        return predict_label(cropped)
 
-    # Buttons and interactions
     crop_button = gr.Button("Crop")
     crop_button.click(fn=update_selection, inputs=[image_input, x_slider, y_slider], outputs=[interactive_image, cropped_image])
 
     predict_button = gr.Button("Predict")
     predict_button.click(fn=predict_from_cropped, inputs=cropped_image, outputs=label_output)
 
-    model_selector.change(fn=update_model, inputs=model_selector, outputs=None)
-
-    # Update sliders dynamically based on uploaded image size
     def update_sliders(image):
-        if image is not None:
+        if image:
             width, height = image.size
             return gr.update(maximum=width - 224), gr.update(maximum=height - 224)
         return gr.update(), gr.update()
 
     image_input.change(fn=update_sliders, inputs=image_input, outputs=[x_slider, y_slider])
 
-    # Initialize model on app start
-    demo.load(fn=lambda: load_model_and_preprocessor('beit'), inputs=None, outputs=None)
-
 demo.launch(server_name="0.0.0.0", server_port=7860)
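
The backup app's rewrite swaps the `transformers` auto-classes for a `timm` BEiT v2 backbone restored from the ReefNet checkpoint. The non-obvious step is filtering out `relative_position_index` entries before `load_state_dict`: those are index buffers that timm rebuilds from the model's own geometry at construction time, so stale checkpoint copies are redundant and can clash in shape. A standalone sketch of that pattern (model tag, checkpoint URL, and filter taken from the diff; the hard-coded class count and the key reporting are illustrative assumptions):

```python
import timm
import torch

# Recreate the backbone with randomly initialised weights; the checkpoint
# supplies the fine-tuned parameters afterwards.
model = timm.create_model(
    "beitv2_large_patch16_224.in1k_ft_in22k_in1k",
    pretrained=False,
    num_classes=73,  # stands in for len(all_classes) in the app
)

checkpoint = torch.hub.load_state_dict_from_url(
    "https://huggingface.co/ReefNet/beit_global/resolve/main/checkpoint-60.pth",
    map_location="cpu",
)
state_dict = checkpoint.get("model", checkpoint)

# Drop buffers that the freshly built model regenerates itself.
filtered = {k: v for k, v in state_dict.items() if "relative_position_index" not in k}

# strict=False tolerates the dropped keys; the return value says what was skipped.
result = model.load_state_dict(filtered, strict=False)
print("missing:", result.missing_keys)
print("unexpected:", result.unexpected_keys)
```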
app.py CHANGED
@@ -26,34 +26,41 @@ all_classes = [
     'Stylophora', 'Tubastraea', 'Turbinaria'
 ]
 
+# Example image paths (keys must be unique, so the two Acropora examples
+# are disambiguated by species)
+example_images = {
+    "Acropora millepora": "coral_images/Acropora_millepora.jpg",
+    "Agaricia": "coral_images/Agaricia_agaricites.jpg",
+    "Acropora aculeus": "coral_images/Acropora_aculeus.jpg",
+    "Montipora": "coral_images/Montipora_patula.jpg",
+    "Pocillopora": "coral_images/Pocillopora_acuta.jpg",
+    "Porites": "coral_images/porities_lobata.jpg",
+    "Favites": "coral_images/Favites_abdita.jpg",
+    "Fungia": "coral_images/Fungia_concinna.jpg",
+}
+
 # Function to load the BeIT model
 def load_model(model_name):
     print(f"Loading {model_name} model...")
-    if model_name == 'beit':
-        args = type('', (), {})()
-        args.model = 'beitv2_large_patch16_224.in1k_ft_in22k_in1k'
-        args.nb_classes = len(all_classes)
-        args.drop_path = 0.1
-
-        # Create model
-        model = timm.create_model(
-            args.model,
-            pretrained=False,
-            num_classes=args.nb_classes,
-            drop_path_rate=args.drop_path,
-            use_rel_pos_bias=True,
-            use_abs_pos_emb=True,
-        )
-
-        # Load checkpoint from Hugging Face
-        checkpoint = torch.hub.load_state_dict_from_url(CHECKPOINT_URL, map_location="cpu")
-        state_dict = checkpoint.get('model', checkpoint)
-
-        # Filter state dict
-        filtered_state_dict = {k: v for k, v in state_dict.items() if "relative_position_index" not in k}
-        model.load_state_dict(filtered_state_dict, strict=False)
-    else:
-        raise ValueError(f"Model {model_name} not implemented!")
+    args = type('', (), {})()
+    args.model = 'beitv2_large_patch16_224.in1k_ft_in22k_in1k'
+    args.nb_classes = len(all_classes)
+    args.drop_path = 0.1
+
+    # Create model
+    model = timm.create_model(
+        args.model,
+        pretrained=False,
+        num_classes=args.nb_classes,
+        drop_path_rate=args.drop_path,
+        use_rel_pos_bias=True,
+        use_abs_pos_emb=True,
+    )
+
+    # Load checkpoint from Hugging Face
+    checkpoint = torch.hub.load_state_dict_from_url(CHECKPOINT_URL, map_location="cpu")
+    state_dict = checkpoint.get('model', checkpoint)
+    filtered_state_dict = {k: v for k, v in state_dict.items() if "relative_position_index" not in k}
+    model.load_state_dict(filtered_state_dict, strict=False)
 
     # Move model to CUDA if available
     model.eval()
@@ -68,18 +75,12 @@ preprocess = transforms.Compose([
     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
 ])
 
-# Initialize selected model
-selected_model_name = 'beit'
-model = load_model(selected_model_name)
+# Initialize model
+model = load_model('beit')
 
 def predict_label(image):
-    """Predict the label for the given image."""
-    # Ensure the image is a PIL Image
     if isinstance(image, np.ndarray):
         image = Image.fromarray(image)
-    elif not isinstance(image, Image.Image):
-        raise TypeError(f"Unexpected type {type(image)}, expected PIL.Image or numpy.ndarray.")
-
     input_tensor = preprocess(image).unsqueeze(0)
     if torch.cuda.is_available():
         input_tensor = input_tensor.cuda()
@@ -90,15 +91,16 @@ def predict_label(image):
 
     return all_classes[predicted_class]
 
-
-# Function to draw a rectangle on the image
 def draw_rectangle(image, x, y, size=224):
-    image_pil = image.copy()
+    """Draw a clear red rectangle with increased thickness."""
+    image_pil = image.copy()  # Create a copy to avoid modifying the original image
     draw = ImageDraw.Draw(image_pil)
-    draw.rectangle([x, y, x + size, y + size], outline="red", width=3)
+    x1, y1 = x, y
+    x2, y2 = x + size, y + size
+    draw.rectangle([x1, y1, x2, y2], outline="red", width=6)  # Increase the width for clarity
     return image_pil
 
-# Crop a region of interest
+
 def crop_image(image, x, y, size=224):
     image_np = np.array(image)
     h, w, _ = image_np.shape
@@ -110,6 +112,7 @@ def crop_image(image, x, y, size=224):
 # Gradio UI
 with gr.Blocks() as demo:
     gr.Markdown("## Coral Classification with BeIT Model")
+
     with gr.Row():
         with gr.Column():
             image_input = gr.Image(type="pil", label="Upload Image", interactive=True)
@@ -120,20 +123,29 @@ with gr.Blocks() as demo:
             cropped_image = gr.Image(label="Cropped Patch")
             label_output = gr.Textbox(label="Predicted Label")
 
-    # Interactions
-    def update_selection(image, x, y):
-        overlay_image = draw_rectangle(image, x, y)
-        cropped = crop_image(image, x, y)
-        return overlay_image, cropped
+    # Crop and Predict buttons
+    crop_button = gr.Button("Crop")
+    predict_button = gr.Button("Predict")
 
-    def predict_from_cropped(cropped):
-        return predict_label(cropped)
+    # Example table
+    def load_example(example_path):
+        return Image.open(example_path).convert("RGB")
 
-    crop_button = gr.Button("Crop")
-    crop_button.click(fn=update_selection, inputs=[image_input, x_slider, y_slider], outputs=[interactive_image, cropped_image])
-
-    predict_button = gr.Button("Predict")
-    predict_button.click(fn=predict_from_cropped, inputs=cropped_image, outputs=label_output)
+    # Generate table of examples
+    with gr.Row():
+        gr.Markdown("### Example Images for Quick Testing")
+
+    with gr.Row():
+        for genus, path in example_images.items():
+            with gr.Column():
+                thumbnail = gr.Image(value=path, interactive=False, label=genus)
+                select_button = gr.Button(value=f"Select {genus}")
+                select_button.click(fn=lambda p=path: load_example(p), inputs=None, outputs=image_input)
+
+    # Button functionality
+    crop_button.click(fn=lambda img, x, y: (draw_rectangle(img, x, y), crop_image(img, x, y)),
+                      inputs=[image_input, x_slider, y_slider], outputs=[interactive_image, cropped_image])
+    predict_button.click(fn=predict_label, inputs=cropped_image, outputs=label_output)
 
     def update_sliders(image):
         if image:
@@ -143,4 +155,5 @@ with gr.Blocks() as demo:
 
     image_input.change(fn=update_sliders, inputs=image_input, outputs=[x_slider, y_slider])
 
+# demo.launch()
 demo.launch(server_name="0.0.0.0", server_port=7860)
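
A small but load-bearing detail in the example table above: each thumbnail's callback is bound as `lambda p=path: load_example(p)`. The default argument snapshots `path` at lambda-creation time; a plain `lambda: load_example(path)` would close over the loop variable and make every Select button load the last image in the dict. A minimal illustration outside Gradio (hypothetical list, not from the commit):

```python
# Late binding: every closure reads `name` after the loop has finished.
late = [lambda: name for name in ["a", "b", "c"]]
print([f() for f in late])   # ['c', 'c', 'c']

# Default-argument capture, as used in the diff's select_button.click wiring:
# each lambda freezes the current value of `name` when it is created.
bound = [lambda n=name: n for name in ["a", "b", "c"]]
print([f() for f in bound])  # ['a', 'b', 'c']
```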
coral_images/Acropora-gemmifera.jpg ADDED

Git LFS Details

  • SHA256: 358530fa157ba6a4dfb7f4ece7ab26bb0ca8883775de3177fdbf0665a25c3bae
  • Pointer size: 131 Bytes
  • Size of remote file: 283 kB
coral_images/Acropora_aculeus.jpg ADDED

Git LFS Details

  • SHA256: 62918ae7c70699e2aa0c383e01e80992cc355d83e860cef9036a679b7b2aa97f
  • Pointer size: 132 Bytes
  • Size of remote file: 1.54 MB
coral_images/Acropora_anthocercis.jpg ADDED

Git LFS Details

  • SHA256: bb3bf66c4c8afc9048f02d127fe35c26725629baa7a7635ec1b1a209e830c5ea
  • Pointer size: 132 Bytes
  • Size of remote file: 1.1 MB
coral_images/Acropora_millepora.jpg ADDED

Git LFS Details

  • SHA256: 67eda0336f9234a0a03eb393fd7b8d8eae28afb94fce384312b36e3a1094bdef
  • Pointer size: 131 Bytes
  • Size of remote file: 870 kB
coral_images/Agaricia_agaricites.jpg ADDED

Git LFS Details

  • SHA256: f3ef9d573fec6b89f41123dc37d3da5b553526672b443cdc4f639aa3cba4d397
  • Pointer size: 131 Bytes
  • Size of remote file: 751 kB
coral_images/Agaricia_fragilis.jpg ADDED

Git LFS Details

  • SHA256: 0f7e4da5c85951fb2d4372aa9c0fd4dd11806a44d3aecedca28d60ce4591e968
  • Pointer size: 131 Bytes
  • Size of remote file: 467 kB
coral_images/Alveopora_spongiosa.jpg ADDED

Git LFS Details

  • SHA256: 7fa22f5ba25537755b50a24e03fe512b01531fcad024f8e605a15bfbc7bf197a
  • Pointer size: 131 Bytes
  • Size of remote file: 663 kB
coral_images/Astreopora_listeri.jpg ADDED

Git LFS Details

  • SHA256: 96c2faf2c565dcc4f6af75a627875efb8274e21c99f034b58d480e77371606c0
  • Pointer size: 131 Bytes
  • Size of remote file: 882 kB
coral_images/Euphyllia_paraancora.jpg ADDED

Git LFS Details

  • SHA256: f14a8ff201047ddee7dc8766628bdf2a9ee4ab1e5a4779765947a0d52bca1fe5
  • Pointer size: 132 Bytes
  • Size of remote file: 1.06 MB
coral_images/Euphyllia_paradivisa.jpg ADDED

Git LFS Details

  • SHA256: 10c783ec26004c13c8fd0250c74ce53a6d3bf45030b0fe6b710410867b894bf7
  • Pointer size: 131 Bytes
  • Size of remote file: 448 kB
coral_images/Favites_abdita.jpg ADDED

Git LFS Details

  • SHA256: 41a9ff860a93cbd3342ac5140a039237d0769652f0a9575843a4ddc64395cee2
  • Pointer size: 131 Bytes
  • Size of remote file: 885 kB
coral_images/Fungia_concinna.jpg ADDED

Git LFS Details

  • SHA256: e13a631a72ba2779dca0a622a7c9c3289af80b274069448dfa70496b17226b5b
  • Pointer size: 132 Bytes
  • Size of remote file: 1.11 MB
coral_images/Montipora_patula.jpg ADDED

Git LFS Details

  • SHA256: 18c2730a290d0323d30f730a55b00886b15016f2592509bbbd87ff4eb6b3f4b7
  • Pointer size: 131 Bytes
  • Size of remote file: 622 kB
coral_images/Montipora_saudii.jpg ADDED

Git LFS Details

  • SHA256: 2dbcbad0f4e79b44f1db5a4b19e39599886d3583b4addbed4403b400e96d3027
  • Pointer size: 131 Bytes
  • Size of remote file: 488 kB
coral_images/Montipora_saudii_2.jpg ADDED

Git LFS Details

  • SHA256: 51b6db33f97495dde6a3ba3e3805c8d19c857d94f8c01bb213d8a1bd928d3277
  • Pointer size: 131 Bytes
  • Size of remote file: 579 kB
coral_images/Oulastrea_crispata.jpg ADDED

Git LFS Details

  • SHA256: eb6b2b64fb25b8884b8c073c973db32f284ca8b11a56384ab0f78d01ff1669b3
  • Pointer size: 131 Bytes
  • Size of remote file: 531 kB
coral_images/Pocillopora_acuta.jpg ADDED

Git LFS Details

  • SHA256: 9d31141791dd69a9af6616f2e1747937a3b02e74e2634dd5094b5fa192f55ea2
  • Pointer size: 132 Bytes
  • Size of remote file: 1.1 MB
coral_images/Podabacia_lankaensis.jpg ADDED

Git LFS Details

  • SHA256: 224879ac6df2f53521cc744085f5d3546aadce79e3777a2da95232698c162d56
  • Pointer size: 132 Bytes
  • Size of remote file: 1.55 MB
coral_images/Turbinaria_heronensis.jpg ADDED

Git LFS Details

  • SHA256: 34bb4d8e589a0655779af550caacc8676bb9494478aaf02c17a22dc3cf1da3dd
  • Pointer size: 131 Bytes
  • Size of remote file: 379 kB
coral_images/porities_lobata.jpg ADDED

Git LFS Details

  • SHA256: 1590e4dc33b00713f6297c3bc7f2bd3269be07f683ac735519e710cdb9466813
  • Pointer size: 131 Bytes
  • Size of remote file: 755 kB
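
The SHA256 in each entry is the LFS object id (`oid`) recorded in the pointer file, i.e. the digest of the actual image bytes. A hedged sketch for verifying a pulled file against the digest listed above (path and hash copied from the `porities_lobata.jpg` entry):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file through SHA-256 so large images never sit fully in memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

expected = "1590e4dc33b00713f6297c3bc7f2bd3269be07f683ac735519e710cdb9466813"
assert sha256_of("coral_images/porities_lobata.jpg") == expected
```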