Ivanrs committed (verified)
Commit a8bb6a0 · 1 parent: 6bbd617

vit-base-kidney-stone-Michel_Daudon_-w256_1k_v1-_SUR

README.md CHANGED
@@ -1,4 +1,5 @@
 ---
+library_name: transformers
 license: apache-2.0
 base_model: google/vit-base-patch16-224-in21k
 tags:
@@ -25,30 +26,31 @@ model-index:
   metrics:
   - name: Accuracy
     type: accuracy
-    value: 0.6966475878986099
+    value: 0.7367130008176614
   - name: Precision
     type: precision
-    value: 0.7395056708514637
+    value: 0.7595070859879842
   - name: Recall
     type: recall
-    value: 0.6966475878986099
+    value: 0.7367130008176614
   - name: F1
     type: f1
-    value: 0.7121074355920559
+    value: 0.7402764231487609
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
 should probably proofread and complete it, then remove this comment. -->
 
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/cv-inside/vit-base-kidney-stone/runs/6e597kni)
 # vit-base-kidney-stone-Michel_Daudon_-w256_1k_v1-_SUR
 
 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.9000
-- Accuracy: 0.6966
-- Precision: 0.7395
-- Recall: 0.6966
-- F1: 0.7121
+- Loss: 0.8522
+- Accuracy: 0.7367
+- Precision: 0.7595
+- Recall: 0.7367
+- F1: 0.7403
 
 ## Model description
 
@@ -71,21 +73,21 @@ The following hyperparameters were used during training:
 - train_batch_size: 32
 - eval_batch_size: 8
 - seed: 42
-- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
 - lr_scheduler_type: linear
 - num_epochs: 1
 - mixed_precision_training: Native AMP
 
 ### Training results
 
-| Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1     |
-|:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
-| 0.0973        | 0.67  | 100  | 0.9000          | 0.6966   | 0.7395    | 0.6966 | 0.7121 |
+| Training Loss | Epoch  | Step | Validation Loss | Accuracy | Precision | Recall | F1     |
+|:-------------:|:------:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|
+| 0.117         | 0.6667 | 100  | 0.8522          | 0.7367   | 0.7595    | 0.7367 | 0.7403 |
 
 
 ### Framework versions
 
-- Transformers 4.37.2
-- Pytorch 2.1.1
-- Datasets 3.1.0
-- Tokenizers 0.15.2
+- Transformers 4.48.2
+- Pytorch 2.6.0+cu126
+- Datasets 3.2.0
+- Tokenizers 0.21.0
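For quick orientation, the updated card describes a standard image-classification checkpoint. A minimal inference sketch follows; the repo id `Ivanrs/vit-base-kidney-stone-Michel_Daudon_-w256_1k_v1-_SUR` is an assumption pieced together from the commit author and model name, and `patch.jpg` is a hypothetical input image:

```python
# Minimal inference sketch; the repo id and input file are assumptions.
from transformers import pipeline

classifier = pipeline(
    "image-classification",
    model="Ivanrs/vit-base-kidney-stone-Michel_Daudon_-w256_1k_v1-_SUR",
)
print(classifier("patch.jpg"))  # list of {label, score} dicts for the stone classes
```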
all_results.json CHANGED
@@ -1,15 +1,16 @@
 {
     "epoch": 1.0,
-    "eval_accuracy": 0.6966475878986099,
-    "eval_f1": 0.7121074355920559,
-    "eval_loss": 0.9000147581100464,
-    "eval_precision": 0.7395056708514637,
-    "eval_recall": 0.6966475878986099,
-    "eval_runtime": 21.0464,
-    "eval_samples_per_second": 58.11,
-    "eval_steps_per_second": 7.27,
-    "train_loss": 0.2848571586608887,
-    "train_runtime": 93.0462,
-    "train_samples_per_second": 51.587,
-    "train_steps_per_second": 1.612
+    "eval_accuracy": 0.7367130008176614,
+    "eval_f1": 0.7402764231487609,
+    "eval_loss": 0.8521540760993958,
+    "eval_precision": 0.7595070859879842,
+    "eval_recall": 0.7367130008176614,
+    "eval_runtime": 8.8089,
+    "eval_samples_per_second": 138.836,
+    "eval_steps_per_second": 17.369,
+    "total_flos": 3.71974885244928e+17,
+    "train_loss": 0.27923648834228515,
+    "train_runtime": 74.1686,
+    "train_samples_per_second": 64.717,
+    "train_steps_per_second": 2.022
 }
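In both the old and new numbers, eval_recall is identical to eval_accuracy; that identity holds whenever multiclass recall is averaged with class-support weights, so these metrics were very likely computed with weighted averaging. A sketch of such a compute_metrics hook (the hook itself is an assumption; only the metric names come from these logs):

```python
# Hypothetical Trainer compute_metrics hook matching the logged metric names.
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support

def compute_metrics(eval_pred):
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    # With average="weighted", recall reduces to plain accuracy, as in the logs.
    precision, recall, f1, _ = precision_recall_fscore_support(
        labels, preds, average="weighted"
    )
    return {"accuracy": accuracy_score(labels, preds),
            "precision": precision, "recall": recall, "f1": f1}
```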
config.json CHANGED
@@ -36,5 +36,5 @@
     "problem_type": "single_label_classification",
     "qkv_bias": true,
     "torch_dtype": "float32",
-    "transformers_version": "4.37.2"
+    "transformers_version": "4.48.2"
 }
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2402e84ac1d3376154cdb20eb7136f7a480c89215744196d27845b1c0dcc393c
+oid sha256:f4322113c6a295dbef1d54f4a14f71dc9b802a1413719b1812f4add0a69d2a3a
 size 343236280
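Only the LFS pointer changes here: the payload stays 343,236,280 bytes, but the retrained weights get a new sha256 oid. A local copy can be checked against the pointer like this:

```python
# Verify a downloaded model.safetensors against the LFS pointer's sha256 oid.
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            digest.update(chunk)
    return digest.hexdigest()

expected = "f4322113c6a295dbef1d54f4a14f71dc9b802a1413719b1812f4add0a69d2a3a"
assert sha256_of("model.safetensors") == expected
```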
preprocessor_config.json CHANGED
@@ -1,4 +1,5 @@
 {
+    "do_convert_rgb": null,
     "do_normalize": true,
     "do_rescale": true,
     "do_resize": true,
test_results.json CHANGED
@@ -1,11 +1,11 @@
 {
     "epoch": 1.0,
-    "eval_accuracy": 0.6966475878986099,
-    "eval_f1": 0.7121074355920559,
-    "eval_loss": 0.9000147581100464,
-    "eval_precision": 0.7395056708514637,
-    "eval_recall": 0.6966475878986099,
-    "eval_runtime": 21.0464,
-    "eval_samples_per_second": 58.11,
-    "eval_steps_per_second": 7.27
+    "eval_accuracy": 0.7367130008176614,
+    "eval_f1": 0.7402764231487609,
+    "eval_loss": 0.8521540760993958,
+    "eval_precision": 0.7595070859879842,
+    "eval_recall": 0.7367130008176614,
+    "eval_runtime": 8.8089,
+    "eval_samples_per_second": 138.836,
+    "eval_steps_per_second": 17.369
 }
train_results.json CHANGED
@@ -1,7 +1,8 @@
 {
     "epoch": 1.0,
-    "train_loss": 0.2848571586608887,
-    "train_runtime": 93.0462,
-    "train_samples_per_second": 51.587,
-    "train_steps_per_second": 1.612
+    "total_flos": 3.71974885244928e+17,
+    "train_loss": 0.27923648834228515,
+    "train_runtime": 74.1686,
+    "train_samples_per_second": 64.717,
+    "train_steps_per_second": 2.022
 }
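The new throughput figures are internally consistent: 150 optimizer steps over the 74.17 s run reproduce the logged steps per second, and multiplying by the train_batch_size of 32 from the README reproduces the samples per second:

```python
# Sanity-check the throughput values reported in train_results.json.
steps, runtime_s, batch_size = 150, 74.1686, 32
print(round(steps / runtime_s, 3))               # 2.022  == train_steps_per_second
print(round(steps * batch_size / runtime_s, 3))  # 64.717 == train_samples_per_second
```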
trainer_state.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "best_metric": 0.9000147581100464,
+  "best_metric": 0.8521540760993958,
   "best_model_checkpoint": "vit-base-kidney-stone-Michel_Daudon_-w256_1k_v1-_SUR\\checkpoint-100",
   "epoch": 1.0,
   "eval_steps": 100,
@@ -9,205 +9,235 @@
   "is_world_process_zero": true,
   "log_history": [
     {
-      "epoch": 0.03,
+      "epoch": 0.03333333333333333,
+      "grad_norm": 1.9283610582351685,
       "learning_rate": 0.00019333333333333333,
-      "loss": 1.6122,
+      "loss": 1.6434,
       "step": 5
     },
     {
-      "epoch": 0.07,
+      "epoch": 0.06666666666666667,
+      "grad_norm": 2.5478172302246094,
       "learning_rate": 0.0001866666666666667,
-      "loss": 1.1761,
+      "loss": 1.251,
       "step": 10
     },
     {
       "epoch": 0.1,
+      "grad_norm": 1.469027042388916,
       "learning_rate": 0.00018,
-      "loss": 0.8586,
+      "loss": 0.9211,
       "step": 15
     },
     {
-      "epoch": 0.13,
+      "epoch": 0.13333333333333333,
+      "grad_norm": 1.6149502992630005,
       "learning_rate": 0.00017333333333333334,
-      "loss": 0.6213,
+      "loss": 0.6539,
       "step": 20
     },
     {
-      "epoch": 0.17,
+      "epoch": 0.16666666666666666,
+      "grad_norm": 2.1767466068267822,
       "learning_rate": 0.0001666666666666667,
-      "loss": 0.4837,
+      "loss": 0.5089,
       "step": 25
     },
     {
       "epoch": 0.2,
+      "grad_norm": 2.2109217643737793,
       "learning_rate": 0.00016,
-      "loss": 0.4179,
+      "loss": 0.3865,
       "step": 30
     },
     {
-      "epoch": 0.23,
+      "epoch": 0.23333333333333334,
+      "grad_norm": 1.071829080581665,
       "learning_rate": 0.00015333333333333334,
-      "loss": 0.3188,
+      "loss": 0.3182,
       "step": 35
     },
     {
-      "epoch": 0.27,
+      "epoch": 0.26666666666666666,
+      "grad_norm": 4.0049943923950195,
       "learning_rate": 0.00014666666666666666,
-      "loss": 0.2558,
+      "loss": 0.3118,
       "step": 40
     },
     {
       "epoch": 0.3,
+      "grad_norm": 1.0740928649902344,
       "learning_rate": 0.00014,
-      "loss": 0.2618,
+      "loss": 0.2968,
       "step": 45
     },
     {
-      "epoch": 0.33,
+      "epoch": 0.3333333333333333,
+      "grad_norm": 0.5208770036697388,
       "learning_rate": 0.00013333333333333334,
-      "loss": 0.2922,
+      "loss": 0.1959,
       "step": 50
     },
     {
-      "epoch": 0.37,
+      "epoch": 0.36666666666666664,
+      "grad_norm": 1.7292317152023315,
       "learning_rate": 0.00012666666666666666,
-      "loss": 0.2404,
+      "loss": 0.1556,
       "step": 55
     },
     {
       "epoch": 0.4,
+      "grad_norm": 1.2045090198516846,
       "learning_rate": 0.00012,
-      "loss": 0.1868,
+      "loss": 0.1415,
       "step": 60
     },
     {
-      "epoch": 0.43,
+      "epoch": 0.43333333333333335,
+      "grad_norm": 0.33121228218078613,
       "learning_rate": 0.00011333333333333334,
-      "loss": 0.119,
+      "loss": 0.1212,
       "step": 65
     },
     {
-      "epoch": 0.47,
+      "epoch": 0.4666666666666667,
+      "grad_norm": 2.1612660884857178,
       "learning_rate": 0.00010666666666666667,
-      "loss": 0.1586,
+      "loss": 0.1637,
       "step": 70
     },
     {
       "epoch": 0.5,
+      "grad_norm": 0.30413818359375,
       "learning_rate": 0.0001,
-      "loss": 0.1398,
+      "loss": 0.116,
       "step": 75
     },
     {
-      "epoch": 0.53,
+      "epoch": 0.5333333333333333,
+      "grad_norm": 0.23730212450027466,
       "learning_rate": 9.333333333333334e-05,
-      "loss": 0.1169,
+      "loss": 0.0844,
       "step": 80
     },
     {
-      "epoch": 0.57,
+      "epoch": 0.5666666666666667,
+      "grad_norm": 3.734177827835083,
       "learning_rate": 8.666666666666667e-05,
-      "loss": 0.1334,
+      "loss": 0.093,
       "step": 85
     },
     {
       "epoch": 0.6,
+      "grad_norm": 1.837829828262329,
       "learning_rate": 8e-05,
-      "loss": 0.115,
+      "loss": 0.1055,
       "step": 90
     },
     {
-      "epoch": 0.63,
+      "epoch": 0.6333333333333333,
+      "grad_norm": 0.41846963763237,
       "learning_rate": 7.333333333333333e-05,
-      "loss": 0.1426,
+      "loss": 0.0708,
       "step": 95
     },
     {
-      "epoch": 0.67,
+      "epoch": 0.6666666666666666,
+      "grad_norm": 0.37256985902786255,
       "learning_rate": 6.666666666666667e-05,
-      "loss": 0.0973,
+      "loss": 0.117,
       "step": 100
     },
     {
-      "epoch": 0.67,
-      "eval_accuracy": 0.6966475878986099,
-      "eval_f1": 0.7121074355920559,
-      "eval_loss": 0.9000147581100464,
-      "eval_precision": 0.7395056708514637,
-      "eval_recall": 0.6966475878986099,
-      "eval_runtime": 20.3384,
-      "eval_samples_per_second": 60.132,
-      "eval_steps_per_second": 7.523,
+      "epoch": 0.6666666666666666,
+      "eval_accuracy": 0.7367130008176614,
+      "eval_f1": 0.7402764231487609,
+      "eval_loss": 0.8521540760993958,
+      "eval_precision": 0.7595070859879842,
+      "eval_recall": 0.7367130008176614,
+      "eval_runtime": 10.0946,
+      "eval_samples_per_second": 121.154,
+      "eval_steps_per_second": 15.157,
       "step": 100
     },
     {
       "epoch": 0.7,
+      "grad_norm": 1.851919412612915,
       "learning_rate": 6e-05,
-      "loss": 0.0894,
+      "loss": 0.1259,
       "step": 105
     },
     {
-      "epoch": 0.73,
+      "epoch": 0.7333333333333333,
+      "grad_norm": 0.20213304460048676,
       "learning_rate": 5.333333333333333e-05,
-      "loss": 0.0841,
+      "loss": 0.0944,
       "step": 110
     },
     {
-      "epoch": 0.77,
+      "epoch": 0.7666666666666667,
+      "grad_norm": 2.9062812328338623,
       "learning_rate": 4.666666666666667e-05,
-      "loss": 0.1361,
+      "loss": 0.0958,
       "step": 115
     },
     {
       "epoch": 0.8,
+      "grad_norm": 0.41974860429763794,
       "learning_rate": 4e-05,
-      "loss": 0.0673,
+      "loss": 0.0599,
       "step": 120
     },
     {
-      "epoch": 0.83,
+      "epoch": 0.8333333333333334,
+      "grad_norm": 0.17554304003715515,
       "learning_rate": 3.3333333333333335e-05,
-      "loss": 0.0614,
+      "loss": 0.057,
       "step": 125
     },
     {
-      "epoch": 0.87,
+      "epoch": 0.8666666666666667,
+      "grad_norm": 0.9086737036705017,
       "learning_rate": 2.6666666666666667e-05,
-      "loss": 0.062,
+      "loss": 0.0569,
       "step": 130
     },
     {
       "epoch": 0.9,
+      "grad_norm": 4.543064594268799,
       "learning_rate": 2e-05,
-      "loss": 0.0775,
+      "loss": 0.0609,
       "step": 135
     },
     {
-      "epoch": 0.93,
+      "epoch": 0.9333333333333333,
+      "grad_norm": 0.3038415312767029,
       "learning_rate": 1.3333333333333333e-05,
-      "loss": 0.1043,
+      "loss": 0.0565,
       "step": 140
     },
     {
-      "epoch": 0.97,
+      "epoch": 0.9666666666666667,
+      "grad_norm": 0.17050418257713318,
       "learning_rate": 6.666666666666667e-06,
-      "loss": 0.0585,
+      "loss": 0.0524,
       "step": 145
     },
     {
       "epoch": 1.0,
+      "grad_norm": 0.18213202059268951,
       "learning_rate": 0.0,
-      "loss": 0.0568,
+      "loss": 0.0612,
       "step": 150
     },
     {
       "epoch": 1.0,
       "step": 150,
       "total_flos": 3.71974885244928e+17,
-      "train_loss": 0.2848571586608887,
-      "train_runtime": 93.0462,
-      "train_samples_per_second": 51.587,
-      "train_steps_per_second": 1.612
+      "train_loss": 0.27923648834228515,
+      "train_runtime": 74.1686,
+      "train_samples_per_second": 64.717,
+      "train_steps_per_second": 2.022
     }
   ],
   "logging_steps": 5,
@@ -215,6 +245,18 @@
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
   "save_steps": 100,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": true
+      },
+      "attributes": {}
+    }
+  },
   "total_flos": 3.71974885244928e+17,
   "train_batch_size": 32,
   "trial_name": null,
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4e65756886f5d398824a6282f0add6486de2cefc2f866666dd6288860afed6eb
-size 4728
+oid sha256:7c23cc4ac9f96efe3810e18c145117c73b8e78af31fb6d7c567d0849333c8a95
+size 5432
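The size bump (4,728 → 5,432 bytes) most likely reflects the extra TrainingArguments fields added between Transformers 4.37 and 4.48. The file is a pickled TrainingArguments object, so inspecting it requires transformers to be importable and, on PyTorch 2.6 or newer, opting out of the weights-only loading default:

```python
# Inspect the pickled TrainingArguments stored in training_args.bin.
# weights_only=False is needed on PyTorch >= 2.6, where it defaults to True.
import torch

args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```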