hf-transformers-bot committed on
Commit 9c3f5d7
1 Parent(s): fb0e6ac

Upload tiny models for MobileNetV2Model

Files changed (3)
  1. config.json +23 -0
  2. preprocessor_config.json +26 -0
  3. pytorch_model.bin +3 -0
config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "architectures": [
+     "MobileNetV2Model"
+   ],
+   "classifier_dropout_prob": 0.1,
+   "depth_divisible_by": 8,
+   "depth_multiplier": 0.25,
+   "expand_ratio": 6,
+   "finegrained_output": true,
+   "first_layer_is_expansion": true,
+   "hidden_act": "relu6",
+   "image_size": 32,
+   "initializer_range": 0.02,
+   "layer_norm_eps": 0.001,
+   "min_depth": 8,
+   "model_type": "mobilenet_v2",
+   "num_channels": 3,
+   "output_stride": 32,
+   "semantic_loss_ignore_index": 255,
+   "tf_padding": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.28.0.dev0"
+ }
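For reference, the same tiny configuration can be rebuilt programmatically; a minimal sketch (not part of this commit), mirroring the values in config.json above:

# Rebuild the tiny MobileNetV2 configuration and a randomly initialised model.
from transformers import MobileNetV2Config, MobileNetV2Model

config = MobileNetV2Config(
    image_size=32,            # tiny 32x32 inputs instead of the usual 224
    depth_multiplier=0.25,    # shrinks channel counts for a tiny test model
    depth_divisible_by=8,
    min_depth=8,
    expand_ratio=6,
    output_stride=32,
    hidden_act="relu6",
    tf_padding=True,
    classifier_dropout_prob=0.1,
)

model = MobileNetV2Model(config)  # trained weights live in pytorch_model.bin below
print(model.config.model_type)    # "mobilenet_v2"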
preprocessor_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "crop_size": {
+     "height": 32,
+     "width": 32
+   },
+   "do_center_crop": true,
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "MobileNetV2ImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "shortest_edge": 32
+   }
+ }
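This preprocessor resizes to a 32-pixel shortest edge, center-crops to 32x32, rescales by 1/255 and normalizes with mean/std 0.5. A minimal sketch of applying it (not part of this commit), assuming preprocessor_config.json is saved in the current directory:

import numpy as np
from PIL import Image
from transformers import MobileNetV2ImageProcessor

# Load the processor from a local directory containing preprocessor_config.json
# (the "." path is an assumption for illustration).
processor = MobileNetV2ImageProcessor.from_pretrained(".")

# Dummy 64x64 RGB image; the processor resizes, crops, rescales and normalizes it.
image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # torch.Size([1, 3, 32, 32])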
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:904229a2a28580939036bd47b3971e854d51d53b85aa083688eec7ba05243a37
+ size 1092653
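The weights file is stored as a Git LFS pointer; together the three files form a loadable checkpoint. A minimal sketch of loading it from the Hub (the repository id below is an assumption, since the commit page does not show it):

import torch
from transformers import AutoImageProcessor, MobileNetV2Model

# Assumed repository id; substitute the actual repo this commit belongs to.
repo_id = "hf-internal-testing/tiny-random-MobileNetV2Model"

processor = AutoImageProcessor.from_pretrained(repo_id)
model = MobileNetV2Model.from_pretrained(repo_id)

# Random 32x32 RGB input, matching the tiny image_size in config.json.
pixel_values = torch.randn(1, 3, 32, 32)
with torch.no_grad():
    outputs = model(pixel_values=pixel_values)
print(outputs.last_hidden_state.shape)  # (batch, channels, height, width)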