tanganke committed
Commit 1ea59a2
Parent(s): 3a57cb1

Upload folder using huggingface_hub

Files changed (3):
  1. README.md +44 -0
  2. config.json +23 -0
  3. model.safetensors +3 -0
README.md ADDED
@@ -0,0 +1,44 @@
+ ---
+ base_model:
+ - openai/clip-vit-base-patch32
+ datasets:
+ - nateraw/rendered-sst2
+ metrics:
+ - accuracy
+ ---
+
+ # Model Card
+
+ ## Model Details
+
+ - Architecture: ViT-Base with patch size 32
+ - Training Data: rendered-sst2 dataset
+
+ ## Training Details
+
+ Trained with the Adam optimizer at a constant learning rate of 1e-5 for 4000 steps (batch_size=32); a minimal sketch follows this diff.
+ Only the vision encoder is fine-tuned; the text encoder is left frozen.
+
+ ## Evaluation Results
+
+ - pre-trained accuracy: 0.5857
+ - fine-tuned accuracy: 0.6964
+
+ ## Usage
+
+ Load the fine-tuned vision encoder:
+
+ ```python
+ from transformers import CLIPVisionModel
+
+ vision_model = CLIPVisionModel.from_pretrained('tanganke/clip-vit-base-patch32_rendered-sst2')
+ ```
+
+ Substitute it for the vision encoder of a full CLIP model:
+
+ ```python
+ from transformers import CLIPModel
+
+ clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
+ clip_model.vision_model.load_state_dict(vision_model.vision_model.state_dict())
+ ```
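
The commit does not include the training script, so the following is only a hypothetical reconstruction of the recipe described in the model card's Training Details (Adam, constant lr 1e-5, 4000 steps, batch size 32, vision encoder only). The contrastive objective against fixed class-prompt text features, the prompt wording, and the rendered-sst2 column names (`image`, `label`) are assumptions for illustration, not part of this repository.

```python
# Hypothetical sketch of the "Training Details" recipe; not the author's script.
import torch
from datasets import load_dataset
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Freeze everything, then unfreeze only the vision encoder.
for p in model.parameters():
    p.requires_grad = False
for p in model.vision_model.parameters():
    p.requires_grad = True

optimizer = torch.optim.Adam(model.vision_model.parameters(), lr=1e-5)

# Class prompts for binary sentiment (assumed); text features are computed
# once with the frozen text encoder and reused as a fixed classification head.
prompts = ["a negative review of a movie.", "a positive review of a movie."]
text_inputs = processor(text=prompts, return_tensors="pt", padding=True)
with torch.no_grad():
    text_features = model.get_text_features(**text_inputs)
    text_features = text_features / text_features.norm(dim=-1, keepdim=True)

# Assumed dataset columns: "image" (PIL image) and "label" (0/1).
dataset = load_dataset("nateraw/rendered-sst2", split="train")

def collate(batch):
    pixel_values = processor(images=[ex["image"] for ex in batch], return_tensors="pt")["pixel_values"]
    labels = torch.tensor([ex["label"] for ex in batch])
    return pixel_values, labels

loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True, collate_fn=collate)

step = 0
while step < 4000:  # constant learning rate, 4000 steps
    for pixel_values, labels in loader:
        image_features = model.get_image_features(pixel_values=pixel_values)
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)
        # Cross-entropy over image-to-prompt similarities, scaled by CLIP's logit scale.
        logits = model.logit_scale.exp() * image_features @ text_features.t()
        loss = torch.nn.functional.cross_entropy(logits, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        step += 1
        if step >= 4000:
            break
```
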
config.json ADDED
@@ -0,0 +1,23 @@
+ {
+   "_name_or_path": "results/temp/",
+   "architectures": [
+     "CLIPVisionModel"
+   ],
+   "attention_dropout": 0.0,
+   "dropout": 0.0,
+   "hidden_act": "quick_gelu",
+   "hidden_size": 768,
+   "image_size": 224,
+   "initializer_factor": 1.0,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-05,
+   "model_type": "clip_vision_model",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 32,
+   "projection_dim": 512,
+   "torch_dtype": "float32",
+   "transformers_version": "4.39.1"
+ }
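
For reference, this config can be loaded directly with transformers to confirm the architecture hyperparameters above. A small sanity-check snippet, assuming the repo id given in the README:

```python
from transformers import CLIPVisionConfig, CLIPVisionModel

config = CLIPVisionConfig.from_pretrained("tanganke/clip-vit-base-patch32_rendered-sst2")
print(config.hidden_size, config.patch_size, config.num_hidden_layers)  # 768 32 12

# Building from the config alone yields the right architecture with randomly
# initialized weights; use from_pretrained to load the fine-tuned ones instead.
model = CLIPVisionModel(config)
```
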
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1eb80c18ede1b025c857ac5d68e09c85e37b4b675ebed3c245cdde0c8e228fef
+ size 349847824
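
This is a Git LFS pointer, not the weights themselves; the Hub serves the real file on download. As a hedged sketch, the downloaded file can be checked against the pointer's sha256 and size:

```python
import hashlib
import os

from huggingface_hub import hf_hub_download

# Repo id taken from the README above.
path = hf_hub_download("tanganke/clip-vit-base-patch32_rendered-sst2", "model.safetensors")

# Hash the file in 1 MiB chunks to avoid loading ~350 MB at once.
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

print(os.path.getsize(path))  # expected: 349847824
print(sha256.hexdigest())     # expected: the oid in the pointer above
```
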