{
  "_name_or_path": "openai/clip-vit-base-patch32",
  "architectures": [
    "CLIPModel"
  ],
  "date_fine_tuned": "2024-11-15 13:39:39.990813",
  "fine_tuned_by": "Quadrant Technologies",
  "fine_tuned_on": "Custom annotated based on Stanford Online Products Dataset",
  "fine_tuning_task": "Zero-Shot Image Classification for Content Safety",
  "initializer_factor": 1.0,
  "logit_scale_init_value": 2.6592,
  "model_type": "clip",
  "projection_dim": 512,
  "text_config": {
    "bos_token_id": 0,
    "dropout": 0.0,
    "eos_token_id": 2,
    "model_type": "clip_text_model"
  },
  "torch_dtype": "float32",
  "transformers_version": "4.45.2",
  "vision_config": {
    "dropout": 0.0,
    "model_type": "clip_vision_model"
  }
}