Update with commit f7c618e3b0652a6a0f6e435c404288fc38d18dda

See: https://github.com/huggingface/transformers/commit/f7c618e3b0652a6a0f6e435c404288fc38d18dda

- frameworks.json +1 -1
- pipeline_tags.json +1 -0
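For context, a hedged sketch of how these auto-generated files can be read back. The assumption (not stated on this page) is that they live in the huggingface/transformers-metadata dataset repo and are stored as JSON Lines, one record per line, exactly as the diffs below show:

import json

from huggingface_hub import hf_hub_download

# Assumption: the files are published in the auto-updated
# huggingface/transformers-metadata dataset repo as JSON Lines.
path = hf_hub_download(
    repo_id="huggingface/transformers-metadata",
    filename="frameworks.json",
    repo_type="dataset",
)

with open(path) as f:
    entries = [json.loads(line) for line in f if line.strip()]

# Look up the record this commit touches.
vtde = next(e for e in entries if e["model_type"] == "vision-text-dual-encoder")
print(vtde["tensorflow"])  # True after this commit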
frameworks.json CHANGED

@@ -146,7 +146,7 @@
 {"model_type":"videomae","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
 {"model_type":"vilt","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoProcessor"}
 {"model_type":"vision-encoder-decoder","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoTokenizer"}
-{"model_type":"vision-text-dual-encoder","pytorch":true,"tensorflow":false,"flax":true,"processor":"AutoProcessor"}
+{"model_type":"vision-text-dual-encoder","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoProcessor"}
 {"model_type":"visual_bert","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"vit","pytorch":true,"tensorflow":true,"flax":true,"processor":"AutoFeatureExtractor"}
 {"model_type":"vit_hybrid","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
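The flipped tensorflow flag corresponds to the TF port of this model added by the linked commit. A minimal sketch of what it enables, assuming a transformers release that contains commit f7c618e; both encoder checkpoints below are illustrative, not taken from this commit:

import tensorflow as tf
from transformers import TFVisionTextDualEncoderModel

# Build a CLIP-style dual encoder from separate vision and text
# checkpoints (illustrative names).
model = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
    "google/vit-base-patch16-224",
    "bert-base-uncased",
)

# Dummy inputs just to exercise the TF forward pass.
pixel_values = tf.random.uniform((1, 3, 224, 224))
input_ids = tf.constant([[101, 2023, 2003, 1037, 3231, 102]])
outputs = model(input_ids=input_ids, pixel_values=pixel_values)
print(outputs.logits_per_image.shape)  # (1, 1): one image scored against one text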
pipeline_tags.json CHANGED

@@ -770,6 +770,7 @@
 {"model_class":"TFViTMAEForPreTraining","pipeline_tag":"pretraining","auto_class":"TF_AutoModelForPreTraining"}
 {"model_class":"TFViTMAEModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
 {"model_class":"TFViTModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
+{"model_class":"TFVisionTextDualEncoderModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
 {"model_class":"TFWav2Vec2Model","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
 {"model_class":"TFWhisperForConditionalGeneration","pipeline_tag":"automatic-speech-recognition","auto_class":"TF_AutoModelForSpeechSeq2Seq"}
 {"model_class":"TFWhisperModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
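The added row maps the new TFVisionTextDualEncoderModel class to the feature-extraction pipeline tag and to the TF_AutoModel auto class (TFAutoModel in code). A hedged sketch of what that mapping means for loading; the repo id here is purely hypothetical:

from transformers import TFAutoModel

# Per the new metadata row, checkpoints with this architecture resolve
# through TFAutoModel; "my-org/my-vision-text-dual-encoder" is hypothetical.
model = TFAutoModel.from_pretrained("my-org/my-vision-text-dual-encoder")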