Update with commit 5ca131f3d44974be1949b7b080594a11d33c3b4e
See: https://github.com/huggingface/transformers/commit/5ca131f3d44974be1949b7b080594a11d33c3b4e
- frameworks.json +1 -1
- pipeline_tags.json +2 -0
frameworks.json
CHANGED
@@ -16,7 +16,7 @@
 {"model_type":"convbert","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"convnext","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
 {"model_type":"ctrl","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
-{"model_type":"cvt","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
+{"model_type":"cvt","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
 {"model_type":"data2vec-audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
 {"model_type":"data2vec-text","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
 {"model_type":"data2vec-vision","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
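The updated row flips the "tensorflow" flag for cvt to true, i.e. the metadata now advertises a TensorFlow implementation for CvT alongside PyTorch, preprocessed with AutoFeatureExtractor. A minimal sketch of what that implies for loading, assuming a transformers version that includes the TF CvT port from the linked commit; the checkpoint name "microsoft/cvt-13" is an illustrative assumption, not part of this diff:

from transformers import AutoFeatureExtractor, TFAutoModel

# "processor": "AutoFeatureExtractor" -> inputs are prepared with the auto feature extractor
feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/cvt-13")  # assumed checkpoint
# "tensorflow": true -> the TF auto class can now resolve a CvT checkpoint (to TFCvtModel)
tf_model = TFAutoModel.from_pretrained("microsoft/cvt-13")  # assumed checkpoint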
pipeline_tags.json
CHANGED
@@ -533,6 +533,8 @@
 {"model_class":"TFConvBertModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
 {"model_class":"TFConvNextForImageClassification","pipeline_tag":"image-classification","auto_class":"TF_AutoModelForImageClassification"}
 {"model_class":"TFConvNextModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
+{"model_class":"TFCvtForImageClassification","pipeline_tag":"image-classification","auto_class":"TF_AutoModelForImageClassification"}
+{"model_class":"TFCvtModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
 {"model_class":"TFDPRQuestionEncoder","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
 {"model_class":"TFData2VecVisionForImageClassification","pipeline_tag":"image-classification","auto_class":"TF_AutoModelForImageClassification"}
 {"model_class":"TFData2VecVisionModel","pipeline_tag":"feature-extraction","auto_class":"TF_AutoModel"}
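The two added rows register the new TF CvT classes with their auto classes and pipeline tags: TFCvtForImageClassification under image-classification and TFCvtModel under feature-extraction. A minimal sketch of the corresponding pipeline usage, assuming a transformers version with the TF CvT classes; the checkpoint name and image path are illustrative assumptions:

from transformers import pipeline

# "pipeline_tag": "image-classification" with "auto_class": "TF_AutoModelForImageClassification"
# means the TF image-classification pipeline resolves a CvT checkpoint to TFCvtForImageClassification.
classifier = pipeline(
    task="image-classification",
    model="microsoft/cvt-13",  # assumed checkpoint
    framework="tf",            # select the TensorFlow implementation
)
predictions = classifier("path/to/image.jpg")  # assumed local image path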