Update with commit adc0ff25028d29af30386f2d7d3f85e290fbef57
Browse files. See: https://github.com/huggingface/transformers/commit/adc0ff25028d29af30386f2d7d3f85e290fbef57
- frameworks.json +1 -0
- pipeline_tags.json +2 -0
frameworks.json
CHANGED
@@ -13,6 +13,7 @@
|
|
13 |
{"model_type":"convbert","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
14 |
{"model_type":"convnext","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
|
15 |
{"model_type":"ctrl","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
|
|
16 |
{"model_type":"data2vec-audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
|
17 |
{"model_type":"data2vec-text","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
18 |
{"model_type":"data2vec-vision","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
|
|
|
13 |
{"model_type":"convbert","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
14 |
{"model_type":"convnext","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
|
15 |
{"model_type":"ctrl","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoTokenizer"}
|
16 |
+{"model_type":"cvt","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
|
17 |
{"model_type":"data2vec-audio","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoFeatureExtractor"}
|
18 |
{"model_type":"data2vec-text","pytorch":true,"tensorflow":false,"flax":false,"processor":"AutoTokenizer"}
|
19 |
{"model_type":"data2vec-vision","pytorch":true,"tensorflow":true,"flax":false,"processor":"AutoFeatureExtractor"}
|
pipeline_tags.json
CHANGED
@@ -66,6 +66,8 @@
|
|
66 |
{"model_class":"ConvBertModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
67 |
{"model_class":"ConvNextForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
|
68 |
{"model_class":"ConvNextModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
|
|
|
|
69 |
{"model_class":"DPRQuestionEncoder","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
70 |
{"model_class":"DPTModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
71 |
{"model_class":"Data2VecAudioForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
|
|
|
66 |
{"model_class":"ConvBertModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
67 |
{"model_class":"ConvNextForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
|
68 |
{"model_class":"ConvNextModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
69 |
+{"model_class":"CvtForImageClassification","pipeline_tag":"image-classification","auto_class":"AutoModelForImageClassification"}
|
70 |
+{"model_class":"CvtModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
71 |
{"model_class":"DPRQuestionEncoder","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
72 |
{"model_class":"DPTModel","pipeline_tag":"feature-extraction","auto_class":"AutoModel"}
|
73 |
{"model_class":"Data2VecAudioForAudioFrameClassification","pipeline_tag":"audio-frame-classification","auto_class":"AutoModelForAudioFrameClassification"}
|