        ...<5 lines>...
        timeout=timeout,
    )
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 286, in _request_wrapper
    response = _request_wrapper(
        method=method,
        ...<2 lines>...
        **params,
    )
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/file_download.py", line 310, in _request_wrapper
    hf_raise_for_status(response)
    ~~~~~~~~~~~~~~~~~~~^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/750b6ec382b5be28/lib/python3.13/site-packages/huggingface_hub/utils/_http.py", line 420, in hf_raise_for_status
    raise _format(EntryNotFoundError, message, response) from e
huggingface_hub.errors.EntryNotFoundError: 404 Client Error. (Request ID: Root=1-6895ca0d-3fda83bb49f04b595769150f;9291fb74-2aac-4bee-abda-9269c198532b)

Entry Not Found for url: https://huggingface.co/X-Omni/X-Omni-En/resolve/main/model_index.json.
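
The request for model_index.json is characteristic of diffusers' DiffusionPipeline loader, and the 404 means the X-Omni/X-Omni-En repo ships no such file at its root, so it cannot be loaded as a standard diffusers pipeline. A minimal diagnostic sketch (not part of the original script), using the standard huggingface_hub helper, to see what the repo actually contains:

```python
# Sketch: list the repo's files before deciding how to load it. The absence
# of model_index.json in this listing is exactly what triggered the 404 above.
from huggingface_hub import list_repo_files

for name in list_repo_files("X-Omni/X-Omni-En"):
    print(name)
```
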
Traceback (most recent call last):
  File "/tmp/YannQi_R-4B_0CLEfKc.py", line 13, in <module>
    pipe = pipeline("image-text-to-text", model="YannQi/R-4B", trust_remote_code=True)
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1028, in pipeline
    framework, model = infer_framework_load_model(
                       ~~~~~~~~~~~~~~~~~~~~~~~~~~^
        adapter_path if adapter_path is not None else model,
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
        ...<5 lines>...
        **model_kwargs,
        ^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/pipelines/base.py", line 333, in infer_framework_load_model
    raise ValueError(
        f"Could not load model {model} with any of the following classes: {class_tuple}. See the original errors:\n\n{error}\n"
    )
ValueError: Could not load model YannQi/R-4B with any of the following classes: (<class 'transformers.models.auto.modeling_auto.AutoModelForImageTextToText'>,). See the original errors:

while loading with AutoModelForImageTextToText, an error is thrown:
Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/pipelines/base.py", line 293, in infer_framework_load_model
    model = model_class.from_pretrained(model, **kwargs)
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 607, in from_pretrained
    raise ValueError(
        ...<2 lines>...
    )
ValueError: Unrecognized configuration class <class 'transformers_modules.YannQi.R-4B.0fbbf22db6867d9ac260d8181ac655fd915c9415.configuration_r.RConfig'> for this kind of AutoModel: AutoModelForImageTextToText.
Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, Florence2Config, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, Glm4vMoeConfig, GotOcr2Config, IdeficsConfig, Idefics2Config, Idefics3Config, InstructBlipConfig, InternVLConfig, JanusConfig, Kosmos2Config, Kosmos2_5Config, Llama4Config, LlavaConfig, LlavaNextConfig, LlavaNextVideoConfig, LlavaOnevisionConfig, Mistral3Config, MllamaConfig, Ovis2Config, PaliGemmaConfig, PerceptionLMConfig, Pix2StructConfig, PixtralVisionConfig, Qwen2_5_VLConfig, Qwen2VLConfig, ShieldGemma2Config, SmolVLMConfig, UdopConfig, VipLlavaConfig, VisionEncoderDecoderConfig.

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/pipelines/base.py", line 311, in infer_framework_load_model
    model = model_class.from_pretrained(model, **fp32_kwargs)
  File "/tmp/.cache/uv/environments-v2/5c6b903e588f7f65/lib/python3.13/site-packages/transformers/models/auto/auto_factory.py", line 607, in from_pretrained
    raise ValueError(
        ...<2 lines>...
    )
ValueError: Unrecognized configuration class <class 'transformers_modules.YannQi.R-4B.0fbbf22db6867d9ac260d8181ac655fd915c9415.configuration_r.RConfig'> for this kind of AutoModel: AutoModelForImageTextToText.
Model type should be one of AriaConfig, AyaVisionConfig, BlipConfig, Blip2Config, ChameleonConfig, Cohere2VisionConfig, DeepseekVLConfig, DeepseekVLHybridConfig, Emu3Config, EvollaConfig, Florence2Config, FuyuConfig, Gemma3Config, Gemma3nConfig, GitConfig, Glm4vConfig, Glm4vMoeConfig, GotOcr2Config, IdeficsConfig, Idefics2Config, Idefics3Config, InstructBlipConfig, InternVLConfig, JanusConfig, Kosmos2Config, Kosmos2_5Config, Llama4Config, LlavaConfig, LlavaNextConfig, LlavaNextVideoConfig, LlavaOnevisionConfig, Mistral3Config, MllamaConfig, Ovis2Config, PaliGemmaConfig, PerceptionLMConfig, Pix2StructConfig, PixtralVisionConfig, Qwen2_5_VLConfig, Qwen2VLConfig, ShieldGemma2Config, SmolVLMConfig, UdopConfig, VipLlavaConfig, VisionEncoderDecoderConfig.
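
Note that trust_remote_code=True is not the problem here: the custom code was fetched, but the "image-text-to-text" pipeline dispatches through AutoModelForImageTextToText, whose config-to-class mapping has no entry for the repo's custom RConfig. A hedged workaround sketch, assuming the repo's auto_map in config.json routes its model class through the generic AutoModel remote-code path (worth confirming against the model card):

```python
# Sketch, not a confirmed fix: bypass the pipeline's auto-class dispatch and
# load the custom model and processor directly via the remote-code path.
from transformers import AutoModel, AutoProcessor

# Assumes the repo's auto_map registers a model class for AutoModel.
model = AutoModel.from_pretrained("YannQi/R-4B", trust_remote_code=True)
processor = AutoProcessor.from_pretrained("YannQi/R-4B", trust_remote_code=True)
```
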
Everything was good in YannQi_R-4B_1.txt

Traceback (most recent call last):
  File "/tmp/allenai_olmOCR-7B-0725_0PHqJbH.py", line 13, in <module>
    pipe = pipeline("image-to-text", model="allenai/olmOCR-7B-0725")
  File "/tmp/.cache/uv/environments-v2/529bdc2bdc6fbbc1/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1090, in pipeline
    raise e
  File "/tmp/.cache/uv/environments-v2/529bdc2bdc6fbbc1/lib/python3.13/site-packages/transformers/pipelines/__init__.py", line 1085, in pipeline
    image_processor = AutoImageProcessor.from_pretrained(
        image_processor, _from_pipeline=task, **hub_kwargs, **model_kwargs
    )
  File "/tmp/.cache/uv/environments-v2/529bdc2bdc6fbbc1/lib/python3.13/site-packages/transformers/models/auto/image_processing_auto.py", line 597, in from_pretrained
    return image_processor_class.from_dict(config_dict, **kwargs)
           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/529bdc2bdc6fbbc1/lib/python3.13/site-packages/transformers/utils/import_utils.py", line 2116, in __getattribute__
    requires_backends(cls, cls._backends)
    ~~~~~~~~~~~~~~~~~^^^^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/529bdc2bdc6fbbc1/lib/python3.13/site-packages/transformers/utils/import_utils.py", line 2102, in requires_backends
    raise ImportError("".join(failed))
ImportError:
Qwen2VLImageProcessor requires the PIL library but it was not found in your environment. You can install it with pip:
`pip install pillow`. Please note that you may need to restart your runtime after installation.
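
This one is exactly what the message says: the Qwen2-VL image processor needs Pillow, and the environment was built without it. A minimal retry sketch after installing the missing backend with `pip install pillow`:

```python
# After `pip install pillow` in the same environment, the requires_backends()
# check passes. The PIL import is only a fail-fast sanity check.
from PIL import Image  # noqa: F401
from transformers import pipeline

pipe = pipeline("image-to-text", model="allenai/olmOCR-7B-0725")
```
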
Traceback (most recent call last):
  File "/tmp/allenai_olmOCR-7B-0725_1o6KaGZ.py", line 13, in <module>
    processor = AutoProcessor.from_pretrained("allenai/olmOCR-7B-0725")
  File "/tmp/.cache/uv/environments-v2/b27882d3da9f988d/lib/python3.13/site-packages/transformers/models/auto/processing_auto.py", line 385, in from_pretrained
    return processor_class.from_pretrained(
           ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^
        pretrained_model_name_or_path, trust_remote_code=trust_remote_code, **kwargs
        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
    )
    ^
  File "/tmp/.cache/uv/environments-v2/b27882d3da9f988d/lib/python3.13/site-packages/transformers/processing_utils.py", line 1310, in from_pretrained
    args = cls._get_arguments_from_pretrained(pretrained_model_name_or_path, **kwargs)
  File "/tmp/.cache/uv/environments-v2/b27882d3da9f988d/lib/python3.13/site-packages/transformers/processing_utils.py", line 1369, in _get_arguments_from_pretrained
    args.append(attribute_class.from_pretrained(pretrained_model_name_or_path, **kwargs))
                ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  File "/tmp/.cache/uv/environments-v2/b27882d3da9f988d/lib/python3.13/site-packages/transformers/utils/import_utils.py", line 2116, in __getattribute__
    requires_backends(cls, cls._backends)
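
The log is cut off here, but it has reached the same requires_backends() check as the previous run, which is consistent with the same root cause: Pillow missing when AutoProcessor builds the image processor. Assuming that, the same `pip install pillow` remedy should unblock the call:

```python
# Hedged assumption: same missing-Pillow failure as above, so install Pillow
# first, then this line from the original script should succeed.
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("allenai/olmOCR-7B-0725")
```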