read-lfs-files
#1070 by alozowski (HF staff) - opened
backend/app/services/models.py CHANGED
```diff
@@ -14,7 +14,6 @@ import sys
 import contextlib
 from concurrent.futures import ThreadPoolExecutor
 import tempfile
-
 from app.config import (
     QUEUE_REPO,
     HF_TOKEN,
@@ -481,10 +480,16 @@ class ModelService(HuggingFaceService):
             raise Exception(error)
         logger.info(LogFormatter.success("Chat template validation passed"))
 
-
-        architectures = model_info.config.get("architectures", "")
-        if architectures:
-            architectures = ";".join(architectures)
+        try:
+            architectures = "unknown"
+            if hasattr(model_info, 'config') and model_info.config:
+                config_architectures = model_info.config.get("architectures", [])
+                if config_architectures:
+                    architectures = ";".join(config_architectures)
+            logger.info(LogFormatter.info(f"Model architectures: {architectures}"))
+        except Exception as e:
+            logger.warning(LogFormatter.warning(f"Could not determine architectures: {str(e)}"))
+            architectures = "unknown"
 
         # Create eval entry
         eval_entry = {
```
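The new architectures handling reads most clearly in isolation. Below is a minimal sketch of the same logic as a standalone helper, assuming `model_info` is a `huggingface_hub` `ModelInfo`-style object whose `config` attribute is the parsed config dict; the name `extract_architectures` and the plain `logging` calls are illustrative stand-ins for the repo's `LogFormatter`:

```python
import logging

logger = logging.getLogger(__name__)


def extract_architectures(model_info) -> str:
    """Join the config's architectures with ';', defaulting to 'unknown'."""
    try:
        architectures = "unknown"
        # Guard both a missing attribute and a None config; the removed
        # one-liner called .get() directly and could crash on models
        # without config metadata.
        if hasattr(model_info, "config") and model_info.config:
            config_architectures = model_info.config.get("architectures", [])
            if config_architectures:
                architectures = ";".join(config_architectures)
        return architectures
    except Exception as e:
        logger.warning("Could not determine architectures: %s", e)
        return "unknown"
```

The key differences from the removed lines are the missing/`None` config guard and the `"unknown"` fallback, so the eval entry always receives a usable string.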
backend/app/utils/model_validation.py CHANGED
```diff
@@ -1,10 +1,12 @@
 import json
 import logging
 import asyncio
+import aiohttp
 from typing import Tuple, Optional, Dict, Any
 from datasets import load_dataset
 from huggingface_hub import HfApi, ModelCard, hf_hub_download
 from huggingface_hub import hf_api
+from huggingface_hub.utils import build_hf_headers
 from transformers import AutoConfig, AutoTokenizer
 from app.config.base import HF_TOKEN
 from app.config.hf_config import OFFICIAL_PROVIDERS_REPO
@@ -176,15 +178,35 @@ class ModelValidator:
     ) -> Tuple[bool, Optional[str], Optional[Any]]:
         """Check if model exists and is properly configured on the Hub"""
         try:
-            config = await asyncio.to_thread(
-                AutoConfig.from_pretrained,
-                model_name,
-                revision=revision,
-                trust_remote_code=trust_remote_code,
-                token=self.token,
-                force_download=True
-            )
-
+            # First try regular API approach
+            try:
+                config = await asyncio.to_thread(
+                    AutoConfig.from_pretrained,
+                    model_name,
+                    revision=revision,
+                    trust_remote_code=trust_remote_code,
+                    token=self.token,
+                    force_download=True
+                )
+            except Exception as e:
+                logger.info(f"Standard config loading failed, attempting LFS fallback: {str(e)}")
+
+                # Fallback to direct LFS file access
+                async with aiohttp.ClientSession() as session:
+                    config_url = f"https://huggingface.co/{model_name}/raw/{revision}/config.json"
+                    headers = build_hf_headers(token=self.token)
+
+                    async with session.get(config_url, headers=headers) as response:
+                        if response.status == 200:
+                            config_json = await response.json()
+                            logger.info("Successfully loaded config.json from LFS")
+                            config = AutoConfig.from_dict(config_json)
+                        else:
+                            error_msg = f"Failed to load config.json (Status {response.status})"
+                            logger.error(error_msg)
+                            raise Exception(error_msg)
+
+            # Test tokenizer if requested
             if test_tokenizer:
                 try:
                     await asyncio.to_thread(
@@ -198,9 +220,18 @@ class ModelValidator:
                     return False, f"The tokenizer is not available in an official Transformers release: {e}", None
                 except Exception:
                     return False, "The tokenizer cannot be loaded. Ensure the tokenizer class is part of a stable Transformers release and correctly configured.", None
-
+
             return True, None, config
-
+
+        except ValueError as e:
+            if "trust_remote_code" in str(e):
+                return False, "The model requires `trust_remote_code=True` to launch, and for safety reasons, we don't accept such models automatically.", None
+            return False, str(e), None
+        except Exception as e:
+            if "You are trying to access a gated repo." in str(e):
+                return True, "The model is gated and requires special access permissions.", None
+            return False, f"The model was not found or is misconfigured on the Hub. Error: {e}", None
+
         except ValueError:
             return False, "The model requires `trust_remote_code=True` to launch, and for safety reasons, we don't accept such models automatically.", None
         except Exception as e:
```
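To evaluate the fallback branch on its own, here is a self-contained sketch of the same HTTP path, with two deviations flagged rather than folded in silently: it requests `/resolve/` instead of the diff's `/raw/` endpoint, since `/raw/` serves the git blob as stored (for an LFS-tracked file that is the LFS pointer text, not the JSON), and it builds the config via `PretrainedConfig.from_dict`, because `AutoConfig` does not expose a documented `from_dict` classmethod. The function name and signature are illustrative:

```python
import asyncio
from typing import Optional

import aiohttp
from huggingface_hub.utils import build_hf_headers
from transformers import PretrainedConfig


async def fetch_config(model_name: str, revision: str,
                       token: Optional[str] = None) -> PretrainedConfig:
    """Fetch config.json over HTTP and build a generic config object."""
    # /resolve/ follows LFS pointers; /raw/ would return the pointer file
    config_url = f"https://huggingface.co/{model_name}/resolve/{revision}/config.json"
    headers = build_hf_headers(token=token)
    async with aiohttp.ClientSession() as session:
        async with session.get(config_url, headers=headers) as response:
            if response.status != 200:
                raise RuntimeError(f"Failed to load config.json (Status {response.status})")
            # content_type=None skips aiohttp's strict application/json
            # check, since the CDN may label the file text/plain
            config_json = await response.json(content_type=None)
    # AutoConfig has no from_dict classmethod; the generic base class does
    return PretrainedConfig.from_dict(config_json)


if __name__ == "__main__":
    config = asyncio.run(fetch_config("gpt2", "main"))
    print(config.architectures)  # ['GPT2LMHeadModel']
```

One behavioral note on the diff itself: on this HTTP path a gated repo would surface as a 401/403 status wrapped in a generic exception rather than the "You are trying to access a gated repo." message the new `except` clause matches on, and the pre-existing `except ValueError`/`except Exception` handlers left below the new ones in the last hunk can no longer be reached.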