avantol committed
Commit 2a76ab8
1 Parent(s): c567880

fix(setup): gracefully handle misconfiguration

Files changed (1)
  1. app.py +22 -13
app.py CHANGED
@@ -31,15 +31,23 @@ MAX_RETRY_ATTEMPTS = 1
 print(f"Is CUDA available: {torch.cuda.is_available()}")
 print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
 
-tokenizer = AutoTokenizer.from_pretrained(
-    BASE_MODEL, token=AUTH_TOKEN, device_map="auto"
-)
-model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, token=AUTH_TOKEN)
-model = model.to("cuda")
-model = model.eval()
-
-peft_config = PeftConfig.from_pretrained(LORA_ADAPTER, token=AUTH_TOKEN)
-model = PeftModel.from_pretrained(model, LORA_ADAPTER, token=AUTH_TOKEN)
+model_loaded = False
+
+try:
+    tokenizer = AutoTokenizer.from_pretrained(
+        BASE_MODEL, token=AUTH_TOKEN, device_map="auto"
+    )
+    model = AutoModelForCausalLM.from_pretrained(BASE_MODEL, token=AUTH_TOKEN)
+    model = model.to("cuda")
+    model = model.eval()
+
+    peft_config = PeftConfig.from_pretrained(LORA_ADAPTER, token=AUTH_TOKEN)
+    model = PeftModel.from_pretrained(model, LORA_ADAPTER, token=AUTH_TOKEN)
+
+    model_loaded = True
+except Exception:
+    print("No HF_TOKEN found. Ensure you follow setup instructions!")
+    # continue on so setup instructions load
 
 
 @spaces.GPU(duration=360)
@@ -242,11 +250,12 @@ with gr.Blocks() as demo:
     graph_out = gr.Image(label="Network Graph Representation", type="pil")
 
     # If files are uploaded, generate prompt and run model
-    files.upload(
-        fn=gen_output_from_files_uploaded,
-        inputs=files,
-        outputs=[json_out, graph_out, sql_out],
-    )
+    if model_loaded:
+        files.upload(
+            fn=gen_output_from_files_uploaded,
+            inputs=files,
+            outputs=[json_out, graph_out, sql_out],
+        )
 
     gr.Markdown("Run out of FreeGPU or having issues? Try the example output!")
     demo_btn = gr.Button("Manually Load Example Output from Previous Run")