htigenai committed on
Commit
be8e867
·
verified ·
1 Parent(s): 4cbbac9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -9
app.py CHANGED
@@ -1,5 +1,5 @@
1
  import gradio as gr
2
- from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
3
  import torch
4
  import logging
5
  import sys
@@ -61,15 +61,15 @@ try:
61
  )
62
 
63
  with timer("Loading model"):
64
- model = AutoModelForCausalLM.from_pretrained(
65
- model_id,
66
- quantization_config=bnb_config,
67
- device_map={"": "cpu"}, # Explicitly set to CPU
68
- trust_remote_code=True,
69
- force_download=True,
70
- cache_dir='./cache'
71
- )
72
  model.eval()
 
73
  logger.info("Model loaded successfully")
74
 
75
  def generate_text(prompt, max_tokens=100, temperature=0.7):
 
1
  import gradio as gr
2
+ from transformers import AutoTokenizer, AutoModelForCausalLM
3
  import torch
4
  import logging
5
  import sys
 
61
  )
62
 
63
  with timer("Loading model"):
64
+ model = AutoModelForCausalLM.from_pretrained(
65
+ model_id,
66
+ device_map={"": "cpu"},
67
+ trust_remote_code=True,
68
+ force_download=True,
69
+ cache_dir='./cache'
70
+ )
 
71
  model.eval()
72
+
73
  logger.info("Model loaded successfully")
74
 
75
  def generate_text(prompt, max_tokens=100, temperature=0.7):