Commit
·
636a1d9
1
Parent(s):
8f8d94d
Fix token authentication method
Browse files
- test_model.py +14 -4
test_model.py
CHANGED
@@ -1,16 +1,26 @@
|
|
|
|
1 |
import gradio as gr
|
2 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
3 |
from peft import PeftModel
|
|
|
4 |
|
5 |
def generate_response(question):
|
6 |
try:
|
7 |
-
#
|
8 |
-
|
9 |
-
|
|
|
10 |
|
11 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
12 |
base_model = AutoModelForCausalLM.from_pretrained(
|
13 |
"meta-llama/Llama-2-7b-hf",
|
|
|
14 |
load_in_8bit=True,
|
15 |
device_map="auto"
|
16 |
)
|
|
|
1 |
+
import os
|
2 |
import gradio as gr
|
3 |
from transformers import AutoModelForCausalLM, AutoTokenizer
|
4 |
from peft import PeftModel
|
5 |
+
from huggingface_hub import login
|
6 |
|
7 |
def generate_response(question):
|
8 |
try:
|
9 |
+
# Debug print to verify token
|
10 |
+
token = os.environ.get('HUGGINGFACE_TOKEN')
|
11 |
+
print(f"Token available: {'Yes' if token else 'No'}")
|
12 |
+
login(token)
|
13 |
|
14 |
+
print("Loading tokenizer...")
|
15 |
+
tokenizer = AutoTokenizer.from_pretrained(
|
16 |
+
"meta-llama/Llama-2-7b-hf",
|
17 |
+
use_auth_token=True # Changed to use environment token
|
18 |
+
)
|
19 |
+
|
20 |
+
print("Loading model...")
|
21 |
base_model = AutoModelForCausalLM.from_pretrained(
|
22 |
"meta-llama/Llama-2-7b-hf",
|
23 |
+
use_auth_token=True, # Changed to use environment token
|
24 |
load_in_8bit=True,
|
25 |
device_map="auto"
|
26 |
)
|