Ozaii committed
Commit 41fa517 · verified · 1 Parent(s): 7ddc8df

Update app.py

Files changed (1)
  1. app.py +4 -3
app.py CHANGED
@@ -1,12 +1,12 @@
-import spaces
 import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
 from peft import PeftConfig, PeftModel
 from threading import Thread
 import gradio as gr
+import spaces
 
 MODEL_PATH = "Ozaii/zephyr-bae"
-BASE_MODEL = "unsloth/Meta-Llama-3.1-8B-Instruct-bnb-4bit"
+BASE_MODEL = "unsloth/llama-3-8b-bnb-4bit"
 max_seq_length = 2048
 
 print("Zephyr is getting ready to charm! 🌟")
@@ -25,7 +25,8 @@ def load_model():
         BASE_MODEL,
         torch_dtype=torch.float16,
         device_map="auto",
-        load_in_4bit=True
+        load_in_4bit=True,
+        trust_remote_code=True
     )
 
     model = PeftModel.from_pretrained(base_model, MODEL_PATH)
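
For context, here is a minimal sketch of how the pieces touched by this commit fit together inside load_model(). Only the lines visible in the diff are confirmed; the tokenizer call, the return statement, and the exact function layout are assumptions based on the usual transformers + peft loading pattern.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

MODEL_PATH = "Ozaii/zephyr-bae"             # PEFT adapter repo (from the diff)
BASE_MODEL = "unsloth/llama-3-8b-bnb-4bit"  # new base model set by this commit

def load_model():
    # Assumed: tokenizer loading is not shown in the diff
    tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)

    base_model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL,
        torch_dtype=torch.float16,
        device_map="auto",
        load_in_4bit=True,       # 4-bit quantized weights, matching the -bnb-4bit checkpoint
        trust_remote_code=True,  # added by this commit
    )

    # Attach the fine-tuned adapter on top of the quantized base model
    model = PeftModel.from_pretrained(base_model, MODEL_PATH)
    model.eval()
    return model, tokenizer

Note that the commit also moves import spaces after the other imports rather than dropping it; on Hugging Face Spaces this package supplies the @spaces.GPU decorator used to request GPU time for inference functions, which is presumably why it is kept.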