CreitinGameplays committed on
Commit
3cfe997
1 Parent(s): 77fc397

Update app.py

Files changed (1)
  1. app.py +2 -4
app.py CHANGED
@@ -3,7 +3,6 @@ import torch
 import bitsandbytes as bnb
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 
-"""
 # BNB config
 bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
@@ -12,15 +11,14 @@ bnb_config = BitsAndBytesConfig(
     bnb_4bit_compute_dtype=torch.bfloat16
 )
 
-quantization_config=bnb_config
-"""
+#quantization_config=bnb_config
 
 # Define the BLOOM model name
 model_name = "CreitinGameplays/bloom-3b-conversational"
 
 # Load tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name, quantization_config=bnb_config)
 
 def generate_text(user_prompt):
     """Generates text using the BLOOM model from Hugging Face Transformers and removes the user prompt."""