Update app.py
app.py CHANGED

@@ -12,7 +12,6 @@ login(token=api_key)
 # setup model
 model_id = "google/gemma-2-2b-it"
 dtype = torch.bfloat16
-chat = []
 
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
@@ -22,7 +21,7 @@ model = AutoModelForCausalLM.from_pretrained(
 
 def poet(text):
     prompt = 'Make 25 lines, it has to be absolutely 25 lines of text no less no exception, of shakespeare based on this prompt: ' + text
-    chat
+    chat = [{"role": "user", "content": prompt}]
     prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
     inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
     outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=150)
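For context, a minimal sketch of how poet() reads after this change. Only the chat construction and the lines shown in the diff come from the commit; the from_pretrained keyword arguments, the output decoding, and the return value are cut off in the diff and are assumptions here.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "google/gemma-2-2b-it"
dtype = torch.bfloat16

tokenizer = AutoTokenizer.from_pretrained(model_id)
# Assumption: the truncated from_pretrained(...) call passes the dtype and places the model on the available device.
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=dtype, device_map="auto")

def poet(text):
    prompt = ('Make 25 lines, it has to be absolutely 25 lines of text no less no exception, '
              'of shakespeare based on this prompt: ' + text)
    # The fix: build the chat from the current prompt instead of reusing a module-level list.
    chat = [{"role": "user", "content": prompt}]
    prompt = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
    outputs = model.generate(input_ids=inputs.to(model.device), max_new_tokens=150)
    # Assumption: the rest of app.py strips the prompt tokens and decodes only the new text.
    generated = outputs[0][inputs.shape[-1]:]
    return tokenizer.decode(generated, skip_special_tokens=True)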