Norod78 committed
Commit 8ef8f63 · verified · 1 Parent(s): f66cd9a

Update app.py

Files changed (1):
app.py (+2 -1)
app.py CHANGED
@@ -36,7 +36,8 @@ def generate_song(prompt_text = ''):
     result = ""
     input_template = tokenizer.apply_chat_template([{"role": "user", "content": prompt_text}], tokenize=False, add_generation_prompt=True)
     input_ids = tokenizer(input_template, return_tensors="pt").to(model.device)
-    sample_outputs = model.generate(**input_ids, max_new_tokens=512 , repetition_penalty=1.1, temperature=0.4, top_p=0.95, top_k=40, do_sample = True)
+    #sample_outputs = model.generate(**input_ids, max_new_tokens=512 , repetition_penalty=1.1, temperature=0.4, top_p=0.95, top_k=40, do_sample = True)
+    sample_outputs = model.generate(**input_ids, max_new_tokens=256 , repetition_penalty=1.1, temperature=0.6, top_p=0.4, top_k=40, do_sample = True)
     #sample_outputs = model.generate(**input_ids, max_new_tokens=512 , repetition_penalty=1.1, temperature=0.5, do_sample = True)
     decoded_output = tokenizer.batch_decode(sample_outputs, skip_special_tokens=True)[0]
     result = decoded_output.replace("user\n", "משתמש:\n").replace("model\n", "\nמודל:\n")
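
For context, here is a minimal, self-contained sketch of how the updated `model.generate` call sits inside `generate_song` after this commit. The checkpoint id and model-loading lines are placeholder assumptions for illustration only; app.py defines its own model and tokenizer elsewhere, and that part is not shown in this diff.

```python
# Sketch of generate_song() with the sampling settings from this commit.
# NOTE: the checkpoint id below is a placeholder assumption, not taken from the diff.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-org/your-hebrew-song-model"  # placeholder
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id).to(
    "cuda" if torch.cuda.is_available() else "cpu"
)

def generate_song(prompt_text: str = "") -> str:
    # Wrap the prompt in the model's chat template as a single user turn.
    input_template = tokenizer.apply_chat_template(
        [{"role": "user", "content": prompt_text}],
        tokenize=False,
        add_generation_prompt=True,
    )
    input_ids = tokenizer(input_template, return_tensors="pt").to(model.device)

    # Sampling settings introduced by this commit: shorter outputs (256 new
    # tokens), a tighter nucleus (top_p=0.4) and a slightly higher temperature.
    sample_outputs = model.generate(
        **input_ids,
        max_new_tokens=256,
        repetition_penalty=1.1,
        temperature=0.6,
        top_p=0.4,
        top_k=40,
        do_sample=True,
    )

    decoded_output = tokenizer.batch_decode(sample_outputs, skip_special_tokens=True)[0]
    # Relabel the chat roles with Hebrew display prefixes for "user" and "model".
    return decoded_output.replace("user\n", "משתמש:\n").replace("model\n", "\nמודל:\n")
```

In practice, lowering top_p from 0.95 to 0.4 restricts sampling to a much smaller set of high-probability tokens, while the raised temperature (0.4 → 0.6) keeps some variety within that narrower set; halving max_new_tokens to 256 simply caps the length of the generated song.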