wmpscc committed
Commit f2ea876 · 1 Parent(s): fa56ce2

Update app.py

Files changed (1)
  1. app.py +1 -1
app.py CHANGED
@@ -16,7 +16,7 @@ def init_model():
 def chat(prompt, top_k, temperature):
     prompt = f"### Instruction:{prompt.strip()} ### Response:"
     inputs = tokenizer(prompt, return_tensors="pt").to("cuda:0")
-    generate_ids = model.generate(inputs.input_ids, max_new_tokens=20480, do_sample = True, top_k=top_k, top_p = 0, temperature=temperature, repetition_penalty=1.15, eos_token_id=2, bos_token_id=1, pad_token_id=0)
+    generate_ids = model.generate(inputs.input_ids, max_new_tokens=2048, do_sample = True, top_k=int(top_k), top_p=0, temperature=float(temperature), repetition_penalty=1.15, eos_token_id=2, bos_token_id=1, pad_token_id=0)
     response = tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
     response = response.lstrip(prompt)
     print('log:', response)
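
The one-line change bundles two fixes: max_new_tokens drops from 20480 to 2048 (20480 new tokens would overrun the context window of a typical 2k-context LLaMA-family model, which the eos/bos/pad ids of 2/1/0 suggest this is), and top_k/temperature are cast explicitly, presumably because they arrive from Gradio sliders as floats while transformers expects top_k to be an int. Below is a minimal sketch of how the fixed chat() might be wired into a Gradio app; only the function body comes from the diff, while the model ID, loading code, and UI are illustrative assumptions, not part of this commit.

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder checkpoint; the real app loads its own model in init_model().
MODEL_ID = "your-org/your-llama-checkpoint"

tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID, torch_dtype=torch.float16
).to("cuda:0")


def chat(prompt, top_k, temperature):
    prompt = f"### Instruction:{prompt.strip()} ### Response:"
    inputs = tokenizer(prompt, return_tensors="pt").to("cuda:0")
    # Gradio sliders pass their value as a number (typically a float), so the
    # commit casts both knobs; max_new_tokens is capped at 2048 instead of 20480.
    generate_ids = model.generate(
        inputs.input_ids,
        max_new_tokens=2048,
        do_sample=True,
        top_k=int(top_k),
        top_p=0,
        temperature=float(temperature),
        repetition_penalty=1.15,
        eos_token_id=2,  # LLaMA-style special-token ids, as in the diff
        bos_token_id=1,
        pad_token_id=0,
    )
    response = tokenizer.batch_decode(
        generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False
    )[0]
    # Carried over verbatim from the diff: note that str.lstrip strips a *set
    # of characters*, not a prefix string, so this only approximates removing
    # the prompt from the decoded output.
    response = response.lstrip(prompt)
    print('log:', response)
    return response  # the hunk ends at the print; a return like this presumably follows


demo = gr.Interface(
    fn=chat,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(1, 100, value=40, label="top_k"),
        gr.Slider(0.1, 2.0, value=0.7, label="temperature"),
    ],
    outputs=gr.Textbox(label="Response"),
)
demo.launch()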