BigSalmon committed on
Commit
bb080d4
·
1 Parent(s): c521dc5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -15,7 +15,7 @@ def load_model(model_name):
15
  tokenizer = AutoTokenizer.from_pretrained(model_name)
16
  model = AutoModelForCausalLM.from_pretrained(model_name)
17
  return model, tokenizer
18
- def extend(input_text, max_size=20, top_k=50, top_p=0.95):
19
  if len(input_text) == 0:
20
  input_text = ""
21
  encoded_prompt = tokenizer.encode(
@@ -32,7 +32,7 @@ def extend(input_text, max_size=20, top_k=50, top_p=0.95):
32
  top_k=top_k,
33
  top_p=top_p,
34
  do_sample=True,
35
- num_return_sequences=1)
36
  # Remove the batch dimension when returning multiple sequences
37
  if len(output_sequences.shape) > 2:
38
  output_sequences.squeeze_()
 
15
  tokenizer = AutoTokenizer.from_pretrained(model_name)
16
  model = AutoModelForCausalLM.from_pretrained(model_name)
17
  return model, tokenizer
18
+ def extend(input_text, max_size=20, top_k=50, top_p=0.95, num_return_sequences=1):
19
  if len(input_text) == 0:
20
  input_text = ""
21
  encoded_prompt = tokenizer.encode(
 
32
  top_k=top_k,
33
  top_p=top_p,
34
  do_sample=True,
35
+ num_return_sequences=num_return_sequences)
36
  # Remove the batch dimension when returning multiple sequences
37
  if len(output_sequences.shape) > 2:
38
  output_sequences.squeeze_()