rivercold committed on
Commit
5308684
·
1 Parent(s): f970c67

Change the response length dynamically with the number of papers in the prompt

Browse files
Files changed (1) hide show
  1. src/relevancy.py +1 -2
src/relevancy.py CHANGED
@@ -108,7 +108,7 @@ def generate_relevance_score(
108
  decoding_args = utils.OpenAIDecodingArguments(
109
  temperature=temperature,
110
  n=1,
111
- max_tokens=1072, # hard-code to maximize the length. the requests will be automatically adjusted
112
  top_p=top_p,
113
  )
114
  request_start = time.time()
@@ -118,7 +118,6 @@ def generate_relevance_score(
118
  batch_size=1,
119
  decoding_args=decoding_args,
120
  logit_bias={"100257": -100}, # prevent the <|endoftext|> from being generated
121
- # "100265":-100, "100276":-100 for <|im_end|> and <endofprompt> token
122
  )
123
  print ("response", response['message']['content'])
124
  request_duration = time.time() - request_start
 
108
  decoding_args = utils.OpenAIDecodingArguments(
109
  temperature=temperature,
110
  n=1,
111
+ max_tokens=128*num_paper_in_prompt, # The response for each paper should be less than 128 tokens.
112
  top_p=top_p,
113
  )
114
  request_start = time.time()
 
118
  batch_size=1,
119
  decoding_args=decoding_args,
120
  logit_bias={"100257": -100}, # prevent the <|endoftext|> from being generated
 
121
  )
122
  print ("response", response['message']['content'])
123
  request_duration = time.time() - request_start