Kuberwastaken committed
Commit a7b8952 · 1 Parent(s): 54f81d8

Adjusted maximum tokens

Files changed (1)
  model/analyzer.py  +2 -2
model/analyzer.py CHANGED
@@ -104,9 +104,9 @@ class ContentAnalyzer:
         print("Generating response...")
         outputs = self.model.generate(
             **inputs,
-            max_new_tokens=5,
+            max_new_tokens=10,
             do_sample=True,
-            temperature=0.6,
+            temperature=0.4,
             top_p=0.9,
             pad_token_id=self.tokenizer.eos_token_id
         )
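For context, here is a minimal standalone sketch of an equivalent Hugging Face Transformers generate() call with the new settings. The model name and prompt below are placeholders for illustration only, not taken from this repository; only the sampling parameters mirror the committed change.

    # Hypothetical standalone example of the updated generation settings.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_name = "gpt2"  # placeholder model, not the one used by ContentAnalyzer
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)

    inputs = tokenizer("Example prompt:", return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=10,   # raised from 5 so the short answer is not cut off
        do_sample=True,
        temperature=0.4,     # lowered from 0.6 for a more deterministic reply
        top_p=0.9,
        pad_token_id=tokenizer.eos_token_id,
    )
    print(tokenizer.decode(outputs[0], skip_special_tokens=True))

Raising max_new_tokens gives the model a little more room to finish its answer, while the lower temperature narrows the sampling distribution so the short output varies less between runs.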