Samuel L Meyers committed on
Commit
5930c36
·
1 Parent(s): c789b81

Remove max_tokens

Browse files
Files changed (1) hide show
  1. code/app.py +1 -1
code/app.py CHANGED
@@ -69,7 +69,7 @@ def talk(txt, jsn):
69
  if not running:
70
  #result = lcpp_model.create_chat_completion(messages=txt,stream=True,stop=["GPT4 Correct User: ", "<|end_of_turn|>", "</s>"], max_tokens=64, )
71
  #result = lcpp_model(prompt=jsn2prompt(txt), stream=True, stop=["GPT4 Correct User: ", "<|end_of_turn|>", "</s>"], max_tokens=64, echo=False)
72
- result = llm(prompt=jsn2prompt(txt), stream=True, stop=["GPT4 Correct User: ", "<|end_of_turn|>", "</s>"], max_tokens=192, echo=False)
73
  running = True
74
  for r in result:
75
  print("GOT RESULT:", r)
 
69
  if not running:
70
  #result = lcpp_model.create_chat_completion(messages=txt,stream=True,stop=["GPT4 Correct User: ", "<|end_of_turn|>", "</s>"], max_tokens=64, )
71
  #result = lcpp_model(prompt=jsn2prompt(txt), stream=True, stop=["GPT4 Correct User: ", "<|end_of_turn|>", "</s>"], max_tokens=64, echo=False)
72
+ result = llm(prompt=jsn2prompt(txt), stream=True, stop=["GPT4 Correct User: ", "<|end_of_turn|>", "</s>"], echo=False)
73
  running = True
74
  for r in result:
75
  print("GOT RESULT:", r)