Spaces:
Sleeping
Sleeping
kz209
committed on
Commit
•
51259f2
1
Parent(s):
551f786
update
Browse files
pages/summarization_playground.py
CHANGED
@@ -107,14 +107,14 @@ def get_model_batch_generation(model_name):
|
|
107 |
return model[model_name]
|
108 |
|
109 |
|
110 |
-
def generate_answer(sources, model_name, prompt, temperature, max_new_tokens, do_sample):
    """Summarize *sources* with the selected model.

    Verifies the model is on the right device, wraps the source text in the
    prompt template, and returns the stripped first generation.
    """
    model_device_check(model_name)
    # Template: <prompt>\n{<sources>}\n\nsummary:
    query = prompt + '\n{' + sources + '}\n\nsummary:'
    generations = model[model_name].gen(query, temperature, max_new_tokens, do_sample)
    # .gen returns a list of candidates; keep only the first, whitespace-trimmed.
    return generations[0].strip()
|
116 |
|
117 |
-
def process_input(input_text, model_selection, prompt, temperature, max_new_tokens, do_sample):
|
118 |
if input_text:
|
119 |
logging.info("Start generation")
|
120 |
response = generate_answer(input_text, model_selection, prompt, temperature, max_new_tokens, do_sample)
|
|
|
107 |
return model[model_name]
|
108 |
|
109 |
|
110 |
+
def generate_answer(sources, model_name, prompt, temperature=0.0001, max_new_tokens=500, do_sample=True):
    """Summarize *sources* with the selected model.

    Verifies the model is on the right device, wraps the source text in the
    prompt template, and returns the stripped first generation. Sampling
    parameters default to a near-greedy configuration (very low temperature).
    """
    model_device_check(model_name)
    # Template: <prompt>\n{<sources>}\n\nsummary:
    query = prompt + '\n{' + sources + '}\n\nsummary:'
    generations = model[model_name].gen(query, temperature, max_new_tokens, do_sample)
    # .gen returns a list of candidates; keep only the first, whitespace-trimmed.
    return generations[0].strip()
|
116 |
|
117 |
+
def process_input(input_text, model_selection, prompt, temperature=0.0001, max_new_tokens=500, do_sample=True):
|
118 |
if input_text:
|
119 |
logging.info("Start generation")
|
120 |
response = generate_answer(input_text, model_selection, prompt, temperature, max_new_tokens, do_sample)
|