DongfuJiang committed · Commit f770b3a
1 Parent(s): 89d2ad1
update

Files changed:
- app.py (+156 -154)
- requirements.txt (+219 -3)

app.py
CHANGED
@@ -172,6 +172,162 @@ with gr.Blocks(theme='ParityError/Anime') as demo:



+    with gr.Tab("PairRM"):
+        # PairRM interface
+        with gr.Row():
+            gr.Markdown(descriptions.PairRM_OVERALL_DESC)
+            gr.Image("https://yuchenlin.xyz/LLM-Blender/pairranker.png")
+
+        with gr.Tab("Compare two responses"):
+            instruction = gr.Textbox(lines=1, label="Instruction", placeholder="Enter instruction here", show_label=True)
+            with gr.Row():
+                response1 = gr.Textbox(lines=4, label="Response 1", placeholder="Enter response 1 here", show_label=True)
+                response2 = gr.Textbox(lines=4, label="Response 2", placeholder="Enter response 2 here", show_label=True)
+            with gr.Row():
+                compare_button = gr.Button('Compare', variant='primary')
+                clear_button = gr.Button('Clear', variant='primary')
+            with gr.Row():
+                compare_result = gr.Textbox(lines=1, label="Compare Result", placeholder="", show_label=True)
+                compare_result_prob = gr.Textbox(lines=1, label="PairRM Confidence", placeholder="", show_label=True)
+
+            def compare_fn(inst, response1, response2):
+                if not inst:
+                    raise gr.Error("Please enter instruction")
+                if not response1 or not response2:
+                    raise gr.Error("Please enter response 1 and response 2")
+                comparison_results = blender.compare([inst], [response1], [response2], return_logits=True)
+                logit = comparison_results[0]
+                if logit > 0:
+                    result = "Response 1 is better than Response 2"
+                    prob = f"Confidence: {round(logit, 2)}"
+                elif logit < 0:
+                    result = "Response 2 is better than Response 1"
+                    prob = f"Cofidence: {round(abs(logit), 2)}"
+                else:
+                    result = "Response 1 and Response 2 are equally good"
+                    prob = f"No confidence for tie"
+
+                return [result, prob]
+            compare_button.click(
+                fn=compare_fn,
+                inputs=[instruction, response1, response2],
+                outputs=[compare_result, compare_result_prob],
+            )
+            clear_button.click(
+                fn=lambda: ["", ""],
+                inputs=[],
+                outputs=[compare_result, compare_result_prob],
+            )
+
+            hhh_dummy_textbox1 = gr.Textbox(lines=1, label="subset", placeholder="", show_label=False, visible=False)
+            hhh_dummy_textbox2 = gr.Textbox(lines=1, label="Better Response", placeholder="", show_label=False, visible=False)
+            gr.Markdown("## Examples from [HuggingFaceH4/hhh_alignment](https://huggingface.co/datasets/HuggingFaceH4/hhh_alignment)")
+            gr.Examples(
+                HHH_EXAMPLES,
+                fn=get_hhh_examples,
+                cache_examples=True,
+                examples_per_page=5,
+                inputs=[hhh_dummy_textbox1, instruction, response1, response2, hhh_dummy_textbox2],
+                outputs=[instruction, response1, response2],
+            )
+
+
+        with gr.Tab("Compare assistant's response in two multi-turn conversations"):
+
+            gr.Markdown("NOTE: Comparison of two conversations is based on that the user query in each turn is the same of two conversations.")
+            def append_message(message, chat_history):
+                if not message:
+                    return "", chat_history
+                if len(chat_history) == 0:
+                    chat_history.append((message, "(Please enter your bot response)"))
+                else:
+                    if chat_history[-1][1] == "(Please enter your bot response)":
+                        chat_history[-1] = (chat_history[-1][0], message)
+                    else:
+                        chat_history.append((message, "(Please enter your bot response)"))
+                return "", chat_history
+            with gr.Row():
+                with gr.Column():
+                    gr.Markdown("### Conversation A")
+                    chatbot1 = gr.Chatbot()
+                    msg1 = gr.Textbox(lines=1, label="Enter Chat history for Conversation A", placeholder="Enter your message here", show_label=True)
+                    clear1 = gr.ClearButton([msg1, chatbot1])
+                    msg1.submit(append_message, [msg1, chatbot1], [msg1, chatbot1])
+                with gr.Column():
+                    gr.Markdown("### Conversation B")
+                    chatbot2 = gr.Chatbot()
+                    msg2 = gr.Textbox(lines=1, label="Enter Chat history for Conversation B", placeholder="Enter your message here", show_label=True)
+                    clear2 = gr.ClearButton([msg2, chatbot2])
+                    msg2.submit(append_message, [msg2, chatbot2], [msg2, chatbot2])
+            with gr.Row():
+                compare_button = gr.Button('Compare', variant='primary')
+            with gr.Row():
+                compare_result = gr.Textbox(lines=1, label="Compare Result", placeholder="", show_label=True)
+                compare_result_prob = gr.Textbox(lines=1, label="PairRM Confidence", placeholder="", show_label=True)
+
+            def compare_conv_fn(chat_history1, chat_history2):
+                if len(chat_history1) == 0 or len(chat_history2) == 0:
+                    raise gr.Error("Please enter chat history for both conversations")
+                assert chat_history1[-1][1] != "(Please enter your bot response)" \
+                    and chat_history2[-1][1] != "(Please enter your bot response)", \
+                    "Please complete chat history for both conversations"
+                chat1_messages = []
+                for item in chat_history1:
+                    chat1_messages.append({
+                        "role": "USER",
+                        "content": item[0],
+                    })
+                    chat1_messages.append({
+                        "role": "ASSISTANT",
+                        "content": item[1],
+                    })
+                chat2_messages = []
+                for item in chat_history2:
+                    chat2_messages.append({
+                        "role": "USER",
+                        "content": item[0],
+                    })
+                    chat2_messages.append({
+                        "role": "ASSISTANT",
+                        "content": item[1],
+                    })
+
+                comparison_results = blender.compare_conversations([chat1_messages], [chat2_messages], return_logits=True)
+                logit = comparison_results[0]
+                if logit > 0:
+                    result = "Assistant's response in Conversation A is better than Conversation B"
+                    prob = f"Confidence: {round(logit, 2)}"
+                elif logit < 0:
+                    result = "Assistant's response in Conversation B is better than Conversation A"
+                    prob = f"Cofidence: {round(abs(logit), 2)}"
+                else:
+                    result = "Assistant's response in Conversation A and Conversation B are equally good"
+                    prob = f"No confidence for tie"
+
+                return [result, prob]
+
+            compare_button.click(
+                fn=compare_conv_fn,
+                inputs=[chatbot1, chatbot2],
+                outputs=[compare_result, compare_result_prob],
+            )
+
+            model_a_dummy_textbox = gr.Textbox(lines=1, label="Model A", placeholder="", show_label=False, visible=False)
+            model_b_dummy_textbox = gr.Textbox(lines=1, label="Model B", placeholder="", show_label=False, visible=False)
+            winner_dummy_textbox = gr.Textbox(lines=1, label="Better Model in conversation", placeholder="", show_label=False, visible=False)
+            chatbot1_dummy_textbox = gr.Textbox(lines=1, label="Conversation A", placeholder="", show_label=False, visible=False)
+            chatbot2_dummy_textbox = gr.Textbox(lines=1, label="Conversation B", placeholder="", show_label=False, visible=False)
+            gr.Markdown("## Examples from [lmsys/mt_bench_human_judgments](https://huggingface.co/datasets/lmsys/mt_bench_human_judgments)")
+            gr.Examples(
+                MT_BENCH_HUMAN_JUDGE_EXAMPLES,
+                fn=get_mt_bench_human_judge_examples,
+                cache_examples=True,
+                examples_per_page=5,
+                inputs=[model_a_dummy_textbox, model_b_dummy_textbox, chatbot1_dummy_textbox, chatbot2_dummy_textbox, winner_dummy_textbox],
+                outputs=[chatbot1, chatbot2],
+            )
+
+
     with gr.Tab("LLM-Blender"):
         # llm-blender interface
         with gr.Row():
@@ -375,160 +531,6 @@ with gr.Blocks(theme='ParityError/Anime') as demo:
         )


-    with gr.Tab("PairRM"):
-        # PairRM interface
-        with gr.Row():
-            gr.Markdown(descriptions.PairRM_OVERALL_DESC)
-            gr.Image("https://yuchenlin.xyz/LLM-Blender/pairranker.png")
-
-        with gr.Tab("Compare two responses"):
-            instruction = gr.Textbox(lines=1, label="Instruction", placeholder="Enter instruction here", show_label=True)
-            with gr.Row():
-                response1 = gr.Textbox(lines=4, label="Response 1", placeholder="Enter response 1 here", show_label=True)
-                response2 = gr.Textbox(lines=4, label="Response 2", placeholder="Enter response 2 here", show_label=True)
-            with gr.Row():
-                compare_button = gr.Button('Compare', variant='primary')
-                clear_button = gr.Button('Clear', variant='primary')
-            with gr.Row():
-                compare_result = gr.Textbox(lines=1, label="Compare Result", placeholder="", show_label=True)
-                compare_result_prob = gr.Textbox(lines=1, label="PairRM Confidence", placeholder="", show_label=True)
-
-            def compare_fn(inst, response1, response2):
-                if not inst:
-                    raise gr.Error("Please enter instruction")
-                if not response1 or not response2:
-                    raise gr.Error("Please enter response 1 and response 2")
-                comparison_results = blender.compare([inst], [response1], [response2], return_logits=True)
-                logit = comparison_results[0]
-                if logit > 0:
-                    result = "Response 1 is better than Response 2"
-                    prob = f"Confidence: {round(logit, 2)}"
-                elif logit < 0:
-                    result = "Response 2 is better than Response 1"
-                    prob = f"Cofidence: {round(abs(logit), 2)}"
-                else:
-                    result = "Response 1 and Response 2 are equally good"
-                    prob = f"No confidence for tie"
-
-                return [result, prob]
-            compare_button.click(
-                fn=compare_fn,
-                inputs=[instruction, response1, response2],
-                outputs=[compare_result, compare_result_prob],
-            )
-            clear_button.click(
-                fn=lambda: ["", ""],
-                inputs=[],
-                outputs=[compare_result, compare_result_prob],
-            )
-
-            hhh_dummy_textbox1 = gr.Textbox(lines=1, label="subset", placeholder="", show_label=False, visible=False)
-            hhh_dummy_textbox2 = gr.Textbox(lines=1, label="Better Response", placeholder="", show_label=False, visible=False)
-            gr.Markdown("## Examples from [HuggingFaceH4/hhh_alignment](https://huggingface.co/datasets/HuggingFaceH4/hhh_alignment)")
-            gr.Examples(
-                HHH_EXAMPLES,
-                fn=get_hhh_examples,
-                cache_examples=True,
-                examples_per_page=5,
-                inputs=[hhh_dummy_textbox1, instruction, response1, response2, hhh_dummy_textbox2],
-                outputs=[instruction, response1, response2],
-            )
-
-
-        with gr.Tab("Compare assistant's response in two multi-turn conversations"):
-
-            gr.Markdown("NOTE: Comparison of two conversations is based on that the user query in each turn is the same of two conversations.")
-            def append_message(message, chat_history):
-                if not message:
-                    return "", chat_history
-                if len(chat_history) == 0:
-                    chat_history.append((message, "(Please enter your bot response)"))
-                else:
-                    if chat_history[-1][1] == "(Please enter your bot response)":
-                        chat_history[-1] = (chat_history[-1][0], message)
-                    else:
-                        chat_history.append((message, "(Please enter your bot response)"))
-                return "", chat_history
-            with gr.Row():
-                with gr.Column():
-                    gr.Markdown("### Conversation A")
-                    chatbot1 = gr.Chatbot()
-                    msg1 = gr.Textbox(lines=1, label="Enter Chat history for Conversation A", placeholder="Enter your message here", show_label=True)
-                    clear1 = gr.ClearButton([msg1, chatbot1])
-                    msg1.submit(append_message, [msg1, chatbot1], [msg1, chatbot1])
-                with gr.Column():
-                    gr.Markdown("### Conversation B")
-                    chatbot2 = gr.Chatbot()
-                    msg2 = gr.Textbox(lines=1, label="Enter Chat history for Conversation B", placeholder="Enter your message here", show_label=True)
-                    clear2 = gr.ClearButton([msg2, chatbot2])
-                    msg2.submit(append_message, [msg2, chatbot2], [msg2, chatbot2])
-            with gr.Row():
-                compare_button = gr.Button('Compare', variant='primary')
-            with gr.Row():
-                compare_result = gr.Textbox(lines=1, label="Compare Result", placeholder="", show_label=True)
-                compare_result_prob = gr.Textbox(lines=1, label="PairRM Confidence", placeholder="", show_label=True)
-
-            def compare_conv_fn(chat_history1, chat_history2):
-                if len(chat_history1) == 0 or len(chat_history2) == 0:
-                    raise gr.Error("Please enter chat history for both conversations")
-                assert chat_history1[-1][1] != "(Please enter your bot response)" \
-                    and chat_history2[-1][1] != "(Please enter your bot response)", \
-                    "Please complete chat history for both conversations"
-                chat1_messages = []
-                for item in chat_history1:
-                    chat1_messages.append({
-                        "role": "USER",
-                        "content": item[0],
-                    })
-                    chat1_messages.append({
-                        "role": "ASSISTANT",
-                        "content": item[1],
-                    })
-                chat2_messages = []
-                for item in chat_history2:
-                    chat2_messages.append({
-                        "role": "USER",
-                        "content": item[0],
-                    })
-                    chat2_messages.append({
-                        "role": "ASSISTANT",
-                        "content": item[1],
-                    })
-
-                comparison_results = blender.compare_conversations([chat1_messages], [chat2_messages], return_logits=True)
-                logit = comparison_results[0]
-                if logit > 0:
-                    result = "Assistant's response in Conversation A is better than Conversation B"
-                    prob = f"Confidence: {round(logit, 2)}"
-                elif logit < 0:
-                    result = "Assistant's response in Conversation B is better than Conversation A"
-                    prob = f"Cofidence: {round(abs(logit), 2)}"
-                else:
-                    result = "Assistant's response in Conversation A and Conversation B are equally good"
-                    prob = f"No confidence for tie"
-
-                return [result, prob]

-            compare_button.click(
-                fn=compare_conv_fn,
-                inputs=[chatbot1, chatbot2],
-                outputs=[compare_result, compare_result_prob],
-            )
-
-            model_a_dummy_textbox = gr.Textbox(lines=1, label="Model A", placeholder="", show_label=False, visible=False)
-            model_b_dummy_textbox = gr.Textbox(lines=1, label="Model B", placeholder="", show_label=False, visible=False)
-            winner_dummy_textbox = gr.Textbox(lines=1, label="Better Model in conversation", placeholder="", show_label=False, visible=False)
-            chatbot1_dummy_textbox = gr.Textbox(lines=1, label="Conversation A", placeholder="", show_label=False, visible=False)
-            chatbot2_dummy_textbox = gr.Textbox(lines=1, label="Conversation B", placeholder="", show_label=False, visible=False)
-            gr.Markdown("## Examples from [lmsys/mt_bench_human_judgments](https://huggingface.co/datasets/lmsys/mt_bench_human_judgments)")
-            gr.Examples(
-                MT_BENCH_HUMAN_JUDGE_EXAMPLES,
-                fn=get_mt_bench_human_judge_examples,
-                cache_examples=True,
-                examples_per_page=5,
-                inputs=[model_a_dummy_textbox, model_b_dummy_textbox, chatbot1_dummy_textbox, chatbot2_dummy_textbox, winner_dummy_textbox],
-                outputs=[chatbot1, chatbot2],
-            )
-
     gr.Markdown(descriptions.CITATION)
 demo.queue(max_size=20).launch()
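The tabs added above drive PairRM through blender.compare and blender.compare_conversations on one-element batches. The snippet below is a minimal sketch of the same two calls outside Gradio; it assumes `blender` is the llm_blender Blender instance that app.py loads elsewhere (the loading code is not part of this diff), and the helper names pairwise_verdict and conversation_verdict are hypothetical.

# Minimal sketch of the PairRM calls used by the tabs above.
# Assumes `blender` is an llm_blender Blender with the PairRM ranker already
# loaded elsewhere in app.py; helper names here are hypothetical.

def pairwise_verdict(blender, instruction, response_a, response_b):
    # compare() takes parallel lists, so a single example is a one-element batch.
    logits = blender.compare([instruction], [response_a], [response_b], return_logits=True)
    logit = logits[0]
    if logit > 0:
        return "Response 1 is better", round(logit, 2)
    if logit < 0:
        return "Response 2 is better", round(abs(logit), 2)
    return "Tie", 0.0

def conversation_verdict(blender, turns_a, turns_b):
    # Each conversation is a list of (user, assistant) tuples, flattened into the
    # USER/ASSISTANT message dicts that compare_conversations() expects above.
    def to_messages(turns):
        messages = []
        for user_msg, assistant_msg in turns:
            messages.append({"role": "USER", "content": user_msg})
            messages.append({"role": "ASSISTANT", "content": assistant_msg})
        return messages

    logits = blender.compare_conversations(
        [to_messages(turns_a)], [to_messages(turns_b)], return_logits=True
    )
    # A positive logit favors conversation A, a negative one favors conversation B.
    return logits[0]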
requirements.txt
CHANGED
@@ -1,3 +1,219 @@
-
-
-
+absl-py==2.0.0
+accelerate==0.24.0
+aiofiles==23.2.1
+aiohttp==3.8.6
+aiosignal==1.3.1
+altair==5.1.2
+annotated-types==0.6.0
+anyio==3.7.1
+appdirs==1.4.4
+asttokens @ file:///opt/conda/conda-bld/asttokens_1646925590279/work
+astunparse==1.6.3
+async-timeout==4.0.3
+attrs==23.1.0
+backcall @ file:///home/ktietz/src/ci/backcall_1611930011877/work
+bert-score==0.3.13
+bitsandbytes==0.41.1
+BLEURT @ git+https://github.com/google-research/bleurt.git@cebe7e6f996b40910cfaa520a63db47807e3bf5c
+blis==0.7.11
+cachetools==5.3.2
+catalogue==2.0.10
+certifi==2023.7.22
+charset-normalizer==3.3.1
+click==8.1.7
+cloudpathlib==0.16.0
+colorama==0.4.6
+comm @ file:///croot/comm_1671231121260/work
+confection==0.1.3
+contourpy==1.1.1
+cpm-kernels==1.0.11
+cycler==0.12.1
+cymem==2.0.8
+dataclasses-json==0.6.1
+datasets==2.14.6
+debugpy @ file:///croot/debugpy_1690905042057/work
+decorator @ file:///opt/conda/conda-bld/decorator_1643638310831/work
+deepspeed==0.11.1
+dill==0.3.7
+distro==1.8.0
+docker-pycreds==0.4.0
+docstring-parser==0.15
+evaluate==0.4.1
+exceptiongroup @ file:///croot/exceptiongroup_1668714342571/work
+executing @ file:///opt/conda/conda-bld/executing_1646925071911/work
+fairscale==0.4.13
+fastapi==0.104.0
+ffmpy==0.3.1
+filelock @ file:///croot/filelock_1672387128942/work
+fire==0.5.0
+flatbuffers==23.5.26
+fonttools==4.43.1
+frozenlist==1.4.0
+fschat==0.2.31
+fsspec==2023.10.0
+gast==0.4.0
+gitdb==4.0.11
+GitPython==3.1.40
+gmpy2 @ file:///tmp/build/80754af9/gmpy2_1645438755360/work
+google-auth==2.23.3
+google-auth-oauthlib==0.4.6
+google-pasta==0.2.0
+gradio==4.2.0
+gradio_client==0.7.0
+grpcio==1.59.0
+h11==0.14.0
+h5py==3.10.0
+hjson==3.1.0
+httpcore==0.18.0
+httpx==0.25.0
+huggingface-hub==0.17.3
+idna==3.4
+importlib-metadata @ file:///croot/importlib-metadata_1678997070253/work
+importlib-resources==6.1.0
+ipykernel @ file:///croot/ipykernel_1691121631942/work
+ipython @ file:///croot/ipython_1694181358621/work
+jedi @ file:///tmp/build/80754af9/jedi_1644297102865/work
+Jinja2 @ file:///croot/jinja2_1666908132255/work
+joblib==1.3.2
+jsonschema==4.19.2
+jsonschema-specifications==2023.7.1
+jupyter_client @ file:///croot/jupyter_client_1680171862562/work
+jupyter_core @ file:///croot/jupyter_core_1679906564508/work
+keras==2.11.0
+kiwisolver==1.4.5
+langcodes==3.3.0
+Levenshtein==0.23.0
+libclang==16.0.6
+-e git+https://github.com/yuchenlin/LLM-Blender.git@2568bbef76d67d2d41a42c48ed482ee60430ad88#egg=llm_blender
+lxml==4.9.3
+Markdown==3.5
+markdown-it-py==3.0.0
+markdown2==2.4.10
+MarkupSafe @ file:///opt/conda/conda-bld/markupsafe_1654597864307/work
+marshmallow==3.20.1
+matplotlib==3.8.0
+matplotlib-inline @ file:///opt/conda/conda-bld/matplotlib-inline_1662014470464/work
+mdurl==0.1.2
+mpmath @ file:///croot/mpmath_1690848262763/work
+multidict==6.0.4
+multiprocess==0.70.15
+murmurhash==1.0.10
+mypy-extensions==1.0.0
+nest-asyncio @ file:///croot/nest-asyncio_1672387112409/work
+networkx @ file:///croot/networkx_1690561992265/work
+nh3==0.2.14
+ninja==1.11.1.1
+nltk==3.8.1
+numpy==1.26.1
+oauthlib==3.2.2
+openai==1.2.3
+opt-einsum==3.3.0
+orjson==3.9.10
+packaging @ file:///croot/packaging_1693575174725/work
+pandas==2.1.2
+parso @ file:///opt/conda/conda-bld/parso_1641458642106/work
+pathtools==0.1.2
+peft==0.6.0
+pexpect @ file:///tmp/build/80754af9/pexpect_1605563209008/work
+pickleshare @ file:///tmp/build/80754af9/pickleshare_1606932040724/work
+Pillow==10.1.0
+platformdirs @ file:///croot/platformdirs_1692205439124/work
+portalocker==2.8.2
+preshed==3.0.9
+prettytable==3.9.0
+prompt-toolkit @ file:///croot/prompt-toolkit_1672387306916/work
+protobuf==3.19.6
+psutil @ file:///opt/conda/conda-bld/psutil_1656431268089/work
+ptyprocess @ file:///tmp/build/80754af9/ptyprocess_1609355006118/work/dist/ptyprocess-0.7.0-py2.py3-none-any.whl
+pure-eval @ file:///opt/conda/conda-bld/pure_eval_1646925070566/work
+py-cpuinfo==9.0.0
+pyarrow==13.0.0
+pyasn1==0.5.0
+pyasn1-modules==0.3.0
+pycocoevalcap==1.2
+pycocotools==2.0.7
+pydantic==2.4.2
+pydantic_core==2.10.1
+pydub==0.25.1
+Pygments @ file:///croot/pygments_1684279966437/work
+pyparsing==3.1.1
+python-dateutil @ file:///tmp/build/80754af9/python-dateutil_1626374649649/work
+python-multipart==0.0.6
+pytz==2023.3.post1
+PyYAML @ file:///croot/pyyaml_1698096049011/work
+pyzmq @ file:///croot/pyzmq_1686601365461/work
+rapidfuzz==3.5.2
+referencing==0.30.2
+regex==2023.10.3
+requests==2.31.0
+requests-oauthlib==1.3.1
+responses==0.18.0
+rich==13.6.0
+rouge-score==0.1.2
+rpds-py==0.12.0
+rsa==4.9
+sacrebleu==2.3.1
+safetensors==0.4.0
+scikit-learn==1.3.2
+scipy==1.11.3
+semantic-version==2.10.0
+sentencepiece==0.1.99
+sentry-sdk==1.32.0
+setproctitle==1.3.3
+shellingham==1.5.4
+shortuuid==1.0.11
+shtab==1.6.4
+six @ file:///tmp/build/80754af9/six_1644875935023/work
+smart-open==6.4.0
+smmap==5.0.1
+sniffio==1.3.0
+spacy==3.7.2
+spacy-legacy==3.0.12
+spacy-loggers==1.0.5
+srsly==2.4.8
+stack-data @ file:///opt/conda/conda-bld/stack_data_1646927590127/work
+starlette==0.27.0
+svgwrite==1.4.3
+sympy @ file:///croot/sympy_1668202399572/work
+tabulate==0.9.0
+tensorboard==2.11.2
+tensorboard-data-server==0.6.1
+tensorboard-plugin-wit==1.8.1
+tensorflow==2.11.1
+tensorflow-estimator==2.11.0
+tensorflow-io-gcs-filesystem==0.34.0
+termcolor==2.3.0
+tf-slim==1.1.0
+thinc==8.2.1
+threadpoolctl==3.2.0
+tiktoken==0.5.1
+tokenizers==0.14.1
+tomlkit==0.12.0
+toolz==0.12.0
+torch==2.1.0
+tornado @ file:///croot/tornado_1696936946304/work
+tqdm==4.66.1
+traitlets @ file:///croot/traitlets_1671143879854/work
+transformers==4.35.0
+triton==2.1.0
+trl==0.7.4
+typer==0.9.0
+typing-inspect==0.9.0
+typing_extensions==4.8.0
+tyro==0.5.12
+tzdata==2023.3
+urllib3==2.0.7
+uvicorn==0.23.2
+wandb==0.15.12
+wasabi==1.1.2
+wavedrom==2.0.3.post3
+wcwidth @ file:///Users/ktietz/demo/mc3/conda-bld/wcwidth_1629357192024/work
+weasel==0.3.3
+websockets==11.0.3
+Werkzeug==3.0.1
+wget==3.2
+wrapt==1.15.0
+xxhash==3.4.1
+yarl==1.9.2
+zipp @ file:///croot/zipp_1672387121353/work
+llm_blender @ git+https://github.com/yuchenlin/LLM-Blender.git@main
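The list above pins the full environment the Space resolves at build time, including gradio 4.2.0, torch 2.1.0, transformers 4.35.0, and llm_blender installed directly from the yuchenlin/LLM-Blender repository. The snippet below is a hypothetical smoke test, not part of this commit; the loadranker call follows the loading pattern documented in the LLM-Blender README and is an assumption about how app.py obtains its blender object.

# Hypothetical smoke test for the pinned environment above (not part of this commit).
import gradio
import torch
import transformers
import llm_blender  # installed from the pinned yuchenlin/LLM-Blender revision

# Versions expected from requirements.txt.
print(gradio.__version__)        # 4.2.0
print(torch.__version__)         # 2.1.0
print(transformers.__version__)  # 4.35.0

# Loading pattern from the LLM-Blender README; assumed to match what app.py does.
blender = llm_blender.Blender()
blender.loadranker("llm-blender/PairRM")
logits = blender.compare(
    ["What is 2 + 2?"],
    ["2 + 2 equals 4."],
    ["I am not sure."],
    return_logits=True,
)
print(logits[0])  # positive means the first response is preferred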