peterpeter8585 committed • Commit 2f92cb1 • 1 Parent(s): ea1da05
Update app.py
app.py CHANGED
@@ -3,26 +3,7 @@ import numpy as np
 from transformers import pipeline
 from Ai import chatbot, chatbot2, chatbot3, chatbot4, chatbot5, chatbot7, chatbot11
 from huggingface_hub import InferenceClient
-def chat(message,history: list[tuple[str, str]],system_message,max_tokens,temperature,top_p):
-    m=torch.load("./model.pt")
-
-    messages = [{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:"+system_message}]
-
-    for val in history:
-        if val[0]:
-            messages.append({"role": "user", "content": val[0]})
-        if val[1]:
-            messages.append({"role": "assistant", "content": val[1]})
-
-    messages.append({"role": "user", "content": message})
 
-    pipe = pipeline("text-generation", model=m, torch_dtype=torch.bfloat16, tokenizer=torch.load("tok.pt"),device_map="auto")
-    # We use the tokenizer's chat template to format each message - see https://huggingface.co/docs/transformers/main/en/chat_templating
-    prompt = pipe.tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
-    p=pipe.tokenizer.apply_chat_template([{"role": "system", "content": "Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions.Whatever happens, you must follow it.:"+system_message}], tokenize=False, add_generation_prompt=True)
-    o= pipe(p, max_new_tokens=max_tokens, do_sample=True, temperature=0.1)
-    outputs = pipe(prompt, max_new_tokens=max_tokens, do_sample=True, temperature=temperature, top_p=top_p)
-    return outputs[0]["generated_text"]
 import random
 from diffusers import DiffusionPipeline
 import torch
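Note: the deleted chat() helper was effectively dead code. It referenced torch before the import torch that only appears at old line 28, re-loaded a pickled model and tokenizer (./model.pt, ./tok.pt) on every call, and computed an unused intermediate o. For reference, a minimal self-contained sketch of the chat-template-plus-pipeline pattern it was attempting could look like the following; the model id here is an illustrative assumption, not something the Space uses:

    import torch
    from transformers import pipeline

    # Illustrative checkpoint; the Space instead loaded pickled objects
    # from ./model.pt and ./tok.pt with torch.load().
    MODEL_ID = "HuggingFaceH4/zephyr-7b-beta"

    # Build the pipeline once at startup, not on every chat() call.
    pipe = pipeline("text-generation", model=MODEL_ID,
                    torch_dtype=torch.bfloat16, device_map="auto")

    def chat(message, history, system_message, max_tokens, temperature, top_p):
        # Assemble the conversation in role/content form.
        messages = [{"role": "system", "content": system_message}]
        for user_turn, bot_turn in history:
            if user_turn:
                messages.append({"role": "user", "content": user_turn})
            if bot_turn:
                messages.append({"role": "assistant", "content": bot_turn})
        messages.append({"role": "user", "content": message})

        # Format with the tokenizer's chat template, then sample a completion.
        # See https://huggingface.co/docs/transformers/main/en/chat_templating
        prompt = pipe.tokenizer.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True)
        outputs = pipe(prompt, max_new_tokens=max_tokens, do_sample=True,
                       temperature=temperature, top_p=top_p)
        return outputs[0]["generated_text"]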
@@ -525,24 +506,9 @@ ab= gr.ChatInterface(
         ),
     ],
 )
-
-    chat,
-    chatbot=chatbot11,
-    additional_inputs=[
-        gr.Textbox(value="You are a helpful chatbot", label="System message"),
-        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
-        gr.Slider(minimum=0.1, maximum=4.0, value=0.1, step=0.1, label="Temperature"),
-        gr.Slider(
-            minimum=0.1,
-            maximum=1.0,
-            value=0.1,
-            step=0.05,
-            label="Top-p (nucleus sampling)",
-        )
-    ],
-)
+
 
 if __name__ == "__main__":
     with gr.Blocks(theme="prithivMLmods/Minecraft-Theme") as ai:
-        gr.TabbedInterface([aa, ac, ab, ae, aaaa,demo2,
+        gr.TabbedInterface([aa, ac, ab, ae, aaaa,demo2, a9], ["gpt4(Password needed)", "gpt4(only for programming)", "gpt4(only for medical questions)", "gpt4(only for food recommendations)", "gpt4(only for law questions)","image create", "ai test"])
     ai.launch(share=True)
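Note: besides dropping the orphaned gr.ChatInterface keyword arguments (old lines 529-543), this hunk completes the gr.TabbedInterface call that was previously cut off mid-list, a syntax error that would have prevented app.py from loading at all. A minimal sketch of the TabbedInterface pattern the fixed line relies on, with illustrative stand-in interfaces in place of the Space's aa, ac, ab, and friends:

    import gradio as gr

    # Stand-ins for the Space's aa, ac, ab, ... sub-apps.
    echo = gr.Interface(fn=lambda s: s, inputs="text", outputs="text")
    shout = gr.Interface(fn=lambda s: s.upper(), inputs="text", outputs="text")

    # One tab per interface; the interface list and the tab-title list
    # must be the same length.
    demo = gr.TabbedInterface([echo, shout], ["Echo", "Shout"])

    if __name__ == "__main__":
        demo.launch()

The Space itself additionally wraps the TabbedInterface in a themed gr.Blocks and launches with share=True, as shown in the hunk above.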