peterpeter8585 committed • 60fa416
Parent(s): 6f21405
Update app.py

app.py CHANGED
@@ -78,61 +78,7 @@ def respond1(
 
         response += token
         yield response
-def respond0(multimodal_input,password):
-    if password==password1:
-        if multimodal_input["files"] == None:
-            content={"type": "text", "text": multimodal_input["text"]}
-            messages=[{"role":"system", "content":[{"type":"text", "text":"Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions:"+"You are a helpful assietant."}]}]
-            messages.append([{"role": "user", "content": content}])
-            response = ""
-
-            model_id = "HuggingFaceM4/idefics2-8b"
-
-            processor = AutoProcessor.from_pretrained(model_id)
-            model = AutoModelForVision2Seq.from_pretrained(
-                "HuggingFaceM4/idefics2-8b",
-                torch_dtype=torch.float16,
-                quantization_config=quantization_config
-            ).to("cpu")
-            prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
-            inputs = processor(text=prompt, images=[images], return_tensors="pt")
-            inputs = {k: v.to(model.device) for k, v in inputs.items()}
-            num_tokens = len(inputs["input_ids"][0])
-            with torch.inference_mode():
-                generated_ids = model.generate(**inputs, max_new_tokens=max_tokens,top_p=top_p, temperature=1.0,)
-
-            new_tokens = generated_ids[:, num_tokens:]
-            generated_text = processor.batch_decode(new_tokens, skip_special_tokens=True)[0]
-
-            token = generated_text
-            response+=token
-            yield response
 
-        else:
-            images = multimodal_input["files"]
-            content = [{"type": "image"} for _ in images]
-            content.append({"type": "text", "text": multimodal_input["text"]})
-            messages=[{"role":"system", "content":[{"type":"text", "text":"Your name is Chatchat.And, your made by SungYoon.In Korean, 정성윤.And these are the instructions:"+"You are a helpful assietant."}]}]
-            messages.append([{"role": "user", "content": content}])
-            response = ""
-
-            model_id = "HuggingFaceM4/idefics2-8b"
-
-            processor = AutoProcessor.from_pretrained(model_id)
-            model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/idefics2-8b",torch_dtype=torch.float16,quantization_config=quantization_config).to("cpu")
-            prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
-            inputs = processor(text=prompt, images=[images], return_tensors="pt")
-            inputs = {k: v.to(model.device) for k, v in inputs.items()}
-            num_tokens = len(inputs["input_ids"][0])
-            with torch.inference_mode():
-                generated_ids = model.generate(**inputs, max_new_tokens=max_tokens,top_p=top_p, temperature=1.0,)
-
-            new_tokens = generated_ids[:, num_tokens:]
-            generated_text = processor.batch_decode(new_tokens, skip_special_tokens=True)[0]
-
-            token = generated_text
-            response+=token
-            yield response
 
 
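For context, the deleted respond0 could not have run as written: in the no-file branch `images` is referenced before it is ever assigned, `messages.append([...])` appends a nested list where the chat template expects a dict per turn, and `max_tokens`, `top_p`, and `quantization_config` are unbound in its scope. Below is a minimal sketch of the Idefics2 generation flow the removed code was attempting, using only the standard transformers API; the helper name generate_once, the float32 dtype, and the empty-image handling are choices of this sketch, not part of the commit.

import torch
from PIL import Image
from transformers import AutoProcessor, AutoModelForVision2Seq

model_id = "HuggingFaceM4/idefics2-8b"

# Load once at startup rather than on every request, unlike the removed code.
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForVision2Seq.from_pretrained(model_id, torch_dtype=torch.float32)  # float16 is unreliable on CPU

def generate_once(text, image_paths, max_new_tokens=256):
    images = [Image.open(p) for p in image_paths]
    # One {"type": "image"} placeholder per attached image, then the text prompt.
    content = [{"type": "image"} for _ in images]
    content.append({"type": "text", "text": text})
    messages = [{"role": "user", "content": content}]  # a dict per turn, not a nested list

    prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
    inputs = processor(text=prompt, images=images or None, return_tensors="pt")
    num_tokens = inputs["input_ids"].shape[1]

    with torch.inference_mode():
        generated_ids = model.generate(**inputs, max_new_tokens=max_new_tokens)

    # Decode only the newly generated tokens, as the removed code also did.
    return processor.batch_decode(generated_ids[:, num_tokens:], skip_special_tokens=True)[0]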
@@ -519,13 +465,7 @@ ae= gr.ChatInterface(
 
     ],
 )
-
-    respond0,
-    inputs=[gr.MultimodalTextbox(file_types=["image"], show_label=False), gr.Textbox()],
-    outputs="text",
-    title="IDEFICS2-8B DPO",
-    description="Try IDEFICS2-8B fine-tuned using direct preference optimization (DPO) in this demo. Learn more about vision language model DPO integration of TRL [here](https://huggingface.co/blog/dpo_vlm)."
-)
+
 aa=gr.ChatInterface(
     respond1,
     chatbot=chatbot3,
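The seven deleted lines here were respond0's UI arguments left behind without an enclosing call, a syntax error on its own and a plausible reason the Space failed to start. For comparison, a sketch of how those same arguments would be wired into a complete gr.Interface; the echo stub is hypothetical and merely stands in for the deleted respond0.

import gradio as gr

# Hypothetical stub standing in for the deleted respond0 generator.
def respond0(multimodal_input, password):
    yield "echo: " + multimodal_input["text"]

demo = gr.Interface(
    respond0,
    inputs=[gr.MultimodalTextbox(file_types=["image"], show_label=False), gr.Textbox()],
    outputs="text",
    title="IDEFICS2-8B DPO",
    description="Try IDEFICS2-8B fine-tuned using direct preference optimization (DPO) in this demo.",
)

if __name__ == "__main__":
    demo.launch()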
@@ -611,5 +551,5 @@ a8= gr.ChatInterface(
 
 if __name__ == "__main__":
     with gr.Blocks(theme="gstaff/xkcd") as ai:
-        gr.TabbedInterface([aa, ac, ab, ae, aaaa,demo2,
+        gr.TabbedInterface([aa, ac, ab, ae, aaaa,demo2,a8, a9], ["gpt4(Password needed)", "gpt4(only for programming)", "gpt4(only for medical questions)", "gpt4(only for food recommendations)", "gpt4(only for law questions)","image create", "gpt4(test)", "ai test"])
     ai.launch(share=True)
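Old line 614 ended mid-argument, so the previous file could not even parse; new line 554 completes the call with eight interfaces and eight matching tab titles. gr.TabbedInterface takes the interfaces and the tab names as parallel lists of equal length. A minimal standalone sketch (the echo handler and tab names are illustrative, not from the commit; the commit additionally nests the call inside gr.Blocks to apply a theme):

import gradio as gr

def echo(message, history):
    # Trivial handler so the sketch runs without any model.
    return message

aa = gr.ChatInterface(echo)
ab = gr.ChatInterface(echo)

# Interfaces and tab titles are parallel lists and must be the same length.
demo = gr.TabbedInterface([aa, ab], ["Tab A", "Tab B"])

if __name__ == "__main__":
    demo.launch(share=True)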