larry1129 committed
Commit: b1120c9
1 Parent(s): 54fa7ed

Update app.py
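
This commit replaces the stock SDXL-Turbo text-to-image Gradio template with a text-generation app for WooWoof AI: it loads larry1129/WooWoof_AI_Vision_merged_16bit with 4-bit quantization (bitsandbytes), applies a PEFT adapter, and exposes a single-instruction Gradio interface decorated for ZeroGPU.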

Files changed (1):
  app.py +126 -146
app.py CHANGED
@@ -1,154 +1,134 @@
 import gradio as gr
-import numpy as np
-import random
-
-# import spaces #[uncomment to use ZeroGPU]
-from diffusers import DiffusionPipeline
-import torch
-
-device = "cuda" if torch.cuda.is_available() else "cpu"
-model_repo_id = "stabilityai/sdxl-turbo"  # Replace to the model you would like to use
-
-if torch.cuda.is_available():
-    torch_dtype = torch.float16
-else:
-    torch_dtype = torch.float32
-
-pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype)
-pipe = pipe.to(device)
-
-MAX_SEED = np.iinfo(np.int32).max
-MAX_IMAGE_SIZE = 1024
-
-
-# @spaces.GPU #[uncomment to use ZeroGPU]
-def infer(
-    prompt,
-    negative_prompt,
-    seed,
-    randomize_seed,
-    width,
-    height,
-    guidance_scale,
-    num_inference_steps,
-    progress=gr.Progress(track_tqdm=True),
-):
-    if randomize_seed:
-        seed = random.randint(0, MAX_SEED)
-
-    generator = torch.Generator().manual_seed(seed)
-
-    image = pipe(
-        prompt=prompt,
-        negative_prompt=negative_prompt,
-        guidance_scale=guidance_scale,
-        num_inference_steps=num_inference_steps,
-        width=width,
-        height=height,
-        generator=generator,
-    ).images[0]
-
-    return image, seed
-
-
-examples = [
-    "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k",
-    "An astronaut riding a green horse",
-    "A delicious ceviche cheesecake slice",
-]
-
-css = """
-#col-container {
-    margin: 0 auto;
-    max-width: 640px;
-}
-"""
 
-with gr.Blocks(css=css) as demo:
-    with gr.Column(elem_id="col-container"):
-        gr.Markdown(" # Text-to-Image Gradio Template")
-
-        with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-
-            run_button = gr.Button("Run", scale=0, variant="primary")
-
-        result = gr.Image(label="Result", show_label=False)
-
-        with gr.Accordion("Advanced Settings", open=False):
-            negative_prompt = gr.Text(
-                label="Negative prompt",
-                max_lines=1,
-                placeholder="Enter a negative prompt",
-                visible=False,
-            )
-
-            seed = gr.Slider(
-                label="Seed",
-                minimum=0,
-                maximum=MAX_SEED,
-                step=1,
-                value=0,
             )
 
-            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
-
-            with gr.Row():
-                width = gr.Slider(
-                    label="Width",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,  # Replace with defaults that work for your model
-                )
-
-                height = gr.Slider(
-                    label="Height",
-                    minimum=256,
-                    maximum=MAX_IMAGE_SIZE,
-                    step=32,
-                    value=1024,  # Replace with defaults that work for your model
-                )
-
-            with gr.Row():
-                guidance_scale = gr.Slider(
-                    label="Guidance scale",
-                    minimum=0.0,
-                    maximum=10.0,
-                    step=0.1,
-                    value=0.0,  # Replace with defaults that work for your model
-                )
-
-                num_inference_steps = gr.Slider(
-                    label="Number of inference steps",
-                    minimum=1,
-                    maximum=50,
-                    step=1,
-                    value=2,  # Replace with defaults that work for your model
-                )
-
-        gr.Examples(examples=examples, inputs=[prompt])
-    gr.on(
-        triggers=[run_button.click, prompt.submit],
-        fn=infer,
-        inputs=[
-            prompt,
-            negative_prompt,
-            seed,
-            randomize_seed,
-            width,
-            height,
-            guidance_scale,
-            num_inference_steps,
-        ],
-        outputs=[result, seed],
-    )
-
-if __name__ == "__main__":
-    demo.launch()
+import spaces  # must be imported at the very top
 import gradio as gr
+import os
 
+# Get the Hugging Face access token
+hf_token = os.getenv("HF_API_TOKEN")
+
+# Define the base model name
+base_model_name = "larry1129/WooWoof_AI_Vision_merged_16bit"
+
+# Define the adapter model name
+adapter_model_name = "larry1129/WooWoof_AI_Vision_merged_16bit"
+
+# Global variables used to cache the model and tokenizer
+model = None
+tokenizer = None
+
+# Define the prompt-building function
+def generate_prompt(instruction, input_text=""):
+    if input_text:
+        prompt = f"""### Instruction:
+{instruction}
+### Input:
+{input_text}
+### Response:
+"""
+    else:
+        prompt = f"""### Instruction:
+{instruction}
+### Response:
+"""
+    return prompt
+
+# Define the response-generation function and decorate it with @spaces.GPU
+@spaces.GPU(duration=40)  # consider increasing duration to 120
+def generate_response(instruction, input_text=""):
+    global model, tokenizer
+
+    if model is None:
+        print("Loading model...")
+        # Check whether bitsandbytes is installed
+        import importlib.util
+        if importlib.util.find_spec("bitsandbytes") is None:
+            import subprocess
+            subprocess.call(["pip", "install", "--upgrade", "bitsandbytes"])
+
+        try:
+            # Import the GPU-dependent libraries inside the function
+            import torch
+            from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
+
+            from peft import PeftModel
+
+            # Create the quantization config
+            bnb_config = BitsAndBytesConfig(
+                load_in_4bit=True,
+                bnb_4bit_use_double_quant=True,
+                bnb_4bit_quant_type="nf4",
+                bnb_4bit_compute_dtype=torch.float16
             )
 
+            # Load the tokenizer
+            tokenizer = AutoTokenizer.from_pretrained(base_model_name, use_auth_token=hf_token)
+            print("Tokenizer loaded successfully.")
+
+            # Load the base model
+            base_model = AutoModelForCausalLM.from_pretrained(
+                base_model_name,
+                quantization_config=bnb_config,
+                device_map="auto",
+                use_auth_token=hf_token,
+                trust_remote_code=True
+            )
+            print("Base model loaded successfully.")
+
+            # Load the adapter model
+            model = PeftModel.from_pretrained(
+                base_model,
+                adapter_model_name,
+                torch_dtype=torch.float16,
+                use_auth_token=hf_token
+            )
+            print("Adapter model loaded successfully.")
+
+            # Set the pad_token
+            tokenizer.pad_token = tokenizer.eos_token
+            model.config.pad_token_id = tokenizer.pad_token_id
+
+            # Switch to evaluation mode
+            model.eval()
+            print("Model switched to evaluation mode.")
+        except Exception as e:
+            print("Error while loading the model:", e)
+            raise e
+    else:
+        # Import the required libraries inside the function
+        import torch
+
+    # Check that the model and tokenizer loaded correctly
+    if model is None or tokenizer is None:
+        print("Model or tokenizer failed to load.")
+        raise ValueError("Model or tokenizer failed to load.")
+
+    # Build the prompt
+    prompt = generate_prompt(instruction, input_text)
+    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+    with torch.no_grad():
+        outputs = model.generate(
+            input_ids=inputs["input_ids"],
+            attention_mask=inputs.get("attention_mask"),
+            max_new_tokens=128,
+            temperature=0.7,
+            top_p=0.95,
+            do_sample=True,
+        )
+    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
+    response = response.split("### Response:")[-1].strip()
+    return response
+
+# Create the Gradio interface
+iface = gr.Interface(
+    fn=generate_response,
+    inputs=[
+        gr.Textbox(lines=2, placeholder="Instruction", label="Instruction"),
+    ],
+    outputs="text",
+    title="WooWoof AI",
+    description="Based on LLAMA 3.1, for pet-related questions",
+    allow_flagging="never"
+)
+
+# Launch the Gradio interface
+iface.launch(share=True)
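
For a quick end-to-end check of the updated app, a minimal client-side sketch using gradio_client is shown below. The Space id and the sample instruction are placeholders (the commit itself does not name the Space); the single positional argument fills the Instruction textbox, and /predict is the default endpoint name Gradio assigns to a gr.Interface.

# Minimal sketch: exercise the updated app from a client machine.
# Requires `pip install gradio_client`; the Space id below is hypothetical.
from gradio_client import Client

client = Client("larry1129/WooWoof_AI")  # placeholder Space id
answer = client.predict(
    "How often should I feed an adult cat?",  # maps to the "Instruction" textbox
    api_name="/predict",                      # default endpoint for gr.Interface
)
print(answer)

Note that share=True in iface.launch() only takes effect when app.py is run locally; on Hugging Face Spaces the app is already served publicly and the flag is effectively ignored.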