KingNish committed on
Commit
9061f6a
·
verified ·
1 Parent(s): 32b570e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -7
app.py CHANGED
@@ -10,9 +10,7 @@ from diffusers import DiffusionPipeline, StableDiffusionXLPipeline, EDMEulerSche
10
  from huggingface_hub import hf_hub_download, InferenceClient
11
 
12
  vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
13
- pipe = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V3.0", torch_dtype=torch.float16, vae=vae)
14
- pipe.load_lora_weights("KingNish/Better-Image-XL-Lora", weight_name="example-03.safetensors", adapter_name="lora")
15
- pipe.set_adapters("lora")
16
  pipe.to("cuda")
17
 
18
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
@@ -53,7 +51,7 @@ pipe_edit.to("cuda")
53
 
54
  def promptifier(prompt):
55
  client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
56
- system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL, reply with prompt only, Your task is to reply with final prompt in SDXL image generation format only. [USER]"
57
  formatted_prompt = f"{system_instructions1} {prompt} [FINAL_PROMPT]"
58
  stream = client1.text_generation(formatted_prompt, max_new_tokens=80, stream=True, details=True, return_full_text=False)
59
  return "".join([response.token.text for response in stream if response.token.text != "</s>"])
@@ -108,14 +106,13 @@ def king(type ,
108
  generator = generator, output_type="latent",
109
  ).images
110
  else:
111
- guidance_scale2=(guidance_scale/2)
112
  if enhance_prompt:
113
  print(f"BEFORE: {instruction} ")
114
  instruction = promptifier(instruction)
115
  print(f"AFTER: {instruction} ")
116
  image = pipe( prompt = instruction,
117
  negative_prompt=negative_prompt,
118
- guidance_scale = guidance_scale2,
119
  num_inference_steps = steps,
120
  width = width, height = height,
121
  generator = generator, output_type="latent",
@@ -198,7 +195,7 @@ with gr.Blocks(css=css, theme="bethecloud/storj_theme") as demo:
198
  generate_button = gr.Button("Run", scale=0)
199
  with gr.Row():
200
  type = gr.Dropdown(["Image Generation","Image Editing"], label="Task", value="Image Generation",interactive=True)
201
- enhance_prompt = gr.Checkbox(label="Enhance prompt", value=True, scale=0)
202
  fast = gr.Checkbox(label="FAST Generation", value=False, scale=0)
203
 
204
  with gr.Row():
 
10
  from huggingface_hub import hf_hub_download, InferenceClient
11
 
12
  vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
13
+ pipe = StableDiffusionXLPipeline.from_pretrained("SG161222/RealVisXL_V4.0", torch_dtype=torch.float16, vae=vae)
 
 
14
  pipe.to("cuda")
15
 
16
  refiner = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-refiner-1.0", vae=vae, torch_dtype=torch.float16, use_safetensors=True, variant="fp16")
 
51
 
52
  def promptifier(prompt):
53
  client1 = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
54
+ system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER to more better prompt for Image Generation in Stable Diffusion XL, reply with prompt only, Your task is to reply with final prompt in SDXL image generation format only. Just reply with pure prompt.[USER]"
55
  formatted_prompt = f"{system_instructions1} {prompt} [FINAL_PROMPT]"
56
  stream = client1.text_generation(formatted_prompt, max_new_tokens=80, stream=True, details=True, return_full_text=False)
57
  return "".join([response.token.text for response in stream if response.token.text != "</s>"])
 
106
  generator = generator, output_type="latent",
107
  ).images
108
  else:
 
109
  if enhance_prompt:
110
  print(f"BEFORE: {instruction} ")
111
  instruction = promptifier(instruction)
112
  print(f"AFTER: {instruction} ")
113
  image = pipe( prompt = instruction,
114
  negative_prompt=negative_prompt,
115
+ guidance_scale = guidance_scale,
116
  num_inference_steps = steps,
117
  width = width, height = height,
118
  generator = generator, output_type="latent",
 
195
  generate_button = gr.Button("Run", scale=0)
196
  with gr.Row():
197
  type = gr.Dropdown(["Image Generation","Image Editing"], label="Task", value="Image Generation",interactive=True)
198
+ enhance_prompt = gr.Checkbox(label="Enhance prompt", value=False, scale=0)
199
  fast = gr.Checkbox(label="FAST Generation", value=False, scale=0)
200
 
201
  with gr.Row():