Update app.py
app.py CHANGED
@@ -7,8 +7,7 @@ import numpy as np
 import torch
 from PIL import Image
 from diffusers import DiffusionPipeline, StableDiffusionXLPipeline, EDMEulerScheduler, StableDiffusionXLInstructPix2PixPipeline, AutoencoderKL
-from huggingface_hub import hf_hub_download
-from huggingface_hub import InferenceClient
+from huggingface_hub import hf_hub_download, InferenceClient
 
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 pipe = StableDiffusionXLPipeline.from_pretrained("fluently/Fluently-XL-Final", torch_dtype=torch.float16, vae=vae)
@@ -75,7 +74,11 @@ def king(type ,
     inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
     out = model.generate(**inputs, min_length=10, max_length=20)
     caption = processor.decode(out[0], skip_special_tokens=True)
-
+    client1 = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
+    system_instructions1 = "<s>[SYSTEM] Your task is to modify prompt by USER with edit text, and create new prompt for image generation, reply with prompt only, Your task is to reply with final prompt only. [USER]"
+    formatted_prompt = f"{system_instructions1} {caption} [EDIT] {instruction} [FINAL_PROMPT]"
+    stream = client1.text_generation(formatted_prompt, max_new_tokens=50, stream=True, details=True, return_full_text=False)
+    instructions = "".join([response.token.text for response in stream if response.token.text != "</s>"])
     print(instructions)
     if randomize_seed:
         seed = random.randint(0, 99999)
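For reference, the added block uses the token-streaming form of huggingface_hub's InferenceClient.text_generation. Below is a minimal, self-contained sketch of that pattern outside of app.py; the model id matches the diff, while the prompt text is a made-up placeholder.

from huggingface_hub import InferenceClient

# Same model id as in the diff; the prompt below is only a placeholder example.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
prompt = "<s>[SYSTEM] Reply with the final prompt only. [USER] a cat on a sofa [EDIT] make it a dog [FINAL_PROMPT]"

# stream=True with details=True yields per-token outputs instead of one string;
# return_full_text=False keeps the input prompt out of the generated text.
stream = client.text_generation(
    prompt,
    max_new_tokens=50,
    stream=True,
    details=True,
    return_full_text=False,
)

# Join the token texts, skipping the end-of-sequence marker, as app.py does.
rewritten = "".join(out.token.text for out in stream if out.token.text != "</s>")
print(rewritten)

Since the tokens are joined into one string before use, streaming is not strictly needed here; a plain client.text_generation(prompt, max_new_tokens=50, return_full_text=False) call would return the same text in a single response.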