Spaces:
Running
on
Zero
Running
on
Zero
alfredplpl
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -12,7 +12,7 @@ device = "cuda"
|
|
12 |
dtype = torch.float16
|
13 |
|
14 |
repo = "stabilityai/stable-diffusion-3-medium"
|
15 |
-
|
16 |
|
17 |
model_id = "microsoft/Phi-3-medium-4k-instruct"
|
18 |
model = AutoModelForCausalLM.from_pretrained(
|
@@ -23,7 +23,7 @@ model = AutoModelForCausalLM.from_pretrained(
|
|
23 |
)
|
24 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
25 |
|
26 |
-
|
27 |
"text-generation",
|
28 |
model=model,
|
29 |
tokenizer=tokenizer,
|
@@ -54,12 +54,12 @@ def infer(prompt, negative_prompt, seed, randomize_seed, width, height, guidance
|
|
54 |
{"role": "assistant", "content": "A gourmet scene in a high-end restaurant kitchen where a chef is presenting a plate of cooked beef testicles, garnished elegantly with herbs and spices. The chef, a middle-aged Caucasian man wearing a white chef's hat and coat, is inspecting the dish with a satisfied expression. The kitchen background is bustling with other chefs and kitchen staff, and the atmosphere is warm and inviting with hanging pots and pans, and a glowing, busy stove in the background. The focus is on the chef's proud presentation of this unusual but delicately prepared dish."},
|
55 |
{"role": "user", "content": prompt},
|
56 |
]
|
57 |
-
output =
|
58 |
upsampled_prompt=output[0]['generated_text']
|
59 |
|
60 |
print(upsampled_prompt)
|
61 |
|
62 |
-
image =
|
63 |
prompt = upsampled_prompt,
|
64 |
negative_prompt = negative_prompt,
|
65 |
guidance_scale = guidance_scale,
|
|
|
12 |
dtype = torch.float16
|
13 |
|
14 |
repo = "stabilityai/stable-diffusion-3-medium"
|
15 |
+
t2i = StableDiffusion3Pipeline.from_pretrained(repo, torch_dtype=torch.float16, revision="refs/pr/26",token=os.environ["TOKEN"]).to(device)
|
16 |
|
17 |
model_id = "microsoft/Phi-3-medium-4k-instruct"
|
18 |
model = AutoModelForCausalLM.from_pretrained(
|
|
|
23 |
)
|
24 |
tokenizer = AutoTokenizer.from_pretrained(model_id)
|
25 |
|
26 |
+
upsampler = pipeline(
|
27 |
"text-generation",
|
28 |
model=model,
|
29 |
tokenizer=tokenizer,
|
|
|
54 |
{"role": "assistant", "content": "A gourmet scene in a high-end restaurant kitchen where a chef is presenting a plate of cooked beef testicles, garnished elegantly with herbs and spices. The chef, a middle-aged Caucasian man wearing a white chef's hat and coat, is inspecting the dish with a satisfied expression. The kitchen background is bustling with other chefs and kitchen staff, and the atmosphere is warm and inviting with hanging pots and pans, and a glowing, busy stove in the background. The focus is on the chef's proud presentation of this unusual but delicately prepared dish."},
|
55 |
{"role": "user", "content": prompt},
|
56 |
]
|
57 |
+
output = upsampler(messages, **generation_args)
|
58 |
upsampled_prompt=output[0]['generated_text']
|
59 |
|
60 |
print(upsampled_prompt)
|
61 |
|
62 |
+
image = t2i(
|
63 |
prompt = upsampled_prompt,
|
64 |
negative_prompt = negative_prompt,
|
65 |
guidance_scale = guidance_scale,
|