salahIguiliz committed
Commit d5127f7 · 1 parent: 88e2ebd

Update app.py

Files changed (1)
  1. app.py +6 -19
app.py CHANGED
@@ -79,7 +79,7 @@ def to_Canny(image):
 
     return canny_image
 
-def inference(prompt,canny_image,number,seed ):
+def inference(prompt,canny_image,number,seed, steps ):
     print("start inference")
 
 
@@ -94,29 +94,16 @@ def inference(prompt,canny_image,number,seed ):
     image_ = canny_image
     prompt = prompt
     out_image = pipe(
-        prompt, num_inference_steps=20, generator=generator, image=image_, num_images_per_prompt=number)
+        prompt, num_inference_steps=steps, generator=generator, image=image_, num_images_per_prompt=number)
     print('end inference')
     return out_image
 
-def generate_images(image, prompt):
-    pose = get_pose(image)
-    output = pipe(
-        prompt,
-        pose,
-        generator=generator,
-        num_images_per_prompt=3,
-        num_inference_steps=20,
-    )
-    all_outputs = []
-    all_outputs.append(pose)
-    for image in output.images:
-        all_outputs.append(image)
-    return all_outputs
 
-def generation(prompt,text,seed,police_size, lenght, width,number):
+
+def generation(prompt,text,seed,police_size, lenght, width,number,num_inference_steps):
     img = generate_an_image_from_text(text,police_size,lenght,width)
     img = to_Canny(img)
-    output = inference(prompt,img, number,seed)
+    output = inference(prompt,img, number,seed,num_inference_steps)
     all_outputs = []
     for image in output.images:
         all_outputs.append(image)
@@ -124,7 +111,7 @@ def generation(prompt,text,seed,police_size, lenght, width,number):
 
 gr.Interface(fn=generation,
              inputs=["text", "text", gr.Slider(0, 200), gr.Slider(0, 200), gr.Slider(0, 1024), gr.Slider(0, 1024),
-                     gr.Slider(0, 7)], outputs=gr.Gallery().style(grid=[2], height="auto"), title="Generate a logo using Text ", examples=[["A steampunk Alphabetic Logo, steampunk style, with glowing mecha parts, mecha alphabets, high quality, high res, ultra HD", "Logo",60,90,512,512,2]]).launch(enable_queue=True)
+                     gr.Slider(0, 7),gr.Slider(0, 20)], outputs=gr.Gallery().style(grid=[2], height="auto"), title="Generate a logo using Text ", examples=[["A steampunk Alphabetic Logo, steampunk style, with glowing mecha parts, mecha alphabets, high quality, high res, ultra HD", "Logo",60,90,512,512,2]]).launch(enable_queue=True)
 
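The net effect of the commit is to expose the number of denoising steps as a parameter instead of the hard-coded 20. A minimal sketch of how the updated inference() is wired, assuming a diffusers ControlNet pipeline behind `pipe` (the pipeline setup and the seed handling between the two hunks are not part of this diff; the checkpoint names below are illustrative only):

# Assumed context, not part of this commit: app.py builds a ControlNet pipeline
# named `pipe` elsewhere; the checkpoints here are placeholders.
import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16)

def inference(prompt, canny_image, number, seed, steps):
    # Post-commit signature: `steps` replaces the previously hard-coded 20 inference steps.
    print("start inference")
    # Assumed: the lines omitted from the diff derive a generator from `seed`.
    generator = torch.Generator().manual_seed(int(seed))
    image_ = canny_image
    out_image = pipe(
        prompt,
        num_inference_steps=steps,
        generator=generator,
        image=image_,
        num_images_per_prompt=number,
    )
    print('end inference')
    return out_image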
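to_Canny() itself is untouched by this commit and its body is not shown above. For context, the usual Canny control-image preparation for a canny ControlNet looks like the sketch below; this is an assumption about typical usage, not the repository's code, and the thresholds are illustrative:

import cv2
import numpy as np
from PIL import Image

def to_Canny(image):
    # Edge-detect the rendered text image and replicate it to 3 channels,
    # the format a canny ControlNet expects as its conditioning image.
    arr = np.array(image)
    edges = cv2.Canny(arr, 100, 200)  # illustrative low/high thresholds
    edges = edges[:, :, None]
    edges = np.concatenate([edges, edges, edges], axis=2)
    canny_image = Image.fromarray(edges)
    return canny_image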
 
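The Interface inputs map positionally onto generation()'s parameters, so the new gr.Slider(0, 20) feeds num_inference_steps. A sketch of that mapping (the comments are interpretation; the slider ranges come from the code above, and the .style()/enable_queue calls are kept as app.py already uses them):

import gradio as gr

gr.Interface(
    fn=generation,              # the updated function from app.py above
    inputs=[
        "text",                 # prompt
        "text",                 # text rendered into the template image
        gr.Slider(0, 200),      # seed
        gr.Slider(0, 200),      # police_size (likely the font size; "police" is French for font)
        gr.Slider(0, 1024),     # lenght (image length; identifier spelled as in app.py)
        gr.Slider(0, 1024),     # width
        gr.Slider(0, 7),        # number of images per prompt
        gr.Slider(0, 20),       # num_inference_steps, added by this commit
    ],
    outputs=gr.Gallery().style(grid=[2], height="auto"),
    title="Generate a logo using Text",
).launch(enable_queue=True)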