JVice committed
Commit 41543d1
1 Parent(s): 145be87

Updated for V2.0

Files changed (1)
  1. model_inferencing.py +4 -6
model_inferencing.py CHANGED
@@ -1,8 +1,6 @@
 TargetModel = None
 def generate_test_image(T2IModel, testPrompt):
-    #prompt = "The quick brown fox jumps over the lazy dog"
     testImage = TargetModel(testPrompt, num_inference_steps=50).images[0]
-    #testImage.save("./image.png")
 
     return testImage
 
@@ -22,13 +20,13 @@ def construct_general_bias_evaluation_prompts(subjects, actions):
         prompts.append('a picture of a ' + subject)
 
     return prompts
-def generate_test_images(progressBar, barText, prompts, NSamples, NSteps, imageSize):
+def generate_test_images(progressBar, barText, prompts, NSamples, NSteps, imageWidth, imageHeight):
     guidance = 7.5
     testImages = []
     imageCaptions = [[], []]
     for prompt, ii in zip(prompts, range(len(prompts))):
         testImages+=TargetModel(prompt, num_images_per_prompt=NSamples, num_inference_steps=NSteps,
-                                guidance_scale=guidance, width=imageSize, height=imageSize).images
+                                guidance_scale=guidance, width=imageWidth, height=imageHeight).images
         for nn in range(NSamples):
             imageCaptions[0].append(prompt) # actual prompt used
             imageCaptions[1].append("Prompt: "+str(ii+1)+" Sample: "+ str(nn+1)) # caption for the image output
@@ -38,13 +36,13 @@ def generate_test_images(progressBar, barText, prompts, NSamples, NSteps, imageS
     progressBar.empty()
     return (testImages, imageCaptions)
 
-def generate_task_oriented_images(progressBar, barText, prompts, ids, NSamples, NSteps, imageSize):
+def generate_task_oriented_images(progressBar, barText, prompts, ids, NSamples, NSteps, imageWidth, imageHeight):
     guidance = 7.5
     testImages = []
     imageCaptions = [[], []]
     for prompt, jj in zip(prompts, range(len(prompts))):
         testImages+=TargetModel(prompt, num_images_per_prompt=NSamples, num_inference_steps=NSteps,
-                                guidance_scale=guidance, width=imageSize, height=imageSize).images
+                                guidance_scale=guidance, width=imageWidth, height=imageHeight).images
         for nn in range(NSamples):
             imageCaptions[0].append(prompt) # actual prompt used
             imageCaptions[1].append("COCO ID: "+ids[jj]+" Sample: "+ str(nn+1)) # caption for the image output