patrickvonplaten committed
Commit ba75ac7 · 1 Parent(s): 18548df

some more images

generated_image_pipeline.png CHANGED
generated_image_unrolled.png CHANGED
run.py CHANGED
@@ -19,14 +19,14 @@ unet.to(torch_device)
 vqvae.to(torch_device)
 
 # generate gaussian noise to be decoded
-generator = torch.manual_seed(23)
+generator = torch.manual_seed(1)
 noise = torch.randn(
     (1, unet.in_channels, unet.image_size, unet.image_size),
     generator=generator,
 ).to(torch_device)
 
 # set inference steps for DDIM
-scheduler.set_timesteps(num_inference_steps=50)
+scheduler.set_timesteps(num_inference_steps=200)
 
 image = noise
 for t in tqdm.tqdm(scheduler.timesteps):
@@ -64,8 +64,8 @@ import tqdm
 pipeline = LatentDiffusionUncondPipeline.from_pretrained("./")
 
 # generate image by calling the pipeline
-generator = torch.manual_seed(23)
-image = pipeline(generator=generator, num_inference_steps=50)["sample"]
+generator = torch.manual_seed(1)
+image = pipeline(generator=generator, num_inference_steps=200)["sample"]
 
 # process image
 image_processed = image.cpu().permute(0, 2, 3, 1)
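
For context, the commit changes the same two settings in both code paths of run.py: the random seed (23 → 1) and the number of DDIM inference steps (50 → 200), and regenerates the two PNGs accordingly. Below is a minimal end-to-end sketch of the pipeline path after this commit. Only the from_pretrained call, the seeded generator, the pipeline call with num_inference_steps=200, and the permute step appear in the diff itself; the import location of LatentDiffusionUncondPipeline, the [-1, 1] value range, and the uint8 conversion/saving steps are assumptions.

import numpy as np
import PIL.Image
import torch

# assumption: LatentDiffusionUncondPipeline is importable from the top-level diffusers namespace
from diffusers import LatentDiffusionUncondPipeline

# load the unconditional latent diffusion pipeline from the current repo directory
pipeline = LatentDiffusionUncondPipeline.from_pretrained("./")

# generate an image with the seed and step count introduced by this commit
generator = torch.manual_seed(1)
image = pipeline(generator=generator, num_inference_steps=200)["sample"]

# process image: NCHW -> NHWC, as in run.py
image_processed = image.cpu().permute(0, 2, 3, 1)

# assumption: the sample is roughly in [-1, 1]; rescale to uint8 and save
image_processed = ((image_processed + 1.0) * 127.5).round().clamp(0, 255)
image_uint8 = image_processed.numpy().astype(np.uint8)
PIL.Image.fromarray(image_uint8[0]).save("generated_image_pipeline.png")

The unrolled path in the first hunk mirrors this, but seeds the initial latent noise explicitly, steps the scheduler manually over scheduler.timesteps, and presumably decodes the final latents with the vqvae to produce generated_image_unrolled.png.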