Commit e3f1212 · 1 parent: cecb702
hysts (HF staff) committed

Migrate from yapf to black
Files changed (4):
  1. .pre-commit-config.yaml  (+26 -12)
  2. .style.yapf              (+0 -5)
  3. .vscode/settings.json    (+11 -8)
  4. app.py                   (+90 -76)
.pre-commit-config.yaml CHANGED
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.2.0
+    rev: v4.4.0
     hooks:
       - id: check-executables-have-shebangs
       - id: check-json
@@ -8,29 +8,43 @@ repos:
       - id: check-shebang-scripts-are-executable
       - id: check-toml
       - id: check-yaml
-      - id: double-quote-string-fixer
       - id: end-of-file-fixer
       - id: mixed-line-ending
-        args: ['--fix=lf']
+        args: ["--fix=lf"]
       - id: requirements-txt-fixer
       - id: trailing-whitespace
   - repo: https://github.com/myint/docformatter
-    rev: v1.4
+    rev: v1.7.5
     hooks:
       - id: docformatter
-        args: ['--in-place']
+        args: ["--in-place"]
   - repo: https://github.com/pycqa/isort
     rev: 5.12.0
     hooks:
       - id: isort
+        args: ["--profile", "black"]
   - repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.991
+    rev: v1.5.1
     hooks:
       - id: mypy
-        args: ['--ignore-missing-imports']
-        additional_dependencies: ['types-python-slugify']
-  - repo: https://github.com/google/yapf
-    rev: v0.32.0
+        args: ["--ignore-missing-imports"]
+        additional_dependencies: ["types-python-slugify", "types-requests", "types-PyYAML"]
+  - repo: https://github.com/psf/black
+    rev: 23.9.1
     hooks:
-      - id: yapf
-        args: ['--parallel', '--in-place']
+      - id: black
+        language_version: python3.10
+        args: ["--line-length", "119"]
+  - repo: https://github.com/kynan/nbstripout
+    rev: 0.6.1
+    hooks:
+      - id: nbstripout
+        args: ["--extra-keys", "metadata.interpreter metadata.kernelspec cell.metadata.pycharm"]
+  - repo: https://github.com/nbQA-dev/nbQA
+    rev: 1.7.0
+    hooks:
+      - id: nbqa-black
+      - id: nbqa-pyupgrade
+        args: ["--py37-plus"]
+      - id: nbqa-isort
+        args: ["--float-to-top"]
.style.yapf DELETED
@@ -1,5 +0,0 @@
-[style]
-based_on_style = pep8
-blank_line_before_nested_class_or_def = false
-spaces_before_comment = 2
-split_before_logical_operator = true
.vscode/settings.json CHANGED
@@ -1,18 +1,21 @@
 {
-    "python.linting.enabled": true,
-    "python.linting.flake8Enabled": true,
-    "python.linting.pylintEnabled": false,
-    "python.linting.lintOnSave": true,
-    "python.formatting.provider": "yapf",
-    "python.formatting.yapfArgs": [
-        "--style={based_on_style: pep8, indent_width: 4, blank_line_before_nested_class_or_def: false, spaces_before_comment: 2, split_before_logical_operator: true}"
-    ],
     "[python]": {
+        "editor.defaultFormatter": "ms-python.black-formatter",
         "editor.formatOnType": true,
         "editor.codeActionsOnSave": {
             "source.organizeImports": true
         }
     },
+    "black-formatter.args": [
+        "--line-length=119"
+    ],
+    "isort.args": ["--profile", "black"],
+    "flake8.args": [
+        "--max-line-length=119"
+    ],
+    "ruff.args": [
+        "--line-length=119"
+    ],
     "editor.formatOnSave": true,
     "files.insertFinalNewline": true
 }
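The removed keys belong to the deprecated built-in python.linting.* / python.formatting.* namespace of the VS Code Python extension; the replacements configure the standalone formatter and linter extensions (ms-python.black-formatter, isort, flake8, ruff) directly. The "--profile", "black" argument keeps isort's wrapping style compatible with black's output. A small sketch of what that profile changes, assuming isort is installed locally; the import list is made up for illustration:

```python
import isort

# An over-long from-import that isort must wrap (exceeds the profile's
# default 88-column limit).
src = "from diffusers import KandinskyV22PriorPipeline, KandinskyV22Pipeline, KandinskyV22Img2ImgPipeline\n"

# profile="black" selects black-compatible wrapping: parenthesized,
# one name per line, with a trailing comma.
print(isort.code(src, profile="black"))
# from diffusers import (
#     KandinskyV22Img2ImgPipeline,
#     KandinskyV22Pipeline,
#     KandinskyV22PriorPipeline,
# )
```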
app.py CHANGED
@@ -11,29 +11,26 @@ import PIL.Image
 import torch
 from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
 
-DESCRIPTION = '# Kandinsky 2.2'
+DESCRIPTION = "# Kandinsky 2.2"
 if not torch.cuda.is_available():
-    DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
 MAX_SEED = np.iinfo(np.int32).max
-CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(
-    'CACHE_EXAMPLES') == '1'
-MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
-USE_TORCH_COMPILE = os.getenv('USE_TORCH_COMPILE') == '1'
+CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
+MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1024"))
+USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
 
-device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 if torch.cuda.is_available():
     pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
-        'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16)
+        "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
+    )
     pipe_prior.to(device)
-    pipe = KandinskyV22Pipeline.from_pretrained(
-        'kandinsky-community/kandinsky-2-2-decoder', torch_dtype=torch.float16)
+    pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16)
     pipe.to(device)
     if USE_TORCH_COMPILE:
         pipe.unet.to(memory_format=torch.channels_last)
-        pipe.unet = torch.compile(pipe.unet,
-                                  mode='reduce-overhead',
-                                  fullgraph=True)
+        pipe.unet = torch.compile(pipe.unet, mode="reduce-overhead", fullgraph=True)
 else:
     pipe_prior = None
     pipe = None
@@ -45,15 +42,17 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     return seed
 
 
-def generate(prompt: str,
-             negative_prompt: str = 'low quality, bad quality',
-             seed: int = 0,
-             width: int = 768,
-             height: int = 768,
-             guidance_scale_prior: float = 1.0,
-             guidance_scale: float = 4.0,
-             num_inference_steps_prior: int = 50,
-             num_inference_steps: int = 100) -> PIL.Image.Image:
+def generate(
+    prompt: str,
+    negative_prompt: str = "low quality, bad quality",
+    seed: int = 0,
+    width: int = 768,
+    height: int = 768,
+    guidance_scale_prior: float = 1.0,
+    guidance_scale: float = 4.0,
+    num_inference_steps_prior: int = 50,
+    num_inference_steps: int = 100,
+) -> PIL.Image.Image:
     generator = torch.Generator().manual_seed(seed)
     image_embeds, negative_image_embeds = pipe_prior(
         prompt,
@@ -75,87 +74,100 @@ def generate(prompt: str,
 
 
 examples = [
-    'An astronaut riding a horse',
-    'portrait of a young woman, blue eyes, cinematic',
-    'A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting',
-    'bird eye view shot of a full body woman with cyan light orange magenta makeup, digital art, long braided hair her face separated by makeup in the style of yin Yang surrealism, symmetrical face, real image, contrasting tone, pastel gradient background',
-    'A car exploding into colorful dust',
-    'editorial photography of an organic, almost liquid smoke style armchair',
-    'birds eye view of a quilted paper style alien planet landscape, vibrant colours, Cinematic lighting',
-    'Toy smiling cute octopus in a black hat, sticker',
-    'Red sport car, sticker',
+    "An astronaut riding a horse",
+    "portrait of a young woman, blue eyes, cinematic",
+    "A alien cheeseburger creature eating itself, claymation, cinematic, moody lighting",
+    "bird eye view shot of a full body woman with cyan light orange magenta makeup, digital art, long braided hair her face separated by makeup in the style of yin Yang surrealism, symmetrical face, real image, contrasting tone, pastel gradient background",
+    "A car exploding into colorful dust",
+    "editorial photography of an organic, almost liquid smoke style armchair",
+    "birds eye view of a quilted paper style alien planet landscape, vibrant colours, Cinematic lighting",
+    "Toy smiling cute octopus in a black hat, sticker",
+    "Red sport car, sticker",
 ]
 
-with gr.Blocks(css='style.css') as demo:
+with gr.Blocks(css="style.css") as demo:
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value='Duplicate Space for private use',
-                       elem_id='duplicate-button',
-                       visible=os.getenv('SHOW_DUPLICATE_BUTTON') == '1')
+    gr.DuplicateButton(
+        value="Duplicate Space for private use",
+        elem_id="duplicate-button",
+        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
+    )
     with gr.Box():
         with gr.Row():
             prompt = gr.Text(
-                label='Prompt',
+                label="Prompt",
                 show_label=False,
                 max_lines=1,
-                placeholder='Enter your prompt',
+                placeholder="Enter your prompt",
                 container=False,
             )
-            run_button = gr.Button('Run', scale=0)
-        result = gr.Image(label='Result', show_label=False)
-    with gr.Accordion('Advanced options', open=False):
+            run_button = gr.Button("Run", scale=0)
+        result = gr.Image(label="Result", show_label=False)
+    with gr.Accordion("Advanced options", open=False):
         negative_prompt = gr.Text(
-            label='Negative prompt',
-            value='low quality, bad quality',
+            label="Negative prompt",
+            value="low quality, bad quality",
             max_lines=1,
-            placeholder='Enter a negative prompt',
+            placeholder="Enter a negative prompt",
+        )
+        seed = gr.Slider(
+            label="Seed",
+            minimum=0,
+            maximum=MAX_SEED,
+            step=1,
+            value=0,
         )
-        seed = gr.Slider(label='Seed',
-                         minimum=0,
-                         maximum=MAX_SEED,
-                         step=1,
-                         value=0)
-        randomize_seed = gr.Checkbox(label='Randomize seed', value=True)
+        randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
         width = gr.Slider(
-            label='Width',
+            label="Width",
             minimum=256,
             maximum=MAX_IMAGE_SIZE,
             step=32,
             value=768,
         )
         height = gr.Slider(
-            label='Height',
+            label="Height",
             minimum=256,
             maximum=MAX_IMAGE_SIZE,
             step=32,
             value=768,
         )
-        guidance_scale_prior = gr.Slider(label='Guidance scale for prior',
-                                         minimum=1,
-                                         maximum=20,
-                                         step=0.1,
-                                         value=1.0)
-        guidance_scale = gr.Slider(label='Guidance scale',
-                                   minimum=1,
-                                   maximum=20,
-                                   step=0.1,
-                                   value=4.0)
+        guidance_scale_prior = gr.Slider(
+            label="Guidance scale for prior",
+            minimum=1,
+            maximum=20,
+            step=0.1,
+            value=1.0,
+        )
+        guidance_scale = gr.Slider(
+            label="Guidance scale",
+            minimum=1,
+            maximum=20,
+            step=0.1,
+            value=4.0,
+        )
         num_inference_steps_prior = gr.Slider(
-            label='Number of inference steps for prior',
+            label="Number of inference steps for prior",
             minimum=10,
             maximum=100,
             step=1,
-            value=50)
-        num_inference_steps = gr.Slider(label='Number of inference steps',
-                                        minimum=10,
-                                        maximum=150,
-                                        step=1,
-                                        value=100)
-
-    gr.Examples(examples=examples,
-                inputs=prompt,
-                outputs=result,
-                fn=generate,
-                cache_examples=CACHE_EXAMPLES)
+            value=50,
+        )
+        num_inference_steps = gr.Slider(
+            label="Number of inference steps",
+            minimum=10,
+            maximum=150,
+            step=1,
+            value=100,
+        )
+
+    gr.Examples(
+        examples=examples,
+        inputs=prompt,
+        outputs=result,
+        fn=generate,
+        cache_examples=CACHE_EXAMPLES,
+    )
 
     inputs = [
         prompt,
@@ -178,7 +190,7 @@ with gr.Blocks(css='style.css') as demo:
         fn=generate,
         inputs=inputs,
         outputs=result,
-        api_name='run',
+        api_name="run",
     )
     negative_prompt.submit(
         fn=randomize_seed_fn,
@@ -204,4 +216,6 @@ with gr.Blocks(css='style.css') as demo:
         outputs=result,
         api_name=False,
     )
-demo.queue(max_size=20).launch()
+
+if __name__ == "__main__":
+    demo.queue(max_size=20).launch()
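One function the hunks only brush past: the headers reference randomize_seed_fn, whose body is unchanged by this commit and therefore elided from the diff. For readers following along, a sketch of the conventional helper used across these Spaces, reconstructed from the visible signature and the MAX_SEED constant; treat the body as an assumption rather than a quote from the repo:

```python
import random

import numpy as np

MAX_SEED = np.iinfo(np.int32).max


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # When the "Randomize seed" checkbox is ticked, draw a fresh seed;
    # otherwise pass the user's slider value through unchanged.
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    return seed
```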