dvilasuero committed
Commit f7dd2b7
Parent: 925ceb1

Upload pipeline.yaml with huggingface_hub

Files changed (1)
  1. pipeline.yaml +489 -0
pipeline.yaml ADDED
@@ -0,0 +1,489 @@
+ distilabel:
+   version: 1.4.0
+ pipeline:
+   name: image_generation_pipeline
+   description: null
+   steps:
+   - step:
+       name: load_data
+       resources:
+         replicas: 1
+         cpus: null
+         gpus: null
+         memory: null
+         resources: null
+       input_mappings: {}
+       output_mappings: {}
+       batch_size: 50
+       repo_id: fal/imgsys-results
+       split: train
+       config: null
+       revision: null
+       streaming: false
+       num_examples: 2
+       storage_options: null
+       runtime_parameters_info:
+       - name: resources
+         runtime_parameters_info:
+         - name: replicas
+           optional: true
+           description: The number of replicas for the step.
+         - name: cpus
+           optional: true
+           description: The number of CPUs assigned to each step replica.
+         - name: gpus
+           optional: true
+           description: The number of GPUs assigned to each step replica.
+         - name: memory
+           optional: true
+           description: The memory in bytes required for each step replica.
+         - name: resources
+           optional: true
+           description: A dictionary containing names of custom resources and the number
+             of those resources required for each step replica.
+       - name: batch_size
+         optional: true
+         description: The number of rows that will contain the batches generated by
+           the step.
+       - name: repo_id
+         optional: false
+         description: The Hugging Face Hub repository ID of the dataset to load.
+       - name: split
+         optional: true
+         description: The split of the dataset to load. Defaults to 'train'.
+       - name: config
+         optional: true
+         description: The configuration of the dataset to load. This is optional and
+           only needed if the dataset has multiple configurations.
+       - name: revision
+         optional: true
+         description: The revision of the dataset to load. Defaults to the latest revision.
+       - name: streaming
+         optional: true
+         description: Whether to load the dataset in streaming mode or not. Defaults
+           to False.
+       - name: num_examples
+         optional: true
+         description: The number of examples to load from the dataset. By default will
+           load all examples.
+       type_info:
+         module: distilabel.steps.generators.huggingface
+         name: LoadDataFromHub
+     name: load_data
+   - step:
+       name: flux
+       resources:
+         replicas: 1
+         cpus: null
+         gpus: null
+         memory: null
+         resources: null
+       input_mappings: {}
+       output_mappings: {}
+       input_batch_size: 50
+       llm:
+         use_magpie_template: false
+         magpie_pre_query_template: null
+         generation_kwargs: {}
+         use_offline_batch_generation: false
+         offline_batch_generation_block_until_done: null
+         jobs_ids: null
+         model_id: black-forest-labs/FLUX.1-schnell
+         endpoint_name: null
+         endpoint_namespace: null
+         base_url: null
+         tokenizer_id: null
+         model_display_name: null
+         structured_output: null
+         type_info:
+           module: __main__
+           name: InferenceEndpointsImageLLM
+       group_generations: false
+       add_raw_output: true
+       add_raw_input: true
+       num_generations: 1
+       use_default_structured_output: false
+       runtime_parameters_info:
+       - name: resources
+         runtime_parameters_info:
+         - name: replicas
+           optional: true
+           description: The number of replicas for the step.
+         - name: cpus
+           optional: true
+           description: The number of CPUs assigned to each step replica.
+         - name: gpus
+           optional: true
+           description: The number of GPUs assigned to each step replica.
+         - name: memory
+           optional: true
+           description: The memory in bytes required for each step replica.
+         - name: resources
+           optional: true
+           description: A dictionary containing names of custom resources and the number
+             of those resources required for each step replica.
+       - name: input_batch_size
+         optional: true
+         description: The number of rows that will contain the batches processed by
+           the step.
+       - name: llm
+         runtime_parameters_info:
+         - name: generation_kwargs
+           description: The kwargs to be propagated to either `generate` or `agenerate`
+             methods within each `LLM`.
+           keys: []
+         - name: use_offline_batch_generation
+           optional: true
+           description: Whether to use the `offline_batch_generate` method to generate
+             the responses.
+         - name: offline_batch_generation_block_until_done
+           optional: true
+           description: If provided, then polling will be done until the `offline_batch_generate`
+             method is able to retrieve the results. The value indicates the time to
+             wait between each polling.
+         - name: endpoint_name
+           optional: true
+           description: The name of the Inference Endpoint to use for the LLM.
+         - name: endpoint_namespace
+           optional: true
+           description: The namespace of the Inference Endpoint to use for the LLM.
+         - name: base_url
+           optional: true
+           description: The base URL to use for the Inference Endpoints API requests.
+         - name: api_key
+           optional: true
+           description: The API key to authenticate the requests to the Inference Endpoints
+             API.
+         - name: structured_output
+           optional: true
+           description: The structured output format to use across all the generations.
+       - name: add_raw_output
+         optional: true
+         description: Whether to include the raw output of the LLM in the key `raw_output_<TASK_NAME>`
+           of the `distilabel_metadata` dictionary output column
+       - name: add_raw_input
+         optional: true
+         description: Whether to include the raw input of the LLM in the key `raw_input_<TASK_NAME>`
+           of the `distilabel_metadata` dictionary column
+       - name: num_generations
+         optional: true
+         description: The number of generations to be produced per input.
+       type_info:
+         module: __main__
+         name: ImageGeneration
+     name: flux
+   - step:
+       name: sdxl
+       resources:
+         replicas: 1
+         cpus: null
+         gpus: null
+         memory: null
+         resources: null
+       input_mappings: {}
+       output_mappings: {}
+       input_batch_size: 50
+       llm:
+         use_magpie_template: false
+         magpie_pre_query_template: null
+         generation_kwargs: {}
+         use_offline_batch_generation: false
+         offline_batch_generation_block_until_done: null
+         jobs_ids: null
+         model_id: stabilityai/stable-diffusion-xl-base-1.0
+         endpoint_name: null
+         endpoint_namespace: null
+         base_url: null
+         tokenizer_id: null
+         model_display_name: null
+         structured_output: null
+         type_info:
+           module: __main__
+           name: InferenceEndpointsImageLLM
+       group_generations: false
+       add_raw_output: true
+       add_raw_input: true
+       num_generations: 1
+       use_default_structured_output: false
+       runtime_parameters_info:
+       - name: resources
+         runtime_parameters_info:
+         - name: replicas
+           optional: true
+           description: The number of replicas for the step.
+         - name: cpus
+           optional: true
+           description: The number of CPUs assigned to each step replica.
+         - name: gpus
+           optional: true
+           description: The number of GPUs assigned to each step replica.
+         - name: memory
+           optional: true
+           description: The memory in bytes required for each step replica.
+         - name: resources
+           optional: true
+           description: A dictionary containing names of custom resources and the number
+             of those resources required for each step replica.
+       - name: input_batch_size
+         optional: true
+         description: The number of rows that will contain the batches processed by
+           the step.
+       - name: llm
+         runtime_parameters_info:
+         - name: generation_kwargs
+           description: The kwargs to be propagated to either `generate` or `agenerate`
+             methods within each `LLM`.
+           keys: []
+         - name: use_offline_batch_generation
+           optional: true
+           description: Whether to use the `offline_batch_generate` method to generate
+             the responses.
+         - name: offline_batch_generation_block_until_done
+           optional: true
+           description: If provided, then polling will be done until the `offline_batch_generate`
+             method is able to retrieve the results. The value indicates the time to
+             wait between each polling.
+         - name: endpoint_name
+           optional: true
+           description: The name of the Inference Endpoint to use for the LLM.
+         - name: endpoint_namespace
+           optional: true
+           description: The namespace of the Inference Endpoint to use for the LLM.
+         - name: base_url
+           optional: true
+           description: The base URL to use for the Inference Endpoints API requests.
+         - name: api_key
+           optional: true
+           description: The API key to authenticate the requests to the Inference Endpoints
+             API.
+         - name: structured_output
+           optional: true
+           description: The structured output format to use across all the generations.
+       - name: add_raw_output
+         optional: true
+         description: Whether to include the raw output of the LLM in the key `raw_output_<TASK_NAME>`
+           of the `distilabel_metadata` dictionary output column
+       - name: add_raw_input
+         optional: true
+         description: Whether to include the raw input of the LLM in the key `raw_input_<TASK_NAME>`
+           of the `distilabel_metadata` dictionary column
+       - name: num_generations
+         optional: true
+         description: The number of generations to be produced per input.
+       type_info:
+         module: __main__
+         name: ImageGeneration
+     name: sdxl
+   - step:
+       name: flux_task_2
+       resources:
+         replicas: 1
+         cpus: null
+         gpus: null
+         memory: null
+         resources: null
+       input_mappings: {}
+       output_mappings: {}
+       input_batch_size: 50
+       llm:
+         use_magpie_template: false
+         magpie_pre_query_template: null
+         generation_kwargs: {}
+         use_offline_batch_generation: false
+         offline_batch_generation_block_until_done: null
+         jobs_ids: null
+         model_id: black-forest-labs/FLUX.1-schnell
+         endpoint_name: null
+         endpoint_namespace: null
+         base_url: null
+         tokenizer_id: null
+         model_display_name: null
+         structured_output: null
+         type_info:
+           module: __main__
+           name: InferenceEndpointsImageLLM
+       group_generations: false
+       add_raw_output: true
+       add_raw_input: true
+       num_generations: 1
+       use_default_structured_output: false
+       runtime_parameters_info:
+       - name: resources
+         runtime_parameters_info:
+         - name: replicas
+           optional: true
+           description: The number of replicas for the step.
+         - name: cpus
+           optional: true
+           description: The number of CPUs assigned to each step replica.
+         - name: gpus
+           optional: true
+           description: The number of GPUs assigned to each step replica.
+         - name: memory
+           optional: true
+           description: The memory in bytes required for each step replica.
+         - name: resources
+           optional: true
+           description: A dictionary containing names of custom resources and the number
+             of those resources required for each step replica.
+       - name: input_batch_size
+         optional: true
+         description: The number of rows that will contain the batches processed by
+           the step.
+       - name: llm
+         runtime_parameters_info:
+         - name: generation_kwargs
+           description: The kwargs to be propagated to either `generate` or `agenerate`
+             methods within each `LLM`.
+           keys: []
+         - name: use_offline_batch_generation
+           optional: true
+           description: Whether to use the `offline_batch_generate` method to generate
+             the responses.
+         - name: offline_batch_generation_block_until_done
+           optional: true
+           description: If provided, then polling will be done until the `offline_batch_generate`
+             method is able to retrieve the results. The value indicates the time to
+             wait between each polling.
+         - name: endpoint_name
+           optional: true
+           description: The name of the Inference Endpoint to use for the LLM.
+         - name: endpoint_namespace
+           optional: true
+           description: The namespace of the Inference Endpoint to use for the LLM.
+         - name: base_url
+           optional: true
+           description: The base URL to use for the Inference Endpoints API requests.
+         - name: api_key
+           optional: true
+           description: The API key to authenticate the requests to the Inference Endpoints
+             API.
+         - name: structured_output
+           optional: true
+           description: The structured output format to use across all the generations.
+       - name: add_raw_output
+         optional: true
+         description: Whether to include the raw output of the LLM in the key `raw_output_<TASK_NAME>`
+           of the `distilabel_metadata` dictionary output column
+       - name: add_raw_input
+         optional: true
+         description: Whether to include the raw input of the LLM in the key `raw_input_<TASK_NAME>`
+           of the `distilabel_metadata` dictionary column
+       - name: num_generations
+         optional: true
+         description: The number of generations to be produced per input.
+       type_info:
+         module: __main__
+         name: ImageGeneration
+     name: flux_task_2
+   - step:
+       name: group_columns_0
+       resources:
+         replicas: 1
+         cpus: null
+         gpus: null
+         memory: null
+         resources: null
+       input_mappings: {}
+       output_mappings: {}
+       input_batch_size: 50
+       columns:
+       - image
+       - model_name
+       output_columns:
+       - images
+       - models
+       runtime_parameters_info:
+       - name: resources
+         runtime_parameters_info:
+         - name: replicas
+           optional: true
+           description: The number of replicas for the step.
+         - name: cpus
+           optional: true
+           description: The number of CPUs assigned to each step replica.
+         - name: gpus
+           optional: true
+           description: The number of GPUs assigned to each step replica.
+         - name: memory
+           optional: true
+           description: The memory in bytes required for each step replica.
+         - name: resources
+           optional: true
+           description: A dictionary containing names of custom resources and the number
+             of those resources required for each step replica.
+       - name: input_batch_size
+         optional: true
+         description: The number of rows that will contain the batches processed by
+           the step.
+       type_info:
+         module: distilabel.steps.columns.group
+         name: GroupColumns
+     name: group_columns_0
+   - step:
+       name: keep_columns_0
+       resources:
+         replicas: 1
+         cpus: null
+         gpus: null
+         memory: null
+         resources: null
+       input_mappings: {}
+       output_mappings: {}
+       input_batch_size: 50
+       columns:
+       - prompt
+       - models
+       - images
+       runtime_parameters_info:
+       - name: resources
+         runtime_parameters_info:
+         - name: replicas
+           optional: true
+           description: The number of replicas for the step.
+         - name: cpus
+           optional: true
+           description: The number of CPUs assigned to each step replica.
+         - name: gpus
+           optional: true
+           description: The number of GPUs assigned to each step replica.
+         - name: memory
+           optional: true
+           description: The memory in bytes required for each step replica.
+         - name: resources
+           optional: true
+           description: A dictionary containing names of custom resources and the number
+             of those resources required for each step replica.
+       - name: input_batch_size
+         optional: true
+         description: The number of rows that will contain the batches processed by
+           the step.
+       type_info:
+         module: distilabel.steps.columns.keep
+         name: KeepColumns
+     name: keep_columns_0
+   connections:
+   - from: load_data
+     to:
+     - flux
+     - sdxl
+     - flux_task_2
+   - from: flux
+     to:
+     - group_columns_0
+   - from: sdxl
+     to:
+     - group_columns_0
+   - from: flux_task_2
+     to:
+     - group_columns_0
+   - from: group_columns_0
+     to:
+     - keep_columns_0
+   - from: keep_columns_0
+     to: []
+   routing_batch_functions: []
+   type_info:
+     module: distilabel.pipeline.local
+     name: Pipeline
+ requirements: []
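
The commit message states the file was uploaded with huggingface_hub. Below is a minimal sketch of such an upload, assuming a dataset repository; the repo_id is a placeholder, not the actual repository behind this commit. Note that distilabel's Distiset.push_to_hub typically pushes this pipeline.yaml alongside the generated dataset, so an explicit call like this is only needed when uploading the file by hand.

# Minimal sketch: upload the serialized pipeline.yaml to a Hub dataset repo.
# The repo_id below is a placeholder used for illustration only.
from huggingface_hub import HfApi

api = HfApi()  # uses the token from `huggingface-cli login` or the HF_TOKEN env var
api.upload_file(
    path_or_fileobj="pipeline.yaml",
    path_in_repo="pipeline.yaml",
    repo_id="<user>/<dataset-name>",  # placeholder
    repo_type="dataset",
    commit_message="Upload pipeline.yaml with huggingface_hub",
)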