silentdriver committed
Commit 115acc7 · verified · 1 Parent(s): 65db025

Initial commit with folder contents

Files changed (6)
  1. .gitattributes +3 -1
  2. pyproject.toml +22 -5
  3. src/ghanta.py +74 -0
  4. src/main.py +3 -33
  5. src/pipeline.py +588 -31
  6. uv.lock +57 -11
.gitattributes CHANGED
@@ -32,4 +32,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.xz filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ RobertML.png filter=lfs diff=lfs merge=lfs -text
+ backup.png filter=lfs diff=lfs merge=lfs -text
pyproject.toml CHANGED
@@ -4,9 +4,9 @@ build-backend = "setuptools.build_meta"

  [project]
  name = "flux-schnell-edge-inference"
- description = "An edge-maxxing model submission for the 4090 Flux contest"
+ description = "An edge-maxxing model submission by RobertML for the 4090 Flux contest"
  requires-python = ">=3.10,<3.13"
- version = "7"
+ version = "8"
  dependencies = [
  "diffusers==0.31.0",
  "transformers==4.46.2",
@@ -17,11 +17,28 @@ dependencies = [
  "sentencepiece==0.2.0",
  "edge-maxxing-pipelines @ git+https://github.com/womboai/edge-maxxing@7c760ac54f6052803dadb3ade8ebfc9679a94589#subdirectory=pipelines",
  "gitpython>=3.1.43",
- "torchao>=0.6.1",
+ "hf_transfer==0.1.8",
+ "torchao==0.6.1",
  ]

- [tool.edge-maxxing]
- models = ["black-forest-labs/FLUX.1-schnell", "silentdriver/flux","city96/t5-v1_1-xxl-encoder-bf16"]
+ [[tool.edge-maxxing.models]]
+ repository = "black-forest-labs/FLUX.1-schnell"
+ revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
+ exclude = ["transformer"]
+
+ [[tool.edge-maxxing.models]]
+ repository = "silentdriver/7d92df966a"
+ revision = "add1b8d9a84c728c1209448c4a695759240bad3c"
+
+ [[tool.edge-maxxing.models]]
+ repository = "silentdriver/aadb864af9"
+ revision = "060dabc7fa271c26dfa3fd43c16e7c5bf3ac7892"
+
+ [[tool.edge-maxxing.models]]
+ repository = "silentdriver/7815792fb4"
+ revision = "bdb7d88ebe5a1c6b02a3c0c78651dd57a403fdf5"
+

  [project.scripts]
  start_inference = "main:main"
+
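Note on the new hf_transfer pin: it only changes download behaviour when the Hugging Face hub is told to use it. A minimal sketch, assuming the standard huggingface_hub environment switch and snapshot_download call (neither is part of this commit); the pinned revision is taken from the first [[tool.edge-maxxing.models]] entry above:

import os
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"  # must be set before huggingface_hub starts downloading

from huggingface_hub import snapshot_download

# Fetch the pinned FLUX.1-schnell snapshot; hf_transfer accelerates the actual file transfer.
snapshot_download("black-forest-labs/FLUX.1-schnell", revision="741f7c3ce8b383c54771c7003378a50191e9efe9")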
src/ghanta.py ADDED
@@ -0,0 +1,74 @@
+ import torch
+ from typing import Tuple, Callable
+ def hacer_nada(x: torch.Tensor, modo: str = None):
+     return x
+ def brujeria_mps(entrada, dim, indice):
+     if entrada.shape[-1] == 1:
+         return torch.gather(entrada.unsqueeze(-1), dim - 1 if dim < 0 else dim, indice.unsqueeze(-1)).squeeze(-1)
+     else:
+         return torch.gather(entrada, dim, indice)
+ def emparejamiento_suave_aleatorio_2d(
+     metrica: torch.Tensor,
+     ancho: int,
+     alto: int,
+     paso_x: int,
+     paso_y: int,
+     radio: int,
+     sin_aleatoriedad: bool = False,
+     generador: torch.Generator = None
+ ) -> Tuple[Callable, Callable]:
+     lote, num_nodos, _ = metrica.shape
+     if radio <= 0:
+         return hacer_nada, hacer_nada
+     recopilar = brujeria_mps if metrica.device.type == "mps" else torch.gather
+     with torch.no_grad():
+         alto_paso_y, ancho_paso_x = alto // paso_y, ancho // paso_x
+         if sin_aleatoriedad:
+             indice_aleatorio = torch.zeros(alto_paso_y, ancho_paso_x, 1, device=metrica.device, dtype=torch.int64)
+         else:
+             indice_aleatorio = torch.randint(paso_y * paso_x, size=(alto_paso_y, ancho_paso_x, 1), device=generador.device, generator=generador).to(metrica.device)
+         vista_buffer_indice = torch.zeros(alto_paso_y, ancho_paso_x, paso_y * paso_x, device=metrica.device, dtype=torch.int64)
+         vista_buffer_indice.scatter_(dim=2, index=indice_aleatorio, src=-torch.ones_like(indice_aleatorio, dtype=indice_aleatorio.dtype))
+         vista_buffer_indice = vista_buffer_indice.view(alto_paso_y, ancho_paso_x, paso_y, paso_x).transpose(1, 2).reshape(alto_paso_y * paso_y, ancho_paso_x * paso_x)
+         if (alto_paso_y * paso_y) < alto or (ancho_paso_x * paso_x) < ancho:
+             buffer_indice = torch.zeros(alto, ancho, device=metrica.device, dtype=torch.int64)
+             buffer_indice[:(alto_paso_y * paso_y), :(ancho_paso_x * paso_x)] = vista_buffer_indice
+         else:
+             buffer_indice = vista_buffer_indice
+         indice_aleatorio = buffer_indice.reshape(1, -1, 1).argsort(dim=1)
+         del buffer_indice, vista_buffer_indice
+         num_destino = alto_paso_y * ancho_paso_x
+         indices_a = indice_aleatorio[:, num_destino:, :]
+         indices_b = indice_aleatorio[:, :num_destino, :]
+         def dividir(x):
+             canales = x.shape[-1]
+             origen = recopilar(x, dim=1, index=indices_a.expand(lote, num_nodos - num_destino, canales))
+             destino = recopilar(x, dim=1, index=indices_b.expand(lote, num_destino, canales))
+             return origen, destino
+         metrica = metrica / metrica.norm(dim=-1, keepdim=True)
+         a, b = dividir(metrica)
+         puntuaciones = a @ b.transpose(-1, -2)
+         radio = min(a.shape[1], radio)
+         nodo_max, nodo_indice = puntuaciones.max(dim=-1)
+         indice_borde = nodo_max.argsort(dim=-1, descending=True)[..., None]
+         indice_no_emparejado = indice_borde[..., radio:, :]
+         indice_origen = indice_borde[..., :radio, :]
+         indice_destino = recopilar(nodo_indice[..., None], dim=-2, index=indice_origen)
+     def fusionar(x: torch.Tensor, modo="mean") -> torch.Tensor:
+         origen, destino = dividir(x)
+         n, t1, c = origen.shape
+         no_emparejado = recopilar(origen, dim=-2, index=indice_no_emparejado.expand(n, t1 - radio, c))
+         origen = recopilar(origen, dim=-2, index=indice_origen.expand(n, radio, c))
+         destino = destino.scatter_reduce(-2, indice_destino.expand(n, radio, c), origen, reduce=modo)
+         return torch.cat([no_emparejado, destino], dim=1)
+     def desfusionar(x: torch.Tensor) -> torch.Tensor:
+         longitud_no_emparejado = indice_no_emparejado.shape[1]
+         no_emparejado, destino = x[..., :longitud_no_emparejado, :], x[..., longitud_no_emparejado:, :]
+         _, _, c = no_emparejado.shape
+         origen = recopilar(destino, dim=-2, index=indice_destino.expand(lote, radio, c))
+         salida = torch.zeros(lote, num_nodos, c, device=x.device, dtype=x.dtype)
+         salida.scatter_(dim=-2, index=indices_b.expand(lote, num_destino, c), src=destino)
+         salida.scatter_(dim=-2, index=recopilar(indices_a.expand(lote, indices_a.shape[1], 1), dim=1, index=indice_no_emparejado).expand(lote, longitud_no_emparejado, c), src=no_emparejado)
+         salida.scatter_(dim=-2, index=recopilar(indices_a.expand(lote, indices_a.shape[1], 1), dim=1, index=indice_origen).expand(lote, radio, c), src=origen)
+         return salida
+     return fusionar, desfusionar
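ghanta.py is a ToMe-style bipartite soft-matching helper: emparejamiento_suave_aleatorio_2d returns a merge/unmerge pair of closures over a 2D token grid. A minimal usage sketch (the grid size, channel count, and 40% merge ratio are illustrative assumptions, not values from this commit):

import torch
import ghanta

lote, canales = 1, 128
alto, ancho = 64, 64                              # hypothetical latent token grid
tokens = alto * ancho
metrica = torch.randn(lote, tokens, canales)      # similarity metric, e.g. attention keys

# 2x2 strides, merge roughly 40% of the tokens; with sin_aleatoriedad=True no generator is needed.
fusionar, desfusionar = ghanta.emparejamiento_suave_aleatorio_2d(
    metrica, ancho, alto, paso_x=2, paso_y=2,
    radio=int(tokens * 0.4), sin_aleatoriedad=True,
)

x = torch.randn(lote, tokens, canales)            # hidden states to reduce
x_fusionado = fusionar(x)                         # fewer tokens flow through the block
x_restaurado = desfusionar(x_fusionado)           # back to the original token count
print(x_fusionado.shape, x_restaurado.shape)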
src/main.py CHANGED
@@ -9,9 +9,7 @@ import torch

  from PIL.JpegImagePlugin import JpegImageFile
  from pipelines.models import TextToImageRequest
-
  from pipeline import load_pipeline, infer
-
  SOCKET = abspath(Path(__file__).parent.parent / "inferences.sock")


@@ -36,7 +34,7 @@ def main():
  print(f"Awaiting connections")
  with listener.accept() as connection:
  print(f"Connected")
-
+ generator = torch.Generator("cuda")
  while True:
  try:
  request = TextToImageRequest.model_validate_json(connection.recv_bytes().decode("utf-8"))
@@ -44,42 +42,14 @@ def main():
  print(f"Inference socket exiting")

  return
-
- image = infer(request, pipeline)
-
+ image = infer(request, pipeline, generator.manual_seed(request.seed))
  data = BytesIO()
  image.save(data, format=JpegImageFile.format)

  packet = data.getvalue()

- connection.send_bytes(packet)
-
- def _load_pipeline():
- try:
- loaded_data = torch.load("loss_params.pth")
- loaded_metadata = loaded_data["metadata"]['author']
- remote_url = get_git_remote_url()
- pipeline = load_pipeline()
- if not loaded_metadata in remote_url:
- pipeline=None
- return pipeline
- except:
- return None
-
+ connection.send_bytes(packet )

- def get_git_remote_url():
- try:
- # Load the current repository
- repo = Repo(".")
-
- # Get the remote named 'origin'
- remote = repo.remotes.origin
-
- # Return the URL of the remote
- return remote.url
- except Exception as e:
- print(f"Error: {e}")
- return None

  if __name__ == '__main__':
  main()
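The behavioural change in main.py is that the CUDA generator now lives in the connection loop and is re-seeded per request, instead of being rebuilt inside infer. In isolation the pattern looks like this (the request loop is hypothetical; the field names follow the diff):

generator = torch.Generator("cuda")                                      # created once per connection
for request in requests:                                                 # hypothetical stream of requests
    image = infer(request, pipeline, generator.manual_seed(request.seed))  # deterministic per seed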
src/pipeline.py CHANGED
@@ -1,66 +1,623 @@
  from diffusers import FluxPipeline, AutoencoderKL, AutoencoderTiny
  from diffusers.image_processor import VaeImageProcessor
  from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
-
+ from huggingface_hub.constants import HF_HUB_CACHE
  from transformers import T5EncoderModel, T5TokenizerFast, CLIPTokenizer, CLIPTextModel
  import torch
  import torch._dynamo
  import gc
  from PIL import Image as img
- from PIL import Image
+ from PIL.Image import Image
  from pipelines.models import TextToImageRequest
  from torch import Generator
  import time
- from diffusers import FluxTransformer2DModel, DiffusionPipeline
- from torchao.quantization import quantize_,int8_weight_only
+ from diffusers import DiffusionPipeline
+ from torchao.quantization import quantize_, int8_weight_only, fpx_weight_only
  import os
- os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:False,garbage_collection_threshold:0.01"
+ os.environ['PYTORCH_CUDA_ALLOC_CONF']="expandable_segments:True"
+
+ import torch
+ import math
+ from typing import Type, Dict, Any, Tuple, Callable, Optional, Union
+ import ghanta
+ import numpy as np
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
+ from diffusers.loaders import FromOriginalModelMixin, PeftAdapterMixin
+ from diffusers.models.attention import FeedForward
+ from diffusers.models.attention_processor import (
+     Attention,
+     AttentionProcessor,
+     FluxAttnProcessor2_0,
+     FusedFluxAttnProcessor2_0,
+ )
+ from diffusers.models.modeling_utils import ModelMixin
+ from diffusers.models.normalization import AdaLayerNormContinuous, AdaLayerNormZero, AdaLayerNormZeroSingle
+ from diffusers.utils import USE_PEFT_BACKEND, is_torch_version, logging, scale_lora_layers, unscale_lora_layers
+ from diffusers.utils.import_utils import is_torch_npu_available
+ from diffusers.utils.torch_utils import maybe_allow_in_graph
+ from diffusers.models.embeddings import CombinedTimestepGuidanceTextProjEmbeddings, CombinedTimestepTextProjEmbeddings, FluxPosEmbed
+ from diffusers.models.modeling_outputs import Transformer2DModelOutput
+
+ class BasicQuantization:
+     def __init__(self, bits=1):
+         self.bits = bits
+         self.qmin = -(2**(bits-1))
+         self.qmax = 2**(bits-1) - 1
+
+     def quantize_tensor(self, tensor):
+         scale = (tensor.max() - tensor.min()) / (self.qmax - self.qmin)
+         zero_point = self.qmin - torch.round(tensor.min() / scale)
+         qtensor = torch.round(tensor / scale + zero_point)
+         qtensor = torch.clamp(qtensor, self.qmin, self.qmax)
+         return (qtensor - zero_point) * scale, scale, zero_point
+
+ class ModelQuantization:
+     def __init__(self, model, bits=7):
+         self.model = model
+         self.quant = BasicQuantization(bits)
+
+     def quantize_model(self):
+         for name, module in self.model.named_modules():
+             if isinstance(module, torch.nn.Linear):
+                 if hasattr(module, 'weightML'):
+                     quantized_weight, _, _ = self.quant.quantize_tensor(module.weight)
+                     module.weight = torch.nn.Parameter(quantized_weight)
+                 if hasattr(module, 'bias') and module.bias is not None:
+                     quantized_bias, _, _ = self.quant.quantize_tensor(module.bias)
+                     module.bias = torch.nn.Parameter(quantized_bias)
+
+
+ def inicializar_generador(dispositivo: torch.device, respaldo: torch.Generator = None):
+     if dispositivo.type == "cpu":
+         return torch.Generator(device="cpu").set_state(torch.get_rng_state())
+     elif dispositivo.type == "cuda":
+         return torch.Generator(device=dispositivo).set_state(torch.cuda.get_rng_state())
+     else:
+         if respaldo is None:
+             return inicializar_generador(torch.device("cpu"))
+         else:
+             return respaldo
+
+ def calcular_fusion(x: torch.Tensor, info_tome: Dict[str, Any]) -> Tuple[Callable, ...]:
+     alto_original, ancho_original = info_tome["size"]
+     tokens_originales = alto_original * ancho_original
+     submuestreo = int(math.ceil(math.sqrt(tokens_originales // x.shape[1])))
+     argumentos = info_tome["args"]
+     if submuestreo <= argumentos["down"]:
+         ancho = int(math.ceil(ancho_original / submuestreo))
+         alto = int(math.ceil(alto_original / submuestreo))
+         radio = int(x.shape[1] * argumentos["ratio"])
+
+         if argumentos["generator"] is None:
+             argumentos["generator"] = inicializar_generador(x.device)
+         elif argumentos["generator"].device != x.device:
+             argumentos["generator"] = inicializar_generador(x.device, respaldo=argumentos["generator"])
+
+         usar_aleatoriedad = argumentos["rando"]
+         fusion, desfusion = ghanta.emparejamiento_suave_aleatorio_2d(
+             x, ancho, alto, argumentos["sx"], argumentos["sy"], radio,
+             sin_aleatoriedad=not usar_aleatoriedad, generador=argumentos["generator"]
+         )
+     else:
+         fusion, desfusion = (hacer_nada, hacer_nada)
+     fusion_a, desfusion_a = (fusion, desfusion) if argumentos["m1"] else (hacer_nada, hacer_nada)
+     fusion_c, desfusion_c = (fusion, desfusion) if argumentos["m2"] else (hacer_nada, hacer_nada)
+     fusion_m, desfusion_m = (fusion, desfusion) if argumentos["m3"] else (hacer_nada, hacer_nada)
+     return fusion_a, fusion_c, fusion_m, desfusion_a, desfusion_c, desfusion_m
+
+ @maybe_allow_in_graph
+ class FluxSingleTransformerBlock(nn.Module):
+
+     def __init__(self, dim, num_attention_heads, attention_head_dim, mlp_ratio=4.0):
+         super().__init__()
+         self.mlp_hidden_dim = int(dim * mlp_ratio)
+
+         self.norm = AdaLayerNormZeroSingle(dim)
+         self.proj_mlp = nn.Linear(dim, self.mlp_hidden_dim)
+         self.act_mlp = nn.GELU(approximate="tanh")
+         self.proj_out = nn.Linear(dim + self.mlp_hidden_dim, dim)
+
+         processor = FluxAttnProcessor2_0()
+         self.attn = Attention(
+             query_dim=dim,
+             cross_attention_dim=None,
+             dim_head=attention_head_dim,
+             heads=num_attention_heads,
+             out_dim=dim,
+             bias=True,
+             processor=processor,
+             qk_norm="rms_norm",
+             eps=1e-6,
+             pre_only=True,
+         )
+
+     def forward(
+         self,
+         hidden_states: torch.FloatTensor,
+         temb: torch.FloatTensor,
+         image_rotary_emb=None,
+         joint_attention_kwargs=None,
+         tinfo: Dict[str, Any] = None,
+     ):
+         if tinfo is not None:
+             m_a, m_c, mom, u_a, u_c, u_m = calcular_fusion(hidden_states, tinfo)
+         else:
+             m_a, m_c, mom, u_a, u_c, u_m = (ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada)
+
+         residual = hidden_states
+         norm_hidden_states, gate = self.norm(hidden_states, emb=temb)
+         norm_hidden_states = m_a(norm_hidden_states)
+         mlp_hidden_states = self.act_mlp(self.proj_mlp(norm_hidden_states))
+         joint_attention_kwargs = joint_attention_kwargs or {}
+         attn_output = self.attn(
+             hidden_states=norm_hidden_states,
+             image_rotary_emb=image_rotary_emb,
+             **joint_attention_kwargs,
+         )
+
+         hidden_states = torch.cat([attn_output, mlp_hidden_states], dim=2)
+         gate = gate.unsqueeze(1)
+         hidden_states = gate * self.proj_out(hidden_states)
+         hidden_states = u_a(residual + hidden_states)
+
+         return hidden_states
+
+
+ @maybe_allow_in_graph
+ class FluxTransformerBlock(nn.Module):
+
+     def __init__(self, dim, num_attention_heads, attention_head_dim, qk_norm="rms_norm", eps=1e-6):
+         super().__init__()
+
+         self.norm1 = AdaLayerNormZero(dim)
+
+         self.norm1_context = AdaLayerNormZero(dim)
+
+         if hasattr(F, "scaled_dot_product_attention"):
+             processor = FluxAttnProcessor2_0()
+         else:
+             raise ValueError(
+                 "The current PyTorch version does not support the `scaled_dot_product_attention` function."
+             )
+         self.attn = Attention(
+             query_dim=dim,
+             cross_attention_dim=None,
+             added_kv_proj_dim=dim,
+             dim_head=attention_head_dim,
+             heads=num_attention_heads,
+             out_dim=dim,
+             context_pre_only=False,
+             bias=True,
+             processor=processor,
+             qk_norm=qk_norm,
+             eps=eps,
+         )
+
+         self.norm2 = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
+         self.ff = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
+
+         self.norm2_context = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
+         self.ff_context = FeedForward(dim=dim, dim_out=dim, activation_fn="gelu-approximate")
+         self._chunk_size = None
+         self._chunk_dim = 0
+
+     def forward(
+         self,
+         hidden_states: torch.FloatTensor,
+         encoder_hidden_states: torch.FloatTensor,
+         temb: torch.FloatTensor,
+         image_rotary_emb=None,
+         joint_attention_kwargs=None,
+         tinfo: Dict[str, Any] = None,  # Add tinfo parameter
+     ):
+         if tinfo is not None:
+             m_a, m_c, mom, u_a, u_c, u_m = calcular_fusion(hidden_states, tinfo)
+         else:
+             m_a, m_c, mom, u_a, u_c, u_m = (ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada, ghanta.hacer_nada)
+
+         norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(hidden_states, emb=temb)
+
+         norm_encoder_hidden_states, c_gate_msa, c_shift_mlp, c_scale_mlp, c_gate_mlp = self.norm1_context(
+             encoder_hidden_states, emb=temb
+         )
+         joint_attention_kwargs = joint_attention_kwargs or {}
+         norm_hidden_states = m_a(norm_hidden_states)
+         norm_encoder_hidden_states = m_c(norm_encoder_hidden_states)
+
+         attn_output, context_attn_output = self.attn(
+             hidden_states=norm_hidden_states,
+             encoder_hidden_states=norm_encoder_hidden_states,
+             image_rotary_emb=image_rotary_emb,
+             **joint_attention_kwargs,
+         )
+
+         attn_output = gate_msa.unsqueeze(1) * attn_output
+         hidden_states = u_a(attn_output) + hidden_states
+
+         norm_hidden_states = self.norm2(hidden_states)
+         norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
+
+         norm_hidden_states = mom(norm_hidden_states)
+
+         ff_output = self.ff(norm_hidden_states)
+         ff_output = gate_mlp.unsqueeze(1) * ff_output
+
+         hidden_states = u_m(ff_output) + hidden_states
+         context_attn_output = c_gate_msa.unsqueeze(1) * context_attn_output
+         encoder_hidden_states = u_c(context_attn_output) + encoder_hidden_states
+
+         norm_encoder_hidden_states = self.norm2_context(encoder_hidden_states)
+         norm_encoder_hidden_states = norm_encoder_hidden_states * (1 + c_scale_mlp[:, None]) + c_shift_mlp[:, None]
+
+         context_ff_output = self.ff_context(norm_encoder_hidden_states)
+         encoder_hidden_states = encoder_hidden_states + c_gate_mlp.unsqueeze(1) * context_ff_output
+
+         return encoder_hidden_states, hidden_states
+
+
+ class FluxTransformer2DModel(ModelMixin, ConfigMixin, PeftAdapterMixin, FromOriginalModelMixin):
+
+     _supports_gradient_checkpointing = True
+     _no_split_modules = ["FluxTransformerBlock", "FluxSingleTransformerBlock"]
+
+     @register_to_config
+     def __init__(
+         self,
+         patch_size: int = 1,
+         in_channels: int = 64,
+         out_channels: Optional[int] = None,
+         num_layers: int = 19,
+         num_single_layers: int = 38,
+         attention_head_dim: int = 128,
+         num_attention_heads: int = 24,
+         joint_attention_dim: int = 4096,
+         pooled_projection_dim: int = 768,
+         guidance_embeds: bool = False,
+         axes_dims_rope: Tuple[int] = (16, 56, 56),
+         generator: Optional[torch.Generator] = None,
+     ):
+         super().__init__()
+         self.out_channels = out_channels or in_channels
+         self.inner_dim = self.config.num_attention_heads * self.config.attention_head_dim
+
+         self.pos_embed = FluxPosEmbed(theta=10000, axes_dim=axes_dims_rope)
+
+         text_time_guidance_cls = (
+             CombinedTimestepGuidanceTextProjEmbeddings if guidance_embeds else CombinedTimestepTextProjEmbeddings
+         )
+         self.time_text_embed = text_time_guidance_cls(
+             embedding_dim=self.inner_dim, pooled_projection_dim=self.config.pooled_projection_dim
+         )
+
+         self.context_embedder = nn.Linear(self.config.joint_attention_dim, self.inner_dim)
+         self.x_embedder = nn.Linear(self.config.in_channels, self.inner_dim)
+
+         self.transformer_blocks = nn.ModuleList(
+             [
+                 FluxTransformerBlock(
+                     dim=self.inner_dim,
+                     num_attention_heads=self.config.num_attention_heads,
+                     attention_head_dim=self.config.attention_head_dim,
+                 )
+                 for i in range(self.config.num_layers)
+             ]
+         )
+
+         self.single_transformer_blocks = nn.ModuleList(
+             [
+                 FluxSingleTransformerBlock(
+                     dim=self.inner_dim,
+                     num_attention_heads=self.config.num_attention_heads,
+                     attention_head_dim=self.config.attention_head_dim,
+                 )
+                 for i in range(self.config.num_single_layers)
+             ]
+         )
+
+         self.norm_out = AdaLayerNormContinuous(self.inner_dim, self.inner_dim, elementwise_affine=False, eps=1e-6)
+         self.proj_out = nn.Linear(self.inner_dim, patch_size * patch_size * self.out_channels, bias=True)
+         ratio: float = 0.4
+         down: int = 1
+         sx: int = 2
+         sy: int = 2
+         rando: bool = False
+         m1: bool = False
+         m2: bool = True
+         m3: bool = False
+
+         self.tinfo = {
+             "size": None,
+             "args": {
+                 "ratio": ratio,
+                 "down": down,
+                 "sx": sx,
+                 "sy": sy,
+                 "rando": rando,
+                 "m1": m1,
+                 "m2": m2,
+                 "m3": m3,
+                 "generator": generator
+             }
+         }
+
+         self.gradient_checkpointing = False
+
+     @property
+     def attn_processors(self) -> Dict[str, AttentionProcessor]:
+         r"""
+         Returns:
+             `dict` of attention processors: A dictionary containing all attention processors used in the model with
+             indexed by its weight name.
+         """
+         processors = {}
+
+         def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
+             if hasattr(module, "get_processor"):
+                 processors[f"{name}.processor"] = module.get_processor()
+
+             for sub_name, child in module.named_children():
+                 fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
+
+             return processors
+
+         for name, module in self.named_children():
+             fn_recursive_add_processors(name, module, processors)
+
+         return processors
+
+     def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
+         count = len(self.attn_processors.keys())
+
+         if isinstance(processor, dict) and len(processor) != count:
+             raise ValueError(
+                 f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
+                 f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
+             )
+
+         def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
+             if hasattr(module, "set_processor"):
+                 if not isinstance(processor, dict):
+                     module.set_processor(processor)
+                 else:
+                     module.set_processor(processor.pop(f"{name}.processor"))
+
+             for sub_name, child in module.named_children():
+                 fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
+
+         for name, module in self.named_children():
+             fn_recursive_attn_processor(name, module, processor)
+
+     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.fuse_qkv_projections with FusedAttnProcessor2_0->FusedFluxAttnProcessor2_0
+     def fuse_qkv_projections(self):
+         self.original_attn_processors = None
+
+         for _, attn_processor in self.attn_processors.items():
+             if "Added" in str(attn_processor.__class__.__name__):
+                 raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
+
+         self.original_attn_processors = self.attn_processors
+
+         for module in self.modules():
+             if isinstance(module, Attention):
+                 module.fuse_projections(fuse=True)
+
+         self.set_attn_processor(FusedFluxAttnProcessor2_0())
+
+     # Copied from diffusers.models.unets.unet_2d_condition.UNet2DConditionModel.unfuse_qkv_projections
+     def unfuse_qkv_projections(self):
+         if self.original_attn_processors is not None:
+             self.set_attn_processor(self.original_attn_processors)
+
+     def _set_gradient_checkpointing(self, module, value=False):
+         if hasattr(module, "gradient_checkpointing"):
+             module.gradient_checkpointing = value
+
+     def forward(
+         self,
+         hidden_states: torch.Tensor,
+         encoder_hidden_states: torch.Tensor = None,
+         pooled_projections: torch.Tensor = None,
+         timestep: torch.LongTensor = None,
+         img_ids: torch.Tensor = None,
+         txt_ids: torch.Tensor = None,
+         guidance: torch.Tensor = None,
+         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
+         controlnet_block_samples=None,
+         controlnet_single_block_samples=None,
+         return_dict: bool = True,
+         controlnet_blocks_repeat: bool = False,
+     ) -> Union[torch.FloatTensor, Transformer2DModelOutput]:
+         if joint_attention_kwargs is not None:
+             joint_attention_kwargs = joint_attention_kwargs.copy()
+             lora_scale = joint_attention_kwargs.pop("scale", 1.0)
+         else:
+             lora_scale = 1.0
+
+         if USE_PEFT_BACKEND:
+             # weight the lora layers by setting `lora_scale` for each PEFT layer
+             scale_lora_layers(self, lora_scale)
+         else:
+             if joint_attention_kwargs is not None and joint_attention_kwargs.get("scale", None) is not None:
+                 logger.warning(
+                     "Passing `scale` via `joint_attention_kwargs` when not using the PEFT backend is ineffective."
+                 )
+
+         hidden_states = self.x_embedder(hidden_states)
+         if len(hidden_states.shape) == 4:
+             self.tinfo["size"] = (hidden_states.shape[2], hidden_states.shape[3])
+
+         timestep = timestep.to(hidden_states.dtype) * 1000
+         if guidance is not None:
+             guidance = guidance.to(hidden_states.dtype) * 1000
+         else:
+             guidance = None
+
+         temb = (
+             self.time_text_embed(timestep, pooled_projections)
+             if guidance is None
+             else self.time_text_embed(timestep, guidance, pooled_projections)
+         )
+         encoder_hidden_states = self.context_embedder(encoder_hidden_states)
+
+         if txt_ids.ndim == 3:
+             logger.warning(
+                 "Passing `txt_ids` 3d torch.Tensor is deprecated."
+                 "Please remove the batch dimension and pass it as a 2d torch Tensor"
+             )
+             txt_ids = txt_ids[0]
+         if img_ids.ndim == 3:
+             logger.warning(
+                 "Passing `img_ids` 3d torch.Tensor is deprecated."
+                 "Please remove the batch dimension and pass it as a 2d torch Tensor"
+             )
+             img_ids = img_ids[0]
+
+         ids = torch.cat((txt_ids, img_ids), dim=0)
+         image_rotary_emb = self.pos_embed(ids)
+
+         for index_block, block in enumerate(self.transformer_blocks):
+             if torch.is_grad_enabled() and self.gradient_checkpointing:
+
+                 def create_custom_forward(module, return_dict=None):
+                     def custom_forward(*inputs):
+                         if return_dict is not None:
+                             return module(*inputs, return_dict=return_dict)
+                         else:
+                             return module(*inputs)
+
+                     return custom_forward
+
+                 ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
+                 encoder_hidden_states, hidden_states = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(block),
+                     hidden_states,
+                     encoder_hidden_states,
+                     temb,
+                     image_rotary_emb,
+                     **ckpt_kwargs,
+                 )
+
+             else:
+                 encoder_hidden_states, hidden_states = block(
+                     hidden_states=hidden_states,
+                     encoder_hidden_states=encoder_hidden_states,
+                     temb=temb,
+                     image_rotary_emb=image_rotary_emb,
+                     joint_attention_kwargs=joint_attention_kwargs,
+                 )
+
+             if controlnet_block_samples is not None:
+                 interval_control = len(self.transformer_blocks) / len(controlnet_block_samples)
+                 interval_control = int(np.ceil(interval_control))
+                 if controlnet_blocks_repeat:
+                     hidden_states = (
+                         hidden_states + controlnet_block_samples[index_block % len(controlnet_block_samples)]
+                     )
+                 else:
+                     hidden_states = hidden_states + controlnet_block_samples[index_block // interval_control]
+
+         hidden_states = torch.cat([encoder_hidden_states, hidden_states], dim=1)
+
+         for index_block, block in enumerate(self.single_transformer_blocks):
+             if torch.is_grad_enabled() and self.gradient_checkpointing:
+
+                 def create_custom_forward(module, return_dict=None):
+                     def custom_forward(*inputs):
+                         if return_dict is not None:
+                             return module(*inputs, return_dict=return_dict)
+                         else:
+                             return module(*inputs)
+
+                     return custom_forward
+
+                 ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
+                 hidden_states = torch.utils.checkpoint.checkpoint(
+                     create_custom_forward(block),
+                     hidden_states,
+                     temb,
+                     image_rotary_emb,
+                     **ckpt_kwargs,
+                 )
+
+             else:
+                 hidden_states = block(
+                     hidden_states=hidden_states,
+                     temb=temb,
+                     image_rotary_emb=image_rotary_emb,
+                     joint_attention_kwargs=joint_attention_kwargs,
+                 )
+
+             if controlnet_single_block_samples is not None:
+                 interval_control = len(self.single_transformer_blocks) / len(controlnet_single_block_samples)
+                 interval_control = int(np.ceil(interval_control))
+                 hidden_states[:, encoder_hidden_states.shape[1] :, ...] = (
+                     hidden_states[:, encoder_hidden_states.shape[1] :, ...]
+                     + controlnet_single_block_samples[index_block // interval_control]
+                 )
+
+         hidden_states = hidden_states[:, encoder_hidden_states.shape[1] :, ...]
+
+         hidden_states = self.norm_out(hidden_states, temb)
+         output = self.proj_out(hidden_states)
+
+         if USE_PEFT_BACKEND:
+             unscale_lora_layers(self, lora_scale)
+
+         if not return_dict:
+             return (output,)
+
+         return Transformer2DModelOutput(sample=output)
+
  Pipeline = None
+ torch.backends.cuda.matmul.allow_tf32 = True
+ torch.backends.cudnn.enabled = True
+ torch.backends.cudnn.benchmark = True

  ckpt_id = "black-forest-labs/FLUX.1-schnell"
+ ckpt_revision = "741f7c3ce8b383c54771c7003378a50191e9efe9"
  def empty_cache():
-     start = time.time()
      gc.collect()
      torch.cuda.empty_cache()
      torch.cuda.reset_max_memory_allocated()
      torch.cuda.reset_peak_memory_stats()
-     print(f"Flush took: {time.time() - start}")

  def load_pipeline() -> Pipeline:
      empty_cache()
-     dtype, device = torch.bfloat16, "cuda"

+     dtype, device = torch.bfloat16, "cuda"
+
      text_encoder_2 = T5EncoderModel.from_pretrained(
-         "city96/t5-v1_1-xxl-encoder-bf16", torch_dtype=torch.bfloat16
-     )
-     vae=AutoencoderKL.from_pretrained(ckpt_id, subfolder="vae", torch_dtype=dtype)
+         "silentdriver/aadb864af9", revision = "060dabc7fa271c26dfa3fd43c16e7c5bf3ac7892", torch_dtype=torch.bfloat16
+     ).to(memory_format=torch.channels_last)
+
+     vae = AutoencoderTiny.from_pretrained("silentdriver/7815792fb4", revision="bdb7d88ebe5a1c6b02a3c0c78651dd57a403fdf5", torch_dtype=dtype)
+
+     path = os.path.join(HF_HUB_CACHE, "models--silentdriver--7d92df966a/snapshots/add1b8d9a84c728c1209448c4a695759240bad3c")
+     generator = torch.Generator(device=device)
+     model = FluxTransformer2DModel.from_pretrained(path, torch_dtype=dtype, use_safetensors=False, generator= generator).to(memory_format=torch.channels_last)
+     torch.backends.cudnn.benchmark = True
+     torch.backends.cudnn.deterministic = False
      pipeline = DiffusionPipeline.from_pretrained(
          ckpt_id,
          vae=vae,
-         text_encoder_2 = text_encoder_2,
+         revision=ckpt_revision,
+         transformer=model,
+         text_encoder_2=text_encoder_2,
          torch_dtype=dtype,
-     )
-     torch.backends.cudnn.benchmark = True
-     torch.backends.cuda.matmul.allow_tf32 = True
-     torch.cuda.set_per_process_memory_fraction(0.9)
-     pipeline.text_encoder.to(memory_format=torch.channels_last)
-     pipeline.transformer.to(memory_format=torch.channels_last)
-
-
-     pipeline.vae.to(memory_format=torch.channels_last)
+     ).to(device)
      pipeline.vae = torch.compile(pipeline.vae)
+     for _ in range(3):
+         pipeline(prompt="blah blah waah waah oneshot oneshot gang gang", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)

-     pipeline._exclude_from_cpu_offload = ["vae"]
-     pipeline.enable_sequential_cpu_offload()
-     for _ in range(2):
-         pipeline(prompt="onomancy, aftergo, spirantic, Platyhelmia, modificator, drupaceous, jobbernowl, hereness", width=1024, height=1024, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256)
-
+     empty_cache()
      return pipeline


- @torch.inference_mode()
- def infer(request: TextToImageRequest, pipeline: Pipeline) -> Image:
-     torch.cuda.reset_peak_memory_stats()
-     generator = Generator("cuda").manual_seed(request.seed)
+ @torch.no_grad()
+ def infer(request: TextToImageRequest, pipeline: Pipeline, generator: Generator) -> Image:
      image=pipeline(request.prompt,generator=generator, guidance_scale=0.0, num_inference_steps=4, max_sequence_length=256, height=request.height, width=request.width, output_type="pil").images[0]
-     return(image)
+     return image
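Taken together, load_pipeline now assembles the pipeline from the pinned silentdriver/* repositories (custom transformer, tiny VAE, bf16 T5 encoder), compiles the VAE, and warms up with three dummy generations, while infer only runs the 4-step schnell sampling with a caller-provided generator. A minimal driver sketch (the TextToImageRequest field names follow the diff; the prompt and required constructor arguments are assumptions):

import torch
from pipeline import load_pipeline, infer
from pipelines.models import TextToImageRequest

pipeline = load_pipeline()                                    # load weights, compile VAE, warm up
request = TextToImageRequest(prompt="a red fox in the snow", height=1024, width=1024, seed=0)
generator = torch.Generator("cuda").manual_seed(request.seed)
image = infer(request, pipeline, generator)                   # guidance_scale=0.0, 4 inference steps
image.save("out.jpg")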
uv.lock CHANGED
@@ -1,8 +1,15 @@
  version = 1
  requires-python = ">=3.10, <3.13"
  resolution-markers = [
- "python_full_version < '3.12'",
- "python_full_version >= '3.12'",
+ "python_full_version < '3.11' and platform_system == 'Darwin'",
+ "python_full_version < '3.11' and platform_machine == 'aarch64' and platform_system == 'Linux'",
+ "(python_full_version < '3.11' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version < '3.11' and platform_system != 'Darwin' and platform_system != 'Linux')",
+ "python_full_version == '3.11.*' and platform_system == 'Darwin'",
+ "python_full_version == '3.11.*' and platform_machine == 'aarch64' and platform_system == 'Linux'",
+ "(python_full_version == '3.11.*' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version == '3.11.*' and platform_system != 'Darwin' and platform_system != 'Linux')",
+ "python_full_version >= '3.12' and platform_system == 'Darwin'",
+ "python_full_version >= '3.12' and platform_machine == 'aarch64' and platform_system == 'Linux'",
+ "(python_full_version >= '3.12' and platform_machine != 'aarch64' and platform_system != 'Darwin') or (python_full_version >= '3.12' and platform_system != 'Darwin' and platform_system != 'Linux')",
  ]

  [[package]]
@@ -147,13 +154,14 @@ wheels = [

  [[package]]
  name = "flux-schnell-edge-inference"
- version = "7"
+ version = "8"
  source = { editable = "." }
  dependencies = [
  { name = "accelerate" },
  { name = "diffusers" },
  { name = "edge-maxxing-pipelines" },
  { name = "gitpython" },
+ { name = "hf-transfer" },
  { name = "omegaconf" },
  { name = "protobuf" },
  { name = "sentencepiece" },
@@ -168,11 +176,12 @@ requires-dist = [
  { name = "diffusers", specifier = "==0.31.0" },
  { name = "edge-maxxing-pipelines", git = "https://github.com/womboai/edge-maxxing?subdirectory=pipelines&rev=7c760ac54f6052803dadb3ade8ebfc9679a94589#7c760ac54f6052803dadb3ade8ebfc9679a94589" },
  { name = "gitpython", specifier = ">=3.1.43" },
+ { name = "hf-transfer", specifier = "==0.1.8" },
  { name = "omegaconf", specifier = "==2.3.0" },
  { name = "protobuf", specifier = "==5.28.3" },
  { name = "sentencepiece", specifier = "==0.2.0" },
  { name = "torch", specifier = "==2.5.1" },
- { name = "torchao", specifier = ">=0.6.1" },
+ { name = "torchao", specifier = "==0.6.1" },
  { name = "transformers", specifier = "==4.46.2" },
  ]

@@ -209,6 +218,43 @@ wheels = [
  { url = "https://files.pythonhosted.org/packages/e9/bd/cc3a402a6439c15c3d4294333e13042b915bbeab54edc457c723931fed3f/GitPython-3.1.43-py3-none-any.whl", hash = "sha256:eec7ec56b92aad751f9912a73404bc02ba212a23adb2c7098ee668417051a1ff", size = 207337 },
  ]

+ [[package]]
+ name = "hf-transfer"
+ version = "0.1.8"
+ source = { registry = "https://pypi.org/simple" }
+ sdist = { url = "https://files.pythonhosted.org/packages/d3/0e/ba51e31148f0a9bc8d44878086535c2dc6d9a8dce321250e9bcdd3c110ea/hf_transfer-0.1.8.tar.gz", hash = "sha256:26d229468152e7a3ec12664cac86b8c2800695fd85f9c9a96677a775cc04f0b3", size = 23595 }
+ wheels = [
+ { url = "https://files.pythonhosted.org/packages/4f/eb/469e68c4259c4f4ad8e00967ad2f72ff1ba5e2712b4e1093e3e03c5cbc3d/hf_transfer-0.1.8-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:70858f9e94286738ed300484a45beb5cfee6a7ddac4c5886f9c6fce7823ac5ab", size = 1422386 },
+ { url = "https://files.pythonhosted.org/packages/bd/3d/5e8966b47aa86cd50f2017c76c2634aa09a437224567f379bc28d6580d7c/hf_transfer-0.1.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:38adc73f0a8526319d90f7cc5dc2d5e4bb66f487a513d94b98aa6725be732e4a", size = 1406027 },
+ { url = "https://files.pythonhosted.org/packages/61/e0/fd5f849ed7b2bf9b2bb008f3df3ee5a8773ca98362302833708cce26c337/hf_transfer-0.1.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44d2f0c08198d8d899fe9d66e86aee2dd844bd7ce33888f261373fcec81d2a54", size = 3781136 },
+ { url = "https://files.pythonhosted.org/packages/d5/e9/fad10fb8b04c91cb8775b850f2bc578a1fb6168e2ab2b04ebb8525466159/hf_transfer-0.1.8-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1de2a4ef36f9e60b3d3bec00193c0aafd75771709f2ca51b9b162373f5af3d32", size = 3099910 },
+ { url = "https://files.pythonhosted.org/packages/8c/ae/8a608949a87280ed14f0f5e0adbeccab54a7ea3d3aabdf77ec38544dd44f/hf_transfer-0.1.8-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e319269e3606a5ff2979296841766649ac73598a4a8eee2a968f86c8071fea5a", size = 3589277 },
+ { url = "https://files.pythonhosted.org/packages/81/ca/855ea35c9f997b500acd1baf6d6920ead00a0b7a8fccdcac74fe7e4f66d9/hf_transfer-0.1.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0f6026cf3be6a53ea42f92172f60c1c0675baaa9073f865e671b661dde5fd157", size = 3409983 },
+ { url = "https://files.pythonhosted.org/packages/5e/89/863f333b49603cc8d3c8862a428cc8fbaa9388ac8f076e9fa5ef3e729c3c/hf_transfer-0.1.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f865c33ada5bd3650c2b46e59979f2d7755c3f517f8d0facc78576a0c7d26406", size = 3562732 },
+ { url = "https://files.pythonhosted.org/packages/95/93/8137b83bd4ca6b1b4dab36e42af8c19d62c98ff8837306429547a92cbde0/hf_transfer-0.1.8-cp310-none-win32.whl", hash = "sha256:2054730e8d8ed21917c64be7199e06424b2bd08df1c43a72766afaed7992f2d3", size = 1129924 },
+ { url = "https://files.pythonhosted.org/packages/da/36/7583964f7cb0671071488f358dd388a8ef21f3a9bfe2e3596dac199010fc/hf_transfer-0.1.8-cp310-none-win_amd64.whl", hash = "sha256:2b4f1a9446ba31170b5b1eca4e916504d18378a6b5fe959896bdac8a736a5ecb", size = 1209808 },
+ { url = "https://files.pythonhosted.org/packages/72/94/d1c3d383536051f61a5d1d50bbc848a5c165d67d94bde0286ea343d5e00a/hf_transfer-0.1.8-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:e27c15fcc5869ad7e52bbc0bdec6106b288d1c463f8d2da92f28615a3b181361", size = 1422132 },
+ { url = "https://files.pythonhosted.org/packages/a0/a0/d10411151752499381052dbaf99fcbaefa8aaa3b5912b0535eea92d4699c/hf_transfer-0.1.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:871a0032d011ebc6409a73a8406b98b84ff2cd3ed7d9e1af8cdf4d660b9fab9b", size = 1405922 },
+ { url = "https://files.pythonhosted.org/packages/85/df/70543e805988b8a1085830e7f5ca290cc7a72c869b4ac2be1a4b619435aa/hf_transfer-0.1.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:686fa756e1e0214bb6327d33c66732c52274d94a8460beb50604ad988b391cf6", size = 3780881 },
+ { url = "https://files.pythonhosted.org/packages/93/c9/6920e63df88b2acaa3a4b0b616edca476ef8525d38d6f71437c0c9992b5d/hf_transfer-0.1.8-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:36a03b1b2911b0cf15b1b9d971a34b32dadcc4f2fd979aaff5979d6ce4017c34", size = 3099659 },
+ { url = "https://files.pythonhosted.org/packages/7d/b0/f2a85771491de8f887e71ba8769d9fa15c53cadf4c0959954735f5f6e71b/hf_transfer-0.1.8-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:079db90c81f41f4cf3227dfaaa855a9b8e9aef45bc7c2be29ce7232cd83ff881", size = 3588878 },
+ { url = "https://files.pythonhosted.org/packages/d8/36/cf7bd093988bdb530abbbfddd4cac80e3ccee4d80454af24fc0913bf2033/hf_transfer-0.1.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac08a4524127fdd14c234d4bcbe49d1c498acf5335c781714823179bcc8dc039", size = 3409342 },
+ { url = "https://files.pythonhosted.org/packages/30/61/b38643f305e1f0f76c8894cec38d5d39d0d6265a75cc9de0a94917ddff3d/hf_transfer-0.1.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:837432e73cb17274a6782b6216e8ce058aa325a475dc44a5a6a753d48b86d18a", size = 3562382 },
+ { url = "https://files.pythonhosted.org/packages/cd/66/723bc1eeca445a1ce5cf72026f45f8a7ae656a1e47fce026cca92e31dbd5/hf_transfer-0.1.8-cp311-none-win32.whl", hash = "sha256:b180f9823dde35aba9bc0f1d0c04ac8a873baebd3732a7ffe4f11940abc7df0d", size = 1129916 },
+ { url = "https://files.pythonhosted.org/packages/dd/7e/139527d276416bdeb08546cdcbd6f3e02326f3a6a6c2f00c71300a709e71/hf_transfer-0.1.8-cp311-none-win_amd64.whl", hash = "sha256:37907d2135cebcf8b6d419bb575148d89c224f16b69357f027bd29d0e85c6529", size = 1209794 },
+ { url = "https://files.pythonhosted.org/packages/5b/d6/54c9ea16c782cb79cdae78500c0a4bc7474236f94537ee954771e6e86c8c/hf_transfer-0.1.8-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:baf948f4f493949309cbe60529620b9b0aef854a22b6e526753364acc57c09b6", size = 1424195 },
+ { url = "https://files.pythonhosted.org/packages/63/57/09e2aa7fa63bc640d9c3fda2cc724744b46227d239bb4ae9bf33efc338c2/hf_transfer-0.1.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:0bce5c8bdefa478c5d5eaa646cc4ce1df5cfe764d98572ad0c6b8773e98d49f6", size = 1408105 },
+ { url = "https://files.pythonhosted.org/packages/19/72/f247f9632410d8b9655332b2007924557c293094ea91648336f49403afe7/hf_transfer-0.1.8-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54d6f8a1a86128d651a3799e1267c343d60f81f2c565d7c5416eb8e674e4cf0e", size = 3782066 },
+ { url = "https://files.pythonhosted.org/packages/d0/cf/8eccb6fcff8eedd79334ffaf65c44109e8bece1ecc232c1036de697d51fa/hf_transfer-0.1.8-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f79fd1b0c2ed93efb4c5f684118d7a762ecdd218e170df8208c4e13d3dcd4959", size = 3103992 },
+ { url = "https://files.pythonhosted.org/packages/23/e8/f5d4ef6febc9ece1099e1f8de64f05f4d9f5b62461c4e54aac324a94d1ab/hf_transfer-0.1.8-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:414df35692670683bf5623498ef9d88a8df5d77e9516515da6e2b34d1054c11f", size = 3590083 },
+ { url = "https://files.pythonhosted.org/packages/aa/de/cd8b36ecfd1c40119f307cb0dfd4ca5cd437beb8c92219d52a4253e0059a/hf_transfer-0.1.8-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c9798d5f951f66b96d40a7a53910260cb5874fda56cf5944dddb7c571f37ec3", size = 3406261 },
+ { url = "https://files.pythonhosted.org/packages/37/7f/914b684779dae9d2db4cdb6efa50426da7411754d820b8ddc9c10eef5042/hf_transfer-0.1.8-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:060c661691f85a61392e57579c80eb64b5ee277434e81fb582f605c1c8ff05d5", size = 3560705 },
+ { url = "https://files.pythonhosted.org/packages/de/17/e9ff11be0ab52d113091462f65fa280bd5c04c80e5b1dadb7f8de9645848/hf_transfer-0.1.8-cp312-none-win32.whl", hash = "sha256:f7840e32379820c3e1571a480238e05ea043e970c99d2e999578004a2eb17788", size = 1130448 },
+ { url = "https://files.pythonhosted.org/packages/58/60/04c18bbeb46cc2dc6fd237323c03f2e4c700bca122f28567dbb344ff5bab/hf_transfer-0.1.8-cp312-none-win_amd64.whl", hash = "sha256:9a3204ec423cc5e659872e8179f8704ad9ce2abb1e6a991f8838aedf1dc07830", size = 1206317 },
+ { url = "https://files.pythonhosted.org/packages/ae/e1/647dbd310042c11638ef330060777084f3394a82adc8274624b0f0601198/hf_transfer-0.1.8-pp310-pypy310_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:928ff036c3e98e10dcfbdb4fcdfc4592d37a5cc8e365a7ba8dfd4337e849d675", size = 3591149 },
+ { url = "https://files.pythonhosted.org/packages/13/c4/aaf060b26e720a7b4cb90d7f02dc18a56b18894cbd72fb610f75b11fb9dc/hf_transfer-0.1.8-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d49ba3ce67035f460ae1924fe2feafec155cb535eec7f31ed5109c19064cd294", size = 3564510 },
+ ]
+
  [[package]]
  name = "huggingface-hub"
  version = "0.26.2"
@@ -399,7 +445,7 @@ name = "nvidia-cudnn-cu12"
  version = "9.1.0.70"
  source = { registry = "https://pypi.org/simple" }
  dependencies = [
- { name = "nvidia-cublas-cu12" },
+ { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
  ]
  wheels = [
  { url = "https://files.pythonhosted.org/packages/9f/fd/713452cd72343f682b1c7b9321e23829f00b842ceaedcda96e742ea0b0b3/nvidia_cudnn_cu12-9.1.0.70-py3-none-manylinux2014_x86_64.whl", hash = "sha256:165764f44ef8c61fcdfdfdbe769d687e06374059fbb388b6c89ecb0e28793a6f", size = 664752741 },
@@ -410,7 +456,7 @@ name = "nvidia-cufft-cu12"
  version = "11.2.1.3"
  source = { registry = "https://pypi.org/simple" }
  dependencies = [
- { name = "nvidia-nvjitlink-cu12" },
+ { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
  ]
  wheels = [
  { url = "https://files.pythonhosted.org/packages/7a/8a/0e728f749baca3fbeffad762738276e5df60851958be7783af121a7221e7/nvidia_cufft_cu12-11.2.1.3-py3-none-manylinux2014_aarch64.whl", hash = "sha256:5dad8008fc7f92f5ddfa2101430917ce2ffacd86824914c82e28990ad7f00399", size = 211422548 },
@@ -431,9 +477,9 @@ name = "nvidia-cusolver-cu12"
  version = "11.6.1.9"
  source = { registry = "https://pypi.org/simple" }
  dependencies = [
- { name = "nvidia-cublas-cu12" },
- { name = "nvidia-cusparse-cu12" },
- { name = "nvidia-nvjitlink-cu12" },
+ { name = "nvidia-cublas-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
+ { name = "nvidia-cusparse-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
+ { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
  ]
  wheels = [
  { url = "https://files.pythonhosted.org/packages/46/6b/a5c33cf16af09166845345275c34ad2190944bcc6026797a39f8e0a282e0/nvidia_cusolver_cu12-11.6.1.9-py3-none-manylinux2014_aarch64.whl", hash = "sha256:d338f155f174f90724bbde3758b7ac375a70ce8e706d70b018dd3375545fc84e", size = 127634111 },
@@ -445,7 +491,7 @@ name = "nvidia-cusparse-cu12"
  version = "12.3.1.170"
  source = { registry = "https://pypi.org/simple" }
  dependencies = [
- { name = "nvidia-nvjitlink-cu12" },
+ { name = "nvidia-nvjitlink-cu12", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
  ]
  wheels = [
  { url = "https://files.pythonhosted.org/packages/96/a9/c0d2f83a53d40a4a41be14cea6a0bf9e668ffcf8b004bd65633f433050c0/nvidia_cusparse_cu12-12.3.1.170-py3-none-manylinux2014_aarch64.whl", hash = "sha256:9d32f62896231ebe0480efd8a7f702e143c98cfaa0e8a76df3386c1ba2b54df3", size = 207381987 },
@@ -1009,7 +1055,7 @@ name = "triton"
  version = "3.1.0"
  source = { registry = "https://pypi.org/simple" }
  dependencies = [
- { name = "filelock" },
+ { name = "filelock", marker = "(platform_machine != 'aarch64' and platform_system != 'Darwin') or (platform_system != 'Darwin' and platform_system != 'Linux')" },
  ]
  wheels = [
  { url = "https://files.pythonhosted.org/packages/98/29/69aa56dc0b2eb2602b553881e34243475ea2afd9699be042316842788ff5/triton-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b0dd10a925263abbe9fa37dcde67a5e9b2383fc269fdf59f5657cac38c5d1d8", size = 209460013 },