John6666 committed on
Commit 6c79e9c
1 Parent(s): f36e76c

Upload 8 files

Files changed (6):
  1. app.py +2 -4
  2. constants.py +70 -2
  3. dc.py +11 -13
  4. llmdolphin.py +2 -0
  5. llmenv.py +78 -0
  6. requirements.txt +1 -1
app.py CHANGED
@@ -11,7 +11,7 @@ from dc import (infer, _infer, pass_result, get_diffusers_model_list, get_sample
     SCHEDULE_TYPE_OPTIONS, SCHEDULE_PREDICTION_TYPE_OPTIONS, preprocessor_tab, SDXL_TASK, TASK_MODEL_LIST,
     PROMPT_W_OPTIONS, POST_PROCESSING_SAMPLER, IP_ADAPTERS_SD, IP_ADAPTERS_SDXL, DIFFUSERS_CONTROLNET_MODEL,
     TASK_AND_PREPROCESSORS, update_task_options, change_preprocessor_choices, get_ti_choices,
-    update_textual_inversion, set_textual_inversion_prompt, create_mask_now)
+    update_textual_inversion, set_textual_inversion_prompt, create_mask_now, IP_MODELS, MODE_IP_OPTIONS)
 # Translator
 from llmdolphin import (dolphin_respond_auto, dolphin_parse_simple,
     get_llm_formats, get_dolphin_model_format, get_dolphin_models,
@@ -221,7 +221,7 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
     image_resolution = gr.Slider(minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution",
                                  info="The maximum proportional size of the generated image based on the uploaded image.")
     with gr.Row():
-        controlnet_model = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0])
+        controlnet_model = gr.Dropdown(label="ControlNet model", choices=DIFFUSERS_CONTROLNET_MODEL, value=DIFFUSERS_CONTROLNET_MODEL[0], allow_custom_value=True)
         control_net_output_scaling = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
         control_net_start_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
         control_net_stop_threshold = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
@@ -237,8 +237,6 @@ with gr.Blocks(fill_width=True, elem_id="container", css=css, delete_cache=(60,
     tile_blur_sigma = gr.Number(minimum=0, maximum=100, value=9, step=1, label="'TILE' blur sigma")

     with gr.Tab("IP-Adapter"):
-        IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
-        MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
         with gr.Accordion("IP-Adapter 1", open=True, visible=True):
             with gr.Row():
                 with gr.Column():
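Note on the app.py changes: the ControlNet dropdown now accepts values outside its fixed choices, and the IP-Adapter option lists are imported from constants.py instead of being built inline in the UI code. A minimal Gradio sketch of the relevant Dropdown behavior, using hypothetical placeholder lists rather than the real ones from constants.py:

import gradio as gr

DIFFUSERS_CONTROLNET_MODEL = ["Automatic"]  # hypothetical stand-in for the real list
IP_MODELS = [("ip-adapter_sd15.bin (sd1.5)", "ip-adapter_sd15.bin")]  # hypothetical (label, value) entry

with gr.Blocks() as demo:
    # allow_custom_value=True lets the user type a value that is not in `choices`
    # (for example a ControlNet repo id) without Gradio rejecting it.
    controlnet_model = gr.Dropdown(
        label="ControlNet model",
        choices=DIFFUSERS_CONTROLNET_MODEL,
        value=DIFFUSERS_CONTROLNET_MODEL[0],
        allow_custom_value=True,
    )
    # Tuple choices show the first element in the UI and pass the second as the value.
    ip_model = gr.Dropdown(label="IP-Adapter model", choices=IP_MODELS, value=IP_MODELS[0][1])

demo.launch()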
constants.py CHANGED
@@ -5,6 +5,8 @@ from stablepy import (
     SD15_TASKS,
     SDXL_TASKS,
     ALL_BUILTIN_UPSCALERS,
+    IP_ADAPTERS_SD,
+    IP_ADAPTERS_SDXL,
 )

 # - **Download Models**
@@ -19,6 +21,7 @@ DOWNLOAD_LORA = "https://huggingface.co/Leopain/color/resolve/main/Coloring_book
 LOAD_DIFFUSERS_FORMAT_MODEL = [
     'stabilityai/stable-diffusion-xl-base-1.0',
     'Laxhar/noobai-XL-1.1',
+    'Laxhar/noobai-XL-Vpred-1.0',
     'black-forest-labs/FLUX.1-dev',
     'John6666/blue-pencil-flux1-v021-fp8-flux',
     'John6666/wai-ani-flux-v10forfp8-fp8-flux',
@@ -31,7 +34,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'mikeyandfriends/PixelWave_FLUX.1-dev_03',
     'terminusresearch/FluxBooru-v0.3',
     'black-forest-labs/FLUX.1-schnell',
-    'ostris/OpenFLUX.1',
+    # 'ostris/OpenFLUX.1',
     'shuttleai/shuttle-3-diffusion',
     'Laxhar/noobai-XL-1.0',
     'John6666/noobai-xl-nai-xl-epsilonpred10version-sdxl',
@@ -125,6 +128,7 @@ LOAD_DIFFUSERS_FORMAT_MODEL = [
     'John6666/duchaiten-pony-real-v20-sdxl',
     'John6666/duchaiten-pony-xl-no-score-v70-sdxl',
     'Spestly/OdysseyXL-3.0',
+    'Spestly/OdysseyXL-4.0',
     'KBlueLeaf/Kohaku-XL-Zeta',
     'cagliostrolab/animagine-xl-3.1',
     'yodayo-ai/kivotos-xl-2.0',
@@ -336,6 +340,20 @@ POST_PROCESSING_SAMPLER = ["Use same sampler"] + [
     name_s for name_s in scheduler_names if "Auto-Loader" not in name_s
 ]

+IP_MODELS = []
+ALL_IPA = sorted(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL))
+
+for origin_name in ALL_IPA:
+    suffixes = []
+    if origin_name in IP_ADAPTERS_SD:
+        suffixes.append("sd1.5")
+    if origin_name in IP_ADAPTERS_SDXL:
+        suffixes.append("sdxl")
+    ref_name = f"{origin_name} ({'/'.join(suffixes)})"
+    IP_MODELS.append((ref_name, origin_name))
+
+MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
+
 SUBTITLE_GUI = (
     "### This demo uses [diffusers](https://github.com/huggingface/diffusers)"
     " to perform different tasks in image generation."
@@ -356,7 +374,9 @@ EXAMPLES_GUI_HELP = (
     3. ControlNet Canny SDXL
     4. Optical pattern (Optical illusion) SDXL
     5. Convert an image to a coloring drawing
-    6. ControlNet OpenPose SD 1.5 and Latent upscale
+    6. V prediction model inference
+    7. V prediction model sd_embed variant inference
+    8. ControlNet OpenPose SD 1.5 and Latent upscale

     - Different tasks can be performed, such as img2img or using the IP adapter, to preserve a person's appearance or a specific style based on an image.
     """
@@ -483,6 +503,54 @@ EXAMPLES_GUI = [
         35,
         False,
     ],
+    [
+        "[mochizuki_shiina], [syuri22], newest, reimu, solo, outdoors, water, flower, lantern",
+        "worst quality, normal quality, old, sketch,",
+        28,
+        7.0,
+        -1,
+        "None",
+        0.33,
+        "DPM 3M Ef",
+        1600,
+        1024,
+        "Laxhar/noobai-XL-Vpred-1.0",
+        "txt2img",
+        "color_image.png",  # img control
+        1024,  # img resolution
+        0.35,  # strength
+        1.0,  # cn scale
+        0.0,  # cn start
+        1.0,  # cn end
+        "Classic",
+        None,
+        30,
+        False,
+    ],
+    [
+        "[mochizuki_shiina], [syuri22], newest, multiple girls, 2girls, earrings, jewelry, gloves, purple eyes, black hair, looking at viewer, nail polish, hat, smile, open mouth, fingerless gloves, sleeveless, :d, upper body, blue eyes, closed mouth, black gloves, hands up, long hair, shirt, bare shoulders, white headwear, blush, black headwear, blue nails, upper teeth only, short hair, white gloves, white shirt, teeth, rabbit hat, star earrings, purple nails, pink hair, detached sleeves, fingernails, fake animal ears, animal hat, sleeves past wrists, black shirt, medium hair, fur trim, sleeveless shirt, turtleneck, long sleeves, rabbit ears, star \\(symbol\\)",
+        "worst quality, normal quality, old, sketch,",
+        28,
+        7.0,
+        -1,
+        "None",
+        0.33,
+        "DPM 3M Ef",
+        1600,
+        1024,
+        "Laxhar/noobai-XL-Vpred-1.0",
+        "txt2img",
+        "color_image.png",  # img control
+        1024,  # img resolution
+        0.35,  # strength
+        1.0,  # cn scale
+        0.0,  # cn start
+        1.0,  # cn end
+        "Classic-sd_embed",
+        None,
+        30,
+        False,
+    ],
     [
         "1girl,face,curly hair,red hair,white background,",
         "(worst quality:2),(low quality:2),(normal quality:2),lowres,watermark,",
dc.py CHANGED
@@ -7,6 +7,7 @@ from stablepy import (
     check_scheduler_compatibility,
     TASK_AND_PREPROCESSORS,
     FACE_RESTORATION_MODELS,
+    scheduler_names,
 )
 from constants import (
     DIRECTORY_UPSCALERS,
@@ -20,16 +21,12 @@ from constants import (
     MODEL_TYPE_TASK,
     POST_PROCESSING_SAMPLER,
     DIFFUSERS_CONTROLNET_MODEL,
-
+    IP_MODELS,
+    MODE_IP_OPTIONS,
 )
 from stablepy.diffusers_vanilla.style_prompt_config import STYLE_NAMES
 import torch
 import re
-from stablepy import (
-    scheduler_names,
-    IP_ADAPTERS_SD,
-    IP_ADAPTERS_SDXL,
-)
 import time
 from PIL import ImageFile
 from utils import (
@@ -54,6 +51,9 @@ import warnings
 from stablepy import logger
 from diffusers import FluxPipeline
 # import urllib.parse
+import subprocess
+
+subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)

 ImageFile.LOAD_TRUNCATED_IMAGES = True
 torch.backends.cuda.matmul.allow_tf32 = True
@@ -143,6 +143,7 @@ flux_pipe = FluxPipeline.from_pretrained(
 )#.to("cuda")
 components = flux_pipe.components
 components.pop("transformer", None)
+components.pop("scheduler", None)
 delete_model(flux_repo)
 #components = None

@@ -205,10 +206,7 @@ class GuiSD:
         yield f"Loading model: {model_name}"

         if vae_model == "BakedVAE":
-            if not os.path.exists(model_name):
-                vae_model = model_name
-            else:
-                vae_model = None
+            vae_model = model_name
         elif vae_model:
             vae_type = "SDXL" if "sdxl" in vae_model.lower() else "SD 1.5"
             if model_type != vae_type:
@@ -429,10 +427,10 @@
         self.model.stream_config(concurrency=concurrency, latent_resize_by=1, vae_decoding=False)

         if task != "txt2img" and not image_control:
-            raise ValueError("No control image found: To use this function, you have to upload an image in 'Image ControlNet/Inpaint/Img2img'")
+            raise ValueError("Reference image is required. Please upload one in 'Image ControlNet/Inpaint/Img2img'.")

-        if task == "inpaint" and not image_mask:
-            raise ValueError("No mask image found: Specify one in 'Image Mask'")
+        if task in ["inpaint", "repaint"] and not image_mask:
+            raise ValueError("Mask image not found. Upload one in 'Image Mask' to proceed.")

         if "https://" not in str(UPSCALER_DICT_GUI[upscaler_model_path]):
             upscaler_model = upscaler_model_path
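dc.py and llmdolphin.py now clear the ZeroGPU offload cache at import time via a shell command. As a rough illustration of what that one-liner does, here is a pure-Python sketch under the assumption that /data-nvme/zerogpu-offload (the path used in the commit) is a directory of cached files and subdirectories:

# Sketch only: pure-Python equivalent of `rm -rf /data-nvme/zerogpu-offload/*`.
import shutil
from pathlib import Path

offload_dir = Path("/data-nvme/zerogpu-offload")  # path taken from the commit
if offload_dir.is_dir():
    for entry in offload_dir.iterdir():
        if entry.is_dir():
            shutil.rmtree(entry, ignore_errors=True)  # remove cached directories
        else:
            entry.unlink(missing_ok=True)  # remove cached files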
llmdolphin.py CHANGED
@@ -16,6 +16,8 @@ import wrapt_timeout_decorator
 from llama_cpp_agent.messages_formatter import MessagesFormatter
 from formatter import mistral_v1_formatter, mistral_v2_formatter, mistral_v3_tekken_formatter
 from llmenv import llm_models, llm_models_dir, llm_formats, llm_languages, dolphin_system_prompt
+import subprocess
+subprocess.run("rm -rf /data-nvme/zerogpu-offload/*", env={}, shell=True)


 llm_models_tupled_list = []
llmenv.py CHANGED
@@ -83,6 +83,83 @@ llm_models = {
     #"": ["", MessagesFormatterType.OPEN_CHAT],
     #"": ["", MessagesFormatterType.CHATML],
     #"": ["", MessagesFormatterType.PHI_3],
+    "kunoichi-squared-model_stock-7B.Q5_K_M.gguf": ["mradermacher/kunoichi-squared-model_stock-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "Infinite-Laymons-7B.Q5_K_M.gguf": ["mradermacher/Infinite-Laymons-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "EXAONESumm-3.0-7.8B-Instruct.Q5_K_M.gguf": ["mradermacher/EXAONESumm-3.0-7.8B-Instruct-GGUF", MessagesFormatterType.LLAMA_3],
+    "MyModelsMerge-7b.Q5_K_M.gguf": ["mradermacher/MyModelsMerge-7b-GGUF", MessagesFormatterType.MISTRAL],
+    "WestBeagle-7B.Q5_K_M.gguf": ["mradermacher/WestBeagle-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "Barcenas-8b-Juridico-Mexicano.Q5_K_M.gguf": ["mradermacher/Barcenas-8b-Juridico-Mexicano-GGUF", MessagesFormatterType.LLAMA_3],
+    "Dans-PersonalityEngine-V1.1.0-12b-Q4_K_M.gguf": ["bartowski/Dans-PersonalityEngine-V1.1.0-12b-GGUF", MessagesFormatterType.CHATML],
+    "Teleut-7b-RP-Q5_K_M.gguf": ["bartowski/Teleut-7b-RP-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Berghof-vanilla-7B.i1-Q5_K_M.gguf": ["mradermacher/Berghof-vanilla-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "NeuralKukedlc-7B-Labonned.i1-Q5_K_M.gguf": ["mradermacher/NeuralKukedlc-7B-Labonned-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Mistral-Nemo-VICIOUS_MESH-12B-2407.i1-Q4_K_M.gguf": ["mradermacher/Mistral-Nemo-VICIOUS_MESH-12B-2407-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "StrangeMerges_43-7B-dare_ties.Q5_K_M.gguf": ["mradermacher/StrangeMerges_43-7B-dare_ties-GGUF", MessagesFormatterType.MISTRAL],
+    "SmartLlama-3-8B-MS-v0.1.i1-Q5_K_M.gguf": ["mradermacher/SmartLlama-3-8B-MS-v0.1-i1-GGUF", MessagesFormatterType.LLAMA_3],
+    "bruphin-kappa.i1-Q5_K_M.gguf": ["mradermacher/bruphin-kappa-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "NeuralKrishnaMath-7B-slerp.Q5_K_M.gguf": ["mradermacher/NeuralKrishnaMath-7B-slerp-GGUF", MessagesFormatterType.MISTRAL],
+    "113-Aspect-Emotion-Model-1.1.Q5_K_M.gguf": ["mradermacher/113-Aspect-Emotion-Model-1.1-GGUF", MessagesFormatterType.MISTRAL],
+    "ErisepBeagleNuBuRPInfinWestLakev2-IreneRP-Neural-7B-slerp.Q5_K_M.gguf": ["mradermacher/ErisepBeagleNuBuRPInfinWestLakev2-IreneRP-Neural-7B-slerp-GGUF", MessagesFormatterType.MISTRAL],
+    "ScaleDown-7B-slerp-v0.1.i1-Q5_K_M.gguf": ["mradermacher/ScaleDown-7B-slerp-v0.1-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "KangalKhan-PolishedRuby-7B.i1-Q5_K_M.gguf": ["mradermacher/KangalKhan-PolishedRuby-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Eris_PrimeV4-Vision-7B.i1-Q5_K_M.gguf": ["mradermacher/Eris_PrimeV4-Vision-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Mistral-7B-Instruct-demi-merge-v0.2-7B.i1-Q5_K_M.gguf": ["mradermacher/Mistral-7B-Instruct-demi-merge-v0.2-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "DaturaCookie_7B.i1-Q5_K_M.gguf": ["mradermacher/DaturaCookie_7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Eris_PrimeV4-Vision-32k-7B.i1-Q5_K_M.gguf": ["mradermacher/Eris_PrimeV4-Vision-32k-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "swallow-hermes-st-v1.Q5_K_M.gguf": ["mradermacher/swallow-hermes-st-v1-GGUF", MessagesFormatterType.MISTRAL],
+    "StrangeMerges_45-7B-dare_ties.Q5_K_M.gguf": ["mradermacher/StrangeMerges_45-7B-dare_ties-GGUF", MessagesFormatterType.MISTRAL],
+    "Eris_PrimeV3.05-Vision-7B.i1-Q5_K_M.gguf": ["mradermacher/Eris_PrimeV3.05-Vision-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Pearl-7B-slerp.i1-Q5_K_M.gguf": ["mradermacher/Pearl-7B-slerp-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Follex-7B.i1-Q5_K_M.gguf": ["mradermacher/Follex-7B-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "flammen13-mistral-7B.Q5_K_M.gguf": ["mradermacher/flammen13-mistral-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "flammen12-mistral-7B.Q5_K_M.gguf": ["mradermacher/flammen12-mistral-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "KangalKhan-Beta-Sapphire-7B.Q5_K_M.gguf": ["mradermacher/KangalKhan-Beta-Sapphire-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "StrangeMerges_44-7B-dare_ties.Q5_K_M.gguf": ["mradermacher/StrangeMerges_44-7B-dare_ties-GGUF", MessagesFormatterType.MISTRAL],
+    "StarMix-7B-slerp.Q5_K_M.gguf": ["mradermacher/StarMix-7B-slerp-GGUF", MessagesFormatterType.MISTRAL],
+    "Qwen2.5-14B-Brocav3.i1-Q4_K_M.gguf": ["mradermacher/Qwen2.5-14B-Brocav3-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Calliope-7b.Q5_K_M.gguf": ["mradermacher/Calliope-7b-GGUF", MessagesFormatterType.MISTRAL],
+    "Qwen2.5-14B-Broca.i1-Q4_K_M.gguf": ["mradermacher/Qwen2.5-14B-Broca-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "iq-code-evmind-14b.i1-Q4_K_M.gguf": ["mradermacher/iq-code-evmind-14b-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "AetherLlama.Q5_K_M.gguf": ["mradermacher/AetherLlama-GGUF", MessagesFormatterType.LLAMA_3],
+    "Avalon_8.Q4_K_M.gguf": ["mradermacher/Avalon_8-GGUF", MessagesFormatterType.MISTRAL],
+    "Zephyr_beta_32k_7B.Q5_K_M.gguf": ["mradermacher/Zephyr_beta_32k_7B-GGUF", MessagesFormatterType.MISTRAL],
+    "iq-code-evmind-7b.Q5_K_M.gguf": ["mradermacher/iq-code-evmind-7b-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "Follex-7B-V2.i1-Q5_K_M.gguf": ["mradermacher/Follex-7B-V2-i1-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "bruphin-iota.Q5_K_M.gguf": ["mradermacher/bruphin-iota-GGUF", MessagesFormatterType.MISTRAL],
+    "Qwen2.5-14B-FinalMergev2.Q4_K_M.gguf": ["mradermacher/Qwen2.5-14B-FinalMergev2-GGUF", MessagesFormatterType.OPEN_CHAT],
+    "flammen11X-mistral-7B.Q5_K_M.gguf": ["mradermacher/flammen11X-mistral-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "J.O.S.I.E.3-Beta8-slerp.Q5_K_M.gguf": ["mradermacher/J.O.S.I.E.3-Beta8-slerp-GGUF", MessagesFormatterType.MISTRAL],
+    "flammen10-mistral-7B.Q5_K_M.gguf": ["mradermacher/flammen10-mistral-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "gemma-2-9b-it-abliterated-Q4_K_M.gguf": ["bartowski/gemma-2-9b-it-abliterated-GGUF", MessagesFormatterType.ALPACA],
+    "The-Trinity-Coder-7B.Q5_K_M.gguf": ["mradermacher/The-Trinity-Coder-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "MT-Gen5-MUMMIG-gemma-2-9B.Q4_K_M.gguf": ["mradermacher/MT-Gen5-MUMMIG-gemma-2-9B-GGUF", MessagesFormatterType.ALPACA],
+    "DarkCamelot_2.i1-Q4_K_M.gguf": ["mradermacher/DarkCamelot_2-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "NeuralSirKrishna-Spanish-FT.Q5_K_M.gguf": ["mradermacher/NeuralSirKrishna-Spanish-FT-GGUF", MessagesFormatterType.MISTRAL],
+    "Pearl-7B-0211-ties.Q5_K_M.gguf": ["mradermacher/Pearl-7B-0211-ties-GGUF", MessagesFormatterType.MISTRAL],
+    "Pearl-7B-0210-ties.Q5_K_M.gguf": ["mradermacher/Pearl-7B-0210-ties-GGUF", MessagesFormatterType.MISTRAL],
+    "GarrulusMarcoro-7B-v0.1.Q5_K_M.gguf": ["mradermacher/GarrulusMarcoro-7B-v0.1-GGUF", MessagesFormatterType.MISTRAL],
+    "NeuralDareDMistralPro-7b-slerp.Q5_K_M.gguf": ["mradermacher/NeuralDareDMistralPro-7b-slerp-GGUF", MessagesFormatterType.MISTRAL],
+    "Matter-0.1-7B-DPO-preview.Q5_K_M.gguf": ["mradermacher/Matter-0.1-7B-DPO-preview-GGUF", MessagesFormatterType.MISTRAL],
+    "Mistral7BInstructv0.2-RakutenAI7Bchat.Q5_K_M.gguf": ["mradermacher/Mistral7BInstructv0.2-RakutenAI7Bchat-GGUF", MessagesFormatterType.MISTRAL],
+    "Asherah_7B.i1-Q5_K_M.gguf": ["mradermacher/Asherah_7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Matter-0.1-7B.i1-Q5_K_M.gguf": ["mradermacher/Matter-0.1-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "kukulemon-7B.i1-Q5_K_M.gguf": ["mradermacher/kukulemon-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Mystical-dare5-7b.Q5_K_M.gguf": ["mradermacher/Mystical-dare5-7b-GGUF", MessagesFormatterType.MISTRAL],
+    "RasGulla1-7b.Q5_K_M.gguf": ["mradermacher/RasGulla1-7b-GGUF", MessagesFormatterType.MISTRAL],
+    "NeuralMonarchCoderPearlBeagle-T3Q-Mistral-Orca-Math-DPO-7b.Q5_K_M.gguf": ["mradermacher/NeuralMonarchCoderPearlBeagle-T3Q-Mistral-Orca-Math-DPO-7b-GGUF", MessagesFormatterType.MISTRAL],
+    "Einstein-4d-Marcoro14-nddmpk-KrishnaHercules-7b-slerp-fix1.Q5_K_M.gguf": ["mradermacher/Einstein-4d-Marcoro14-nddmpk-KrishnaHercules-7b-slerp-fix1-GGUF", MessagesFormatterType.MISTRAL],
+    "Einstein-4d-Marcoro14-nddmpk-KrishnaHercules-7b-slerp.Q5_K_M.gguf": ["mradermacher/Einstein-4d-Marcoro14-nddmpk-KrishnaHercules-7b-slerp-GGUF", MessagesFormatterType.MISTRAL],
+    "EvoLLM-JP-v1-7B.i1-Q5_K_M.gguf": ["mradermacher/EvoLLM-JP-v1-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "PearlMathMstralPro-7b-slerp.Q5_K_M.gguf": ["mradermacher/PearlMathMstralPro-7b-slerp-GGUF", MessagesFormatterType.MISTRAL],
+    "tokyotech-llm-Llama-3.1-Swallow-8B-Instruct-v0.3-Q5_K_M.gguf": ["mmnga/tokyotech-llm-Llama-3.1-Swallow-8B-Instruct-v0.3-gguf", MessagesFormatterType.LLAMA_3],
+    "Mayonnaise-4in1-03.Q5_K_M.gguf": ["mradermacher/Mayonnaise-4in1-03-GGUF", MessagesFormatterType.MISTRAL],
+    "Franken-MistressMaid-7B-v2.i1-Q5_K_M.gguf": ["mradermacher/Franken-MistressMaid-7B-v2-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Mayonnaise-4in1-01.Q5_K_M.gguf": ["mradermacher/Mayonnaise-4in1-01-GGUF", MessagesFormatterType.MISTRAL],
+    "TheMayonnaise.i1-Q5_K_M.gguf": ["mradermacher/TheMayonnaise-i1-GGUF", MessagesFormatterType.MISTRAL],
+    "Gemma-The-Writer-Mighty-Sword-9B-D_AU-Q4_k_m.gguf": ["DavidAU/Gemma-The-Writer-Mighty-Sword-9B-GGUF", MessagesFormatterType.ALPACA],
+    "Elly_7B.Q5_K_M.gguf": ["mradermacher/Elly_7B-GGUF", MessagesFormatterType.MISTRAL],
+    "Eris_Prime-V2-7B.Q5_K_M.gguf": ["mradermacher/Eris_Prime-V2-7B-GGUF", MessagesFormatterType.MISTRAL],
+    "discolm-mfto-7b-german-v0.1.Q5_K_M.gguf": ["mradermacher/discolm-mfto-7b-german-v0.1-GGUF", MessagesFormatterType.MISTRAL],
     "WestKunai-XS-7b.Q5_K_M.gguf": ["mradermacher/WestKunai-XS-7b-GGUF", MessagesFormatterType.MISTRAL],
     "WestKunai-XD-7b.Q5_K_M.gguf": ["mradermacher/WestKunai-XD-7b-GGUF", MessagesFormatterType.MISTRAL],
     "GenTest-7B-slerp.Q5_K_M.gguf": ["mradermacher/GenTest-7B-slerp-GGUF", MessagesFormatterType.MISTRAL],
@@ -1503,6 +1580,7 @@ llm_models = {
     "ContextualToppy_KTO-7B.Q5_K_M.gguf": ["mradermacher/ContextualToppy_KTO-7B-GGUF", MessagesFormatterType.MISTRAL],
     "Berghof-NSFW-7B.Q5_K_M.gguf": ["QuantFactory/Berghof-NSFW-7B-GGUF", MessagesFormatterType.MISTRAL],
     "Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW_iMat_Ch200_IQ4_XS.gguf": ["dddump/Japanese-TextGen-Kage-v0.1.2-2x7B-NSFW-gguf", MessagesFormatterType.VICUNA],
+    "EvoLLM-JP-A-v1-7B.i1-Q5_K_M.gguf": ["mradermacher/EvoLLM-JP-A-v1-7B-i1-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.2.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.2.1-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.1.Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.1-GGUF", MessagesFormatterType.MISTRAL],
     "ChatWaifu_v1.0.i1-Q5_K_M.gguf": ["mradermacher/ChatWaifu_v1.0-i1-GGUF", MessagesFormatterType.MISTRAL],
requirements.txt CHANGED
@@ -1,4 +1,4 @@
-git+https://github.com/R3gm/stablepy.git@47c19f5 # -b refactor_sampler_fix
+stablepy==0.6.0
 accelerate
 diffusers
 invisible_watermark