---
# Donor-model roster for the DARE-TIES merge below. Each entry carries:
#   weight  — relative contribution (rescaled at merge time; normalize: true)
#   density — fraction of each model's delta parameters retained before merging
# NOTE(review): the "% weight allocation" labels in the group headers do not
# match the raw weights — groups sum to 2.95 / 1.67 / 1.33 (total 5.95),
# which normalizes to roughly 50% / 28% / 22%, not 35% / 45% / 20%.
# Confirm which is intended before shipping.
models:
  # === Core Instruction Following (35% weight allocation) ===
  - model: qingy2024/MwM-22B-Instruct
    parameters:
      weight: 1.35  # Largest single contributor — strong instruction adherence
      density: 0.82  # Sparse enough for creative deviation
  - model: DigitalSouls/BlackSheep-DigitalSoul-22B
    parameters:
      weight: 0.85  # Maintains dark thematic elements
      density: 0.88  # Higher retention for personality consistency
  - model: ArliAI/Mistral-Small-22B-ArliAI-RPMax-v1.1
    parameters:
      weight: 0.75  # Boosted for anti-repetition training
      density: 0.83  # Preserve diverse RP patterns

  # === Narrative Engine (45% weight allocation) ===
  - model: Kaoeiri/MS-Magpantheonsel-lark-v4x1.6.2RP-Cydonia-vXXX-22B-8
    parameters:
      weight: 0.72  # Balanced mythos building
      density: 0.68  # Lower retention — allows concept recombination
  - model: TheDrummer/Cydonia-22B-v1.3
    parameters:
      weight: 0.40  # Increased for plot continuity
      density: 0.65  # Encourage narrative surprises
  - model: Gryphe/Pantheon-RP-Pure-1.6.2-22b-Small
    parameters:
      weight: 0.55  # Maintains pure RP characteristics
      density: 0.70  # Standard retention
  # === Support Matrix (20% weight allocation) ===
  - model: anthracite-org/magnum-v4-22b
    parameters:
      weight: 0.60  # Boosted for atmospheric depth
      density: 0.72  # Preserve environmental descriptors
  - model: Saxo/Linkbricks-Horizon-AI-Korean-Superb-22B
    parameters:
      weight: 0.35  # Cultural nuance preservation
      density: 0.75  # High retention for language features
  - model: allura-org/MS-Meadowlark-22B
    parameters:
      weight: 0.38  # Slight boost for natural dialogue flow
      density: 0.68
  # NOTE(review): placeholder below refers to support models not listed here
  # (claimed 0.3-0.4 weights) — this config is incomplete as written; either
  # add the entries or drop the placeholder.
  # ... (other support models adjusted similarly with 0.3-0.4 weights)

# Global merge settings. dare_ties: randomly drops a (1 - density) fraction of
# each model's delta, rescales the survivors, then resolves sign conflicts
# TIES-style before adding to the base model.
merge_method: dare_ties
base_model: unsloth/Mistral-Small-Instruct-2409
parameters:
  density: 0.85  # Global default retention; per-model density values above take precedence
  epsilon: 0.12   # NOTE(review): epsilon is not a documented dare_ties knob in mergekit — verify it is consumed
  lambda: 1.22    # Scaling factor applied to the summed task vectors (>1 amplifies deltas)
  normalize: true  # Rescale model weights so contributions sum to 1
  # NOTE(review): per-layer `t` interpolation filters are a slerp-style option;
  # confirm dare_ties actually honors them, otherwise this section is inert.
  t:
    - filter: "self_attn"  # Keep attention structure close to base (stable)
      value: 0.92
    - filter: "mlp"        # Looser MLP blending — "chaotic concept mixing"
      value: 0.68
    - filter: "embed_tokens"      # High value preserves base vocabulary embeddings
      value: 0.95

dtype: bfloat16  # Output precision for the merged weights
random_seed: 314159  # Fixes the DARE random-drop mask for reproducible merges