# Merged_model_MoE / mergekit_moe_config.yml
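# Commented out: SLERP merge of liminerity/M7-7b and AurelPx/Percival_01-7b-slerp,
# with per-layer interpolation gradients (t) for the attention and MLP weights.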
#slices:
# - sources:
# - model: liminerity/M7-7b
# layer_range: [0, 32]
# - model: AurelPx/Percival_01-7b-slerp
# layer_range: [0, 32]
#merge_method: slerp
#base_model: liminerity/M7-7b
#parameters:
# t:
# - filter: self_attn
# value: [0.32115546068984224, 0.8724514048921832, 0.9416438704146307, 0.13454654769720242, 0.0020171582354998607]
# - filter: mlp
# value: [0.6788445393101578, 0.1275485951078168, 0.8654534523027976, 0.8654534523027976, 0.9979828417645001]
# - value: 0.23694210240180313
#dtype: bfloat16
#random_seed: 0
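# Commented out: SLERP merge of psmathur/orca_mini_v3_13b and garage-bAInd/Platypus2-13B,
# reusing layerwise t gradients at 13B scale (40 layers).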
#slices:
# - sources:
# - model: psmathur/orca_mini_v3_13b
# layer_range: [0, 40]
# - model: garage-bAInd/Platypus2-13B # SLERP requires both models to share an architecture and layer count
# layer_range: [0, 40]
#merge_method: slerp
#base_model: psmathur/orca_mini_v3_13b
#parameters:
# t:
# - filter: self_attn
# value: [0.32115546068984224, 0.8724514048921832, 0.9416438704146307, 0.13454654769720242, 0.0020171582354998607]
# - filter: mlp
# value: [0.6788445393101578, 0.1275485951078168, 0.05835612958536929, 0.8654534523027976, 0.9979828417645001]
# - value: 0.23694210240180313
#dtype: float16
#random_seed: 0
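# Commented out: TIES merge of three Llama-2-13B fine-tunes onto TheBloke/Llama-2-13B-fp16,
# combining a density gradient, a weight gradient, and an MLP-only weight filter.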
#slices:
# - sources:
# - model: psmathur/orca_mini_v3_13b
# parameters:
# density: [1, 0.7, 0.1] # density gradient
# weight: 1.0
# - model: garage-bAInd/Platypus2-13B
# parameters:
# density: 0.5
# weight: [0, 0.3, 0.7, 1] # weight gradient
# - model: WizardLM/WizardMath-13B-V1.0
# parameters:
# density: 0.33
# weight:
# - filter: mlp
# value: 0.5
# - value: 0
#merge_method: ties
#base_model: TheBloke/Llama-2-13B-fp16
#parameters:
# normalize: true
# int8_mask: true
#dtype: float16
#random_seed: 0
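# Active config: mergekit-moe mixture-of-experts on an AlphaMonarch-7B base. Each expert's
# positive_prompts are used to initialize the router, steering prompts resembling those
# phrases toward that expert.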
base_model: mlabonne/AlphaMonarch-7B
experts:
  - source_model: mlabonne/AlphaMonarch-7B
    positive_prompts:
      - "chat"
      - "assistant"
      - "tell me"
      - "explain"
      - "I want"
  - source_model: mlabonne/NeuralDaredevil-7B # Mistral-based, so all experts share the base model's architecture
    positive_prompts:
      - "reason"
      - "math"
      - "mathematics"
      - "solve"
      - "count"