base_model: princeton-nlp/gemma-2-9b-it-SimPO
dtype: bfloat16
merge_method: task_arithmetic
parameters:
  normalize: true
models:
  - model: princeton-nlp/gemma-2-9b-it-SimPO
  - model: AXCXEPT/EZO-Humanities-9B-gemma-2-it
    parameters:
      weight: 0.3
  - model: VAGOsolutions/SauerkrautLM-gemma-2-9b-it
    parameters:
      weight: 0.1
  - model: aisingapore/gemma2-9b-cpt-sea-lionv3-instruct
    parameters:
      weight: 0.2
  - model: silma-ai/SILMA-9B-Instruct-v1.0
    parameters:
      weight: 0.001
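
A minimal sketch of running this merge from Python, assuming the configuration above is saved as `config.yaml`, that mergekit's `run_merge` / `MergeOptions` / `MergeConfiguration` entry points are available as in recent releases, and that the output path and option values are illustrative rather than the settings actually used:

```python
# Sketch: apply the task_arithmetic merge config with mergekit.
# Assumes the YAML above is saved as config.yaml; output path is an assumption.
import yaml
import torch

from mergekit.config import MergeConfiguration
from mergekit.merge import MergeOptions, run_merge

OUTPUT_PATH = "./merged-gemma-2-9b"  # assumption: any writable directory

# Parse the YAML into mergekit's typed configuration object.
with open("config.yaml", "r", encoding="utf-8") as fp:
    merge_config = MergeConfiguration.model_validate(yaml.safe_load(fp))

# Run the merge; option values here are illustrative defaults.
run_merge(
    merge_config,
    out_path=OUTPUT_PATH,
    options=MergeOptions(
        cuda=torch.cuda.is_available(),  # use a GPU if one is present
        copy_tokenizer=True,             # copy the base model's tokenizer
        lazy_unpickle=False,
        low_cpu_memory=False,
    ),
)
```

The same merge can also be run from the command line with mergekit's `mergekit-yaml` entry point, e.g. `mergekit-yaml config.yaml ./merged-gemma-2-9b`.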