Update librechat.yaml

librechat.yaml (CHANGED, +51 −390)

@@ -1,418 +1,79 @@
Before:

version: 1.0.2

-
-

registration:
-  socialLogins: ["

endpoints:
  custom:
    # Mistral AI API
-    - name: "Mistral"
-      apiKey
      baseURL: "https://api.mistral.ai/v1"
      models:
-        default
-
-
-
-          ]
-        fetch: false
-      titleConvo: true
-      titleMethod: "completion"
-      titleModel: "mistral-tiny"
-      summarize: false
-      summaryModel: "mistral-tiny"
-      forcePrompt: false
-      modelDisplayLabel: "Mistral"
-      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

-
-
-
-
-      models:
-        "default": [
-          "nousresearch/nous-capybara-7b:free",
-          "mistralai/mistral-7b-instruct:free",
-          "huggingfaceh4/zephyr-7b-beta:free",
-          "openchat/openchat-7b:free",
-          "gryphe/mythomist-7b:free",
-          "undi95/toppy-m-7b:free",
-          "openrouter/cinematika-7b:free",
-          "openrouter/auto",
-          "nousresearch/nous-capybara-7b",
-          "mistralai/mistral-7b-instruct",
-          "huggingfaceh4/zephyr-7b-beta",
-          "openchat/openchat-7b",
-          "gryphe/mythomist-7b",
-          "openrouter/cinematika-7b",
-          "rwkv/rwkv-5-world-3b",
-          "recursal/rwkv-5-3b-ai-town",
-          "recursal/eagle-7b",
-          "jondurbin/bagel-34b",
-          "jebcarter/psyfighter-13b",
-          "koboldai/psyfighter-13b-2",
-          "neversleep/noromaid-mixtral-8x7b-instruct",
-          "nousresearch/nous-hermes-llama2-13b",
-          "meta-llama/codellama-34b-instruct",
-          "codellama/codellama-70b-instruct",
-          "phind/phind-codellama-34b",
-          "intel/neural-chat-7b",
-          "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
-          "nousresearch/nous-hermes-2-mixtral-8x7b-sft",
-          "haotian-liu/llava-13b",
-          "nousresearch/nous-hermes-2-vision-7b",
-          "meta-llama/llama-2-13b-chat",
-          "gryphe/mythomax-l2-13b",
-          "nousresearch/nous-hermes-llama2-70b",
-          "teknium/openhermes-2-mistral-7b",
-          "teknium/openhermes-2.5-mistral-7b",
-          "undi95/remm-slerp-l2-13b",
-          "undi95/toppy-m-7b",
-          "01-ai/yi-34b-chat",
-          "01-ai/yi-34b",
-          "01-ai/yi-6b",
-          "togethercomputer/stripedhyena-nous-7b",
-          "togethercomputer/stripedhyena-hessian-7b",
-          "mistralai/mixtral-8x7b",
-          "nousresearch/nous-hermes-yi-34b",
-          "open-orca/mistral-7b-openorca",
-          "openai/gpt-3.5-turbo",
-          "openai/gpt-3.5-turbo-0125",
-          "openai/gpt-3.5-turbo-1106",
-          "openai/gpt-3.5-turbo-16k",
-          "openai/gpt-4-turbo-preview",
-          "openai/gpt-4-1106-preview",
-          "openai/gpt-4",
-          "openai/gpt-4-32k",
-          "openai/gpt-4-vision-preview",
-          "openai/gpt-3.5-turbo-instruct",
-          "google/palm-2-chat-bison",
-          "google/palm-2-codechat-bison",
-          "google/palm-2-chat-bison-32k",
-          "google/palm-2-codechat-bison-32k",
-          "google/gemini-pro",
-          "google/gemini-pro-vision",
-          "perplexity/pplx-70b-online",
-          "perplexity/pplx-7b-online",
-          "perplexity/pplx-7b-chat",
-          "perplexity/pplx-70b-chat",
-          "meta-llama/llama-2-70b-chat",
-          "nousresearch/nous-capybara-34b",
-          "jondurbin/airoboros-l2-70b",
-          "austism/chronos-hermes-13b",
-          "migtissera/synthia-70b",
-          "pygmalionai/mythalion-13b",
-          "undi95/remm-slerp-l2-13b-6k",
-          "xwin-lm/xwin-lm-70b",
-          "gryphe/mythomax-l2-13b-8k",
-          "alpindale/goliath-120b",
-          "lizpreciatior/lzlv-70b-fp16-hf",
-          "neversleep/noromaid-20b",
-          "mistralai/mixtral-8x7b-instruct",
-          "cognitivecomputations/dolphin-mixtral-8x7b",
-          "anthropic/claude-2",
-          "anthropic/claude-2.0",
-          "anthropic/claude-instant-v1",
-          "mancer/weaver",
-          "mistralai/mistral-tiny",
-          "mistralai/mistral-small",
-          "mistralai/mistral-medium"
-          ]
-        fetch: false
-      titleConvo: true
-      titleModel: "gpt-3.5-turbo"
-      summarize: false
-      summaryModel: "gpt-3.5-turbo"
-      forcePrompt: false
-      modelDisplayLabel: "OpenRouter"

-
-
-      apiKey: "user_provided"
-      baseURL: "https://api.together.xyz"
-      models:
-        default: [
-          "zero-one-ai/Yi-34B-Chat",
-          "Austism/chronos-hermes-13b",
-          "DiscoResearch/DiscoLM-mixtral-8x7b-v2",
-          "Gryphe/MythoMax-L2-13b",
-          "lmsys/vicuna-13b-v1.5",
-          "lmsys/vicuna-7b-v1.5",
-          "lmsys/vicuna-13b-v1.5-16k",
-          "codellama/CodeLlama-13b-Instruct-hf",
-          "codellama/CodeLlama-34b-Instruct-hf",
-          "codellama/CodeLlama-70b-Instruct-hf",
-          "codellama/CodeLlama-7b-Instruct-hf",
-          "togethercomputer/llama-2-13b-chat",
-          "togethercomputer/llama-2-70b-chat",
-          "togethercomputer/llama-2-7b-chat",
-          "NousResearch/Nous-Capybara-7B-V1p9",
-          "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
-          "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT",
-          "NousResearch/Nous-Hermes-Llama2-70b",
-          "NousResearch/Nous-Hermes-llama-2-7b",
-          "NousResearch/Nous-Hermes-Llama2-13b",
-          "NousResearch/Nous-Hermes-2-Yi-34B",
-          "openchat/openchat-3.5-1210",
-          "Open-Orca/Mistral-7B-OpenOrca",
-          "togethercomputer/Qwen-7B-Chat",
-          "snorkelai/Snorkel-Mistral-PairRM-DPO",
-          "togethercomputer/alpaca-7b",
-          "togethercomputer/falcon-40b-instruct",
-          "togethercomputer/falcon-7b-instruct",
-          "togethercomputer/GPT-NeoXT-Chat-Base-20B",
-          "togethercomputer/Llama-2-7B-32K-Instruct",
-          "togethercomputer/Pythia-Chat-Base-7B-v0.16",
-          "togethercomputer/RedPajama-INCITE-Chat-3B-v1",
-          "togethercomputer/RedPajama-INCITE-7B-Chat",
-          "togethercomputer/StripedHyena-Nous-7B",
-          "Undi95/ReMM-SLERP-L2-13B",
-          "Undi95/Toppy-M-7B",
-          "WizardLM/WizardLM-13B-V1.2",
-          "garage-bAInd/Platypus2-70B-instruct",
-          "mistralai/Mistral-7B-Instruct-v0.1",
-          "mistralai/Mistral-7B-Instruct-v0.2",
-          "mistralai/Mixtral-8x7B-Instruct-v0.1",
-          "teknium/OpenHermes-2-Mistral-7B",
-          "teknium/OpenHermes-2p5-Mistral-7B",
-          "upstage/SOLAR-10.7B-Instruct-v1.0"
-          ]
-        fetch: false
-      titleConvo: true
-      titleModel: "openchat/openchat-3.5-1210"
-      summarize: false
-      summaryModel: "openchat/openchat-3.5-1210"
-      forcePrompt: false
-      modelDisplayLabel: "together.ai"
-      iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/togetherai.png"

-
-
-      apiKey: "user_provided"
-      baseURL: "https://api.naga.ac/v1"
-      models:
-        default: [
-          "gpt-4-0125-preview",
-          "gpt-4-turbo-preview",
-          "gpt-4-1106-preview",
-          "gpt-4-vision-preview",
-          "gpt-4-0613",
-          "gpt-3.5-turbo-1106",
-          "gpt-3.5-turbo-0613",
-          "gemini-pro",
-          "gemini-pro-vision",
-          "llama-2-70b-chat",
-          "llama-2-13b-chat",
-          "llama-2-7b-chat",
-          "code-llama-34b",
-          "mistral-7b",
-          "mixtral-8x7b",
-          "falcon-180b-chat",
-          "claude-2",
-          "claude-instant"
-          ]
-        fetch: false
-      titleConvo: true
-      titleModel: "gpt-3.5-turbo"
-      summarize: false
-      summaryModel: "gpt-3.5-turbo"
-      forcePrompt: false
-      modelDisplayLabel: "NagaAI"
-      iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/NagaAI.png"

-
-    - name: "Mandrill"
-      apiKey: "user_provided"
-      baseURL: "https://api.mandrillai.tech/v1"
-      models:
-        default: [
-          "gpt-4-vision-preview",
-          "gpt-4",
-          "gpt-4-1106-preview",
-          "gpt-3.5-turbo",
-          "gpt-3.5-turbo-1106",
-          "gpt-3.5-turbo-0613",
-          "gpt-3.5-turbo-0301",
-          "gemini-pro",
-          "gemini-pro-vision"
-          ]
-        fetch: false
-      titleConvo: true
-      titleModel: "gpt-3.5-turbo"
      summarize: false
-      summaryModel: "gpt-3.5-turbo"
-      forcePrompt: false
-      modelDisplayLabel: "Mandrill"
-      iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/Mandrill.png"

-
-
-      apiKey: "user_provided"
-      baseURL: "https://api.freegpt4.tech/v1/"
-      models:
-        default: [
-          "gpt-3.5-turbo",
-          "gpt-3.5-turbo-1106",
-          "gpt-3.5-turbo-0125",
-          "gpt-3.5-turbo-16k",
-          "gpt-4",
-          "gpt-4-1106-preview",
-          "gpt-4-0125-preview",
-          "claude",
-          "gemini-pro"
-          ]
-        fetch: false
-      titleConvo: true
-      titleModel: "gpt-3.5-turbo"
-      summarize: false
-      summaryModel: "gpt-3.5-turbo"
-      forcePrompt: false
-      modelDisplayLabel: "FreeGPT-4"
-      iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/FreeGPT-4.png"

-
-    - name: "ConvoAI"
-      apiKey: "user_provided"
-      baseURL: "https://api.convoai.tech/v1/"
-      models:
-        default: [
-          "gpt-3.5-turbo",
-          "gpt-3.5-turbo-1106",
-          "gpt-3.5-turbo-0125",
-          "gpt-3.5-turbo-16k",
-          "gpt-4",
-          "gpt-4-1106-preview",
-          "gpt-4-0125-preview",
-          "claude",
-          "gemini-pro"
-          ]
-        fetch: false
-      titleConvo: true
-      titleModel: "gpt-3.5-turbo"
-      summarize: false
-      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
-      modelDisplayLabel: "ConvoAI"
-      iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/ConvoAI.png"

-
-
-
-
      models:
-        default: [
-          "gpt-3.5-turbo",
-          "gpt-3.5-turbo-1106",
-          "gpt-3.5-turbo-0125",
-          "gpt-3.5-turbo-instruct",
-          "gpt-3.5-turbo-16k",
-          "gpt-4", "gpt-4-32k",
-          "gpt-4-1106-preview",
-          "gpt-4-0125-preview",
-          "gpt-4-vision-preview",
-          "claude",
-          "claude-2",
-          "claude-2.1",
-          "claude-instant-v1",
-          "claude-instant-v1-100k",
-          "pplx-70b-online",
-          "palm-2",
-          "bard",
-          "gemini-pro",
-          "gemini-pro-vision",
-          "mixtral-8x7b",
-          "mixtral-8x7b-instruct",
-          "mistral-tiny",
-          "mistral-small",
-          "mistral-medium",
-          "mistral-7b-instruct",
-          "codellama-7b-instruct",
-          "llama-2-7b",
-          "llama-2-70b-chat",
-          "mythomax-l2-13b-8k",
-          "sheep-duck-llama",
-          "goliath-120b",
-          "nous-llama",
-          "yi-34b",
-          "openchat",
-          "solar10-7b",
-          "pi"
-          ]
        fetch: true
      titleConvo: true
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
-
-      iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/zuki.png"
-
-    # Pawan
-    - name: "Pawan"
-      apiKey: "user_provided"
-      baseURL: "https://api.pawan.krd/pai-001-rp/v1"
-      models:
-        default: [
-          "pai-001-rp"
-          ]
-        fetch: false
-      titleConvo: true
-      titleModel: "pai-001-rp"
-      summarize: false
-      summaryModel: "pai-001-rp"
-      forcePrompt: false
-      modelDisplayLabel: "Pawan"
-      iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/Pawan.png"
-
-    # Pawan light
-    - name: "Pawan light"
-      apiKey: "user_provided"
-      baseURL: "https://api.pawan.krd/pai-001-light-rp/v1"
-      models:
-        default: [
-          "pai-001-light-rp"
-          ]
-        fetch: false
-      titleConvo: true
-      titleModel: "pai-001-light-rp"
-      summarize: false
-      summaryModel: "pai-001-light-rp"
-      forcePrompt: false
-      modelDisplayLabel: "Pawan light"
-      iconURL: "https://raw.githubusercontent.com/fuegovic/lc-config-yaml/main/icons/Pawan.png"

# See the Custom Configuration Guide for more information:
-# https://docs.librechat.ai/install/configuration/custom_config.html
-# Notes:
-## Legit Offering:
-### Mistral
-#### https://mistral.ai/
-### OpenRouter
-#### https://openrouter.ai/
-### TogetherAI
-#### https://www.together.ai/
-#### lots of open source models
-#### $25 credit
-### Pawan
-#### https://discord.gg/pawan
-#### Custom models (good for roleplay?)
-#### Get API key on discord with the command `/key`
-## Grey Area:
-### NagaAI
-#### https://discord.naga.ac
-#### Get API key on discord with the command `/key get`
-## Mandrill
-#### https://discord.mandrillai.tech
-#### Get API key on discord with the command `/key get`
-## FreeGPT-4
-#### https://discord.com/invite/gpt4
-#### Get API key on discord with the command `/key`
-## ConvoAI
-#### https://discord.gg/taH8UnARwd
-#### Get API key on discord with the command `/key`
-## Zukijourney
-#### https://discord.gg/zukijourney
-#### Get API key on discord with the command `/key`
After:

+# Configuration version (required)
version: 1.0.2

+# Cache settings: Set to true to enable caching
+cache: true

+# Example Registration Object Structure (optional)
registration:
+  socialLogins: ["github", "google", "discord", "openid", "facebook"]
+  # allowedDomains:
+  # - "gmail.com"

+# Definition of custom endpoints
endpoints:
  custom:
    # Mistral AI API
+    - name: "Mistral" # Unique name for the endpoint
+      # For `apiKey` and `baseURL`, you can use environment variables that you define.
+      # recommended environment variables:
+      apiKey: "${MISTRAL_API_KEY}"
      baseURL: "https://api.mistral.ai/v1"
+
+      # Models configuration
      models:
+        # List of default models to use. At least one value is required.
+        default: ["mistral-tiny", "mistral-small", "mistral-medium"]
+        # Fetch option: Set to true to fetch models from API.
+        fetch: true # Defaults to false.

+      # Optional configurations
+
+      # Title Conversation setting
+      titleConvo: true # Set to true to enable title conversation

+      # Title Method: Choose between "completion" or "functions".
+      titleMethod: "completion" # Defaults to "completion" if omitted.

+      # Title Model: Specify the model to use for titles.
+      titleModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.

+      # Summarize setting: Set to true to enable summarization.
      summarize: false

+      # Summary Model: Specify the model to use if summarization is enabled.
+      summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.

+      # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
      forcePrompt: false

+      # The label displayed for the AI model in messages.
+      modelDisplayLabel: "Mistral" # Default is "AI" when not set.
+
+      # Add additional parameters to the request. Default params will be overwritten.
+      addParams:
+        safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/
+
+      # Drop Default params parameters from the request. See default params in guide linked below.
+      # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
+      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]
+
+    # OpenRouter.ai Example
+    - name: "OpenRouter"
+      # For `apiKey` and `baseURL`, you can use environment variables that you define.
+      # recommended environment variables:
+      # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
+      apiKey: "${OPENROUTER_KEY}"
+      baseURL: "https://openrouter.ai/api/v1"
      models:
+        default: ["gpt-3.5-turbo"]
        fetch: true
      titleConvo: true
      titleModel: "gpt-3.5-turbo"
      summarize: false
      summaryModel: "gpt-3.5-turbo"
      forcePrompt: false
+      modelDisplayLabel: "OpenRouter"

# See the Custom Configuration Guide for more information:
+# https://docs.librechat.ai/install/configuration/custom_config.html
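
The new endpoints read their keys from the `MISTRAL_API_KEY` and `OPENROUTER_KEY` environment variables. For a Docker-based install, those variables and the config file itself have to reach the container; the sketch below shows one way to do that with a compose override. Treat it as an assumption rather than part of this commit: the `api` service name and the `/app/librechat.yaml` mount path follow the stock LibreChat compose setup and may differ in your deployment.

```yaml
# docker-compose.override.yml (hypothetical): mounts the custom config and forwards the
# environment variables referenced by apiKey: "${MISTRAL_API_KEY}" and "${OPENROUTER_KEY}" above.
services:
  api:                                        # assumed service name from the default LibreChat compose file
    volumes:
      - ./librechat.yaml:/app/librechat.yaml  # assumed in-container path for the custom config
    environment:
      - MISTRAL_API_KEY=${MISTRAL_API_KEY}    # taken from the host environment or a local .env file
      - OPENROUTER_KEY=${OPENROUTER_KEY}
```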
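
The provider blocks deleted above can still serve as templates for additional entries under `endpoints.custom`. As an illustration only, here is a sketch that re-adds the together.ai endpoint using values from the removed section (a `user_provided` key, the `https://api.together.xyz` base URL, and a trimmed model list); it is not part of this commit.

```yaml
    # together.ai (values carried over from the removed block; model list shortened for the example)
    - name: "together.ai"
      apiKey: "user_provided"            # each user supplies their own key
      baseURL: "https://api.together.xyz"
      models:
        default: [
          "mistralai/Mixtral-8x7B-Instruct-v0.1",
          "openchat/openchat-3.5-1210"
          ]
        fetch: false
      titleConvo: true
      titleModel: "openchat/openchat-3.5-1210"
      summarize: false
      summaryModel: "openchat/openchat-3.5-1210"
      forcePrompt: false
      modelDisplayLabel: "together.ai"
```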