Update gpt4free.yaml
gpt4free.yaml  CHANGED  (+124 -16)
@@ -4,13 +4,13 @@ description: Free generative AI service providers
 readme: Added ask LLM agent
 title: GPT for free
 url: https://huggingface.co/PiperMy/Node-Packages/resolve/main/gpt4free.yaml
-version:
+version: 11
 nodes:
   ask_llm_agent_gpt4free:
     _id: ask_llm_agent_gpt4free
     arrange:
-      x:
-      y:
+      x: 480
+      y: 130
     category:
       _id: llm_agents
       title: en=Language Agents;ru=Языковые агенты
@@ -28,7 +28,102 @@ nodes:
         type: string
         required: true
         multiline: true
-        default:
+        default: What is your name?
+      model:
+        order: 3
+        title: en=Model;ru=Модель
+        type: string
+        default: gpt-4o-mini
+        enum:
+          - gpt-3.5-turbo
+          - gpt-4
+          - gpt-4o
+          - gpt-4o-mini
+          - o1
+          - o1-mini
+          - o3-mini
+          - gigachat
+          - meta-ai
+          - llama-2-7b
+          - llama-3-8b
+          - llama-3-70b
+          - llama-3.1-8b
+          - llama-3.1-70b
+          - llama-3.1-405b
+          - llama-3.2-1b
+          - llama-3.2-3b
+          - llama-3.2-11b
+          - llama-3.2-90b
+          - llama-3.3-70b
+          - mixtral-8x7b
+          - mixtral-8x22b
+          - mistral-nemo
+          - mixtral-small-24b
+          - hermes-3
+          - phi-3.5-mini
+          - phi-4
+          - wizardlm-2-7b
+          - wizardlm-2-8x22b
+          - gemini-exp
+          - gemini-1.5-flash
+          - gemini-1.5-pro
+          - gemini-2.0
+          - gemini-2.0-flash
+          - gemini-2.0-flash-thinking
+          - gemini-2.0-pro
+          - claude-3-haiku
+          - claude-3-sonnet
+          - claude-3-opus
+          - claude-3.5-sonnet
+          - claude-3.7-sonnet
+          - claude-3.7-sonnet-thinking
+          - reka-core
+          - blackboxai
+          - blackboxai-pro
+          - command-r
+          - command-r-plus
+          - command-r7b
+          - command-a
+          - qwen-1.5-7b
+          - qwen-2-72b
+          - qwen-2-vl-7b
+          - qwen-2.5-72b
+          - qwen-2.5-coder-32b
+          - qwen-2.5-1m
+          - qwen-2-5-max
+          - qwq-32b
+          - qvq-72b
+          - pi
+          - deepseek-chat
+          - deepseek-v3
+          - deepseek-r1
+          - janus-pro-7b
+          - grok-3
+          - grok-3-r1
+          - sonar
+          - sonar-pro
+          - sonar-reasoning
+          - sonar-reasoning-pro
+          - r1-1776
+          - nemotron-70b
+          - dbrx-instruct
+          - glm-4
+          - mini_max
+          - yi-34b
+          - dolphin-2.6
+          - dolphin-2.9
+          - airoboros-70b
+          - lzlv-70b
+          - minicpm-2.5
+          - tulu-3-1-8b
+          - tulu-3-70b
+          - tulu-3-405b
+          - olmo-1-7b
+          - olmo-2-13b
+          - olmo-2-32b
+          - olmo-4-synthetic
+          - lfm-40b
+          - evil
     outputs:
       answer:
         title: en=Answer;ru=Ответ
@@ -44,13 +139,14 @@ nodes:
       export async function run({ inputs }) {

         const { NextNode } = DEFINITIONS;
-        const { instructions, question } = inputs;
+        const { model, instructions, question } = inputs;

         const { data } = await httpClient({
           method: 'post',
           url: `http://${NODE_ENV === 'test' ? '0.0.0.0:8080' : 'gpt4free:1337'}/v1/chat/completions`,
           timeout: 60000,
           data: {
+            model: model || 'gpt-4o-mini',
             stream: false,
             messages: [
               ...(!!instructions ? [{
@@ -70,8 +166,8 @@ nodes:
           }
         });

-        const { model, provider, choices: [{ message: { content: answer } }] } = data;
-        return NextNode.from({ outputs: {
+        const { model: used, provider, choices: [{ message: { content: answer } }] } = data;
+        return NextNode.from({ outputs: { model: used, provider, answer } });
       }
     source: catalog
     title: en=Ask LLM agent for free;ru=Спросить LLM агента бесп.
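The hunks above add a model input to the ask_llm_agent_gpt4free node and forward the selected model to the gpt4free chat-completions endpoint, falling back to gpt-4o-mini. The same request can be reproduced outside the node runtime with the minimal standalone sketch below. It is not the package's code: it assumes a gpt4free server reachable on 127.0.0.1:1337 and Node 18+ global fetch, since httpClient, NextNode and DEFINITIONS only exist inside the node runtime, and the system-message handling for instructions is an assumption because the diff truncates that part of the script.

// Standalone sketch (not part of the package), mirroring the request the updated script sends.
// Assumes a gpt4free server on 127.0.0.1:1337 and Node 18+ (global fetch).
const BASE_URL = 'http://127.0.0.1:1337'; // assumed local endpoint, matching the non-test URL above

async function askLlm({ model = 'gpt-4o-mini', instructions, question }) {
  const res = await fetch(`${BASE_URL}/v1/chat/completions`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      model,
      stream: false,
      messages: [
        // The system-message shape is an assumption; the diff truncates after the instructions spread.
        ...(instructions ? [{ role: 'system', content: instructions }] : []),
        { role: 'user', content: question },
      ],
    }),
  });
  const data = await res.json();
  // Same destructuring as the updated script: report which model and provider actually answered.
  const { model: used, provider, choices: [{ message: { content: answer } }] } = data;
  return { model: used, provider, answer };
}

askLlm({ question: 'What is your name?' }).then(console.log).catch(console.error);

Returning model and provider alongside answer mirrors the node's outputs, so a caller can see which backend gpt4free actually routed the request to. The image-generation node is changed the same way in the hunks that follow.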
@@ -96,13 +192,18 @@ nodes:
         default: superhero game card
       model:
         order: 2
-        title: Model
+        title: en=Model;ru=Модель
         type: string
-        default: flux
+        default: flux-dev
         enum:
-          -
-          -
-          - flux
+          - sdxl-turbo
+          - sd-3.5
+          - flux
+          - flux-pro
+          - flux-dev
+          - flux-schnell
+          - dall-e-3
+          - midjourney
     outputs:
       image:
         title: en=Image;ru=Изображение
@@ -110,27 +211,34 @@ nodes:
       provider:
         title: en=Provider;ru=Провайдер
         type: string
+      model:
+        title: en=Model;ru=Модель
+        type: string
     package: gpt4free
     script: |
+      // https://github.com/nomic-ai/gpt4all
+
      export async function run({ inputs }) {

        const { NextNode } = DEFINITIONS;
-        const {
+        const { model, prompt } = inputs;

-        const { data
+        const { data } = await httpClient({
          method: 'post',
          url: `http://${NODE_ENV === 'test' ? '0.0.0.0:8080' : 'gpt4free:1337'}/v1/images/generate`,
          timeout: 60000,
          data: {
+           model: model || 'flux-dev',
            prompt: prompt || 'superhero game card',
-           model: model || 'flux',
            response_format: 'b64_json'
          },
          headers: {
            'Content-Type': 'application/json',
          }
        });
-
+
+       const { model: used, provider, data: [{ b64_json }] } = data;
+       return NextNode.from({ outputs: { model: used, provider, image: Buffer.from(b64_json, 'base64') } });
      }
    source: catalog
    title: en=Generate image for free;ru=Генерация изобр. бесп.
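The generate-image hunks above forward the selected model (defaulting to flux-dev) to /v1/images/generate and decode the base64 payload into the node's image output, together with the model and provider that served it. A minimal standalone sketch under the same assumptions as the chat example (local gpt4free server, Node 18+ fetch, run as an ES module; the output filename is illustrative):

// Standalone sketch (not part of the package), mirroring the request in the hunk above.
// Run as an ES module (e.g. node generate-image.mjs) against a local gpt4free server.
import { writeFile } from 'node:fs/promises';

const BASE_URL = 'http://127.0.0.1:1337'; // assumed local endpoint

async function generateImage({ model = 'flux-dev', prompt = 'superhero game card' } = {}) {
  const res = await fetch(`${BASE_URL}/v1/images/generate`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ model, prompt, response_format: 'b64_json' }),
  });
  const data = await res.json();
  // Same response shape the node script destructures: the first image arrives base64-encoded.
  const { model: used, provider, data: [{ b64_json }] } = data;
  await writeFile('image.png', Buffer.from(b64_json, 'base64'));
  return { model: used, provider };
}

generateImage().then(console.log).catch(console.error);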