|
_id: hugging_face |
|
title: Hugging Face |
|
description: "" |
|
version: 1 |
|
readme: "" |
|
url: https://huggingface.co/PiperMy/Node-Packages/resolve/main/hugging_face.yaml |
|
author: Anton Breslavskii | https://github.com/breslavsky |
|
nodes: |
|
pulid_flux: |
|
_id: pulid_flux |
|
arrange: |
|
x: 140 |
|
y: 60 |
|
category: |
|
id: deep_swap |
|
title: en=Deep swap;ru=Глубокая замена |
|
environment: |
|
HF_TOKEN: |
|
title: Hugging Face Token |
|
type: string |
|
scope: global |
|
inputs: |
|
prompt: |
|
order: 1 |
|
title: en=Prompt;ru=Подсказка |
|
type: string |
|
required: true |
|
default: portrait, color, cinematic |
|
person: |
|
title: en=Person;ru=Фото человека |
|
type: image |
|
required: true |
|
imageSize: |
|
title: Image size |
|
type: string |
|
default: 1024x1024 |
|
enum: |
|
- 512x512 |
|
- 1024x1024 |
|
outputs: |
|
image: |
|
title: "" |
|
type: image |
|
package: hugging_face |
|
script: |- |
|
export async function run({ inputs }) {

    const { FatalError, NextNode } = DEFINITIONS;

    // Token is provided via the node's environment variables (HF_TOKEN, scope: global).
    const HF_TOKEN = env?.variables?.get('HF_TOKEN');
    if (!HF_TOKEN) {
        throw new FatalError('Please set your Hugging Face API key');
    }

    const { Client } = await import("@gradio/client/dist/index.js");
    const { person, prompt, imageSize } = inputs;

    // imageSize is "WIDTHxHEIGHT" (e.g. "1024x1024") — constrained by the input enum.
    const [width, height] = imageSize.split('x').map((v) => Number.parseInt(v, 10));

    const { data: id_image } = await download(person);
    const client = await Client.connect("PiperMy/PuLID-FLUX", {
        hf_token: HF_TOKEN
    });

    console.log('Send request');
    try {
        const { data } = await client.predict("/generate_image", {
            prompt,
            id_image,
            start_step: 0,
            guidance: 2,
            seed: -1,
            true_cfg: 1,
            width,
            height,
            num_steps: 25,
            id_weight: 1,
            neg_prompt: "artefacts, bad face",
            timestep_to_start_cfg: 0,
            max_sequence_length: 128,
        });

        // Gradio returns an array of file descriptors; the first entry carries
        // the URL of the generated image.
        const [{ url }] = data;
        const { data: image } = await download(url);
        return NextNode.from({ outputs: { image } });
    } catch (e) {
        // NOTE(review): wrapping only e.message drops the original stack/cause;
        // FatalError is a project type, so the original contract is preserved here.
        throw new FatalError(e.message);
    }
}
|
source: catalog |
|
title: PuLID Flux |
|
version: 1 |
|
deepseek_r1_8b_six: |
|
_id: deepseek_r1_8b_six |
|
arrange: |
|
x: 210 |
|
y: 310 |
|
category: |
|
id: llm_agents |
|
title: en=Language Agents;ru=Языковые агенты |
|
environment: |
|
HF_TOKEN: |
|
title: Hugging Face Token |
|
type: string |
|
scope: pipeline |
|
inputs: |
|
question: |
|
order: 1 |
|
title: en=Question;ru=Вопрос |
|
type: string |
|
required: true |
|
multiline: true |
|
default: What time is it now? Only JSON ready to parse. |
|
answerFormat: |
|
order: 2 |
|
title: en=Answer format;ru=Формат ответа |
|
description: Don't forget to add formatting instructions for the LLM |
|
type: string |
|
required: true |
|
default: text |
|
enum: |
|
- text|Text |
|
- json|JSON |
|
outputs: |
|
answer: |
|
title: en=Answer;ru=Ответ |
|
type: string |
|
json: |
|
title: JSON |
|
type: json |
|
package: hugging_face |
|
script: | |
|
export async function run({ inputs }) {

    const { FatalError, NextNode } = DEFINITIONS;
    const OpenAI = require('openai');

    // Token is provided via the node's environment variables (HF_TOKEN, scope: pipeline).
    const HF_TOKEN = env?.variables?.get('HF_TOKEN');
    if (!HF_TOKEN) {
        throw new FatalError('Please, set your API key for HF');
    }

    // `model` was previously destructured here too, but no such input is declared
    // for this node — removed as an unused local.
    const { question, answerFormat } = inputs;

    // The HF inference endpoint speaks the OpenAI-compatible chat API.
    const openai = new OpenAI({
        baseURL: 'https://jq20v0lfcxycc1z3.us-east-1.aws.endpoints.huggingface.cloud/v1/',
        apiKey: HF_TOKEN,
    });

    const result = await openai.chat.completions.create({
        model: 'lmstudio-community/DeepSeek-R1-Distill-Llama-8B-GGUF',
        messages: [
            { "role": "user", "content": question },
        ],
    });

    const { content: answer } = result.choices[0].message;

    switch (answerFormat) {
        case 'text':
            return NextNode.from({ outputs: { answer } });
        case 'json':
            try {
                // Strip Markdown code fences the model may wrap around the JSON payload.
                const json = answer.replace(/^\`\`\`json\s*/ig, '').replace(/\`\`\`\s*$/ig, '');
                return NextNode.from({ outputs: { json: JSON.parse(json) } });
            } catch (e) {
                console.log(e);
                // Report the defect with both the question and the raw LLM answer.
                message(`Wrong JSON for question \`\`\`text\n${question}\n\`\`\`\nanswer from LLM\n\`\`\`text${answer}\n\`\`\``, 'defect');
                throw new FatalError("Can't parse JSON answer from LLM");
            }
        default:
            throw new FatalError(`Wrong answer format ${answerFormat}`);
    }
}
|
source: catalog |
|
title: DeepSeek R1 Distill Llama 8B Q8 |
|
version: 1 |
|
|