how to use?

#3
by chengyiming - opened
    import transformers
    from transformers import AutoModelForCausalLM, AutoTokenizer

    def generate_prompt(system_content, user_content):
        # Build the <|system|> / <|user|> / <|assistant|> chat template this
        # model expects, closing each turn with the </s> end-of-sequence token.
        # Example system prompt (translated from German): "You are a friendly
        # and helpful AI assistant. You answer questions factually and
        # precisely, without omitting relevant facts."
        prompt = f"<|system|>\n{system_content.strip()}</s>\n"
        prompt += f"<|user|>\n{user_content.strip()}</s>\n"
        prompt += "<|assistant|>\n"
        return prompt.strip()

    model = AutoModelForCausalLM.from_pretrained(
        repo_id,  # the model's Hugging Face repo id
        device_map="cuda",
        torch_dtype="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(repo_id)

    pipe = transformers.pipeline(
        task="text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        return_full_text=True,
    )
    outputs = pipe(generate_prompt(system_content, user_content))

Result:

    [{'generated_text': '<|system|>\nYou are an assistant that provides complete tags based on partial tags. When given a partial tag, provide a list of related tags.\n<|user|>\npartial tag:1girl,answer me in English\n<|assistant|>'}]

Prompt executed in 4.41 seconds.
How do I use this model?
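
With `return_full_text=True` the pipeline echoes the whole prompt inside `generated_text`, which is why the chat template appears verbatim in the result above. A minimal sketch, with assumed sampling values, that switches to `return_full_text=False` so only the newly generated reply comes back (the system and user strings are the ones from the result above):

    # Sketch with assumed sampling values; reuses model, tokenizer, and
    # generate_prompt from the snippet above.
    pipe = transformers.pipeline(
        task="text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=256,      # assumed generation budget
        do_sample=True,
        temperature=0.7,         # assumed sampling values
        top_p=0.9,
        top_k=50,
        return_full_text=False,  # return only the completion, not the prompt
    )
    result = pipe(generate_prompt(
        "You are an assistant that provides complete tags based on partial tags. "
        "When given a partial tag, provide a list of related tags.",
        "partial tag:1girl,answer me in English",
    ))
    print(result[0]["generated_text"])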
