from langchain_community.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_core.prompts import PromptTemplate

# Commented-out alternative: run a quantized Mistral-7B model locally
# (requires the accelerate and bitsandbytes packages).
# from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline, BitsAndBytesConfig
# import accelerate
# import bitsandbytes
# model_id = "mistralai/Mistral-7B-Instruct-v0.2"
hf = HuggingFacePipeline.from_model_id(
    model_id="facebook/bart-large-cnn",
    task="text2text-generation",  # bart-large-cnn is a seq2seq model; "text-generation" expects a causal LM
    pipeline_kwargs={"max_new_tokens": 1024},  # BART's decoder cannot generate anywhere near 10000 tokens
)
# tokenizer = AutoTokenizer.from_pretrained(model_id, quantization_config=quants)
# model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quants)
# pipe = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=1000)
# hf = HuggingFacePipeline(pipeline=pipe)
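# NOTE: the commented-out Mistral path above references a `quants` config that is
# never defined in this file. A minimal sketch, assuming 4-bit quantization via
# bitsandbytes (and an `import torch` alongside the other imports), might be:
# quants = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.float16)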
def generate_blog(role, words, topic):
    """Generate a Markdown blog post for the given topic, audience, and word limit."""
    template = '''You are an expert blog generator. Given a topic, the intended audience,
and a maximum number of words, write a blog post on the given topic.
Topic: {topic}
Intended Audience: {role}
Number of Words: {words}
Strictly return the output in Markdown format.'''
    prompt = PromptTemplate.from_template(template)
    chain = prompt | hf
    return chain.invoke({"topic": topic, "words": words, "role": role})
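
# A minimal usage sketch; the role, word count, and topic values below are
# illustrative assumptions, not values taken from the original Space:
if __name__ == "__main__":
    print(generate_blog(role="Data Scientists", words=300, topic="Vector Databases"))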