# Hotel-Booking-Assistant / chatbot_bedrock.py

import os

from langchain import PromptTemplate, HuggingFaceHub, LLMChain  # used only by the commented-out Bedrock demo below
from langchain.llms import HuggingFacePipeline
from langchain.memory import ConversationSummaryBufferMemory
from langchain.chains import ConversationChain
import langchain.globals
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline


def get_Model(hugging_face_key):
    model_id = "KvrParaskevi/Hotel-Assistant-Attempt4-Llama-2-7b"
    tokenizer = AutoTokenizer.from_pretrained(model_id, use_auth_token=hugging_face_key)
    model = AutoModelForCausalLM.from_pretrained(model_id, use_auth_token=hugging_face_key).eval()
    # Wrap the raw transformers model in a LangChain-compatible LLM; a bare
    # AutoModelForCausalLM cannot be passed to ConversationChain below.
    hf_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer, max_new_tokens=256)
    return HuggingFacePipeline(pipeline=hf_pipeline)


# TODO: write a function that connects to Bedrock.
# Earlier commented-out draft, fixed so the chain is actually constructed:
# def demo_chatbot():
#     # client = boto3.client('bedrock-runtime')
#     template = """Question: {question}
#     Answer: Let's think step by step."""
#     prompt = PromptTemplate(template=template, input_variables=["question"])
#     llm = HuggingFaceHub(repo_id="google/flan-t5-xl", model_kwargs={"temperature": 1e-10})
#     llm_chain = LLMChain(prompt=prompt, llm=llm)
#     question = "When was Google founded?"
#     print(llm_chain.run(question))
#     return llm_chain
#
# Test the returned chain with its run method:
# response = demo_chatbot().run("What is the temperature in Nuremberg today?")
# print(response)


def demo_miny_memory(model):
    # ConversationBufferMemory does not use llm/max_token_limit;
    # ConversationSummaryBufferMemory is the variant that accepts them,
    # which matches the token-limited memory intended here.
    memory = ConversationSummaryBufferMemory(llm=model, max_token_limit=512)
    return memory


def demo_chain(input_text, memory, model):
    # Run one conversational turn through the LLM with the shared memory.
    llm_conversation = ConversationChain(
        llm=model, memory=memory, verbose=langchain.globals.get_verbose()
    )
    chat_reply = llm_conversation.predict(input=input_text)
    return chat_reply
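

# Minimal usage sketch (an illustrative addition, not part of the original file).
# It assumes a valid Hugging Face token is exported in the HF_TOKEN environment
# variable; that variable name is a hypothetical choice.
if __name__ == "__main__":
    llm = get_Model(os.environ["HF_TOKEN"])
    memory = demo_miny_memory(llm)
    print(demo_chain("I would like to book a room for two nights.", memory, llm))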