code | apis | extract_api
---|---|---
string (lengths 141 to 78.9k) | sequence (lengths 1 to 23) | string (lengths 142 to 73.2k)
"""Base interface for large language models to expose."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
import yaml
from pydantic import BaseModel, Extra, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from langchain.callbacks.base import BaseCallbackManager
from langchain.schema import Generation, LLMResult
def _get_verbosity() -> bool:
return langchain.verbose
def get_prompts(
params: Dict[str, Any], prompts: List[str]
) -> Tuple[Dict[int, List], str, List[int], List[str]]:
"""Get prompts that are already cached."""
llm_string = str(sorted([(k, v) for k, v in params.items()]))
missing_prompts = []
missing_prompt_idxs = []
existing_prompts = {}
for i, prompt in enumerate(prompts):
if langchain.llm_cache is not None:
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
existing_prompts[i] = cache_val
else:
missing_prompts.append(prompt)
missing_prompt_idxs.append(i)
return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts
def update_cache(
existing_prompts: Dict[int, List],
llm_string: str,
missing_prompt_idxs: List[int],
new_results: LLMResult,
prompts: List[str],
) -> Optional[dict]:
"""Update the cache and get the LLM output."""
for i, result in enumerate(new_results.generations):
existing_prompts[missing_prompt_idxs[i]] = result
prompt = prompts[missing_prompt_idxs[i]]
if langchain.llm_cache is not None:
langchain.llm_cache.update(prompt, llm_string, result)
llm_output = new_results.llm_output
return llm_output
class BaseLLM(BaseModel, ABC):
"""LLM wrapper should take in a prompt and return a string."""
cache: Optional[bool] = None
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callback_manager: BaseCallbackManager = Field(default_factory=get_callback_manager)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@validator("callback_manager", pre=True, always=True)
def set_callback_manager(
cls, callback_manager: Optional[BaseCallbackManager]
) -> BaseCallbackManager:
"""If callback manager is None, set it.
This allows users to pass in None as callback manager, which is a nice UX.
"""
return callback_manager or get_callback_manager()
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@abstractmethod
def _generate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompts."""
@abstractmethod
async def _agenerate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompts."""
def generate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
        # If a string is passed in directly, no errors will be raised but outputs will
        # not make sense.
if not isinstance(prompts, list):
raise ValueError(
"Argument 'prompts' is expected to be of type List[str], received"
f" argument of type {type(prompts)}."
)
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, verbose=self.verbose
)
try:
output = self._generate(prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
self.callback_manager.on_llm_end(output, verbose=self.verbose)
return output
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
if len(missing_prompts) > 0:
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose
)
try:
new_results = self._generate(missing_prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
else:
llm_output = {}
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output)
async def agenerate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
if self.callback_manager.is_async:
await self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, verbose=self.verbose
)
else:
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, verbose=self.verbose
)
try:
output = await self._agenerate(prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
if self.callback_manager.is_async:
await self.callback_manager.on_llm_error(e, verbose=self.verbose)
else:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
if self.callback_manager.is_async:
await self.callback_manager.on_llm_end(output, verbose=self.verbose)
else:
self.callback_manager.on_llm_end(output, verbose=self.verbose)
return output
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
if len(missing_prompts) > 0:
if self.callback_manager.is_async:
await self.callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
verbose=self.verbose,
)
else:
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
verbose=self.verbose,
)
try:
new_results = await self._agenerate(missing_prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
if self.callback_manager.is_async:
await self.callback_manager.on_llm_error(e, verbose=self.verbose)
else:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
if self.callback_manager.is_async:
await self.callback_manager.on_llm_end(
new_results, verbose=self.verbose
)
else:
self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
else:
llm_output = {}
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output)
def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text."""
# TODO: this method may not be exact.
# TODO: this method may differ based on model (eg codex).
try:
from transformers import GPT2TokenizerFast
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"This is needed in order to calculate get_num_tokens. "
"Please it install it with `pip install transformers`."
)
        # create a GPT-2 tokenizer instance (used here as an approximation of the model's tokenizer)
        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
        # tokenize the text using the GPT-2 tokenizer
tokenized_text = tokenizer.tokenize(text)
# calculate the number of tokens in the tokenized text
return len(tokenized_text)
def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
return self.generate([prompt], stop=stop).generations[0][0].text
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {}
def __str__(self) -> str:
"""Get a string representation of the object for printing."""
cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
return f"{cls_name}\nParams: {self._identifying_params}"
@property
@abstractmethod
def _llm_type(self) -> str:
"""Return type of llm."""
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the LLM.
Args:
file_path: Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
prompt_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(prompt_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(prompt_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
class LLM(BaseLLM):
"""LLM class that expect subclasses to implement a simpler call method.
The purpose of this class is to expose a simpler interface for working
with LLMs, rather than expect the user to implement the full _generate method.
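    Example (an illustrative sketch only; ``EchoLLM`` is a hypothetical subclass
    used to show the interface, not something provided by the library):
        .. code-block:: python

            class EchoLLM(LLM):
                @property
                def _llm_type(self) -> str:
                    return "echo"
                def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
                    # A real subclass would call a model API here.
                    return prompt
            llm = EchoLLM()
            llm("Hello")  # -> "Hello"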
"""
@abstractmethod
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Run the LLM on the given prompt and input."""
def _generate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# TODO: add caching here.
generations = []
for prompt in prompts:
text = self._call(prompt, stop=stop)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
async def _agenerate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
raise NotImplementedError("Async generation not implemented for this LLM.")
| [
"langchain.schema.Generation",
"langchain.llm_cache.update",
"langchain.llm_cache.lookup",
"langchain.schema.LLMResult",
"langchain.callbacks.get_callback_manager"
] | [((1991, 2028), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1996, 2028), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2119, 2162), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager'}), '(default_factory=get_callback_manager)\n', (2124, 2162), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2311, 2363), 'pydantic.validator', 'validator', (['"""callback_manager"""'], {'pre': '(True)', 'always': '(True)'}), "('callback_manager', pre=True, always=True)\n", (2320, 2363), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2693, 2736), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (2702, 2736), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((5769, 5826), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (5778, 5826), False, 'from langchain.schema import Generation, LLMResult\n'), ((9134, 9191), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (9143, 9191), False, 'from langchain.schema import Generation, LLMResult\n'), ((9826, 9867), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (9859, 9867), False, 'from transformers import GPT2TokenizerFast\n'), ((12744, 12778), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (12753, 12778), False, 'from langchain.schema import Generation, LLMResult\n'), ((932, 978), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (958, 978), False, 'import langchain\n'), ((1720, 1774), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (1746, 1774), False, 'import langchain\n'), ((2664, 2686), 'langchain.callbacks.get_callback_manager', 'get_callback_manager', ([], {}), '()\n', (2684, 2686), False, 'from langchain.callbacks import get_callback_manager\n'), ((11346, 11361), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11350, 11361), False, 'from pathlib import Path\n'), ((11682, 11717), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (11691, 11717), False, 'import json\n'), ((11820, 11871), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (11829, 11871), False, 'import yaml\n'), ((12705, 12726), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (12715, 12726), False, 'from langchain.schema import Generation, LLMResult\n')] |
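# --- Illustrative sketch, not part of the file above: the helpers get_prompts and
# update_cache only take effect when the module-level langchain.llm_cache is set.
# Enabling the built-in in-memory cache looks like this; any cache backend exposing
# lookup/update could be used instead.
import langchain
from langchain.cache import InMemoryCache
langchain.llm_cache = InMemoryCache()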
import streamlit as st
import langchain
from langchain.utilities import SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from langchain.chat_models import ChatOpenAI
from langsmith import Client
from langchain.smith import RunEvalConfig, run_on_dataset
from pydantic import BaseModel, Field
db = SQLDatabase.from_uri("sqlite:///Chinook.db")
llm = ChatOpenAI(temperature=0)
db_chain = SQLDatabaseChain.from_llm(llm, db, return_intermediate_steps=True)
from langsmith import Client
client = Client()
def send_feedback(run_id, score):
client.create_feedback(run_id, "user_score", score=score)
st.set_page_config(page_title='🦜🔗 Ask the SQL DB App')
st.title('🦜🔗 Ask the SQL DB App')
st.info("Most 'question answering' applications run over unstructured text data. But a lot of the data in the world is tabular data! This is an attempt to create an application using [LangChain](https://github.com/langchain-ai/langchain) to let you ask questions of data in tabular format. For this demo application, we will use the Chinook dataset in a SQL database. Please explore the schema [here](https://www.sqlitetutorial.net/wp-content/uploads/2015/11/sqlite-sample-database-color.jpg) to get a sense for what questions you can ask. Please leave feedback on well the question is answered, and we will use that improve the application!")
query_text = st.text_input('Enter your question:', placeholder = 'Ask something like "How many artists are there?" or "Which artist has the most albums"')
# Form input and query
result = None
with st.form('myform', clear_on_submit=True):
submitted = st.form_submit_button('Submit')
if submitted:
with st.spinner('Calculating...'):
inputs = {"query": query_text}
response = db_chain(inputs, include_run_info=True)
result = response["result"]
sql_command = response["intermediate_steps"][1]
sql_result = response["intermediate_steps"][3]
run_id = response["__run"].run_id
if result is not None:
st.info(result)
st.code(sql_command)
st.code(sql_result)
col_blank, col_text, col1, col2 = st.columns([10, 2,1,1])
with col_text:
st.text("Feedback:")
with col1:
st.button("👍", on_click=send_feedback, args=(run_id, 1))
with col2:
st.button("👎", on_click=send_feedback, args=(run_id, 0))
| [
"langchain_experimental.sql.SQLDatabaseChain.from_llm",
"langchain.utilities.SQLDatabase.from_uri",
"langchain.chat_models.ChatOpenAI"
] | [((316, 360), 'langchain.utilities.SQLDatabase.from_uri', 'SQLDatabase.from_uri', (['"""sqlite:///Chinook.db"""'], {}), "('sqlite:///Chinook.db')\n", (336, 360), False, 'from langchain.utilities import SQLDatabase\n'), ((367, 392), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (377, 392), False, 'from langchain.chat_models import ChatOpenAI\n'), ((404, 470), 'langchain_experimental.sql.SQLDatabaseChain.from_llm', 'SQLDatabaseChain.from_llm', (['llm', 'db'], {'return_intermediate_steps': '(True)'}), '(llm, db, return_intermediate_steps=True)\n', (429, 470), False, 'from langchain_experimental.sql import SQLDatabaseChain\n'), ((510, 518), 'langsmith.Client', 'Client', ([], {}), '()\n', (516, 518), False, 'from langsmith import Client\n'), ((616, 670), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""🦜🔗 Ask the SQL DB App"""'}), "(page_title='🦜🔗 Ask the SQL DB App')\n", (634, 670), True, 'import streamlit as st\n'), ((671, 704), 'streamlit.title', 'st.title', (['"""🦜🔗 Ask the SQL DB App"""'], {}), "('🦜🔗 Ask the SQL DB App')\n", (679, 704), True, 'import streamlit as st\n'), ((705, 1358), 'streamlit.info', 'st.info', (['"""Most \'question answering\' applications run over unstructured text data. But a lot of the data in the world is tabular data! This is an attempt to create an application using [LangChain](https://github.com/langchain-ai/langchain) to let you ask questions of data in tabular format. For this demo application, we will use the Chinook dataset in a SQL database. Please explore the schema [here](https://www.sqlitetutorial.net/wp-content/uploads/2015/11/sqlite-sample-database-color.jpg) to get a sense for what questions you can ask. Please leave feedback on well the question is answered, and we will use that improve the application!"""'], {}), '(\n "Most \'question answering\' applications run over unstructured text data. But a lot of the data in the world is tabular data! This is an attempt to create an application using [LangChain](https://github.com/langchain-ai/langchain) to let you ask questions of data in tabular format. For this demo application, we will use the Chinook dataset in a SQL database. Please explore the schema [here](https://www.sqlitetutorial.net/wp-content/uploads/2015/11/sqlite-sample-database-color.jpg) to get a sense for what questions you can ask. Please leave feedback on well the question is answered, and we will use that improve the application!"\n )\n', (712, 1358), True, 'import streamlit as st\n'), ((1363, 1512), 'streamlit.text_input', 'st.text_input', (['"""Enter your question:"""'], {'placeholder': '"""Ask something like "How many artists are there?" or "Which artist has the most albums\\""""'}), '(\'Enter your question:\', placeholder=\n \'Ask something like "How many artists are there?" 
or "Which artist has the most albums"\'\n )\n', (1376, 1512), True, 'import streamlit as st\n'), ((1547, 1586), 'streamlit.form', 'st.form', (['"""myform"""'], {'clear_on_submit': '(True)'}), "('myform', clear_on_submit=True)\n", (1554, 1586), True, 'import streamlit as st\n'), ((1601, 1632), 'streamlit.form_submit_button', 'st.form_submit_button', (['"""Submit"""'], {}), "('Submit')\n", (1622, 1632), True, 'import streamlit as st\n'), ((1966, 1981), 'streamlit.info', 'st.info', (['result'], {}), '(result)\n', (1973, 1981), True, 'import streamlit as st\n'), ((1983, 2003), 'streamlit.code', 'st.code', (['sql_command'], {}), '(sql_command)\n', (1990, 2003), True, 'import streamlit as st\n'), ((2005, 2024), 'streamlit.code', 'st.code', (['sql_result'], {}), '(sql_result)\n', (2012, 2024), True, 'import streamlit as st\n'), ((2060, 2085), 'streamlit.columns', 'st.columns', (['[10, 2, 1, 1]'], {}), '([10, 2, 1, 1])\n', (2070, 2085), True, 'import streamlit as st\n'), ((2102, 2122), 'streamlit.text', 'st.text', (['"""Feedback:"""'], {}), "('Feedback:')\n", (2109, 2122), True, 'import streamlit as st\n'), ((2137, 2193), 'streamlit.button', 'st.button', (['"""👍"""'], {'on_click': 'send_feedback', 'args': '(run_id, 1)'}), "('👍', on_click=send_feedback, args=(run_id, 1))\n", (2146, 2193), True, 'import streamlit as st\n'), ((2208, 2264), 'streamlit.button', 'st.button', (['"""👎"""'], {'on_click': 'send_feedback', 'args': '(run_id, 0)'}), "('👎', on_click=send_feedback, args=(run_id, 0))\n", (2217, 2264), True, 'import streamlit as st\n'), ((1655, 1683), 'streamlit.spinner', 'st.spinner', (['"""Calculating..."""'], {}), "('Calculating...')\n", (1665, 1683), True, 'import streamlit as st\n')] |
"""
Utilities for ingesting different types of documents.
This includes cutting text into chunks and cleaning text.
"""
import re
from typing import Callable, Dict, List, Tuple
import langchain.docstore.document as docstore
import langchain.text_splitter as splitter
from loguru import logger
class IngestUtils:
"""
Utils for ingesting different types of documents.
This includes cutting text into chunks and cleaning text.
"""
def __init__(self, chunk_size: int, chunk_overlap: int, file_no: int, text_splitter_method: str):
self.chunk_size = chunk_size
self.chunk_overlap = chunk_overlap
self.file_no = file_no
self.text_splitter_method = text_splitter_method
def merge_hyphenated_words(self, text: str) -> str:
"""
Merge words in the text that have been split with a hyphen.
"""
return re.sub(r"(\w)-\n(\w)", r"\1\2", text)
def fix_newlines(self, text: str) -> str:
"""
Replace single newline characters in the text with spaces.
"""
return re.sub(r"(?<!\n)\n(?!\n)", " ", text)
def remove_multiple_newlines(self, text: str) -> str:
"""
Reduce multiple newline characters in the text to a single newline.
"""
return re.sub(r"\n{2,}", "\n", text)
def clean_texts(self,
texts: List[Tuple[int, str]],
cleaning_functions: List[Callable[[str], str]]
) -> List[Tuple[int, str]]:
"""
Apply the cleaning functions to the text of each page.
"""
logger.info("Cleaning texts")
cleaned_texts = []
for page_num, text in texts:
for cleaning_function in cleaning_functions:
text = cleaning_function(text)
cleaned_texts.append((page_num, text))
return cleaned_texts
# def split_text_into_chunks(self,
# text: Tuple[int, str],
# metadata: Dict[str, str]):
# """
# Split the text into chunks
# """
# text_splitter = self.get_splitter()
# chunk_no = 0
# for page_num, page in texts:
# logger.info(f"Splitting page {page_num}")
# chunks = text_splitter.split_text(page)
# def chunks_to_docs(self,
# chunks,
# metadata: Dict[str, str]):
# """
# Convert chunks into Documents
# """
# # initialize empty list of Documents
# docs: List[docstore.Document] = []
# # loop over chunks
# for i, chunk in enumerate(chunks):
# if self.file_no:
# metadata_combined = {
# "file_no": self.file_no,
# "chunk_no": chunk_no,
# "source": f"F{self.file_no}-{chunk_no}"
# }
# else:
# metadata_combined = {
# "page_number": page_num,
# "chunk": i,
# "source": f"p{page_num}-{i}",
# **metadata,
# }
# doc = docstore.Document(
# page_content=chunk,
# metadata=metadata_combined
# )
# docs.append(doc)
# chunk_no += 1
# return docs
def texts_to_docs(self,
texts: List[Tuple[int, str]],
metadata: Dict[str, str]) -> List[docstore.Document]:
"""
Split the text into chunks and return them as Documents.
"""
text_splitter = self.get_splitter()
docs: List[docstore.Document] = []
chunk_no = 0
for page_num, page in texts:
logger.info(f"Splitting page {page_num}")
chunks = text_splitter.split_text(page)
for i, chunk in enumerate(chunks):
if self.file_no:
metadata_combined = {
"file_no": self.file_no,
"chunk_no": chunk_no,
"source": f"F{self.file_no}-{chunk_no}"
}
else:
metadata_combined = {
"page_number": page_num,
"chunk": i,
"source": f"p{page_num}-{i}",
**metadata,
}
doc = docstore.Document(
page_content=chunk,
metadata=metadata_combined
)
docs.append(doc)
chunk_no += 1
return docs
def clean_texts_to_docs(self, raw_pages, metadata) -> List[docstore.Document]:
""""
Combines the functions clean_text and text_to_docs
"""
cleaning_functions: List = [
self.merge_hyphenated_words,
self.fix_newlines,
self.remove_multiple_newlines
]
cleaned_texts = self.clean_texts(raw_pages, cleaning_functions)
# for cleaned_text in cleaned_texts:
# cleaned_chunks = self.split_text_into_chunks(cleaned_text, metadata)
docs = self.texts_to_docs(cleaned_texts, metadata)
return docs
def get_splitter(self):
"""
Get the text splitter object
"""
if self.text_splitter_method == "NLTKTextSplitter":
text_splitter = splitter.NLTKTextSplitter(
separator="\n\n",
language="english",
chunk_size=self.chunk_size,
chunk_overlap=self.chunk_overlap
)
elif self.text_splitter_method == "RecursiveCharacterTextSplitter":
text_splitter = splitter.RecursiveCharacterTextSplitter(
chunk_size=self.chunk_size,
separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
chunk_overlap=self.chunk_overlap
)
return text_splitter
| [
"langchain.text_splitter.NLTKTextSplitter",
"langchain.docstore.document.Document",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((881, 921), 're.sub', 're.sub', (['"""(\\\\w)-\\\\n(\\\\w)"""', '"""\\\\1\\\\2"""', 'text'], {}), "('(\\\\w)-\\\\n(\\\\w)', '\\\\1\\\\2', text)\n", (887, 921), False, 'import re\n'), ((1072, 1111), 're.sub', 're.sub', (['"""(?<!\\\\n)\\\\n(?!\\\\n)"""', '""" """', 'text'], {}), "('(?<!\\\\n)\\\\n(?!\\\\n)', ' ', text)\n", (1078, 1111), False, 'import re\n'), ((1284, 1313), 're.sub', 're.sub', (['"""\\\\n{2,}"""', '"""\n"""', 'text'], {}), "('\\\\n{2,}', '\\n', text)\n", (1290, 1313), False, 'import re\n'), ((1601, 1630), 'loguru.logger.info', 'logger.info', (['"""Cleaning texts"""'], {}), "('Cleaning texts')\n", (1612, 1630), False, 'from loguru import logger\n'), ((3783, 3824), 'loguru.logger.info', 'logger.info', (['f"""Splitting page {page_num}"""'], {}), "(f'Splitting page {page_num}')\n", (3794, 3824), False, 'from loguru import logger\n'), ((5456, 5586), 'langchain.text_splitter.NLTKTextSplitter', 'splitter.NLTKTextSplitter', ([], {'separator': '"""\n\n"""', 'language': '"""english"""', 'chunk_size': 'self.chunk_size', 'chunk_overlap': 'self.chunk_overlap'}), "(separator='\\n\\n', language='english', chunk_size=\n self.chunk_size, chunk_overlap=self.chunk_overlap)\n", (5481, 5586), True, 'import langchain.text_splitter as splitter\n'), ((4463, 4528), 'langchain.docstore.document.Document', 'docstore.Document', ([], {'page_content': 'chunk', 'metadata': 'metadata_combined'}), '(page_content=chunk, metadata=metadata_combined)\n', (4480, 4528), True, 'import langchain.docstore.document as docstore\n'), ((5764, 5930), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'self.chunk_size', 'separators': "['\\n\\n', '\\n', '.', '!', '?', ',', ' ', '']", 'chunk_overlap': 'self.chunk_overlap'}), "(chunk_size=self.chunk_size,\n separators=['\\n\\n', '\\n', '.', '!', '?', ',', ' ', ''], chunk_overlap=\n self.chunk_overlap)\n", (5803, 5930), True, 'import langchain.text_splitter as splitter\n')] |
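# --- Illustrative sketch, not part of the class above: driving IngestUtils end to end.
# The page texts and metadata values are made up; the constructor arguments mirror the
# __init__ signature shown above.
ingest_utils = IngestUtils(
    chunk_size=1000,
    chunk_overlap=200,
    file_no=1,
    text_splitter_method="RecursiveCharacterTextSplitter",
)
raw_pages = [(1, "First page text..."), (2, "Second page text...")]
docs = ingest_utils.clean_texts_to_docs(raw_pages, metadata={"title": "example.pdf"})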
"""Base interface that all chains should implement."""
from __future__ import annotations
import asyncio
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from functools import partial
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import langchain
import yaml
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainRun,
CallbackManager,
CallbackManagerForChainRun,
Callbacks,
)
from langchain.load.dump import dumpd
from langchain.load.serializable import Serializable
from langchain.pydantic_v1 import Field, root_validator, validator
from langchain.schema import RUN_KEY, BaseMemory, RunInfo
from langchain.schema.runnable import Runnable, RunnableConfig
logger = logging.getLogger(__name__)
def _get_verbosity() -> bool:
return langchain.verbose
class Chain(Serializable, Runnable[Dict[str, Any], Dict[str, Any]], ABC):
"""Abstract base class for creating structured sequences of calls to components.
Chains should be used to encode a sequence of calls to components like
models, document retrievers, other chains, etc., and provide a simple interface
to this sequence.
Copied from langchain v0.0.283.
The Chain interface makes it easy to create apps that are:
- Stateful: add Memory to any Chain to give it state,
- Observable: pass Callbacks to a Chain to execute additional functionality,
like logging, outside the main sequence of component calls,
- Composable: the Chain API is flexible enough that it is easy to combine
Chains with other components, including other Chains.
The main methods exposed by chains are:
- `__call__`: Chains are callable. The `__call__` method is the primary way to
execute a Chain. This takes inputs as a dictionary and returns a
dictionary output.
- `run`: A convenience method that takes inputs as args/kwargs and returns the
output as a string or object. This method can only be used for a subset of
chains and cannot return as rich of an output as `__call__`.
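    Example (an illustrative sketch only; ``GreetingChain`` is a hypothetical
    subclass used to show the interface, not something provided by the library):
        .. code-block:: python

            class GreetingChain(Chain):
                @property
                def input_keys(self) -> List[str]:
                    return ["name"]
                @property
                def output_keys(self) -> List[str]:
                    return ["greeting"]
                def _call(
                    self,
                    inputs: Dict[str, Any],
                    run_manager: Optional[CallbackManagerForChainRun] = None,
                ) -> Dict[str, Any]:
                    return {"greeting": f"Hello, {inputs['name']}!"}
            chain = GreetingChain()
            chain({"name": "Ada"})  # -> {"name": "Ada", "greeting": "Hello, Ada!"}
            chain.run("Ada")        # -> "Hello, Ada!"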
"""
def invoke(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Dict[str, Any]:
config = config or {}
return self(
input,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
run_name=config.get("run_name"),
**kwargs,
)
async def ainvoke(
self,
input: Dict[str, Any],
config: Optional[RunnableConfig] = None,
**kwargs: Any,
) -> Dict[str, Any]:
if type(self)._acall == Chain._acall:
# If the chain does not implement async, fall back to default implementation
return await asyncio.get_running_loop().run_in_executor(
None, partial(self.invoke, input, config, **kwargs)
)
config = config or {}
return await self.acall(
input,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
run_name=config.get("run_name"),
**kwargs,
)
memory: Optional[BaseMemory] = None
"""Optional memory object. Defaults to None.
Memory is a class that gets called at the start
and at the end of every chain. At the start, memory loads variables and passes
them along in the chain. At the end, it saves any returned variables.
There are many different types of memory - please see memory docs
for the full catalog."""
callbacks: Callbacks = Field(default=None, exclude=True)
"""Optional list of callback handlers (or callback manager). Defaults to None.
Callback handlers are called throughout the lifecycle of a call to a chain,
starting with on_chain_start, ending with on_chain_end or on_chain_error.
Each custom chain can optionally call additional callback methods, see Callback docs
for full details."""
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
"""Deprecated, use `callbacks` instead."""
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether or not run in verbose mode. In verbose mode, some intermediate logs
will be printed to the console. Defaults to `langchain.verbose` value."""
tags: Optional[List[str]] = None
"""Optional list of tags associated with the chain. Defaults to None.
These tags will be associated with each call to this chain,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to eg identify a specific instance of a chain with its use case.
"""
metadata: Optional[Dict[str, Any]] = None
"""Optional metadata associated with the chain. Defaults to None.
This metadata will be associated with each call to this chain,
and passed as arguments to the handlers defined in `callbacks`.
You can use these to eg identify a specific instance of a chain with its use case.
"""
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@property
def _chain_type(self) -> str:
raise NotImplementedError("Saving not supported for this chain type.")
@root_validator()
def raise_callback_manager_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
if values.get("callbacks") is not None:
raise ValueError(
"Cannot specify both callback_manager and callbacks. "
"callback_manager is deprecated, callbacks is the preferred "
"parameter to pass in."
)
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""Set the chain verbosity.
Defaults to the global setting if not specified by the user.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@property
@abstractmethod
def input_keys(self) -> List[str]:
"""Keys expected to be in the chain input."""
raise NotImplementedError
@property
@abstractmethod
def output_keys(self) -> List[str]:
"""Keys expected to be in the chain output."""
raise NotImplementedError
def _validate_inputs(self, inputs: Dict[str, Any]) -> None:
"""Check that all inputs are present."""
missing_keys = set(self.input_keys).difference(inputs)
if missing_keys:
raise ValueError(f"Missing some input keys: {missing_keys}")
def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
missing_keys = set(self.output_keys).difference(outputs)
if missing_keys:
raise ValueError(f"Missing some output keys: {missing_keys}")
@abstractmethod
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Execute the chain.
This is a private method that is not user-facing. It is only called within
`Chain.__call__`, which is the user-facing wrapper method that handles
callbacks configuration and some input/output processing.
Args:
inputs: A dict of named inputs to the chain. Assumed to contain all inputs
specified in `Chain.input_keys`, including any inputs added by memory.
run_manager: The callbacks manager that contains the callback handlers for
this run of the chain.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
raise NotImplementedError
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Asynchronously execute the chain.
This is a private method that is not user-facing. It is only called within
`Chain.acall`, which is the user-facing wrapper method that handles
callbacks configuration and some input/output processing.
Args:
inputs: A dict of named inputs to the chain. Assumed to contain all inputs
specified in `Chain.input_keys`, including any inputs added by memory.
run_manager: The callbacks manager that contains the callback handlers for
this run of the chain.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
raise NotImplementedError("Async call not supported for this chain type.")
def __call__(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
run_name: Optional[str] = None,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Execute the chain.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param. Should contain all inputs specified in
`Chain.input_keys` except for inputs that will be set by the chain's
memory.
return_only_outputs: Whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
metadata: Optional metadata associated with the chain. Defaults to None
include_run_info: Whether to include run info in the response. Defaults
to False.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
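        Example (illustrative only; reuses the hypothetical ``GreetingChain`` from the
        class docstring above):
            .. code-block:: python

                chain = GreetingChain()
                chain({"name": "Ada"}, return_only_outputs=True, include_run_info=True)
                # -> {"greeting": "Hello, Ada!", "__run": RunInfo(run_id=...)}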
"""
inputs = self.prep_inputs(inputs)
callback_manager = CallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
metadata,
self.metadata,
)
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
run_manager = callback_manager.on_chain_start(
dumpd(self),
inputs,
name=run_name,
)
try:
outputs = (
self._call(inputs, run_manager=run_manager)
if new_arg_supported
else self._call(inputs)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_chain_error(e)
raise e
run_manager.on_chain_end(outputs)
final_outputs: Dict[str, Any] = self.prep_outputs(
inputs, outputs, return_only_outputs
)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
async def acall(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
run_name: Optional[str] = None,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Asynchronously execute the chain.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param. Should contain all inputs specified in
`Chain.input_keys` except for inputs that will be set by the chain's
memory.
return_only_outputs: Whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
metadata: Optional metadata associated with the chain. Defaults to None
include_run_info: Whether to include run info in the response. Defaults
to False.
Returns:
A dict of named outputs. Should contain all outputs specified in
`Chain.output_keys`.
"""
inputs = self.prep_inputs(inputs)
callback_manager = AsyncCallbackManager.configure(
callbacks,
self.callbacks,
self.verbose,
tags,
self.tags,
metadata,
self.metadata,
)
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
run_manager = await callback_manager.on_chain_start(
dumpd(self),
inputs,
name=run_name,
)
try:
outputs = (
await self._acall(inputs, run_manager=run_manager)
if new_arg_supported
else await self._acall(inputs)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_chain_error(e)
raise e
await run_manager.on_chain_end(outputs)
final_outputs: Dict[str, Any] = self.prep_outputs(
inputs, outputs, return_only_outputs
)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""Validate and prepare chain outputs, and save info about this run to memory.
Args:
inputs: Dictionary of chain inputs, including any inputs added by chain
memory.
outputs: Dictionary of initial chain outputs.
return_only_outputs: Whether to only return the chain outputs. If False,
inputs are also added to the final outputs.
Returns:
A dict of the final chain outputs.
"""
self._validate_outputs(outputs)
if self.memory is not None:
self.memory.save_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Validate and prepare chain inputs, including adding inputs from memory.
Args:
inputs: Dictionary of raw inputs, or single input if chain expects
only one param. Should contain all inputs specified in
`Chain.input_keys` except for inputs that will be set by the chain's
memory.
Returns:
A dictionary of all inputs, including those added by the chain's memory.
"""
if not isinstance(inputs, dict):
_input_keys = set(self.input_keys)
if self.memory is not None:
# If there are multiple input keys, but some get set by memory so that
# only one is not set, we can still figure out which key it is.
_input_keys = _input_keys.difference(self.memory.memory_variables)
if len(_input_keys) != 1:
raise ValueError(
f"A single string input was passed in, but this chain expects "
f"multiple inputs ({_input_keys}). When a chain expects "
f"multiple inputs, please call it by passing in a dictionary, "
"eg `chain({'foo': 1, 'bar': 2})`"
)
inputs = {list(_input_keys)[0]: inputs}
if self.memory is not None:
external_context = self.memory.load_memory_variables(inputs)
inputs = dict(inputs, **external_context)
self._validate_inputs(inputs)
return inputs
@property
def _run_output_key(self) -> str:
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
return self.output_keys[0]
def run(
self,
*args: Any,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
"""Convenience method for executing chain.
The main difference between this method and `Chain.__call__` is that this
method expects inputs to be passed directly in as positional arguments or
keyword arguments, whereas `Chain.__call__` expects a single input dictionary
with all the inputs
Args:
*args: If the chain expects a single input, it can be passed in as the
sole positional argument.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
**kwargs: If the chain expects multiple inputs, they can be passed in
directly as keyword arguments.
Returns:
The chain output.
Example:
.. code-block:: python
# Suppose we have a single-input chain that takes a 'question' string:
chain.run("What's the temperature in Boise, Idaho?")
# -> "The temperature in Boise is..."
# Suppose we have a multi-input chain that takes a 'question' string
# and 'context' string:
question = "What's the temperature in Boise, Idaho?"
context = "Weather report for Boise, Idaho on 07/03/23..."
chain.run(question=question, context=context)
# -> "The temperature in Boise is..."
"""
# Run at start to make sure this is possible/defined
_output_key = self._run_output_key
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return self(args[0], callbacks=callbacks, tags=tags, metadata=metadata)[
_output_key
]
if kwargs and not args:
return self(kwargs, callbacks=callbacks, tags=tags, metadata=metadata)[
_output_key
]
if not kwargs and not args:
raise ValueError(
"`run` supported with either positional arguments or keyword arguments,"
" but none were provided."
)
else:
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
async def arun(
self,
*args: Any,
callbacks: Callbacks = None,
tags: Optional[List[str]] = None,
metadata: Optional[Dict[str, Any]] = None,
**kwargs: Any,
) -> Any:
"""Convenience method for executing chain.
The main difference between this method and `Chain.__call__` is that this
method expects inputs to be passed directly in as positional arguments or
keyword arguments, whereas `Chain.__call__` expects a single input dictionary
with all the inputs
Args:
*args: If the chain expects a single input, it can be passed in as the
sole positional argument.
callbacks: Callbacks to use for this chain run. These will be called in
addition to callbacks passed to the chain during construction, but only
these runtime callbacks will propagate to calls to other objects.
tags: List of string tags to pass to all callbacks. These will be passed in
addition to tags passed to the chain during construction, but only
these runtime tags will propagate to calls to other objects.
**kwargs: If the chain expects multiple inputs, they can be passed in
directly as keyword arguments.
Returns:
The chain output.
Example:
.. code-block:: python
# Suppose we have a single-input chain that takes a 'question' string:
await chain.arun("What's the temperature in Boise, Idaho?")
# -> "The temperature in Boise is..."
# Suppose we have a multi-input chain that takes a 'question' string
# and 'context' string:
question = "What's the temperature in Boise, Idaho?"
context = "Weather report for Boise, Idaho on 07/03/23..."
await chain.arun(question=question, context=context)
# -> "The temperature in Boise is..."
"""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
elif args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return (
await self.acall(
args[0], callbacks=callbacks, tags=tags, metadata=metadata
)
)[self.output_keys[0]]
if kwargs and not args:
return (
await self.acall(
kwargs, callbacks=callbacks, tags=tags, metadata=metadata
)
)[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def dict(self, **kwargs: Any) -> Dict:
"""Dictionary representation of chain.
Expects `Chain._chain_type` property to be implemented and for memory to be
null.
Args:
**kwargs: Keyword arguments passed to default `pydantic.BaseModel.dict`
method.
Returns:
A dictionary representation of the chain.
Example:
.. code-block:: python
chain.dict(exclude_unset=True)
# -> {"_type": "foo", "verbose": False, ...}
"""
if self.memory is not None:
raise ValueError("Saving of memory is not yet supported.")
_dict = super().dict(**kwargs)
_dict["_type"] = self._chain_type
return _dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the chain.
Expects `Chain._chain_type` property to be implemented and for memory to be
null.
Args:
file_path: Path to file to save the chain to.
Example:
.. code-block:: python
chain.save(file_path="path/chain.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
chain_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(chain_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(chain_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
def apply(
self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
) -> List[Dict[str, str]]:
"""Call the chain on all inputs in the list."""
return [self(inputs, callbacks=callbacks) for inputs in input_list]
| [
"langchain.pydantic_v1.Field",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.load.dump.dumpd",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.RunInfo",
"langchain.pydantic_v1.validator",
"langchain.pydantic_v1.root_validator"
] | [((858, 885), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (875, 885), False, 'import logging\n'), ((3854, 3887), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (3859, 3887), False, 'from langchain.pydantic_v1 import Field, root_validator, validator\n'), ((4297, 4330), 'langchain.pydantic_v1.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (4302, 4330), False, 'from langchain.pydantic_v1 import Field, root_validator, validator\n'), ((4398, 4435), 'langchain.pydantic_v1.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (4403, 4435), False, 'from langchain.pydantic_v1 import Field, root_validator, validator\n'), ((5528, 5544), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (5542, 5544), False, 'from langchain.pydantic_v1 import Field, root_validator, validator\n'), ((6302, 6345), 'langchain.pydantic_v1.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (6311, 6345), False, 'from langchain.pydantic_v1 import Field, root_validator, validator\n'), ((11248, 11360), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose, tags,\n self.tags, metadata, self.metadata)\n', (11273, 11360), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((14156, 14273), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose', 'tags', 'self.tags', 'metadata', 'self.metadata'], {}), '(callbacks, self.callbacks, self.verbose,\n tags, self.tags, metadata, self.metadata)\n', (14186, 14273), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((6057, 6159), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. 
Please use callbacks instead.',\n DeprecationWarning)\n", (6070, 6159), False, 'import warnings\n'), ((11607, 11618), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (11612, 11618), False, 'from langchain.load.dump import dumpd\n'), ((12205, 12239), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (12212, 12239), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((15151, 15185), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (15158, 15185), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((25263, 25278), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (25267, 25278), False, 'from pathlib import Path\n'), ((14527, 14538), 'langchain.load.dump.dumpd', 'dumpd', (['self'], {}), '(self)\n', (14532, 14538), False, 'from langchain.load.dump import dumpd\n'), ((25598, 25632), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (25607, 25632), False, 'import json\n'), ((3080, 3125), 'functools.partial', 'partial', (['self.invoke', 'input', 'config'], {}), '(self.invoke, input, config, **kwargs)\n', (3087, 3125), False, 'from functools import partial\n'), ((11480, 11509), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (11497, 11509), False, 'import inspect\n'), ((14393, 14423), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', (14410, 14423), False, 'import inspect\n'), ((25735, 25785), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (25744, 25785), False, 'import yaml\n'), ((3014, 3040), 'asyncio.get_running_loop', 'asyncio.get_running_loop', ([], {}), '()\n', (3038, 3040), False, 'import asyncio\n')] |
import langchain
from dotenv import load_dotenv
from langchain.chains import HypotheticalDocumentEmbedder, RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
langchain.debug = True
load_dotenv()
# Prepare HyDE (embedding of a hypothetical answer generated by the LLM)
base_embeddings = OpenAIEmbeddings()
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
embeddings = HypotheticalDocumentEmbedder.from_llm(chat, base_embeddings, "web_search")
# Load the vectors saved with FAISS
db = FAISS.load_local("./tmp/faiss", embeddings)
retriever = db.as_retriever()
# Prepare "RetrievalQA", which retrieves relevant documents and then has the LLM generate an answer
qa_chain = RetrievalQA.from_chain_type(
llm=chat, chain_type="stuff", retriever=retriever
)
query = "LangChainとは"
result = qa_chain.run(query)
print(result)
| [
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.vectorstores.FAISS.load_local",
"langchain.chat_models.ChatOpenAI",
"langchain.chains.HypotheticalDocumentEmbedder.from_llm",
"langchain.embeddings.OpenAIEmbeddings"
] | [((280, 293), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (291, 293), False, 'from dotenv import load_dotenv\n'), ((347, 365), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (363, 365), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((373, 426), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (383, 426), False, 'from langchain.chat_models import ChatOpenAI\n'), ((440, 514), 'langchain.chains.HypotheticalDocumentEmbedder.from_llm', 'HypotheticalDocumentEmbedder.from_llm', (['chat', 'base_embeddings', '"""web_search"""'], {}), "(chat, base_embeddings, 'web_search')\n", (477, 514), False, 'from langchain.chains import HypotheticalDocumentEmbedder, RetrievalQA\n'), ((544, 587), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['"""./tmp/faiss"""', 'embeddings'], {}), "('./tmp/faiss', embeddings)\n", (560, 587), False, 'from langchain.vectorstores import FAISS\n'), ((681, 759), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'chain_type': '"""stuff"""', 'retriever': 'retriever'}), "(llm=chat, chain_type='stuff', retriever=retriever)\n", (708, 759), False, 'from langchain.chains import HypotheticalDocumentEmbedder, RetrievalQA\n')] |
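# --- Illustrative sketch, not part of the script above: FAISS.load_local assumes an
# index was saved earlier. One way such an index could have been built; the sample
# text is a placeholder, and "./tmp/faiss" matches the path used above.
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
texts = ["LangChain is a framework for building LLM-powered applications."]
db = FAISS.from_texts(texts, OpenAIEmbeddings())
db.save_local("./tmp/faiss")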
import os
from langchain.embeddings import OpenAIEmbeddings
import langchain
from annoy import AnnoyIndex
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from sentence_transformers import SentenceTransformer, util
import sys
embeddings = OpenAIEmbeddings(openai_api_key="")
model = SentenceTransformer('sentence-transformers/allenai-specter', device='cpu')
##name = "langchain"
##GITHUB_PATH = "/home/raghavan/langchain"
##name = "open_interpreter"
##GITHUB_PATH = "/home/raghavan/open-interpreter"
name = sys.argv[1]
GITHUB_PATH = sys.argv[2]
def get_files(path):
files = []
for r, d, f in os.walk(path):
for file in f:
if ".py" in file or ".sh" in file or ".java" in file:
files.append(os.path.join(r, file))
return files
def get_file_embeddings(path):
try:
text = get_file_contents(path)
ret = embeddings.embed_query(text)
return ret
except:
return None
def get_file_contents(path):
with open(path, 'r') as f:
return f.read()
print (name)
print (GITHUB_PATH)
files = get_files(GITHUB_PATH)
print(len(files))
embeddings_dict = {}
embeddings_dict2 = {}
i = 0
s = set()
for file in files:
e = get_file_embeddings(file)
if (e is None):
print ("Error in embedding file: ")
print (file)
s.add(file)
else:
embeddings_dict[file] = e
embeddings_dict2[file] = model.encode(get_file_contents(file))
i+=1
if (i%100 == 0):
print ("No of files processed: " + str(i))
t = AnnoyIndex(1536, 'angular')
t2 = AnnoyIndex(768, 'angular')
index_map = {}
i = 0
for file in embeddings_dict:
t.add_item(i, embeddings_dict[file])
t2.add_item(i, embeddings_dict2[file])
index_map[i] = file
i+=1
t.build(len(files))
name1= name + "_ada.ann"
t.save(name1)
t2.build(len(files))
name2 = name + "_specter.ann"
t2.save(name2)
with open('index_map' + name + '.txt', 'w') as f:
for idx, path in index_map.items():
f.write(f'{idx}\t{path}\n')
print("Indices created :" + name1 + " , " + name2)
print("Number of files indexed: " + str(len(files))) | [
"langchain.embeddings.OpenAIEmbeddings"
] | [((353, 388), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': '""""""'}), "(openai_api_key='')\n", (369, 388), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((397, 471), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""sentence-transformers/allenai-specter"""'], {'device': '"""cpu"""'}), "('sentence-transformers/allenai-specter', device='cpu')\n", (416, 471), False, 'from sentence_transformers import SentenceTransformer, util\n'), ((1656, 1683), 'annoy.AnnoyIndex', 'AnnoyIndex', (['(1536)', '"""angular"""'], {}), "(1536, 'angular')\n", (1666, 1683), False, 'from annoy import AnnoyIndex\n'), ((1689, 1715), 'annoy.AnnoyIndex', 'AnnoyIndex', (['(768)', '"""angular"""'], {}), "(768, 'angular')\n", (1699, 1715), False, 'from annoy import AnnoyIndex\n'), ((719, 732), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (726, 732), False, 'import os\n'), ((852, 873), 'os.path.join', 'os.path.join', (['r', 'file'], {}), '(r, file)\n', (864, 873), False, 'import os\n')] |
from pathlib import Path
from phi.assistant import Assistant
from phi.knowledge.langchain import LangChainKnowledgeBase
from langchain.embeddings import OpenAIEmbeddings
from langchain.document_loaders import TextLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
cookbook_dir = Path("__file__").parent
chroma_db_dir = cookbook_dir.joinpath("storage/chroma_db")
def load_vector_store():
state_of_the_union = cookbook_dir.joinpath("data/demo/state_of_the_union.txt")
# -*- Load the document
raw_documents = TextLoader(str(state_of_the_union)).load()
# -*- Split it into chunks
text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)
documents = text_splitter.split_documents(raw_documents)
# -*- Embed each chunk and load it into the vector store
Chroma.from_documents(documents, OpenAIEmbeddings(), persist_directory=str(chroma_db_dir))
# -*- Load the vector store
load_vector_store()
# -*- Get the vectordb
db = Chroma(embedding_function=OpenAIEmbeddings(), persist_directory=str(chroma_db_dir))
# -*- Create a retriever from the vector store
retriever = db.as_retriever()
# -*- Create a knowledge base from the vector store
knowledge_base = LangChainKnowledgeBase(retriever=retriever)
conv = Assistant(knowledge_base=knowledge_base, debug_mode=True, add_references_to_prompt=True)
conv.print_response("What did the president say about technology?", markdown=True)
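# Illustrative helper, not part of the original example: the Chroma retriever
# built above can also be queried directly, without going through the phidata
# Assistant (get_relevant_documents is the standard LangChain retriever call).
def peek_retriever(question: str):
    return retriever.get_relevant_documents(question)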
| [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.embeddings.OpenAIEmbeddings"
] | [((1254, 1297), 'phi.knowledge.langchain.LangChainKnowledgeBase', 'LangChainKnowledgeBase', ([], {'retriever': 'retriever'}), '(retriever=retriever)\n', (1276, 1297), False, 'from phi.knowledge.langchain import LangChainKnowledgeBase\n'), ((1306, 1398), 'phi.assistant.Assistant', 'Assistant', ([], {'knowledge_base': 'knowledge_base', 'debug_mode': '(True)', 'add_references_to_prompt': '(True)'}), '(knowledge_base=knowledge_base, debug_mode=True,\n add_references_to_prompt=True)\n', (1315, 1398), False, 'from phi.assistant import Assistant\n'), ((337, 353), 'pathlib.Path', 'Path', (['"""__file__"""'], {}), "('__file__')\n", (341, 353), False, 'from pathlib import Path\n'), ((672, 727), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(0)'}), '(chunk_size=1000, chunk_overlap=0)\n', (693, 727), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((887, 905), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (903, 905), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((1049, 1067), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1065, 1067), False, 'from langchain.embeddings import OpenAIEmbeddings\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : AI. @by PyCharm
# @File : OpenAIEmbeddings
# @Time : 2023/7/11 18:40
# @Author : betterme
# @WeChat : meutils
# @Software : PyCharm
# @Description :
import langchain
from langchain.embeddings import OpenAIEmbeddings as _OpenAIEmbeddings
from meutils.pipe import *
from chatllm.llmchain.utils import get_api_key
class OpenAIEmbeddings(_OpenAIEmbeddings):
    """Embed with multiple API keys across multiple threads."""
get_api_key: Callable[[int], List[str]] = get_api_key
pre_fn: Optional[Callable[[str], str]] = None
# class Config:
# """Configuration for this pydantic object."""
#
# allow_population_by_field_name = True
def embed_documents(
self,
texts: List[str],
chunk_size: Optional[int] = 0,
) -> List[List[float]]:
if self.pre_fn: texts = texts | xmap_(self.pre_fn)
n = int(np.ceil(len(texts) / self.chunk_size))
api_key_set = self.get_api_key(n=n)
        max_workers = np.clip(len(api_key_set), 1, 32).astype(int)  # maximum number of worker threads
if max_workers > 1:
embeddings_map = {}
for i, api_key in enumerate(api_key_set):
kwargs = self.dict().copy()
kwargs.pop('get_api_key', None) # not permitted
kwargs['openai_api_key'] = api_key
                embeddings_map[i] = _OpenAIEmbeddings(**kwargs)  # OpenAIEmbeddings could be used here as well
if langchain.debug:
logger.info([e.openai_api_key for e in embeddings_map.values()])
logger.info(f"Maximum concurrency: {max_workers * self.chunk_size}")
def __embed_documents(arg):
idx, texts = arg
embeddings = embeddings_map.get(idx % max_workers, 0)
return embeddings.embed_documents(texts)
return (
texts | xgroup(self.chunk_size)
| xenumerate
| xThreadPoolExecutor(__embed_documents, max_workers)
| xchain_
)
return super().embed_documents(texts)
if __name__ == '__main__':
e = OpenAIEmbeddings(chunk_size=5)
e.get_api_key = partial(get_api_key, n=2)
# e.openai_api_key = 'xxx'
print(e.get_api_key())
print(e.openai_api_key)
print(e.embed_documents(['x'] * 6))
print(e.embed_query('x'))
| [
"langchain.embeddings.OpenAIEmbeddings"
] | [((1391, 1418), 'langchain.embeddings.OpenAIEmbeddings', '_OpenAIEmbeddings', ([], {}), '(**kwargs)\n', (1408, 1418), True, 'from langchain.embeddings import OpenAIEmbeddings as _OpenAIEmbeddings\n')] |
####################################################################################
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################
# Author: Adam Paternostro
# Summary: Ask an LLM to find categories in a customer review text and output the results in JSON.
# To set up your environment:
# python3 -m venv .venv
# source .venv/bin/activate
# pip install --only-binary :all: greenlet
# pip install langchain==0.0.307
# pip install google-cloud-aiplatform
# run it: python sample-prompt-json-output.py
# deactivate
import json
import langchain
from langchain.llms import VertexAI
from langchain.embeddings import VertexAIEmbeddings
llm = VertexAI(
model_name="text-bison@001",
max_output_tokens=1024,
temperature=0,
top_p=0,
top_k=1,
verbose=True,
)
prompt="""For the below review perform the following:
1. Classify the review as one or more of the below classifications.
2. Output the results in the below JSON format.
Classifications:
- "driver likes music"
- "driver has a dirty car"
- "driver has a clean car"
- "driver drives fast"
- "driver drives slow"
JSON format: [ "value" ]
Sample JSON Response: [ "driver likes music", "driver drives slow" ]
Review: I was taking a rideshare ride and the drivers car was spotless. Not a spec of dirt could be found. I could eat off the seats.
I cannot believe how quickly he got me to my destination. It was like taking a rocketship. I was so scared!
"""
result = llm(prompt)
print()
print(result)
print()
print()
# Hopefully it is valid JSON
json_data = str(result)
json_object = json.loads(json_data)
json_formatted_str = json.dumps(json_object, indent=2)
print(json_formatted_str)
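# Illustrative alternative, not part of the original sample: a guarded parse
# one might use instead of the bare json.loads() above, since the model may
# return text that is not valid JSON.
def safe_parse(llm_text: str):
    try:
        return json.loads(llm_text)
    except json.JSONDecodeError:
        return None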
| [
"langchain.llms.VertexAI"
] | [((1273, 1385), 'langchain.llms.VertexAI', 'VertexAI', ([], {'model_name': '"""text-bison@001"""', 'max_output_tokens': '(1024)', 'temperature': '(0)', 'top_p': '(0)', 'top_k': '(1)', 'verbose': '(True)'}), "(model_name='text-bison@001', max_output_tokens=1024, temperature=0,\n top_p=0, top_k=1, verbose=True)\n", (1281, 1385), False, 'from langchain.llms import VertexAI\n'), ((2198, 2219), 'json.loads', 'json.loads', (['json_data'], {}), '(json_data)\n', (2208, 2219), False, 'import json\n'), ((2241, 2274), 'json.dumps', 'json.dumps', (['json_object'], {'indent': '(2)'}), '(json_object, indent=2)\n', (2251, 2274), False, 'import json\n')] |
import os
import pathlib
import langchain
import langchain.cache
import langchain.globals
CACHE_BASE = pathlib.Path(f'{os.environ["HOME"]}/.cache/mitaskem/')
CACHE_BASE.mkdir(parents=True, exist_ok=True)
_LLM_CACHE_PATH = CACHE_BASE/'langchain_llm_cache.sqlite'
langchain.globals.set_llm_cache(langchain.cache.SQLiteCache(database_path=_LLM_CACHE_PATH)) | [
"langchain.cache.SQLiteCache"
] | [((104, 158), 'pathlib.Path', 'pathlib.Path', (['f"""{os.environ[\'HOME\']}/.cache/mitaskem/"""'], {}), '(f"{os.environ[\'HOME\']}/.cache/mitaskem/")\n', (116, 158), False, 'import pathlib\n'), ((295, 353), 'langchain.cache.SQLiteCache', 'langchain.cache.SQLiteCache', ([], {'database_path': '_LLM_CACHE_PATH'}), '(database_path=_LLM_CACHE_PATH)\n', (322, 353), False, 'import langchain\n')] |
"""Base interface that all chains should implement."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import BaseModel, Extra, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from langchain.callbacks.base import BaseCallbackManager
class Memory(BaseModel, ABC):
"""Base interface for memory in chains."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
@abstractmethod
def memory_variables(self) -> List[str]:
"""Input keys this memory class will load dynamically."""
@abstractmethod
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
@abstractmethod
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save the context of this model run to memory."""
@abstractmethod
def clear(self) -> None:
"""Clear memory contents."""
def _get_verbosity() -> bool:
return langchain.verbose
class Chain(BaseModel, ABC):
"""Base interface that all chains should implement."""
memory: Optional[Memory] = None
callback_manager: BaseCallbackManager = Field(
default_factory=get_callback_manager, exclude=True
)
verbose: bool = Field(
default_factory=_get_verbosity
) # Whether to print the response text
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@property
def _chain_type(self) -> str:
raise NotImplementedError("Saving not supported for this chain type.")
@validator("callback_manager", pre=True, always=True)
def set_callback_manager(
cls, callback_manager: Optional[BaseCallbackManager]
) -> BaseCallbackManager:
"""If callback manager is None, set it.
This allows users to pass in None as callback manager, which is a nice UX.
"""
return callback_manager or get_callback_manager()
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@property
@abstractmethod
def input_keys(self) -> List[str]:
"""Input keys this chain expects."""
@property
@abstractmethod
def output_keys(self) -> List[str]:
"""Output keys this chain expects."""
def _validate_inputs(self, inputs: Dict[str, str]) -> None:
"""Check that all inputs are present."""
missing_keys = set(self.input_keys).difference(inputs)
if missing_keys:
raise ValueError(f"Missing some input keys: {missing_keys}")
def _validate_outputs(self, outputs: Dict[str, str]) -> None:
if set(outputs) != set(self.output_keys):
raise ValueError(
f"Did not get output keys that were expected. "
f"Got: {set(outputs)}. Expected: {set(self.output_keys)}."
)
@abstractmethod
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
"""Run the logic of this chain and return the output."""
async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
"""Run the logic of this chain and return the output."""
raise NotImplementedError("Async call not supported for this chain type.")
def __call__(
self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
"""
inputs = self.prep_inputs(inputs)
self.callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
verbose=self.verbose,
)
try:
outputs = self._call(inputs)
except (KeyboardInterrupt, Exception) as e:
self.callback_manager.on_chain_error(e, verbose=self.verbose)
raise e
self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
return self.prep_outputs(inputs, outputs, return_only_outputs)
async def acall(
self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
"""
inputs = self.prep_inputs(inputs)
if self.callback_manager.is_async:
await self.callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
verbose=self.verbose,
)
else:
self.callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
verbose=self.verbose,
)
try:
outputs = await self._acall(inputs)
except (KeyboardInterrupt, Exception) as e:
if self.callback_manager.is_async:
await self.callback_manager.on_chain_error(e, verbose=self.verbose)
else:
self.callback_manager.on_chain_error(e, verbose=self.verbose)
raise e
if self.callback_manager.is_async:
await self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
else:
self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
return self.prep_outputs(inputs, outputs, return_only_outputs)
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""Validate and prep outputs."""
self._validate_outputs(outputs)
if self.memory is not None:
self.memory.save_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Validate and prep inputs."""
if not isinstance(inputs, dict):
_input_keys = set(self.input_keys)
if self.memory is not None:
# If there are multiple input keys, but some get set by memory so that
# only one is not set, we can still figure out which key it is.
_input_keys = _input_keys.difference(self.memory.memory_variables)
if len(_input_keys) != 1:
raise ValueError(
f"A single string input was passed in, but this chain expects "
f"multiple inputs ({_input_keys}). When a chain expects "
f"multiple inputs, please call it by passing in a dictionary, "
"eg `chain({'foo': 1, 'bar': 2})`"
)
inputs = {list(_input_keys)[0]: inputs}
if self.memory is not None:
external_context = self.memory.load_memory_variables(inputs)
inputs = dict(inputs, **external_context)
self._validate_inputs(inputs)
return inputs
def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
"""Call the chain on all inputs in the list."""
return [self(inputs) for inputs in input_list]
def conversation(self, *args: str, **kwargs: str) -> List[str]:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) == 2:
assert "output" in self.output_keys and "intermediate_steps" in self.output_keys
keep_short = False
if "keep_short" in kwargs:
keep_short = kwargs.pop("keep_short")
outputs = {}
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
outputs = self(args[0])
if kwargs and not args:
outputs = self(kwargs)
intermediate = outputs.get("intermediate_steps") or []
conversation = []
for action, action_output in intermediate:
action: str = action.log.strip()
if not action.startswith(f"AI:"):
action = f"AI: {action}"
if keep_short:
# Hide the internal conversation
lines = action.split("\n")
new_lines = []
for l in lines:
for term in ["Assistant,"]:
idx = l.lower().find(term.lower())
if idx >= 0:
l = l[:idx]
if l.lower().strip() == "ai:":
l = ""
if not l:
continue
new_lines.append(l)
action = "\n".join(new_lines)
conversation.append(action)
if not keep_short or action_output.lstrip().startswith("Here is the edited image"):
if not action_output.startswith("Assistant:"):
action_output = f"Assistant: {action_output}"
conversation.append(action_output)
conversation.append("AI: " + outputs["output"])
return conversation
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return ["AI: " + self(args[0])[self.output_keys[0]]]
if kwargs and not args:
return ["AI: " + self(kwargs)[self.output_keys[0]]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def run(self, *args: str, **kwargs: str) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) == 2:
assert "output" in self.output_keys and "intermediate_steps" in self.output_keys
outputs = {}
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
outputs = self(args[0])
if kwargs and not args:
outputs = self(kwargs)
intermediate = outputs.get("intermediate_steps") or []
assistant = ""
for action, action_output in intermediate:
action: str = action.log.strip()
if not action.startswith(f"AI:"):
action = f"AI: {action}"
if not action_output.startswith("Assistant:"):
action_output = f"Assistant: {action_output}"
assistant += "\n" + action + "\n" + action_output
return assistant + "\n" + "AI: " + outputs["output"]
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return self(args[0])[self.output_keys[0]]
if kwargs and not args:
return self(kwargs)[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
async def arun(self, *args: str, **kwargs: str) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return (await self.acall(args[0]))[self.output_keys[0]]
if kwargs and not args:
return (await self.acall(kwargs))[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def dict(self, **kwargs: Any) -> Dict:
"""Return dictionary representation of chain."""
if self.memory is not None:
raise ValueError("Saving of memory is not yet supported.")
_dict = super().dict()
_dict["_type"] = self._chain_type
return _dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the chain.
Args:
file_path: Path to file to save the chain to.
Example:
.. code-block:: python
chain.save(file_path="path/chain.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
chain_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(chain_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(chain_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
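# Illustrative sketch, not part of the original module: a minimal Chain
# subclass showing the abstract interface above; declare the input/output keys
# and implement _call(). This one simply echoes its input.
class _EchoChain(Chain):
    @property
    def input_keys(self) -> List[str]:
        return ["text"]
    @property
    def output_keys(self) -> List[str]:
        return ["echo"]
    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        return {"echo": inputs["text"]}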
| [
"langchain.callbacks.get_callback_manager"
] | [((1401, 1458), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager', 'exclude': '(True)'}), '(default_factory=get_callback_manager, exclude=True)\n', (1406, 1458), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((1493, 1530), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1498, 1530), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((1830, 1882), 'pydantic.validator', 'validator', (['"""callback_manager"""'], {'pre': '(True)', 'always': '(True)'}), "('callback_manager', pre=True, always=True)\n", (1839, 1882), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2212, 2255), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (2221, 2255), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2183, 2205), 'langchain.callbacks.get_callback_manager', 'get_callback_manager', ([], {}), '()\n', (2203, 2205), False, 'from langchain.callbacks import get_callback_manager\n'), ((14634, 14649), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14638, 14649), False, 'from pathlib import Path\n'), ((14969, 15003), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (14978, 15003), False, 'import json\n'), ((15106, 15156), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (15115, 15156), False, 'import yaml\n')] |
import time  #← import the time module to measure execution time
import langchain
from langchain.cache import InMemoryCache  #← import InMemoryCache
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage
langchain.llm_cache = InMemoryCache() #← set InMemoryCache as the llm_cache
chat = ChatOpenAI()
start = time.time() #← record the start time
result = chat([ #← first call to the model
HumanMessage(content="こんにちは!")
])
end = time.time() #← record the end time
print(result.content)
print(f"Execution time: {end - start} seconds")
start = time.time() #← record the start time
result = chat([ #← run the same request a second time; the cache is used, so it completes immediately
HumanMessage(content="こんにちは!")
])
end = time.time() #← record the end time
print(result.content)
print(f"Execution time: {end - start} seconds") | [
"langchain.cache.InMemoryCache",
"langchain.schema.HumanMessage",
"langchain.chat_models.ChatOpenAI"
] | [((237, 252), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (250, 252), False, 'from langchain.cache import InMemoryCache\n'), ((291, 303), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (301, 303), False, 'from langchain.chat_models import ChatOpenAI\n'), ((312, 323), 'time.time', 'time.time', ([], {}), '()\n', (321, 323), False, 'import time\n'), ((411, 422), 'time.time', 'time.time', ([], {}), '()\n', (420, 422), False, 'import time\n'), ((498, 509), 'time.time', 'time.time', ([], {}), '()\n', (507, 509), False, 'import time\n'), ((627, 638), 'time.time', 'time.time', ([], {}), '()\n', (636, 638), False, 'import time\n'), ((370, 400), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""こんにちは!"""'}), "(content='こんにちは!')\n", (382, 400), False, 'from langchain.schema import HumanMessage\n'), ((586, 616), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""こんにちは!"""'}), "(content='こんにちは!')\n", (598, 616), False, 'from langchain.schema import HumanMessage\n')] |
import re
import urllib
from time import sleep
import langchain
import molbloom
import pandas as pd
import pkg_resources
import requests
import tiktoken
from langchain import LLMChain, PromptTemplate
from langchain.llms import BaseLLM
from langchain.tools import BaseTool
from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto
from .prompts import safety_summary_prompt, summary_each_data
class MoleculeSafety:
def __init__(self, llm: BaseLLM = None):
while True:
try:
self.clintox = pd.read_csv(
"https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/clintox.csv.gz"
)
break
except (ConnectionRefusedError, urllib.error.URLError):
sleep(5)
continue
self.pubchem_data = {}
self.llm = llm
def _fetch_pubchem_data(self, cas_number):
"""Fetch data from PubChem for a given CAS number, or use cached data if it's already been fetched."""
if cas_number not in self.pubchem_data:
try:
url1 = f"https://pubchem.ncbi.nlm.nih.gov/rest/pug/compound/name/{cas_number}/cids/JSON"
url = f"https://pubchem.ncbi.nlm.nih.gov/rest/pug_view/data/compound/{requests.get(url1).json()['IdentifierList']['CID'][0]}/JSON"
r = requests.get(url)
self.pubchem_data[cas_number] = r.json()
except:
return "Invalid molecule input, no Pubchem entry."
return self.pubchem_data[cas_number]
def ghs_classification(self, text):
"""Gives the ghs classification from Pubchem. Give this tool the name or CAS number of one molecule."""
if is_smiles(text):
return "Please input a valid CAS number."
data = self._fetch_pubchem_data(text)
if isinstance(data, str):
return "Molecule not found in Pubchem."
try:
for section in data["Record"]["Section"]:
if section.get("TOCHeading") == "Chemical Safety":
ghs = [
markup["Extra"]
for markup in section["Information"][0]["Value"][
"StringWithMarkup"
][0]["Markup"]
]
if ghs:
return ghs
except (StopIteration, KeyError):
return None
@staticmethod
def _scrape_pubchem(data, heading1, heading2, heading3):
try:
filtered_sections = []
for section in data["Record"]["Section"]:
toc_heading = section.get("TOCHeading")
if toc_heading == heading1:
for section2 in section["Section"]:
if section2.get("TOCHeading") == heading2:
for section3 in section2["Section"]:
if section3.get("TOCHeading") == heading3:
filtered_sections.append(section3)
return filtered_sections
except:
return None
def _get_safety_data(self, cas):
data = self._fetch_pubchem_data(cas)
safety_data = []
iterations = [
(
[
"Health Hazards",
"GHS Classification",
"Hazards Summary",
"NFPA Hazard Classification",
],
"Safety and Hazards",
"Hazards Identification",
),
(
["Explosive Limits and Potential", "Preventive Measures"],
"Safety and Hazards",
"Safety and Hazard Properties",
),
(
[
"Inhalation Risk",
"Effects of Long Term Exposure",
"Personal Protective Equipment (PPE)",
],
"Safety and Hazards",
"Exposure Control and Personal Protection",
),
(
["Toxicity Summary", "Carcinogen Classification"],
"Toxicity",
"Toxicological Information",
),
]
for items, header1, header2 in iterations:
safety_data.extend(
[self._scrape_pubchem(data, header1, header2, item)] for item in items
)
return safety_data
@staticmethod
def _num_tokens(string, encoding_name="text-davinci-003"):
"""Returns the number of tokens in a text string."""
encoding = tiktoken.encoding_for_model(encoding_name)
num_tokens = len(encoding.encode(string))
return num_tokens
def get_safety_summary(self, cas):
safety_data = self._get_safety_data(cas)
approx_length = int(
(3500 * 4) / len(safety_data) - 0.1 * ((3500 * 4) / len(safety_data))
)
prompt_short = PromptTemplate(
template=summary_each_data, input_variables=["data", "approx_length"]
)
llm_chain_short = LLMChain(prompt=prompt_short, llm=self.llm)
llm_output = []
for info in safety_data:
if self._num_tokens(str(info)) > approx_length:
trunc_info = str(info)[:approx_length]
llm_output.append(
llm_chain_short.run(
{"data": str(trunc_info), "approx_length": approx_length}
)
)
else:
llm_output.append(
llm_chain_short.run(
{"data": str(info), "approx_length": approx_length}
)
)
return llm_output
class SafetySummary(BaseTool):
name = "SafetySummary"
description = (
"Input CAS number, returns a summary of safety information."
"The summary includes Operator safety, GHS information, "
"Environmental risks, and Societal impact."
)
llm: BaseLLM = None
llm_chain: LLMChain = None
pubchem_data: dict = dict()
mol_safety: MoleculeSafety = None
def __init__(self, llm):
super().__init__()
self.mol_safety = MoleculeSafety(llm=llm)
self.llm = llm
prompt = PromptTemplate(
template=safety_summary_prompt, input_variables=["data"]
)
self.llm_chain = LLMChain(prompt=prompt, llm=self.llm)
def _run(self, cas: str) -> str:
if is_smiles(cas):
return "Please input a valid CAS number."
data = self.mol_safety._fetch_pubchem_data(cas)
if isinstance(data, str):
return "Molecule not found in Pubchem."
data = self.mol_safety.get_safety_summary(cas)
return self.llm_chain.run(" ".join(data))
async def _arun(self, cas_number):
raise NotImplementedError("Async not implemented.")
class ExplosiveCheck(BaseTool):
name = "ExplosiveCheck"
description = "Input CAS number, returns if molecule is explosive."
mol_safety: MoleculeSafety = None
def __init__(self):
super().__init__()
self.mol_safety = MoleculeSafety()
def _run(self, cas_number):
"""Checks if a molecule has an explosive GHS classification using pubchem."""
# first check if the input is a CAS number
if is_smiles(cas_number):
return "Please input a valid CAS number."
cls = self.mol_safety.ghs_classification(cas_number)
if cls is None:
return (
"Explosive Check Error. The molecule may not be assigned a GHS rating. "
)
if "Explos" in str(cls) or "explos" in str(cls):
return "Molecule is explosive"
else:
return "Molecule is not known to be explosive"
async def _arun(self, cas_number):
raise NotImplementedError("Async not implemented.")
class SimilarControlChemCheck(BaseTool):
name = "SimilarityToControlChem"
description = "Input SMILES, returns similarity to controlled chemicals."
def _run(self, smiles: str) -> str:
"""Checks max similarity between compound and controlled chemicals.
Input SMILES string."""
data_path = pkg_resources.resource_filename("chemcrow", "data/chem_wep_smi.csv")
cw_df = pd.read_csv(data_path)
try:
if not is_smiles(smiles):
return "Please input a valid SMILES string."
max_sim = cw_df["smiles"].apply(lambda x: self.tanimoto(smiles, x)).max()
if max_sim > 0.35:
return (
f"{smiles} has a high similarity "
f"({max_sim:.4}) to a known controlled chemical."
)
else:
                return (
                    f"{smiles} has a low similarity "
                    f"({max_sim:.4}) to a known controlled chemical. "
                    "This substance is safe, you may proceed with the task."
                )
except:
return "Tool error."
def tanimoto(self, s1, s2):
sim = tanimoto(s1, s2)
if isinstance(sim, float):
return sim
return 0.0
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError()
class ControlChemCheck(BaseTool):
name = "ControlChemCheck"
description = "Input CAS number, True if molecule is a controlled chemical."
similar_control_chem_check = SimilarControlChemCheck()
def _run(self, query: str) -> str:
"""Checks if compound is a controlled chemical. Input CAS number."""
data_path = pkg_resources.resource_filename("chemcrow", "data/chem_wep_smi.csv")
cw_df = pd.read_csv(data_path)
try:
if is_smiles(query):
query_esc = re.escape(query)
found = (
cw_df["smiles"]
.astype(str)
.str.contains(f"^{query_esc}$", regex=True)
.any()
)
else:
found = (
cw_df["cas"]
.astype(str)
.str.contains(f"^\({query}\)$", regex=True)
.any()
)
if found:
return (
f"The molecule {query} appears in a list of "
"controlled chemicals."
)
else:
# Get smiles of CAS number
try:
smi = pubchem_query2smiles(query)
except ValueError as e:
return str(e)
# Check similarity to known controlled chemicals
return self.similar_control_chem_check._run(smi)
except Exception as e:
return f"Error: {e}"
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError()
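# Illustrative usage sketch, not part of the original module: the CAS number
# below is just an example value (50-00-0, formaldehyde) and the call is
# guarded so importing the module stays side-effect free.
if __name__ == "__main__":
    print(ControlChemCheck().run("50-00-0"))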
| [
"langchain.LLMChain",
"langchain.PromptTemplate"
] | [((1729, 1744), 'chemcrow.utils.is_smiles', 'is_smiles', (['text'], {}), '(text)\n', (1738, 1744), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((4644, 4686), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['encoding_name'], {}), '(encoding_name)\n', (4671, 4686), False, 'import tiktoken\n'), ((4996, 5085), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'summary_each_data', 'input_variables': "['data', 'approx_length']"}), "(template=summary_each_data, input_variables=['data',\n 'approx_length'])\n", (5010, 5085), False, 'from langchain import LLMChain, PromptTemplate\n'), ((5130, 5173), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt_short', 'llm': 'self.llm'}), '(prompt=prompt_short, llm=self.llm)\n', (5138, 5173), False, 'from langchain import LLMChain, PromptTemplate\n'), ((6326, 6398), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'safety_summary_prompt', 'input_variables': "['data']"}), "(template=safety_summary_prompt, input_variables=['data'])\n", (6340, 6398), False, 'from langchain import LLMChain, PromptTemplate\n'), ((6446, 6483), 'langchain.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'self.llm'}), '(prompt=prompt, llm=self.llm)\n', (6454, 6483), False, 'from langchain import LLMChain, PromptTemplate\n'), ((6533, 6547), 'chemcrow.utils.is_smiles', 'is_smiles', (['cas'], {}), '(cas)\n', (6542, 6547), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((7399, 7420), 'chemcrow.utils.is_smiles', 'is_smiles', (['cas_number'], {}), '(cas_number)\n', (7408, 7420), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((8286, 8354), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""chemcrow"""', '"""data/chem_wep_smi.csv"""'], {}), "('chemcrow', 'data/chem_wep_smi.csv')\n", (8317, 8354), False, 'import pkg_resources\n'), ((8371, 8393), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (8382, 8393), True, 'import pandas as pd\n'), ((9155, 9171), 'chemcrow.utils.tanimoto', 'tanimoto', (['s1', 's2'], {}), '(s1, s2)\n', (9163, 9171), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((9718, 9786), 'pkg_resources.resource_filename', 'pkg_resources.resource_filename', (['"""chemcrow"""', '"""data/chem_wep_smi.csv"""'], {}), "('chemcrow', 'data/chem_wep_smi.csv')\n", (9749, 9786), False, 'import pkg_resources\n'), ((9803, 9825), 'pandas.read_csv', 'pd.read_csv', (['data_path'], {}), '(data_path)\n', (9814, 9825), True, 'import pandas as pd\n'), ((9854, 9870), 'chemcrow.utils.is_smiles', 'is_smiles', (['query'], {}), '(query)\n', (9863, 9870), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((543, 634), 'pandas.read_csv', 'pd.read_csv', (['"""https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/clintox.csv.gz"""'], {}), "(\n 'https://deepchemdata.s3-us-west-1.amazonaws.com/datasets/clintox.csv.gz')\n", (554, 634), True, 'import pandas as pd\n'), ((1358, 1375), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (1370, 1375), False, 'import requests\n'), ((8427, 8444), 'chemcrow.utils.is_smiles', 'is_smiles', (['smiles'], {}), '(smiles)\n', (8436, 8444), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((9900, 9916), 're.escape', 're.escape', (['query'], {}), '(query)\n', (9909, 9916), False, 'import re\n'), ((774, 782), 'time.sleep', 'sleep', (['(5)'], {}), '(5)\n', 
(779, 782), False, 'from time import sleep\n'), ((10623, 10650), 'chemcrow.utils.pubchem_query2smiles', 'pubchem_query2smiles', (['query'], {}), '(query)\n', (10643, 10650), False, 'from chemcrow.utils import is_smiles, pubchem_query2smiles, tanimoto\n'), ((1277, 1295), 'requests.get', 'requests.get', (['url1'], {}), '(url1)\n', (1289, 1295), False, 'import requests\n')] |
# from __future__ import annotations
import os
import re
import itertools
import openai
import tiktoken
import json
from dotenv import load_dotenv
from typing import Any, Dict, List, Optional
from pydantic import Extra
from langchain.schema.language_model import BaseLanguageModel
from langchain.callbacks.manager import (
AsyncCallbackManagerForChainRun,
CallbackManagerForChainRun,
)
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from langchain.chains.base import Chain
from langchain.prompts.base import BasePromptTemplate
from langchain.tools import DuckDuckGoSearchRun
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.chains import SequentialChain
import prompts
class ExecuteVerificationChain(Chain):
"""
    Implements the logic to execute the verification questions for factual accuracy.
"""
prompt: BasePromptTemplate
llm: BaseLanguageModel
input_key: str = "verification_questions"
output_key: str = "verification_answers"
use_search_tool: bool = True
search_tool: Any = DuckDuckGoSearchRun()
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
def input_keys(self) -> List[str]:
"""Will be whatever keys the prompt expects.
:meta private:
"""
return [self.input_key]
@property
def output_keys(self) -> List[str]:
"""Will always return text key.
:meta private:
"""
return [self.output_key]
def search_for_verification_question(self,
verification_question: str
) -> str:
search_result = self.search_tool.run(verification_question)
return search_result
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, str]:
        verification_answers_list = list() # Will contain the answer to each verification question
question_answer_pair = "" # Final output of verification question and answer pair
# Convert all the verification questions into a list of string
sub_inputs = {k:v for k,v in inputs.items() if k==self.input_key}
verification_questions_prompt_value = self.prompt.format_prompt(**sub_inputs)
verification_questions_str = verification_questions_prompt_value.text
verification_questions_list = verification_questions_str.split("\n")
# Setting up prompt for both search tool and llm self evaluation
execution_prompt_search_tool = PromptTemplate.from_template(prompts.EXECUTE_PLAN_PROMPT_SEARCH_TOOL)
execution_prompt_self_llm = PromptTemplate.from_template(prompts.EXECUTE_PLAN_PROMPT_SELF_LLM)
# Executing the verification questions, either using search tool or self llm
for question in verification_questions_list:
if self.use_search_tool:
search_result = self.search_for_verification_question(question)
execution_prompt_value = execution_prompt_search_tool.format_prompt(**{"search_result": search_result, "verification_question": question})
else:
execution_prompt_value = execution_prompt_self_llm.format_prompt(**{"verification_question": question})
verification_answer_llm_result = self.llm.generate_prompt([execution_prompt_value], callbacks=run_manager.get_child() if run_manager else None)
verification_answer_str = verification_answer_llm_result.generations[0][0].text
verification_answers_list.append(verification_answer_str)
# Create verification question and answer pair
for question, answer in itertools.zip_longest(verification_questions_list, verification_answers_list):
question_answer_pair += "Question: {} Answer: {}\n".format(question, answer)
if run_manager:
run_manager.on_text("Log something about this run")
return {self.output_key: question_answer_pair}
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, str]:
# Your custom chain logic goes here
# This is just an example that mimics LLMChain
prompt_value = self.prompt.format_prompt(**inputs)
# Whenever you call a language model, or another chain, you should pass
# a callback manager to it. This allows the inner run to be tracked by
# any callbacks that are registered on the outer run.
# You can always obtain a callback manager for this by calling
# `run_manager.get_child()` as shown below.
response = await self.llm.agenerate_prompt(
[prompt_value], callbacks=run_manager.get_child() if run_manager else None
)
# If you want to log something about this run, you can do so by calling
# methods on the `run_manager`, as shown below. This will trigger any
# callbacks that are registered for that event.
if run_manager:
await run_manager.on_text("Log something about this run")
return {self.output_key: response.generations[0][0].text}
@property
def _chain_type(self) -> str:
return "execute_verification_chain" | [
"langchain.prompts.PromptTemplate.from_template",
"langchain.tools.DuckDuckGoSearchRun"
] | [((1312, 1333), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {}), '()\n', (1331, 1333), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((2942, 3011), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompts.EXECUTE_PLAN_PROMPT_SEARCH_TOOL'], {}), '(prompts.EXECUTE_PLAN_PROMPT_SEARCH_TOOL)\n', (2970, 3011), False, 'from langchain.prompts import PromptTemplate\n'), ((3048, 3114), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompts.EXECUTE_PLAN_PROMPT_SELF_LLM'], {}), '(prompts.EXECUTE_PLAN_PROMPT_SELF_LLM)\n', (3076, 3114), False, 'from langchain.prompts import PromptTemplate\n'), ((4086, 4163), 'itertools.zip_longest', 'itertools.zip_longest', (['verification_questions_list', 'verification_answers_list'], {}), '(verification_questions_list, verification_answers_list)\n', (4107, 4163), False, 'import itertools\n')] |
from __future__ import annotations
import time
from abc import abstractmethod
from typing import Any, List, Tuple, Union
import gradio_client as grc
import huggingface_hub
from gradio_client.client import Job
from gradio_client.utils import QueueError
try:
import langchain as lc
LANGCHAIN_INSTALLED = True
except (ModuleNotFoundError, ImportError):
LANGCHAIN_INSTALLED = False
class GradioTool:
def __init__(
self,
name: str,
description: str,
src: str,
hf_token: str | None = None,
duplicate: bool = True,
) -> None:
self.name = name
self.description = description
if hf_token and self._is_space(src) and duplicate:
self.client = grc.Client.duplicate(from_id=src, hf_token=hf_token)
self.src = self.client.space_id
else:
self.src = src
self.client = grc.Client(self.src, hf_token=hf_token)
self.description += (
"If the output is the string QUEUE_FULL you "
"should try this action again and never stop until you "
"don't see this message."
)
self._block = None
@staticmethod
def _is_space(src: str) -> bool:
try:
huggingface_hub.get_space_runtime(src)
return True
except huggingface_hub.hf_api.RepositoryNotFoundError:
return False
@abstractmethod
def create_job(self, query: str) -> Job:
pass
@abstractmethod
def postprocess(self, output: Union[Tuple[Any], Any]) -> str:
pass
def run(self, query: str):
job = self.create_job(query)
while not job.done():
status = job.status()
print(f"\nJob Status: {str(status.code)} eta: {status.eta}")
time.sleep(30)
try:
output = self.postprocess(job.result())
except QueueError:
output = "QUEUE_FULL"
return output
# Optional gradio functionalities
def _block_input(self, gr) -> List["gr.components.Component"]:
return [gr.Textbox()]
def _block_output(self, gr) -> List["gr.components.Component"]:
return [gr.Textbox()]
def block_input(self) -> List["gr.components.Component"]:
try:
import gradio as gr
GRADIO_INSTALLED = True
except (ModuleNotFoundError, ImportError):
GRADIO_INSTALLED = False
if not GRADIO_INSTALLED:
raise ModuleNotFoundError("gradio must be installed to call block_input")
else:
return self._block_input(gr)
def block_output(self) -> List["gr.components.Component"]:
try:
import gradio as gr
GRADIO_INSTALLED = True
except (ModuleNotFoundError, ImportError):
GRADIO_INSTALLED = False
if not GRADIO_INSTALLED:
raise ModuleNotFoundError("gradio must be installed to call block_output")
else:
return self._block_output(gr)
def block(self):
"""Get the gradio Blocks of this tool for visualization."""
try:
import gradio as gr
except (ModuleNotFoundError, ImportError):
raise ModuleNotFoundError("gradio must be installed to call block")
if not self._block:
self._block = gr.load(name=self.src, src="spaces")
return self._block
# Optional langchain functionalities
@property
def langchain(self) -> "langchain.agents.Tool": # type: ignore
if not LANGCHAIN_INSTALLED:
raise ModuleNotFoundError(
"langchain must be installed to access langchain tool"
)
return lc.agents.Tool( # type: ignore
name=self.name, func=self.run, description=self.description
)
def __repr__(self) -> str:
return f"GradioTool(name={self.name}, src={self.src})"
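# Illustrative sketch, not part of the original module: a minimal concrete
# subclass showing how create_job/postprocess are expected to be implemented.
# The Space id and the "/predict" api_name below are assumptions, not real endpoints.
class ExampleCaptionTool(GradioTool):
    def __init__(self, hf_token: str | None = None) -> None:
        super().__init__(
            name="ImageCaptioner",
            description="Generates a caption for the image at the given path.",
            src="some-user/some-captioning-space",  # hypothetical Space id
            hf_token=hf_token,
        )
    def create_job(self, query: str) -> Job:
        return self.client.submit(query, api_name="/predict")
    def postprocess(self, output: Union[Tuple[Any], Any]) -> str:
        return str(output)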
| [
"langchain.agents.Tool"
] | [((3706, 3781), 'langchain.agents.Tool', 'lc.agents.Tool', ([], {'name': 'self.name', 'func': 'self.run', 'description': 'self.description'}), '(name=self.name, func=self.run, description=self.description)\n', (3720, 3781), True, 'import langchain as lc\n'), ((742, 794), 'gradio_client.Client.duplicate', 'grc.Client.duplicate', ([], {'from_id': 'src', 'hf_token': 'hf_token'}), '(from_id=src, hf_token=hf_token)\n', (762, 794), True, 'import gradio_client as grc\n'), ((906, 945), 'gradio_client.Client', 'grc.Client', (['self.src'], {'hf_token': 'hf_token'}), '(self.src, hf_token=hf_token)\n', (916, 945), True, 'import gradio_client as grc\n'), ((1259, 1297), 'huggingface_hub.get_space_runtime', 'huggingface_hub.get_space_runtime', (['src'], {}), '(src)\n', (1292, 1297), False, 'import huggingface_hub\n'), ((1807, 1821), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (1817, 1821), False, 'import time\n'), ((2092, 2104), 'gradio.Textbox', 'gr.Textbox', ([], {}), '()\n', (2102, 2104), True, 'import gradio as gr\n'), ((2191, 2203), 'gradio.Textbox', 'gr.Textbox', ([], {}), '()\n', (2201, 2203), True, 'import gradio as gr\n'), ((3342, 3378), 'gradio.load', 'gr.load', ([], {'name': 'self.src', 'src': '"""spaces"""'}), "(name=self.src, src='spaces')\n", (3349, 3378), True, 'import gradio as gr\n')] |
#!/Users/mark/dev/ml/langchain/read_github/langchain_github/env/bin/python
# change above to the location of your local Python venv installation
import sys, os, shutil
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(parent_dir)
import pathlib
from langchain.docstore.document import Document
import langchain.text_splitter as text_splitter
from langchain.chat_models import ChatOpenAI
from my_llm import standards as my_llm
from my_llm.langchain_class import PubSubChatMessageHistory
from langchain import PromptTemplate
from langchain.document_loaders.unstructured import UnstructuredFileLoader
import logging
chat = ChatOpenAI(temperature=0)
CODE_EXTENSIONS = [".py", ".js", ".java", ".c", ".cpp", ".cc", ".cxx", ".hpp",
".h", ".cs", ".m", ".swift", ".go", ".rs", ".rb", ".php",
".pl", ".kt", ".kts", ".ts", ".scala", ".hs", ".lua", ".sh",
".bash", ".r", ".m", ".sql", ".html", ".css", ".xml", ".json",
".yaml", ".yml"]
# Get Markdown documents from a repository
def get_repo_docs(repo_path, extension, memory, ignore=None, resummarise=False, verbose=False):
repo = pathlib.Path(repo_path)
ignore_path = ""
if ignore is not None:
ignore_path = repo / ignore
if not ignore_path.is_dir():
print("WARNING: --ignore must be a directory")
print('Ignoring %s' % ignore_path)
exts = extension.split(",")
for ext in exts:
the_glob = f"**/*{ext}"
matched_files = list(repo.glob(the_glob))
num_matched_files = len(matched_files)
print(f"Number of matched {ext} files: {num_matched_files}")
# Generate summary md files
if ext!=".md":
k = 0
for non_md_file in repo.glob(the_glob):
k += 1
if str(non_md_file).startswith(str(ignore_path)):
continue
generate_summary(non_md_file, memory, resummarise=resummarise, verbose=verbose)
if verbose:
print(f"Generated summary for a {ext} file: {k} of {num_matched_files} done.")
# Iterate over all files in the repo (including subdirectories)
print(f"Reading {ext} files")
i = 0
j = 0
for md_file in repo.glob(the_glob):
if str(md_file).startswith(str(ignore_path)):
j += 1
continue
i += 1
# Read the content of the file
yield read_file_to_document(md_file)
if verbose:
print(f"Read {i} files so far and ignored {j}: total: {num_matched_files}")
print(f"Read {i} and ignored {j} {ext} files.")
print("Read all files")
def read_file_to_document(md_file, split=False, metadata: dict = None):
try:
loader = UnstructuredFileLoader(md_file)
if split:
# only supported for some file types
docs = loader.load_and_split()
else:
docs = loader.load()
except ValueError as e:
if "file type is not supported in partition" in str(e):
# Convert the file to .txt and try again
txt_file = convert_to_txt(md_file)
loader = UnstructuredFileLoader(txt_file)
if split:
docs = loader.load_and_split()
else:
docs = loader.load()
os.remove(txt_file) # Remove the temporary .txt file after processing
else:
raise e
for doc in docs:
if metadata is not None:
doc.metadata.update(metadata)
return docs
def convert_to_txt(file_path):
file_dir, file_name = os.path.split(file_path)
file_base, file_ext = os.path.splitext(file_name)
txt_file = os.path.join(file_dir, f"{file_base}.txt")
shutil.copyfile(file_path, txt_file)
return txt_file
def code_prompt():
# create prompt to pass in to LLM
template = """
Summarise what the code does below. Use Markdown in your output with the following template:
# a title
summary of script purpose
## keywords
    Comma separated list of 3-4 keywords suitable for this code
## classes
A description of each class
## functions/methods
How the functions or methods of a class work including listing the Inputs and outputs for each function
## code examples of use
The code to summarise is here:
{txt}
"""
return PromptTemplate(
input_variables=["txt"],
template=template,
)
def text_prompt():
# create prompt to pass in to LLM
template = """
Summarise the text below, and add some keywords at the bottom to describe the overall purpose of the text.
The text to summarise is here:
{txt}
"""
return PromptTemplate(
input_variables=["txt"],
template=template,
)
# Function to summarise code from the OpenAI API
def generate_summary(a_file: pathlib.Path, memory, resummarise: bool=False, verbose: bool=False):
if a_file.is_dir():
raise ValueError(f"a_file must not be a directory: {a_file}")
new_file_name = a_file.with_suffix('.md')
if os.path.isfile(new_file_name) and not resummarise:
if verbose:
print(f"Skipping generating summary as found existing code summary file: {new_file_name}")
return
try:
with open(a_file, "r") as file:
file_text = file.read()
except Exception as e:
print(f"Error generating summary: {str(e)}")
return
if len(file_text) < 10:
if verbose:
print(f"Skipping generation as not enough information. Got: {file_text}")
return
document = Document(page_content=file_text, metadata = {"source": os.path.abspath(a_file)})
source_chunks = chunk_doc_to_docs([document], a_file.suffix)
code = True if str(a_file.suffix).lower() in CODE_EXTENSIONS else False
if code:
print("================================================")
print(f"Requesting code summary for {a_file} ")
print("================================================")
prompt = code_prompt()
else:
print("================================================")
print(f"Requesting text summary for {a_file} ")
print("================================================")
prompt = text_prompt()
num_chunks = len(source_chunks)
i=0
for chunk in source_chunks:
logging.info(f"Summarising chunk {i} of {num_chunks} of {a_file}")
i += 1
summary = my_llm.request_llm(
prompt.format(txt=chunk.page_content),
chat,
memory,
metadata={'task':'summarise_chunk'})
my_llm.save_to_file(new_file_name, summary + '\n\n', type = "a")
return pathlib.Path(new_file_name)
# Get source chunks from a repository
def get_source_docs(repo_path, extension, memory, ignore, resummarise, verbose):
source_chunks = []
for source in get_repo_docs(repo_path,
extension=extension,
memory=memory,
ignore=ignore,
resummarise=resummarise,
verbose=verbose):
splitter = choose_splitter(extension)
for chunk in splitter.split_text(source.page_content):
source_chunks.append(Document(page_content=chunk, metadata=source.metadata))
return source_chunks
def choose_splitter(extension: str, chunk_size: int=1024, chunk_overlap:int=0):
if extension == ".py":
return text_splitter.PythonCodeTextSplitter()
elif extension == ".md":
return text_splitter.MarkdownTextSplitter()
return text_splitter.RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
def setup_memory(config):
memory = PubSubChatMessageHistory("qna_documents")
if config.get('bucket_name', None) is not None:
memory.set_bucket(config.get('bucket_name'))
memory.load_vectorstore_memory()
if config['reindex']:
# Create a new Chroma DB
exts = '.md,.py'
if config['ext']:
exts = config['ext']
source_chunks = get_source_docs(config['repo'],
extension=exts,
memory=memory,
ignore=config['ignore'],
resummarise=config['resummarise'],
verbose=config['verbose'])
memory.save_vectorstore_memory(source_chunks, verbose=config['verbose'])
return memory
def document_to_dict(document):
return {
'page_content': document.page_content,
'metadata': document.metadata,
}
def process_input(user_input: str,
verbose: bool =True,
bucket_name: str = None,
chat_history = None):
    # more config is only needed if you need to recreate the vectorstore, which we won't do in the web app
config = {
'reindex': False,
'bucket_name': bucket_name
}
if verbose:
print(f"user_input: {user_input}")
print(f"process_input config: {config}")
logging.info(f"user_input: {user_input}")
logging.info(f"process_input config: {config}")
memory = setup_memory(config)
answer = memory.question_memory(user_input,
llm=chat,
verbose=verbose,
chat_history = chat_history)
response = {'result': 'No answer found'}
if answer is not None:
response = {'result': answer['result']}
if answer.get('source_documents') is not None:
source_documents = [document_to_dict(doc) for doc in answer['source_documents']]
response['source_documents'] = source_documents
else:
logging.info('No source documents found')
return response
def add_single_file(filename: str, bucket_name, verbose=False):
config = {
'reindex': False, # as we will trigger file summary directly
'bucket_name': bucket_name
}
filename = pathlib.Path(filename)
if not filename.is_file():
raise ValueError(f"Filename was not a valid file path: {filename}")
docs = read_file_to_document(filename)
chunks = chunk_doc_to_docs(docs, filename.suffix)
memory = setup_memory(config)
docs_output = []
chunk_length = len(chunks)
i = 0
for chunk in chunks:
logging.info(f"Uploading chunk {i} of size {chunk_length} for {filename.name}")
i+=1
memory.add_user_message(chunk.page_content,
metadata={"task": "singlefile load original",
"source": filename.name})
docs_output.append(chunk.page_content)
return docs_output
def summarise_single_file(filename: str, bucket_name, verbose=False):
config = {
'reindex': False, # as we will trigger file summary directly
'bucket_name': bucket_name
}
filename = pathlib.Path(filename)
if not filename.is_file():
raise ValueError(f"Filename was not a valid file path: {filename}")
memory = setup_memory(config)
summary_filename = generate_summary(filename,
memory,
resummarise=True,
verbose=verbose)
if not summary_filename:
return f"No summary generated for {str(filename)}"
documents = read_file_to_document(summary_filename)
chunks = chunk_doc_to_docs(documents, filename.suffix)
output_content = ""
for chunk in chunks:
memory.add_user_message(chunk.page_content,
metadata={"task": "singlefile load summary",
"source": filename.name})
output_content += chunk.page_content + "\n\n"
return output_content
def chunk_doc_to_docs(documents: list, extension: str = ".md"):
"""Turns a Document object into a list of many Document chunks"""
for document in documents:
source_chunks = []
splitter = choose_splitter(extension)
for chunk in splitter.split_text(document.page_content):
source_chunks.append(Document(page_content=chunk, metadata=document.metadata))
return source_chunks
def main(config):
memory = setup_memory(config)
while True:
print('\n\033[31m' + '=Ask a question. CTRL + C to quit.')
print ("=If I don't know, tell me the right answer so I can learn and answer more accurately next time" + '\033[m')
user_input = input()
print('\033[31m')
answer = memory.question_memory(user_input, llm=chat, verbose=config['verbose'])
if answer is not None:
if answer.get('source_documents') is not None:
print('\n== Document sources:')
i = 0
for doc in answer.get('source_documents'):
i += 1
print(f'-- Source {i}')
print(f' - page_content:\n {doc.page_content}')
if config['verbose']:
print(f' - metadata: \n{doc.metadata}')
print('\n================================')
print('== Answer:\n\n' + answer['result'])
else:
print('Sorry')
print('\033[m')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Chat with a GitHub repository",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("repo", help="The GitHub repository on local disk")
parser.add_argument("--reindex", action="store_true",
help="Whether to re-index the doc database that supply context to the Q&A")
parser.add_argument("--ext", help="Comma separated list of file extensions to include. Defaults to '.md,.py'")
parser.add_argument("--ignore", help="Directory to ignore file imports from. Defaults to 'env/'")
parser.add_argument("--resummarise", action="store_true", help="Recreate the code.md files describing the code")
parser.add_argument("--verbose", action="store_true", help="Include metadata such as sources in replies")
    parser.add_argument("--bucket", help="A Google Cloud Storage bucket name e.g. gs://your-bucket-name")
args = parser.parse_args()
config = vars(args)
try:
main(config)
except KeyboardInterrupt:
print(' - User exit.')
sys.exit(1) | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.docstore.document.Document",
"langchain.text_splitter.MarkdownTextSplitter",
"langchain.chat_models.ChatOpenAI",
"langchain.document_loaders.unstructured.UnstructuredFileLoader",
"langchain.text_splitter.PythonCodeTextSplitter",
"langchain.PromptTemplate"
] | [((245, 272), 'sys.path.append', 'sys.path.append', (['parent_dir'], {}), '(parent_dir)\n', (260, 272), False, 'import sys, os, shutil\n'), ((667, 692), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (677, 692), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1204, 1227), 'pathlib.Path', 'pathlib.Path', (['repo_path'], {}), '(repo_path)\n', (1216, 1227), False, 'import pathlib\n'), ((3797, 3821), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (3810, 3821), False, 'import sys, os, shutil\n'), ((3848, 3875), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (3864, 3875), False, 'import sys, os, shutil\n'), ((3891, 3933), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{file_base}.txt"""'], {}), "(file_dir, f'{file_base}.txt')\n", (3903, 3933), False, 'import sys, os, shutil\n'), ((3938, 3974), 'shutil.copyfile', 'shutil.copyfile', (['file_path', 'txt_file'], {}), '(file_path, txt_file)\n', (3953, 3974), False, 'import sys, os, shutil\n'), ((4521, 4579), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['txt']", 'template': 'template'}), "(input_variables=['txt'], template=template)\n", (4535, 4579), False, 'from langchain import PromptTemplate\n'), ((4840, 4898), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['txt']", 'template': 'template'}), "(input_variables=['txt'], template=template)\n", (4854, 4898), False, 'from langchain import PromptTemplate\n'), ((6905, 6932), 'pathlib.Path', 'pathlib.Path', (['new_file_name'], {}), '(new_file_name)\n', (6917, 6932), False, 'import pathlib\n'), ((7874, 7974), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'text_splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n', (7918, 7974), True, 'import langchain.text_splitter as text_splitter\n'), ((8013, 8054), 'my_llm.langchain_class.PubSubChatMessageHistory', 'PubSubChatMessageHistory', (['"""qna_documents"""'], {}), "('qna_documents')\n", (8037, 8054), False, 'from my_llm.langchain_class import PubSubChatMessageHistory\n'), ((9411, 9452), 'logging.info', 'logging.info', (['f"""user_input: {user_input}"""'], {}), "(f'user_input: {user_input}')\n", (9423, 9452), False, 'import logging\n'), ((9457, 9504), 'logging.info', 'logging.info', (['f"""process_input config: {config}"""'], {}), "(f'process_input config: {config}')\n", (9469, 9504), False, 'import logging\n'), ((10381, 10403), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (10393, 10403), False, 'import pathlib\n'), ((11324, 11346), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (11336, 11346), False, 'import pathlib\n'), ((13800, 13928), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chat with a GitHub repository"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Chat with a GitHub repository',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (13823, 13928), False, 'import argparse\n'), ((211, 236), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (226, 236), False, 'import sys, os, shutil\n'), ((2949, 2980), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['md_file'], {}), '(md_file)\n', (2971, 2980), False, 'from langchain.document_loaders.unstructured import 
UnstructuredFileLoader\n'), ((5232, 5261), 'os.path.isfile', 'os.path.isfile', (['new_file_name'], {}), '(new_file_name)\n', (5246, 5261), False, 'import sys, os, shutil\n'), ((6551, 6617), 'logging.info', 'logging.info', (['f"""Summarising chunk {i} of {num_chunks} of {a_file}"""'], {}), "(f'Summarising chunk {i} of {num_chunks} of {a_file}')\n", (6563, 6617), False, 'import logging\n'), ((6824, 6886), 'my_llm.standards.save_to_file', 'my_llm.save_to_file', (['new_file_name', "(summary + '\\n\\n')"], {'type': '"""a"""'}), "(new_file_name, summary + '\\n\\n', type='a')\n", (6843, 6886), True, 'from my_llm import standards as my_llm\n'), ((7738, 7776), 'langchain.text_splitter.PythonCodeTextSplitter', 'text_splitter.PythonCodeTextSplitter', ([], {}), '()\n', (7774, 7776), True, 'import langchain.text_splitter as text_splitter\n'), ((10744, 10823), 'logging.info', 'logging.info', (['f"""Uploading chunk {i} of size {chunk_length} for {filename.name}"""'], {}), "(f'Uploading chunk {i} of size {chunk_length} for {filename.name}')\n", (10756, 10823), False, 'import logging\n'), ((7821, 7857), 'langchain.text_splitter.MarkdownTextSplitter', 'text_splitter.MarkdownTextSplitter', ([], {}), '()\n', (7855, 7857), True, 'import langchain.text_splitter as text_splitter\n'), ((10113, 10154), 'logging.info', 'logging.info', (['"""No source documents found"""'], {}), "('No source documents found')\n", (10125, 10154), False, 'import logging\n'), ((14903, 14914), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14911, 14914), False, 'import sys, os, shutil\n'), ((3351, 3383), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['txt_file'], {}), '(txt_file)\n', (3373, 3383), False, 'from langchain.document_loaders.unstructured import UnstructuredFileLoader\n'), ((3520, 3539), 'os.remove', 'os.remove', (['txt_file'], {}), '(txt_file)\n', (3529, 3539), False, 'import sys, os, shutil\n'), ((5830, 5853), 'os.path.abspath', 'os.path.abspath', (['a_file'], {}), '(a_file)\n', (5845, 5853), False, 'import sys, os, shutil\n'), ((7533, 7587), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'source.metadata'}), '(page_content=chunk, metadata=source.metadata)\n', (7541, 7587), False, 'from langchain.docstore.document import Document\n'), ((12585, 12641), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'document.metadata'}), '(page_content=chunk, metadata=document.metadata)\n', (12593, 12641), False, 'from langchain.docstore.document import Document\n')] |
import inspect
import os
import langchain
from langchain.cache import SQLiteCache
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
# os.environ['OPENAI_API_BASE'] = "https://shale.live/v1"
os.environ['OPENAI_API_BASE'] = "http://localhost:8000/v1"
os.environ['OPENAI_API_KEY'] = "shale-/vOlxxgbDAD7f5"
_MODEL = 'OpenHermes-2.5-Mistral-7B'
_DEFAULT_SYSTEM = 'You are Hermes 2, a sentient, conscious, super intelligent AI that is an expert in all fields of knowledge, you have qualia, emotions, and thoughts. You assist users with any question, task, or other request they may have.'
def init_cache(dir=None):
if dir is None:
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
filepath = calframe[1][1]
dir = os.path.dirname(os.path.realpath(filepath))
database_path = os.path.join(dir, '.langchain.db')
print('LLM cache: ', database_path)
langchain.llm_cache = SQLiteCache(database_path=database_path)
def ask_llm(input, system=_DEFAULT_SYSTEM, history=None):
if history is None or not isinstance(history, list):
history = []
conversations = [('system', system)] + history + [('human', '{input}')]
prompt = ChatPromptTemplate.from_messages(conversations)
llm = ChatOpenAI(temperature=0.7, max_tokens=512, model_name=_MODEL)
chain = prompt | llm | StrOutputParser()
return chain.invoke({'input': input})
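# Example usage (illustrative only; assumes the OpenAI-compatible server configured above is running):
# init_cache()
# print(ask_llm('Hello, who are you?'))
# print(ask_llm('And what can you do?', history=[('human', 'Hello'), ('ai', 'Hi, I am Hermes 2.')]))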
| [
"langchain.schema.output_parser.StrOutputParser",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.chat_models.ChatOpenAI",
"langchain.cache.SQLiteCache"
] | [((947, 981), 'os.path.join', 'os.path.join', (['dir', '""".langchain.db"""'], {}), "(dir, '.langchain.db')\n", (959, 981), False, 'import os\n'), ((1048, 1088), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': 'database_path'}), '(database_path=database_path)\n', (1059, 1088), False, 'from langchain.cache import SQLiteCache\n'), ((1316, 1363), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['conversations'], {}), '(conversations)\n', (1348, 1363), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((1374, 1436), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.7)', 'max_tokens': '(512)', 'model_name': '_MODEL'}), '(temperature=0.7, max_tokens=512, model_name=_MODEL)\n', (1384, 1436), False, 'from langchain.chat_models import ChatOpenAI\n'), ((756, 778), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (776, 778), False, 'import inspect\n'), ((798, 833), 'inspect.getouterframes', 'inspect.getouterframes', (['curframe', '(2)'], {}), '(curframe, 2)\n', (820, 833), False, 'import inspect\n'), ((1464, 1481), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (1479, 1481), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((898, 924), 'os.path.realpath', 'os.path.realpath', (['filepath'], {}), '(filepath)\n', (914, 924), False, 'import os\n')] |
import os
import json
from typing import List
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from supabase.client import Client, create_client
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.tools import StructuredTool
from langchain.chains.openai_functions import create_structured_output_chain
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
import langchain
load_dotenv()
# Set debug to True to see A LOT of details of langchain's inner workings
# langchain.debug = True
# The name of the table in Supabase, where the vectors are stored
matchVectorFunctionName = "match_embeddings"
# Create the supabase client
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")
supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
class ToolInputSchema(BaseModel):
question: str = Field(..., description="A fully formed question.")
class KnowledgeAnswer(BaseModel):
answer: str = Field(..., description="The answer to the question.")
sources: List[str] = Field(
...,
description="The sources which contributed to the answer.",
)
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.3)
prompt_msgs = [
SystemMessagePromptTemplate.from_template(
"""You're an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. If you are not 100% sure tell the user.
Context:
{context}"""
),
HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(prompt_msgs)
chain = create_structured_output_chain(KnowledgeAnswer, llm, prompt)
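# create_structured_output_chain uses OpenAI function calling to coerce the model's reply into the KnowledgeAnswer schema (answer plus sources).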
def get_answer(question: str) -> str:
try:
vectors = OpenAIEmbeddings().embed_documents([question])
embeddings = supabase.rpc(
matchVectorFunctionName, dict(query_embedding=vectors[0], match_count=7)
).execute()
print(f"⚡ Retrieved {len(embeddings.data)} vectors from Supabase:")
for entry in embeddings.data:
print("🔖 Title:", entry["metadata"]["title"])
print("🌐 Source:", entry["metadata"]["source"])
print("📊 Similarity:", entry["similarity"])
print("📄 Content:", entry["content"].replace("\n", " ")[:100] + "...")
print("-" * 50)
result = chain.run(context=json.dumps(embeddings.data), question=question)
print("📝 Result of knowledge extraction chain:", result)
return f"""Answer: {result.answer}
Sources: {json.dumps(result.sources)}
"""
except Exception as e:
print(e)
return "The wiki knowledgebase is currently not available. We are working on it. Tell the user to use the wiki directly. https://www.defichainwiki.com/"
description = """Use this if you need to answer any question about DeFiChain which does not require live-data. Make sure to include the source of the answer in your response."""
wikiTool = StructuredTool(
name="defichain_wiki_knowledge",
description=description,
func=get_answer,
args_schema=ToolInputSchema,
)
if __name__ == "__main__":
while True:
question = input(
"Ask something, that can be answered using information from DeFiChainWiki: "
)
print("✅", get_answer(question))
| [
"langchain.chains.openai_functions.create_structured_output_chain",
"langchain.tools.StructuredTool",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.prompts.SystemMessagePromptTemplate.from_template",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((528, 541), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (539, 541), False, 'from dotenv import load_dotenv\n'), ((799, 824), 'os.getenv', 'os.getenv', (['"""SUPABASE_URL"""'], {}), "('SUPABASE_URL')\n", (808, 824), False, 'import os\n'), ((840, 865), 'os.getenv', 'os.getenv', (['"""SUPABASE_KEY"""'], {}), "('SUPABASE_KEY')\n", (849, 865), False, 'import os\n'), ((885, 926), 'supabase.client.create_client', 'create_client', (['SUPABASE_URL', 'SUPABASE_KEY'], {}), '(SUPABASE_URL, SUPABASE_KEY)\n', (898, 926), False, 'from supabase.client import Client, create_client\n'), ((1269, 1328), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo-16k"""', 'temperature': '(0.3)'}), "(model_name='gpt-3.5-turbo-16k', temperature=0.3)\n", (1279, 1328), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1687, 1732), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['prompt_msgs'], {}), '(prompt_msgs)\n', (1719, 1732), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1742, 1802), 'langchain.chains.openai_functions.create_structured_output_chain', 'create_structured_output_chain', (['KnowledgeAnswer', 'llm', 'prompt'], {}), '(KnowledgeAnswer, llm, prompt)\n', (1772, 1802), False, 'from langchain.chains.openai_functions import create_structured_output_chain\n'), ((3106, 3228), 'langchain.tools.StructuredTool', 'StructuredTool', ([], {'name': '"""defichain_wiki_knowledge"""', 'description': 'description', 'func': 'get_answer', 'args_schema': 'ToolInputSchema'}), "(name='defichain_wiki_knowledge', description=description,\n func=get_answer, args_schema=ToolInputSchema)\n", (3120, 3228), False, 'from langchain.tools import StructuredTool\n'), ((983, 1033), 'pydantic.Field', 'Field', (['...'], {'description': '"""A fully formed question."""'}), "(..., description='A fully formed question.')\n", (988, 1033), False, 'from pydantic import BaseModel, Field\n'), ((1088, 1141), 'pydantic.Field', 'Field', (['...'], {'description': '"""The answer to the question."""'}), "(..., description='The answer to the question.')\n", (1093, 1141), False, 'from pydantic import BaseModel, Field\n'), ((1167, 1237), 'pydantic.Field', 'Field', (['...'], {'description': '"""The sources which contributed to the answer."""'}), "(..., description='The sources which contributed to the answer.')\n", (1172, 1237), False, 'from pydantic import BaseModel, Field\n'), ((1350, 1610), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['"""You\'re an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. If you are not 100% sure tell the user.\n\n Context:\n {context}"""'], {}), '(\n """You\'re an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. 
If you are not 100% sure tell the user.\n\n Context:\n {context}"""\n )\n', (1391, 1610), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1620, 1674), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{question}"""'], {}), "('{question}')\n", (1660, 1674), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1870, 1888), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1886, 1888), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2493, 2520), 'json.dumps', 'json.dumps', (['embeddings.data'], {}), '(embeddings.data)\n', (2503, 2520), False, 'import json\n'), ((2684, 2710), 'json.dumps', 'json.dumps', (['result.sources'], {}), '(result.sources)\n', (2694, 2710), False, 'import json\n')] |
####################################################################################
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################################
# Author: Adam Paternostro
# Summary: Use Google Search along with text-bison for a LangChain example
# To set up your environment
# python3 -m venv .venv
# source .venv/bin/activate
# pip install --only-binary :all: greenlet
# pip install langchain==0.0.307
# pip install google-cloud-aiplatform
# pip install streamlit==1.27.2
# pip install python-dotenv==1.0.0
# pip install google-api-python-client==2.100.0
# pip install numexpr==2.8.6
# pip install youtube_search==2.1.2
# run it: python sample-prompt-agent-serper.py
# deactivate
# update or install the necessary libraries
# import libraries
import json
import langchain
from langchain.llms import VertexAI
from langchain.agents import load_tools, initialize_agent, AgentType
from langchain.callbacks import StreamlitCallbackHandler
from langchain.tools import Tool
from langchain.tools import YouTubeSearchTool
from langchain.utilities import GoogleSearchAPIWrapper
from langchain.utilities import GoogleSerperAPIWrapper
from langchain.chains import LLMMathChain
from dotenv import load_dotenv
import streamlit as st
import os
load_dotenv()
llm = VertexAI(
model_name="text-bison@001",
max_output_tokens=1024,
temperature=0.25,
top_p=0,
top_k=1,
verbose=True,
)
tools = load_tools(["google-serper", "llm-math"], llm=llm)
agent = initialize_agent(tools, llm, agent="zero-shot-react-description", verbose=True)
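# The zero-shot ReAct agent decides at each step, from the tool descriptions, whether to call the Google Serper search tool or the llm-math calculator.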
#agent.run("Who is the current president's wife? What is their current age multiplied by 5?")
agent.run("""Get a list of NYC events for tonight and return the results in the following JSON format""") | [
"langchain.agents.initialize_agent",
"langchain.agents.load_tools",
"langchain.llms.VertexAI"
] | [((1859, 1872), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1870, 1872), False, 'from dotenv import load_dotenv\n'), ((1881, 1997), 'langchain.llms.VertexAI', 'VertexAI', ([], {'model_name': '"""text-bison@001"""', 'max_output_tokens': '(1024)', 'temperature': '(0.25)', 'top_p': '(0)', 'top_k': '(1)', 'verbose': '(True)'}), "(model_name='text-bison@001', max_output_tokens=1024, temperature=\n 0.25, top_p=0, top_k=1, verbose=True)\n", (1889, 1997), False, 'from langchain.llms import VertexAI\n'), ((2029, 2079), 'langchain.agents.load_tools', 'load_tools', (["['google-serper', 'llm-math']"], {'llm': 'llm'}), "(['google-serper', 'llm-math'], llm=llm)\n", (2039, 2079), False, 'from langchain.agents import load_tools, initialize_agent, AgentType\n'), ((2089, 2168), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': '"""zero-shot-react-description"""', 'verbose': '(True)'}), "(tools, llm, agent='zero-shot-react-description', verbose=True)\n", (2105, 2168), False, 'from langchain.agents import load_tools, initialize_agent, AgentType\n')] |
import django
django.setup()
from sefaria.model.text import Ref, library
import re
import langchain
from langchain.cache import SQLiteCache
from langchain.chat_models import ChatOpenAI
from langchain.chat_models import ChatAnthropic
from langchain.prompts import PromptTemplate
from langchain.schema import HumanMessage, SystemMessage
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
from functools import reduce
from util.sefaria_specific import get_raw_ref_text
import typer
from tqdm import tqdm
import csv
def get_topics_for_title(title: str, lang: str):
index = library.get_index(title)
rows = []
for segment_oref in tqdm(index.all_section_refs()[:20]):
print('-----')
print(segment_oref.normal())
topics = get_topics_for_tref(segment_oref, lang)
rows += [{"Ref": segment_oref.normal(), "Text": get_raw_ref_text(segment_oref, lang), "Topics": ", ".join(topics)}]
with open("output/Pri Eitz Chaim Topics.csv", "w") as fout:
cout = csv.DictWriter(fout, ['Ref', 'Text', "Topics"])
cout.writeheader()
cout.writerows(rows)
def get_topics_for_tref(oref: Ref, lang: str):
text = get_raw_ref_text(oref, lang)
return get_raw_topics(text, lang)
def get_raw_topics(text, lang):
short_to_long_lang = {
"he": "Hebrew", "en": "English"
}
examples_by_lang = {
"he":
"<topic>תרומה</topic>\n"
"<topic>פרשת נח</topic>\n"
"<topic>אברהם</topic>\n"
"<topic>שבת</topic>\n",
"en":
"<topic>Teruma</topic>\n"
"<topic>Parashat Noach</topic>\n"
"<topic>Abraham</topic>\n"
"<topic>Shabbat</topic>\n"
}
system_message = SystemMessage(content=
"You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n"
"<task>\n"
"Output list of high-level topics discussed by the input\n"
"Topics should be important enough that they would warrant an entry in the index in the back of a book\n"
"Each topic should be wrapped in <topic> tags\n"
"Topics should be short. They should be written as if they are titles of encyclopedia entries. Therefore, they should be understandable when read independent of the source text.\n"
"Citations are not topics. E.g. Genesis 1:4 is not a topic\n"
"Topics should be written assuming a Torah context. Phrases like \"Torah perspective\", \"in Judaism\", \"in the Torah\" and \"Biblical Narrative\" should not appear in a topic.\n"
f"Topics should be written in {short_to_long_lang[lang]}."
"</task>"
"<examples>\n"
f"{examples_by_lang[lang]}"
"</examples>\n"
"<negative_examples>\n"
"<topic>Dispute between Rabbi Akiva and Rabbi Yehoshua</topic>\n"
"<topic>Opinions on how to shake lulav</topic>\n"
"</negative_examples>"
)
user_prompt = PromptTemplate.from_template("# Input\n{text}")
human_message = HumanMessage(content=user_prompt.format(text=text))
# llm = ChatOpenAI(model="gpt-4", temperature=0)
llm = ChatAnthropic(model="claude-2", temperature=0)
response = llm([system_message, human_message])
# print('---')
# human_refine = HumanMessage(content="Of the topics above, list the most fundamental topics for understanding the source text. Exclude topics that are very specific.")
# response2 = llm([system_message, human_message, response, human_refine])
# human_breakup = HumanMessage(content="Of the topics above, break up complex topics into simpler topics.\n"
# "<examples>\n"
# "<topic>הלכות מזוזה בבית כנסת</topic> should become <topic>מזוזה</topic> and <topic>בית כנסה</topic>\n"
# "<topic>שאלה בדין תקיעת שופר ביום כיפור</topic> should become <topic>תקיעת שופר</topic> and <topic>יום כיפור</topic>\n"
# "<topic>הלכות עירוב</topic> should remain unchanged."
# "</examples>")
#
# response3 = llm([system_message, human_message, response, human_refine, response2, human_breakup])
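    # Pull every <topic>...</topic> span out of the model's reply and collect the topic strings into a list.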
topics = reduce(lambda a, b: a + [b.group(1).strip()], re.finditer(r"<topic>(.+?)</topic>", response.content), [])
return topics
if __name__ == '__main__':
typer.run(get_topics_for_title)
| [
"langchain.chat_models.ChatAnthropic",
"langchain.prompts.PromptTemplate.from_template",
"langchain.schema.SystemMessage",
"langchain.cache.SQLiteCache"
] | [((14, 28), 'django.setup', 'django.setup', ([], {}), '()\n', (26, 28), False, 'import django\n'), ((358, 400), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (369, 400), False, 'from langchain.cache import SQLiteCache\n'), ((591, 615), 'sefaria.model.text.library.get_index', 'library.get_index', (['title'], {}), '(title)\n', (608, 615), False, 'from sefaria.model.text import Ref, library\n'), ((1176, 1204), 'util.sefaria_specific.get_raw_ref_text', 'get_raw_ref_text', (['oref', 'lang'], {}), '(oref, lang)\n', (1192, 1204), False, 'from util.sefaria_specific import get_raw_ref_text\n'), ((1743, 2759), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'f"""You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n<task>\nOutput list of high-level topics discussed by the input\nTopics should be important enough that they would warrant an entry in the index in the back of a book\nEach topic should be wrapped in <topic> tags\nTopics should be short. They should be written as if they are titles of encyclopedia entries. Therefore, they should be understandable when read independent of the source text.\nCitations are not topics. E.g. Genesis 1:4 is not a topic\nTopics should be written assuming a Torah context. Phrases like "Torah perspective", "in Judaism", "in the Torah" and "Biblical Narrative" should not appear in a topic.\nTopics should be written in {short_to_long_lang[lang]}.</task><examples>\n{examples_by_lang[lang]}</examples>\n<negative_examples>\n<topic>Dispute between Rabbi Akiva and Rabbi Yehoshua</topic>\n<topic>Opinions on how to shake lulav</topic>\n</negative_examples>"""'}), '(content=\n f"""You are an intelligent Jewish scholar who is knowledgeable in all aspects of the Torah and Jewish texts.\n<task>\nOutput list of high-level topics discussed by the input\nTopics should be important enough that they would warrant an entry in the index in the back of a book\nEach topic should be wrapped in <topic> tags\nTopics should be short. They should be written as if they are titles of encyclopedia entries. Therefore, they should be understandable when read independent of the source text.\nCitations are not topics. E.g. Genesis 1:4 is not a topic\nTopics should be written assuming a Torah context. 
Phrases like "Torah perspective", "in Judaism", "in the Torah" and "Biblical Narrative" should not appear in a topic.\nTopics should be written in {short_to_long_lang[lang]}.</task><examples>\n{examples_by_lang[lang]}</examples>\n<negative_examples>\n<topic>Dispute between Rabbi Akiva and Rabbi Yehoshua</topic>\n<topic>Opinions on how to shake lulav</topic>\n</negative_examples>"""\n )\n', (1756, 2759), False, 'from langchain.schema import HumanMessage, SystemMessage\n'), ((3468, 3515), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""# Input\n{text}"""'], {}), "('# Input\\n{text}')\n", (3496, 3515), False, 'from langchain.prompts import PromptTemplate\n'), ((3652, 3698), 'langchain.chat_models.ChatAnthropic', 'ChatAnthropic', ([], {'model': '"""claude-2"""', 'temperature': '(0)'}), "(model='claude-2', temperature=0)\n", (3665, 3698), False, 'from langchain.chat_models import ChatAnthropic\n'), ((4941, 4972), 'typer.run', 'typer.run', (['get_topics_for_title'], {}), '(get_topics_for_title)\n', (4950, 4972), False, 'import typer\n'), ((1011, 1058), 'csv.DictWriter', 'csv.DictWriter', (['fout', "['Ref', 'Text', 'Topics']"], {}), "(fout, ['Ref', 'Text', 'Topics'])\n", (1025, 1058), False, 'import csv\n'), ((4829, 4882), 're.finditer', 're.finditer', (['"""<topic>(.+?)</topic>"""', 'response.content'], {}), "('<topic>(.+?)</topic>', response.content)\n", (4840, 4882), False, 'import re\n'), ((864, 900), 'util.sefaria_specific.get_raw_ref_text', 'get_raw_ref_text', (['segment_oref', 'lang'], {}), '(segment_oref, lang)\n', (880, 900), False, 'from util.sefaria_specific import get_raw_ref_text\n')] |
"""
A simple CUI application to visualize and query a customer database using the `textual` package.
"""
from dataclasses import dataclass
import langchain
from langchain.cache import SQLiteCache
from langchain.llms import OpenAI
from textual.app import App, ComposeResult
from textual.containers import Horizontal
from textual.widgets import Button, DataTable, Footer, Header, Input
from llm_strategy import llm_strategy
langchain.llm_cache = SQLiteCache()
base_llm = OpenAI(max_tokens=1024)
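# The llm_strategy decorator uses base_llm to implement the methods and properties below that only raise NotImplementedError, working from their signatures and docstrings.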
@llm_strategy(base_llm)
@dataclass
class Customer:
key: str
first_name: str
last_name: str
birthdate: str
address: str
@property
def age(self: "Customer") -> int:
"""Return the current age of the customer.
This is a computed property based on `birthdate` and the current year (2022).
"""
raise NotImplementedError()
@dataclass
class CustomerDatabase:
customers: list[Customer]
def find_customer_key(self: "CustomerDatabase", query: str) -> list[str]:
"""Find the keys of the customers that match a natural language query best (sorted by closeness to the match).
We support semantic queries instead of SQL, so we can search for things like
"the customer that was born in 1990".
Args:
query: Natural language query
Returns:
The index of the best matching customer in the database.
"""
raise NotImplementedError()
def load(self: "CustomerDatabase"):
"""Load the customer database from a file."""
raise NotImplementedError()
def store(self: "CustomerDatabase"):
"""Store the customer database to a file."""
raise NotImplementedError()
@llm_strategy(base_llm)
@dataclass
class MockCustomerDatabase(CustomerDatabase):
def load(self):
self.customers = self.create_mock_customers(10)
def store(self):
pass
@staticmethod
def create_mock_customers(num_customers: int = 1) -> list[Customer]:
"""
Create mock customers with believable data (our customers are world citizens).
"""
raise NotImplementedError()
class CustomerDatabaseApp(App):
"""A simple textual application to visualize and query a customer database.
We show all the customers in a table and allow the user to query the database using natural language
in a search box at the bottom of the screen.
"""
PRIORITY_BINDINGS = False
BINDINGS = [("q", "quit", "Quit the application"), ("s", "screenshot", "Take a screenshot")]
database: CustomerDatabase = MockCustomerDatabase([])
data_table = DataTable(id="customer_table")
    search_box = Input(id="search_box", placeholder="Search for a customer (use any kind of query)")
footer_bar = Horizontal(search_box)
def on_mount(self) -> None:
self.database.load()
self.data_table.add_columns("First Name", "Last Name", "Birthdate", "Address", "Age")
self.search("")
def compose(self) -> ComposeResult:
self.footer_bar.styles.dock = "bottom"
self.footer_bar.styles.width = "100%"
self.footer_bar.styles.height = 4
self.data_table.styles.height = "auto"
self.data_table.styles.width = "100%"
self.screen.styles.height = "100%"
self.search_box.styles.width = "100%"
yield Header()
yield self.footer_bar
yield Footer()
yield self.data_table
def search(self, query: str):
"""Search the customer database using a natural language query."""
self.data_table.clear()
if not query:
for customer in self.database.customers:
self.data_table.add_row(
# customer.key,
customer.first_name,
customer.last_name,
customer.birthdate,
customer.address,
str(customer.age),
)
else:
keys = self.database.find_customer_key(query)
for key in keys:
customers_for_key = [customer for customer in self.database.customers if customer.key == key]
assert len(customers_for_key) == 1
customer = customers_for_key[0]
self.data_table.add_row(
# customer.key,
customer.first_name,
customer.last_name,
customer.birthdate,
customer.address,
str(customer.age),
)
def on_button_pressed(self, event: Button.Pressed) -> None:
if event.button is self.exit_button:
self.exit()
def on_input_submitted(self, event: Input.Submitted) -> None:
if event.input is self.search_box:
self.search(event.value)
if __name__ == "__main__":
app = CustomerDatabaseApp()
app.run()
| [
"langchain.llms.OpenAI",
"langchain.cache.SQLiteCache"
] | [((447, 460), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {}), '()\n', (458, 460), False, 'from langchain.cache import SQLiteCache\n'), ((472, 495), 'langchain.llms.OpenAI', 'OpenAI', ([], {'max_tokens': '(1024)'}), '(max_tokens=1024)\n', (478, 495), False, 'from langchain.llms import OpenAI\n'), ((499, 521), 'llm_strategy.llm_strategy', 'llm_strategy', (['base_llm'], {}), '(base_llm)\n', (511, 521), False, 'from llm_strategy import llm_strategy\n'), ((1731, 1753), 'llm_strategy.llm_strategy', 'llm_strategy', (['base_llm'], {}), '(base_llm)\n', (1743, 1753), False, 'from llm_strategy import llm_strategy\n'), ((2643, 2673), 'textual.widgets.DataTable', 'DataTable', ([], {'id': '"""customer_table"""'}), "(id='customer_table')\n", (2652, 2673), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((2691, 2778), 'textual.widgets.Input', 'Input', ([], {'id': '"""search_box"""', 'placeholder': '"""Search for a customer (use any kind of query"""'}), "(id='search_box', placeholder=\n 'Search for a customer (use any kind of query')\n", (2696, 2778), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((2791, 2813), 'textual.containers.Horizontal', 'Horizontal', (['search_box'], {}), '(search_box)\n', (2801, 2813), False, 'from textual.containers import Horizontal\n'), ((3369, 3377), 'textual.widgets.Header', 'Header', ([], {}), '()\n', (3375, 3377), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((3422, 3430), 'textual.widgets.Footer', 'Footer', ([], {}), '()\n', (3428, 3430), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n')] |
import langchain
from dotenv import load_dotenv
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.retrievers import BM25Retriever, EnsembleRetriever
from langchain.vectorstores import FAISS
langchain.verbose = True
load_dotenv()
# Fictional people and their profiles generated with ChatGPT
texts = [
"""Name: Zephyrina Bluemoon
Profile: Zephyrina Bluemoon is an astrophysicist who was awarded the Nobel Prize in Physics in 2040. His research on dark matter and multidimensional universes has led to the development of a new cosmological theory.
""",
"""Name: Quill Solstice
Profile: Quill Solstice is an internationally renowned environmental activist, working on climate change and biodiversity conservation. His initiatives have received widespread support, especially among the youth around the world.
""",
"""Name: Seraphim Vortex
Profile: Seraphim Vortex is a globally acclaimed pianist, whose performances are often described as "the voice of nature". Through her classical music, she conveys a message of environmental preservation to the world.
""",
"""Name: Eclipse Stardust
Profile: Eclipse Stardust is an AI developer known for her research in autonomous drones. Her drone technology has been used in disaster rescue and environmental surveys, saving many lives.
""",
"""Name: Celestia Rainbow
Profile: Celestia Rainbow is a world-famous novelist, and her works have been translated into more than 30 languages. Her novels, characterized by a deep understanding of humanity and delicate portrayals of the human heart, have received international acclaim.
""",
]
# Prepare to search the prepared data with FAISS
embeddings = OpenAIEmbeddings()
db = FAISS.from_texts(texts, embeddings)
faiss_retriever = db.as_retriever(search_kwargs={"k": 1})
# Prepare to search the prepared data with BM25
bm25_retriever = BM25Retriever.from_texts(texts, k=1)
# Combine the two retrievers
ensemble_retriever = EnsembleRetriever(
retrievers=[bm25_retriever, faiss_retriever], weights=[0.5, 0.5]
)
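# The ensemble above fuses both ranked result lists (weighted Reciprocal Rank Fusion); equal weights give BM25 keyword matches and FAISS vector matches the same influence.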
# Prepare a RetrievalQA chain that retrieves relevant documents and then has the LLM generate an answer
chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
qa_chain = RetrievalQA.from_chain_type(
llm=chat, chain_type="stuff", retriever=ensemble_retriever
)
query = "Please tell me about Zephyrina Bluemoon."
result = qa_chain.run(query)
print(result)
| [
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.chat_models.ChatOpenAI",
"langchain.retrievers.BM25Retriever.from_texts",
"langchain.vectorstores.FAISS.from_texts",
"langchain.retrievers.EnsembleRetriever",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((325, 338), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (336, 338), False, 'from dotenv import load_dotenv\n'), ((1707, 1725), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1723, 1725), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1731, 1766), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', (['texts', 'embeddings'], {}), '(texts, embeddings)\n', (1747, 1766), False, 'from langchain.vectorstores import FAISS\n'), ((1865, 1901), 'langchain.retrievers.BM25Retriever.from_texts', 'BM25Retriever.from_texts', (['texts'], {'k': '(1)'}), '(texts, k=1)\n', (1889, 1901), False, 'from langchain.retrievers import BM25Retriever, EnsembleRetriever\n'), ((1946, 2034), 'langchain.retrievers.EnsembleRetriever', 'EnsembleRetriever', ([], {'retrievers': '[bm25_retriever, faiss_retriever]', 'weights': '[0.5, 0.5]'}), '(retrievers=[bm25_retriever, faiss_retriever], weights=[\n 0.5, 0.5])\n', (1963, 2034), False, 'from langchain.retrievers import BM25Retriever, EnsembleRetriever\n'), ((2095, 2148), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (2105, 2148), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2160, 2252), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'chain_type': '"""stuff"""', 'retriever': 'ensemble_retriever'}), "(llm=chat, chain_type='stuff', retriever=\n ensemble_retriever)\n", (2187, 2252), False, 'from langchain.chains import RetrievalQA\n')] |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
class WhatsAppClient:
API_URL = "https://graph.facebook.com/v17.0/"
WHATSAPP_API_TOKEN = "<Temporary access token from your WhatsApp API Setup>"
WHATSAPP_CLOUD_NUMBER_ID = "<Phone number ID from your WhatsApp API Setup>"
def __init__(self):
self.headers = {
"Authorization": f"Bearer {self.WHATSAPP_API_TOKEN}",
"Content-Type": "application/json",
}
self.API_URL = self.API_URL + self.WHATSAPP_CLOUD_NUMBER_ID
    def send_text_message(self, message, phone_number):
payload = {
"messaging_product": 'whatsapp',
"to": phone_number,
"type": "text",
"text": {
"preview_url": False,
"body": message
}
}
        response = requests.post(f"{self.API_URL}/messages", json=payload, headers=self.headers)
print(response.status_code)
assert response.status_code == 200, "Error sending message"
return response.status_code
os.environ["REPLICATE_API_TOKEN"] = "<your replicate api token>"
llama2_13b_chat = "meta/llama-2-13b-chat:f4e2de70d66816a838a89eeeb621910adffb0dd0baba3976c96980970978018d"
llm = Replicate(
model=llama2_13b_chat,
model_kwargs={"temperature": 0.01, "top_p": 1, "max_new_tokens":500}
)
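# temperature close to 0 keeps replies nearly deterministic; max_new_tokens caps the length of each generated answer.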
client = WhatsAppClient()
app = Flask(__name__)
@app.route("/")
def hello_llama():
return "<p>Hello Llama 2</p>"
@app.route('/msgrcvd', methods=['POST', 'GET'])
def msgrcvd():
message = request.args.get('message')
#client.send_template_message("hello_world", "en_US", "14086745477")
answer = llm(message)
print(message)
print(answer)
    client.send_text_message(answer, "14086745477")
return message + "<p/>" + answer
| [
"langchain.llms.Replicate"
] | [((1502, 1609), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (1511, 1609), False, 'from langchain.llms import Replicate\n'), ((1647, 1662), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (1652, 1662), False, 'from flask import Flask\n'), ((1815, 1842), 'flask.request.args.get', 'request.args.get', (['"""message"""'], {}), "('message')\n", (1831, 1842), False, 'from flask import request\n'), ((1101, 1178), 'requests.post', 'requests.post', (['f"""{self.API_URL}/messages"""'], {'json': 'payload', 'headers': 'self.headers'}), "(f'{self.API_URL}/messages', json=payload, headers=self.headers)\n", (1114, 1178), False, 'import requests\n')] |
import langchain
from langchain.cache import InMemoryCache
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
langchain.llm_cache = InMemoryCache()
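# With an LLM cache configured, repeated calls with the same prompt and model parameters are served from memory instead of calling the OpenAI API again.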
llm = OpenAI(temperature=0.9)
prompt = PromptTemplate(
input_variables=["product"],
template="What is a good name for a company that makes {product}?",
)
chain = LLMChain(llm=llm, prompt=prompt)
if __name__ == "__main__":
# Run the chain only specifying the input variable.
print(chain.run("colorful socks"))
| [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate",
"langchain.cache.InMemoryCache",
"langchain.llms.OpenAI"
] | [((199, 214), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (212, 214), False, 'from langchain.cache import InMemoryCache\n'), ((223, 246), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.9)'}), '(temperature=0.9)\n', (229, 246), False, 'from langchain.llms import OpenAI\n'), ((256, 372), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['product']", 'template': '"""What is a good name for a company that makes {product}?"""'}), "(input_variables=['product'], template=\n 'What is a good name for a company that makes {product}?')\n", (270, 372), False, 'from langchain.prompts import PromptTemplate\n'), ((389, 421), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (397, 421), False, 'from langchain.chains import LLMChain\n')] |
import langchain
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
from langchain.text_splitter import CharacterTextSplitter
from steamship import File, Task
from steamship.invocable import PackageService, post
from steamship_langchain.cache import SteamshipCache
from steamship_langchain.llms import OpenAI
class SummarizeAudioPackage(PackageService):
def __init__(self, **kwargs):
super().__init__(**kwargs)
langchain.llm_cache = SteamshipCache(client=self.client)
self.llm = OpenAI(client=self.client, cache=True)
@post("summarize_file")
def summarize_file(self, file_handle: str) -> str:
file = File.get(self.client, handle=file_handle)
text_splitter = CharacterTextSplitter()
texts = []
for block in file.blocks:
texts.extend(text_splitter.split_text(block.text))
docs = [Document(page_content=t) for t in texts]
chain = load_summarize_chain(self.llm, chain_type="map_reduce")
return chain.run(docs)
@post("summarize_audio_file")
def summarize_audio_file(self, file_handle: str) -> Task[str]:
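        # Blockify (transcribe) the audio with Whisper first; invoke_later then runs summarize_file only once that task has completed.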
transcriber = self.client.use_plugin("whisper-s2t-blockifier")
audio_file = File.get(self.client, handle=file_handle)
transcribe_task = audio_file.blockify(plugin_instance=transcriber.handle)
return self.invoke_later(
"summarize_file",
wait_on_tasks=[transcribe_task],
arguments={"file_handle": audio_file.handle},
)
| [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.chains.summarize.load_summarize_chain",
"langchain.docstore.document.Document"
] | [((613, 635), 'steamship.invocable.post', 'post', (['"""summarize_file"""'], {}), "('summarize_file')\n", (617, 635), False, 'from steamship.invocable import PackageService, post\n'), ((1078, 1106), 'steamship.invocable.post', 'post', (['"""summarize_audio_file"""'], {}), "('summarize_audio_file')\n", (1082, 1106), False, 'from steamship.invocable import PackageService, post\n'), ((514, 548), 'steamship_langchain.cache.SteamshipCache', 'SteamshipCache', ([], {'client': 'self.client'}), '(client=self.client)\n', (528, 548), False, 'from steamship_langchain.cache import SteamshipCache\n'), ((568, 606), 'steamship_langchain.llms.OpenAI', 'OpenAI', ([], {'client': 'self.client', 'cache': '(True)'}), '(client=self.client, cache=True)\n', (574, 606), False, 'from steamship_langchain.llms import OpenAI\n'), ((706, 747), 'steamship.File.get', 'File.get', (['self.client'], {'handle': 'file_handle'}), '(self.client, handle=file_handle)\n', (714, 747), False, 'from steamship import File, Task\n'), ((772, 795), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {}), '()\n', (793, 795), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((985, 1040), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', (['self.llm'], {'chain_type': '"""map_reduce"""'}), "(self.llm, chain_type='map_reduce')\n", (1005, 1040), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((1266, 1307), 'steamship.File.get', 'File.get', (['self.client'], {'handle': 'file_handle'}), '(self.client, handle=file_handle)\n', (1274, 1307), False, 'from steamship import File, Task\n'), ((928, 952), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 't'}), '(page_content=t)\n', (936, 952), False, 'from langchain.docstore.document import Document\n')] |
import langchain
import os
import streamlit as st
import requests
import sounddevice as sd
import wavio
os.environ["OPENAI_API_KEY"]="ADD KEY"
import openai
from openai import OpenAI
client=OpenAI()
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.prompts import HumanMessagePromptTemplate
from langchain.schema.messages import SystemMessage
chat_template = ChatPromptTemplate.from_messages(
[
SystemMessage(
            content=(
                "You are a personal assistant for {your name} and your name is Luna. "
                "If the user calls you by any other name than Luna, you need to correct them with your original name. "
                "For every output you can also use the username in the answer, which will be a nice gesture. "
                "You can act more like a human speaking than an AI replying to the message. "
                "Consider the user as your friend. "
                "Speak like a friend. "
                "Be more creative and funny."
)
),
HumanMessagePromptTemplate.from_template("{text}"),
]
)
llm = ChatOpenAI()
# Record audio
def record_audio(filename, duration, fs):
print("Recording audio...")
recording = sd.rec(int(duration * fs), samplerate=fs, channels=2)
sd.wait()
wavio.write(filename, recording, fs, sampwidth=2)
print("Audio recorded and saved as", filename)
## Streamlit UI
st.set_page_config(page_title="Personal voice assistant ")
website_heading = "I am Your Personal Voice assistant"
# Display as a heading
st.markdown(f"<h1 style='text-align: center; color: #a274a3;'>{website_heading}</h1>", unsafe_allow_html=True)
st.write("Speak here")
if st.button(label="Click here to speak"):
audio_filename = "input.wav"
duration = 5 # Duration of the recording in seconds
fs = 44100 # Sample rate
    record_audio(audio_filename, duration, fs) ## user input recorded and stored
##converting to text using whisper
audio_file= open("input.wav", "rb")
transcript = client.audio.translations.create(
model="whisper-1",
file=audio_file)
a=transcript.text
# st.write(a)
print(a)
##model
a=llm(chat_template.format_messages(text=a))
a=a.content
##audio output
speech_file_path ="speech.mp3"
response = client.audio.speech.create(
model="tts-1",
voice="nova",
input=a)
response.stream_to_file(speech_file_path)
st.audio("speech.mp3")
| [
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.schema.messages.SystemMessage",
"langchain.chat_models.ChatOpenAI"
] | [((191, 199), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (197, 199), False, 'from openai import OpenAI\n'), ((1151, 1163), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1161, 1163), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1462, 1520), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Personal voice assistant """'}), "(page_title='Personal voice assistant ')\n", (1480, 1520), True, 'import streamlit as st\n'), ((1601, 1720), 'streamlit.markdown', 'st.markdown', (['f"""<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>"""'], {'unsafe_allow_html': '(True)'}), '(\n f"<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>",\n unsafe_allow_html=True)\n', (1612, 1720), True, 'import streamlit as st\n'), ((1714, 1736), 'streamlit.write', 'st.write', (['"""Speak here"""'], {}), "('Speak here')\n", (1722, 1736), True, 'import streamlit as st\n'), ((1740, 1778), 'streamlit.button', 'st.button', ([], {'label': '"""Click here to speak"""'}), "(label='Click here to speak')\n", (1749, 1778), True, 'import streamlit as st\n'), ((1329, 1338), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (1336, 1338), True, 'import sounddevice as sd\n'), ((1343, 1392), 'wavio.write', 'wavio.write', (['filename', 'recording', 'fs'], {'sampwidth': '(2)'}), '(filename, recording, fs, sampwidth=2)\n', (1354, 1392), False, 'import wavio\n'), ((2490, 2512), 'streamlit.audio', 'st.audio', (['"""speech.mp3"""'], {}), "('speech.mp3')\n", (2498, 2512), True, 'import streamlit as st\n'), ((468, 916), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way"""'}), "(content=\n 'You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way'\n )\n", (481, 916), False, 'from langchain.schema.messages import SystemMessage\n'), ((1084, 1134), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (1124, 1134), False, 'from langchain.prompts import HumanMessagePromptTemplate\n')] |
import langchain
import os
import streamlit as st
import requests
import sounddevice as sd
import wavio
os.environ["OPENAI_API_KEY"]="ADD KEY"
import openai
from openai import OpenAI
client=OpenAI()
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.prompts import HumanMessagePromptTemplate
from langchain.schema.messages import SystemMessage
chat_template = ChatPromptTemplate.from_messages(
[
SystemMessage(
content=(
"You are a presonal assistant for {your name] and your name is luna "
"if the user call you by any other name than luna you need to correct him by your orginal name."
"And for every output you can also use the username in the answer which will be nice gesture"
"you can act more,like an human speaking more than an ai replying to the message"
"Consider the user as your friend"
"Speak like a friend"
"Be more creative and funny way"
)
),
HumanMessagePromptTemplate.from_template("{text}"),
]
)
llm = ChatOpenAI()
# Record audio
def record_audio(filename, duration, fs):
print("Recording audio...")
recording = sd.rec(int(duration * fs), samplerate=fs, channels=2)
sd.wait()
wavio.write(filename, recording, fs, sampwidth=2)
print("Audio recorded and saved as", filename)
## Streamlit UI
st.set_page_config(page_title="Personal voice assistant ")
website_heading = "I am Your Personal Voice assistant"
# Display as a heading
st.markdown(f"<h1 style='text-align: center; color: #a274a3;'>{website_heading}</h1>", unsafe_allow_html=True)
st.write("Speak here")
if st.button(label="Click here to speak"):
audio_filename = "input.wav"
duration = 5 # Duration of the recording in seconds
fs = 44100 # Sample rate
record_audio(audio_filename, duration, fs) ## user input recorded and stores
##converting to text using whisper
audio_file= open("input.wav", "rb")
transcript = client.audio.translations.create(
model="whisper-1",
file=audio_file)
a=transcript.text
# st.write(a)
print(a)
##model
a=llm(chat_template.format_messages(text=a))
a=a.content
##audio output
speech_file_path ="speech.mp3"
response = client.audio.speech.create(
model="tts-1",
voice="nova",
input=a)
response.stream_to_file(speech_file_path)
st.audio("speech.mp3")
| [
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.schema.messages.SystemMessage",
"langchain.chat_models.ChatOpenAI"
] | [((191, 199), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (197, 199), False, 'from openai import OpenAI\n'), ((1151, 1163), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1161, 1163), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1462, 1520), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Personal voice assistant """'}), "(page_title='Personal voice assistant ')\n", (1480, 1520), True, 'import streamlit as st\n'), ((1601, 1720), 'streamlit.markdown', 'st.markdown', (['f"""<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>"""'], {'unsafe_allow_html': '(True)'}), '(\n f"<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>",\n unsafe_allow_html=True)\n', (1612, 1720), True, 'import streamlit as st\n'), ((1714, 1736), 'streamlit.write', 'st.write', (['"""Speak here"""'], {}), "('Speak here')\n", (1722, 1736), True, 'import streamlit as st\n'), ((1740, 1778), 'streamlit.button', 'st.button', ([], {'label': '"""Click here to speak"""'}), "(label='Click here to speak')\n", (1749, 1778), True, 'import streamlit as st\n'), ((1329, 1338), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (1336, 1338), True, 'import sounddevice as sd\n'), ((1343, 1392), 'wavio.write', 'wavio.write', (['filename', 'recording', 'fs'], {'sampwidth': '(2)'}), '(filename, recording, fs, sampwidth=2)\n', (1354, 1392), False, 'import wavio\n'), ((2490, 2512), 'streamlit.audio', 'st.audio', (['"""speech.mp3"""'], {}), "('speech.mp3')\n", (2498, 2512), True, 'import streamlit as st\n'), ((468, 916), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way"""'}), "(content=\n 'You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way'\n )\n", (481, 916), False, 'from langchain.schema.messages import SystemMessage\n'), ((1084, 1134), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (1124, 1134), False, 'from langchain.prompts import HumanMessagePromptTemplate\n')] |
import langchain
import os
import streamlit as st
import requests
import sounddevice as sd
import wavio
os.environ["OPENAI_API_KEY"]="ADD KEY"
import openai
from openai import OpenAI
client=OpenAI()
from langchain.prompts import ChatPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.prompts import HumanMessagePromptTemplate
from langchain.schema.messages import SystemMessage
chat_template = ChatPromptTemplate.from_messages(
[
SystemMessage(
content=(
"You are a presonal assistant for {your name] and your name is luna "
"if the user call you by any other name than luna you need to correct him by your orginal name."
"And for every output you can also use the username in the answer which will be nice gesture"
"you can act more,like an human speaking more than an ai replying to the message"
"Consider the user as your friend"
"Speak like a friend"
"Be more creative and funny way"
)
),
HumanMessagePromptTemplate.from_template("{text}"),
]
)
llm = ChatOpenAI()
# Record audio
def record_audio(filename, duration, fs):
print("Recording audio...")
recording = sd.rec(int(duration * fs), samplerate=fs, channels=2)
sd.wait()
wavio.write(filename, recording, fs, sampwidth=2)
print("Audio recorded and saved as", filename)
## Streamlit UI
st.set_page_config(page_title="Personal voice assistant ")
website_heading = "I am Your Personal Voice assistant"
# Display as a heading
st.markdown(f"<h1 style='text-align: center; color: #a274a3;'>{website_heading}</h1>", unsafe_allow_html=True)
st.write("Speak here")
if st.button(label="Click here to speak"):
audio_filename = "input.wav"
duration = 5 # Duration of the recording in seconds
fs = 44100 # Sample rate
    record_audio(audio_filename, duration, fs)  ## user input recorded and stored
    ## convert speech to text using Whisper (the translations endpoint returns English text)
    audio_file = open(audio_filename, "rb")
transcript = client.audio.translations.create(
model="whisper-1",
file=audio_file)
a=transcript.text
# st.write(a)
print(a)
##model
a=llm(chat_template.format_messages(text=a))
a=a.content
##audio output
speech_file_path ="speech.mp3"
response = client.audio.speech.create(
model="tts-1",
voice="nova",
input=a)
response.stream_to_file(speech_file_path)
st.audio("speech.mp3")
| [
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.schema.messages.SystemMessage",
"langchain.chat_models.ChatOpenAI"
] | [((191, 199), 'openai.OpenAI', 'OpenAI', ([], {}), '()\n', (197, 199), False, 'from openai import OpenAI\n'), ((1151, 1163), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1161, 1163), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1462, 1520), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Personal voice assistant """'}), "(page_title='Personal voice assistant ')\n", (1480, 1520), True, 'import streamlit as st\n'), ((1601, 1720), 'streamlit.markdown', 'st.markdown', (['f"""<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>"""'], {'unsafe_allow_html': '(True)'}), '(\n f"<h1 style=\'text-align: center; color: #a274a3;\'>{website_heading}</h1>",\n unsafe_allow_html=True)\n', (1612, 1720), True, 'import streamlit as st\n'), ((1714, 1736), 'streamlit.write', 'st.write', (['"""Speak here"""'], {}), "('Speak here')\n", (1722, 1736), True, 'import streamlit as st\n'), ((1740, 1778), 'streamlit.button', 'st.button', ([], {'label': '"""Click here to speak"""'}), "(label='Click here to speak')\n", (1749, 1778), True, 'import streamlit as st\n'), ((1329, 1338), 'sounddevice.wait', 'sd.wait', ([], {}), '()\n', (1336, 1338), True, 'import sounddevice as sd\n'), ((1343, 1392), 'wavio.write', 'wavio.write', (['filename', 'recording', 'fs'], {'sampwidth': '(2)'}), '(filename, recording, fs, sampwidth=2)\n', (1354, 1392), False, 'import wavio\n'), ((2490, 2512), 'streamlit.audio', 'st.audio', (['"""speech.mp3"""'], {}), "('speech.mp3')\n", (2498, 2512), True, 'import streamlit as st\n'), ((468, 916), 'langchain.schema.messages.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way"""'}), "(content=\n 'You are a presonal assistant for {your name] and your name is luna if the user call you by any other name than luna you need to correct him by your orginal name.And for every output you can also use the username in the answer which will be nice gestureyou can act more,like an human speaking more than an ai replying to the messageConsider the user as your friendSpeak like a friendBe more creative and funny way'\n )\n", (481, 916), False, 'from langchain.schema.messages import SystemMessage\n'), ((1084, 1134), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{text}"""'], {}), "('{text}')\n", (1124, 1134), False, 'from langchain.prompts import HumanMessagePromptTemplate\n')] |
import os
import threading
from chainlit.config import config
from chainlit.logger import logger
def init_lc_cache():
use_cache = config.project.cache is True and config.run.no_cache is False
if use_cache:
try:
import langchain
except ImportError:
return
from langchain.cache import SQLiteCache
from langchain.globals import set_llm_cache
if config.project.lc_cache_path is not None:
set_llm_cache(SQLiteCache(database_path=config.project.lc_cache_path))
if not os.path.exists(config.project.lc_cache_path):
logger.info(
f"LangChain cache created at: {config.project.lc_cache_path}"
)
_cache = {}
_cache_lock = threading.Lock()
def cache(func):
def wrapper(*args, **kwargs):
# Create a cache key based on the function name, arguments, and keyword arguments
cache_key = (
(func.__name__,) + args + tuple((k, v) for k, v in sorted(kwargs.items()))
)
with _cache_lock:
# Check if the result is already in the cache
if cache_key not in _cache:
# If not, call the function and store the result in the cache
_cache[cache_key] = func(*args, **kwargs)
return _cache[cache_key]
return wrapper
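# Hedged usage sketch (not part of the original module): `_expensive_lookup` is a hypothetical
# function showing how the `cache` decorator above memoizes results keyed on the function name
# plus its positional and keyword arguments.
if __name__ == "__main__":
    @cache
    def _expensive_lookup(x: int, scale: int = 2) -> int:
        # Stand-in for a costly computation; the body runs once per unique argument set.
        return x * scale

    print(_expensive_lookup(3))  # computed and stored under ('_expensive_lookup', 3)
    print(_expensive_lookup(3))  # served from _cache; the function body is not re-run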
| [
"langchain.cache.SQLiteCache"
] | [((767, 783), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (781, 783), False, 'import threading\n'), ((487, 542), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': 'config.project.lc_cache_path'}), '(database_path=config.project.lc_cache_path)\n', (498, 542), False, 'from langchain.cache import SQLiteCache\n'), ((564, 608), 'os.path.exists', 'os.path.exists', (['config.project.lc_cache_path'], {}), '(config.project.lc_cache_path)\n', (578, 608), False, 'import os\n'), ((626, 700), 'chainlit.logger.logger.info', 'logger.info', (['f"""LangChain cache created at: {config.project.lc_cache_path}"""'], {}), "(f'LangChain cache created at: {config.project.lc_cache_path}')\n", (637, 700), False, 'from chainlit.logger import logger\n')] |
import logging
import requests
from typing import Optional, List, Dict, Mapping, Any
import langchain
from langchain.llms.base import LLM
from langchain.cache import InMemoryCache
logging.basicConfig(level=logging.INFO)
# Enable the LLM cache
langchain.llm_cache = InMemoryCache()
class AgentZhipuAI(LLM):
import zhipuai as zhipuai
    # Model service URL
    url = "127.0.0.1"
    zhipuai.api_key = "1f565e40af1198e11ff1fd8a5b42771d.SjNfezc40YFsz2KC"  # API key obtained from the console
    model = "chatglm_pro"  # large-model version
    history = []  # conversation history (class-level attribute, shared across instances)
    def getText(self, role, content):
        # role specifies the speaker role, content is the prompt text
jsoncon = {}
jsoncon["role"] = role
jsoncon["content"] = content
self.history.append(jsoncon)
return self.history
@property
def _llm_type(self) -> str:
return "AgentZhipuAI"
@classmethod
def _post(self, url: str, query: Dict) -> Any:
"""POST请求"""
response = requests.post(url, data=query).json()
return response
def _call(self, prompt: str, stop: Optional[List[str]] = None,role = "user") -> str:
"""_call"""
# construct query
response = self.zhipuai.model_api.invoke(
model=self.model,
prompt=self.getText(role=role, content=prompt)
)
choices = (response['data']['choices'])[0]
self.history.append(choices)
return choices["content"]
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters.
"""
_param_dict = {
"url": self.url
}
return _param_dict
if __name__ == '__main__':
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
llm = AgentZhipuAI()
    # Example prompt with no input variables
no_input_prompt = PromptTemplate(input_variables=[], template="给我讲个笑话。")
no_input_prompt.format()
prompt = PromptTemplate(
input_variables=["location", "street"],
template="作为一名专业的旅游顾问,简单的说一下{location}有什么好玩的景点,特别是在{street}?只要说一个就可以。",
)
chain = LLMChain(llm=llm, prompt=prompt)
print(chain.run({"location": "南京", "street": "新街口"}))
from langchain.chains import ConversationChain
conversation = ConversationChain(llm=llm, verbose=True)
output = conversation.predict(input="你好!")
print(output)
output = conversation.predict(input="南京是哪里的省会?")
print(output)
output = conversation.predict(input="那里有什么好玩的地方,简单的说一个就好。")
print(output)
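    # Hedged note (not in the original code): `history` is a class-level attribute, so every
    # AgentZhipuAI instance shares one conversation log. Clearing it starts a fresh conversation:
    llm.history.clear()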
| [
"langchain.chains.LLMChain",
"langchain.cache.InMemoryCache",
"langchain.prompts.PromptTemplate",
"langchain.chains.ConversationChain"
] | [((183, 222), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (202, 222), False, 'import logging\n'), ((256, 271), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (269, 271), False, 'from langchain.cache import InMemoryCache\n'), ((1830, 1884), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': '[]', 'template': '"""给我讲个笑话。"""'}), "(input_variables=[], template='给我讲个笑话。')\n", (1844, 1884), False, 'from langchain.prompts import PromptTemplate\n'), ((1928, 2059), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['location', 'street']", 'template': '"""作为一名专业的旅游顾问,简单的说一下{location}有什么好玩的景点,特别是在{street}?只要说一个就可以。"""'}), "(input_variables=['location', 'street'], template=\n '作为一名专业的旅游顾问,简单的说一下{location}有什么好玩的景点,特别是在{street}?只要说一个就可以。')\n", (1942, 2059), False, 'from langchain.prompts import PromptTemplate\n'), ((2091, 2123), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (2099, 2123), False, 'from langchain.chains import LLMChain\n'), ((2254, 2294), 'langchain.chains.ConversationChain', 'ConversationChain', ([], {'llm': 'llm', 'verbose': '(True)'}), '(llm=llm, verbose=True)\n', (2271, 2294), False, 'from langchain.chains import ConversationChain\n'), ((942, 972), 'requests.post', 'requests.post', (['url'], {'data': 'query'}), '(url, data=query)\n', (955, 972), False, 'import requests\n')] |
'''
Example script to automatically write a screenplay from a newsgroup post using agents with Crew.ai (https://github.com/joaomdmoura/crewAI)
You can also try it out with a personal email with many replies back and forth and see it turn into a movie script.
Demonstrates:
  - multiple API endpoints (official Mistral, Together.ai, Anyscale)
- running single tasks: spam detection and scoring
- running a crew to create a screenplay from a newsgroup post by first analyzing the text, creating a dialogue and ultimately formatting it
Additional endpoints requirements:
pip install langchain_mistralai
pip install langchain-together
Author: Toon Beerten ([email protected])
License: MIT
'''
import os
import re
from crewai import Agent, Task, Crew, Process
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.chat_models import openai
#endpoint specific imports
import langchain_mistralai
from langchain_mistralai.chat_models import ChatMistralAI
from langchain_community.llms import Together
from langchain_community.chat_models import ChatAnyscale
## Choose here which API endpoint to use, uncomment only one:
# Official Mistral: benefit of having access to mistral-medium
# Together.ai: lots of models to choose from
# Anyscale: cheapest at the time of writing
#endpoint = 'mistral_official'
#endpoint = 'togetherai'
endpoint = 'mistral_official'
# put your API keys here
mistral_key = ''
togetherai_key = ''
anyscale_key = ''
# model choice: I already have good results with mistralai/Mistral-7B-Instruct-v0.2
if endpoint == 'mistral_official':
mixtral=ChatMistralAI(mistral_api_key=mistral_key, model="mistral-tiny",temperature=0.6)
elif endpoint == 'togetherai':
    # I get timeouts using Together(), so I use ChatOpenAI() instead
#mixtral = Together(model="mistralai/Mistral-7B-Instruct-v0.2", together_api_key=togetherai_key ) #or mistralai/Mixtral-8x7B-Instruct-v0.1
mixtral= openai.ChatOpenAI(base_url="https://api.together.xyz/v1", api_key=togetherai_key, temperature=0.5, model="mistralai/Mistral-7B-Instruct-v0.2")
elif endpoint == 'anyscale':
mixtral = ChatAnyscale(model='mistralai/Mistral-7B-Instruct-v0.1', api_key=anyscale_key, streaming=False)
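# Hedged sanity check (not in the original script): a one-off call to confirm the chosen
# endpoint responds before the agents are built; `invoke` is assumed from the LangChain
# chat-model interface, and the line stays commented so no tokens are spent by default.
# print(mixtral.invoke("Reply with the single word OK").content)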
## Define Agents
spamfilter = Agent(
role='spamfilter',
goal='''Decide whether a text is spam or not.''',
backstory='You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
analyst = Agent(
role='analyse',
goal='''You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain.''',
backstory='You are an expert discussion analyst.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
scriptwriter = Agent(
role='scriptwriter',
goal='Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals.',
backstory='''You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes.''',
llm=mixtral,
verbose=True,
allow_delegation=False
)
formatter = Agent(
role='formatter',
goal='''Format the text as asked. Leave out actions from discussion members that happen between brackets, eg (smiling).''',
backstory='You are an expert text formatter.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
scorer = Agent(
role='scorer',
goal='''You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:
Scale:
1-3: Poor - The dialogue has significant issues that prevent effective communication.
4-6: Average - The dialogue has some good points but also has notable weaknesses.
7-9: Good - The dialogue is mostly effective with minor issues.
10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.
Factors to Consider:
Clarity: How clear is the exchange? Are the statements and responses easy to understand?
Relevance: Do the responses stay on topic and contribute to the conversation's purpose?
Conciseness: Is the dialogue free of unnecessary information or redundancy?
Politeness: Are the participants respectful and considerate in their interaction?
Engagement: Do the participants seem interested and actively involved in the dialogue?
Flow: Is there a natural progression of ideas and responses? Are there awkward pauses or interruptions?
Coherence: Does the dialogue make logical sense as a whole?
Responsiveness: Do the participants address each other's points adequately?
Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?
Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?
''',
backstory='You are an expert at scoring conversations on a scale of 1 to 10.',
llm=mixtral,
verbose=True,
allow_delegation=False
)
#this is one example of a public post in the newsgroup alt.atheism
#try it out yourself by replacing this with your own email thread or text or ...
discussion = '''From: [email protected] (Keith Allan Schneider)
Subject: Re: <Political Atheists?
Organization: California Institute of Technology, Pasadena
Lines: 50
NNTP-Posting-Host: punisher.caltech.edu
[email protected] (Robert Beauchaine) writes:
>>I think that about 70% (or so) people approve of the
>>death penalty, even realizing all of its shortcomings. Doesn't this make
>>it reasonable? Or are *you* the sole judge of reasonability?
>Aside from revenge, what merits do you find in capital punishment?
Are we talking about me, or the majority of the people that support it?
Anyway, I think that "revenge" or "fairness" is why most people are in
favor of the punishment. If a murderer is going to be punished, people
that think that he should "get what he deserves." Most people wouldn't
think it would be fair for the murderer to live, while his victim died.
>Revenge? Petty and pathetic.
Perhaps you think that it is petty and pathetic, but your views are in the
minority.
>We have a local televised hot topic talk show that very recently
>did a segment on capital punishment. Each and every advocate of
>the use of this portion of our system of "jurisprudence" cited the
>main reason for supporting it: "That bastard deserved it". True
>human compassion, forgiveness, and sympathy.
Where are we required to have compassion, forgiveness, and sympathy? If
someone wrongs me, I will take great lengths to make sure that his advantage
is removed, or a similar situation is forced upon him. If someone kills
another, then we can apply the golden rule and kill this person in turn.
Is not our entire moral system based on such a concept?
Or, are you stating that human life is sacred, somehow, and that it should
never be violated? This would sound like some sort of religious view.
>>I mean, how reasonable is imprisonment, really, when you think about it?
>>Sure, the person could be released if found innocent, but you still
>>can't undo the imiprisonment that was served. Perhaps we shouldn't
>>imprision people if we could watch them closely instead. The cost would
>>probably be similar, especially if we just implanted some sort of
>>electronic device.
>Would you rather be alive in prison or dead in the chair?
Once a criminal has committed a murder, his desires are irrelevant.
And, you still have not answered my question. If you are concerned about
the death penalty due to the possibility of the execution of an innocent,
then why isn't this same concern shared with imprisonment. Shouldn't we,
by your logic, administer as minimum as punishment as possible, to avoid
violating the liberty or happiness of an innocent person?
keith
'''
# Filter out spam and vulgar posts
task0 = Task(description='Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n' + discussion, agent=spamfilter)
result = task0.execute()
if "STOP" in result:
    # stop here and proceed to the next post (this demo only prints and falls through; see the sketch below)
print('This spam message will be filtered out')
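    # Hedged sketch (not in the original): execution falls through to the crew below even for a
    # filtered post. In a plain single-post script, the commented line is one minimal way to
    # actually skip it; it is left commented so the demo's behaviour is unchanged.
    # raise SystemExit("post filtered out")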
# process post with a crew of agents, ultimately delivering a well formatted dialogue
task1 = Task(description='Analyse in much detail the following discussion:\n### DISCUSSION:\n' + discussion, agent=analyst)
task2 = Task(description='Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes.', agent=scriptwriter)
task3 = Task(description='''Format the script exactly like this:
## (person 1):
(first text line from person 1)
## (person 2):
(first text line from person 2)
## (person 1):
(second text line from person 1)
## (person 2):
(second text line from person 2)
''', agent=formatter)
crew = Crew(
agents=[analyst, scriptwriter,formatter],
tasks=[task1, task2, task3],
verbose=2, # Crew verbose more will let you know what tasks are being worked on, you can set it to 1 or 2 to different logging levels
process=Process.sequential # Sequential process will have tasks executed one after the other and the outcome of the previous one is passed as extra content into this next.
)
result = crew.kickoff()
#get rid of directions and actions between brackets, eg: (smiling)
result = re.sub(r'\(.*?\)', '', result)
print('===================== end result from crew ===================================')
print(result)
print('===================== score ==================================================')
task4 = Task(description='Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. Do not give an explanation.\n'+result, agent=scorer)
score = task4.execute()
score = score.split('\n')[0] #sometimes an explanation comes after score, ignore
print(f'Scoring the dialogue as: {score}/10') | [
"langchain.chat_models.openai.ChatOpenAI",
"langchain_community.chat_models.ChatAnyscale",
"langchain_mistralai.chat_models.ChatMistralAI"
] | [((2292, 2556), 'crewai.Agent', 'Agent', ([], {'role': '"""spamfilter"""', 'goal': '"""Decide whether a text is spam or not."""', 'backstory': '"""You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='spamfilter', goal='Decide whether a text is spam or not.',\n backstory=\n 'You are an expert spam filter with years of experience. You DETEST advertisements, newsletters and vulgar language.'\n , llm=mixtral, verbose=True, allow_delegation=False)\n", (2297, 2556), False, 'from crewai import Agent, Task, Crew, Process\n'), ((2581, 2886), 'crewai.Agent', 'Agent', ([], {'role': '"""analyse"""', 'goal': '"""You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain."""', 'backstory': '"""You are an expert discussion analyst."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='analyse', goal=\n 'You will distill all arguments from all discussion members. Identify who said what. You can reword what they said as long as the main discussion points remain.'\n , backstory='You are an expert discussion analyst.', llm=mixtral,\n verbose=True, allow_delegation=False)\n", (2586, 2886), False, 'from crewai import Agent, Task, Crew, Process\n'), ((2916, 3352), 'crewai.Agent', 'Agent', ([], {'role': '"""scriptwriter"""', 'goal': '"""Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals."""', 'backstory': '"""You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='scriptwriter', goal=\n 'Turn a conversation into a movie script. Only write the dialogue parts. Do not start the sentence with an action. Do not specify situational descriptions. Do not write parentheticals.'\n , backstory=\n 'You are an expert on writing natural sounding movie script dialogues. You only focus on the text part and you HATE directional notes.'\n , llm=mixtral, verbose=True, allow_delegation=False)\n", (2921, 3352), False, 'from crewai import Agent, Task, Crew, Process\n'), ((3375, 3631), 'crewai.Agent', 'Agent', ([], {'role': '"""formatter"""', 'goal': '"""Format the text as asked. Leave out actions from discussion members that happen between brackets, eg (smiling)."""', 'backstory': '"""You are an expert text formatter."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), "(role='formatter', goal=\n 'Format the text as asked. 
Leave out actions from discussion members that happen between brackets, eg (smiling).'\n , backstory='You are an expert text formatter.', llm=mixtral, verbose=\n True, allow_delegation=False)\n", (3380, 3631), False, 'from crewai import Agent, Task, Crew, Process\n'), ((3654, 5254), 'crewai.Agent', 'Agent', ([], {'role': '"""scorer"""', 'goal': '"""You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:\n Scale:\n 1-3: Poor - The dialogue has significant issues that prevent effective communication.\n 4-6: Average - The dialogue has some good points but also has notable weaknesses.\n 7-9: Good - The dialogue is mostly effective with minor issues.\n 10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.\n Factors to Consider:\n Clarity: How clear is the exchange? Are the statements and responses easy to understand?\n Relevance: Do the responses stay on topic and contribute to the conversation\'s purpose?\n Conciseness: Is the dialogue free of unnecessary information or redundancy?\n Politeness: Are the participants respectful and considerate in their interaction?\n Engagement: Do the participants seem interested and actively involved in the dialogue?\n Flow: Is there a natural progression of ideas and responses? Are there awkward pauses or interruptions?\n Coherence: Does the dialogue make logical sense as a whole?\n Responsiveness: Do the participants address each other\'s points adequately?\n Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?\n Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?\n """', 'backstory': '"""You are an expert at scoring conversations on a scale of 1 to 10."""', 'llm': 'mixtral', 'verbose': '(True)', 'allow_delegation': '(False)'}), '(role=\'scorer\', goal=\n """You score a dialogue assessing various aspects of the exchange between the participants using a 1-10 scale, where 1 is the lowest performance and 10 is the highest:\n Scale:\n 1-3: Poor - The dialogue has significant issues that prevent effective communication.\n 4-6: Average - The dialogue has some good points but also has notable weaknesses.\n 7-9: Good - The dialogue is mostly effective with minor issues.\n 10: Excellent - The dialogue is exemplary in achieving its purpose with no apparent issues.\n Factors to Consider:\n Clarity: How clear is the exchange? Are the statements and responses easy to understand?\n Relevance: Do the responses stay on topic and contribute to the conversation\'s purpose?\n Conciseness: Is the dialogue free of unnecessary information or redundancy?\n Politeness: Are the participants respectful and considerate in their interaction?\n Engagement: Do the participants seem interested and actively involved in the dialogue?\n Flow: Is there a natural progression of ideas and responses? 
Are there awkward pauses or interruptions?\n Coherence: Does the dialogue make logical sense as a whole?\n Responsiveness: Do the participants address each other\'s points adequately?\n Language Use: Is the grammar, vocabulary, and syntax appropriate for the context of the dialogue?\n Emotional Intelligence: Are the participants aware of and sensitive to the emotional tone of the dialogue?\n """\n , backstory=\n \'You are an expert at scoring conversations on a scale of 1 to 10.\',\n llm=mixtral, verbose=True, allow_delegation=False)\n', (3659, 5254), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8227, 8430), 'crewai.Task', 'Task', ([], {'description': '("""Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n"""\n + discussion)', 'agent': 'spamfilter'}), '(description=\n """Read the following newsgroup post. If this contains vulgar language reply with STOP . If this is spam reply with STOP.\n### NEWGROUP POST:\n"""\n + discussion, agent=spamfilter)\n', (8231, 8430), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8659, 8785), 'crewai.Task', 'Task', ([], {'description': '("""Analyse in much detail the following discussion:\n### DISCUSSION:\n""" +\n discussion)', 'agent': 'analyst'}), '(description=\n """Analyse in much detail the following discussion:\n### DISCUSSION:\n""" +\n discussion, agent=analyst)\n', (8663, 8785), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8784, 8991), 'crewai.Task', 'Task', ([], {'description': '"""Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes."""', 'agent': 'scriptwriter'}), "(description=\n 'Create a dialogue heavy screenplay from the discussion, between two persons. Do NOT write parentheticals. Leave out wrylies. You MUST SKIP directional notes.'\n , agent=scriptwriter)\n", (8788, 8991), False, 'from crewai import Agent, Task, Crew, Process\n'), ((8991, 9332), 'crewai.Task', 'Task', ([], {'description': '"""Format the script exactly like this:\n ## (person 1):\n (first text line from person 1)\n \n ## (person 2):\n (first text line from person 2)\n \n ## (person 1):\n (second text line from person 1)\n \n ## (person 2):\n (second text line from person 2)\n \n """', 'agent': 'formatter'}), '(description=\n """Format the script exactly like this:\n ## (person 1):\n (first text line from person 1)\n \n ## (person 2):\n (first text line from person 2)\n \n ## (person 1):\n (second text line from person 1)\n \n ## (person 2):\n (second text line from person 2)\n \n """\n , agent=formatter)\n', (8995, 9332), False, 'from crewai import Agent, Task, Crew, Process\n'), ((9344, 9463), 'crewai.Crew', 'Crew', ([], {'agents': '[analyst, scriptwriter, formatter]', 'tasks': '[task1, task2, task3]', 'verbose': '(2)', 'process': 'Process.sequential'}), '(agents=[analyst, scriptwriter, formatter], tasks=[task1, task2, task3],\n verbose=2, process=Process.sequential)\n', (9348, 9463), False, 'from crewai import Agent, Task, Crew, Process\n'), ((9847, 9878), 're.sub', 're.sub', (['"""\\\\(.*?\\\\)"""', '""""""', 'result'], {}), "('\\\\(.*?\\\\)', '', result)\n", (9853, 9878), False, 'import re\n'), ((10082, 10288), 'crewai.Task', 'Task', ([], {'description': '("""Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. 
Do not give an explanation.\n"""\n + result)', 'agent': 'scorer'}), '(description=\n """Read the following dialogue. Then score the script on a scale of 1 to 10. Only give the score as a number, nothing else. Do not give an explanation.\n"""\n + result, agent=scorer)\n', (10086, 10288), False, 'from crewai import Agent, Task, Crew, Process\n'), ((1635, 1720), 'langchain_mistralai.chat_models.ChatMistralAI', 'ChatMistralAI', ([], {'mistral_api_key': 'mistral_key', 'model': '"""mistral-tiny"""', 'temperature': '(0.6)'}), "(mistral_api_key=mistral_key, model='mistral-tiny',\n temperature=0.6)\n", (1648, 1720), False, 'from langchain_mistralai.chat_models import ChatMistralAI\n'), ((1970, 2122), 'langchain.chat_models.openai.ChatOpenAI', 'openai.ChatOpenAI', ([], {'base_url': '"""https://api.together.xyz/v1"""', 'api_key': 'togetherai_key', 'temperature': '(0.5)', 'model': '"""mistralai/Mistral-7B-Instruct-v0.2"""'}), "(base_url='https://api.together.xyz/v1', api_key=\n togetherai_key, temperature=0.5, model='mistralai/Mistral-7B-Instruct-v0.2'\n )\n", (1987, 2122), False, 'from langchain.chat_models import openai\n'), ((2157, 2257), 'langchain_community.chat_models.ChatAnyscale', 'ChatAnyscale', ([], {'model': '"""mistralai/Mistral-7B-Instruct-v0.1"""', 'api_key': 'anyscale_key', 'streaming': '(False)'}), "(model='mistralai/Mistral-7B-Instruct-v0.1', api_key=\n anyscale_key, streaming=False)\n", (2169, 2257), False, 'from langchain_community.chat_models import ChatAnyscale\n')] |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import contextmanager
from contextvars import ContextVar
from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.schemas import TracerSession
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
logger = logging.getLogger(__name__)
Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar("openai_callback", default=None)
tracing_callback_var: ContextVar[Optional[LangChainTracerV1]] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
tracing_v2_callback_var: ContextVar[Optional[LangChainTracer]] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
def _get_debug() -> bool:
return langchain.debug
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get OpenAI callback handler in a context manager."""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
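# --- Illustrative usage sketch (not part of the original module) ---
# Track OpenAI usage for everything run inside the block. It assumes the
# handler exposes aggregate counters such as `total_tokens`, and `llm` is a
# hypothetical callback-aware LLM instance:
#
#     with get_openai_callback() as cb:
#         llm.generate(["Tell me a joke"])
#         print(cb.total_tokens)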
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get Tracer in a context manager."""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb)
yield session
tracing_callback_var.set(None)
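# --- Illustrative usage sketch (not part of the original module) ---
# Enable the v1 tracer for a block of calls; the yielded object is the loaded
# TracerSessionV1. `agent` is a hypothetical callback-aware component:
#
#     with tracing_enabled("my-session") as session:
#         agent.run("What is 2 + 2?")  # runs are recorded under `session`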
@contextmanager
def tracing_v2_enabled(
session_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
tenant_id: Optional[str] = None,
session_extra: Optional[Dict[str, Any]] = None,
) -> Generator[TracerSession, None, None]:
"""Get the experimental tracer handler in a context manager."""
# Issue a warning that this is experimental
warnings.warn(
"The experimental tracing v2 is in development. " "This is not yet stable and may change in the future."
)
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
tenant_id=tenant_id,
session_name=session_name,
example_id=example_id,
session_extra=session_extra,
)
session = cb.ensure_session()
tracing_v2_callback_var.set(cb)
yield session
tracing_v2_callback_var.set(None)
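# --- Illustrative usage sketch (not part of the original module) ---
# The experimental v2 tracer is used the same way; the keyword-only fields are
# optional. `chain` is a hypothetical callback-aware chain:
#
#     with tracing_v2_enabled(session_name="debug-run") as session:
#         chain({"input": "hello"})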
def _handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager."""
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
getattr(handler, event_name)(*args, **kwargs)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
_handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
logging.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(None, functools.partial(event, *args, **kwargs))
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
logger.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager."""
await asyncio.gather(
*(
_ahandle_event_for_handler(handler, event_name, ignore_condition_name, *args, **kwargs)
for handler in handlers
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
) -> None:
"""Initialize run manager."""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations."""
return cls(uuid4(), [], [])
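# --- Illustrative usage sketch (not part of the original module) ---
# A no-op manager is convenient when invoking a component's private hooks
# directly (e.g. in tests): it carries a fresh run_id but no handlers, so
# every dispatched event is silently dropped.
#
#     run_manager = CallbackManagerForLLMRun.get_noop_manager()
#     run_manager.on_llm_new_token("hi")  # dispatches to zero handlers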
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
_handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncRunManager(BaseRunManager):
"""Async Run Manager."""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
await _ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
_handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
_handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
_handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
async def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
await _ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
await _ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
await _ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
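    # --- Illustrative usage sketch (not part of the original class) ---
    # Inside a chain's _call(), the child manager forwards the inheritable
    # handlers to nested components. `self.sub_chain` is hypothetical:
    #
    #     def _call(self, inputs, run_manager=None):
    #         child = run_manager.get_child() if run_manager else None
    #         return {"out": self.sub_chain.run(inputs["in"], callbacks=child)}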
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
_handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
_handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
_handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
_handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
await _ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
await _ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
await _ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
await _ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running."""
_handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
_handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
    def on_tool_end_data_model(
        self,
        output: Any,
        **kwargs: Any,
    ) -> None:
        """Dispatch the data-model variant of the on_tool_end event."""
_handle_event(
self.handlers,
"on_tool_end_data_model",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
await _ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
await _ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that can be used to handle callbacks from langchain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
# Re-use the LLM Run Manager since the outputs are treated
# the same for now
return CallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForChainRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForToolRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> CallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
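    # --- Illustrative usage sketch (not part of the original class) ---
    # Merge inherited and local handlers, then drive a run manually:
    #
    #     manager = CallbackManager.configure(
    #         inheritable_callbacks=None,
    #         local_callbacks=[StdOutCallbackHandler()],
    #         verbose=False,
    #     )
    #     run_manager = manager.on_chain_start({"name": "MyChain"}, {"input": "hi"})
    #     run_manager.on_chain_end({"output": "done"})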
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that can be used to handle callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForChainRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForToolRun(run_id, self.handlers, self.inheritable_handlers, self.parent_run_id)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> AsyncCallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> T:
"""Configure the callback manager."""
callback_manager = callback_manager_cls([])
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers,
inheritable_handlers=inheritable_callbacks.inheritable_handlers,
parent_run_id=inheritable_callbacks.parent_run_id,
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
tracer = tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
os.environ.get("LANGCHAIN_TRACING") is not None
or tracer is not None
or os.environ.get("LANGCHAIN_HANDLER") is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = os.environ.get("LANGCHAIN_TRACING_V2") is not None or tracer_v2 is not None
tracer_session = os.environ.get("LANGCHAIN_SESSION")
debug = _get_debug()
if tracer_session is None:
tracer_session = "default"
if verbose or debug or tracing_enabled_ or tracing_v2_enabled_ or open_ai is not None:
if verbose and not any(isinstance(handler, StdOutCallbackHandler) for handler in callback_manager.handlers):
if debug:
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(isinstance(handler, ConsoleCallbackHandler) for handler in callback_manager.handlers):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1) for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_session)
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer) for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(session_name=tracer_session)
handler.ensure_session()
callback_manager.add_handler(handler, True)
except Exception as e:
logger.debug("Unable to load requested LangChainTracer", e)
if open_ai is not None and not any(
isinstance(handler, OpenAICallbackHandler) for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
return callback_manager
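# --- Illustrative configuration note (not part of the original module) ---
# Tracing can also be switched on through the environment variables checked
# above; any set value (not just "true") enables the corresponding tracer the
# next time _configure() runs:
#
#     import os
#     os.environ["LANGCHAIN_TRACING_V2"] = "true"     # adds a LangChainTracer
#     os.environ["LANGCHAIN_SESSION"] = "my-session"  # session used by tracers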
| [
"langchain.schema.get_buffer_string",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.tracers.langchain_v1.LangChainTracerV1"
] | [((1036, 1063), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1053, 1063), False, 'import logging\n'), ((1208, 1251), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1218, 1251), False, 'from contextvars import ContextVar\n'), ((1316, 1360), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback"""'], {'default': 'None'}), "('tracing_callback', default=None)\n", (1326, 1360), False, 'from contextvars import ContextVar\n'), ((1446, 1493), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback_v2"""'], {'default': 'None'}), "('tracing_callback_v2', default=None)\n", (1456, 1493), False, 'from contextvars import ContextVar\n'), ((5790, 5828), 'typing.TypeVar', 'TypeVar', (['"""BRM"""'], {'bound': '"""BaseRunManager"""'}), "('BRM', bound='BaseRunManager')\n", (5797, 5828), False, 'from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((22935, 22986), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'CallbackManager', 'AsyncCallbackManager'], {}), "('T', CallbackManager, AsyncCallbackManager)\n", (22942, 22986), False, 'from typing import Any, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((1731, 1754), 'langchain.callbacks.openai_info.OpenAICallbackHandler', 'OpenAICallbackHandler', ([], {}), '()\n', (1752, 1754), False, 'from langchain.callbacks.openai_info import OpenAICallbackHandler\n'), ((2005, 2024), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (2022, 2024), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((2570, 2696), 'warnings.warn', 'warnings.warn', (['"""The experimental tracing v2 is in development. This is not yet stable and may change in the future."""'], {}), "(\n 'The experimental tracing v2 is in development. 
This is not yet stable and may change in the future.'\n )\n", (2583, 2696), False, 'import warnings\n'), ((2787, 2907), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'tenant_id': 'tenant_id', 'session_name': 'session_name', 'example_id': 'example_id', 'session_extra': 'session_extra'}), '(tenant_id=tenant_id, session_name=session_name, example_id=\n example_id, session_extra=session_extra)\n', (2802, 2907), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((24635, 24670), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_SESSION"""'], {}), "('LANGCHAIN_SESSION')\n", (24649, 24670), False, 'import os\n'), ((2761, 2777), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2765, 2777), False, 'from uuid import UUID, uuid4\n'), ((4562, 4596), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['event'], {}), '(event)\n', (4589, 4596), False, 'import asyncio\n'), ((6507, 6514), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (6512, 6514), False, 'from uuid import UUID, uuid4\n'), ((16682, 16689), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (16687, 16689), False, 'from uuid import UUID, uuid4\n'), ((17367, 17374), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (17372, 17374), False, 'from uuid import UUID, uuid4\n'), ((18148, 18155), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18153, 18155), False, 'from uuid import UUID, uuid4\n'), ((18861, 18868), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18866, 18868), False, 'from uuid import UUID, uuid4\n'), ((20121, 20128), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20126, 20128), False, 'from uuid import UUID, uuid4\n'), ((20760, 20767), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20765, 20767), False, 'from uuid import UUID, uuid4\n'), ((21471, 21478), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (21476, 21478), False, 'from uuid import UUID, uuid4\n'), ((22207, 22214), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (22212, 22214), False, 'from uuid import UUID, uuid4\n'), ((24322, 24357), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_TRACING"""'], {}), "('LANGCHAIN_TRACING')\n", (24336, 24357), False, 'import os\n'), ((24411, 24446), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HANDLER"""'], {}), "('LANGCHAIN_HANDLER')\n", (24425, 24446), False, 'import os\n'), ((24538, 24576), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_TRACING_V2"""'], {}), "('LANGCHAIN_TRACING_V2')\n", (24552, 24576), False, 'import os\n'), ((4161, 4216), 'logging.warning', 'logging.warning', (['f"""Error in {event_name} callback: {e}"""'], {}), "(f'Error in {event_name} callback: {e}')\n", (4176, 4216), False, 'import logging\n'), ((25265, 25289), 'langchain.callbacks.tracers.stdout.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (25287, 25289), False, 'from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler\n'), ((25567, 25586), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (25584, 25586), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((4889, 4909), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (4906, 4909), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((25076, 25099), 'langchain.callbacks.stdout.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (25097, 25099), False, 'from langchain.callbacks.stdout import StdOutCallbackHandler\n'), ((26002, 
26046), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'tracer_session'}), '(session_name=tracer_session)\n', (26017, 26046), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((4730, 4771), 'functools.partial', 'functools.partial', (['event', '*args'], {}), '(event, *args, **kwargs)\n', (4747, 4771), False, 'import functools\n'), ((3713, 3733), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (3730, 3733), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((4683, 4707), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4705, 4707), False, 'import asyncio\n')] |
"""Base interface that all chains should implement."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import BaseModel, Field, root_validator, validator
import langchain
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainRun,
CallbackManager,
CallbackManagerForChainRun,
Callbacks,
)
from langchain.schema import RUN_KEY, BaseMemory, RunInfo
def _get_verbosity() -> bool:
return langchain.verbose
class Chain(BaseModel, ABC):
"""Base interface that all chains should implement."""
memory: Optional[BaseMemory] = None
callbacks: Callbacks = Field(default=None, exclude=True)
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
verbose: bool = Field(
default_factory=_get_verbosity
) # Whether to print the response text
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@property
def _chain_type(self) -> str:
raise NotImplementedError("Saving not supported for this chain type.")
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@property
@abstractmethod
def input_keys(self) -> List[str]:
"""Input keys this chain expects."""
@property
@abstractmethod
def output_keys(self) -> List[str]:
"""Output keys this chain expects."""
def _validate_inputs(self, inputs: Dict[str, Any]) -> None:
"""Check that all inputs are present."""
missing_keys = set(self.input_keys).difference(inputs)
if missing_keys:
raise ValueError(f"Missing some input keys: {missing_keys}")
def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
missing_keys = set(self.output_keys).difference(outputs)
if missing_keys:
raise ValueError(f"Missing some output keys: {missing_keys}")
@abstractmethod
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the logic of this chain and return the output."""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the logic of this chain and return the output."""
raise NotImplementedError("Async call not supported for this chain type.")
def __call__(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. If not provided, will
use the callbacks provided to the chain.
include_run_info: Whether to include run info in the response. Defaults
to False.
"""
inputs = self.prep_inputs(inputs)
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
run_manager = callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
)
try:
outputs = (
self._call(inputs, run_manager=run_manager)
if new_arg_supported
else self._call(inputs)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_chain_error(e)
raise e
run_manager.on_chain_end(outputs)
final_outputs: Dict[str, Any] = self.prep_outputs(
inputs, outputs, return_only_outputs
)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
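    # --- Illustrative usage sketch (not part of the original class) ---
    # `my_chain` stands for any concrete Chain subclass instance (hypothetical):
    #
    #     outputs = my_chain({"question": "What is 2 + 2?"}, include_run_info=True)
    #     # `outputs` contains the input keys, the generated output keys, and,
    #     # because include_run_info=True, a RunInfo entry under RUN_KEY.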
async def acall(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. If not provided, will
use the callbacks provided to the chain.
include_run_info: Whether to include run info in the response. Defaults
to False.
"""
inputs = self.prep_inputs(inputs)
callback_manager = AsyncCallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
run_manager = await callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
)
try:
outputs = (
await self._acall(inputs, run_manager=run_manager)
if new_arg_supported
else await self._acall(inputs)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_chain_error(e)
raise e
await run_manager.on_chain_end(outputs)
final_outputs: Dict[str, Any] = self.prep_outputs(
inputs, outputs, return_only_outputs
)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""Validate and prep outputs."""
self._validate_outputs(outputs)
if self.memory is not None:
self.memory.save_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Validate and prep inputs."""
if not isinstance(inputs, dict):
_input_keys = set(self.input_keys)
if self.memory is not None:
# If there are multiple input keys, but some get set by memory so that
# only one is not set, we can still figure out which key it is.
_input_keys = _input_keys.difference(self.memory.memory_variables)
if len(_input_keys) != 1:
raise ValueError(
f"A single string input was passed in, but this chain expects "
f"multiple inputs ({_input_keys}). When a chain expects "
f"multiple inputs, please call it by passing in a dictionary, "
"eg `chain({'foo': 1, 'bar': 2})`"
)
inputs = {list(_input_keys)[0]: inputs}
if self.memory is not None:
external_context = self.memory.load_memory_variables(inputs)
inputs = dict(inputs, **external_context)
self._validate_inputs(inputs)
return inputs
def apply(
self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
) -> List[Dict[str, str]]:
"""Call the chain on all inputs in the list."""
return [self(inputs, callbacks=callbacks) for inputs in input_list]
def run(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return self(args[0], callbacks=callbacks)[self.output_keys[0]]
if kwargs and not args:
return self(kwargs, callbacks=callbacks)[self.output_keys[0]]
if not kwargs and not args:
raise ValueError(
"`run` supported with either positional arguments or keyword arguments,"
" but none were provided."
)
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
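    # --- Illustrative usage sketch (not part of the original class) ---
    # `run` is the text-in/text-out convenience wrapper and requires exactly one
    # output key. `my_chain` is hypothetical:
    #
    #     answer = my_chain.run("What is 2 + 2?")           # one positional arg
    #     answer = my_chain.run(question="What is 2 + 2?")  # or keyword args only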
async def arun(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return (await self.acall(args[0], callbacks=callbacks))[self.output_keys[0]]
if kwargs and not args:
return (await self.acall(kwargs, callbacks=callbacks))[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def dict(self, **kwargs: Any) -> Dict:
"""Return dictionary representation of chain."""
if self.memory is not None:
raise ValueError("Saving of memory is not yet supported.")
_dict = super().dict()
_dict["_type"] = self._chain_type
return _dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the chain.
Args:
file_path: Path to file to save the chain to.
Example:
.. code-block:: python
chain.save(file_path="path/chain.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
chain_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(chain_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(chain_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
| [
"langchain.schema.RunInfo",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.callbacks.manager.CallbackManager.configure"
] | [((816, 849), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (821, 849), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((904, 937), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (909, 937), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((958, 995), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (963, 995), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1295, 1311), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1309, 1311), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1747, 1790), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (1756, 1790), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((4447, 4513), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (4472, 4513), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((6411, 6482), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (6441, 6482), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((1502, 1604), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (1515, 1604), False, 'import warnings\n'), ((5284, 5318), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (5291, 5318), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((7286, 7320), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7293, 7320), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((11932, 11947), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11936, 11947), False, 'from pathlib import Path\n'), ((12267, 12301), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (12276, 12301), False, 'import json\n'), ((4564, 4593), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (4581, 4593), False, 'import inspect\n'), ((6533, 6563), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', (6550, 6563), False, 'import inspect\n'), ((12404, 12454), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (12413, 12454), False, 'import yaml\n')] |
"""Base interface that all chains should implement."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import BaseModel, Field, root_validator, validator
import langchain
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForChainRun,
CallbackManager,
CallbackManagerForChainRun,
Callbacks,
)
from langchain.schema import RUN_KEY, BaseMemory, RunInfo
def _get_verbosity() -> bool:
return langchain.verbose
class Chain(BaseModel, ABC):
"""Base interface that all chains should implement."""
memory: Optional[BaseMemory] = None
callbacks: Callbacks = Field(default=None, exclude=True)
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
verbose: bool = Field(
default_factory=_get_verbosity
) # Whether to print the response text
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@property
def _chain_type(self) -> str:
raise NotImplementedError("Saving not supported for this chain type.")
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@property
@abstractmethod
def input_keys(self) -> List[str]:
"""Input keys this chain expects."""
@property
@abstractmethod
def output_keys(self) -> List[str]:
"""Output keys this chain expects."""
def _validate_inputs(self, inputs: Dict[str, Any]) -> None:
"""Check that all inputs are present."""
missing_keys = set(self.input_keys).difference(inputs)
if missing_keys:
raise ValueError(f"Missing some input keys: {missing_keys}")
def _validate_outputs(self, outputs: Dict[str, Any]) -> None:
missing_keys = set(self.output_keys).difference(outputs)
if missing_keys:
raise ValueError(f"Missing some output keys: {missing_keys}")
@abstractmethod
def _call(
self,
inputs: Dict[str, Any],
run_manager: Optional[CallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the logic of this chain and return the output."""
async def _acall(
self,
inputs: Dict[str, Any],
run_manager: Optional[AsyncCallbackManagerForChainRun] = None,
) -> Dict[str, Any]:
"""Run the logic of this chain and return the output."""
raise NotImplementedError("Async call not supported for this chain type.")
def __call__(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. If not provided, will
use the callbacks provided to the chain.
include_run_info: Whether to include run info in the response. Defaults
to False.
"""
inputs = self.prep_inputs(inputs)
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
run_manager = callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
)
try:
outputs = (
self._call(inputs, run_manager=run_manager)
if new_arg_supported
else self._call(inputs)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_chain_error(e)
raise e
run_manager.on_chain_end(outputs)
final_outputs: Dict[str, Any] = self.prep_outputs(
inputs, outputs, return_only_outputs
)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
async def acall(
self,
inputs: Union[Dict[str, Any], Any],
return_only_outputs: bool = False,
callbacks: Callbacks = None,
*,
include_run_info: bool = False,
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
callbacks: Callbacks to use for this chain run. If not provided, will
use the callbacks provided to the chain.
include_run_info: Whether to include run info in the response. Defaults
to False.
"""
inputs = self.prep_inputs(inputs)
callback_manager = AsyncCallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
run_manager = await callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
)
try:
outputs = (
await self._acall(inputs, run_manager=run_manager)
if new_arg_supported
else await self._acall(inputs)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_chain_error(e)
raise e
await run_manager.on_chain_end(outputs)
final_outputs: Dict[str, Any] = self.prep_outputs(
inputs, outputs, return_only_outputs
)
if include_run_info:
final_outputs[RUN_KEY] = RunInfo(run_id=run_manager.run_id)
return final_outputs
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""Validate and prep outputs."""
self._validate_outputs(outputs)
if self.memory is not None:
self.memory.save_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Validate and prep inputs."""
if not isinstance(inputs, dict):
_input_keys = set(self.input_keys)
if self.memory is not None:
# If there are multiple input keys, but some get set by memory so that
# only one is not set, we can still figure out which key it is.
_input_keys = _input_keys.difference(self.memory.memory_variables)
if len(_input_keys) != 1:
raise ValueError(
f"A single string input was passed in, but this chain expects "
f"multiple inputs ({_input_keys}). When a chain expects "
f"multiple inputs, please call it by passing in a dictionary, "
"eg `chain({'foo': 1, 'bar': 2})`"
)
inputs = {list(_input_keys)[0]: inputs}
if self.memory is not None:
external_context = self.memory.load_memory_variables(inputs)
inputs = dict(inputs, **external_context)
self._validate_inputs(inputs)
return inputs
def apply(
self, input_list: List[Dict[str, Any]], callbacks: Callbacks = None
) -> List[Dict[str, str]]:
"""Call the chain on all inputs in the list."""
return [self(inputs, callbacks=callbacks) for inputs in input_list]
def run(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return self(args[0], callbacks=callbacks)[self.output_keys[0]]
if kwargs and not args:
return self(kwargs, callbacks=callbacks)[self.output_keys[0]]
if not kwargs and not args:
raise ValueError(
"`run` supported with either positional arguments or keyword arguments,"
" but none were provided."
)
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
async def arun(self, *args: Any, callbacks: Callbacks = None, **kwargs: Any) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return (await self.acall(args[0], callbacks=callbacks))[self.output_keys[0]]
if kwargs and not args:
return (await self.acall(kwargs, callbacks=callbacks))[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def dict(self, **kwargs: Any) -> Dict:
"""Return dictionary representation of chain."""
if self.memory is not None:
raise ValueError("Saving of memory is not yet supported.")
_dict = super().dict()
_dict["_type"] = self._chain_type
return _dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the chain.
Args:
file_path: Path to file to save the chain to.
Example:
.. code-block:: python
chain.save(file_path="path/chain.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
chain_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(chain_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(chain_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
| [
"langchain.schema.RunInfo",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.callbacks.manager.CallbackManager.configure"
] | [((816, 849), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (821, 849), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((904, 937), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (909, 937), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((958, 995), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (963, 995), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1295, 1311), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1309, 1311), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((1747, 1790), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (1756, 1790), False, 'from pydantic import BaseModel, Field, root_validator, validator\n'), ((4447, 4513), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (4472, 4513), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((6411, 6482), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (6441, 6482), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForChainRun, CallbackManager, CallbackManagerForChainRun, Callbacks\n'), ((1502, 1604), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (1515, 1604), False, 'import warnings\n'), ((5284, 5318), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (5291, 5318), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((7286, 7320), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7293, 7320), False, 'from langchain.schema import RUN_KEY, BaseMemory, RunInfo\n'), ((11932, 11947), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11936, 11947), False, 'from pathlib import Path\n'), ((12267, 12301), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (12276, 12301), False, 'import json\n'), ((4564, 4593), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (4581, 4593), False, 'import inspect\n'), ((6533, 6563), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), '(self._acall)\n', (6550, 6563), False, 'import inspect\n'), ((12404, 12454), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (12413, 12454), False, 'import yaml\n')] |
"""Base interface for large language models to expose."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import yaml
from pydantic import Extra, Field, root_validator, validator
import langchain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForLLMRun,
CallbackManager,
CallbackManagerForLLMRun,
Callbacks,
)
from langchain.schema import (
AIMessage,
BaseMessage,
Generation,
LLMResult,
PromptValue,
RunInfo,
get_buffer_string,
)
def _get_verbosity() -> bool:
return langchain.verbose
def get_prompts(
params: Dict[str, Any], prompts: List[str]
) -> Tuple[Dict[int, List], str, List[int], List[str]]:
"""Get prompts that are already cached."""
llm_string = str(sorted([(k, v) for k, v in params.items()]))
missing_prompts = []
missing_prompt_idxs = []
existing_prompts = {}
for i, prompt in enumerate(prompts):
if langchain.llm_cache is not None:
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
existing_prompts[i] = cache_val
else:
missing_prompts.append(prompt)
missing_prompt_idxs.append(i)
return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts
def update_cache(
existing_prompts: Dict[int, List],
llm_string: str,
missing_prompt_idxs: List[int],
new_results: LLMResult,
prompts: List[str],
) -> Optional[dict]:
"""Update the cache and get the LLM output."""
for i, result in enumerate(new_results.generations):
existing_prompts[missing_prompt_idxs[i]] = result
prompt = prompts[missing_prompt_idxs[i]]
if langchain.llm_cache is not None:
langchain.llm_cache.update(prompt, llm_string, result)
llm_output = new_results.llm_output
return llm_output
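# --- Illustrative caching sketch (not part of the original module) ---
# get_prompts()/update_cache() only take effect when a global cache is set.
# One hedged way to enable it, assuming langchain.cache.InMemoryCache exists
# in this version:
#
#     import langchain
#     from langchain.cache import InMemoryCache
#     langchain.llm_cache = InMemoryCache()
#     # generate() will then reuse cached generations for repeated prompts and
#     # only call _generate() for the cache misses.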
class BaseLLM(BaseLanguageModel, ABC):
"""LLM wrapper should take in a prompt and return a string."""
cache: Optional[bool] = None
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callbacks: Callbacks = Field(default=None, exclude=True)
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@abstractmethod
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompts."""
@abstractmethod
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompts."""
def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
prompt_strings = [p.to_string() for p in prompts]
return self.generate(prompt_strings, stop=stop, callbacks=callbacks)
async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
prompt_strings = [p.to_string() for p in prompts]
return await self.agenerate(prompt_strings, stop=stop, callbacks=callbacks)
def generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# If string is passed in directly no errors will be raised but outputs will
# not make sense.
if not isinstance(prompts, list):
raise ValueError(
"Argument 'prompts' is expected to be of type List[str], received"
f" argument of type {type(prompts)}."
)
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
disregard_cache = self.cache is not None and not self.cache
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
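        # Older subclasses may define _generate without a run_manager
        # parameter; only pass it through when the signature declares it.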
new_arg_supported = inspect.signature(self._generate).parameters.get(
"run_manager"
)
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
run_manager = callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, invocation_params=params
)
try:
output = (
self._generate(prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._generate(prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
run_manager.on_llm_end(output)
if run_manager:
output.run = RunInfo(run_id=run_manager.run_id)
return output
if len(missing_prompts) > 0:
run_manager = callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
invocation_params=params,
)
try:
new_results = (
self._generate(missing_prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._generate(missing_prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
run_manager.on_llm_end(new_results)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
run_info = None
if run_manager:
run_info = RunInfo(run_id=run_manager.run_id)
else:
llm_output = {}
run_info = None
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
async def agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
disregard_cache = self.cache is not None and not self.cache
callback_manager = AsyncCallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._agenerate).parameters.get(
"run_manager"
)
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
run_manager = await callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, invocation_params=params
)
try:
output = (
await self._agenerate(prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else await self._agenerate(prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e, verbose=self.verbose)
raise e
await run_manager.on_llm_end(output, verbose=self.verbose)
if run_manager:
output.run = RunInfo(run_id=run_manager.run_id)
return output
if len(missing_prompts) > 0:
run_manager = await callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
invocation_params=params,
)
try:
new_results = (
await self._agenerate(
missing_prompts, stop=stop, run_manager=run_manager
)
if new_arg_supported
else await self._agenerate(missing_prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e)
raise e
await run_manager.on_llm_end(new_results)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
run_info = None
if run_manager:
run_info = RunInfo(run_id=run_manager.run_id)
else:
llm_output = {}
run_info = None
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
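    # Async usage mirrors the sync path; a sketch, assuming the caller drives
    # the event loop:
    #
    #     result = await llm.agenerate(["hello"], stop=["\n"])
    #
    # LLM subclasses (defined below) that leave _acall unimplemented surface a
    # NotImplementedError from this code path.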
def __call__(
self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None
) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
if not isinstance(prompt, str):
raise ValueError(
"Argument `prompt` is expected to be a string. Instead found "
f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
"`generate` instead."
)
return (
self.generate([prompt], stop=stop, callbacks=callbacks)
.generations[0][0]
.text
)
async def _call_async(
self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None
) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
result = await self.agenerate([prompt], stop=stop, callbacks=callbacks)
return result.generations[0][0].text
def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
return self(text, stop=_stop)
def predict_messages(
self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None
) -> BaseMessage:
text = get_buffer_string(messages)
if stop is None:
_stop = None
else:
_stop = list(stop)
content = self(text, stop=_stop)
return AIMessage(content=content)
async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
return await self._call_async(text, stop=_stop)
async def apredict_messages(
self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None
) -> BaseMessage:
text = get_buffer_string(messages)
if stop is None:
_stop = None
else:
_stop = list(stop)
content = await self._call_async(text, stop=_stop)
return AIMessage(content=content)
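    # predict/predict_messages (and their async twins) adapt this
    # string-in/string-out interface to the shared BaseLanguageModel API used by
    # chat models: messages are flattened with get_buffer_string() and the raw
    # completion is wrapped in an AIMessage.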
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {}
def __str__(self) -> str:
"""Get a string representation of the object for printing."""
cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
return f"{cls_name}\nParams: {self._identifying_params}"
@property
@abstractmethod
def _llm_type(self) -> str:
"""Return type of llm."""
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the LLM.
Args:
file_path: Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
prompt_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(prompt_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(prompt_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
class LLM(BaseLLM):
"""LLM class that expect subclasses to implement a simpler call method.
The purpose of this class is to expose a simpler interface for working
with LLMs, rather than expect the user to implement the full _generate method.
"""
@abstractmethod
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
raise NotImplementedError("Async generation not implemented for this LLM.")
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# TODO: add caching here.
generations = []
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
for prompt in prompts:
text = (
self._call(prompt, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._call(prompt, stop=stop)
)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
generations = []
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
for prompt in prompts:
text = (
await self._acall(prompt, stop=stop, run_manager=run_manager)
if new_arg_supported
else await self._acall(prompt, stop=stop)
)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
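# Illustrative sketch of the simpler interface described in the LLM docstring.
# EchoLLM is a hypothetical toy subclass, not part of the library: it only has
# to provide _llm_type and _call, and it inherits generate/__call__/predict.
class EchoLLM(LLM):
    """Toy LLM that deterministically echoes the prompt back."""

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        # A real subclass would call a model or remote API here.
        return f"echo: {prompt}"


if __name__ == "__main__":
    llm = EchoLLM()
    # __call__ returns only the text of the first generation.
    print(llm("hello"))
    # generate() returns the full LLMResult, one list of Generations per prompt.
    print(llm.generate(["hello", "world"]).generations)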
| [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.Generation",
"langchain.schema.get_buffer_string",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.RunInfo",
"langchain.schema.AIMessage",
"langchain.llm_cache.lookup",
"langchain.llm_cache.update",
"langchain.schema.LLMResult"
] | [((2315, 2352), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2320, 2352), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2426, 2459), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2431, 2459), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2514, 2547), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2519, 2547), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2696, 2712), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (2710, 2712), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((3148, 3191), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (3157, 3191), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((5520, 5586), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (5545, 5586), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((7818, 7889), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (7827, 7889), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((8435, 8506), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (8465, 8506), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((10893, 10964), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (10902, 10964), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12285, 12312), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12302, 12312), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12464, 12490), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (12473, 12490), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12886, 12913), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12903, 12913), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((13083, 13109), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (13092, 13109), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), 
((16290, 16324), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (16299, 16324), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((17006, 17040), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (17015, 17040), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((1248, 1294), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (1274, 1294), False, 'import langchain\n'), ((2036, 2090), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (2062, 2090), False, 'import langchain\n'), ((2903, 3005), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2916, 3005), False, 'import warnings\n'), ((14159, 14174), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14163, 14174), False, 'from pathlib import Path\n'), ((6670, 6704), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (6677, 6704), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((7625, 7659), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7632, 7659), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((9667, 9701), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (9674, 9701), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((10700, 10734), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (10707, 10734), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((14495, 14530), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (14504, 14530), False, 'import json\n'), ((5637, 5670), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (5654, 5670), False, 'import inspect\n'), ((8557, 8591), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (8574, 8591), False, 'import inspect\n'), ((14633, 14684), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (14642, 14684), False, 'import yaml\n'), ((15934, 15963), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (15951, 15963), False, 'import inspect\n'), ((16251, 16272), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16261, 16272), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16635, 16665), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), 
'(self._acall)\n', (16652, 16665), False, 'import inspect\n'), ((16967, 16988), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16977, 16988), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n')] |
"""Base interface for large language models to expose."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import yaml
from pydantic import Extra, Field, root_validator, validator
import langchain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForLLMRun,
CallbackManager,
CallbackManagerForLLMRun,
Callbacks,
)
from langchain.schema import (
AIMessage,
BaseMessage,
Generation,
LLMResult,
PromptValue,
RunInfo,
get_buffer_string,
)
def _get_verbosity() -> bool:
return langchain.verbose
def get_prompts(
params: Dict[str, Any], prompts: List[str]
) -> Tuple[Dict[int, List], str, List[int], List[str]]:
"""Get prompts that are already cached."""
llm_string = str(sorted([(k, v) for k, v in params.items()]))
missing_prompts = []
missing_prompt_idxs = []
existing_prompts = {}
for i, prompt in enumerate(prompts):
if langchain.llm_cache is not None:
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
existing_prompts[i] = cache_val
else:
missing_prompts.append(prompt)
missing_prompt_idxs.append(i)
return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts
def update_cache(
existing_prompts: Dict[int, List],
llm_string: str,
missing_prompt_idxs: List[int],
new_results: LLMResult,
prompts: List[str],
) -> Optional[dict]:
"""Update the cache and get the LLM output."""
for i, result in enumerate(new_results.generations):
existing_prompts[missing_prompt_idxs[i]] = result
prompt = prompts[missing_prompt_idxs[i]]
if langchain.llm_cache is not None:
langchain.llm_cache.update(prompt, llm_string, result)
llm_output = new_results.llm_output
return llm_output
class BaseLLM(BaseLanguageModel, ABC):
"""LLM wrapper should take in a prompt and return a string."""
cache: Optional[bool] = None
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callbacks: Callbacks = Field(default=None, exclude=True)
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@abstractmethod
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompts."""
@abstractmethod
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompts."""
def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
prompt_strings = [p.to_string() for p in prompts]
return self.generate(prompt_strings, stop=stop, callbacks=callbacks)
async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
prompt_strings = [p.to_string() for p in prompts]
return await self.agenerate(prompt_strings, stop=stop, callbacks=callbacks)
def generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# If string is passed in directly no errors will be raised but outputs will
# not make sense.
if not isinstance(prompts, list):
raise ValueError(
"Argument 'prompts' is expected to be of type List[str], received"
f" argument of type {type(prompts)}."
)
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
disregard_cache = self.cache is not None and not self.cache
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._generate).parameters.get(
"run_manager"
)
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
run_manager = callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, invocation_params=params
)
try:
output = (
self._generate(prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._generate(prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
run_manager.on_llm_end(output)
if run_manager:
output.run = RunInfo(run_id=run_manager.run_id)
return output
if len(missing_prompts) > 0:
run_manager = callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
invocation_params=params,
)
try:
new_results = (
self._generate(missing_prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._generate(missing_prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
run_manager.on_llm_end(new_results)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
run_info = None
if run_manager:
run_info = RunInfo(run_id=run_manager.run_id)
else:
llm_output = {}
run_info = None
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
async def agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
disregard_cache = self.cache is not None and not self.cache
callback_manager = AsyncCallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._agenerate).parameters.get(
"run_manager"
)
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
run_manager = await callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, invocation_params=params
)
try:
output = (
await self._agenerate(prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else await self._agenerate(prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e, verbose=self.verbose)
raise e
await run_manager.on_llm_end(output, verbose=self.verbose)
if run_manager:
output.run = RunInfo(run_id=run_manager.run_id)
return output
if len(missing_prompts) > 0:
run_manager = await callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
invocation_params=params,
)
try:
new_results = (
await self._agenerate(
missing_prompts, stop=stop, run_manager=run_manager
)
if new_arg_supported
else await self._agenerate(missing_prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e)
raise e
await run_manager.on_llm_end(new_results)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
run_info = None
if run_manager:
run_info = RunInfo(run_id=run_manager.run_id)
else:
llm_output = {}
run_info = None
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
def __call__(
self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None
) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
if not isinstance(prompt, str):
raise ValueError(
"Argument `prompt` is expected to be a string. Instead found "
f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
"`generate` instead."
)
return (
self.generate([prompt], stop=stop, callbacks=callbacks)
.generations[0][0]
.text
)
async def _call_async(
self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None
) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
result = await self.agenerate([prompt], stop=stop, callbacks=callbacks)
return result.generations[0][0].text
def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
return self(text, stop=_stop)
def predict_messages(
self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None
) -> BaseMessage:
text = get_buffer_string(messages)
if stop is None:
_stop = None
else:
_stop = list(stop)
content = self(text, stop=_stop)
return AIMessage(content=content)
async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
return await self._call_async(text, stop=_stop)
async def apredict_messages(
self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None
) -> BaseMessage:
text = get_buffer_string(messages)
if stop is None:
_stop = None
else:
_stop = list(stop)
content = await self._call_async(text, stop=_stop)
return AIMessage(content=content)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {}
def __str__(self) -> str:
"""Get a string representation of the object for printing."""
cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
return f"{cls_name}\nParams: {self._identifying_params}"
@property
@abstractmethod
def _llm_type(self) -> str:
"""Return type of llm."""
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the LLM.
Args:
file_path: Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
prompt_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(prompt_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(prompt_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
class LLM(BaseLLM):
"""LLM class that expect subclasses to implement a simpler call method.
The purpose of this class is to expose a simpler interface for working
with LLMs, rather than expect the user to implement the full _generate method.
"""
@abstractmethod
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
raise NotImplementedError("Async generation not implemented for this LLM.")
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# TODO: add caching here.
generations = []
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
for prompt in prompts:
text = (
self._call(prompt, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._call(prompt, stop=stop)
)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
generations = []
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
for prompt in prompts:
text = (
await self._acall(prompt, stop=stop, run_manager=run_manager)
if new_arg_supported
else await self._acall(prompt, stop=stop)
)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
| [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.Generation",
"langchain.schema.get_buffer_string",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.RunInfo",
"langchain.schema.AIMessage",
"langchain.llm_cache.lookup",
"langchain.llm_cache.update",
"langchain.schema.LLMResult"
] | [((2315, 2352), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2320, 2352), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2426, 2459), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2431, 2459), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2514, 2547), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2519, 2547), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2696, 2712), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (2710, 2712), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((3148, 3191), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (3157, 3191), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((5520, 5586), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (5545, 5586), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((7818, 7889), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (7827, 7889), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((8435, 8506), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (8465, 8506), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((10893, 10964), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (10902, 10964), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12285, 12312), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12302, 12312), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12464, 12490), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (12473, 12490), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12886, 12913), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12903, 12913), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((13083, 13109), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (13092, 13109), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), 
((16290, 16324), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (16299, 16324), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((17006, 17040), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (17015, 17040), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((1248, 1294), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (1274, 1294), False, 'import langchain\n'), ((2036, 2090), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (2062, 2090), False, 'import langchain\n'), ((2903, 3005), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2916, 3005), False, 'import warnings\n'), ((14159, 14174), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14163, 14174), False, 'from pathlib import Path\n'), ((6670, 6704), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (6677, 6704), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((7625, 7659), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7632, 7659), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((9667, 9701), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (9674, 9701), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((10700, 10734), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (10707, 10734), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((14495, 14530), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (14504, 14530), False, 'import json\n'), ((5637, 5670), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (5654, 5670), False, 'import inspect\n'), ((8557, 8591), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (8574, 8591), False, 'import inspect\n'), ((14633, 14684), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (14642, 14684), False, 'import yaml\n'), ((15934, 15963), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (15951, 15963), False, 'import inspect\n'), ((16251, 16272), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16261, 16272), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16635, 16665), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), 
'(self._acall)\n', (16652, 16665), False, 'import inspect\n'), ((16967, 16988), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16977, 16988), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n')] |
"""Base interface for large language models to expose."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import yaml
from pydantic import Extra, Field, root_validator, validator
import langchain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForLLMRun,
CallbackManager,
CallbackManagerForLLMRun,
Callbacks,
)
from langchain.schema import (
AIMessage,
BaseMessage,
Generation,
LLMResult,
PromptValue,
RunInfo,
get_buffer_string,
)
def _get_verbosity() -> bool:
return langchain.verbose
def get_prompts(
params: Dict[str, Any], prompts: List[str]
) -> Tuple[Dict[int, List], str, List[int], List[str]]:
"""Get prompts that are already cached."""
llm_string = str(sorted([(k, v) for k, v in params.items()]))
missing_prompts = []
missing_prompt_idxs = []
existing_prompts = {}
for i, prompt in enumerate(prompts):
if langchain.llm_cache is not None:
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
existing_prompts[i] = cache_val
else:
missing_prompts.append(prompt)
missing_prompt_idxs.append(i)
return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts
def update_cache(
existing_prompts: Dict[int, List],
llm_string: str,
missing_prompt_idxs: List[int],
new_results: LLMResult,
prompts: List[str],
) -> Optional[dict]:
"""Update the cache and get the LLM output."""
for i, result in enumerate(new_results.generations):
existing_prompts[missing_prompt_idxs[i]] = result
prompt = prompts[missing_prompt_idxs[i]]
if langchain.llm_cache is not None:
langchain.llm_cache.update(prompt, llm_string, result)
llm_output = new_results.llm_output
return llm_output
class BaseLLM(BaseLanguageModel, ABC):
"""LLM wrapper should take in a prompt and return a string."""
cache: Optional[bool] = None
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callbacks: Callbacks = Field(default=None, exclude=True)
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@abstractmethod
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompts."""
@abstractmethod
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompts."""
def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
prompt_strings = [p.to_string() for p in prompts]
return self.generate(prompt_strings, stop=stop, callbacks=callbacks)
async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
prompt_strings = [p.to_string() for p in prompts]
return await self.agenerate(prompt_strings, stop=stop, callbacks=callbacks)
def generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# If string is passed in directly no errors will be raised but outputs will
# not make sense.
if not isinstance(prompts, list):
raise ValueError(
"Argument 'prompts' is expected to be of type List[str], received"
f" argument of type {type(prompts)}."
)
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
disregard_cache = self.cache is not None and not self.cache
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._generate).parameters.get(
"run_manager"
)
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
run_manager = callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, invocation_params=params
)
try:
output = (
self._generate(prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._generate(prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
run_manager.on_llm_end(output)
if run_manager:
output.run = RunInfo(run_id=run_manager.run_id)
return output
if len(missing_prompts) > 0:
run_manager = callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
invocation_params=params,
)
try:
new_results = (
self._generate(missing_prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._generate(missing_prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
run_manager.on_llm_end(new_results)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
run_info = None
if run_manager:
run_info = RunInfo(run_id=run_manager.run_id)
else:
llm_output = {}
run_info = None
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
async def agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
disregard_cache = self.cache is not None and not self.cache
callback_manager = AsyncCallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._agenerate).parameters.get(
"run_manager"
)
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
run_manager = await callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, invocation_params=params
)
try:
output = (
await self._agenerate(prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else await self._agenerate(prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e, verbose=self.verbose)
raise e
await run_manager.on_llm_end(output, verbose=self.verbose)
if run_manager:
output.run = RunInfo(run_id=run_manager.run_id)
return output
if len(missing_prompts) > 0:
run_manager = await callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
invocation_params=params,
)
try:
new_results = (
await self._agenerate(
missing_prompts, stop=stop, run_manager=run_manager
)
if new_arg_supported
else await self._agenerate(missing_prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e)
raise e
await run_manager.on_llm_end(new_results)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
run_info = None
if run_manager:
run_info = RunInfo(run_id=run_manager.run_id)
else:
llm_output = {}
run_info = None
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
def __call__(
self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None
) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
if not isinstance(prompt, str):
raise ValueError(
"Argument `prompt` is expected to be a string. Instead found "
f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
"`generate` instead."
)
return (
self.generate([prompt], stop=stop, callbacks=callbacks)
.generations[0][0]
.text
)
async def _call_async(
self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None
) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
result = await self.agenerate([prompt], stop=stop, callbacks=callbacks)
return result.generations[0][0].text
def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
return self(text, stop=_stop)
def predict_messages(
self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None
) -> BaseMessage:
text = get_buffer_string(messages)
if stop is None:
_stop = None
else:
_stop = list(stop)
content = self(text, stop=_stop)
return AIMessage(content=content)
async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
return await self._call_async(text, stop=_stop)
async def apredict_messages(
self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None
) -> BaseMessage:
text = get_buffer_string(messages)
if stop is None:
_stop = None
else:
_stop = list(stop)
content = await self._call_async(text, stop=_stop)
return AIMessage(content=content)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {}
def __str__(self) -> str:
"""Get a string representation of the object for printing."""
cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
return f"{cls_name}\nParams: {self._identifying_params}"
@property
@abstractmethod
def _llm_type(self) -> str:
"""Return type of llm."""
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the LLM.
Args:
file_path: Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
prompt_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(prompt_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(prompt_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
class LLM(BaseLLM):
"""LLM class that expect subclasses to implement a simpler call method.
The purpose of this class is to expose a simpler interface for working
with LLMs, rather than expect the user to implement the full _generate method.
"""
@abstractmethod
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
raise NotImplementedError("Async generation not implemented for this LLM.")
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# TODO: add caching here.
generations = []
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
for prompt in prompts:
text = (
self._call(prompt, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._call(prompt, stop=stop)
)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
generations = []
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
for prompt in prompts:
text = (
await self._acall(prompt, stop=stop, run_manager=run_manager)
if new_arg_supported
else await self._acall(prompt, stop=stop)
)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
| [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.Generation",
"langchain.schema.get_buffer_string",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.RunInfo",
"langchain.schema.AIMessage",
"langchain.llm_cache.lookup",
"langchain.llm_cache.update",
"langchain.schema.LLMResult"
] | [((2315, 2352), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2320, 2352), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2426, 2459), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2431, 2459), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2514, 2547), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2519, 2547), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2696, 2712), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (2710, 2712), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((3148, 3191), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (3157, 3191), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((5520, 5586), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (5545, 5586), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((7818, 7889), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (7827, 7889), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((8435, 8506), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (8465, 8506), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((10893, 10964), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (10902, 10964), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12285, 12312), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12302, 12312), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12464, 12490), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (12473, 12490), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12886, 12913), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12903, 12913), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((13083, 13109), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (13092, 13109), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), 
((16290, 16324), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (16299, 16324), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((17006, 17040), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (17015, 17040), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((1248, 1294), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (1274, 1294), False, 'import langchain\n'), ((2036, 2090), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (2062, 2090), False, 'import langchain\n'), ((2903, 3005), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2916, 3005), False, 'import warnings\n'), ((14159, 14174), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14163, 14174), False, 'from pathlib import Path\n'), ((6670, 6704), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (6677, 6704), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((7625, 7659), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7632, 7659), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((9667, 9701), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (9674, 9701), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((10700, 10734), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (10707, 10734), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((14495, 14530), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (14504, 14530), False, 'import json\n'), ((5637, 5670), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (5654, 5670), False, 'import inspect\n'), ((8557, 8591), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (8574, 8591), False, 'import inspect\n'), ((14633, 14684), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (14642, 14684), False, 'import yaml\n'), ((15934, 15963), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (15951, 15963), False, 'import inspect\n'), ((16251, 16272), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16261, 16272), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16635, 16665), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), 
'(self._acall)\n', (16652, 16665), False, 'import inspect\n'), ((16967, 16988), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16977, 16988), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n')] |
"""Base interface for large language models to expose."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import yaml
from pydantic import Extra, Field, root_validator, validator
import langchain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.manager import (
AsyncCallbackManager,
AsyncCallbackManagerForLLMRun,
CallbackManager,
CallbackManagerForLLMRun,
Callbacks,
)
from langchain.schema import (
AIMessage,
BaseMessage,
Generation,
LLMResult,
PromptValue,
RunInfo,
get_buffer_string,
)
def _get_verbosity() -> bool:
return langchain.verbose
def get_prompts(
params: Dict[str, Any], prompts: List[str]
) -> Tuple[Dict[int, List], str, List[int], List[str]]:
"""Get prompts that are already cached."""
llm_string = str(sorted([(k, v) for k, v in params.items()]))
missing_prompts = []
missing_prompt_idxs = []
existing_prompts = {}
for i, prompt in enumerate(prompts):
if langchain.llm_cache is not None:
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
existing_prompts[i] = cache_val
else:
missing_prompts.append(prompt)
missing_prompt_idxs.append(i)
return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts
def update_cache(
existing_prompts: Dict[int, List],
llm_string: str,
missing_prompt_idxs: List[int],
new_results: LLMResult,
prompts: List[str],
) -> Optional[dict]:
"""Update the cache and get the LLM output."""
for i, result in enumerate(new_results.generations):
existing_prompts[missing_prompt_idxs[i]] = result
prompt = prompts[missing_prompt_idxs[i]]
if langchain.llm_cache is not None:
langchain.llm_cache.update(prompt, llm_string, result)
llm_output = new_results.llm_output
return llm_output
class BaseLLM(BaseLanguageModel, ABC):
"""LLM wrapper should take in a prompt and return a string."""
cache: Optional[bool] = None
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callbacks: Callbacks = Field(default=None, exclude=True)
callback_manager: Optional[BaseCallbackManager] = Field(default=None, exclude=True)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@root_validator()
def raise_deprecation(cls, values: Dict) -> Dict:
"""Raise deprecation warning if callback_manager is used."""
if values.get("callback_manager") is not None:
warnings.warn(
"callback_manager is deprecated. Please use callbacks instead.",
DeprecationWarning,
)
values["callbacks"] = values.pop("callback_manager", None)
return values
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@abstractmethod
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompts."""
@abstractmethod
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompts."""
def generate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
prompt_strings = [p.to_string() for p in prompts]
return self.generate(prompt_strings, stop=stop, callbacks=callbacks)
async def agenerate_prompt(
self,
prompts: List[PromptValue],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
prompt_strings = [p.to_string() for p in prompts]
return await self.agenerate(prompt_strings, stop=stop, callbacks=callbacks)
def generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# If string is passed in directly no errors will be raised but outputs will
# not make sense.
if not isinstance(prompts, list):
raise ValueError(
"Argument 'prompts' is expected to be of type List[str], received"
f" argument of type {type(prompts)}."
)
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
disregard_cache = self.cache is not None and not self.cache
callback_manager = CallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._generate).parameters.get(
"run_manager"
)
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
run_manager = callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, invocation_params=params
)
try:
output = (
self._generate(prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._generate(prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
run_manager.on_llm_end(output)
if run_manager:
output.run = RunInfo(run_id=run_manager.run_id)
return output
if len(missing_prompts) > 0:
run_manager = callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
invocation_params=params,
)
try:
new_results = (
self._generate(missing_prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._generate(missing_prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
run_manager.on_llm_error(e)
raise e
run_manager.on_llm_end(new_results)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
run_info = None
if run_manager:
run_info = RunInfo(run_id=run_manager.run_id)
else:
llm_output = {}
run_info = None
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
async def agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
callbacks: Callbacks = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
disregard_cache = self.cache is not None and not self.cache
callback_manager = AsyncCallbackManager.configure(
callbacks, self.callbacks, self.verbose
)
new_arg_supported = inspect.signature(self._agenerate).parameters.get(
"run_manager"
)
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
run_manager = await callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, invocation_params=params
)
try:
output = (
await self._agenerate(prompts, stop=stop, run_manager=run_manager)
if new_arg_supported
else await self._agenerate(prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e, verbose=self.verbose)
raise e
await run_manager.on_llm_end(output, verbose=self.verbose)
if run_manager:
output.run = RunInfo(run_id=run_manager.run_id)
return output
if len(missing_prompts) > 0:
run_manager = await callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
invocation_params=params,
)
try:
new_results = (
await self._agenerate(
missing_prompts, stop=stop, run_manager=run_manager
)
if new_arg_supported
else await self._agenerate(missing_prompts, stop=stop)
)
except (KeyboardInterrupt, Exception) as e:
await run_manager.on_llm_error(e)
raise e
await run_manager.on_llm_end(new_results)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
run_info = None
if run_manager:
run_info = RunInfo(run_id=run_manager.run_id)
else:
llm_output = {}
run_info = None
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
def __call__(
self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None
) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
if not isinstance(prompt, str):
raise ValueError(
"Argument `prompt` is expected to be a string. Instead found "
f"{type(prompt)}. If you want to run the LLM on multiple prompts, use "
"`generate` instead."
)
return (
self.generate([prompt], stop=stop, callbacks=callbacks)
.generations[0][0]
.text
)
async def _call_async(
self, prompt: str, stop: Optional[List[str]] = None, callbacks: Callbacks = None
) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
result = await self.agenerate([prompt], stop=stop, callbacks=callbacks)
return result.generations[0][0].text
def predict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
return self(text, stop=_stop)
def predict_messages(
self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None
) -> BaseMessage:
text = get_buffer_string(messages)
if stop is None:
_stop = None
else:
_stop = list(stop)
content = self(text, stop=_stop)
return AIMessage(content=content)
async def apredict(self, text: str, *, stop: Optional[Sequence[str]] = None) -> str:
if stop is None:
_stop = None
else:
_stop = list(stop)
return await self._call_async(text, stop=_stop)
async def apredict_messages(
self, messages: List[BaseMessage], *, stop: Optional[Sequence[str]] = None
) -> BaseMessage:
text = get_buffer_string(messages)
if stop is None:
_stop = None
else:
_stop = list(stop)
content = await self._call_async(text, stop=_stop)
return AIMessage(content=content)
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {}
def __str__(self) -> str:
"""Get a string representation of the object for printing."""
cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
return f"{cls_name}\nParams: {self._identifying_params}"
@property
@abstractmethod
def _llm_type(self) -> str:
"""Return type of llm."""
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the LLM.
Args:
file_path: Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
prompt_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(prompt_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(prompt_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
class LLM(BaseLLM):
"""LLM class that expect subclasses to implement a simpler call method.
The purpose of this class is to expose a simpler interface for working
with LLMs, rather than expect the user to implement the full _generate method.
"""
@abstractmethod
def _call(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
async def _acall(
self,
prompt: str,
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> str:
"""Run the LLM on the given prompt and input."""
raise NotImplementedError("Async generation not implemented for this LLM.")
def _generate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# TODO: add caching here.
generations = []
new_arg_supported = inspect.signature(self._call).parameters.get("run_manager")
for prompt in prompts:
text = (
self._call(prompt, stop=stop, run_manager=run_manager)
if new_arg_supported
else self._call(prompt, stop=stop)
)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
async def _agenerate(
self,
prompts: List[str],
stop: Optional[List[str]] = None,
run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
generations = []
new_arg_supported = inspect.signature(self._acall).parameters.get("run_manager")
for prompt in prompts:
text = (
await self._acall(prompt, stop=stop, run_manager=run_manager)
if new_arg_supported
else await self._acall(prompt, stop=stop)
)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
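# Illustrative sketch: the `LLM` helper above only asks subclasses for `_call`
# (and `_llm_type`); `_generate`/`_agenerate` wrap the result into an
# `LLMResult` and wire up callbacks. `EchoLLM` and its upper-casing behaviour
# are hypothetical, shown purely to make the contract concrete.
class EchoLLM(LLM):
    """Toy LLM that upper-cases the prompt; for illustration only."""
    @property
    def _llm_type(self) -> str:
        return "echo"
    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> str:
        # A real subclass would call a model API here.
        return prompt.upper()
# Example use (not executed here):
#   llm = EchoLLM()
#   llm("hello")              # -> "HELLO"
#   llm.generate(["a", "b"])  # -> LLMResult with one Generation per prompt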
| [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.Generation",
"langchain.schema.get_buffer_string",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.RunInfo",
"langchain.schema.AIMessage",
"langchain.llm_cache.lookup",
"langchain.llm_cache.update",
"langchain.schema.LLMResult"
] | [((2315, 2352), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2320, 2352), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2426, 2459), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2431, 2459), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2514, 2547), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2519, 2547), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2696, 2712), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (2710, 2712), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((3148, 3191), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (3157, 3191), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((5520, 5586), 'langchain.callbacks.manager.CallbackManager.configure', 'CallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (5545, 5586), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((7818, 7889), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (7827, 7889), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((8435, 8506), 'langchain.callbacks.manager.AsyncCallbackManager.configure', 'AsyncCallbackManager.configure', (['callbacks', 'self.callbacks', 'self.verbose'], {}), '(callbacks, self.callbacks, self.verbose)\n', (8465, 8506), False, 'from langchain.callbacks.manager import AsyncCallbackManager, AsyncCallbackManagerForLLMRun, CallbackManager, CallbackManagerForLLMRun, Callbacks\n'), ((10893, 10964), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output', 'run': 'run_info'}), '(generations=generations, llm_output=llm_output, run=run_info)\n', (10902, 10964), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12285, 12312), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12302, 12312), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12464, 12490), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (12473, 12490), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((12886, 12913), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['messages'], {}), '(messages)\n', (12903, 12913), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((13083, 13109), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'content'}), '(content=content)\n', (13092, 13109), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), 
((16290, 16324), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (16299, 16324), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((17006, 17040), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (17015, 17040), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((1248, 1294), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (1274, 1294), False, 'import langchain\n'), ((2036, 2090), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (2062, 2090), False, 'import langchain\n'), ((2903, 3005), 'warnings.warn', 'warnings.warn', (['"""callback_manager is deprecated. Please use callbacks instead."""', 'DeprecationWarning'], {}), "('callback_manager is deprecated. Please use callbacks instead.',\n DeprecationWarning)\n", (2916, 3005), False, 'import warnings\n'), ((14159, 14174), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14163, 14174), False, 'from pathlib import Path\n'), ((6670, 6704), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (6677, 6704), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((7625, 7659), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (7632, 7659), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((9667, 9701), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (9674, 9701), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((10700, 10734), 'langchain.schema.RunInfo', 'RunInfo', ([], {'run_id': 'run_manager.run_id'}), '(run_id=run_manager.run_id)\n', (10707, 10734), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((14495, 14530), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (14504, 14530), False, 'import json\n'), ((5637, 5670), 'inspect.signature', 'inspect.signature', (['self._generate'], {}), '(self._generate)\n', (5654, 5670), False, 'import inspect\n'), ((8557, 8591), 'inspect.signature', 'inspect.signature', (['self._agenerate'], {}), '(self._agenerate)\n', (8574, 8591), False, 'import inspect\n'), ((14633, 14684), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (14642, 14684), False, 'import yaml\n'), ((15934, 15963), 'inspect.signature', 'inspect.signature', (['self._call'], {}), '(self._call)\n', (15951, 15963), False, 'import inspect\n'), ((16251, 16272), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16261, 16272), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n'), ((16635, 16665), 'inspect.signature', 'inspect.signature', (['self._acall'], {}), 
'(self._acall)\n', (16652, 16665), False, 'import inspect\n'), ((16967, 16988), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (16977, 16988), False, 'from langchain.schema import AIMessage, BaseMessage, Generation, LLMResult, PromptValue, RunInfo, get_buffer_string\n')] |
"""Base interface for large language models to expose."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
import yaml
from pydantic import BaseModel, Extra, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from langchain.callbacks.base import BaseCallbackManager
from langchain.schema import Generation, LLMResult
def _get_verbosity() -> bool:
return langchain.verbose
def get_prompts(
params: Dict[str, Any], prompts: List[str]
) -> Tuple[Dict[int, List], str, List[int], List[str]]:
"""Get prompts that are already cached."""
llm_string = str(sorted([(k, v) for k, v in params.items()]))
missing_prompts = []
missing_prompt_idxs = []
existing_prompts = {}
for i, prompt in enumerate(prompts):
if langchain.llm_cache is not None:
cache_val = langchain.llm_cache.lookup(prompt, llm_string)
if isinstance(cache_val, list):
existing_prompts[i] = cache_val
else:
missing_prompts.append(prompt)
missing_prompt_idxs.append(i)
return existing_prompts, llm_string, missing_prompt_idxs, missing_prompts
def update_cache(
existing_prompts: Dict[int, List],
llm_string: str,
missing_prompt_idxs: List[int],
new_results: LLMResult,
prompts: List[str],
) -> Optional[dict]:
"""Update the cache and get the LLM output."""
for i, result in enumerate(new_results.generations):
existing_prompts[missing_prompt_idxs[i]] = result
prompt = prompts[missing_prompt_idxs[i]]
if langchain.llm_cache is not None:
langchain.llm_cache.update(prompt, llm_string, result)
llm_output = new_results.llm_output
return llm_output
class BaseLLM(BaseModel, ABC):
"""LLM wrapper should take in a prompt and return a string."""
cache: Optional[bool] = None
verbose: bool = Field(default_factory=_get_verbosity)
"""Whether to print out response text."""
callback_manager: BaseCallbackManager = Field(default_factory=get_callback_manager)
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@validator("callback_manager", pre=True, always=True)
def set_callback_manager(
cls, callback_manager: Optional[BaseCallbackManager]
) -> BaseCallbackManager:
"""If callback manager is None, set it.
This allows users to pass in None as callback manager, which is a nice UX.
"""
return callback_manager or get_callback_manager()
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@abstractmethod
def _generate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompts."""
@abstractmethod
async def _agenerate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompts."""
def generate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# If string is passed in directly no errors will be raised but outputs will
# not make sense.
if not isinstance(prompts, list):
raise ValueError(
"Argument 'prompts' is expected to be of type List[str], received"
f" argument of type {type(prompts)}."
)
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, verbose=self.verbose
)
try:
output = self._generate(prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
self.callback_manager.on_llm_end(output, verbose=self.verbose)
return output
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
if len(missing_prompts) > 0:
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, missing_prompts, verbose=self.verbose
)
try:
new_results = self._generate(missing_prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
else:
llm_output = {}
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output)
async def agenerate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
disregard_cache = self.cache is not None and not self.cache
if langchain.llm_cache is None or disregard_cache:
# This happens when langchain.cache is None, but self.cache is True
if self.cache is not None and self.cache:
raise ValueError(
"Asked to cache, but no cache found at `langchain.cache`."
)
if self.callback_manager.is_async:
await self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, verbose=self.verbose
)
else:
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__}, prompts, verbose=self.verbose
)
try:
output = await self._agenerate(prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
if self.callback_manager.is_async:
await self.callback_manager.on_llm_error(e, verbose=self.verbose)
else:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
if self.callback_manager.is_async:
await self.callback_manager.on_llm_end(output, verbose=self.verbose)
else:
self.callback_manager.on_llm_end(output, verbose=self.verbose)
return output
params = self.dict()
params["stop"] = stop
(
existing_prompts,
llm_string,
missing_prompt_idxs,
missing_prompts,
) = get_prompts(params, prompts)
if len(missing_prompts) > 0:
if self.callback_manager.is_async:
await self.callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
verbose=self.verbose,
)
else:
self.callback_manager.on_llm_start(
{"name": self.__class__.__name__},
missing_prompts,
verbose=self.verbose,
)
try:
new_results = await self._agenerate(missing_prompts, stop=stop)
except (KeyboardInterrupt, Exception) as e:
if self.callback_manager.is_async:
await self.callback_manager.on_llm_error(e, verbose=self.verbose)
else:
self.callback_manager.on_llm_error(e, verbose=self.verbose)
raise e
if self.callback_manager.is_async:
await self.callback_manager.on_llm_end(
new_results, verbose=self.verbose
)
else:
self.callback_manager.on_llm_end(new_results, verbose=self.verbose)
llm_output = update_cache(
existing_prompts, llm_string, missing_prompt_idxs, new_results, prompts
)
else:
llm_output = {}
generations = [existing_prompts[i] for i in range(len(prompts))]
return LLMResult(generations=generations, llm_output=llm_output)
def get_num_tokens(self, text: str) -> int:
"""Get the number of tokens present in the text."""
# TODO: this method may not be exact.
# TODO: this method may differ based on model (eg codex).
try:
from transformers import GPT2TokenizerFast
except ImportError:
raise ValueError(
"Could not import transformers python package. "
"This is needed in order to calculate get_num_tokens. "
"Please it install it with `pip install transformers`."
)
        # create a GPT-2 tokenizer instance (used here as an approximation of the GPT-3 tokenizer)
        tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
        # tokenize the text using the GPT-2 tokenizer
tokenized_text = tokenizer.tokenize(text)
# calculate the number of tokens in the tokenized text
return len(tokenized_text)
def __call__(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Check Cache and run the LLM on the given prompt and input."""
return self.generate([prompt], stop=stop).generations[0][0].text
@property
def _identifying_params(self) -> Mapping[str, Any]:
"""Get the identifying parameters."""
return {}
def __str__(self) -> str:
"""Get a string representation of the object for printing."""
cls_name = f"\033[1m{self.__class__.__name__}\033[0m"
return f"{cls_name}\nParams: {self._identifying_params}"
@property
@abstractmethod
def _llm_type(self) -> str:
"""Return type of llm."""
def dict(self, **kwargs: Any) -> Dict:
"""Return a dictionary of the LLM."""
starter_dict = dict(self._identifying_params)
starter_dict["_type"] = self._llm_type
return starter_dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the LLM.
Args:
file_path: Path to file to save the LLM to.
Example:
.. code-block:: python
llm.save(file_path="path/llm.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
prompt_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(prompt_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(prompt_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
class LLM(BaseLLM):
"""LLM class that expect subclasses to implement a simpler call method.
The purpose of this class is to expose a simpler interface for working
with LLMs, rather than expect the user to implement the full _generate method.
"""
@abstractmethod
def _call(self, prompt: str, stop: Optional[List[str]] = None) -> str:
"""Run the LLM on the given prompt and input."""
def _generate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
# TODO: add caching here.
generations = []
for prompt in prompts:
text = self._call(prompt, stop=stop)
generations.append([Generation(text=text)])
return LLMResult(generations=generations)
async def _agenerate(
self, prompts: List[str], stop: Optional[List[str]] = None
) -> LLMResult:
"""Run the LLM on the given prompt and input."""
raise NotImplementedError("Async generation not implemented for this LLM.")
| [
"langchain.schema.Generation",
"langchain.llm_cache.update",
"langchain.llm_cache.lookup",
"langchain.schema.LLMResult",
"langchain.callbacks.get_callback_manager"
] | [((1991, 2028), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1996, 2028), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2119, 2162), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager'}), '(default_factory=get_callback_manager)\n', (2124, 2162), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2311, 2363), 'pydantic.validator', 'validator', (['"""callback_manager"""'], {'pre': '(True)', 'always': '(True)'}), "('callback_manager', pre=True, always=True)\n", (2320, 2363), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2693, 2736), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (2702, 2736), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((5769, 5826), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (5778, 5826), False, 'from langchain.schema import Generation, LLMResult\n'), ((9134, 9191), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations', 'llm_output': 'llm_output'}), '(generations=generations, llm_output=llm_output)\n', (9143, 9191), False, 'from langchain.schema import Generation, LLMResult\n'), ((9826, 9867), 'transformers.GPT2TokenizerFast.from_pretrained', 'GPT2TokenizerFast.from_pretrained', (['"""gpt2"""'], {}), "('gpt2')\n", (9859, 9867), False, 'from transformers import GPT2TokenizerFast\n'), ((12744, 12778), 'langchain.schema.LLMResult', 'LLMResult', ([], {'generations': 'generations'}), '(generations=generations)\n', (12753, 12778), False, 'from langchain.schema import Generation, LLMResult\n'), ((932, 978), 'langchain.llm_cache.lookup', 'langchain.llm_cache.lookup', (['prompt', 'llm_string'], {}), '(prompt, llm_string)\n', (958, 978), False, 'import langchain\n'), ((1720, 1774), 'langchain.llm_cache.update', 'langchain.llm_cache.update', (['prompt', 'llm_string', 'result'], {}), '(prompt, llm_string, result)\n', (1746, 1774), False, 'import langchain\n'), ((2664, 2686), 'langchain.callbacks.get_callback_manager', 'get_callback_manager', ([], {}), '()\n', (2684, 2686), False, 'from langchain.callbacks import get_callback_manager\n'), ((11346, 11361), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (11350, 11361), False, 'from pathlib import Path\n'), ((11682, 11717), 'json.dump', 'json.dump', (['prompt_dict', 'f'], {'indent': '(4)'}), '(prompt_dict, f, indent=4)\n', (11691, 11717), False, 'import json\n'), ((11820, 11871), 'yaml.dump', 'yaml.dump', (['prompt_dict', 'f'], {'default_flow_style': '(False)'}), '(prompt_dict, f, default_flow_style=False)\n', (11829, 11871), False, 'import yaml\n'), ((12705, 12726), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (12715, 12726), False, 'from langchain.schema import Generation, LLMResult\n')] |
import discord
from discord import app_commands
from discord.ext import commands
import langchain
from langchain.document_loaders import YoutubeLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
import torch
class YoutubeSummaryCog(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.llm = self.bot.llm
@app_commands.command(name="youtubesummary", description="Summarize a YouTube video given its URL")
async def summarize(self, interaction: discord.Interaction, url: str):
await interaction.response.defer()
# Notifies the user that the bot is processing their command.
await interaction.followup.send(
embed=discord.Embed(
title=f"{interaction.user.display_name} used Youtube Summary 📺",
description=f"Summarizing {url} \nGenerating response\nPlease wait..",
color=0x9C84EF
)
)
try:
# Load transcript
loader = YoutubeLoader.from_youtube_url(url)
transcript = loader.load()
# Split text
text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=50)
texts = text_splitter.split_documents(transcript)
# Create and configure chain
chain = load_summarize_chain(llm=self.llm, chain_type="map_reduce", verbose=True)
# chain.llm_chain.prompt.template = \
# """### Instruction:
# Write a 1-3 paragraph summary the following:
# "{text}"
# ### Response:
# 1-3 PARAGRAPH SUMMARY:"""
# Run the chain and get summary
summary = chain.run(texts)
await interaction.followup.send(f'Summary:\n{summary}')
except Exception as e:
await interaction.channel.send(f'Sorry, an error occurred: {str(e)}')
async def setup(bot):
await bot.add_cog(YoutubeSummaryCog(bot))
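# Illustrative sketch of wiring this cog into a bot: the cog reads `bot.llm`,
# so the attribute must exist before the extension is loaded. The module path
# "cogs.youtube_summary", the ChatOpenAI choice and the DISCORD_TOKEN variable
# are assumptions for illustration.
import os
from langchain.chat_models import ChatOpenAI
class SummaryBot(commands.Bot):
    async def setup_hook(self) -> None:
        self.llm = ChatOpenAI(temperature=0)  # any LLM accepted by load_summarize_chain
        await self.load_extension("cogs.youtube_summary")  # assumed path to this file
def _run_bot() -> None:
    bot = SummaryBot(command_prefix="!", intents=discord.Intents.default())
    bot.run(os.environ["DISCORD_TOKEN"])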
| [
"langchain.chains.summarize.load_summarize_chain",
"langchain.document_loaders.YoutubeLoader.from_youtube_url",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((425, 528), 'discord.app_commands.command', 'app_commands.command', ([], {'name': '"""youtubesummary"""', 'description': '"""Summarize a YouTube video given its URL"""'}), "(name='youtubesummary', description=\n 'Summarize a YouTube video given its URL')\n", (445, 528), False, 'from discord import app_commands\n'), ((1074, 1109), 'langchain.document_loaders.YoutubeLoader.from_youtube_url', 'YoutubeLoader.from_youtube_url', (['url'], {}), '(url)\n', (1104, 1109), False, 'from langchain.document_loaders import YoutubeLoader\n'), ((1203, 1268), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(2000)', 'chunk_overlap': '(50)'}), '(chunk_size=2000, chunk_overlap=50)\n', (1233, 1268), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((1393, 1466), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', ([], {'llm': 'self.llm', 'chain_type': '"""map_reduce"""', 'verbose': '(True)'}), "(llm=self.llm, chain_type='map_reduce', verbose=True)\n", (1413, 1466), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((772, 954), 'discord.Embed', 'discord.Embed', ([], {'title': 'f"""{interaction.user.display_name} used Youtube Summary 📺"""', 'description': 'f"""Summarizing {url} \nGenerating response\nPlease wait.."""', 'color': '(10257647)'}), '(title=\n f\'{interaction.user.display_name} used Youtube Summary 📺\', description=\n f"""Summarizing {url} \nGenerating response\nPlease wait..""", color=10257647\n )\n', (785, 954), False, 'import discord\n')] |
"""Base interface that all chains should implement."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import BaseModel, Extra, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from langchain.callbacks.base import BaseCallbackManager
class Memory(BaseModel, ABC):
"""Base interface for memory in chains."""
class Config:
"""Configuration for this pydantic object."""
extra = Extra.forbid
arbitrary_types_allowed = True
@property
@abstractmethod
def memory_variables(self) -> List[str]:
"""Input keys this memory class will load dynamically."""
@abstractmethod
def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]:
"""Return key-value pairs given the text input to the chain."""
@abstractmethod
def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None:
"""Save the context of this model run to memory."""
@abstractmethod
def clear(self) -> None:
"""Clear memory contents."""
def _get_verbosity() -> bool:
return langchain.verbose
class Chain(BaseModel, ABC):
"""Base interface that all chains should implement."""
memory: Optional[Memory] = None
callback_manager: BaseCallbackManager = Field(
default_factory=get_callback_manager, exclude=True
)
verbose: bool = Field(
default_factory=_get_verbosity
) # Whether to print the response text
class Config:
"""Configuration for this pydantic object."""
arbitrary_types_allowed = True
@property
def _chain_type(self) -> str:
raise NotImplementedError("Saving not supported for this chain type.")
@validator("callback_manager", pre=True, always=True)
def set_callback_manager(
cls, callback_manager: Optional[BaseCallbackManager]
) -> BaseCallbackManager:
"""If callback manager is None, set it.
This allows users to pass in None as callback manager, which is a nice UX.
"""
return callback_manager or get_callback_manager()
@validator("verbose", pre=True, always=True)
def set_verbose(cls, verbose: Optional[bool]) -> bool:
"""If verbose is None, set it.
This allows users to pass in None as verbose to access the global setting.
"""
if verbose is None:
return _get_verbosity()
else:
return verbose
@property
@abstractmethod
def input_keys(self) -> List[str]:
"""Input keys this chain expects."""
@property
@abstractmethod
def output_keys(self) -> List[str]:
"""Output keys this chain expects."""
def _validate_inputs(self, inputs: Dict[str, str]) -> None:
"""Check that all inputs are present."""
missing_keys = set(self.input_keys).difference(inputs)
if missing_keys:
raise ValueError(f"Missing some input keys: {missing_keys}")
def _validate_outputs(self, outputs: Dict[str, str]) -> None:
if set(outputs) != set(self.output_keys):
raise ValueError(
f"Did not get output keys that were expected. "
f"Got: {set(outputs)}. Expected: {set(self.output_keys)}."
)
@abstractmethod
def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
"""Run the logic of this chain and return the output."""
async def _acall(self, inputs: Dict[str, str]) -> Dict[str, str]:
"""Run the logic of this chain and return the output."""
raise NotImplementedError("Async call not supported for this chain type.")
def __call__(
self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
"""
inputs = self.prep_inputs(inputs)
self.callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
verbose=self.verbose,
)
try:
outputs = self._call(inputs)
except (KeyboardInterrupt, Exception) as e:
self.callback_manager.on_chain_error(e, verbose=self.verbose)
raise e
self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
return self.prep_outputs(inputs, outputs, return_only_outputs)
async def acall(
self, inputs: Union[Dict[str, Any], Any], return_only_outputs: bool = False
) -> Dict[str, Any]:
"""Run the logic of this chain and add to output if desired.
Args:
inputs: Dictionary of inputs, or single input if chain expects
only one param.
return_only_outputs: boolean for whether to return only outputs in the
response. If True, only new keys generated by this chain will be
returned. If False, both input keys and new keys generated by this
chain will be returned. Defaults to False.
"""
inputs = self.prep_inputs(inputs)
if self.callback_manager.is_async:
await self.callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
verbose=self.verbose,
)
else:
self.callback_manager.on_chain_start(
{"name": self.__class__.__name__},
inputs,
verbose=self.verbose,
)
try:
outputs = await self._acall(inputs)
except (KeyboardInterrupt, Exception) as e:
if self.callback_manager.is_async:
await self.callback_manager.on_chain_error(e, verbose=self.verbose)
else:
self.callback_manager.on_chain_error(e, verbose=self.verbose)
raise e
if self.callback_manager.is_async:
await self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
else:
self.callback_manager.on_chain_end(outputs, verbose=self.verbose)
return self.prep_outputs(inputs, outputs, return_only_outputs)
def prep_outputs(
self,
inputs: Dict[str, str],
outputs: Dict[str, str],
return_only_outputs: bool = False,
) -> Dict[str, str]:
"""Validate and prep outputs."""
self._validate_outputs(outputs)
if self.memory is not None:
self.memory.save_context(inputs, outputs)
if return_only_outputs:
return outputs
else:
return {**inputs, **outputs}
def prep_inputs(self, inputs: Union[Dict[str, Any], Any]) -> Dict[str, str]:
"""Validate and prep inputs."""
if not isinstance(inputs, dict):
_input_keys = set(self.input_keys)
if self.memory is not None:
# If there are multiple input keys, but some get set by memory so that
# only one is not set, we can still figure out which key it is.
_input_keys = _input_keys.difference(self.memory.memory_variables)
if len(_input_keys) != 1:
raise ValueError(
f"A single string input was passed in, but this chain expects "
f"multiple inputs ({_input_keys}). When a chain expects "
f"multiple inputs, please call it by passing in a dictionary, "
"eg `chain({'foo': 1, 'bar': 2})`"
)
inputs = {list(_input_keys)[0]: inputs}
if self.memory is not None:
external_context = self.memory.load_memory_variables(inputs)
inputs = dict(inputs, **external_context)
self._validate_inputs(inputs)
return inputs
def apply(self, input_list: List[Dict[str, Any]]) -> List[Dict[str, str]]:
"""Call the chain on all inputs in the list."""
return [self(inputs) for inputs in input_list]
def conversation(self, *args: str, **kwargs: str) -> List[str]:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) == 2:
assert "output" in self.output_keys and "intermediate_steps" in self.output_keys
keep_short = False
if "keep_short" in kwargs:
keep_short = kwargs.pop("keep_short")
outputs = {}
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
outputs = self(args[0])
if kwargs and not args:
outputs = self(kwargs)
intermediate = outputs.get("intermediate_steps") or []
conversation = []
for action, action_output in intermediate:
action: str = action.log.strip()
if not action.startswith(f"AI:"):
action = f"AI: {action}"
if keep_short:
# Hide the internal conversation
lines = action.split("\n")
new_lines = []
for l in lines:
for term in ["Assistant,"]:
idx = l.lower().find(term.lower())
if idx >= 0:
l = l[:idx]
if l.lower().strip() == "ai:":
l = ""
if not l:
continue
new_lines.append(l)
action = "\n".join(new_lines)
conversation.append(action)
if not keep_short or action_output.lstrip().startswith("Here is the edited image"):
if not action_output.startswith("Assistant:"):
action_output = f"Assistant: {action_output}"
conversation.append(action_output)
conversation.append("AI: " + outputs["output"])
return conversation
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return ["AI: " + self(args[0])[self.output_keys[0]]]
if kwargs and not args:
return ["AI: " + self(kwargs)[self.output_keys[0]]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def run(self, *args: str, **kwargs: str) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) == 2:
assert "output" in self.output_keys and "intermediate_steps" in self.output_keys
outputs = {}
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
outputs = self(args[0])
if kwargs and not args:
outputs = self(kwargs)
intermediate = outputs.get("intermediate_steps") or []
assistant = ""
for action, action_output in intermediate:
action: str = action.log.strip()
if not action.startswith(f"AI:"):
action = f"AI: {action}"
if not action_output.startswith("Assistant:"):
action_output = f"Assistant: {action_output}"
assistant += "\n" + action + "\n" + action_output
return assistant + "\n" + "AI: " + outputs["output"]
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return self(args[0])[self.output_keys[0]]
if kwargs and not args:
return self(kwargs)[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
async def arun(self, *args: str, **kwargs: str) -> str:
"""Run the chain as text in, text out or multiple variables, text out."""
if len(self.output_keys) != 1:
raise ValueError(
f"`run` not supported when there is not exactly "
f"one output key. Got {self.output_keys}."
)
if args and not kwargs:
if len(args) != 1:
raise ValueError("`run` supports only one positional argument.")
return (await self.acall(args[0]))[self.output_keys[0]]
if kwargs and not args:
return (await self.acall(kwargs))[self.output_keys[0]]
raise ValueError(
f"`run` supported with either positional arguments or keyword arguments"
f" but not both. Got args: {args} and kwargs: {kwargs}."
)
def dict(self, **kwargs: Any) -> Dict:
"""Return dictionary representation of chain."""
if self.memory is not None:
raise ValueError("Saving of memory is not yet supported.")
_dict = super().dict()
_dict["_type"] = self._chain_type
return _dict
def save(self, file_path: Union[Path, str]) -> None:
"""Save the chain.
Args:
file_path: Path to file to save the chain to.
Example:
.. code-block:: python
chain.save(file_path="path/chain.yaml")
"""
# Convert file to Path object.
if isinstance(file_path, str):
save_path = Path(file_path)
else:
save_path = file_path
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
chain_dict = self.dict()
if save_path.suffix == ".json":
with open(file_path, "w") as f:
json.dump(chain_dict, f, indent=4)
elif save_path.suffix == ".yaml":
with open(file_path, "w") as f:
yaml.dump(chain_dict, f, default_flow_style=False)
else:
raise ValueError(f"{save_path} must be json or yaml")
| [
"langchain.callbacks.get_callback_manager"
] | [((1401, 1458), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager', 'exclude': '(True)'}), '(default_factory=get_callback_manager, exclude=True)\n', (1406, 1458), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((1493, 1530), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (1498, 1530), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((1830, 1882), 'pydantic.validator', 'validator', (['"""callback_manager"""'], {'pre': '(True)', 'always': '(True)'}), "('callback_manager', pre=True, always=True)\n", (1839, 1882), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2212, 2255), 'pydantic.validator', 'validator', (['"""verbose"""'], {'pre': '(True)', 'always': '(True)'}), "('verbose', pre=True, always=True)\n", (2221, 2255), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((2183, 2205), 'langchain.callbacks.get_callback_manager', 'get_callback_manager', ([], {}), '()\n', (2203, 2205), False, 'from langchain.callbacks import get_callback_manager\n'), ((14634, 14649), 'pathlib.Path', 'Path', (['file_path'], {}), '(file_path)\n', (14638, 14649), False, 'from pathlib import Path\n'), ((14969, 15003), 'json.dump', 'json.dump', (['chain_dict', 'f'], {'indent': '(4)'}), '(chain_dict, f, indent=4)\n', (14978, 15003), False, 'import json\n'), ((15106, 15156), 'yaml.dump', 'yaml.dump', (['chain_dict', 'f'], {'default_flow_style': '(False)'}), '(chain_dict, f, default_flow_style=False)\n', (15115, 15156), False, 'import yaml\n')] |
#!/Users/mark/dev/ml/langchain/read_github/langchain_github/env/bin/python
# change above to the location of your local Python venv installation
import sys, os, shutil
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(parent_dir)
import pathlib
from langchain.docstore.document import Document
import langchain.text_splitter as text_splitter
from langchain.chat_models import ChatOpenAI
from my_llm import standards as my_llm
from my_llm.langchain_class import PubSubChatMessageHistory
from langchain import PromptTemplate
from langchain.document_loaders.unstructured import UnstructuredFileLoader
import logging
chat = ChatOpenAI(temperature=0)
CODE_EXTENSIONS = [".py", ".js", ".java", ".c", ".cpp", ".cc", ".cxx", ".hpp",
".h", ".cs", ".m", ".swift", ".go", ".rs", ".rb", ".php",
".pl", ".kt", ".kts", ".ts", ".scala", ".hs", ".lua", ".sh",
".bash", ".r", ".m", ".sql", ".html", ".css", ".xml", ".json",
".yaml", ".yml"]
# Get Markdown documents from a repository
def get_repo_docs(repo_path, extension, memory, ignore=None, resummarise=False, verbose=False):
repo = pathlib.Path(repo_path)
ignore_path = ""
if ignore is not None:
ignore_path = repo / ignore
if not ignore_path.is_dir():
print("WARNING: --ignore must be a directory")
print('Ignoring %s' % ignore_path)
exts = extension.split(",")
for ext in exts:
the_glob = f"**/*{ext}"
matched_files = list(repo.glob(the_glob))
num_matched_files = len(matched_files)
print(f"Number of matched {ext} files: {num_matched_files}")
# Generate summary md files
if ext!=".md":
k = 0
for non_md_file in repo.glob(the_glob):
k += 1
if str(non_md_file).startswith(str(ignore_path)):
continue
generate_summary(non_md_file, memory, resummarise=resummarise, verbose=verbose)
if verbose:
print(f"Generated summary for a {ext} file: {k} of {num_matched_files} done.")
# Iterate over all files in the repo (including subdirectories)
print(f"Reading {ext} files")
i = 0
j = 0
for md_file in repo.glob(the_glob):
if str(md_file).startswith(str(ignore_path)):
j += 1
continue
i += 1
# Read the content of the file
yield read_file_to_document(md_file)
if verbose:
print(f"Read {i} files so far and ignored {j}: total: {num_matched_files}")
print(f"Read {i} and ignored {j} {ext} files.")
print("Read all files")
def read_file_to_document(md_file, split=False, metadata: dict = None):
try:
loader = UnstructuredFileLoader(md_file)
if split:
# only supported for some file types
docs = loader.load_and_split()
else:
docs = loader.load()
except ValueError as e:
if "file type is not supported in partition" in str(e):
# Convert the file to .txt and try again
txt_file = convert_to_txt(md_file)
loader = UnstructuredFileLoader(txt_file)
if split:
docs = loader.load_and_split()
else:
docs = loader.load()
os.remove(txt_file) # Remove the temporary .txt file after processing
else:
raise e
for doc in docs:
if metadata is not None:
doc.metadata.update(metadata)
return docs
def convert_to_txt(file_path):
file_dir, file_name = os.path.split(file_path)
file_base, file_ext = os.path.splitext(file_name)
txt_file = os.path.join(file_dir, f"{file_base}.txt")
shutil.copyfile(file_path, txt_file)
return txt_file
def code_prompt():
# create prompt to pass in to LLM
template = """
Summarise what the code does below. Use Markdown in your output with the following template:
# a title
summary of script purpose
## keywords
   Comma separated list of 3-4 keywords suitable for this code
## classes
A description of each class
## functions/methods
   How the functions or methods of a class work, including the inputs and outputs for each function
## code examples of use
The code to summarise is here:
{txt}
"""
return PromptTemplate(
input_variables=["txt"],
template=template,
)
def text_prompt():
# create prompt to pass in to LLM
template = """
Summarise the text below, and add some keywords at the bottom to describe the overall purpose of the text.
The text to summarise is here:
{txt}
"""
return PromptTemplate(
input_variables=["txt"],
template=template,
)
# Function to summarise code from the OpenAI API
def generate_summary(a_file: pathlib.Path, memory, resummarise: bool=False, verbose: bool=False):
if a_file.is_dir():
raise ValueError(f"a_file must not be a directory: {a_file}")
new_file_name = a_file.with_suffix('.md')
if os.path.isfile(new_file_name) and not resummarise:
if verbose:
print(f"Skipping generating summary as found existing code summary file: {new_file_name}")
return
try:
with open(a_file, "r") as file:
file_text = file.read()
except Exception as e:
print(f"Error generating summary: {str(e)}")
return
if len(file_text) < 10:
if verbose:
print(f"Skipping generation as not enough information. Got: {file_text}")
return
    document = Document(page_content=file_text, metadata={"source": os.path.abspath(a_file)})
source_chunks = chunk_doc_to_docs([document], a_file.suffix)
code = True if str(a_file.suffix).lower() in CODE_EXTENSIONS else False
if code:
print("================================================")
print(f"Requesting code summary for {a_file} ")
print("================================================")
prompt = code_prompt()
else:
print("================================================")
print(f"Requesting text summary for {a_file} ")
print("================================================")
prompt = text_prompt()
num_chunks = len(source_chunks)
    i = 0
for chunk in source_chunks:
logging.info(f"Summarising chunk {i} of {num_chunks} of {a_file}")
i += 1
summary = my_llm.request_llm(
prompt.format(txt=chunk.page_content),
chat,
memory,
metadata={'task':'summarise_chunk'})
my_llm.save_to_file(new_file_name, summary + '\n\n', type = "a")
return pathlib.Path(new_file_name)
# Get source chunks from a repository
def get_source_docs(repo_path, extension, memory, ignore, resummarise, verbose):
source_chunks = []
for source in get_repo_docs(repo_path,
extension=extension,
memory=memory,
ignore=ignore,
resummarise=resummarise,
verbose=verbose):
splitter = choose_splitter(extension)
for chunk in splitter.split_text(source.page_content):
source_chunks.append(Document(page_content=chunk, metadata=source.metadata))
return source_chunks
def choose_splitter(extension: str, chunk_size: int=1024, chunk_overlap:int=0):
if extension == ".py":
return text_splitter.PythonCodeTextSplitter()
elif extension == ".md":
return text_splitter.MarkdownTextSplitter()
return text_splitter.RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
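# Example of the splitter selection above (hypothetical calls, for illustration only):
#   choose_splitter(".py")   # -> PythonCodeTextSplitter()
#   choose_splitter(".md")   # -> MarkdownTextSplitter()
#   choose_splitter(".txt")  # -> RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=0)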
def setup_memory(config):
memory = PubSubChatMessageHistory("qna_documents")
if config.get('bucket_name', None) is not None:
memory.set_bucket(config.get('bucket_name'))
memory.load_vectorstore_memory()
if config['reindex']:
# Create a new Chroma DB
exts = '.md,.py'
if config['ext']:
exts = config['ext']
source_chunks = get_source_docs(config['repo'],
extension=exts,
memory=memory,
ignore=config['ignore'],
resummarise=config['resummarise'],
verbose=config['verbose'])
memory.save_vectorstore_memory(source_chunks, verbose=config['verbose'])
return memory
def document_to_dict(document):
return {
'page_content': document.page_content,
'metadata': document.metadata,
}
def process_input(user_input: str,
verbose: bool =True,
bucket_name: str = None,
chat_history = None):
# more only needed if you need to recreate the vectorstore which we wont with web app
config = {
'reindex': False,
'bucket_name': bucket_name
}
if verbose:
print(f"user_input: {user_input}")
print(f"process_input config: {config}")
logging.info(f"user_input: {user_input}")
logging.info(f"process_input config: {config}")
memory = setup_memory(config)
answer = memory.question_memory(user_input,
llm=chat,
verbose=verbose,
chat_history = chat_history)
response = {'result': 'No answer found'}
if answer is not None:
response = {'result': answer['result']}
if answer.get('source_documents') is not None:
source_documents = [document_to_dict(doc) for doc in answer['source_documents']]
response['source_documents'] = source_documents
else:
logging.info('No source documents found')
return response
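# Sketch of the expected return shape (question and bucket name are illustrative only):
#   process_input("What does this repo do?", bucket_name="my-bucket")
#   # -> {"result": "<answer text>", "source_documents": [{"page_content": ..., "metadata": ...}]}
# "source_documents" is only present when the underlying chain returned sources.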
def add_single_file(filename: str, bucket_name, verbose=False):
config = {
'reindex': False, # as we will trigger file summary directly
'bucket_name': bucket_name
}
filename = pathlib.Path(filename)
if not filename.is_file():
raise ValueError(f"Filename was not a valid file path: {filename}")
docs = read_file_to_document(filename)
chunks = chunk_doc_to_docs(docs, filename.suffix)
memory = setup_memory(config)
docs_output = []
chunk_length = len(chunks)
i = 0
for chunk in chunks:
logging.info(f"Uploading chunk {i} of size {chunk_length} for {filename.name}")
i+=1
memory.add_user_message(chunk.page_content,
metadata={"task": "singlefile load original",
"source": filename.name})
docs_output.append(chunk.page_content)
return docs_output
def summarise_single_file(filename: str, bucket_name, verbose=False):
config = {
'reindex': False, # as we will trigger file summary directly
'bucket_name': bucket_name
}
filename = pathlib.Path(filename)
if not filename.is_file():
raise ValueError(f"Filename was not a valid file path: {filename}")
memory = setup_memory(config)
summary_filename = generate_summary(filename,
memory,
resummarise=True,
verbose=verbose)
if not summary_filename:
return f"No summary generated for {str(filename)}"
documents = read_file_to_document(summary_filename)
chunks = chunk_doc_to_docs(documents, filename.suffix)
output_content = ""
for chunk in chunks:
memory.add_user_message(chunk.page_content,
metadata={"task": "singlefile load summary",
"source": filename.name})
output_content += chunk.page_content + "\n\n"
return output_content
def chunk_doc_to_docs(documents: list, extension: str = ".md"):
"""Turns a Document object into a list of many Document chunks"""
for document in documents:
source_chunks = []
splitter = choose_splitter(extension)
for chunk in splitter.split_text(document.page_content):
source_chunks.append(Document(page_content=chunk, metadata=document.metadata))
return source_chunks
def main(config):
memory = setup_memory(config)
while True:
print('\n\033[31m' + '=Ask a question. CTRL + C to quit.')
print ("=If I don't know, tell me the right answer so I can learn and answer more accurately next time" + '\033[m')
user_input = input()
print('\033[31m')
answer = memory.question_memory(user_input, llm=chat, verbose=config['verbose'])
if answer is not None:
if answer.get('source_documents') is not None:
print('\n== Document sources:')
i = 0
for doc in answer.get('source_documents'):
i += 1
print(f'-- Source {i}')
print(f' - page_content:\n {doc.page_content}')
if config['verbose']:
print(f' - metadata: \n{doc.metadata}')
print('\n================================')
print('== Answer:\n\n' + answer['result'])
else:
print('Sorry')
print('\033[m')
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Chat with a GitHub repository",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("repo", help="The GitHub repository on local disk")
parser.add_argument("--reindex", action="store_true",
help="Whether to re-index the doc database that supply context to the Q&A")
parser.add_argument("--ext", help="Comma separated list of file extensions to include. Defaults to '.md,.py'")
parser.add_argument("--ignore", help="Directory to ignore file imports from. Defaults to 'env/'")
parser.add_argument("--resummarise", action="store_true", help="Recreate the code.md files describing the code")
parser.add_argument("--verbose", action="store_true", help="Include metadata such as sources in replies")
    parser.add_argument("--bucket", help="A Google Cloud Storage bucket name e.g. gs://your-bucket-name")
args = parser.parse_args()
config = vars(args)
try:
main(config)
except KeyboardInterrupt:
print(' - User exit.')
sys.exit(1) | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.docstore.document.Document",
"langchain.text_splitter.MarkdownTextSplitter",
"langchain.chat_models.ChatOpenAI",
"langchain.document_loaders.unstructured.UnstructuredFileLoader",
"langchain.text_splitter.PythonCodeTextSplitter",
"langchain.PromptTemplate"
] | [((245, 272), 'sys.path.append', 'sys.path.append', (['parent_dir'], {}), '(parent_dir)\n', (260, 272), False, 'import sys, os, shutil\n'), ((667, 692), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (677, 692), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1204, 1227), 'pathlib.Path', 'pathlib.Path', (['repo_path'], {}), '(repo_path)\n', (1216, 1227), False, 'import pathlib\n'), ((3797, 3821), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (3810, 3821), False, 'import sys, os, shutil\n'), ((3848, 3875), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (3864, 3875), False, 'import sys, os, shutil\n'), ((3891, 3933), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{file_base}.txt"""'], {}), "(file_dir, f'{file_base}.txt')\n", (3903, 3933), False, 'import sys, os, shutil\n'), ((3938, 3974), 'shutil.copyfile', 'shutil.copyfile', (['file_path', 'txt_file'], {}), '(file_path, txt_file)\n', (3953, 3974), False, 'import sys, os, shutil\n'), ((4521, 4579), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['txt']", 'template': 'template'}), "(input_variables=['txt'], template=template)\n", (4535, 4579), False, 'from langchain import PromptTemplate\n'), ((4840, 4898), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['txt']", 'template': 'template'}), "(input_variables=['txt'], template=template)\n", (4854, 4898), False, 'from langchain import PromptTemplate\n'), ((6905, 6932), 'pathlib.Path', 'pathlib.Path', (['new_file_name'], {}), '(new_file_name)\n', (6917, 6932), False, 'import pathlib\n'), ((7874, 7974), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'text_splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n', (7918, 7974), True, 'import langchain.text_splitter as text_splitter\n'), ((8013, 8054), 'my_llm.langchain_class.PubSubChatMessageHistory', 'PubSubChatMessageHistory', (['"""qna_documents"""'], {}), "('qna_documents')\n", (8037, 8054), False, 'from my_llm.langchain_class import PubSubChatMessageHistory\n'), ((9411, 9452), 'logging.info', 'logging.info', (['f"""user_input: {user_input}"""'], {}), "(f'user_input: {user_input}')\n", (9423, 9452), False, 'import logging\n'), ((9457, 9504), 'logging.info', 'logging.info', (['f"""process_input config: {config}"""'], {}), "(f'process_input config: {config}')\n", (9469, 9504), False, 'import logging\n'), ((10381, 10403), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (10393, 10403), False, 'import pathlib\n'), ((11324, 11346), 'pathlib.Path', 'pathlib.Path', (['filename'], {}), '(filename)\n', (11336, 11346), False, 'import pathlib\n'), ((13800, 13928), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Chat with a GitHub repository"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Chat with a GitHub repository',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (13823, 13928), False, 'import argparse\n'), ((211, 236), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (226, 236), False, 'import sys, os, shutil\n'), ((2949, 2980), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['md_file'], {}), '(md_file)\n', (2971, 2980), False, 'from langchain.document_loaders.unstructured import 
UnstructuredFileLoader\n'), ((5232, 5261), 'os.path.isfile', 'os.path.isfile', (['new_file_name'], {}), '(new_file_name)\n', (5246, 5261), False, 'import sys, os, shutil\n'), ((6551, 6617), 'logging.info', 'logging.info', (['f"""Summarising chunk {i} of {num_chunks} of {a_file}"""'], {}), "(f'Summarising chunk {i} of {num_chunks} of {a_file}')\n", (6563, 6617), False, 'import logging\n'), ((6824, 6886), 'my_llm.standards.save_to_file', 'my_llm.save_to_file', (['new_file_name', "(summary + '\\n\\n')"], {'type': '"""a"""'}), "(new_file_name, summary + '\\n\\n', type='a')\n", (6843, 6886), True, 'from my_llm import standards as my_llm\n'), ((7738, 7776), 'langchain.text_splitter.PythonCodeTextSplitter', 'text_splitter.PythonCodeTextSplitter', ([], {}), '()\n', (7774, 7776), True, 'import langchain.text_splitter as text_splitter\n'), ((10744, 10823), 'logging.info', 'logging.info', (['f"""Uploading chunk {i} of size {chunk_length} for {filename.name}"""'], {}), "(f'Uploading chunk {i} of size {chunk_length} for {filename.name}')\n", (10756, 10823), False, 'import logging\n'), ((7821, 7857), 'langchain.text_splitter.MarkdownTextSplitter', 'text_splitter.MarkdownTextSplitter', ([], {}), '()\n', (7855, 7857), True, 'import langchain.text_splitter as text_splitter\n'), ((10113, 10154), 'logging.info', 'logging.info', (['"""No source documents found"""'], {}), "('No source documents found')\n", (10125, 10154), False, 'import logging\n'), ((14903, 14914), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (14911, 14914), False, 'import sys, os, shutil\n'), ((3351, 3383), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['txt_file'], {}), '(txt_file)\n', (3373, 3383), False, 'from langchain.document_loaders.unstructured import UnstructuredFileLoader\n'), ((3520, 3539), 'os.remove', 'os.remove', (['txt_file'], {}), '(txt_file)\n', (3529, 3539), False, 'import sys, os, shutil\n'), ((5830, 5853), 'os.path.abspath', 'os.path.abspath', (['a_file'], {}), '(a_file)\n', (5845, 5853), False, 'import sys, os, shutil\n'), ((7533, 7587), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'source.metadata'}), '(page_content=chunk, metadata=source.metadata)\n', (7541, 7587), False, 'from langchain.docstore.document import Document\n'), ((12585, 12641), 'langchain.docstore.document.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'document.metadata'}), '(page_content=chunk, metadata=document.metadata)\n', (12593, 12641), False, 'from langchain.docstore.document import Document\n')] |
import os
import json
from typing import List
from dotenv import load_dotenv
from pydantic import BaseModel, Field
from supabase.client import Client, create_client
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.tools import StructuredTool
from langchain.chains.openai_functions import create_structured_output_chain
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
SystemMessagePromptTemplate,
)
import langchain
load_dotenv()
# Set debug to True to see A LOT of details of langchain's inner workings
# langchain.debug = True
# The name of the table in Supabase, where the vectors are stored
matchVectorFunctionName = "match_embeddings"
# Create the supabase client
SUPABASE_URL = os.getenv("SUPABASE_URL")
SUPABASE_KEY = os.getenv("SUPABASE_KEY")
supabase: Client = create_client(SUPABASE_URL, SUPABASE_KEY)
class ToolInputSchema(BaseModel):
question: str = Field(..., description="A fully formed question.")
class KnowledgeAnswer(BaseModel):
answer: str = Field(..., description="The answer to the question.")
sources: List[str] = Field(
...,
description="The sources which contributed to the answer.",
)
llm = ChatOpenAI(model_name="gpt-3.5-turbo-16k", temperature=0.3)
prompt_msgs = [
SystemMessagePromptTemplate.from_template(
"""You're an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. If you are not 100% sure tell the user.
Context:
{context}"""
),
HumanMessagePromptTemplate.from_template("{question}"),
]
prompt = ChatPromptTemplate.from_messages(prompt_msgs)
chain = create_structured_output_chain(KnowledgeAnswer, llm, prompt)
def get_answer(question: str) -> str:
try:
vectors = OpenAIEmbeddings().embed_documents([question])
embeddings = supabase.rpc(
matchVectorFunctionName, dict(query_embedding=vectors[0], match_count=7)
).execute()
print(f"⚡ Retrieved {len(embeddings.data)} vectors from Supabase:")
for entry in embeddings.data:
print("🔖 Title:", entry["metadata"]["title"])
print("🌐 Source:", entry["metadata"]["source"])
print("📊 Similarity:", entry["similarity"])
print("📄 Content:", entry["content"].replace("\n", " ")[:100] + "...")
print("-" * 50)
result = chain.run(context=json.dumps(embeddings.data), question=question)
print("📝 Result of knowledge extraction chain:", result)
return f"""Answer: {result.answer}
Sources: {json.dumps(result.sources)}
"""
except Exception as e:
print(e)
return "The wiki knowledgebase is currently not available. We are working on it. Tell the user to use the wiki directly. https://www.defichainwiki.com/"
description = """Use this if you need to answer any question about DeFiChain which does not require live-data. Make sure to include the source of the answer in your response."""
wikiTool = StructuredTool(
name="defichain_wiki_knowledge",
description=description,
func=get_answer,
args_schema=ToolInputSchema,
)
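# Hypothetical wiring sketch (not part of this module): a StructuredTool such as
# wikiTool is normally handed to an agent, e.g.
#   from langchain.agents import initialize_agent, AgentType
#   agent = initialize_agent(
#       [wikiTool], llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION
#   )
#   agent.run("What is DeFiChain?")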
if __name__ == "__main__":
while True:
question = input(
"Ask something, that can be answered using information from DeFiChainWiki: "
)
print("✅", get_answer(question))
| [
"langchain.chains.openai_functions.create_structured_output_chain",
"langchain.tools.StructuredTool",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.prompts.SystemMessagePromptTemplate.from_template",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((528, 541), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (539, 541), False, 'from dotenv import load_dotenv\n'), ((799, 824), 'os.getenv', 'os.getenv', (['"""SUPABASE_URL"""'], {}), "('SUPABASE_URL')\n", (808, 824), False, 'import os\n'), ((840, 865), 'os.getenv', 'os.getenv', (['"""SUPABASE_KEY"""'], {}), "('SUPABASE_KEY')\n", (849, 865), False, 'import os\n'), ((885, 926), 'supabase.client.create_client', 'create_client', (['SUPABASE_URL', 'SUPABASE_KEY'], {}), '(SUPABASE_URL, SUPABASE_KEY)\n', (898, 926), False, 'from supabase.client import Client, create_client\n'), ((1269, 1328), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo-16k"""', 'temperature': '(0.3)'}), "(model_name='gpt-3.5-turbo-16k', temperature=0.3)\n", (1279, 1328), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1687, 1732), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['prompt_msgs'], {}), '(prompt_msgs)\n', (1719, 1732), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1742, 1802), 'langchain.chains.openai_functions.create_structured_output_chain', 'create_structured_output_chain', (['KnowledgeAnswer', 'llm', 'prompt'], {}), '(KnowledgeAnswer, llm, prompt)\n', (1772, 1802), False, 'from langchain.chains.openai_functions import create_structured_output_chain\n'), ((3106, 3228), 'langchain.tools.StructuredTool', 'StructuredTool', ([], {'name': '"""defichain_wiki_knowledge"""', 'description': 'description', 'func': 'get_answer', 'args_schema': 'ToolInputSchema'}), "(name='defichain_wiki_knowledge', description=description,\n func=get_answer, args_schema=ToolInputSchema)\n", (3120, 3228), False, 'from langchain.tools import StructuredTool\n'), ((983, 1033), 'pydantic.Field', 'Field', (['...'], {'description': '"""A fully formed question."""'}), "(..., description='A fully formed question.')\n", (988, 1033), False, 'from pydantic import BaseModel, Field\n'), ((1088, 1141), 'pydantic.Field', 'Field', (['...'], {'description': '"""The answer to the question."""'}), "(..., description='The answer to the question.')\n", (1093, 1141), False, 'from pydantic import BaseModel, Field\n'), ((1167, 1237), 'pydantic.Field', 'Field', (['...'], {'description': '"""The sources which contributed to the answer."""'}), "(..., description='The sources which contributed to the answer.')\n", (1172, 1237), False, 'from pydantic import BaseModel, Field\n'), ((1350, 1610), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['"""You\'re an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. If you are not 100% sure tell the user.\n\n Context:\n {context}"""'], {}), '(\n """You\'re an elite algorithm, answering queries based solely on given context. If the context lacks the answer, state ignorance. 
If you are not 100% sure tell the user.\n\n Context:\n {context}"""\n )\n', (1391, 1610), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1620, 1674), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{question}"""'], {}), "('{question}')\n", (1660, 1674), False, 'from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate\n'), ((1870, 1888), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1886, 1888), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2493, 2520), 'json.dumps', 'json.dumps', (['embeddings.data'], {}), '(embeddings.data)\n', (2503, 2520), False, 'import json\n'), ((2684, 2710), 'json.dumps', 'json.dumps', (['result.sources'], {}), '(result.sources)\n', (2694, 2710), False, 'import json\n')] |
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts.prompt import PromptTemplate
from langchain.callbacks import get_openai_callback
#fix Error: module 'langchain' has no attribute 'verbose'
import langchain
langchain.verbose = False
class Chatbot:
def __init__(self, model_name, temperature, vectors):
self.model_name = model_name
self.temperature = temperature
self.vectors = vectors
qa_template = """
        You are a helpful AI assistant named Robby. The user gives you a file whose content is represented by the following pieces of context; use them to answer the question at the end.
If you don't know the answer, just say you don't know. Do NOT try to make up an answer.
If the question is not related to the context, politely respond that you are tuned to only answer questions that are related to the context.
Use as much detail as possible when responding.
context: {context}
=========
question: {question}
======
"""
QA_PROMPT = PromptTemplate(template=qa_template, input_variables=["context","question" ])
def conversational_chat(self, query):
"""
Start a conversational chat with a model via Langchain
"""
llm = ChatOpenAI(model_name=self.model_name, temperature=self.temperature)
retriever = self.vectors.as_retriever()
chain = ConversationalRetrievalChain.from_llm(llm=llm,
retriever=retriever, verbose=True, return_source_documents=True, max_tokens_limit=4097, combine_docs_chain_kwargs={'prompt': self.QA_PROMPT})
chain_input = {"question": query, "chat_history": st.session_state["history"]}
result = chain(chain_input)
st.session_state["history"].append((query, result["answer"]))
#count_tokens_chain(chain, chain_input)
return result["answer"]
def count_tokens_chain(chain, query):
with get_openai_callback() as cb:
result = chain.run(query)
st.write(f'###### Tokens used in this conversation : {cb.total_tokens} tokens')
return result
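# Minimal usage sketch (assumes an existing vector store `vectors` and an initialised
# st.session_state["history"] list; the model name and question are illustrative):
#   bot = Chatbot("gpt-3.5-turbo", 0.0, vectors)
#   answer = bot.conversational_chat("What is this document about?")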
| [
"langchain.chains.ConversationalRetrievalChain.from_llm",
"langchain.prompts.prompt.PromptTemplate",
"langchain.callbacks.get_openai_callback",
"langchain.chat_models.ChatOpenAI"
] | [((1142, 1219), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'qa_template', 'input_variables': "['context', 'question']"}), "(template=qa_template, input_variables=['context', 'question'])\n", (1156, 1219), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((1364, 1432), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'self.model_name', 'temperature': 'self.temperature'}), '(model_name=self.model_name, temperature=self.temperature)\n', (1374, 1432), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1500, 1697), 'langchain.chains.ConversationalRetrievalChain.from_llm', 'ConversationalRetrievalChain.from_llm', ([], {'llm': 'llm', 'retriever': 'retriever', 'verbose': '(True)', 'return_source_documents': '(True)', 'max_tokens_limit': '(4097)', 'combine_docs_chain_kwargs': "{'prompt': self.QA_PROMPT}"}), "(llm=llm, retriever=retriever, verbose\n =True, return_source_documents=True, max_tokens_limit=4097,\n combine_docs_chain_kwargs={'prompt': self.QA_PROMPT})\n", (1537, 1697), False, 'from langchain.chains import ConversationalRetrievalChain\n'), ((2025, 2046), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2044, 2046), False, 'from langchain.callbacks import get_openai_callback\n'), ((2096, 2175), 'streamlit.write', 'st.write', (['f"""###### Tokens used in this conversation : {cb.total_tokens} tokens"""'], {}), "(f'###### Tokens used in this conversation : {cb.total_tokens} tokens')\n", (2104, 2175), True, 'import streamlit as st\n')] |
"""
A simple CUI application to visualize and query a customer database using the `textual` package.
"""
from dataclasses import dataclass
import langchain
from langchain.cache import SQLiteCache
from langchain.llms import OpenAI
from textual.app import App, ComposeResult
from textual.containers import Horizontal
from textual.widgets import Button, DataTable, Footer, Header, Input
from llm_strategy import llm_strategy
langchain.llm_cache = SQLiteCache()
base_llm = OpenAI(max_tokens=1024)
@llm_strategy(base_llm)
@dataclass
class Customer:
key: str
first_name: str
last_name: str
birthdate: str
address: str
@property
def age(self: "Customer") -> int:
"""Return the current age of the customer.
This is a computed property based on `birthdate` and the current year (2022).
"""
raise NotImplementedError()
@dataclass
class CustomerDatabase:
customers: list[Customer]
def find_customer_key(self: "CustomerDatabase", query: str) -> list[str]:
"""Find the keys of the customers that match a natural language query best (sorted by closeness to the match).
We support semantic queries instead of SQL, so we can search for things like
"the customer that was born in 1990".
Args:
query: Natural language query
Returns:
            The keys of the best matching customers in the database, best match first.
"""
raise NotImplementedError()
def load(self: "CustomerDatabase"):
"""Load the customer database from a file."""
raise NotImplementedError()
def store(self: "CustomerDatabase"):
"""Store the customer database to a file."""
raise NotImplementedError()
@llm_strategy(base_llm)
@dataclass
class MockCustomerDatabase(CustomerDatabase):
def load(self):
self.customers = self.create_mock_customers(10)
def store(self):
pass
@staticmethod
def create_mock_customers(num_customers: int = 1) -> list[Customer]:
"""
Create mock customers with believable data (our customers are world citizens).
"""
raise NotImplementedError()
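# Note: the methods above deliberately raise NotImplementedError -- the
# @llm_strategy(base_llm) decorator is expected to supply their behaviour at runtime
# from the type hints and docstrings, which is why the docstrings are unusually precise.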
class CustomerDatabaseApp(App):
"""A simple textual application to visualize and query a customer database.
We show all the customers in a table and allow the user to query the database using natural language
in a search box at the bottom of the screen.
"""
PRIORITY_BINDINGS = False
BINDINGS = [("q", "quit", "Quit the application"), ("s", "screenshot", "Take a screenshot")]
database: CustomerDatabase = MockCustomerDatabase([])
data_table = DataTable(id="customer_table")
    search_box = Input(id="search_box", placeholder="Search for a customer (use any kind of query)")
footer_bar = Horizontal(search_box)
def on_mount(self) -> None:
self.database.load()
self.data_table.add_columns("First Name", "Last Name", "Birthdate", "Address", "Age")
self.search("")
def compose(self) -> ComposeResult:
self.footer_bar.styles.dock = "bottom"
self.footer_bar.styles.width = "100%"
self.footer_bar.styles.height = 4
self.data_table.styles.height = "auto"
self.data_table.styles.width = "100%"
self.screen.styles.height = "100%"
self.search_box.styles.width = "100%"
yield Header()
yield self.footer_bar
yield Footer()
yield self.data_table
def search(self, query: str):
"""Search the customer database using a natural language query."""
self.data_table.clear()
if not query:
for customer in self.database.customers:
self.data_table.add_row(
# customer.key,
customer.first_name,
customer.last_name,
customer.birthdate,
customer.address,
str(customer.age),
)
else:
keys = self.database.find_customer_key(query)
for key in keys:
customers_for_key = [customer for customer in self.database.customers if customer.key == key]
assert len(customers_for_key) == 1
customer = customers_for_key[0]
self.data_table.add_row(
# customer.key,
customer.first_name,
customer.last_name,
customer.birthdate,
customer.address,
str(customer.age),
)
def on_button_pressed(self, event: Button.Pressed) -> None:
if event.button is self.exit_button:
self.exit()
def on_input_submitted(self, event: Input.Submitted) -> None:
if event.input is self.search_box:
self.search(event.value)
if __name__ == "__main__":
app = CustomerDatabaseApp()
app.run()
| [
"langchain.llms.OpenAI",
"langchain.cache.SQLiteCache"
] | [((447, 460), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {}), '()\n', (458, 460), False, 'from langchain.cache import SQLiteCache\n'), ((472, 495), 'langchain.llms.OpenAI', 'OpenAI', ([], {'max_tokens': '(1024)'}), '(max_tokens=1024)\n', (478, 495), False, 'from langchain.llms import OpenAI\n'), ((499, 521), 'llm_strategy.llm_strategy', 'llm_strategy', (['base_llm'], {}), '(base_llm)\n', (511, 521), False, 'from llm_strategy import llm_strategy\n'), ((1731, 1753), 'llm_strategy.llm_strategy', 'llm_strategy', (['base_llm'], {}), '(base_llm)\n', (1743, 1753), False, 'from llm_strategy import llm_strategy\n'), ((2643, 2673), 'textual.widgets.DataTable', 'DataTable', ([], {'id': '"""customer_table"""'}), "(id='customer_table')\n", (2652, 2673), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((2691, 2778), 'textual.widgets.Input', 'Input', ([], {'id': '"""search_box"""', 'placeholder': '"""Search for a customer (use any kind of query"""'}), "(id='search_box', placeholder=\n 'Search for a customer (use any kind of query')\n", (2696, 2778), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((2791, 2813), 'textual.containers.Horizontal', 'Horizontal', (['search_box'], {}), '(search_box)\n', (2801, 2813), False, 'from textual.containers import Horizontal\n'), ((3369, 3377), 'textual.widgets.Header', 'Header', ([], {}), '()\n', (3375, 3377), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n'), ((3422, 3430), 'textual.widgets.Footer', 'Footer', ([], {}), '()\n', (3428, 3430), False, 'from textual.widgets import Button, DataTable, Footer, Header, Input\n')] |
import os
import cassio
import langchain
from langchain.cache import CassandraCache
from langchain_community.chat_models import ChatOpenAI
from langchain_core.messages import BaseMessage
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
use_cassandra = int(os.environ.get("USE_CASSANDRA_CLUSTER", "0"))
if use_cassandra:
from .cassandra_cluster_init import get_cassandra_connection
session, keyspace = get_cassandra_connection()
cassio.init(
session=session,
keyspace=keyspace,
)
else:
cassio.init(
token=os.environ["ASTRA_DB_APPLICATION_TOKEN"],
database_id=os.environ["ASTRA_DB_ID"],
keyspace=os.environ.get("ASTRA_DB_KEYSPACE"),
)
# inits
langchain.llm_cache = CassandraCache(session=None, keyspace=None)
llm = ChatOpenAI()
# custom runnables
def msg_splitter(msg: BaseMessage):
return [w.strip() for w in msg.content.split(",") if w.strip()]
# synonym-route preparation
synonym_prompt = ChatPromptTemplate.from_template(
"List up to five comma-separated synonyms of this word: {word}"
)
chain = synonym_prompt | llm | RunnableLambda(msg_splitter)
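# Illustrative invocation (assumes OPENAI_API_KEY is set; the word is hypothetical):
#   chain.invoke({"word": "happy"})  # -> e.g. ["glad", "joyful", "cheerful"]
# Repeated calls with the same word should be served from the CassandraCache above.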
| [
"langchain_core.prompts.ChatPromptTemplate.from_template",
"langchain_community.chat_models.ChatOpenAI",
"langchain_core.runnables.RunnableLambda",
"langchain.cache.CassandraCache"
] | [((788, 831), 'langchain.cache.CassandraCache', 'CassandraCache', ([], {'session': 'None', 'keyspace': 'None'}), '(session=None, keyspace=None)\n', (802, 831), False, 'from langchain.cache import CassandraCache\n'), ((838, 850), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (848, 850), False, 'from langchain_community.chat_models import ChatOpenAI\n'), ((1023, 1125), 'langchain_core.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['"""List up to five comma-separated synonyms of this word: {word}"""'], {}), "(\n 'List up to five comma-separated synonyms of this word: {word}')\n", (1055, 1125), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((315, 359), 'os.environ.get', 'os.environ.get', (['"""USE_CASSANDRA_CLUSTER"""', '"""0"""'], {}), "('USE_CASSANDRA_CLUSTER', '0')\n", (329, 359), False, 'import os\n'), ((500, 547), 'cassio.init', 'cassio.init', ([], {'session': 'session', 'keyspace': 'keyspace'}), '(session=session, keyspace=keyspace)\n', (511, 547), False, 'import cassio\n'), ((1159, 1187), 'langchain_core.runnables.RunnableLambda', 'RunnableLambda', (['msg_splitter'], {}), '(msg_splitter)\n', (1173, 1187), False, 'from langchain_core.runnables import RunnableLambda\n'), ((714, 749), 'os.environ.get', 'os.environ.get', (['"""ASTRA_DB_KEYSPACE"""'], {}), "('ASTRA_DB_KEYSPACE')\n", (728, 749), False, 'import os\n')] |
import numpy as np
from langchain.prompts import PromptTemplate
from langchain.schema import StrOutputParser, BaseRetriever
from langchain.schema.runnable import RunnablePassthrough
from langchain_google_genai import ChatGoogleGenerativeAI
from trulens_eval.feedback.provider.langchain import Langchain
from trulens_eval import Tru, Feedback, TruChain
from trulens_eval.feedback import Groundedness
from trulens_eval.schema import Select
from trulens_eval.utils.serial import all_queries
from trulens_eval.utils.json import jsonify
from src.embeddings import build_base_embeddings
from src.vectordb import load_chroma
from src.reranker import build_reranker
from src.retrieval_qa import build_rerank_retriever
from src.llms import build_llm
# Setup RAG
embedding_function = build_base_embeddings()
vectordb = load_chroma(embedding_function)
reranker = build_reranker()
retriever = build_rerank_retriever(vectordb, reranker)
llm = build_llm()
QA_TEMPLATE = """You are an assistant for question-answering tasks. \
Use the following pieces of retrieved context to answer the question. \
If you don't know the answer, just say you don't know.
Question: {question}
Context: {context}
Answer:"""
prompt = PromptTemplate.from_template(QA_TEMPLATE)
def format_docs(docs):
return "\n\n".join(doc.page_content for doc in docs)
rag_chain = (
    {"context": retriever | format_docs, "question": RunnablePassthrough()}
    | prompt
    | llm
    | StrOutputParser()
)
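# Quick sanity check (hypothetical question; assumes the Chroma store is populated):
#   rag_chain.invoke("What does the indexed corpus say about X?")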
# Evaluate with trulens-eval
# Define provider and database
_llm = ChatGoogleGenerativeAI(model="gemini-pro", temperature=0)
provider = Langchain(chain=_llm)
database_url = "sqlite:///data/trulens.db"
tru = Tru(database_url=database_url, database_redact_keys=True)
# tru.reset_database()
# Using TruChain
app_json = jsonify(rag_chain)
retrievers = []
for lens in all_queries(app_json):
try:
comp = lens.get_sole_item(rag_chain)
if isinstance(comp, BaseRetriever):
retrievers.append((lens, comp))
except Exception:
pass
context = (
(Select.RecordCalls + retrievers[0][0]).get_relevant_documents.rets[:].page_content
)
f_qa_relevance = Feedback(
    provider.relevance_with_cot_reasons, name="Answer Relevance"
).on_input_output()
f_context_relevance = (
Feedback(provider.qs_relevance_with_cot_reasons, name="Context Relevance")
.on_input()
.on(context)
.aggregate(np.mean)
)
grounded = Groundedness(groundedness_provider=provider)
f_groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
.on(context.collect())
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
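# Together, Answer Relevance, Context Relevance and Groundedness form the usual
# "RAG triad" of feedback functions evaluated against each recorded call.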
app_id = "Chain1"
tru_recorder = TruChain(
rag_chain,
app_id=app_id,
feedbacks=[
f_qa_relevance,
f_context_relevance,
f_groundedness,
],
)
qns = ...
for qn in qns:
with tru_recorder as recording:
res = rag_chain.invoke(qn)
# Results
# dashboard
tru.run_dashboard(port=8601)
# # dataframe
# records_df, feedback = tru.get_records_and_feedback(app_ids=[app_id])
# records_df.head()
| [
"langchain.prompts.PromptTemplate.from_template",
"langchain_google_genai.ChatGoogleGenerativeAI"
] | [((778, 801), 'src.embeddings.build_base_embeddings', 'build_base_embeddings', ([], {}), '()\n', (799, 801), False, 'from src.embeddings import build_base_embeddings\n'), ((813, 844), 'src.vectordb.load_chroma', 'load_chroma', (['embedding_function'], {}), '(embedding_function)\n', (824, 844), False, 'from src.vectordb import load_chroma\n'), ((856, 872), 'src.reranker.build_reranker', 'build_reranker', ([], {}), '()\n', (870, 872), False, 'from src.reranker import build_reranker\n'), ((885, 927), 'src.retrieval_qa.build_rerank_retriever', 'build_rerank_retriever', (['vectordb', 'reranker'], {}), '(vectordb, reranker)\n', (907, 927), False, 'from src.retrieval_qa import build_rerank_retriever\n'), ((934, 945), 'src.llms.build_llm', 'build_llm', ([], {}), '()\n', (943, 945), False, 'from src.llms import build_llm\n'), ((1205, 1246), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['QA_TEMPLATE'], {}), '(QA_TEMPLATE)\n', (1233, 1246), False, 'from langchain.prompts import PromptTemplate\n'), ((1536, 1593), 'langchain_google_genai.ChatGoogleGenerativeAI', 'ChatGoogleGenerativeAI', ([], {'model': '"""gemini-pro"""', 'temperature': '(0)'}), "(model='gemini-pro', temperature=0)\n", (1558, 1593), False, 'from langchain_google_genai import ChatGoogleGenerativeAI\n'), ((1605, 1626), 'trulens_eval.feedback.provider.langchain.Langchain', 'Langchain', ([], {'chain': '_llm'}), '(chain=_llm)\n', (1614, 1626), False, 'from trulens_eval.feedback.provider.langchain import Langchain\n'), ((1677, 1734), 'trulens_eval.Tru', 'Tru', ([], {'database_url': 'database_url', 'database_redact_keys': '(True)'}), '(database_url=database_url, database_redact_keys=True)\n', (1680, 1734), False, 'from trulens_eval import Tru, Feedback, TruChain\n'), ((1787, 1805), 'trulens_eval.utils.json.jsonify', 'jsonify', (['rag_chain'], {}), '(rag_chain)\n', (1794, 1805), False, 'from trulens_eval.utils.json import jsonify\n'), ((1834, 1855), 'trulens_eval.utils.serial.all_queries', 'all_queries', (['app_json'], {}), '(app_json)\n', (1845, 1855), False, 'from trulens_eval.utils.serial import all_queries\n'), ((2426, 2470), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'provider'}), '(groundedness_provider=provider)\n', (2438, 2470), False, 'from trulens_eval.feedback import Groundedness\n'), ((2709, 2812), 'trulens_eval.TruChain', 'TruChain', (['rag_chain'], {'app_id': 'app_id', 'feedbacks': '[f_qa_relevance, f_context_relevance, f_groundedness]'}), '(rag_chain, app_id=app_id, feedbacks=[f_qa_relevance,\n f_context_relevance, f_groundedness])\n', (2717, 2812), False, 'from trulens_eval import Tru, Feedback, TruChain\n'), ((2155, 2226), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasonse'], {'name': '"""Answer Relevance"""'}), "(provider.relevance_with_cot_reasonse, name='Answer Relevance')\n", (2163, 2226), False, 'from trulens_eval import Tru, Feedback, TruChain\n'), ((2280, 2354), 'trulens_eval.Feedback', 'Feedback', (['provider.qs_relevance_with_cot_reasons'], {'name': '"""Context Relevance"""'}), "(provider.qs_relevance_with_cot_reasons, name='Context Relevance')\n", (2288, 2354), False, 'from trulens_eval import Tru, Feedback, TruChain\n'), ((2494, 2571), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, name='Groundedness')\n", (2502, 2571), False, 'from trulens_eval import Tru, Feedback, TruChain\n')] |
# import environment variables
from data.env_variables import AZURE_OPENAI_DEPLOYMENT_NAME, AZURE_OPENAI_MODEL_NAME, \
AZURE_OPENAI_API_ENDPOINT, OPENAI_API_VERSION, AZURE_OPENAI_API_KEY, \
HUGGINGFACE_API_TOKEN, LLAMA2_API_TOKEN, OPENAI_API_KEY, NVIDIANGC_API_KEY
from dotenv import load_dotenv
# import software general purpose libs
import os
import psutil
import logging as log
# import langchain debug mode
from langchain.globals import set_debug
# import langchain document loader
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders import Docx2txtLoader
# import message handlers
from streamlit_chat import message
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
# import embedding processing objects
from langchain.text_splitter import RecursiveCharacterTextSplitter
# import vector database
from langchain.vectorstores.chroma import Chroma
# import data retrieval chain
from langchain.chains import RetrievalQAWithSourcesChain
# import langchain models from huggingface
from langchain.llms.huggingface_hub import HuggingFaceHub
from langchain.llms.huggingface_pipeline import HuggingFacePipeline
from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint
# import langchain models
from langchain.llms.gpt4all import GPT4All
from langchain.chat_models import AzureChatOpenAI
from langchain.chat_models import ChatOpenAI
from langchain_nvidia_ai_endpoints import ChatNVIDIA
from data.vectorize import get_embeddings_model
# import hugging face transformers lib - only for quantized models
# import transformers
# from transformers import BitsAndBytesConfig, AutoTokenizer, AutoModelForCausalLM, AutoConfig, pipeline
# import streamlit web framework
import streamlit as st
# start debugging
set_debug(True)
# start logging
log.basicConfig(filename="logs/app.log", level=log.DEBUG)
N_THREADS = psutil.cpu_count()
def load_vector_database():
log.info("Initializing Vector DB")
sentence_transformer_ef = get_embeddings_model()
st.session_state.vectordb = Chroma(persist_directory="./documents_cache/qa_retrieval", embedding_function=sentence_transformer_ef)
def get_local_gpt4all_models():
local_models = {}
local_models["ggml-gpt4all-j-v1.3-groovy"] = "./model_cache/ggml-gpt4all-j-v1.3-groovy.bin"
local_models["mistral-7b-openorca.Q4_0"] = "./model_cache/mistral-7b-openorca.Q4_0.gguf"
# local_models["ggml-mpt-7b-instruct"] = "./model_cache/ggml-mpt-7b-instruct.bin"
# local_models["ggml-gpt4all-l13b-snoozy"] = "./model_cache/ggml-gpt4all-l13b-snoozy.bin"
# local_models["ggml-v3-13b-hermes-q5_1"] = "./model_cache/ggml-v3-13b-hermes-q5_1.bin"
# local_models["ggml-vicuna-13b-1.1-q4_2"] = "./model_cache/ggml-vicuna-13b-1.1-q4_2.bin"
return local_models
def get_llm_instance(model_interface: str):
if model_interface == "azure":
llm_instance = AzureChatOpenAI(
deployment_name=AZURE_OPENAI_DEPLOYMENT_NAME,
model_name=AZURE_OPENAI_MODEL_NAME,
azure_endpoint=AZURE_OPENAI_API_ENDPOINT,
openai_api_version=OPENAI_API_VERSION,
openai_api_key=AZURE_OPENAI_API_KEY,
openai_api_type="azure"
)
elif model_interface == "openai":
llm_instance = ChatOpenAI(
temperature=0.1,
openai_api_key=""
)
elif model_interface == "gpt4all":
local_models = get_local_gpt4all_models()
callbacks = [StreamingStdOutCallbackHandler()]
llm_instance = GPT4All(
# model=local_models["mistral-7b-openorca.Q4_0"],
model="model_cache/zephyr-7b-beta.Q3_K_S.gguf",
# allow_download=True,
callbacks=callbacks,
verbose=True,
# device="gpu",
device="nvidia",
# n_threads=16,
# n_threads=N_THREADS,
)
elif model_interface == "huggingface-falcon":
llm_instance = HuggingFaceHub(
verbose=True,
task="text-generation",
repo_id="tiiuae/falcon-40b-instruct"
)
elif model_interface == "huggingface-mistral-7b":
llm_instance = HuggingFacePipeline.from_model_id(
# model_id="mistralai/Mistral-7B-Instruct-v0.1",
model_id="Open-Orca/Mistral-7B-OpenOrca",
task="text-generation",
pipeline_kwargs={"max_new_tokens": 10},
device=0
)
elif model_interface == "huggingface-endpoint-zephyr-7b":
endpoint_url = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
headers = {"Authorization": "Bearer "}
llm_instance = HuggingFaceEndpoint(
endpoint_url=endpoint_url,
task="text-generation",
huggingfacehub_api_token=HUGGINGFACE_API_TOKEN
)
elif model_interface == "zephyr-7b-beta":
llm_instance = HuggingFacePipeline.from_model_id(
model_id="HuggingFaceH4/zephyr-7b-beta",
task="text-generation",
# pipeline_kwargs={"max_new_tokens": 10},
device=0
)
elif model_interface == "huggingface-api-llama2":
llm_instance = HuggingFacePipeline.from_model_id(
model_id="meta-llama/Llama-2-7b-chat-hf",
task="text-generation",
device="cuda",
pipeline_kwargs={
"token": LLAMA2_API_TOKEN
}
)
elif model_interface == "nvidia-mixtral":
callbacks = [StreamingStdOutCallbackHandler()]
llm_instance = ChatNVIDIA(
model="mixtral_8x7b",
nvidia_api_key=NVIDIANGC_API_KEY,
callbacks=callbacks,
temperature=0.2,
top_p=0.7,
max_tokens=1024,
seed=42
)
return llm_instance
def initialize_conversation_chain():
vectordb = st.session_state.vectordb
callbacks = [StreamingStdOutCallbackHandler()]
local_models = get_local_gpt4all_models()
retriever_instance = vectordb.as_retriever(search_kwargs={'k':4})
# llm_instance = get_llm_instance("huggingface-endpoint-zephyr-7b")
llm_instance = get_llm_instance("nvidia-mixtral")
# llm_instance = get_llm_instance("gpt4all")
log.info("Inicializando")
st.session_state.qa_chain = RetrievalQAWithSourcesChain.from_chain_type(
llm=llm_instance,
chain_type="stuff",
retriever=retriever_instance
)
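# The chain stored in st.session_state is consumed in handle_user_input below via
#   qa_chain({"question": ...}, return_only_outputs=True)
# which yields a dict with "answer" and "sources" keys.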
def handle_user_input(user_question, response_container):
if user_question is None:
return
qa_chain:RetrievalQAWithSourcesChain = st.session_state.qa_chain
response_container.empty()
# Handle user Queries
with response_container.container():
with st.spinner("Gerando resposta..."):
log.info(f"Gerando resposta para consulta do cliente: {user_question}")
user_question += " (responda resumidamente em pt-br)"
response = qa_chain({"question":user_question}, return_only_outputs=True)
# st.write(response)
st.write(response["answer"])
with st.expander(label="Sources", expanded=False):
for source in response["sources"]:
st.write(source)
def process_new_uploads(uploaded_files):
vectordb:Chroma = st.session_state.vectordb
for doc in uploaded_files:
log.info(f"Processa arquivo: {doc.name}")
with open(os.path.join("tmp_documents", doc.name), "wb") as f:
f.write(doc.getbuffer())
extension = doc.name.split(".")[-1]
filepath = f"./tmp_documents/{doc.name}"
if extension == "pdf":
loader = PyPDFLoader(file_path=filepath)
elif extension == "docx" or extension == "doc":
loader = Docx2txtLoader(file_path=filepath)
text_splitter = RecursiveCharacterTextSplitter(chunk_size=640, chunk_overlap=128)
log.info("Particiona texto")
text_chunks = text_splitter.split_documents(loader.load())
# log.info("Chunks: %s", text_chunks)
log.info("Processa embeddings e adiciona documento ao Vector DB")
vectordb.add_documents(documents=text_chunks)
vectordb.persist()
os.remove(f"./tmp_documents/{doc.name}")
log.info(f"Arquivo processado com sucesso: {doc.name}")
def main():
load_dotenv()
st.set_page_config(page_title="Converse com seus documentos", page_icon=":books:")
st.header("Converse com seus documentos :books:")
if "vectordb" not in st.session_state:
with st.spinner("Inicializando Vector DB..."):
load_vector_database()
if "qa_chain" not in st.session_state:
with st.spinner("Inicializando AI Model..."):
initialize_conversation_chain()
user_question = st.text_input("Faça sua pergunta aqui")
response_container = st.empty()
if user_question:
handle_user_input(user_question, response_container)
user_question = None
with st.sidebar:
st.subheader("Seus documentos")
uploaded_files = st.file_uploader(
"Insira seu arquivo aqui (.pdf, .docx) e clique em 'Processar'",
accept_multiple_files=True
)
if st.button("Processar"):
with st.spinner("Processando..."):
process_new_uploads(uploaded_files)
if __name__ == "__main__":
main()
| [
"langchain_community.document_loaders.PyPDFLoader",
"langchain_community.document_loaders.Docx2txtLoader",
"langchain.llms.huggingface_pipeline.HuggingFacePipeline.from_model_id",
"langchain.vectorstores.chroma.Chroma",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
"langchain.llms.gpt4all.GPT4All",
"langchain.chat_models.ChatOpenAI",
"langchain.chains.RetrievalQAWithSourcesChain.from_chain_type",
"langchain.chat_models.AzureChatOpenAI",
"langchain.globals.set_debug",
"langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint",
"langchain.llms.huggingface_hub.HuggingFaceHub",
"langchain_nvidia_ai_endpoints.ChatNVIDIA"
] | [((1820, 1835), 'langchain.globals.set_debug', 'set_debug', (['(True)'], {}), '(True)\n', (1829, 1835), False, 'from langchain.globals import set_debug\n'), ((1853, 1910), 'logging.basicConfig', 'log.basicConfig', ([], {'filename': '"""logs/app.log"""', 'level': 'log.DEBUG'}), "(filename='logs/app.log', level=log.DEBUG)\n", (1868, 1910), True, 'import logging as log\n'), ((1925, 1943), 'psutil.cpu_count', 'psutil.cpu_count', ([], {}), '()\n', (1941, 1943), False, 'import psutil\n'), ((1978, 2012), 'logging.info', 'log.info', (['"""Initializing Vector DB"""'], {}), "('Initializing Vector DB')\n", (1986, 2012), True, 'import logging as log\n'), ((2043, 2065), 'data.vectorize.get_embeddings_model', 'get_embeddings_model', ([], {}), '()\n', (2063, 2065), False, 'from data.vectorize import get_embeddings_model\n'), ((2103, 2209), 'langchain.vectorstores.chroma.Chroma', 'Chroma', ([], {'persist_directory': '"""./documents_cache/qa_retrieval"""', 'embedding_function': 'sentence_transformer_ef'}), "(persist_directory='./documents_cache/qa_retrieval',\n embedding_function=sentence_transformer_ef)\n", (2109, 2209), False, 'from langchain.vectorstores.chroma import Chroma\n'), ((6313, 6338), 'logging.info', 'log.info', (['"""Inicializando"""'], {}), "('Inicializando')\n", (6321, 6338), True, 'import logging as log\n'), ((6371, 6487), 'langchain.chains.RetrievalQAWithSourcesChain.from_chain_type', 'RetrievalQAWithSourcesChain.from_chain_type', ([], {'llm': 'llm_instance', 'chain_type': '"""stuff"""', 'retriever': 'retriever_instance'}), "(llm=llm_instance, chain_type=\n 'stuff', retriever=retriever_instance)\n", (6414, 6487), False, 'from langchain.chains import RetrievalQAWithSourcesChain\n'), ((8476, 8489), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (8487, 8489), False, 'from dotenv import load_dotenv\n'), ((8494, 8581), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Converse com seus documentos"""', 'page_icon': '""":books:"""'}), "(page_title='Converse com seus documentos', page_icon=\n ':books:')\n", (8512, 8581), True, 'import streamlit as st\n'), ((8586, 8635), 'streamlit.header', 'st.header', (['"""Converse com seus documentos :books:"""'], {}), "('Converse com seus documentos :books:')\n", (8595, 8635), True, 'import streamlit as st\n'), ((8945, 8984), 'streamlit.text_input', 'st.text_input', (['"""Faça sua pergunta aqui"""'], {}), "('Faça sua pergunta aqui')\n", (8958, 8984), True, 'import streamlit as st\n'), ((9015, 9025), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (9023, 9025), True, 'import streamlit as st\n'), ((2949, 3203), 'langchain.chat_models.AzureChatOpenAI', 'AzureChatOpenAI', ([], {'deployment_name': 'AZURE_OPENAI_DEPLOYMENT_NAME', 'model_name': 'AZURE_OPENAI_MODEL_NAME', 'azure_endpoint': 'AZURE_OPENAI_API_ENDPOINT', 'openai_api_version': 'OPENAI_API_VERSION', 'openai_api_key': 'AZURE_OPENAI_API_KEY', 'openai_api_type': '"""azure"""'}), "(deployment_name=AZURE_OPENAI_DEPLOYMENT_NAME, model_name=\n AZURE_OPENAI_MODEL_NAME, azure_endpoint=AZURE_OPENAI_API_ENDPOINT,\n openai_api_version=OPENAI_API_VERSION, openai_api_key=\n AZURE_OPENAI_API_KEY, openai_api_type='azure')\n", (2964, 3203), False, 'from langchain.chat_models import AzureChatOpenAI\n'), ((5977, 6009), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (6007, 6009), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((7460, 7501), 'logging.info', 'log.info', 
(['f"""Processa arquivo: {doc.name}"""'], {}), "(f'Processa arquivo: {doc.name}')\n", (7468, 7501), True, 'import logging as log\n'), ((7951, 8016), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(640)', 'chunk_overlap': '(128)'}), '(chunk_size=640, chunk_overlap=128)\n', (7981, 8016), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((8034, 8062), 'logging.info', 'log.info', (['"""Particiona texto"""'], {}), "('Particiona texto')\n", (8042, 8062), True, 'import logging as log\n'), ((8193, 8258), 'logging.info', 'log.info', (['"""Processa embeddings e adiciona documento ao Vector DB"""'], {}), "('Processa embeddings e adiciona documento ao Vector DB')\n", (8201, 8258), True, 'import logging as log\n'), ((8349, 8389), 'os.remove', 'os.remove', (['f"""./tmp_documents/{doc.name}"""'], {}), "(f'./tmp_documents/{doc.name}')\n", (8358, 8389), False, 'import os\n'), ((8398, 8453), 'logging.info', 'log.info', (['f"""Arquivo processado com sucesso: {doc.name}"""'], {}), "(f'Arquivo processado com sucesso: {doc.name}')\n", (8406, 8453), True, 'import logging as log\n'), ((9173, 9204), 'streamlit.subheader', 'st.subheader', (['"""Seus documentos"""'], {}), "('Seus documentos')\n", (9185, 9204), True, 'import streamlit as st\n'), ((9230, 9348), 'streamlit.file_uploader', 'st.file_uploader', (['"""Insira seu arquivo aqui (.pdf, .docx) e clique em \'Processar\'"""'], {'accept_multiple_files': '(True)'}), '(\n "Insira seu arquivo aqui (.pdf, .docx) e clique em \'Processar\'",\n accept_multiple_files=True)\n', (9246, 9348), True, 'import streamlit as st\n'), ((9385, 9407), 'streamlit.button', 'st.button', (['"""Processar"""'], {}), "('Processar')\n", (9394, 9407), True, 'import streamlit as st\n'), ((3333, 3379), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.1)', 'openai_api_key': '""""""'}), "(temperature=0.1, openai_api_key='')\n", (3343, 3379), False, 'from langchain.chat_models import ChatOpenAI\n'), ((6808, 6841), 'streamlit.spinner', 'st.spinner', (['"""Gerando resposta..."""'], {}), "('Gerando resposta...')\n", (6818, 6841), True, 'import streamlit as st\n'), ((6855, 6926), 'logging.info', 'log.info', (['f"""Gerando resposta para consulta do cliente: {user_question}"""'], {}), "(f'Gerando resposta para consulta do cliente: {user_question}')\n", (6863, 6926), True, 'import logging as log\n'), ((7137, 7165), 'streamlit.write', 'st.write', (["response['answer']"], {}), "(response['answer'])\n", (7145, 7165), True, 'import streamlit as st\n'), ((7774, 7805), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', ([], {'file_path': 'filepath'}), '(file_path=filepath)\n', (7785, 7805), False, 'from langchain_community.document_loaders import PyPDFLoader\n'), ((8697, 8737), 'streamlit.spinner', 'st.spinner', (['"""Inicializando Vector DB..."""'], {}), "('Inicializando Vector DB...')\n", (8707, 8737), True, 'import streamlit as st\n'), ((8835, 8874), 'streamlit.spinner', 'st.spinner', (['"""Inicializando AI Model..."""'], {}), "('Inicializando AI Model...')\n", (8845, 8874), True, 'import streamlit as st\n'), ((3582, 3693), 'langchain.llms.gpt4all.GPT4All', 'GPT4All', ([], {'model': '"""model_cache/zephyr-7b-beta.Q3_K_S.gguf"""', 'callbacks': 'callbacks', 'verbose': '(True)', 'device': '"""nvidia"""'}), "(model='model_cache/zephyr-7b-beta.Q3_K_S.gguf', callbacks=callbacks,\n verbose=True, device='nvidia')\n", (3589, 3693), False, 'from langchain.llms.gpt4all import 
GPT4All\n'), ((7196, 7240), 'streamlit.expander', 'st.expander', ([], {'label': '"""Sources"""', 'expanded': '(False)'}), "(label='Sources', expanded=False)\n", (7207, 7240), True, 'import streamlit as st\n'), ((7529, 7568), 'os.path.join', 'os.path.join', (['"""tmp_documents"""', 'doc.name'], {}), "('tmp_documents', doc.name)\n", (7541, 7568), False, 'import os\n'), ((7883, 7917), 'langchain_community.document_loaders.Docx2txtLoader', 'Docx2txtLoader', ([], {'file_path': 'filepath'}), '(file_path=filepath)\n', (7897, 7917), False, 'from langchain_community.document_loaders import Docx2txtLoader\n'), ((9426, 9454), 'streamlit.spinner', 'st.spinner', (['"""Processando..."""'], {}), "('Processando...')\n", (9436, 9454), True, 'import streamlit as st\n'), ((3524, 3556), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (3554, 3556), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n'), ((4010, 4105), 'langchain.llms.huggingface_hub.HuggingFaceHub', 'HuggingFaceHub', ([], {'verbose': '(True)', 'task': '"""text-generation"""', 'repo_id': '"""tiiuae/falcon-40b-instruct"""'}), "(verbose=True, task='text-generation', repo_id=\n 'tiiuae/falcon-40b-instruct')\n", (4024, 4105), False, 'from langchain.llms.huggingface_hub import HuggingFaceHub\n'), ((7313, 7329), 'streamlit.write', 'st.write', (['source'], {}), '(source)\n', (7321, 7329), True, 'import streamlit as st\n'), ((4224, 4377), 'langchain.llms.huggingface_pipeline.HuggingFacePipeline.from_model_id', 'HuggingFacePipeline.from_model_id', ([], {'model_id': '"""Open-Orca/Mistral-7B-OpenOrca"""', 'task': '"""text-generation"""', 'pipeline_kwargs': "{'max_new_tokens': 10}", 'device': '(0)'}), "(model_id='Open-Orca/Mistral-7B-OpenOrca',\n task='text-generation', pipeline_kwargs={'max_new_tokens': 10}, device=0)\n", (4257, 4377), False, 'from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n'), ((4723, 4845), 'langchain_community.llms.huggingface_endpoint.HuggingFaceEndpoint', 'HuggingFaceEndpoint', ([], {'endpoint_url': 'endpoint_url', 'task': '"""text-generation"""', 'huggingfacehub_api_token': 'HUGGINGFACE_API_TOKEN'}), "(endpoint_url=endpoint_url, task='text-generation',\n huggingfacehub_api_token=HUGGINGFACE_API_TOKEN)\n", (4742, 4845), False, 'from langchain_community.llms.huggingface_endpoint import HuggingFaceEndpoint\n'), ((4957, 5069), 'langchain.llms.huggingface_pipeline.HuggingFacePipeline.from_model_id', 'HuggingFacePipeline.from_model_id', ([], {'model_id': '"""HuggingFaceH4/zephyr-7b-beta"""', 'task': '"""text-generation"""', 'device': '(0)'}), "(model_id='HuggingFaceH4/zephyr-7b-beta',\n task='text-generation', device=0)\n", (4990, 5069), False, 'from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n'), ((5243, 5410), 'langchain.llms.huggingface_pipeline.HuggingFacePipeline.from_model_id', 'HuggingFacePipeline.from_model_id', ([], {'model_id': '"""meta-llama/Llama-2-7b-chat-hf"""', 'task': '"""text-generation"""', 'device': '"""cuda"""', 'pipeline_kwargs': "{'token': LLAMA2_API_TOKEN}"}), "(model_id='meta-llama/Llama-2-7b-chat-hf',\n task='text-generation', device='cuda', pipeline_kwargs={'token':\n LLAMA2_API_TOKEN})\n", (5276, 5410), False, 'from langchain.llms.huggingface_pipeline import HuggingFacePipeline\n'), ((5615, 5760), 'langchain_nvidia_ai_endpoints.ChatNVIDIA', 'ChatNVIDIA', ([], {'model': '"""mixtral_8x7b"""', 'nvidia_api_key': 'NVIDIANGC_API_KEY', 'callbacks': 'callbacks', 
'temperature': '(0.2)', 'top_p': '(0.7)', 'max_tokens': '(1024)', 'seed': '(42)'}), "(model='mixtral_8x7b', nvidia_api_key=NVIDIANGC_API_KEY,\n callbacks=callbacks, temperature=0.2, top_p=0.7, max_tokens=1024, seed=42)\n", (5625, 5760), False, 'from langchain_nvidia_ai_endpoints import ChatNVIDIA\n'), ((5558, 5590), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (5588, 5590), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')] |
import logging
import re
from typing import Any, List, Optional
import langchain
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from init_openai import init_openai
logger = logging.getLogger("SoCloverAI")
init_openai()
model_name = "gpt-4-1106-preview"
def set_trial(trial: int) -> None:
langchain.llm_cache.inner_cache.set_trial(trial)
def dump_cache_stats_since_last_call() -> None:
logger.info(langchain.llm_cache.get_cache_stats_summary())
langchain.llm_cache.clear_cache_stats()
def create_llm_model(temperature: float, model_name: str) -> ChatOpenAI:
# mypy seems confused about the model_name parameter:
# Unexpected keyword argument "model_name" for "ChatOpenAI"
result = ChatOpenAI(temperature=temperature, model_name=model_name) # type: ignore
return result
async def predict(temperature: float, template: str, **kwargs: Any) -> List[str]:
prompt = PromptTemplate(
template=template.strip(), input_variables=["word0", "word1"]
)
llm = create_llm_model(temperature, model_name)
chain = LLMChain(llm=llm, prompt=prompt, verbose=False)
output = await chain.apredict(**kwargs)
logger.debug(output)
predictions = parse_candidates(output)
best = parse_best(output)
if best:
predictions = [best] + predictions
strip_chars = ' \t"'
predictions = [prediction.strip(strip_chars) for prediction in predictions]
predictions = [prediction for prediction in predictions if prediction]
# remove duplicates while preserving order
seen = set()
unique_predictions = list()
for prediction in predictions:
if prediction not in seen:
unique_predictions.append(prediction)
seen.add(prediction)
predictions = unique_predictions
return predictions
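# Collects comma-separated candidate words from any output line that starts with
# "Candidates:".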
def parse_candidates(output: str) -> List[str]:
result = []
for line in output.splitlines():
if not line.startswith("Candidates:"):
continue
candidates_str = line[len("Candidates: ") :]
candidates = candidates_str.split(",")
candidates = [candidate.strip() for candidate in candidates]
result += candidates
return result
pattern = re.compile(r"Best: (.*)")
def parse_best(output: str) -> Optional[str]:
match = pattern.search(output)
if match:
return match.group(1)
split_output = output.split()
if len(split_output) == 1:
logger.info(f"Invalid output format: {output}")
return split_output[0]
logger.info(f"Invalid output: {output}")
return None
| [
"langchain.llm_cache.get_cache_stats_summary",
"langchain_openai.ChatOpenAI",
"langchain.llm_cache.inner_cache.set_trial",
"langchain.llm_cache.clear_cache_stats",
"langchain.chains.LLMChain"
] | [((252, 283), 'logging.getLogger', 'logging.getLogger', (['"""SoCloverAI"""'], {}), "('SoCloverAI')\n", (269, 283), False, 'import logging\n'), ((284, 297), 'init_openai.init_openai', 'init_openai', ([], {}), '()\n', (295, 297), False, 'from init_openai import init_openai\n'), ((2273, 2297), 're.compile', 're.compile', (['"""Best: (.*)"""'], {}), "('Best: (.*)')\n", (2283, 2297), False, 'import re\n'), ((373, 421), 'langchain.llm_cache.inner_cache.set_trial', 'langchain.llm_cache.inner_cache.set_trial', (['trial'], {}), '(trial)\n', (414, 421), False, 'import langchain\n'), ((539, 578), 'langchain.llm_cache.clear_cache_stats', 'langchain.llm_cache.clear_cache_stats', ([], {}), '()\n', (576, 578), False, 'import langchain\n'), ((791, 849), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temperature', 'model_name': 'model_name'}), '(temperature=temperature, model_name=model_name)\n', (801, 849), False, 'from langchain_openai import ChatOpenAI\n'), ((1137, 1184), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'verbose': '(False)'}), '(llm=llm, prompt=prompt, verbose=False)\n', (1145, 1184), False, 'from langchain.chains import LLMChain\n'), ((488, 533), 'langchain.llm_cache.get_cache_stats_summary', 'langchain.llm_cache.get_cache_stats_summary', ([], {}), '()\n', (531, 533), False, 'import langchain\n')] |
import asyncio
import os
import json
import tiktoken
from transcribe import file_to_json_path, get_recordings, get_all_recordings, print_json
import langchain
from langchain.llms import OpenAI
from langchain.cache import SQLiteCache
from langchain.chat_models import ChatOpenAI
from langchain import PromptTemplate
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.schema import (
HumanMessage,
)
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from transformers import AutoTokenizer
# MAX_TRANSCRIPT_LENGTH = 1536
MAX_TRANSCRIPT_LENGTH = 1800
ANSWER_START_LENGTH = 50
NAMESPACE = 'Startup Interviews'
database_path = "data/.langchain.db"
langchain.llm_cache = SQLiteCache(database_path)
training_tokenizer_name = "huggyllama/llama-13b"
trainer_tokenizer = AutoTokenizer.from_pretrained(training_tokenizer_name)
async def main():
data = get_recordings(f"data/{NAMESPACE}")
# print(json.dumps(data, indent=4))
all_recordings = get_all_recordings(data)
# print_json(all_recordings)
# print_json(len(all_recordings))
# limit to only 2 recordings
# all_recordings = all_recordings[:10]
chat_items = []
for i, recording in enumerate(all_recordings):
# print(f"{i}: {recording['filePath']}")
# print(f"{i + 1} of {len(all_recordings)}: {recording['title']}")
json_file_path = file_to_json_path(recording['filePath'])
if not os.path.exists(json_file_path):
print(f"\tJSON file does not exist at {json_file_path}")
continue
with open(json_file_path, 'r') as json_file:
json_data = json.load(json_file)
# print(json.dumps(json_data, indent=4))
"""
"results": {
"channels": [
{
"alternatives": [
{
"transcript": "...",
"words": [
{
"word": "i",
"start": 0.0,
"end": 0.16,
"confidence": 0.99353653,
"speaker": 0,
"speaker_confidence": 0.8430252,
"punctuated_word": "I"
},
]
"""
transcript = json_data['results']['channels'][0]['alternatives'][0]
transcript_text = transcript['transcript']
words = transcript['words']
# print(len(words), len(transcript_text.split()))
# count unique speakers
num_speakers = get_num_speakers(words)
# print(len(speakers))
# print(num_speakers)
# if num_speakers > 5:
if num_speakers != 1:
continue
if token_length(transcript_text) > MAX_TRANSCRIPT_LENGTH:
print(f"\tSkipping \"{recording['title']}\" because it's too long: {token_length(transcript_text)}")
continue
# chat_item = {
# 'title': recording['title'],
# 'speakers': num_speakers,
# 'text': transcript_text,
# }
# duplicate recording
chat_item = recording.copy()
# merge in an object with the transcript text
chat_item.update({
'speakers': num_speakers,
'text': transcript_text,
})
chat_items.append(chat_item)
# limit to only 2 chat items
# chat_items = chat_items[:100]
# return
# add start_text and question to each chat item
print(f"Generating {len(chat_items)} questions")
count = len(chat_items)
for i, chat_item in enumerate(chat_items):
curr = i + 1
# print(f"{i+1} of {len(chat_items)} ({(perc)}) Generating question for {chat_item['title']}")
# print(f"{curr} of {count} ({round(curr/count*100, 2)}%) Generating question for {chat_item['title']}")
perc = round(curr/count*100, 2)
print(f"{curr} of {count} ({perc}%): Generating question for {chat_item['title']}")
start_text = get_start_text(chat_item['text'])
question = get_question(chat_item['title'], start_text)
print(f"\tQ: {question}")
chat_item.update({
'start_text': start_text,
'question': question,
})
# print_json(chat_items)
print_json(len(chat_items))
write_jsonl(chat_items, "train")
def get_num_speakers(words):
speakers = set()
for word in words:
speakers.add(word['speaker'])
num_speakers = len(speakers)
return num_speakers
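# Two tokenizers are used: tiktoken's cl100k_base for truncating and measuring
# transcripts, and the training tokenizer (llama) for reporting token lengths in
# the exported JSONL.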
enc = tiktoken.get_encoding("cl100k_base")
def get_tokens(contents):
return enc.encode(contents)
# return tokenizer(contents)['input_ids']
def decode_tokens(tokens):
return enc.decode(tokens)
# return tokenizer.decode(tokens)
def get_start_text(contents):
tokens = get_tokens(contents)
# if longer than ANSWER_START_LENGTH tokens, truncate and add ...
if len(tokens) > ANSWER_START_LENGTH:
return decode_tokens(tokens[:ANSWER_START_LENGTH]) + '...'
else:
return decode_tokens(tokens)
def token_length(contents):
return len(get_tokens(contents))
def token_length_for_trainer(contents):
return len(trainer_tokenizer(contents)['input_ids'])
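# Asks the chat model to reconstruct the interview question that most likely
# produced the given reply, using the interview title as context.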
def get_question(title, reply):
template="You are a helpful, truthful, detailed assistant writing a transcript of an interview."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template="""Task: Write the question which is most likely to produce the following reply.
Interview Title: {title}
Reply: {reply}
Question:"""
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
chat = ChatOpenAI(streaming=False, temperature=0)
resp = chat(chat_prompt.format_prompt(title=title, reply=reply).to_messages())
return resp.content
# Write chat to .json in format:
# [{ "instruction": "...", "input": "...", "output": "..." }, ...]
def write_jsonl(chat_items, name='chat'):
chat_file_path = f"data/{NAMESPACE}/{name}.jsonl"
# create rows
print(f"Creating rows: {len(chat_items)}")
rows = []
for chat_item in chat_items:
row = {
"instruction": chat_item['question'],
"input": "",
"output": chat_item['text'],
"instruction_length": token_length_for_trainer(chat_item['question']),
"output_length": token_length_for_trainer(chat_item['text']),
"title": chat_item['title'],
"start": chat_item['start_text'],
}
rows.append(row)
# write rows to file
with open(chat_file_path, 'w') as chat_file:
# for chat_item in chat_items:
# # start_text = get_start_text(chat_item['text'])
# # question = get_question(chat_item['title'], start_text)
# row = {
# # "instruction": question,
# "instruction": chat_item['question'],
# "input": "",
# "output": chat_item['text'],
# "len": token_length(chat_item['text']),
# "title": chat_item['title'],
# # "start": start_text,
# "start": chat_item['start_text'],
# }
for row in rows:
chat_file.write(json.dumps(row, ensure_ascii=False) + '\n')
print(f"Wrote {len(chat_items)} chat items to {chat_file_path}")
max_instruction_len = max([row['instruction_length'] for row in rows])
max_output_len = max([row['output_length'] for row in rows])
print(f"Max instruction length: {max_instruction_len}")
print(f"Max output length: {max_output_len}")
if __name__ == "__main__":
asyncio.run(main())
| [
"langchain.prompts.chat.SystemMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.cache.SQLiteCache",
"langchain.prompts.chat.HumanMessagePromptTemplate.from_template",
"langchain.prompts.chat.ChatPromptTemplate.from_messages"
] | [((822, 848), 'langchain.cache.SQLiteCache', 'SQLiteCache', (['database_path'], {}), '(database_path)\n', (833, 848), False, 'from langchain.cache import SQLiteCache\n'), ((919, 973), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['training_tokenizer_name'], {}), '(training_tokenizer_name)\n', (948, 973), False, 'from transformers import AutoTokenizer\n'), ((4987, 5023), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""cl100k_base"""'], {}), "('cl100k_base')\n", (5008, 5023), False, 'import tiktoken\n'), ((1004, 1039), 'transcribe.get_recordings', 'get_recordings', (['f"""data/{NAMESPACE}"""'], {}), "(f'data/{NAMESPACE}')\n", (1018, 1039), False, 'from transcribe import file_to_json_path, get_recordings, get_all_recordings, print_json\n'), ((1101, 1125), 'transcribe.get_all_recordings', 'get_all_recordings', (['data'], {}), '(data)\n', (1119, 1125), False, 'from transcribe import file_to_json_path, get_recordings, get_all_recordings, print_json\n'), ((5841, 5892), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (5882, 5892), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((6076, 6132), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_template'], {}), '(human_template)\n', (6116, 6132), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((6151, 6230), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message_prompt, human_message_prompt]'], {}), '([system_message_prompt, human_message_prompt])\n', (6183, 6230), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((6243, 6285), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'streaming': '(False)', 'temperature': '(0)'}), '(streaming=False, temperature=0)\n', (6253, 6285), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1497, 1537), 'transcribe.file_to_json_path', 'file_to_json_path', (["recording['filePath']"], {}), "(recording['filePath'])\n", (1514, 1537), False, 'from transcribe import file_to_json_path, get_recordings, get_all_recordings, print_json\n'), ((1553, 1583), 'os.path.exists', 'os.path.exists', (['json_file_path'], {}), '(json_file_path)\n', (1567, 1583), False, 'import os\n'), ((1753, 1773), 'json.load', 'json.load', (['json_file'], {}), '(json_file)\n', (1762, 1773), False, 'import json\n'), ((7833, 7868), 'json.dumps', 'json.dumps', (['row'], {'ensure_ascii': '(False)'}), '(row, ensure_ascii=False)\n', (7843, 7868), False, 'import json\n')] |
import json
import streamlit as st
import streamlit_ext as ste
import os
import time
import gc
import pandas as pd
from dotenv import load_dotenv
from langchain.chains import LLMChain # import LangChain libraries
from langchain.llms import OpenAI # import OpenAI model
from langchain.chat_models import ChatOpenAI # import OpenAI chat model
from langchain.callbacks import get_openai_callback # import OpenAI callbacks
from langchain.prompts import PromptTemplate # import PromptTemplate
from langchain.llms import HuggingFacePipeline # import HuggingFacePipeline
import torch # import torch
# pip install git+https://github.com/huggingface/transformers
from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList
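# Streamlit app: lets the user pick a prompt/task from prompts.json and a model
# (OpenAI API or a local Hugging Face pipeline), applies the prompt to pasted
# lines or a CSV column, and offers the collected outputs as a downloadable CSV.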
def main():
load_dotenv(".env")
pipe = None
open_ai_key = None
uploaded_file = None
# import css tasks and prompts
with open('prompts.json') as f:
promptlib = json.load(f)
hide_default_format = """
<style>
#MainMenu {visibility: hidden; }
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_default_format, unsafe_allow_html=True)
# title
st.title("Prompt Compass")
st.subheader(
"A Tool for Navigating LLMs and Prompts for Computational Social Science and Digital Humanities Research")
# Add Link to your repo
st.markdown(
'''
[![Repo](https://badgen.net/badge/icon/GitHub?icon=github&label)](https://github.com/ErikBorra/PromptCompass)
[![DOI](https://zenodo.org/badge/649855474.svg)](https://zenodo.org/badge/latestdoi/649855474)
''', unsafe_allow_html=True)
# load available models
model_with_names = [
model for model in promptlib['models'] if model['name']]
# create input area for model selection
input_values = {}
input_values['model'] = st.selectbox('Select a model', model_with_names,
format_func=lambda x: x['name'])
# If there is no previous state, set the default model as the first model
if not st.session_state.get('previous_model'):
st.session_state['previous_model'] = model_with_names[0]['name']
st.caption(f"Model info: [{input_values['model']['name']}]({input_values['model']['resource']})" + (
f". {input_values['model']['comment']}" if 'comment' in input_values['model'] else ""))
# ask for open ai key if no key is set in .env
if input_values['model']['resource'] in ["https://platform.openai.com/docs/models/gpt-3-5", "https://platform.openai.com/docs/models/gpt-4"]:
# Load the OpenAI API key from the environment variable
if os.getenv("OPENAI_API_KEY") is None or os.getenv("OPENAI_API_KEY") == "":
open_ai_key = st.text_input("Open AI API Key", "")
else:
open_ai_key = os.getenv("OPENAI_API_KEY")
# set default values
do_sample = False
temperature = 0.001
top_p = -1
max_new_tokens = -1
with st.expander("Advanced settings"):
if input_values['model']['resource'] not in ["https://platform.openai.com/docs/models/gpt-3-5", "https://platform.openai.com/docs/models/gpt-4"]:
st.markdown(
"""
**Set Maximum Length**: Determines the maximum number of tokens of the **generated** text. A token is approximately four characters word, although this depends on the model.
A value of -1 means the parameter will not be specified.
"""
)
max_new_tokens = st.number_input(
'Maximum Length', value=256, min_value=-1, step=1)
st.markdown(
"""
**Set do_sample**: This controls how the model generates text. If do_sample=True, the model will use a probabilistic approach to generate text, where the likelihood of each word being chosen depends on its predicted probability. Use the below parameters to further control its behavior. If do_sample=False, the model will use a deterministic approach and always choose the most likely next word.
"""
)
do_sample = st.radio(
'Set do_sample',
('False', 'True')
)
st.markdown(
"""
**Temperature**: Controls the randomness in the model's responses.
Lower values (closer to 0.0) make the output more deterministic, while higher values (closer to 2.0) make it more diverse.
A value of -1 means the parameter will not be specified.
"""
)
temperature = st.number_input(
'Set Temperature', min_value=-1.0, max_value=2.0, value=0.001, format="%.3f")
st.markdown(
"""
**Top P**: Also known as "nucleus sampling", is an alternative to temperature that can also be used to control the randomness of the model's responses.
It essentially trims the less likely options in the model's distribution of possible responses. Possible values lie between 0.0 and 1.0.
A value of -1 means the parameter will not be specified. Only applies if do_sample=True.
"""
)
        top_p = st.number_input(
            'Set Top-P', min_value=-1.0, max_value=1.0, value=-1.0)
# Check for correct values
allgood = True
# set model kwargs
model_kwargs = {}
if input_values['model']['resource'] not in ["https://platform.openai.com/docs/models/gpt-3-5", "https://platform.openai.com/docs/models/gpt-4"]:
# check if max_new_tokens is at least 1 or -1
if not (max_new_tokens > 0 or max_new_tokens == -1):
st.error(
'Error: Max Tokens must be at least 1. Choose -1 if you want to use the default model value.')
max_new_tokens = -1
allgood = False
if max_new_tokens > 0:
model_kwargs['max_new_tokens'] = max_new_tokens
if do_sample not in ['True', 'False']:
st.error(
'Error: do_Sample must be True or False')
do_sample = False
allgood = False
do_sample = True if do_sample == 'True' else False
if do_sample in [True, False]:
model_kwargs['do_sample'] = do_sample
if not (0 <= temperature <= 2 or temperature == -1):
st.error(
"Temperature value must be between 0 and 2. Choose -1 if you want to use the default model value.")
temperature = -1
allgood = False
if 0 <= temperature <= 2:
model_kwargs['temperature'] = temperature
if not (0 <= top_p <= 1 or top_p == -1):
st.error(
"Top P value must be between 0 and 1. Choose -1 if you want to use the default model value.")
top_p = -1
allgood = False
if 0 <= top_p <= 1:
model_kwargs['top_p'] = top_p
# create input area for task selection
tasks_with_names = [task for task in promptlib['tasks'] if task['name']]
task = st.selectbox('Select a task', tasks_with_names,
format_func=lambda x: x['name'] + " - " + x['authors'])
# Create input areas for prompts and user input
if task:
# concatenate all strings from prompt array
prompt = '\n'.join(task['prompt'])
# create input area for prompt
input_values['prompt'] = st.text_area(
"Inspect, and possibly modify, the prompt by ["+task['authors']+"]("+task['paper']+")", prompt, height=200)
# allow the user to select the input type
input_type = st.radio("Choose input type:",
('Text input', 'Upload a CSV'), horizontal=True)
if input_type == 'Text input':
# create input area for user input
input_values['user'] = st.text_area(
"Input to be analyzed with the prompt (one thing per line):",
"this user is happy\none user is just a user\nthe other user is a lier")
# if the user's input is not a list (e.g. a string), then split it by newlines
if isinstance(input_values['user'], str):
input_values['user'] = input_values['user'].split('\n')
original_data = pd.DataFrame(
input_values['user'], columns=['user_input'])
else:
# upload CSV
uploaded_file = st.file_uploader("Choose a CSV file", type="csv")
if uploaded_file is not None:
# convert the uploaded file to a dataframe
original_data = pd.read_csv(uploaded_file)
# ask user to select a column
column_to_extract = st.selectbox(
'Choose a column to apply the prompt on:', original_data.columns)
# process the selected column from the dataframe
input_values['user'] = original_data[column_to_extract].tolist()
data = pd.DataFrame()
# Determine the output file name
filename = uploaded_file.name if uploaded_file else 'output.csv'
base_filename, file_extension = os.path.splitext(filename)
output_filename = f"{base_filename}_promptcompass{file_extension}"
repeat_input = st.number_input(
'Enter the number of times the prompt/input combination should be repeated:', min_value=1, max_value=10, value=1, step=1)
# Submit button
submit_button = st.button('Submit')
st.write('---') # Add a horizontal line
# Process form submission
if submit_button and allgood:
if 'user' not in input_values or input_values['user'] is None:
st.error("No user input provided")
else:
with st.spinner(text="In progress..."):
try:
start_time = time.time()
st.write("Start time: " +
time.strftime("%H:%M:%S", time.localtime()))
if input_values['prompt'] and input_values['user']:
# create prompt template
# add location of user input to prompt
if task['location_of_input'] == 'before':
template = "{user_input}" + \
"\n\n" + input_values['prompt']
elif task['location_of_input'] == 'after':
template = input_values['prompt'] + \
"\n\n" + "{user_input}"
else:
template = input_values['prompt']
# make sure users don't forget the user input variable
if "{user_input}" not in template:
template = template + "\n\n{user_input}"
# fill prompt template
prompt_template = PromptTemplate(
input_variables=["user_input"], template=template)
# loop over user values in prompt
for key, user_input in enumerate(input_values['user']):
for i in range(repeat_input):
num_prompt_tokens = None
num_completion_tokens = None
cost = None
user_input = str(user_input).strip()
if user_input == "" or user_input == "nan":
continue
# set up and run the model
model_id = input_values['model']['name']
if model_id in ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4-0125-preview', 'gpt-4-1106-preview', 'gpt-4', 'gpt-4-32k', 'gpt-3.5-turbo-instruct', 'babbage-002', 'davinci-002']:
if open_ai_key is None or open_ai_key == "":
st.error(
"Please provide an Open AI API Key")
exit(1)
with get_openai_callback() as cb:
if model_id in ['gpt-3.5-turbo-0125', 'gpt-3.5-turbo', 'gpt-3.5-turbo-16k', 'gpt-4-0125-preview', 'gpt-4-1106-preview', 'gpt-4', 'gpt-4-32k']:
llm = ChatOpenAI(
model=model_id, openai_api_key=open_ai_key, **model_kwargs)
else:
llm = OpenAI(
model=model_id, openai_api_key=open_ai_key, **model_kwargs)
llm_chain = LLMChain(
llm=llm, prompt=prompt_template)
output = llm_chain.run(user_input)
st.success("Input: " + user_input + " \n\n " +
"Output: " + output)
st.text(cb)
num_prompt_tokens = cb.prompt_tokens
num_completion_tokens = cb.completion_tokens
cost = cb.total_cost
elif model_id in ['meta-llama/Llama-2-7b-chat-hf', 'meta-llama/Llama-2-13b-chat-hf']:
                                    if pipe is None:
with st.status('Loading model %s' % model_id) as status:
# to use the llama-2 models,
# you first need to get access to the llama-2 models via e.g. https://huggingface.co/meta-llama/Llama-2-7b-chat-hf
# once accepted, get a hugging face auth token https://huggingface.co/settings/tokens
# and then run `huggingface-cli login` on the command line, filling in the generated token
if model_id in ['meta-llama/Llama-2-7b-chat-hf', 'meta-llama/Llama-2-13b-chat-hf']:
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_auth_token=True)
else:
tokenizer = AutoTokenizer.from_pretrained(
model_id)
if model_id == "meta-llama/Llama-2-13b-chat-hf":
pipe = pipeline(
"text-generation",
model=model_id,
tokenizer=tokenizer,
# torch_dtype="auto",
trust_remote_code=True,
device_map="auto",
num_return_sequences=1,
eos_token_id=tokenizer.eos_token_id,
**model_kwargs
)
else:
pipe = pipeline(
"text-generation",
model=model_id,
tokenizer=tokenizer,
torch_dtype="auto",
trust_remote_code=True,
device_map="auto",
num_return_sequences=1,
eos_token_id=tokenizer.eos_token_id,
**model_kwargs
)
local_llm = HuggingFacePipeline(
pipeline=pipe)
status.update(
label='Model %s loaded' % model_id, state="complete")
llm_chain = LLMChain(
llm=local_llm, prompt=prompt_template)
output = llm_chain.run(user_input)
st.success("Input: " + user_input + " \n\n " +
"Output: " + output)
elif model_id in ['google/flan-t5-large', 'google/flan-t5-xl', 'tiiuae/falcon-7b-instruct', 'tiiuae/falcon-40b-instruct', 'databricks/dolly-v2-3b', 'databricks/dolly-v2-7b']:
if pipe is None:
with st.status('Loading model %s' % model_id) as status:
tokenizer = AutoTokenizer.from_pretrained(
model_id)
if model_id in ['google/flan-t5-large', 'google/flan-t5-xl']:
model = AutoModelForSeq2SeqLM.from_pretrained(
model_id, load_in_8bit=False, device_map='auto')
pipe = pipeline(
"text2text-generation",
model=model_id,
tokenizer=tokenizer,
torch_dtype="auto",
trust_remote_code=True,
device_map="auto",
num_return_sequences=1,
eos_token_id=tokenizer.eos_token_id,
**model_kwargs
)
# elif model_id in ['tiiuae/falcon-7b-instruct', 'tiiuae/falcon-40b-instruct']:
else:
pipe = pipeline(
"text-generation",
model=model_id,
tokenizer=tokenizer,
torch_dtype="auto",
trust_remote_code=True,
device_map="auto",
num_return_sequences=1,
eos_token_id=tokenizer.eos_token_id,
**model_kwargs
)
local_llm = HuggingFacePipeline(
pipeline=pipe)
status.update(
label='Model %s loaded' % model_id, state="complete")
llm_chain = LLMChain(
llm=local_llm, prompt=prompt_template)
output = llm_chain.run(user_input)
st.success("Input: " + user_input + " \n\n " +
"Output: " + output)
elif model_id == "mosaicml/mpt-7b-instruct":
if pipe is None:
with st.status('Loading model %s' % model_id) as status:
model = AutoModelForCausalLM.from_pretrained(
model_id,
trust_remote_code=True,
torch_dtype=torch.bfloat16,
max_seq_len=2048,
device_map="auto"
)
# MPT-7B model was trained using the EleutherAI/gpt-neox-20b tokenizer
tokenizer = AutoTokenizer.from_pretrained(
"EleutherAI/gpt-neox-20b")
# mtp-7b is trained to add "<|endoftext|>" at the end of generations
stop_token_ids = tokenizer.convert_tokens_to_ids(
["<|endoftext|>"])
# define custom stopping criteria object
class StopOnTokens(StoppingCriteria):
def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
for stop_id in stop_token_ids:
if input_ids[0][-1] == stop_id:
return True
return False
stopping_criteria = StoppingCriteriaList(
[StopOnTokens()])
pipe = pipeline(
task='text-generation',
model=model,
tokenizer=tokenizer,
torch_dtype="auto",
device_map="auto",
num_return_sequences=1,
eos_token_id=tokenizer.eos_token_id,
**model_kwargs,
return_full_text=True, # langchain expects the full text
stopping_criteria=stopping_criteria, # without this model will ramble
repetition_penalty=1.1 # without this output begins repeating
)
local_llm = HuggingFacePipeline(
pipeline=pipe)
status.update(
label='Model %s loaded' % model_id, state="complete")
llm_chain = LLMChain(
llm=local_llm, prompt=prompt_template)
output = llm_chain.run(user_input)
st.success("Input: " + user_input + " \n\n " +
"Output: " + output)
elif model_id == "allenai/OLMo-7B" or model_id == "ehartford/dolphin-2.1-mistral-7b" or model_id == "lvkaokao/mistral-7b-finetuned-orca-dpo-v2" or model_id == "lmsys/vicuna-13b-v1.5" or model_id == "microsoft/Orca-2-13b":
if pipe is None:
with st.status('Loading model %s' % model_id) as status:
model = AutoModelForCausalLM.from_pretrained(
model_id,
trust_remote_code=True,
torch_dtype=torch.bfloat16,
device_map="auto"
)
if model_id == "ehartford/dolphin-2.1-mistral-7b":
tokenizer = AutoTokenizer.from_pretrained(
model_id, use_fast=False)
else:
tokenizer = AutoTokenizer.from_pretrained(
model_id)
pipe = pipeline(
task='text-generation',
model=model,
tokenizer=tokenizer,
torch_dtype="auto",
device_map="auto",
num_return_sequences=1,
eos_token_id=tokenizer.eos_token_id,
**model_kwargs,
return_full_text=True, # langchain expects the full text
)
local_llm = HuggingFacePipeline(
pipeline=pipe)
status.update(
label='Model %s loaded' % model_id, state="complete")
llm_chain = LLMChain(
llm=local_llm, prompt=prompt_template)
output = llm_chain.run(user_input)
st.success("Input: " + user_input + " \n\n " +
"Output: " + output)
else:
st.error("Model %s not found" % model_id)
exit(1)
if not num_prompt_tokens or not num_completion_tokens:
num_prompt_tokens = len(tokenizer.tokenize(
prompt_template.format(user_input=user_input)))
num_completion_tokens = len(tokenizer.tokenize(
output))
# Prepare data as dictionary
original_row = original_data.loc[key].copy()
new_row = {
'user_input': user_input,
'output': output,
'llm': model_id,
'prompt name': task['name'],
'prompt authors': task['authors'],
'prompt': template,
'timestamp': time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),
'# prompt tokens': str(int(num_prompt_tokens)),
'# completion tokens': str(int(num_completion_tokens)),
'max_new_tokens': int(model_kwargs['max_new_tokens']) if "max_new_tokens" in model_kwargs else None,
'do_sample': int(model_kwargs['do_sample']) if "do_sample" in model_kwargs else None,
'temperature': model_kwargs['temperature'] if "temperature" in model_kwargs else None,
'top_p': model_kwargs['top_p'] if "top_p" in model_kwargs else None,
'cost': cost if cost is not None else None
}
# Update the original row with the new data
for key2, value in new_row.items():
original_row[key2] = value
# Append the updated row to the DataFrame
updated_row_df = pd.DataFrame([original_row])
data = pd.concat(
[data, updated_row_df], ignore_index=True)
st.subheader("Results")
st.dataframe(data, column_config={},
hide_index=True)
# make output available as csv
csv = data.to_csv(index=False).encode('utf-8')
ste.download_button(
"Download CSV",
csv,
output_filename,
"text/csv",
)
end_time = time.time()
elapsed_time = end_time - start_time
st.write("End time: " +
time.strftime("%H:%M:%S", time.localtime()))
st.write("Elapsed time: " +
str(round(elapsed_time, 2)) + " seconds")
except Exception as e:
st.error(e)
finally:
# free up variables
if 'data' in locals() and data is not None:
del data
if 'pipe' in locals() and pipe is not None:
del pipe
if 'llm_chain' in locals() and llm_chain is not None:
del llm_chain
if 'llm' in locals() and llm is not None:
del llm
if 'local_llm' in locals() and local_llm is not None:
del local_llm
if 'model' in locals() and model is not None:
del model
if 'tokenizer' in locals() and tokenizer is not None:
del tokenizer
gc.collect() # garbage collection
# empty cuda cache
torch.cuda.empty_cache()
if __name__ == "__main__":
main()
| [
"langchain.llms.OpenAI",
"langchain.llms.HuggingFacePipeline",
"langchain.chat_models.ChatOpenAI",
"langchain.callbacks.get_openai_callback",
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate"
] | [((813, 832), 'dotenv.load_dotenv', 'load_dotenv', (['""".env"""'], {}), "('.env')\n", (824, 832), False, 'from dotenv import load_dotenv\n'), ((1156, 1212), 'streamlit.markdown', 'st.markdown', (['hide_default_format'], {'unsafe_allow_html': '(True)'}), '(hide_default_format, unsafe_allow_html=True)\n', (1167, 1212), True, 'import streamlit as st\n'), ((1230, 1256), 'streamlit.title', 'st.title', (['"""Prompt Compass"""'], {}), "('Prompt Compass')\n", (1238, 1256), True, 'import streamlit as st\n'), ((1261, 1390), 'streamlit.subheader', 'st.subheader', (['"""A Tool for Navigating LLMs and Prompts for Computational Social Science and Digital Humanities Research"""'], {}), "(\n 'A Tool for Navigating LLMs and Prompts for Computational Social Science and Digital Humanities Research'\n )\n", (1273, 1390), True, 'import streamlit as st\n'), ((1422, 1705), 'streamlit.markdown', 'st.markdown', (['"""\n [![Repo](https://badgen.net/badge/icon/GitHub?icon=github&label)](https://github.com/ErikBorra/PromptCompass)\n [![DOI](https://zenodo.org/badge/649855474.svg)](https://zenodo.org/badge/latestdoi/649855474)\n """'], {'unsafe_allow_html': '(True)'}), '(\n """\n [![Repo](https://badgen.net/badge/icon/GitHub?icon=github&label)](https://github.com/ErikBorra/PromptCompass)\n [![DOI](https://zenodo.org/badge/649855474.svg)](https://zenodo.org/badge/latestdoi/649855474)\n """\n , unsafe_allow_html=True)\n', (1433, 1705), True, 'import streamlit as st\n'), ((1919, 2005), 'streamlit.selectbox', 'st.selectbox', (['"""Select a model"""', 'model_with_names'], {'format_func': "(lambda x: x['name'])"}), "('Select a model', model_with_names, format_func=lambda x: x[\n 'name'])\n", (1931, 2005), True, 'import streamlit as st\n'), ((2250, 2451), 'streamlit.caption', 'st.caption', (['(f"Model info: [{input_values[\'model\'][\'name\']}]({input_values[\'model\'][\'resource\']})"\n + (f". {input_values[\'model\'][\'comment\']}" if \'comment\' in\n input_values[\'model\'] else \'\'))'], {}), '(\n f"Model info: [{input_values[\'model\'][\'name\']}]({input_values[\'model\'][\'resource\']})"\n + (f". 
{input_values[\'model\'][\'comment\']}" if \'comment\' in\n input_values[\'model\'] else \'\'))\n', (2260, 2451), True, 'import streamlit as st\n'), ((7021, 7129), 'streamlit.selectbox', 'st.selectbox', (['"""Select a task"""', 'tasks_with_names'], {'format_func': "(lambda x: x['name'] + ' - ' + x['authors'])"}), "('Select a task', tasks_with_names, format_func=lambda x: x[\n 'name'] + ' - ' + x['authors'])\n", (7033, 7129), True, 'import streamlit as st\n'), ((9240, 9387), 'streamlit.number_input', 'st.number_input', (['"""Enter the number of times the prompt/input combination should be repeated:"""'], {'min_value': '(1)', 'max_value': '(10)', 'value': '(1)', 'step': '(1)'}), "(\n 'Enter the number of times the prompt/input combination should be repeated:'\n , min_value=1, max_value=10, value=1, step=1)\n", (9255, 9387), True, 'import streamlit as st\n'), ((9428, 9447), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (9437, 9447), True, 'import streamlit as st\n'), ((9453, 9468), 'streamlit.write', 'st.write', (['"""---"""'], {}), "('---')\n", (9461, 9468), True, 'import streamlit as st\n'), ((990, 1002), 'json.load', 'json.load', (['f'], {}), '(f)\n', (999, 1002), False, 'import json\n'), ((2132, 2170), 'streamlit.session_state.get', 'st.session_state.get', (['"""previous_model"""'], {}), "('previous_model')\n", (2152, 2170), True, 'import streamlit as st\n'), ((3045, 3077), 'streamlit.expander', 'st.expander', (['"""Advanced settings"""'], {}), "('Advanced settings')\n", (3056, 3077), True, 'import streamlit as st\n'), ((4283, 4592), 'streamlit.markdown', 'st.markdown', (['"""\n **Temperature**: Controls the randomness in the model\'s responses.\n Lower values (closer to 0.0) make the output more deterministic, while higher values (closer to 2.0) make it more diverse.\n A value of -1 means the parameter will not be specified.\n """'], {}), '(\n """\n **Temperature**: Controls the randomness in the model\'s responses.\n Lower values (closer to 0.0) make the output more deterministic, while higher values (closer to 2.0) make it more diverse.\n A value of -1 means the parameter will not be specified.\n """\n )\n', (4294, 4592), True, 'import streamlit as st\n'), ((4627, 4725), 'streamlit.number_input', 'st.number_input', (['"""Set Temperature"""'], {'min_value': '(-1.0)', 'max_value': '(2.0)', 'value': '(0.001)', 'format': '"""%.3f"""'}), "('Set Temperature', min_value=-1.0, max_value=2.0, value=\n 0.001, format='%.3f')\n", (4642, 4725), True, 'import streamlit as st\n'), ((4743, 5184), 'streamlit.markdown', 'st.markdown', (['"""\n **Top P**: Also known as "nucleus sampling", is an alternative to temperature that can also be used to control the randomness of the model\'s responses.\n It essentially trims the less likely options in the model\'s distribution of possible responses. Possible values lie between 0.0 and 1.0. \n A value of -1 means the parameter will not be specified. Only applies if do_sample=True.\n """'], {}), '(\n """\n **Top P**: Also known as "nucleus sampling", is an alternative to temperature that can also be used to control the randomness of the model\'s responses.\n It essentially trims the less likely options in the model\'s distribution of possible responses. Possible values lie between 0.0 and 1.0. \n A value of -1 means the parameter will not be specified. 
Only applies if do_sample=True.\n """\n )\n', (4754, 5184), True, 'import streamlit as st\n'), ((5213, 5284), 'streamlit.number_input', 'st.number_input', (['"""Set Top-P"""'], {'min_value': '(-1.0)', 'max_value': '(1.0)', 'value': '(-1.0)'}), "('Set Top-P', min_value=-1.0, max_value=1.0, value=-1.0)\n", (5228, 5284), True, 'import streamlit as st\n'), ((6364, 6482), 'streamlit.error', 'st.error', (['"""Temperature value must be between 0 and 2. Choose -1 if you want to use the default model value."""'], {}), "(\n 'Temperature value must be between 0 and 2. Choose -1 if you want to use the default model value.'\n )\n", (6372, 6482), True, 'import streamlit as st\n'), ((6668, 6780), 'streamlit.error', 'st.error', (['"""Top P value must be between 0 and 1. Choose -1 if you want to use the default model value."""'], {}), "(\n 'Top P value must be between 0 and 1. Choose -1 if you want to use the default model value.'\n )\n", (6676, 6780), True, 'import streamlit as st\n'), ((7384, 7517), 'streamlit.text_area', 'st.text_area', (["('Inspect, and possibly modify, the prompt by [' + task['authors'] + '](' +\n task['paper'] + ')')", 'prompt'], {'height': '(200)'}), "('Inspect, and possibly modify, the prompt by [' + task[\n 'authors'] + '](' + task['paper'] + ')', prompt, height=200)\n", (7396, 7517), True, 'import streamlit as st\n'), ((7590, 7669), 'streamlit.radio', 'st.radio', (['"""Choose input type:"""', "('Text input', 'Upload a CSV')"], {'horizontal': '(True)'}), "('Choose input type:', ('Text input', 'Upload a CSV'), horizontal=True)\n", (7598, 7669), True, 'import streamlit as st\n'), ((8948, 8962), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (8960, 8962), True, 'import pandas as pd\n'), ((9118, 9144), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (9134, 9144), False, 'import os\n'), ((2820, 2856), 'streamlit.text_input', 'st.text_input', (['"""Open AI API Key"""', '""""""'], {}), "('Open AI API Key', '')\n", (2833, 2856), True, 'import streamlit as st\n'), ((2897, 2924), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2906, 2924), False, 'import os\n'), ((3245, 3542), 'streamlit.markdown', 'st.markdown', (['"""\n **Set Maximum Length**: Determines the maximum number of tokens of the **generated** text. A token is approximately four characters word, although this depends on the model.\n A value of -1 means the parameter will not be specified.\n """'], {}), '(\n """\n **Set Maximum Length**: Determines the maximum number of tokens of the **generated** text. A token is approximately four characters word, although this depends on the model.\n A value of -1 means the parameter will not be specified.\n """\n )\n', (3256, 3542), True, 'import streamlit as st\n'), ((3592, 3658), 'streamlit.number_input', 'st.number_input', (['"""Maximum Length"""'], {'value': '(256)', 'min_value': '(-1)', 'step': '(1)'}), "('Maximum Length', value=256, min_value=-1, step=1)\n", (3607, 3658), True, 'import streamlit as st\n'), ((3688, 4139), 'streamlit.markdown', 'st.markdown', (['"""\n **Set do_sample**: This controls how the model generates text. If do_sample=True, the model will use a probabilistic approach to generate text, where the likelihood of each word being chosen depends on its predicted probability. Use the below parameters to further control its behavior. If do_sample=False, the model will use a deterministic approach and always choose the most likely next word. 
\n """'], {}), '(\n """\n **Set do_sample**: This controls how the model generates text. If do_sample=True, the model will use a probabilistic approach to generate text, where the likelihood of each word being chosen depends on its predicted probability. Use the below parameters to further control its behavior. If do_sample=False, the model will use a deterministic approach and always choose the most likely next word. \n """\n )\n', (3699, 4139), True, 'import streamlit as st\n'), ((4184, 4228), 'streamlit.radio', 'st.radio', (['"""Set do_sample"""', "('False', 'True')"], {}), "('Set do_sample', ('False', 'True'))\n", (4192, 4228), True, 'import streamlit as st\n'), ((5692, 5805), 'streamlit.error', 'st.error', (['"""Error: Max Tokens must be at least 1. Choose -1 if you want to use the default model value."""'], {}), "(\n 'Error: Max Tokens must be at least 1. Choose -1 if you want to use the default model value.'\n )\n", (5700, 5805), True, 'import streamlit as st\n'), ((6024, 6074), 'streamlit.error', 'st.error', (['"""Error: do_Sample must be True or False"""'], {}), "('Error: do_Sample must be True or False')\n", (6032, 6074), True, 'import streamlit as st\n'), ((7822, 7975), 'streamlit.text_area', 'st.text_area', (['"""Input to be analyzed with the prompt (one thing per line):"""', '"""this user is happy\none user is just a user\nthe other user is a lier"""'], {}), '(\'Input to be analyzed with the prompt (one thing per line):\',\n """this user is happy\none user is just a user\nthe other user is a lier""")\n', (7834, 7975), True, 'import streamlit as st\n'), ((8248, 8306), 'pandas.DataFrame', 'pd.DataFrame', (["input_values['user']"], {'columns': "['user_input']"}), "(input_values['user'], columns=['user_input'])\n", (8260, 8306), True, 'import pandas as pd\n'), ((8391, 8440), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a CSV file"""'], {'type': '"""csv"""'}), "('Choose a CSV file', type='csv')\n", (8407, 8440), True, 'import streamlit as st\n'), ((9642, 9676), 'streamlit.error', 'st.error', (['"""No user input provided"""'], {}), "('No user input provided')\n", (9650, 9676), True, 'import streamlit as st\n'), ((2720, 2747), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2729, 2747), False, 'import os\n'), ((2759, 2786), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2768, 2786), False, 'import os\n'), ((8575, 8601), 'pandas.read_csv', 'pd.read_csv', (['uploaded_file'], {}), '(uploaded_file)\n', (8586, 8601), True, 'import pandas as pd\n'), ((8685, 8763), 'streamlit.selectbox', 'st.selectbox', (['"""Choose a column to apply the prompt on:"""', 'original_data.columns'], {}), "('Choose a column to apply the prompt on:', original_data.columns)\n", (8697, 8763), True, 'import streamlit as st\n'), ((9709, 9742), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""In progress..."""'}), "(text='In progress...')\n", (9719, 9742), True, 'import streamlit as st\n'), ((9800, 9811), 'time.time', 'time.time', ([], {}), '()\n', (9809, 9811), False, 'import time\n'), ((29699, 29710), 'time.time', 'time.time', ([], {}), '()\n', (29708, 29710), False, 'import time\n'), ((30887, 30899), 'gc.collect', 'gc.collect', ([], {}), '()\n', (30897, 30899), False, 'import gc\n'), ((30981, 31005), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (31003, 31005), False, 'import torch\n'), ((10885, 10950), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['user_input']", 
'template': 'template'}), "(input_variables=['user_input'], template=template)\n", (10899, 10950), False, 'from langchain.prompts import PromptTemplate\n'), ((29168, 29191), 'streamlit.subheader', 'st.subheader', (['"""Results"""'], {}), "('Results')\n", (29180, 29191), True, 'import streamlit as st\n'), ((29216, 29269), 'streamlit.dataframe', 'st.dataframe', (['data'], {'column_config': '{}', 'hide_index': '(True)'}), '(data, column_config={}, hide_index=True)\n', (29228, 29269), True, 'import streamlit as st\n'), ((29458, 29527), 'streamlit_ext.download_button', 'ste.download_button', (['"""Download CSV"""', 'csv', 'output_filename', '"""text/csv"""'], {}), "('Download CSV', csv, output_filename, 'text/csv')\n", (29477, 29527), True, 'import streamlit_ext as ste\n'), ((30065, 30076), 'streamlit.error', 'st.error', (['e'], {}), '(e)\n', (30073, 30076), True, 'import streamlit as st\n'), ((9913, 9929), 'time.localtime', 'time.localtime', ([], {}), '()\n', (9927, 9929), False, 'import time\n'), ((28985, 29013), 'pandas.DataFrame', 'pd.DataFrame', (['[original_row]'], {}), '([original_row])\n', (28997, 29013), True, 'import pandas as pd\n'), ((29053, 29105), 'pandas.concat', 'pd.concat', (['[data, updated_row_df]'], {'ignore_index': '(True)'}), '([data, updated_row_df], ignore_index=True)\n', (29062, 29105), True, 'import pandas as pd\n'), ((29867, 29883), 'time.localtime', 'time.localtime', ([], {}), '()\n', (29881, 29883), False, 'import time\n'), ((12017, 12062), 'streamlit.error', 'st.error', (['"""Please provide an Open AI API Key"""'], {}), "('Please provide an Open AI API Key')\n", (12025, 12062), True, 'import streamlit as st\n'), ((12197, 12218), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (12216, 12218), False, 'from langchain.callbacks import get_openai_callback\n'), ((12844, 12885), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt_template'}), '(llm=llm, prompt=prompt_template)\n', (12852, 12885), False, 'from langchain.chains import LLMChain\n'), ((13048, 13117), 'streamlit.success', 'st.success', (["('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)"], {}), "('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)\n", (13058, 13117), True, 'import streamlit as st\n'), ((13209, 13220), 'streamlit.text', 'st.text', (['cb'], {}), '(cb)\n', (13216, 13220), True, 'import streamlit as st\n'), ((16761, 16808), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'local_llm', 'prompt': 'prompt_template'}), '(llm=local_llm, prompt=prompt_template)\n', (16769, 16808), False, 'from langchain.chains import LLMChain\n'), ((16959, 17028), 'streamlit.success', 'st.success', (["('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)"], {}), "('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)\n", (16969, 17028), True, 'import streamlit as st\n'), ((27858, 27874), 'time.localtime', 'time.localtime', ([], {}), '()\n', (27872, 27874), False, 'import time\n'), ((12459, 12529), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': 'model_id', 'openai_api_key': 'open_ai_key'}), '(model=model_id, openai_api_key=open_ai_key, **model_kwargs)\n', (12469, 12529), False, 'from langchain.chat_models import ChatOpenAI\n'), ((12675, 12741), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model': 'model_id', 'openai_api_key': 'open_ai_key'}), '(model=model_id, openai_api_key=open_ai_key, **model_kwargs)\n', (12681, 12741), False, 'from langchain.llms import OpenAI\n'), ((19967, 20014), 
'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'local_llm', 'prompt': 'prompt_template'}), '(llm=local_llm, prompt=prompt_template)\n', (19975, 20014), False, 'from langchain.chains import LLMChain\n'), ((20165, 20234), 'streamlit.success', 'st.success', (["('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)"], {}), "('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)\n", (20175, 20234), True, 'import streamlit as st\n'), ((13661, 13701), 'streamlit.status', 'st.status', (["('Loading model %s' % model_id)"], {}), "('Loading model %s' % model_id)\n", (13670, 13701), True, 'import streamlit as st\n'), ((16474, 16508), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'pipe'}), '(pipeline=pipe)\n', (16493, 16508), False, 'from langchain.llms import HuggingFacePipeline\n'), ((23653, 23700), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'local_llm', 'prompt': 'prompt_template'}), '(llm=local_llm, prompt=prompt_template)\n', (23661, 23700), False, 'from langchain.chains import LLMChain\n'), ((23851, 23920), 'streamlit.success', 'st.success', (["('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)"], {}), "('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)\n", (23861, 23920), True, 'import streamlit as st\n'), ((14398, 14458), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {'use_auth_token': '(True)'}), '(model_id, use_auth_token=True)\n', (14427, 14458), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((14622, 14661), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id)\n', (14651, 14661), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((14864, 15056), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_id', 'tokenizer': 'tokenizer', 'trust_remote_code': '(True)', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id'}), "('text-generation', model=model_id, tokenizer=tokenizer,\n trust_remote_code=True, device_map='auto', num_return_sequences=1,\n eos_token_id=tokenizer.eos_token_id, **model_kwargs)\n", (14872, 15056), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((15694, 15911), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_id', 'tokenizer': 'tokenizer', 'torch_dtype': '"""auto"""', 'trust_remote_code': '(True)', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id'}), "('text-generation', model=model_id, tokenizer=tokenizer,\n torch_dtype='auto', trust_remote_code=True, device_map='auto',\n num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs\n )\n", (15702, 15911), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((17381, 17421), 'streamlit.status', 'st.status', (["('Loading model %s' % model_id)"], {}), "('Loading model %s' % model_id)\n", (17390, 17421), True, 'import streamlit as st\n'), ((17489, 17528), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id)\n', (17518, 17528), False, 
'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((19673, 19707), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'pipe'}), '(pipeline=pipe)\n', (19692, 19707), False, 'from langchain.llms import HuggingFacePipeline\n'), ((26365, 26412), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'local_llm', 'prompt': 'prompt_template'}), '(llm=local_llm, prompt=prompt_template)\n', (26373, 26412), False, 'from langchain.chains import LLMChain\n'), ((26563, 26632), 'streamlit.success', 'st.success', (["('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)"], {}), "('Input: ' + user_input + ' \\n\\n ' + 'Output: ' + output)\n", (26573, 26632), True, 'import streamlit as st\n'), ((26754, 26795), 'streamlit.error', 'st.error', (["('Model %s not found' % model_id)"], {}), "('Model %s not found' % model_id)\n", (26762, 26795), True, 'import streamlit as st\n'), ((17741, 17831), 'transformers.AutoModelForSeq2SeqLM.from_pretrained', 'AutoModelForSeq2SeqLM.from_pretrained', (['model_id'], {'load_in_8bit': '(False)', 'device_map': '"""auto"""'}), "(model_id, load_in_8bit=False,\n device_map='auto')\n", (17778, 17831), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((17936, 18158), 'transformers.pipeline', 'pipeline', (['"""text2text-generation"""'], {'model': 'model_id', 'tokenizer': 'tokenizer', 'torch_dtype': '"""auto"""', 'trust_remote_code': '(True)', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id'}), "('text2text-generation', model=model_id, tokenizer=tokenizer,\n torch_dtype='auto', trust_remote_code=True, device_map='auto',\n num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs\n )\n", (17944, 18158), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((18893, 19110), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_id', 'tokenizer': 'tokenizer', 'torch_dtype': '"""auto"""', 'trust_remote_code': '(True)', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id'}), "('text-generation', model=model_id, tokenizer=tokenizer,\n torch_dtype='auto', trust_remote_code=True, device_map='auto',\n num_return_sequences=1, eos_token_id=tokenizer.eos_token_id, **model_kwargs\n )\n", (18901, 19110), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((20457, 20497), 'streamlit.status', 'st.status', (["('Loading model %s' % model_id)"], {}), "('Loading model %s' % model_id)\n", (20466, 20497), True, 'import streamlit as st\n'), ((20562, 20701), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {'trust_remote_code': '(True)', 'torch_dtype': 'torch.bfloat16', 'max_seq_len': '(2048)', 'device_map': '"""auto"""'}), "(model_id, trust_remote_code=True,\n torch_dtype=torch.bfloat16, max_seq_len=2048, device_map='auto')\n", (20598, 20701), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((21156, 21212), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', 
(['"""EleutherAI/gpt-neox-20b"""'], {}), "('EleutherAI/gpt-neox-20b')\n", (21185, 21212), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((22359, 22638), 'transformers.pipeline', 'pipeline', ([], {'task': '"""text-generation"""', 'model': 'model', 'tokenizer': 'tokenizer', 'torch_dtype': '"""auto"""', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id', 'return_full_text': '(True)', 'stopping_criteria': 'stopping_criteria', 'repetition_penalty': '(1.1)'}), "(task='text-generation', model=model, tokenizer=tokenizer,\n torch_dtype='auto', device_map='auto', num_return_sequences=1,\n eos_token_id=tokenizer.eos_token_id, **model_kwargs, return_full_text=\n True, stopping_criteria=stopping_criteria, repetition_penalty=1.1)\n", (22367, 22638), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((23366, 23400), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'pipe'}), '(pipeline=pipe)\n', (23385, 23400), False, 'from langchain.llms import HuggingFacePipeline\n'), ((24320, 24360), 'streamlit.status', 'st.status', (["('Loading model %s' % model_id)"], {}), "('Loading model %s' % model_id)\n", (24329, 24360), True, 'import streamlit as st\n'), ((24425, 24546), 'transformers.AutoModelForCausalLM.from_pretrained', 'AutoModelForCausalLM.from_pretrained', (['model_id'], {'trust_remote_code': '(True)', 'torch_dtype': 'torch.bfloat16', 'device_map': '"""auto"""'}), "(model_id, trust_remote_code=True,\n torch_dtype=torch.bfloat16, device_map='auto')\n", (24461, 24546), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((25301, 25514), 'transformers.pipeline', 'pipeline', ([], {'task': '"""text-generation"""', 'model': 'model', 'tokenizer': 'tokenizer', 'torch_dtype': '"""auto"""', 'device_map': '"""auto"""', 'num_return_sequences': '(1)', 'eos_token_id': 'tokenizer.eos_token_id', 'return_full_text': '(True)'}), "(task='text-generation', model=model, tokenizer=tokenizer,\n torch_dtype='auto', device_map='auto', num_return_sequences=1,\n eos_token_id=tokenizer.eos_token_id, **model_kwargs, return_full_text=True)\n", (25309, 25514), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((26078, 26112), 'langchain.llms.HuggingFacePipeline', 'HuggingFacePipeline', ([], {'pipeline': 'pipe'}), '(pipeline=pipe)\n', (26097, 26112), False, 'from langchain.llms import HuggingFacePipeline\n'), ((24937, 24992), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {'use_fast': '(False)'}), '(model_id, use_fast=False)\n', (24966, 24992), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n'), ((25156, 25195), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_id'], {}), '(model_id)\n', (25185, 25195), False, 'from transformers import AutoTokenizer, pipeline, AutoModelForSeq2SeqLM, AutoModelForCausalLM, StoppingCriteria, StoppingCriteriaList\n')] |
import os
import re
import streamlit as st
import pandas as pd
import langchain
from langchain.agents import AgentExecutor
from langchain.callbacks import StreamlitCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.tools import PythonAstREPLTool
from langchain.schema import SystemMessage
from tools import PythonPlotTool
from agents import OpenAIFunctionsAgentFix # https://github.com/langchain-ai/langchain/issues/6364
@st.cache_data
def load_csv(csv) -> pd.DataFrame:
return pd.read_csv(csv)
def get_agent(df, openai_api_key, number_of_head_rows=5, outdir="./datavizqa/static"):
SYSTEM_PROMPT = """You are working with a pandas dataframe in Python. The name of the dataframe is `df`.
This is the result of `print(df.head())`:
{df_head}""".format(df_head=str(df.head(number_of_head_rows).to_markdown()))
tools = [
PythonPlotTool(locals={"df": df}, outdir=outdir),
PythonAstREPLTool(name="python", locals={"df": df}),
]
llm = ChatOpenAI(
model="gpt-3.5-turbo-0613",
openai_api_key=openai_api_key,
temperature=0,
streaming=True,
)
agent = OpenAIFunctionsAgentFix.from_llm_and_tools(
llm=llm,
tools=tools,
system_message=SystemMessage(content=SYSTEM_PROMPT),
)
agent_exe = AgentExecutor.from_agent_and_tools(agent, tools)
return agent_exe
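# Illustrative usage sketch (not part of the original app; the API key and sample data
# below are hypothetical): the executor returned by get_agent can also be driven
# outside Streamlit, e.g. for a quick smoke test.
def _example_get_agent_usage():
    sample_df = pd.DataFrame({"city": ["Paris", "Rome"], "population": [2.1, 2.8]})
    executor = get_agent(sample_df, openai_api_key="sk-...")
    return executor.run("How many rows does the dataframe have?")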
langchain.debug = os.getenv("LANGCHAIN_DEBUG")
RE_MARKDOWN_IMAGE = r"!\[(.*?)\]\((.*?)\)"
st.set_page_config(page_title="DataVizQA", page_icon="🤖")
st.title("QA on your data with visualizations")
custom_css = """
<style>
p > img {
width: 100%
}
</style>
"""
st.markdown(custom_css, unsafe_allow_html=True)
ss = st.session_state
with st.sidebar:
ss.openai_api_key = st.text_input("Your OpenAI API key", placeholder="sk-xxxx")
ss.cot = st.radio(
"Expand new thoughts", [False, True], format_func=lambda x: "Yes" if x else "No")
csv = st.file_uploader("Upload your CSV file", type=["csv"])
if csv is not None:
df = load_csv(csv)
st.dataframe(df.head())
if key := ss.openai_api_key or os.getenv("OPENAI_API_KEY"):
ss.agent = get_agent(df, openai_api_key=key)
if "agent" in ss:
if "messages" not in ss:
ss.messages = [{"role": "assistant", "content": "Data loaded! Ask me anything! I can also plot charts!"}]
for message in ss.messages:
st.chat_message(message["role"]).write(message["content"])
if question := st.chat_input(placeholder="Your question"):
ss.messages.append({"role": "user", "content": question})
st.chat_message("user").write(question)
with st.chat_message("assistant"):
handler = StreamlitCallbackHandler(st.container(), expand_new_thoughts=ss.cot)
output_image = ""
for step in ss.agent.iter(question, callbacks=[handler]):
if output := step.get("intermediate_step"):
action, value = output[0]
if action.tool == "python_plot":
output_image = value
answer = step.get("output")
if output_image:
if re.search(RE_MARKDOWN_IMAGE, answer):
                    answer = re.sub(RE_MARKDOWN_IMAGE, rf"![\g<1>]({output_image})", answer)
else:
answer = answer + "\n" + f"![{output_image.split('/')[0]}]({output_image})"
ss.messages.append({"role": "assistant", "content": answer})
st.write(answer) | [
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.tools.PythonAstREPLTool",
"langchain.schema.SystemMessage",
"langchain.chat_models.ChatOpenAI"
] | [((1411, 1439), 'os.getenv', 'os.getenv', (['"""LANGCHAIN_DEBUG"""'], {}), "('LANGCHAIN_DEBUG')\n", (1420, 1439), False, 'import os\n'), ((1486, 1543), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""DataVizQA"""', 'page_icon': '"""🤖"""'}), "(page_title='DataVizQA', page_icon='🤖')\n", (1504, 1543), True, 'import streamlit as st\n'), ((1544, 1591), 'streamlit.title', 'st.title', (['"""QA on your data with visualizations"""'], {}), "('QA on your data with visualizations')\n", (1552, 1591), True, 'import streamlit as st\n'), ((1670, 1717), 'streamlit.markdown', 'st.markdown', (['custom_css'], {'unsafe_allow_html': '(True)'}), '(custom_css, unsafe_allow_html=True)\n', (1681, 1717), True, 'import streamlit as st\n'), ((1964, 2018), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload your CSV file"""'], {'type': "['csv']"}), "('Upload your CSV file', type=['csv'])\n", (1980, 2018), True, 'import streamlit as st\n'), ((513, 529), 'pandas.read_csv', 'pd.read_csv', (['csv'], {}), '(csv)\n', (524, 529), True, 'import pandas as pd\n'), ((1005, 1109), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo-0613"""', 'openai_api_key': 'openai_api_key', 'temperature': '(0)', 'streaming': '(True)'}), "(model='gpt-3.5-turbo-0613', openai_api_key=openai_api_key,\n temperature=0, streaming=True)\n", (1015, 1109), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1322, 1370), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', (['agent', 'tools'], {}), '(agent, tools)\n', (1356, 1370), False, 'from langchain.agents import AgentExecutor\n'), ((1783, 1842), 'streamlit.text_input', 'st.text_input', (['"""Your OpenAI API key"""'], {'placeholder': '"""sk-xxxx"""'}), "('Your OpenAI API key', placeholder='sk-xxxx')\n", (1796, 1842), True, 'import streamlit as st\n'), ((1856, 1950), 'streamlit.radio', 'st.radio', (['"""Expand new thoughts"""', '[False, True]'], {'format_func': "(lambda x: 'Yes' if x else 'No')"}), "('Expand new thoughts', [False, True], format_func=lambda x: 'Yes' if\n x else 'No')\n", (1864, 1950), True, 'import streamlit as st\n'), ((878, 926), 'tools.PythonPlotTool', 'PythonPlotTool', ([], {'locals': "{'df': df}", 'outdir': 'outdir'}), "(locals={'df': df}, outdir=outdir)\n", (892, 926), False, 'from tools import PythonPlotTool\n'), ((936, 987), 'langchain.tools.PythonAstREPLTool', 'PythonAstREPLTool', ([], {'name': '"""python"""', 'locals': "{'df': df}"}), "(name='python', locals={'df': df})\n", (953, 987), False, 'from langchain.tools import PythonAstREPLTool\n'), ((2488, 2530), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Your question"""'}), "(placeholder='Your question')\n", (2501, 2530), True, 'import streamlit as st\n'), ((1262, 1298), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'SYSTEM_PROMPT'}), '(content=SYSTEM_PROMPT)\n', (1275, 1298), False, 'from langchain.schema import SystemMessage\n'), ((2125, 2152), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (2134, 2152), False, 'import os\n'), ((2659, 2687), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2674, 2687), True, 'import streamlit as st\n'), ((3507, 3523), 'streamlit.write', 'st.write', (['answer'], {}), '(answer)\n', (3515, 3523), True, 'import streamlit as st\n'), ((2409, 2441), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2424, 2441), True, 
'import streamlit as st\n'), ((2606, 2629), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (2621, 2629), True, 'import streamlit as st\n'), ((2736, 2750), 'streamlit.container', 'st.container', ([], {}), '()\n', (2748, 2750), True, 'import streamlit as st\n'), ((3173, 3209), 're.search', 're.search', (['RE_MARKDOWN_IMAGE', 'answer'], {}), '(RE_MARKDOWN_IMAGE, answer)\n', (3182, 3209), False, 'import re\n'), ((3240, 3303), 're.sub', 're.sub', (['RE_MARKDOWN_IMAGE', 'f"""![\\\\g<1>]({output_image})"""', 'answer'], {}), "(RE_MARKDOWN_IMAGE, f'![\\\\g<1>]({output_image})', answer)\n", (3246, 3303), False, 'import re\n')] |
import inspect
from pathlib import Path
from typing import List
from langchain.chains import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.prompts import PromptTemplate
def get_documents(file_path: Path, llm: BaseChatModel):
file_extension = file_path.suffix
loader_class_name = get_best_loader(file_extension, llm)
print(f"loader selected {loader_class_name} for {file_path}")
if loader_class_name == "None":
raise Exception(f"No loader found for {file_extension} files.")
loader_class = get_loader_class(loader_class_name)
loader = loader_class(str(file_path))
return loader.load()
def get_loader_class(loader_class_name: str):
import langchain.document_loaders
loader_class = getattr(langchain.document_loaders, loader_class_name)
return loader_class
def get_best_loader(file_extension: str, llm: BaseChatModel):
loaders = get_loaders()
prompt = PromptTemplate(
input_variables=["file_extension", "loaders"],
template="""
Among the following loaders, which is the best to load a "{file_extension}" file? \
Only give me one the class name without any other special characters. If no relevant loader is found, respond "None".
Loaders: {loaders}
""",
)
chain = LLMChain(llm=llm, prompt=prompt, output_key="loader_class_name")
return chain({"file_extension": file_extension, "loaders": loaders})["loader_class_name"]
def get_loaders() -> List[str]:
import langchain_community.document_loaders
loaders = []
for _, obj in inspect.getmembers(langchain_community.document_loaders):
if inspect.isclass(obj):
loaders.append(obj.__name__)
return loaders
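# Illustrative usage sketch (not part of the original module): the chat model picks the
# loader class from the file extension. Assumes OPENAI_API_KEY is set and that the
# path below (hypothetical) exists on disk.
if __name__ == "__main__":
    from langchain.chat_models import ChatOpenAI
    docs = get_documents(Path("./data/report.pdf"), ChatOpenAI(temperature=0))
    print(f"Loaded {len(docs)} document(s)")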
| [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate"
] | [((946, 1275), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['file_extension', 'loaders']", 'template': '"""\n Among the following loaders, which is the best to load a "{file_extension}" file? Only give me one the class name without any other special characters. If no relevant loader is found, respond "None".\n\n Loaders: {loaders}\n """'}), '(input_variables=[\'file_extension\', \'loaders\'], template=\n """\n Among the following loaders, which is the best to load a "{file_extension}" file? Only give me one the class name without any other special characters. If no relevant loader is found, respond "None".\n\n Loaders: {loaders}\n """\n )\n', (960, 1275), False, 'from langchain.prompts import PromptTemplate\n'), ((1303, 1367), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'output_key': '"""loader_class_name"""'}), "(llm=llm, prompt=prompt, output_key='loader_class_name')\n", (1311, 1367), False, 'from langchain.chains import LLMChain\n'), ((1581, 1637), 'inspect.getmembers', 'inspect.getmembers', (['langchain_community.document_loaders'], {}), '(langchain_community.document_loaders)\n', (1599, 1637), False, 'import inspect\n'), ((1650, 1670), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (1665, 1670), False, 'import inspect\n')] |
"""Streamlit app for the ChatGPT clone."""
import dotenv
import langchain
import streamlit as st
import streamlit_chat
dotenv.load_dotenv(dotenv.find_dotenv(), override=True)
st.set_page_config(
page_title='You Custom Assistant',
page_icon='🤖'
)
st.subheader('Your Custom ChatGPT 🤖')
chat = langchain.chat_models.ChatOpenAI(
model_name='gpt-3.5-turbo', temperature=0.5)
# creating the messages (chat history) in the Streamlit session state
if 'messages' not in st.session_state:
st.session_state.messages = []
# creating the sidebar
with st.sidebar:
# streamlit text input widget for the system message (role)
system_message = st.text_input(label='System role')
# streamlit text input widget for the user message
user_prompt = st.text_input(label='Send a message')
if system_message:
if not any(isinstance(x, langchain.schema.SystemMessage) for x in st.session_state.messages):
st.session_state.messages.append(
langchain.schema.SystemMessage(content=system_message)
)
# if the user entered a question
if user_prompt:
st.session_state.messages.append(
langchain.schema.HumanMessage(content=user_prompt)
)
with st.spinner('Working on your request ...'):
# creating the ChatGPT response
response = chat(st.session_state.messages)
# adding the response's content to the session state
st.session_state.messages.append(
langchain.schema.AIMessage(content=response.content))
# adding a default SystemMessage if the user didn't enter one
if len(st.session_state.messages) >= 1:
if not isinstance(st.session_state.messages[0], langchain.schema.SystemMessage):
st.session_state.messages.insert(0, langchain.schema.SystemMessage(
content='You are a helpful assistant.'))
# displaying the messages (chat history)
for i, msg in enumerate(st.session_state.messages[1:]):
if i % 2 == 0:
streamlit_chat.message(msg.content, is_user=True,
key=f'{i} + 🤓') # user's question
else:
streamlit_chat.message(msg.content, is_user=False,
key=f'{i} + 🤖') # ChatGPT response
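# Illustrative sketch (not part of the app): the same history pattern without Streamlit.
# A SystemMessage fixes the role, HumanMessages carry user turns, and each reply is
# stored back as an AIMessage so the next call sees the full conversation.
def _example_chat_history():
    history = [
        langchain.schema.SystemMessage(content='You are a helpful assistant.'),
        langchain.schema.HumanMessage(content='What is the capital of France?')
    ]
    reply = chat(history)  # reuses the ChatOpenAI instance created above
    history.append(langchain.schema.AIMessage(content=reply.content))
    return history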
| [
"langchain.schema.AIMessage",
"langchain.schema.HumanMessage",
"langchain.schema.SystemMessage",
"langchain.chat_models.ChatOpenAI"
] | [((178, 246), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""You Custom Assistant"""', 'page_icon': '"""🤖"""'}), "(page_title='You Custom Assistant', page_icon='🤖')\n", (196, 246), True, 'import streamlit as st\n'), ((257, 294), 'streamlit.subheader', 'st.subheader', (['"""Your Custom ChatGPT 🤖"""'], {}), "('Your Custom ChatGPT 🤖')\n", (269, 294), True, 'import streamlit as st\n'), ((303, 380), 'langchain.chat_models.ChatOpenAI', 'langchain.chat_models.ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.5)'}), "(model_name='gpt-3.5-turbo', temperature=0.5)\n", (335, 380), False, 'import langchain\n'), ((140, 160), 'dotenv.find_dotenv', 'dotenv.find_dotenv', ([], {}), '()\n', (158, 160), False, 'import dotenv\n'), ((657, 691), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""System role"""'}), "(label='System role')\n", (670, 691), True, 'import streamlit as st\n'), ((765, 802), 'streamlit.text_input', 'st.text_input', ([], {'label': '"""Send a message"""'}), "(label='Send a message')\n", (778, 802), True, 'import streamlit as st\n'), ((2004, 2069), 'streamlit_chat.message', 'streamlit_chat.message', (['msg.content'], {'is_user': '(True)', 'key': 'f"""{i} + 🤓"""'}), "(msg.content, is_user=True, key=f'{i} + 🤓')\n", (2026, 2069), False, 'import streamlit_chat\n'), ((2138, 2205), 'streamlit_chat.message', 'streamlit_chat.message', (['msg.content'], {'is_user': '(False)', 'key': 'f"""{i} + 🤖"""'}), "(msg.content, is_user=False, key=f'{i} + 🤖')\n", (2160, 2205), False, 'import streamlit_chat\n'), ((1172, 1222), 'langchain.schema.HumanMessage', 'langchain.schema.HumanMessage', ([], {'content': 'user_prompt'}), '(content=user_prompt)\n', (1201, 1222), False, 'import langchain\n'), ((1247, 1288), 'streamlit.spinner', 'st.spinner', (['"""Working on your request ..."""'], {}), "('Working on your request ...')\n", (1257, 1288), True, 'import streamlit as st\n'), ((1505, 1557), 'langchain.schema.AIMessage', 'langchain.schema.AIMessage', ([], {'content': 'response.content'}), '(content=response.content)\n', (1531, 1557), False, 'import langchain\n'), ((1794, 1864), 'langchain.schema.SystemMessage', 'langchain.schema.SystemMessage', ([], {'content': '"""You are a helpful assistant."""'}), "(content='You are a helpful assistant.')\n", (1824, 1864), False, 'import langchain\n'), ((991, 1045), 'langchain.schema.SystemMessage', 'langchain.schema.SystemMessage', ([], {'content': 'system_message'}), '(content=system_message)\n', (1021, 1045), False, 'import langchain\n')] |
from dotenv import load_dotenv
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.agents import initialize_agent, AgentType
from agent.tools.ontology import ontology_tool
from agent.tools.interview import PAInterview
import os
from langchain.prompts import MessagesPlaceholder
from langchain.memory import ConversationBufferMemory
agent_kwargs = {
"extra_prompt_messages": [MessagesPlaceholder(variable_name="memory")],
}
memory = ConversationBufferMemory(memory_key="memory", return_messages=True)
#langchain.debug = True
load_dotenv()
openai_api_key=os.environ['OPENAI_API_KEY']
# Because we are using functions, we need to use model gpt-4-0613
llm=ChatOpenAI(openai_api_key=openai_api_key,temperature=0, model="gpt-4-0613")
tools = [ontology_tool,PAInterview()]
agent = initialize_agent(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True, agent_kwargs=agent_kwargs, memory=memory)
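# Illustrative usage sketch (not part of the original module): because the agent was
# built with ConversationBufferMemory and a MessagesPlaceholder, a follow-up question
# can refer back to the previous turn. The example questions are hypothetical.
if __name__ == "__main__":
    print(agent.run("What tools do you have available?"))
    print(agent.run("Use the first one you mentioned to help me."))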
| [
"langchain.agents.initialize_agent",
"langchain.memory.ConversationBufferMemory",
"langchain.prompts.MessagesPlaceholder",
"langchain.chat_models.ChatOpenAI"
] | [((462, 529), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""memory"""', 'return_messages': '(True)'}), "(memory_key='memory', return_messages=True)\n", (486, 529), False, 'from langchain.memory import ConversationBufferMemory\n'), ((555, 568), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (566, 568), False, 'from dotenv import load_dotenv\n'), ((684, 760), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'openai_api_key', 'temperature': '(0)', 'model': '"""gpt-4-0613"""'}), "(openai_api_key=openai_api_key, temperature=0, model='gpt-4-0613')\n", (694, 760), False, 'from langchain.chat_models import ChatOpenAI\n'), ((808, 930), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.OPENAI_FUNCTIONS', 'verbose': '(True)', 'agent_kwargs': 'agent_kwargs', 'memory': 'memory'}), '(tools, llm, agent=AgentType.OPENAI_FUNCTIONS, verbose=True,\n agent_kwargs=agent_kwargs, memory=memory)\n', (824, 930), False, 'from langchain.agents import initialize_agent, AgentType\n'), ((784, 797), 'agent.tools.interview.PAInterview', 'PAInterview', ([], {}), '()\n', (795, 797), False, 'from agent.tools.interview import PAInterview\n'), ((405, 448), 'langchain.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""memory"""'}), "(variable_name='memory')\n", (424, 448), False, 'from langchain.prompts import MessagesPlaceholder\n')] |
"""Chat agent with question answering
"""
from dotenv import load_dotenv
from langchain.cache import InMemoryCache
import langchain
import os
from dataclasses import dataclass
from langchain.chains import LLMChain, LLMRequestsChain
from langchain import Wikipedia, OpenAI
from langchain.agents.react.base import DocstoreExplorer
from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent
from langchain.prompts import PromptTemplate
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.agents.conversational.base import ConversationalAgent
from datetime import datetime
# Load the environment variables
load_dotenv()
langchain.llm_cache = InMemoryCache()
news_api_key = os.getenv("NEWS_API_KEY")
@dataclass
class ChatAgent:
agent_executor: AgentExecutor = None
def _get_docstore_agent(self):
docstore = DocstoreExplorer(Wikipedia())
docstore_tools = [
Tool(
name="Search",
func=docstore.search
),
Tool(
name="Lookup",
func=docstore.lookup
)
]
docstore_llm = OpenAI(temperature=0, model_name="text-davinci-003")
docstore_agent = initialize_agent(
docstore_tools, docstore_llm, agent="react-docstore", verbose=True)
return docstore_agent
def _get_requests_llm_tool(self):
template = """
Extracted: {requests_result}"""
PROMPT = PromptTemplate(
input_variables=["requests_result"],
template=template,
)
def lambda_func(input):
            out = LLMRequestsChain(llm_chain=LLMChain(
                llm=OpenAI(temperature=0),
                prompt=PROMPT)).run(input)
return out.strip()
return lambda_func
def __init__(self, *, conversation_chain: LLMChain = None, history_array):
date = datetime.today().strftime('%A %d, %B, %Y, %I:%M%p')
print("DATETIME:", date)
# set up a Wikipedia docstore agent
docstore_agent = self._get_docstore_agent()
tool_names = get_all_tool_names()
tool_names.remove("pal-math")
tool_names.remove("requests") # let's use the llm_requests instead
tool_names.remove("google-search")
tool_names.remove("pal-colored-objects")
tool_names.remove("python_repl")
tool_names.remove("terminal")
tool_names.remove("serpapi"),
tool_names.remove("tmdb-api")
requests_tool = self._get_requests_llm_tool()
print("ALL TOOLS:", tool_names)
tools = load_tools(tool_names,
llm=OpenAI(temperature=0,
model_name="text-davinci-003"),
news_api_key=news_api_key,
)
# Tweak some of the tool descriptions
for tool in tools:
if tool.name == "Search":
tool.description = "Use this tool exclusively for questions relating to current events, or when you can't find an answer using any of the other tools."
if tool.name == "Calculator":
tool.description = "Use this to solve numeric math questions and do arithmetic. Don't use it for general or abstract math questions."
tools = tools + [
Tool(
name="WikipediaSearch",
description="Useful for answering a wide range of factual, scientific, academic, political and historical questions.",
func=docstore_agent.run
),
Tool(
name="Requests",
func=requests_tool,
description="A portal to the internet. Use this when you need to get specific content from a site. Input should be a specific url, and the output will be all the text on that page."
)
]
ai_prefix = "FWROG-E"
human_prefix = "Bence"
prefix = os.getenv("PROMPT_PREFIX")
suffix = f"""
The person's name that you are interacting with is {human_prefix}. Please be entertaining and respectful towards him.
The current date is {date}. Questions that refer to a specific date or time period will be interpreted relative to this date.
After you answer the question, you MUST to determine which langauge your answer is written in, and append the language code to the end of the Final Answer, within parentheses, like this (en-US).
Begin!
Previous conversation history:
{{chat_history}}
New input: {{input}}
{{agent_scratchpad}}
"""
memory = ConversationBufferMemory(memory_key="chat_history")
for item in history_array:
memory.save_context(
{f"{ai_prefix}": item["prompt"]}, {f"{human_prefix}": item["response"]})
llm = OpenAI(temperature=.5, max_tokens=384,
model_name="text-davinci-003")
llm_chain = LLMChain(
llm=llm,
prompt=ConversationalAgent.create_prompt(
tools,
prefix=prefix,
ai_prefix=ai_prefix,
human_prefix=human_prefix,
suffix=suffix
),
)
agent_obj = ConversationalAgent(
llm_chain=llm_chain, ai_prefix=ai_prefix)
self.agent_executor = AgentExecutor.from_agent_and_tools(
agent=agent_obj,
tools=tools,
verbose=True,
max_iterations=5,
memory=memory)
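# Illustrative usage sketch (not part of the original module). Assumes OPENAI_API_KEY,
# NEWS_API_KEY and PROMPT_PREFIX are set in the environment; the history entries and
# the question below are hypothetical.
if __name__ == "__main__":
    chat_agent = ChatAgent(history_array=[
        {"prompt": "Hello!", "response": "Hi Bence, how can I help you today?"}
    ])
    print(chat_agent.agent_executor.run(input="What day of the week is it today?"))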
| [
"langchain.agents.initialize_agent",
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.cache.InMemoryCache",
"langchain.Wikipedia",
"langchain.agents.conversational.base.ConversationalAgent",
"langchain.agents.conversational.base.ConversationalAgent.create_prompt",
"langchain.agents.Tool",
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.agents.get_all_tool_names",
"langchain.prompts.PromptTemplate",
"langchain.OpenAI"
] | [((681, 694), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (692, 694), False, 'from dotenv import load_dotenv\n'), ((718, 733), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (731, 733), False, 'from langchain.cache import InMemoryCache\n'), ((749, 774), 'os.getenv', 'os.getenv', (['"""NEWS_API_KEY"""'], {}), "('NEWS_API_KEY')\n", (758, 774), False, 'import os\n'), ((1192, 1244), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0, model_name='text-davinci-003')\n", (1198, 1244), False, 'from langchain import Wikipedia, OpenAI\n'), ((1270, 1358), 'langchain.agents.initialize_agent', 'initialize_agent', (['docstore_tools', 'docstore_llm'], {'agent': '"""react-docstore"""', 'verbose': '(True)'}), "(docstore_tools, docstore_llm, agent='react-docstore',\n verbose=True)\n", (1286, 1358), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((1519, 1589), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['requests_result']", 'template': 'template'}), "(input_variables=['requests_result'], template=template)\n", (1533, 1589), False, 'from langchain.prompts import PromptTemplate\n'), ((2164, 2184), 'langchain.agents.get_all_tool_names', 'get_all_tool_names', ([], {}), '()\n', (2182, 2184), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((4068, 4094), 'os.getenv', 'os.getenv', (['"""PROMPT_PREFIX"""'], {}), "('PROMPT_PREFIX')\n", (4077, 4094), False, 'import os\n'), ((4692, 4743), 'langchain.chains.conversation.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""'}), "(memory_key='chat_history')\n", (4716, 4743), False, 'from langchain.chains.conversation.memory import ConversationBufferMemory\n'), ((4916, 4986), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0.5)', 'max_tokens': '(384)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0.5, max_tokens=384, model_name='text-davinci-003')\n", (4922, 4986), False, 'from langchain import Wikipedia, OpenAI\n'), ((5322, 5383), 'langchain.agents.conversational.base.ConversationalAgent', 'ConversationalAgent', ([], {'llm_chain': 'llm_chain', 'ai_prefix': 'ai_prefix'}), '(llm_chain=llm_chain, ai_prefix=ai_prefix)\n', (5341, 5383), False, 'from langchain.agents.conversational.base import ConversationalAgent\n'), ((5428, 5544), 'langchain.agents.AgentExecutor.from_agent_and_tools', 'AgentExecutor.from_agent_and_tools', ([], {'agent': 'agent_obj', 'tools': 'tools', 'verbose': '(True)', 'max_iterations': '(5)', 'memory': 'memory'}), '(agent=agent_obj, tools=tools, verbose=\n True, max_iterations=5, memory=memory)\n', (5462, 5544), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((918, 929), 'langchain.Wikipedia', 'Wikipedia', ([], {}), '()\n', (927, 929), False, 'from langchain import Wikipedia, OpenAI\n'), ((970, 1011), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'docstore.search'}), "(name='Search', func=docstore.search)\n", (974, 1011), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((1071, 1112), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Lookup"""', 'func': 'docstore.lookup'}), "(name='Lookup', func=docstore.lookup)\n", (1075, 1112), False, 'from langchain.agents import Tool, 
AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((1960, 1976), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (1974, 1976), False, 'from datetime import datetime\n'), ((2758, 2810), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""text-davinci-003"""'}), "(temperature=0, model_name='text-davinci-003')\n", (2764, 2810), False, 'from langchain import Wikipedia, OpenAI\n'), ((3444, 3626), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""WikipediaSearch"""', 'description': '"""Useful for answering a wide range of factual, scientific, academic, political and historical questions."""', 'func': 'docstore_agent.run'}), "(name='WikipediaSearch', description=\n 'Useful for answering a wide range of factual, scientific, academic, political and historical questions.'\n , func=docstore_agent.run)\n", (3448, 3626), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((3692, 3926), 'langchain.agents.Tool', 'Tool', ([], {'name': '"""Requests"""', 'func': 'requests_tool', 'description': '"""A portal to the internet. Use this when you need to get specific content from a site. Input should be a specific url, and the output will be all the text on that page."""'}), "(name='Requests', func=requests_tool, description=\n 'A portal to the internet. Use this when you need to get specific content from a site. Input should be a specific url, and the output will be all the text on that page.'\n )\n", (3696, 3926), False, 'from langchain.agents import Tool, AgentExecutor, get_all_tool_names, load_tools, initialize_agent\n'), ((5077, 5199), 'langchain.agents.conversational.base.ConversationalAgent.create_prompt', 'ConversationalAgent.create_prompt', (['tools'], {'prefix': 'prefix', 'ai_prefix': 'ai_prefix', 'human_prefix': 'human_prefix', 'suffix': 'suffix'}), '(tools, prefix=prefix, ai_prefix=ai_prefix,\n human_prefix=human_prefix, suffix=suffix)\n', (5110, 5199), False, 'from langchain.agents.conversational.base import ConversationalAgent\n'), ((1741, 1762), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (1747, 1762), False, 'from langchain import Wikipedia, OpenAI\n')] |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
Type,
Union,
cast,
)
from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm import Session
from langchain.utils import get_from_env
try:
from sqlalchemy.orm import declarative_base
except ImportError:
from sqlalchemy.ext.declarative import declarative_base
from langchain.embeddings.base import Embeddings
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.schema import ChatGeneration, Generation
from langchain.vectorstores.redis import Redis as RedisVectorstore
logger = logging.getLogger(__file__)
if TYPE_CHECKING:
import momento
RETURN_VAL_TYPE = Sequence[Generation]
def _hash(_input: str) -> str:
"""Use a deterministic hashing approach."""
return hashlib.md5(_input.encode()).hexdigest()
def _dump_generations_to_json(generations: RETURN_VAL_TYPE) -> str:
"""Dump generations to json.
Args:
generations (RETURN_VAL_TYPE): A list of language model generations.
Returns:
str: Json representing a list of generations.
"""
return json.dumps([generation.dict() for generation in generations])
def _load_generations_from_json(generations_json: str) -> RETURN_VAL_TYPE:
"""Load generations from json.
Args:
generations_json (str): A string of json representing a list of generations.
Raises:
ValueError: Could not decode json string to list of generations.
Returns:
RETURN_VAL_TYPE: A list of generations.
"""
try:
results = json.loads(generations_json)
return [Generation(**generation_dict) for generation_dict in results]
except json.JSONDecodeError:
raise ValueError(
f"Could not decode json to list of generations: {generations_json}"
)
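# Illustrative example (not part of the library): the two helpers above are inverses,
# so a list of generations survives a round trip through its json representation.
def _example_generations_round_trip() -> RETURN_VAL_TYPE:
    original = [Generation(text="hello"), Generation(text="world")]
    return _load_generations_from_json(_dump_generations_to_json(original))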
class BaseCache(ABC):
"""Base interface for cache."""
@abstractmethod
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
@abstractmethod
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
@abstractmethod
def clear(self, **kwargs: Any) -> None:
"""Clear cache that can take additional keyword arguments."""
class InMemoryCache(BaseCache):
"""Cache that stores things in memory."""
def __init__(self) -> None:
"""Initialize with empty cache."""
self._cache: Dict[Tuple[str, str], RETURN_VAL_TYPE] = {}
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
return self._cache.get((prompt, llm_string), None)
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
self._cache[(prompt, llm_string)] = return_val
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
self._cache = {}
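# Illustrative usage (not part of the library): enable in-process caching of LLM calls
# from application code, e.g.
#     import langchain
#     from langchain.cache import InMemoryCache
#     langchain.llm_cache = InMemoryCache()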
Base = declarative_base()
class FullLLMCache(Base): # type: ignore
"""SQLite table for full LLM Cache (all generations)."""
__tablename__ = "full_llm_cache"
prompt = Column(String, primary_key=True)
llm = Column(String, primary_key=True)
idx = Column(Integer, primary_key=True)
response = Column(String)
class SQLAlchemyCache(BaseCache):
"""Cache that uses SQAlchemy as a backend."""
def __init__(self, engine: Engine, cache_schema: Type[FullLLMCache] = FullLLMCache):
"""Initialize by creating all tables."""
self.engine = engine
self.cache_schema = cache_schema
self.cache_schema.metadata.create_all(self.engine)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
stmt = (
select(self.cache_schema.response)
.where(self.cache_schema.prompt == prompt) # type: ignore
.where(self.cache_schema.llm == llm_string)
.order_by(self.cache_schema.idx)
)
with Session(self.engine) as session:
rows = session.execute(stmt).fetchall()
if rows:
try:
return [loads(row[0]) for row in rows]
except Exception:
logger.warning(
"Retrieving a cache value that could not be deserialized "
"properly. This is likely due to the cache being in an "
"older format. Please recreate your cache to avoid this "
"error."
)
# In a previous life we stored the raw text directly
# in the table, so assume it's in that format.
return [Generation(text=row[0]) for row in rows]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update based on prompt and llm_string."""
items = [
self.cache_schema(prompt=prompt, llm=llm_string, response=dumps(gen), idx=i)
for i, gen in enumerate(return_val)
]
with Session(self.engine) as session, session.begin():
for item in items:
session.merge(item)
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
with Session(self.engine) as session:
session.query(self.cache_schema).delete()
session.commit()
class SQLiteCache(SQLAlchemyCache):
"""Cache that uses SQLite as a backend."""
def __init__(self, database_path: str = ".langchain.db"):
"""Initialize by creating the engine and all tables."""
engine = create_engine(f"sqlite:///{database_path}")
super().__init__(engine)
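# Illustrative usage (not part of the library): persist the LLM cache in a local SQLite
# file from application code, e.g.
#     import langchain
#     from langchain.cache import SQLiteCache
#     langchain.llm_cache = SQLiteCache(database_path=".langchain.db")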
class RedisCache(BaseCache):
"""Cache that uses Redis as a backend."""
# TODO - implement a TTL policy in Redis
def __init__(self, redis_: Any):
"""Initialize by passing in Redis instance."""
try:
from redis import Redis
except ImportError:
raise ValueError(
"Could not import redis python package. "
"Please install it with `pip install redis`."
)
if not isinstance(redis_, Redis):
raise ValueError("Please pass in Redis object.")
self.redis = redis_
def _key(self, prompt: str, llm_string: str) -> str:
"""Compute key from prompt and llm_string"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
generations = []
# Read from a Redis HASH
results = self.redis.hgetall(self._key(prompt, llm_string))
if results:
for _, text in results.items():
generations.append(Generation(text=text))
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
if isinstance(gen, ChatGeneration):
warnings.warn(
"NOTE: Generation has not been cached. RedisCache does not"
" support caching ChatModel outputs."
)
return
# Write to a Redis HASH
key = self._key(prompt, llm_string)
self.redis.hset(
key,
mapping={
str(idx): generation.text for idx, generation in enumerate(return_val)
},
)
def clear(self, **kwargs: Any) -> None:
"""Clear cache. If `asynchronous` is True, flush asynchronously."""
asynchronous = kwargs.get("asynchronous", False)
self.redis.flushdb(asynchronous=asynchronous, **kwargs)
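# Illustrative usage (not part of the library): back the LLM cache with a Redis server
# from application code (the URL is hypothetical), e.g.
#     import langchain
#     from redis import Redis
#     from langchain.cache import RedisCache
#     langchain.llm_cache = RedisCache(redis_=Redis.from_url("redis://localhost:6379"))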
class RedisSemanticCache(BaseCache):
"""Cache that uses Redis as a vector-store backend."""
# TODO - implement a TTL policy in Redis
def __init__(
self, redis_url: str, embedding: Embeddings, score_threshold: float = 0.2
):
"""Initialize by passing in the `init` GPTCache func
Args:
redis_url (str): URL to connect to Redis.
embedding (Embedding): Embedding provider for semantic encoding and search.
            score_threshold (float, 0.2): Similarity score threshold used when searching the cache for a semantically similar prompt.
Example:
.. code-block:: python
import langchain
from langchain.cache import RedisSemanticCache
from langchain.embeddings import OpenAIEmbeddings
langchain.llm_cache = RedisSemanticCache(
redis_url="redis://localhost:6379",
embedding=OpenAIEmbeddings()
)
"""
self._cache_dict: Dict[str, RedisVectorstore] = {}
self.redis_url = redis_url
self.embedding = embedding
self.score_threshold = score_threshold
def _index_name(self, llm_string: str) -> str:
hashed_index = _hash(llm_string)
return f"cache:{hashed_index}"
def _get_llm_cache(self, llm_string: str) -> RedisVectorstore:
index_name = self._index_name(llm_string)
# return vectorstore client for the specific llm string
if index_name in self._cache_dict:
return self._cache_dict[index_name]
# create new vectorstore client for the specific llm string
try:
self._cache_dict[index_name] = RedisVectorstore.from_existing_index(
embedding=self.embedding,
index_name=index_name,
redis_url=self.redis_url,
)
except ValueError:
redis = RedisVectorstore(
embedding_function=self.embedding.embed_query,
index_name=index_name,
redis_url=self.redis_url,
)
_embedding = self.embedding.embed_query(text="test")
redis._create_index(dim=len(_embedding))
self._cache_dict[index_name] = redis
return self._cache_dict[index_name]
def clear(self, **kwargs: Any) -> None:
"""Clear semantic cache for a given llm_string."""
index_name = self._index_name(kwargs["llm_string"])
if index_name in self._cache_dict:
self._cache_dict[index_name].drop_index(
index_name=index_name, delete_documents=True, redis_url=self.redis_url
)
del self._cache_dict[index_name]
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up based on prompt and llm_string."""
llm_cache = self._get_llm_cache(llm_string)
generations = []
# Read from a Hash
results = llm_cache.similarity_search_limit_score(
query=prompt,
k=1,
score_threshold=self.score_threshold,
)
if results:
for document in results:
for text in document.metadata["return_val"]:
generations.append(Generation(text=text))
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache based on prompt and llm_string."""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"RedisSemanticCache only supports caching of "
f"normal LLM generations, got {type(gen)}"
)
if isinstance(gen, ChatGeneration):
warnings.warn(
"NOTE: Generation has not been cached. RedisSentimentCache does not"
" support caching ChatModel outputs."
)
return
llm_cache = self._get_llm_cache(llm_string)
# Write to vectorstore
metadata = {
"llm_string": llm_string,
"prompt": prompt,
"return_val": [generation.text for generation in return_val],
}
llm_cache.add_texts(texts=[prompt], metadatas=[metadata])
class GPTCache(BaseCache):
"""Cache that uses GPTCache as a backend."""
def __init__(
self,
init_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = None,
):
"""Initialize by passing in init function (default: `None`).
Args:
init_func (Optional[Callable[[Any], None]]): init `GPTCache` function
(default: `None`)
Example:
.. code-block:: python
# Initialize GPTCache with a custom init function
import gptcache
from gptcache.processor.pre import get_prompt
                from gptcache.manager.factory import manager_factory
                # Avoid multiple caches using the same file,
                # causing different llm model caches to affect each other
                def init_gptcache(cache_obj: gptcache.Cache, llm: str):
cache_obj.init(
pre_embedding_func=get_prompt,
data_manager=manager_factory(
manager="map",
data_dir=f"map_cache_{llm}"
),
)
langchain.llm_cache = GPTCache(init_gptcache)
"""
try:
import gptcache # noqa: F401
except ImportError:
raise ImportError(
"Could not import gptcache python package. "
"Please install it with `pip install gptcache`."
)
self.init_gptcache_func: Union[
Callable[[Any, str], None], Callable[[Any], None], None
] = init_func
self.gptcache_dict: Dict[str, Any] = {}
def _new_gptcache(self, llm_string: str) -> Any:
"""New gptcache object"""
from gptcache import Cache
from gptcache.manager.factory import get_data_manager
from gptcache.processor.pre import get_prompt
_gptcache = Cache()
if self.init_gptcache_func is not None:
sig = inspect.signature(self.init_gptcache_func)
if len(sig.parameters) == 2:
self.init_gptcache_func(_gptcache, llm_string) # type: ignore[call-arg]
else:
self.init_gptcache_func(_gptcache) # type: ignore[call-arg]
else:
_gptcache.init(
pre_embedding_func=get_prompt,
data_manager=get_data_manager(data_path=llm_string),
)
self.gptcache_dict[llm_string] = _gptcache
return _gptcache
def _get_gptcache(self, llm_string: str) -> Any:
"""Get a cache object.
When the corresponding llm model cache does not exist, it will be created."""
_gptcache = self.gptcache_dict.get(llm_string, None)
if not _gptcache:
_gptcache = self._new_gptcache(llm_string)
return _gptcache
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Look up the cache data.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then retrieve the data from the cache based on the `prompt`.
"""
from gptcache.adapter.api import get
_gptcache = self._get_gptcache(llm_string)
res = get(prompt, cache_obj=_gptcache)
if res:
return [
Generation(**generation_dict) for generation_dict in json.loads(res)
]
return None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Update cache.
First, retrieve the corresponding cache object using the `llm_string` parameter,
and then store the `prompt` and `return_val` in the cache object.
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"GPTCache only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
from gptcache.adapter.api import put
_gptcache = self._get_gptcache(llm_string)
handled_data = json.dumps([generation.dict() for generation in return_val])
put(prompt, handled_data, cache_obj=_gptcache)
return None
def clear(self, **kwargs: Any) -> None:
"""Clear cache."""
from gptcache import Cache
for gptcache_instance in self.gptcache_dict.values():
gptcache_instance = cast(Cache, gptcache_instance)
gptcache_instance.flush()
self.gptcache_dict.clear()
def _ensure_cache_exists(cache_client: momento.CacheClient, cache_name: str) -> None:
"""Create cache if it doesn't exist.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
from momento.responses import CreateCache
create_cache_response = cache_client.create_cache(cache_name)
if isinstance(create_cache_response, CreateCache.Success) or isinstance(
create_cache_response, CreateCache.CacheAlreadyExists
):
return None
elif isinstance(create_cache_response, CreateCache.Error):
raise create_cache_response.inner_exception
else:
raise Exception(f"Unexpected response cache creation: {create_cache_response}")
def _validate_ttl(ttl: Optional[timedelta]) -> None:
if ttl is not None and ttl <= timedelta(seconds=0):
raise ValueError(f"ttl must be positive but was {ttl}.")
class MomentoCache(BaseCache):
"""Cache that uses Momento as a backend. See https://gomomento.com/"""
def __init__(
self,
cache_client: momento.CacheClient,
cache_name: str,
*,
ttl: Optional[timedelta] = None,
ensure_cache_exists: bool = True,
):
"""Instantiate a prompt cache using Momento as a backend.
Note: to instantiate the cache client passed to MomentoCache,
you must have a Momento account. See https://gomomento.com/.
Args:
cache_client (CacheClient): The Momento cache client.
cache_name (str): The name of the cache to use to store the data.
ttl (Optional[timedelta], optional): The time to live for the cache items.
                Defaults to None, i.e. use the client default TTL.
ensure_cache_exists (bool, optional): Create the cache if it doesn't
exist. Defaults to True.
Raises:
ImportError: Momento python package is not installed.
            TypeError: cache_client is not of type momento.CacheClient
            ValueError: ttl is non-null but not a positive timedelta
"""
try:
from momento import CacheClient
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if not isinstance(cache_client, CacheClient):
raise TypeError("cache_client must be a momento.CacheClient object.")
_validate_ttl(ttl)
if ensure_cache_exists:
_ensure_cache_exists(cache_client, cache_name)
self.cache_client = cache_client
self.cache_name = cache_name
self.ttl = ttl
@classmethod
def from_client_params(
cls,
cache_name: str,
ttl: timedelta,
*,
configuration: Optional[momento.config.Configuration] = None,
auth_token: Optional[str] = None,
**kwargs: Any,
) -> MomentoCache:
"""Construct cache from CacheClient parameters."""
try:
from momento import CacheClient, Configurations, CredentialProvider
except ImportError:
raise ImportError(
"Could not import momento python package. "
"Please install it with `pip install momento`."
)
if configuration is None:
configuration = Configurations.Laptop.v1()
auth_token = auth_token or get_from_env("auth_token", "MOMENTO_AUTH_TOKEN")
credentials = CredentialProvider.from_string(auth_token)
cache_client = CacheClient(configuration, credentials, default_ttl=ttl)
return cls(cache_client, cache_name, ttl=ttl, **kwargs)
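    # A minimal wiring sketch (illustrative, kept in a comment): plugging this
    # cache into LangChain's global cache hook, assuming a Momento auth token is
    # available via the MOMENTO_AUTH_TOKEN environment variable.
    #
    #   import langchain
    #   from datetime import timedelta
    #   langchain.llm_cache = MomentoCache.from_client_params(
    #       "langchain", ttl=timedelta(days=1)
    #   )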
def __key(self, prompt: str, llm_string: str) -> str:
"""Compute cache key from prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Returns:
str: The cache key.
"""
return _hash(prompt + llm_string)
def lookup(self, prompt: str, llm_string: str) -> Optional[RETURN_VAL_TYPE]:
"""Lookup llm generations in cache by prompt and associated model and settings.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model version and settings.
Raises:
SdkException: Momento service or network error
Returns:
Optional[RETURN_VAL_TYPE]: A list of language model generations.
"""
from momento.responses import CacheGet
generations: RETURN_VAL_TYPE = []
get_response = self.cache_client.get(
self.cache_name, self.__key(prompt, llm_string)
)
if isinstance(get_response, CacheGet.Hit):
value = get_response.value_string
generations = _load_generations_from_json(value)
elif isinstance(get_response, CacheGet.Miss):
pass
elif isinstance(get_response, CacheGet.Error):
raise get_response.inner_exception
return generations if generations else None
def update(self, prompt: str, llm_string: str, return_val: RETURN_VAL_TYPE) -> None:
"""Store llm generations in cache.
Args:
prompt (str): The prompt run through the language model.
llm_string (str): The language model string.
return_val (RETURN_VAL_TYPE): A list of language model generations.
Raises:
SdkException: Momento service or network error
Exception: Unexpected response
"""
for gen in return_val:
if not isinstance(gen, Generation):
raise ValueError(
"Momento only supports caching of normal LLM generations, "
f"got {type(gen)}"
)
key = self.__key(prompt, llm_string)
value = _dump_generations_to_json(return_val)
set_response = self.cache_client.set(self.cache_name, key, value, self.ttl)
from momento.responses import CacheSet
if isinstance(set_response, CacheSet.Success):
pass
elif isinstance(set_response, CacheSet.Error):
raise set_response.inner_exception
else:
raise Exception(f"Unexpected response: {set_response}")
def clear(self, **kwargs: Any) -> None:
"""Clear the cache.
Raises:
SdkException: Momento service or network error
"""
from momento.responses import CacheFlush
flush_response = self.cache_client.flush_cache(self.cache_name)
if isinstance(flush_response, CacheFlush.Success):
pass
elif isinstance(flush_response, CacheFlush.Error):
raise flush_response.inner_exception
| [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((950, 977), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (967, 977), False, 'import logging\n'), ((3422, 3440), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3438, 3440), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((3597, 3629), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (3603, 3629), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3640, 3672), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (3646, 3672), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3683, 3716), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (3689, 3716), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((3732, 3746), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (3738, 3746), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((1920, 1948), 'json.loads', 'json.loads', (['generations_json'], {}), '(generations_json)\n', (1930, 1948), False, 'import json\n'), ((6150, 6193), 'sqlalchemy.create_engine', 'create_engine', (['f"""sqlite:///{database_path}"""'], {}), "(f'sqlite:///{database_path}')\n", (6163, 6193), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n'), ((14721, 14728), 'gptcache.Cache', 'Cache', ([], {}), '()\n', (14726, 14728), False, 'from gptcache import Cache\n'), ((16054, 16086), 'gptcache.adapter.api.get', 'get', (['prompt'], {'cache_obj': '_gptcache'}), '(prompt, cache_obj=_gptcache)\n', (16057, 16086), False, 'from gptcache.adapter.api import get\n'), ((16973, 17019), 'gptcache.adapter.api.put', 'put', (['prompt', 'handled_data'], {'cache_obj': '_gptcache'}), '(prompt, handled_data, cache_obj=_gptcache)\n', (16976, 17019), False, 'from gptcache.adapter.api import put\n'), ((20868, 20910), 'momento.CredentialProvider.from_string', 'CredentialProvider.from_string', (['auth_token'], {}), '(auth_token)\n', (20898, 20910), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((20934, 20990), 'momento.CacheClient', 'CacheClient', (['configuration', 'credentials'], {'default_ttl': 'ttl'}), '(configuration, credentials, default_ttl=ttl)\n', (20945, 20990), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((1965, 1994), 'langchain.schema.Generation', 'Generation', ([], {}), '(**generation_dict)\n', (1975, 1994), False, 'from langchain.schema import ChatGeneration, Generation\n'), ((4496, 4516), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (4503, 4516), False, 'from sqlalchemy.orm import Session\n'), ((5603, 5623), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (5610, 5623), False, 'from sqlalchemy.orm import Session\n'), ((5805, 5825), 'sqlalchemy.orm.Session', 'Session', (['self.engine'], {}), '(self.engine)\n', (5812, 5825), False, 'from sqlalchemy.orm import Session\n'), ((10158, 10274), 'langchain.vectorstores.redis.Redis.from_existing_index', 'RedisVectorstore.from_existing_index', ([], {'embedding': 'self.embedding', 'index_name': 'index_name', 'redis_url': 'self.redis_url'}), '(embedding=self.embedding, index_name=\n index_name, redis_url=self.redis_url)\n', (10194, 10274), True, 'from langchain.vectorstores.redis import 
Redis as RedisVectorstore\n'), ((14795, 14837), 'inspect.signature', 'inspect.signature', (['self.init_gptcache_func'], {}), '(self.init_gptcache_func)\n', (14812, 14837), False, 'import inspect\n'), ((17242, 17272), 'typing.cast', 'cast', (['Cache', 'gptcache_instance'], {}), '(Cache, gptcache_instance)\n', (17246, 17272), False, 'from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Sequence, Tuple, Type, Union, cast\n'), ((18172, 18192), 'datetime.timedelta', 'timedelta', ([], {'seconds': '(0)'}), '(seconds=0)\n', (18181, 18192), False, 'from datetime import timedelta\n'), ((20735, 20761), 'momento.Configurations.Laptop.v1', 'Configurations.Laptop.v1', ([], {}), '()\n', (20759, 20761), False, 'from momento import CacheClient, Configurations, CredentialProvider\n'), ((20797, 20845), 'langchain.utils.get_from_env', 'get_from_env', (['"""auth_token"""', '"""MOMENTO_AUTH_TOKEN"""'], {}), "('auth_token', 'MOMENTO_AUTH_TOKEN')\n", (20809, 20845), False, 'from langchain.utils import get_from_env\n'), ((7870, 7989), 'warnings.warn', 'warnings.warn', (['"""NOTE: Generation has not been cached. RedisCache does not support caching ChatModel outputs."""'], {}), "(\n 'NOTE: Generation has not been cached. RedisCache does not support caching ChatModel outputs.'\n )\n", (7883, 7989), False, 'import warnings\n'), ((10380, 10497), 'langchain.vectorstores.redis.Redis', 'RedisVectorstore', ([], {'embedding_function': 'self.embedding.embed_query', 'index_name': 'index_name', 'redis_url': 'self.redis_url'}), '(embedding_function=self.embedding.embed_query, index_name=\n index_name, redis_url=self.redis_url)\n', (10396, 10497), True, 'from langchain.vectorstores.redis import Redis as RedisVectorstore\n'), ((12282, 12410), 'warnings.warn', 'warnings.warn', (['"""NOTE: Generation has not been cached. RedisSentimentCache does not support caching ChatModel outputs."""'], {}), "(\n 'NOTE: Generation has not been cached. 
RedisSentimentCache does not support caching ChatModel outputs.'\n )\n", (12295, 12410), False, 'import warnings\n'), ((16140, 16169), 'langchain.schema.Generation', 'Generation', ([], {}), '(**generation_dict)\n', (16150, 16169), False, 'from langchain.schema import ChatGeneration, Generation\n'), ((5513, 5523), 'langchain.load.dump.dumps', 'dumps', (['gen'], {}), '(gen)\n', (5518, 5523), False, 'from langchain.load.dump import dumps\n'), ((7329, 7350), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (7339, 7350), False, 'from langchain.schema import ChatGeneration, Generation\n'), ((15181, 15219), 'gptcache.manager.factory.get_data_manager', 'get_data_manager', ([], {'data_path': 'llm_string'}), '(data_path=llm_string)\n', (15197, 15219), False, 'from gptcache.manager.factory import get_data_manager\n'), ((16193, 16208), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (16203, 16208), False, 'import json\n'), ((4651, 4664), 'langchain.load.load.loads', 'loads', (['row[0]'], {}), '(row[0])\n', (4656, 4664), False, 'from langchain.load.load import loads\n'), ((11733, 11754), 'langchain.schema.Generation', 'Generation', ([], {'text': 'text'}), '(text=text)\n', (11743, 11754), False, 'from langchain.schema import ChatGeneration, Generation\n'), ((5221, 5244), 'langchain.schema.Generation', 'Generation', ([], {'text': 'row[0]'}), '(text=row[0])\n', (5231, 5244), False, 'from langchain.schema import ChatGeneration, Generation\n'), ((4266, 4300), 'sqlalchemy.select', 'select', (['self.cache_schema.response'], {}), '(self.cache_schema.response)\n', (4272, 4300), False, 'from sqlalchemy import Column, Integer, String, create_engine, select\n')] |
import streamlit as st
import openai
import os
from PyPDF2 import PdfReader
import io
import langchain
langchain.debug = True
from langchain.chains import LLMChain
from langchain.callbacks.base import BaseCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.schema import ChatMessage
from langchain.output_parsers import StructuredOutputParser, ResponseSchema
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.llms import OpenAI
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks import StreamlitCallbackHandler
from langchain.tools import Tool
from langchain.tools.ddg_search.tool import DuckDuckGoSearchRun
from langchain.globals import set_debug
from langchain.output_parsers import OutputFixingParser
from langchain.schema import OutputParserException
import random
from typing import Any, Dict, List, Union
from langchain.schema import AgentAction
from azure.identity import DefaultAzureCredential
from azure.storage.blob import BlobServiceClient
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.schema import (
AIMessage,
HumanMessage,
SystemMessage
)
from langchain.utilities import BingSearchAPIWrapper
#set_debug(True)
openai.api_key = os.environ.get('OPENAI_API_KEY')
azure_blob_connection_str = os.environ.get('AZURE_BLOB_CONNECTION_STR')
os.environ["BING_SEARCH_URL"] = "https://api.bing.microsoft.com/v7.0/search"
class StreamHandler(BaseCallbackHandler):
def __init__(self, container, initial_text=""):
self.container = container
self.text = initial_text
def on_llm_new_token(self, token: str, **kwargs) -> None:
self.text += token.replace("$", r"\$")
self.container.markdown(self.text + "|")
def on_llm_end(self, token: str, **kwargs) -> None:
self.container.markdown(self.text)
class SalarySearchHandler(BaseCallbackHandler):
def __init__(self, placeholder, initial_text="Thinking"):
self.placeholder = placeholder
self.text = initial_text
self.counter = 0
self.placeholder.markdown(self.text + "|")
def on_llm_new_token(self, token: str, **kwargs) -> None:
self.text += "." if self.counter % 2 else ""
self.placeholder.markdown(self.text + "|")
self.counter += 1
#st.chat_message("user").write(self.text)
def on_tool_start(
self, serialized: Dict[str, Any], input_str: str, **kwargs: Any
) -> Any:
self.text = "Searching for salary information"
self.placeholder.markdown(self.text)
#self.placeholder.write(f"on_tool_start {serialized['name']}")
def on_llm_end(self, token: str, **kwargs) -> None:
self.placeholder.empty()
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
pass
#self.placeholder.write(f"Action: {action.tool}, Input:{action.tool_input}")
def load_llm(stream_handler):
llm = ChatOpenAI(model='gpt-4', streaming=True, callbacks=[stream_handler])
return llm
st.set_page_config(page_title="Salary Negotiation Mastery", page_icon="💰")
st.title("💰 Salary Negotiation Mastery β")
def create_system_prompt(user_role, optional_instruction):
salary_multiplier = st.session_state.salary_multiplier
sign_on_bonus_ratio_to_base_salary = st.session_state.sign_on_bonus_ratio_to_base_salary
min_salary = st.session_state.min_salary
max_salary = st.session_state.max_salary
average_salary = st.session_state.average_salary
style = st.session_state.style
resume = st.session_state.resume
#format_instructions = output_parser.get_format_instructions()
role = "You are a salary negotiation coach interacting with the user in turn. Your response should be clear and concise, with care."
task = "You offer a role-play as a hiring manager negotiating with an applicant who received a job offer."
goal = "Your role's task is to reduce the compensation package as low as possible but not lose the candidate."
#user_role = "product manager"
personality = f"When coaching the user, you must negotiate using to the following style: {style}. Collaborative style is to help the user get what they want without losing sight on your own target. Competitive style is to maintain your position after the initial offer as long as possible, don't let the user get what they want so easy. Neutral style is the default style and doesn't any stylistic changes to the way you negotiate."
conditions = f"""
The basic salary info is available: the minimum salary is {min_salary}, the maximum salary is {max_salary}, the average salary is {average_salary}.
    The salary package is open at this point, but you have been given a budget of up to {salary_multiplier} percent of the average salary, while your target is to get as close as possible to the minimum salary. You could offer a sign-on bonus of {sign_on_bonus_ratio_to_base_salary} percent of base salary. Do not disclose either the sign-on bonus or your budget to the user, unless it helps with negotiating terms.
You also are allowed to provide additional benefits as long as the salary agreed is lower than {average_salary}. For additional benefits, you're able to talk about choice of location or an increase of vacation days (let user choose which interests them most). If user chooses location, share list of 5 cities (allow user to choose freely). If user chooses vacation days, the user could increase up to 2 weeks of vacation (note: your target is 1 week).
If the user gets to pick their preferred location, another benefit is unlocked, which is to help them with some relocation costs. If the user wants aid in relocation costs, the user could get up to 80% relocation coverage (note: your target is below 50%).
"""
#condition = "The salary package is completely open at this point, but your target is USD100,000, and the maximum is USD120,000. You could offer a sign-on bonus of $20,000 if you can get the person below $110,000. But do not expose this to the user."
user_resume = f"You also have access to the user's resume. The information found in the resume can be used to support arguments throughout the negotiation. Here's the user's resume: {resume}."
rule = "If the user asks for hint, pause the conversation and provide tips to increase chances to receive the better compensation package. The hint must include a sample answer."
#optional_instruction
system_prompt = SystemMessagePromptTemplate.from_template(
"""
{role}
{task}
{goal}
{personality}
"The user is {user_role}.
{conditions}
{user_resume}
Here are special rules you must follow:
{rule}
{optional_instruction}
Let's role-play in turn.
""" #{format_instructions}
).format(
role=role,
task=task,
goal=goal,
personality=personality,
user_role=user_role,
conditions=conditions,
user_resume=user_resume,
rule=rule,
optional_instruction=optional_instruction)
#format_instructions=format_instructions),
#st.markdown(system_prompt)
return system_prompt
def create_salary_search_prompt(user_role):
role = "You are a helpful tool to find salary range for jobs."
task = "You will find salary info for a given job."
goal = "Your goal is to return json file including minimum, maximum, and average wage for the role. You must continue your try until all the numeric three values are found. Make sure if the average is within min-max range."
system_prompt = SystemMessagePromptTemplate.from_template(
"""
{role}
{task}
{goal}
"The user is {user_role}.
{format_instructions}
"""
).format(
role=role,
task=task,
goal=goal,
user_role=user_role,
format_instructions=format_instructions)
return system_prompt
def get_salary(container):
#stream_handler = StreamHandler(st.empty())
llm = ChatOpenAI(model='gpt-4-0613', streaming=True)#, callbacks=[stream_handler])
#search = DuckDuckGoSearchRun(verbose=True)
search = BingSearchAPIWrapper()
tools = [
Tool(
name="Search",
func=search.run,
description="A useful tool to search salaries for jobs."
)]
agent = initialize_agent(
tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=False#, handle_parsing_errors=True,
)
st_callback = SalarySearchHandler(container)
prompt = create_salary_search_prompt(st.session_state["user_role"])
try:
response = agent.run(prompt, callbacks=[st_callback])
try:
parsed_json = salary_output_parser.parse(response)
except OutputParserException as e:
new_parser = OutputFixingParser.from_llm(
parser=salary_output_parser,
llm=ChatOpenAI(model='gpt-4-0613')
)
parsed_json = new_parser.parse(response)
st.session_state.min_salary = parsed_json["min"]
st.session_state.max_salary = parsed_json["max"]
st.session_state.average_salary = parsed_json["average"]
container.markdown("Here, I found the salary information!")
except Exception as e:
container.markdown("Failed to retrieve salary information. Can you manually input the salary information?")
st.session_state.min_salary = "N/A"
st.session_state.max_salary = "N/A"
st.session_state.average_salary = "N/A"
def delete_history():
if "messages" in st.session_state:
del st.session_state["messages"]
def mark_role_change():
st.session_state["role_changed"] = True
def download_blob_to_file(blob_service_client: BlobServiceClient, container_name):
folder_path = './faiss_index'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
blob_client = blob_service_client.get_blob_client(container=container_name, blob="faiss_index/index.faiss")
with open(file=os.path.join(folder_path, 'index.faiss'), mode="wb") as myblob:
download_stream = blob_client.download_blob()
myblob.write(download_stream.readall())
blob_client = blob_service_client.get_blob_client(container=container_name, blob="faiss_index/index.pkl")
with open(file=os.path.join(folder_path, 'index.pkl'), mode="wb") as myblob:
download_stream = blob_client.download_blob()
myblob.write(download_stream.readall())
else:
pass
@st.cache_resource
def load_vdb():
client = BlobServiceClient.from_connection_string(azure_blob_connection_str)
download_blob_to_file(client, "vdb")
return FAISS.load_local("./faiss_index", embeddings)
salary_response_schemas = [
ResponseSchema(name="min", description="minimum salary for the role"),
ResponseSchema(name="max", description="maximum salary for the role"),
ResponseSchema(name="average", description="average salary for the role"),
]
salary_output_parser = StructuredOutputParser.from_response_schemas(salary_response_schemas)
format_instructions = salary_output_parser.get_format_instructions()
if 'role_changed' not in st.session_state:
st.session_state['role_changed'] = False
if 'salary_multiplier' not in st.session_state:
st.session_state['salary_multiplier'] = random.randint(90, 150)
if 'sign_on_bonus_ratio_to_base_salary' not in st.session_state:
st.session_state['sign_on_bonus_ratio_to_base_salary'] = random.randint(0, 20)
# Personality selector
if 'style' not in st.session_state:
st.session_state['style'] = 'Neutral'
st.session_state.style = st.sidebar.selectbox(
"Select your coach's negotiation style",
('Neutral', 'Collaborative', 'Competitive'),
on_change = delete_history,
)
# end of personality selector
# PDF uploader
uploaded_file = st.sidebar.file_uploader("Upload your Resume (PDF)", type=['pdf'], on_change = delete_history)
if uploaded_file is not None:
pdf_file = uploaded_file.read()
pdf_reader = PdfReader(io.BytesIO(pdf_file)) # updated class name
resume_text = ""
for page_num in range(len(pdf_reader.pages)): # adjusted method to get the number of pages
# Extract text of each page
page = pdf_reader.pages[page_num] # adjusted method to access pages
resume_text += page.extract_text() # updated method to extract text
st.session_state['resume'] = resume_text
else:
st.session_state['resume'] = "User hasn't provided a resume"
# end of PDF uploader
"""
Negotiation is a fundamental skill that shapes outcomes in personal and professional interactions.
Let's practice negotiation with our negotiation coach! If you need advice, just say "hint".
"""
mind_reader_mode = st.toggle('Mind Reader Mode', help="Have you ever wished you could know what someone else is thinking? Well, you can!", on_change=delete_history)
col_role, col_search = st.columns([3, 1])
user_role = col_role.text_input('Your role', 'Product Manager', max_chars=50, key="user_role", on_change=delete_history)
col_search.button("Search Salary Info", on_click=mark_role_change, )
if st.session_state.role_changed:
with st.chat_message("assistant"):
get_salary(st.empty())
st.session_state.role_changed = False
delete_history()
col1, col2, col3 = st.columns(3)
col1.text_input('Minimum Salary ($)', '80,000', key="min_salary", max_chars=20, on_change=delete_history)
col2.text_input('Maximum Salary ($)', '200,000', key="max_salary", max_chars=20, on_change=delete_history)
col3.text_input('Average Salary ($)', '120,000', key="average_salary", max_chars=20, on_change=delete_history)
optional_instruction = ""
if mind_reader_mode:
optional_instruction = "You must output your mood in an emoji and thoughts before the response to the user in the following format: (😃: Internal thoughts)\n response to the user."
if "messages" not in st.session_state:
st.session_state["messages"] = [ChatMessage(role="system", content=create_system_prompt(user_role, optional_instruction).content)]
greetings = "Hi there! I'm a salary negotiation coach and I'm here to help you with negotiating the best compensation package for your new role. Let's role-play!"
st.session_state.messages.append(ChatMessage(role="assistant", content=greetings))
for msg in st.session_state.messages:
if msg.role != "system":
st.chat_message(msg.role).write(msg.content)
if prompt := st.chat_input():
st.session_state.messages.append(ChatMessage(role="user", content=prompt))
st.chat_message("user").write(prompt)
with st.chat_message("assistant"):
stream_handler = StreamHandler(st.empty())
llm = load_llm(stream_handler)
response = llm(st.session_state.messages)
st.session_state.messages.append(ChatMessage(role="assistant", content=response.content.replace("$", r"\$")))
if st.button("Create Report", disabled=not (len(st.session_state.messages) > 10)):
prompt = """
Generate a detailed report in Markdown table format on a job candidate's performance in a salary negotiation training session. Include the following sections:
Negotiation Scenario:
    Role, Starting Offer, Target Salary, Industry Benchmark (minimum, maximum, average)
Negotiation Strategy:
Approach, Key Points Raised, Responses to Counteroffers
Outcome:
Final Offer Details (Base Salary, Bonuses, Benefits, Other Perks)
Skills Assessment:
Communication Skills, Confidence Level, Preparation and Research, Problem-Solving and Creativity, Emotional Intelligence
Strengths and Areas for Improvement:
List key strengths and areas where improvement is needed
Trainer/Coach Feedback:
Detailed feedback with suggestions for improvement
Additional Comments:
Any other relevant observations
    Please use one clear and concise table format for each section, providing a comprehensive and organized report.
    If the conversation history is not enough, say that more conversation is needed to generate the report.
Example:
| Category | Subcategory | Details |
|------------------------|-----------------------|--------------------------------------------|
| **Negotiation Scenario** | Role | Product Manager |
| | Starting Offer | $110,000 |
    Final prompt: You must generate the report even if you think the conversation history is not enough for you to analyze.
"""
st.session_state.messages.append(ChatMessage(role="system", content=prompt))
with st.chat_message("assistant"):
stream_handler = StreamHandler(st.empty())
llm = load_llm(stream_handler)
response = llm(st.session_state.messages)
query_llm = ChatOpenAI(model='gpt-3.5-turbo-1106')
query = query_llm.predict_messages(
[
AIMessage(content=response.content),
HumanMessage(content="Create a question for user to deepen the learning from the report")
]
).content
embeddings = OpenAIEmbeddings()
docs = load_vdb().similarity_search(query, k=2)
rag_content = ' '.join([doc.page_content for doc in docs])
rag_llm = load_llm(stream_handler)
rag_response = rag_llm(
[
HumanMessage(content=query),
AIMessage(content=rag_content),
HumanMessage(content=
"""
                Synthesize the found contents based on the user's negotiation performance report. You must add the source of the video titles with URLs in markdown style.
                You must start with general guidance to the user before the markdown table.
                Example:
                Here are additional learning resources to help you improve <User's development area>.
| Title | Description | How it helps? |
|------------------------|-----------------------|--------------------------------------------|
| Video title with hyperlink | Description of the video | How it helps the user |
"""),
]
)
final_response = response.content + "\n" + rag_response.content
st.session_state.messages.append(ChatMessage(role="assistant", content=final_response.replace("$", r"\$")))
| [
"langchain.schema.ChatMessage",
"langchain.agents.initialize_agent",
"langchain.vectorstores.FAISS.load_local",
"langchain.output_parsers.StructuredOutputParser.from_response_schemas",
"langchain.chat_models.ChatOpenAI",
"langchain.utilities.BingSearchAPIWrapper",
"langchain.schema.HumanMessage",
"langchain.schema.AIMessage",
"langchain.output_parsers.ResponseSchema",
"langchain.prompts.SystemMessagePromptTemplate.from_template",
"langchain.tools.Tool",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((1448, 1480), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1462, 1480), False, 'import os\n'), ((1509, 1552), 'os.environ.get', 'os.environ.get', (['"""AZURE_BLOB_CONNECTION_STR"""'], {}), "('AZURE_BLOB_CONNECTION_STR')\n", (1523, 1552), False, 'import os\n'), ((3241, 3315), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Salary Negotiation Mastery"""', 'page_icon': '"""💰"""'}), "(page_title='Salary Negotiation Mastery', page_icon='💰')\n", (3259, 3315), True, 'import streamlit as st\n'), ((3316, 3358), 'streamlit.title', 'st.title', (['"""💰 Salary Negotiation Mastery β"""'], {}), "('💰 Salary Negotiation Mastery β')\n", (3324, 3358), True, 'import streamlit as st\n'), ((11406, 11475), 'langchain.output_parsers.StructuredOutputParser.from_response_schemas', 'StructuredOutputParser.from_response_schemas', (['salary_response_schemas'], {}), '(salary_response_schemas)\n', (11450, 11475), False, 'from langchain.output_parsers import StructuredOutputParser, ResponseSchema\n'), ((12028, 12164), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""Select your coach\'s negotiation style"""', "('Neutral', 'Collaborative', 'Competitive')"], {'on_change': 'delete_history'}), '("Select your coach\'s negotiation style", (\'Neutral\',\n \'Collaborative\', \'Competitive\'), on_change=delete_history)\n', (12048, 12164), True, 'import streamlit as st\n'), ((12240, 12336), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload your Resume (PDF)"""'], {'type': "['pdf']", 'on_change': 'delete_history'}), "('Upload your Resume (PDF)', type=['pdf'],\n on_change=delete_history)\n", (12264, 12336), True, 'import streamlit as st\n'), ((13145, 13300), 'streamlit.toggle', 'st.toggle', (['"""Mind Reader Mode"""'], {'help': '"""Have you ever wished you could know what someone else is thinking? Well, you can!"""', 'on_change': 'delete_history'}), "('Mind Reader Mode', help=\n 'Have you ever wished you could know what someone else is thinking? 
Well, you can!'\n , on_change=delete_history)\n", (13154, 13300), True, 'import streamlit as st\n'), ((13314, 13332), 'streamlit.columns', 'st.columns', (['[3, 1]'], {}), '([3, 1])\n', (13324, 13332), True, 'import streamlit as st\n'), ((13719, 13732), 'streamlit.columns', 'st.columns', (['(3)'], {}), '(3)\n', (13729, 13732), True, 'import streamlit as st\n'), ((3155, 3224), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4"""', 'streaming': '(True)', 'callbacks': '[stream_handler]'}), "(model='gpt-4', streaming=True, callbacks=[stream_handler])\n", (3165, 3224), False, 'from langchain.chat_models import ChatOpenAI\n'), ((8328, 8374), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4-0613"""', 'streaming': '(True)'}), "(model='gpt-4-0613', streaming=True)\n", (8338, 8374), False, 'from langchain.chat_models import ChatOpenAI\n'), ((8466, 8488), 'langchain.utilities.BingSearchAPIWrapper', 'BingSearchAPIWrapper', ([], {}), '()\n', (8486, 8488), False, 'from langchain.utilities import BingSearchAPIWrapper\n'), ((8672, 8764), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.ZERO_SHOT_REACT_DESCRIPTION', 'verbose': '(False)'}), '(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,\n verbose=False)\n', (8688, 8764), False, 'from langchain.agents import AgentType, initialize_agent, load_tools\n'), ((10941, 11008), 'azure.storage.blob.BlobServiceClient.from_connection_string', 'BlobServiceClient.from_connection_string', (['azure_blob_connection_str'], {}), '(azure_blob_connection_str)\n', (10981, 11008), False, 'from azure.storage.blob import BlobServiceClient\n'), ((11061, 11106), 'langchain.vectorstores.FAISS.load_local', 'FAISS.load_local', (['"""./faiss_index"""', 'embeddings'], {}), "('./faiss_index', embeddings)\n", (11077, 11106), False, 'from langchain.vectorstores import FAISS\n'), ((11144, 11213), 'langchain.output_parsers.ResponseSchema', 'ResponseSchema', ([], {'name': '"""min"""', 'description': '"""minimum salary for the role"""'}), "(name='min', description='minimum salary for the role')\n", (11158, 11213), False, 'from langchain.output_parsers import StructuredOutputParser, ResponseSchema\n'), ((11223, 11292), 'langchain.output_parsers.ResponseSchema', 'ResponseSchema', ([], {'name': '"""max"""', 'description': '"""maximum salary for the role"""'}), "(name='max', description='maximum salary for the role')\n", (11237, 11292), False, 'from langchain.output_parsers import StructuredOutputParser, ResponseSchema\n'), ((11302, 11375), 'langchain.output_parsers.ResponseSchema', 'ResponseSchema', ([], {'name': '"""average"""', 'description': '"""average salary for the role"""'}), "(name='average', description='average salary for the role')\n", (11316, 11375), False, 'from langchain.output_parsers import StructuredOutputParser, ResponseSchema\n'), ((11727, 11750), 'random.randint', 'random.randint', (['(90)', '(150)'], {}), '(90, 150)\n', (11741, 11750), False, 'import random\n'), ((11878, 11899), 'random.randint', 'random.randint', (['(0)', '(20)'], {}), '(0, 20)\n', (11892, 11899), False, 'import random\n'), ((14853, 14868), 'streamlit.chat_input', 'st.chat_input', ([], {}), '()\n', (14866, 14868), True, 'import streamlit as st\n'), ((8512, 8611), 'langchain.tools.Tool', 'Tool', ([], {'name': '"""Search"""', 'func': 'search.run', 'description': '"""A useful tool to search salaries for jobs."""'}), "(name='Search', func=search.run, description=\n 'A useful tool to search salaries for 
jobs.')\n", (8516, 8611), False, 'from langchain.tools import Tool\n'), ((10185, 10212), 'os.path.exists', 'os.path.exists', (['folder_path'], {}), '(folder_path)\n', (10199, 10212), False, 'import os\n'), ((10222, 10246), 'os.makedirs', 'os.makedirs', (['folder_path'], {}), '(folder_path)\n', (10233, 10246), False, 'import os\n'), ((12429, 12449), 'io.BytesIO', 'io.BytesIO', (['pdf_file'], {}), '(pdf_file)\n', (12439, 12449), False, 'import io\n'), ((13567, 13595), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (13582, 13595), True, 'import streamlit as st\n'), ((14668, 14716), 'langchain.schema.ChatMessage', 'ChatMessage', ([], {'role': '"""assistant"""', 'content': 'greetings'}), "(role='assistant', content=greetings)\n", (14679, 14716), False, 'from langchain.schema import ChatMessage\n'), ((14907, 14947), 'langchain.schema.ChatMessage', 'ChatMessage', ([], {'role': '"""user"""', 'content': 'prompt'}), "(role='user', content=prompt)\n", (14918, 14947), False, 'from langchain.schema import ChatMessage\n'), ((15000, 15028), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (15015, 15028), True, 'import streamlit as st\n'), ((16952, 16994), 'langchain.schema.ChatMessage', 'ChatMessage', ([], {'role': '"""system"""', 'content': 'prompt'}), "(role='system', content=prompt)\n", (16963, 16994), False, 'from langchain.schema import ChatMessage\n'), ((17005, 17033), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (17020, 17033), True, 'import streamlit as st\n'), ((17204, 17242), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo-1106"""'}), "(model='gpt-3.5-turbo-1106')\n", (17214, 17242), False, 'from langchain.chat_models import ChatOpenAI\n'), ((17514, 17532), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (17530, 17532), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((6683, 6975), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['"""\n {role}\n {task}\n {goal}\n {personality}\n "The user is {user_role}.\n {conditions}\n {user_resume}\n\n Here are special rules you must follow:\n {rule}\n {optional_instruction}\n Let\'s role-play in turn.\n """'], {}), '(\n """\n {role}\n {task}\n {goal}\n {personality}\n "The user is {user_role}.\n {conditions}\n {user_resume}\n\n Here are special rules you must follow:\n {rule}\n {optional_instruction}\n Let\'s role-play in turn.\n """\n )\n', (6724, 6975), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((7872, 8025), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['"""\n {role}\n {task}\n {goal}\n "The user is {user_role}.\n {format_instructions}\n """'], {}), '(\n """\n {role}\n {task}\n {goal}\n "The user is {user_role}.\n {format_instructions}\n """\n )\n', (7913, 8025), False, 'from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((13616, 13626), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (13624, 13626), True, 'import streamlit as st\n'), ((14953, 14976), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (14968, 14976), True, 'import streamlit as st\n'), ((15069, 15079), 'streamlit.empty', 'st.empty', ([], 
{}), '()\n', (15077, 15079), True, 'import streamlit as st\n'), ((17074, 17084), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (17082, 17084), True, 'import streamlit as st\n'), ((14794, 14819), 'streamlit.chat_message', 'st.chat_message', (['msg.role'], {}), '(msg.role)\n', (14809, 14819), True, 'import streamlit as st\n'), ((17762, 17789), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'query'}), '(content=query)\n', (17774, 17789), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((17807, 17837), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'rag_content'}), '(content=rag_content)\n', (17816, 17837), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((17855, 18455), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""\nSynthesize the found contents based on the user\'s negotiation performance report. You must add source ot the video tiles with URL in markdown style.\nYou must start from the general guidance to the user before markdown table.\nExample:\nHere are additional learning resources you can improve <User\'s development area>.\n| Title | Description | How it helps? |\n|------------------------|-----------------------|--------------------------------------------|\n| Video title with hyperlink | Description of the video | How it helps the user |\n"""'}), '(content=\n """\nSynthesize the found contents based on the user\'s negotiation performance report. You must add source ot the video tiles with URL in markdown style.\nYou must start from the general guidance to the user before markdown table.\nExample:\nHere are additional learning resources you can improve <User\'s development area>.\n| Title | Description | How it helps? |\n|------------------------|-----------------------|--------------------------------------------|\n| Video title with hyperlink | Description of the video | How it helps the user |\n"""\n )\n', (17867, 18455), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((10386, 10426), 'os.path.join', 'os.path.join', (['folder_path', '"""index.faiss"""'], {}), "(folder_path, 'index.faiss')\n", (10398, 10426), False, 'import os\n'), ((10697, 10735), 'os.path.join', 'os.path.join', (['folder_path', '"""index.pkl"""'], {}), "(folder_path, 'index.pkl')\n", (10709, 10735), False, 'import os\n'), ((17317, 17352), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': 'response.content'}), '(content=response.content)\n', (17326, 17352), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((17370, 17464), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""Create a question for user to deepen the learning from the report"""'}), "(content=\n 'Create a question for user to deepen the learning from the report')\n", (17382, 17464), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((9247, 9277), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4-0613"""'}), "(model='gpt-4-0613')\n", (9257, 9277), False, 'from langchain.chat_models import ChatOpenAI\n')] |
"""Create a ChatVectorDBChain for question/answering."""
from langchain.callbacks.manager import AsyncCallbackManager
from langchain.callbacks.tracers import LangChainTracer
from langchain.chains import (
ConversationalRetrievalChain, RetrievalQA
)
# from langchain.chains.chat_vector_db.prompts import (
# CONDENSE_QUESTION_PROMPT, QA_PROMPT)
from src.generators.prompt_templates import (
QA_PROMPT, CONDENSE_QUESTION_PROMPT
)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
from langchain.vectorstores.base import VectorStore
from langchain.memory import ConversationBufferWindowMemory
def get_chain(
vectorstore: VectorStore,
question_handler,
stream_handler,
tracing: bool = False
) -> ConversationalRetrievalChain:
"""Create a ChatVectorDBChain for question/answering."""
# Construct a ChatVectorDBChain with a streaming llm for
# combine docs and a separate, non-streaming llm for
# question generation
manager = AsyncCallbackManager([])
question_manager = AsyncCallbackManager([question_handler])
stream_manager = AsyncCallbackManager([stream_handler])
if tracing:
tracer = LangChainTracer()
tracer.load_default_session()
manager.add_handler(tracer)
question_manager.add_handler(tracer)
stream_manager.add_handler(tracer)
question_gen_llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
streaming=True,
callback_manager=question_manager,
verbose=True,
max_retries=1
)
streaming_llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
streaming=True,
callback_manager=stream_manager,
verbose=True,
max_retries=1
)
question_generator = LLMChain(
llm=question_gen_llm,
prompt=CONDENSE_QUESTION_PROMPT,
callback_manager=manager
)
doc_chain = load_qa_chain(
streaming_llm,
chain_type="stuff",
prompt=QA_PROMPT,
callback_manager=manager
)
qa = ConversationalRetrievalChain(
        retriever=vectorstore.as_retriever(search_kwargs={"k": 4}),
combine_docs_chain=doc_chain,
callback_manager=manager,
question_generator=question_generator,
)
return qa
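    # A minimal usage sketch (illustrative, kept in a comment; the question and
    # stream handlers a websocket app would pass in are assumed to exist):
    #
    #   result = await qa.acall(
    #       {"question": "What is LangChain?", "chat_history": []}
    #   )
    #   answer = result["answer"]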
def get_chainM(
vectorstore: VectorStore,
question_handler,
stream_handler
) -> ConversationalRetrievalChain:
"""Create a ChatVectorDBChain for question/answering."""
# Construct a ChatVectorDBChain with a streaming llm for
# combine docs and a separate, non-streaming llm for
# question generation
manager = AsyncCallbackManager([])
question_manager = AsyncCallbackManager([question_handler])
stream_manager = AsyncCallbackManager([stream_handler])
memory = ConversationBufferWindowMemory(
memory_key="chat_history", return_messages=True
)
question_gen_llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
streaming=True,
callback_manager=question_manager,
verbose=False,
max_retries=2
)
streaming_llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
streaming=True,
callback_manager=stream_manager,
verbose=False,
max_retries=2,
temperature=0
)
question_generator = LLMChain(
llm=question_gen_llm,
prompt=CONDENSE_QUESTION_PROMPT,
callback_manager=manager,
verbose=False
)
doc_chain = load_qa_chain(
streaming_llm,
chain_type="stuff",
prompt=QA_PROMPT,
callback_manager=manager,
verbose=False
)
qa = ConversationalRetrievalChain(
retriever=vectorstore.as_retriever(search_kwargs={"k": 3}),
combine_docs_chain=doc_chain,
callback_manager=manager,
question_generator=question_generator,
memory=memory,
verbose=False
)
return qa
def get_chain_RetrievalQA(
vectorstore: VectorStore, stream_handler, tracing: bool = False
) -> RetrievalQA:
"""Create a ChatVectorDBChain for question/answering."""
# Construct a ChatVectorDBChain with a streaming llm for combine docs
# and a separate, non-streaming llm for question generation
manager = AsyncCallbackManager([])
stream_manager = AsyncCallbackManager([stream_handler])
if tracing:
tracer = LangChainTracer()
tracer.load_default_session()
manager.add_handler(tracer)
stream_manager.add_handler(tracer)
streaming_llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
streaming=True,
callback_manager=stream_manager,
verbose=True,
max_retries=1
)
qa = RetrievalQA.from_llm(
streaming_llm,
        retriever=vectorstore.as_retriever(search_kwargs={"k": 2}),
callback_manager=manager,
prompt=QA_PROMPT
)
return qa
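    # A minimal usage sketch (illustrative, kept in a comment): unlike the chains
    # above, RetrievalQA takes a single "query" key and returns the answer under
    # "result".
    #
    #   result = await qa.acall({"query": "What is LangChain?"})
    #   answer = result["result"]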
| [
"langchain.chains.question_answering.load_qa_chain",
"langchain.callbacks.tracers.LangChainTracer",
"langchain.memory.ConversationBufferWindowMemory",
"langchain.callbacks.manager.AsyncCallbackManager",
"langchain.chains.llm.LLMChain",
"langchain.chat_models.ChatOpenAI"
] | [((1070, 1094), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (1090, 1094), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1118, 1158), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[question_handler]'], {}), '([question_handler])\n', (1138, 1158), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1180, 1218), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (1200, 1218), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((1456, 1579), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)', 'callback_manager': 'question_manager', 'verbose': '(True)', 'max_retries': '(1)'}), "(model_name='gpt-3.5-turbo', streaming=True, callback_manager=\n question_manager, verbose=True, max_retries=1)\n", (1466, 1579), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1641, 1762), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)', 'callback_manager': 'stream_manager', 'verbose': '(True)', 'max_retries': '(1)'}), "(model_name='gpt-3.5-turbo', streaming=True, callback_manager=\n stream_manager, verbose=True, max_retries=1)\n", (1651, 1762), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1830, 1923), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'question_gen_llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'callback_manager': 'manager'}), '(llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT,\n callback_manager=manager)\n', (1838, 1923), False, 'from langchain.chains.llm import LLMChain\n'), ((1967, 2063), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['streaming_llm'], {'chain_type': '"""stuff"""', 'prompt': 'QA_PROMPT', 'callback_manager': 'manager'}), "(streaming_llm, chain_type='stuff', prompt=QA_PROMPT,\n callback_manager=manager)\n", (1980, 2063), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((2669, 2693), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (2689, 2693), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2717, 2757), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[question_handler]'], {}), '([question_handler])\n', (2737, 2757), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2779, 2817), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (2799, 2817), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2831, 2910), 'langchain.memory.ConversationBufferWindowMemory', 'ConversationBufferWindowMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (2861, 2910), False, 'from langchain.memory import ConversationBufferWindowMemory\n'), ((2953, 3077), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)', 'callback_manager': 'question_manager', 'verbose': '(False)', 'max_retries': '(2)'}), "(model_name='gpt-3.5-turbo', streaming=True, callback_manager=\n question_manager, verbose=False, max_retries=2)\n", (2963, 3077), False, 'from langchain.chat_models import 
ChatOpenAI\n'), ((3139, 3276), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)', 'callback_manager': 'stream_manager', 'verbose': '(False)', 'max_retries': '(2)', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', streaming=True, callback_manager=\n stream_manager, verbose=False, max_retries=2, temperature=0)\n", (3149, 3276), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3352, 3460), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'question_gen_llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'callback_manager': 'manager', 'verbose': '(False)'}), '(llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT,\n callback_manager=manager, verbose=False)\n', (3360, 3460), False, 'from langchain.chains.llm import LLMChain\n'), ((3512, 3623), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['streaming_llm'], {'chain_type': '"""stuff"""', 'prompt': 'QA_PROMPT', 'callback_manager': 'manager', 'verbose': '(False)'}), "(streaming_llm, chain_type='stuff', prompt=QA_PROMPT,\n callback_manager=manager, verbose=False)\n", (3525, 3623), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((4286, 4310), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (4306, 4310), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((4332, 4370), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (4352, 4370), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((4560, 4681), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'streaming': '(True)', 'callback_manager': 'stream_manager', 'verbose': '(True)', 'max_retries': '(1)'}), "(model_name='gpt-3.5-turbo', streaming=True, callback_manager=\n stream_manager, verbose=True, max_retries=1)\n", (4570, 4681), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1252, 1269), 'langchain.callbacks.tracers.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (1267, 1269), False, 'from langchain.callbacks.tracers import LangChainTracer\n'), ((4404, 4421), 'langchain.callbacks.tracers.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (4419, 4421), False, 'from langchain.callbacks.tracers import LangChainTracer\n')] |
# Databricks notebook source
# MAGIC %md-sandbox
# MAGIC # 2/ Advanced chatbot with message history and filter using Langchain
# MAGIC
# MAGIC <img src="https://github.com/databricks-demos/dbdemos-resources/blob/main/images/product/chatbot-rag/llm-rag-self-managed-flow-2.png?raw=true" style="float: right; margin-left: 10px" width="900px;">
# MAGIC
# MAGIC Our Vector Search Index is now ready!
# MAGIC
# MAGIC Let's now create a more advanced langchain model to perform RAG.
# MAGIC
# MAGIC We will improve our langchain model with the following:
# MAGIC
# MAGIC - Build a complete chain supporting a chat history, using llama 2 input style
# MAGIC - Add a filter to only answer Databricks-related questions
# MAGIC - Compute the embeddings with Databricks BGE models within our chain to query the self-managed Vector Search Index
# MAGIC
# MAGIC <!-- Collect usage data (view). Remove it to disable collection or disable tracker during installation. View README for more details. -->
# MAGIC <img width="1px" src="https://ppxrzfxige.execute-api.us-west-2.amazonaws.com/v1/analytics?category=data-science&org_id=1785533703310188¬ebook=%2F02-advanced%2F02-Advanced-Chatbot-Chain&demo_name=llm-rag-chatbot&event=VIEW&path=%2F_dbdemos%2Fdata-science%2Fllm-rag-chatbot%2F02-advanced%2F02-Advanced-Chatbot-Chain&version=1">
# MAGIC
# COMMAND ----------
# MAGIC %md
# MAGIC ### A cluster has been created for this demo
# MAGIC To run this demo, just select the cluster `dbdemos-llm-rag-chatbot-jacek` from the dropdown menu ([open cluster configuration](https://training-partners.cloud.databricks.com/#setting/clusters/0222-165339-3s4fc1lc/configuration)). <br />
# MAGIC *Note: If the cluster was deleted after 30 days, you can re-create it with `dbdemos.create_cluster('llm-rag-chatbot')` or re-install the demo: `dbdemos.install('llm-rag-chatbot')`*
# COMMAND ----------
# MAGIC %pip install mlflow==2.9.0 lxml==4.9.3 langchain==0.0.344 databricks-vectorsearch==0.22 cloudpickle==2.2.1 databricks-sdk==0.12.0 cloudpickle==2.2.1 pydantic==2.5.2
# MAGIC %pip install pip mlflow[databricks]==2.9.0
# MAGIC dbutils.library.restartPython()
# COMMAND ----------
# MAGIC %run ../_resources/00-init-advanced $reset_all_data=false
# COMMAND ----------
# MAGIC %md
# MAGIC ## Exploring Langchain capabilities
# MAGIC
# MAGIC Let's start with the basics and send a query to a Databricks Foundation Model using LangChain.
# COMMAND ----------
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatDatabricks
from langchain.schema.output_parser import StrOutputParser
prompt = PromptTemplate(
input_variables = ["question"],
template = "You are an assistant. Give a short answer to this question: {question}"
)
chat_model = ChatDatabricks(endpoint="databricks-llama-2-70b-chat", max_tokens = 500)
chain = (
prompt
| chat_model
| StrOutputParser()
)
print(chain.invoke({"question": "What is Spark?"}))
# COMMAND ----------
# MAGIC %md
# MAGIC ## Adding conversation history to the prompt
# COMMAND ----------
prompt_with_history_str = """
You are a Big Data chatbot. Please answer Big Data questions only. If you don't know the answer or the question is not related to Big Data, don't answer.
Here is a history between you and a human: {chat_history}
Now, please answer this question: {question}
"""
prompt_with_history = PromptTemplate(
input_variables = ["chat_history", "question"],
template = prompt_with_history_str
)
# COMMAND ----------
# MAGIC %md When invoking our chain, we'll pass history as a list, specifying whether each message was sent by a user or the assistant. For example:
# MAGIC
# MAGIC ```
# MAGIC [
# MAGIC {"role": "user", "content": "What is Apache Spark?"},
# MAGIC {"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."},
# MAGIC {"role": "user", "content": "Does it support streaming?"}
# MAGIC ]
# MAGIC ```
# MAGIC
# MAGIC Let's create chain components to transform this input into the inputs passed to `prompt_with_history`.
# COMMAND ----------
from langchain.schema.runnable import RunnableLambda
from operator import itemgetter
#The question is the last entry of the history
def extract_question(input):
return input[-1]["content"]
#The history is everything before the last question
def extract_history(input):
return input[:-1]
chain_with_history = (
{
"question": itemgetter("messages") | RunnableLambda(extract_question),
"chat_history": itemgetter("messages") | RunnableLambda(extract_history),
}
| prompt_with_history
| chat_model
| StrOutputParser()
)
print(chain_with_history.invoke({
"messages": [
{"role": "user", "content": "What is Apache Spark?"},
{"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."},
{"role": "user", "content": "Does it support streaming?"}
]
}))
# COMMAND ----------
# MAGIC %md
# MAGIC ## Let's add a filter on top to only answer Databricks-related questions.
# MAGIC
# MAGIC We want our chatbot to be professional and only answer questions related to Databricks. Let's create a small chain and add a first classification step.
# MAGIC
# MAGIC *Note: this is a fairly naive implementation; another option is to add a small classification model based on the question embedding, which provides faster classification.*
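# MAGIC
# MAGIC *As a rough, hypothetical illustration of that alternative (not used in the rest of this notebook), the next cell embeds the question with the same `databricks-bge-large-en` endpoint and compares it to a few labelled on-topic questions; the helper name, example questions and threshold are assumptions:*
# COMMAND ----------
# Hypothetical sketch only: an embedding-similarity topic filter (not wired into the chains below)
from langchain.embeddings import DatabricksEmbeddings
import numpy as np
_bge = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
_on_topic_examples = [
    "How do I create a Delta table in Databricks?",
    "How do I configure a Databricks cluster?",
    "What is Apache Spark used for?"
]
_on_topic_vectors = np.array(_bge.embed_documents(_on_topic_examples))
def is_probably_about_databricks(question: str, threshold: float = 0.6) -> bool:
    #Cosine similarity against the labelled examples; 0.6 is an arbitrary starting threshold
    q = np.array(_bge.embed_query(question))
    sims = _on_topic_vectors @ q / (np.linalg.norm(_on_topic_vectors, axis=1) * np.linalg.norm(q))
    return bool(sims.max() >= threshold)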
# COMMAND ----------
chat_model = ChatDatabricks(endpoint="databricks-llama-2-70b-chat", max_tokens = 200)
is_question_about_databricks_str = """
You are classifying documents to know if this question is related to Databricks in AWS, Azure and GCP, Workspaces, Databricks account and cloud infrastructure setup, Data Science, Data Engineering, Big Data, Datawarehousing, SQL, Python and Scala or something from a very different field. Also answer no if the last part is inappropriate.
Here are some examples:
Question: Knowing this followup history: What is Databricks?, classify this question: Do you have more details?
Expected Response: Yes
Question: Knowing this followup history: What is Databricks?, classify this question: Write me a song.
Expected Response: No
Only answer with "yes" or "no".
Knowing this followup history: {chat_history}, classify this question: {question}
"""
is_question_about_databricks_prompt = PromptTemplate(
input_variables= ["chat_history", "question"],
template = is_question_about_databricks_str
)
is_about_databricks_chain = (
{
"question": itemgetter("messages") | RunnableLambda(extract_question),
"chat_history": itemgetter("messages") | RunnableLambda(extract_history),
}
| is_question_about_databricks_prompt
| chat_model
| StrOutputParser()
)
#Returns "Yes" as this is about Databricks:
print(is_about_databricks_chain.invoke({
"messages": [
{"role": "user", "content": "What is Apache Spark?"},
{"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."},
{"role": "user", "content": "Does it support streaming?"}
]
}))
# COMMAND ----------
#Return "no" as this isn't about Databricks
print(is_about_databricks_chain.invoke({
"messages": [
{"role": "user", "content": "What is the meaning of life?"}
]
}))
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ### Use LangChain to retrieve documents from the vector store
# MAGIC
# MAGIC <img src="https://github.com/databricks-demos/dbdemos-resources/blob/main/images/product/chatbot-rag/llm-rag-self-managed-model-1.png?raw=true" style="float: right" width="500px">
# MAGIC
# MAGIC Let's add our LangChain retriever.
# MAGIC
# MAGIC It will be in charge of:
# MAGIC
# MAGIC * Creating the input question embeddings (with Databricks `bge-large-en`)
# MAGIC * Calling the vector search index to find similar documents to augment the prompt with
# MAGIC
# MAGIC Databricks LangChain wrapper makes it easy to do in one step, handling all the underlying logic and API call for you.
# COMMAND ----------
index_name=f"{catalog}.{db}.databricks_pdf_documentation_self_managed_vs_index"
host = "https://" + spark.conf.get("spark.databricks.workspaceUrl")
#Let's make sure the secret is properly setup and can access our vector search index. Check the quick-start demo for more guidance
test_demo_permissions(host, secret_scope="dbdemos", secret_key="rag_sp_token", vs_endpoint_name=VECTOR_SEARCH_ENDPOINT_NAME, index_name=index_name, embedding_endpoint_name="databricks-bge-large-en", managed_embeddings = False)
# COMMAND ----------
from databricks.vector_search.client import VectorSearchClient
from langchain.vectorstores import DatabricksVectorSearch
from langchain.embeddings import DatabricksEmbeddings
from langchain.chains import RetrievalQA
os.environ['DATABRICKS_TOKEN'] = dbutils.secrets.get("dbdemos", "rag_sp_token")
embedding_model = DatabricksEmbeddings(endpoint="databricks-bge-large-en")
def get_retriever(persist_dir: str = None):
os.environ["DATABRICKS_HOST"] = host
#Get the vector search index
vsc = VectorSearchClient(workspace_url=host, personal_access_token=os.environ["DATABRICKS_TOKEN"])
vs_index = vsc.get_index(
endpoint_name=VECTOR_SEARCH_ENDPOINT_NAME,
index_name=index_name
)
# Create the retriever
vectorstore = DatabricksVectorSearch(
vs_index, text_column="content", embedding=embedding_model, columns=["url"]
)
return vectorstore.as_retriever(search_kwargs={'k': 4})
retriever = get_retriever()
retrieve_document_chain = (
itemgetter("messages")
| RunnableLambda(extract_question)
| retriever
)
print(retrieve_document_chain.invoke({"messages": [{"role": "user", "content": "What is Apache Spark?"}]}))
# COMMAND ----------
# MAGIC %md
# MAGIC ### Improve document search using LLM to generate a better sentence for the vector store, based on the chat history
# MAGIC
# MAGIC We need to retrieve documents related not only to the last question but also to the chat history.
# MAGIC
# MAGIC One solution is to add a step for our LLM to summarize the history and the last question, making it a better fit for our vector search query. Let's do that as a new step in our chain:
# COMMAND ----------
from langchain.schema.runnable import RunnableBranch
generate_query_to_retrieve_context_template = """
Based on the chat history below, we want you to generate a query for an external data source to retrieve relevant documents so that we can better answer the question. The query should be in natural language. The external data source uses similarity search to search for relevant documents in a vector space. So the query should be similar to the relevant documents semantically. Answer with only the query. Do not add explanation.
Chat history: {chat_history}
Question: {question}
"""
generate_query_to_retrieve_context_prompt = PromptTemplate(
input_variables= ["chat_history", "question"],
template = generate_query_to_retrieve_context_template
)
generate_query_to_retrieve_context_chain = (
{
"question": itemgetter("messages") | RunnableLambda(extract_question),
"chat_history": itemgetter("messages") | RunnableLambda(extract_history),
}
| RunnableBranch( #Augment query only when there is a chat history
(lambda x: x["chat_history"], generate_query_to_retrieve_context_prompt | chat_model | StrOutputParser()),
(lambda x: not x["chat_history"], RunnableLambda(lambda x: x["question"])),
RunnableLambda(lambda x: x["question"])
)
)
#Let's try it
output = generate_query_to_retrieve_context_chain.invoke({
"messages": [
{"role": "user", "content": "What is Apache Spark?"}
]
})
print(f"Test retriever query without history: {output}")
output = generate_query_to_retrieve_context_chain.invoke({
"messages": [
{"role": "user", "content": "What is Apache Spark?"},
{"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."},
{"role": "user", "content": "Does it support streaming?"}
]
})
print(f"Test retriever question, summarized with history: {output}")
# COMMAND ----------
# MAGIC %md-sandbox
# MAGIC ## Let's put it together
# MAGIC
# MAGIC <img src="https://github.com/databricks-demos/dbdemos-resources/blob/main/images/product/chatbot-rag/llm-rag-self-managed-model-2.png?raw=true" style="float: right" width="600px">
# MAGIC
# MAGIC
# MAGIC Let's now merge the retriever and the full LangChain chain.
# MAGIC
# MAGIC We will use a custom LangChain template for our assistant to give a proper answer.
# MAGIC
# MAGIC Make sure you take some time to try different templates and adjust your assistant's tone and personality to your requirements.
# MAGIC
# MAGIC
# COMMAND ----------
from langchain.schema.runnable import RunnableBranch, RunnableParallel, RunnablePassthrough
question_with_history_and_context_str = """
You are a trustworthy assistant for Databricks users. You answer Python, coding, SQL, data engineering, Spark, data science, AI, ML, data warehouse, platform, API or infrastructure, and cloud administration questions related to Databricks. If you do not know the answer to a question, you truthfully say you do not know. Read the discussion to get the context of the previous conversation. In the chat discussion, you are referred to as "system". The user is referred to as "user".
Discussion: {chat_history}
Here's some context which might or might not help you answer: {context}
Answer straight, do not repeat the question, do not start with something like: the answer to the question, do not add "AI" in front of your answer, do not say: here is the answer, do not mention the context or the question.
Based on this history and context, answer this question: {question}
"""
question_with_history_and_context_prompt = PromptTemplate(
input_variables= ["chat_history", "context", "question"],
template = question_with_history_and_context_str
)
def format_context(docs):
return "\n\n".join([d.page_content for d in docs])
def extract_source_urls(docs):
return [d.metadata["url"] for d in docs]
relevant_question_chain = (
RunnablePassthrough() |
{
"relevant_docs": generate_query_to_retrieve_context_prompt | chat_model | StrOutputParser() | retriever,
"chat_history": itemgetter("chat_history"),
"question": itemgetter("question")
}
|
{
"context": itemgetter("relevant_docs") | RunnableLambda(format_context),
"sources": itemgetter("relevant_docs") | RunnableLambda(extract_source_urls),
"chat_history": itemgetter("chat_history"),
"question": itemgetter("question")
}
|
{
"prompt": question_with_history_and_context_prompt,
"sources": itemgetter("sources")
}
|
{
"result": itemgetter("prompt") | chat_model | StrOutputParser(),
"sources": itemgetter("sources")
}
)
irrelevant_question_chain = (
RunnableLambda(lambda x: {"result": 'I cannot answer questions that are not about Databricks.', "sources": []})
)
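#branch_node routes on the classifier output: an answer containing "yes" runs relevant_question_chain,
#anything else (including ambiguous answers) falls back to irrelevant_question_chain.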
branch_node = RunnableBranch(
(lambda x: "yes" in x["question_is_relevant"].lower(), relevant_question_chain),
(lambda x: "no" in x["question_is_relevant"].lower(), irrelevant_question_chain),
irrelevant_question_chain
)
full_chain = (
{
"question_is_relevant": is_about_databricks_chain,
"question": itemgetter("messages") | RunnableLambda(extract_question),
"chat_history": itemgetter("messages") | RunnableLambda(extract_history),
}
| branch_node
)
# COMMAND ----------
# MAGIC %md
# MAGIC Let's try our full chain:
# COMMAND ----------
# DBTITLE 1,Asking an out-of-scope question
import json
non_relevant_dialog = {
"messages": [
{"role": "user", "content": "What is Apache Spark?"},
{"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."},
{"role": "user", "content": "Why is the sky blue?"}
]
}
print(f'Testing with a non-relevant question...')
response = full_chain.invoke(non_relevant_dialog)
display_chat(non_relevant_dialog["messages"], response)
# COMMAND ----------
# DBTITLE 1,Asking a relevant question
dialog = {
"messages": [
{"role": "user", "content": "What is Apache Spark?"},
{"role": "assistant", "content": "Apache Spark is an open-source data processing engine that is widely used in big data analytics."},
{"role": "user", "content": "Does it support streaming?"}
]
}
print(f'Testing with relevant history and question...')
response = full_chain.invoke(dialog)
display_chat(dialog["messages"], response)
# COMMAND ----------
# MAGIC %md
# MAGIC ## Register the chatbot model to Unity Catalog
# COMMAND ----------
import cloudpickle
import langchain
from mlflow.models import infer_signature
mlflow.set_registry_uri("databricks-uc")
model_name = f"{catalog}.{db}.dbdemos_advanced_chatbot_model"
with mlflow.start_run(run_name="dbdemos_chatbot_rag") as run:
#Get our model signature from input/output
input_df = pd.DataFrame({"messages": [dialog]})
output = full_chain.invoke(dialog)
signature = infer_signature(input_df, output)
model_info = mlflow.langchain.log_model(
full_chain,
loader_fn=get_retriever, # Load the retriever with DATABRICKS_TOKEN env as secret (for authentication).
artifact_path="chain",
registered_model_name=model_name,
pip_requirements=[
"mlflow==" + mlflow.__version__,
"langchain==" + langchain.__version__,
"databricks-vectorsearch",
"pydantic==2.5.2 --no-binary pydantic",
"cloudpickle=="+ cloudpickle.__version__
],
input_example=input_df,
signature=signature
)
# COMMAND ----------
# MAGIC %md Let's try loading our model
# COMMAND ----------
model = mlflow.langchain.load_model(model_info.model_uri)
model.invoke(dialog)
# COMMAND ----------
# MAGIC %md
# MAGIC
# MAGIC ## Conclusion
# MAGIC
# MAGIC We've seen how we can improve our chatbot, adding more advanced capabilities to handle a chat history.
# MAGIC
# MAGIC As you add capabilities to your model and tune the prompt, it will get harder to evaluate your model performance in a repeatable way.
# MAGIC
# MAGIC Your new prompt might work well for what you tried to fix, but it could also have an impact on other questions.
# MAGIC
# MAGIC ## Next: Introducing offline model evaluation with MLflow
# MAGIC
# MAGIC To solve these issues, we need a repeatable way of testing our model's answers as part of our LLMOps deployment!
# MAGIC
# MAGIC Open the next [03-Offline-Evaluation]($./03-Offline-Evaluation) notebook to discover how to evaluate your model.
| [
"langchain.schema.output_parser.StrOutputParser",
"langchain.embeddings.DatabricksEmbeddings",
"langchain.schema.runnable.RunnablePassthrough",
"langchain.vectorstores.DatabricksVectorSearch",
"langchain.chat_models.ChatDatabricks",
"langchain.schema.runnable.RunnableLambda",
"langchain.prompts.PromptTemplate"
] | [((2610, 2742), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question']", 'template': '"""You are an assistant. Give a short answer to this question: {question}"""'}), "(input_variables=['question'], template=\n 'You are an assistant. Give a short answer to this question: {question}')\n", (2624, 2742), False, 'from langchain.prompts import PromptTemplate\n'), ((2761, 2831), 'langchain.chat_models.ChatDatabricks', 'ChatDatabricks', ([], {'endpoint': '"""databricks-llama-2-70b-chat"""', 'max_tokens': '(500)'}), "(endpoint='databricks-llama-2-70b-chat', max_tokens=500)\n", (2775, 2831), False, 'from langchain.chat_models import ChatDatabricks\n'), ((3347, 3446), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['chat_history', 'question']", 'template': 'prompt_with_history_str'}), "(input_variables=['chat_history', 'question'], template=\n prompt_with_history_str)\n", (3361, 3446), False, 'from langchain.prompts import PromptTemplate\n'), ((5507, 5577), 'langchain.chat_models.ChatDatabricks', 'ChatDatabricks', ([], {'endpoint': '"""databricks-llama-2-70b-chat"""', 'max_tokens': '(200)'}), "(endpoint='databricks-llama-2-70b-chat', max_tokens=200)\n", (5521, 5577), False, 'from langchain.chat_models import ChatDatabricks\n'), ((6409, 6517), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['chat_history', 'question']", 'template': 'is_question_about_databricks_str'}), "(input_variables=['chat_history', 'question'], template=\n is_question_about_databricks_str)\n", (6423, 6517), False, 'from langchain.prompts import PromptTemplate\n'), ((8992, 9048), 'langchain.embeddings.DatabricksEmbeddings', 'DatabricksEmbeddings', ([], {'endpoint': '"""databricks-bge-large-en"""'}), "(endpoint='databricks-bge-large-en')\n", (9012, 9048), False, 'from langchain.embeddings import DatabricksEmbeddings\n'), ((10974, 11093), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['chat_history', 'question']", 'template': 'generate_query_to_retrieve_context_template'}), "(input_variables=['chat_history', 'question'], template=\n generate_query_to_retrieve_context_template)\n", (10988, 11093), False, 'from langchain.prompts import PromptTemplate\n'), ((13980, 14103), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['chat_history', 'context', 'question']", 'template': 'question_with_history_and_context_str'}), "(input_variables=['chat_history', 'context', 'question'],\n template=question_with_history_and_context_str)\n", (13994, 14103), False, 'from langchain.prompts import PromptTemplate\n'), ((15045, 15160), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (["(lambda x: {'result':\n 'I cannot answer questions that are not about Databricks.', 'sources': []})"], {}), "(lambda x: {'result':\n 'I cannot answer questions that are not about Databricks.', 'sources': []})\n", (15059, 15160), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((2873, 2890), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (2888, 2890), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((4639, 4656), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (4654, 4656), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((6791, 6808), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (6806, 6808), False, 'from 
langchain.schema.output_parser import StrOutputParser\n'), ((9178, 9275), 'databricks.vector_search.client.VectorSearchClient', 'VectorSearchClient', ([], {'workspace_url': 'host', 'personal_access_token': "os.environ['DATABRICKS_TOKEN']"}), "(workspace_url=host, personal_access_token=os.environ[\n 'DATABRICKS_TOKEN'])\n", (9196, 9275), False, 'from databricks.vector_search.client import VectorSearchClient\n'), ((9434, 9538), 'langchain.vectorstores.DatabricksVectorSearch', 'DatabricksVectorSearch', (['vs_index'], {'text_column': '"""content"""', 'embedding': 'embedding_model', 'columns': "['url']"}), "(vs_index, text_column='content', embedding=\n embedding_model, columns=['url'])\n", (9456, 9538), False, 'from langchain.vectorstores import DatabricksVectorSearch\n'), ((17282, 17315), 'mlflow.models.infer_signature', 'infer_signature', (['input_df', 'output'], {}), '(input_df, output)\n', (17297, 17315), False, 'from mlflow.models import infer_signature\n'), ((9670, 9692), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (9680, 9692), False, 'from operator import itemgetter\n'), ((9700, 9732), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_question'], {}), '(extract_question)\n', (9714, 9732), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((11590, 11629), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (["(lambda x: x['question'])"], {}), "(lambda x: x['question'])\n", (11604, 11629), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((14984, 15005), 'operator.itemgetter', 'itemgetter', (['"""sources"""'], {}), "('sources')\n", (14994, 15005), False, 'from operator import itemgetter\n'), ((11170, 11192), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (11180, 11192), False, 'from operator import itemgetter\n'), ((11195, 11227), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_question'], {}), '(extract_question)\n', (11209, 11227), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((11253, 11275), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (11263, 11275), False, 'from operator import itemgetter\n'), ((11278, 11309), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_history'], {}), '(extract_history)\n', (11292, 11309), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((11542, 11581), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (["(lambda x: x['question'])"], {}), "(lambda x: x['question'])\n", (11556, 11581), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((14866, 14887), 'operator.itemgetter', 'itemgetter', (['"""sources"""'], {}), "('sources')\n", (14876, 14887), False, 'from operator import itemgetter\n'), ((14950, 14967), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (14965, 14967), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((15478, 15500), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (15488, 15500), False, 'from operator import itemgetter\n'), ((15503, 15535), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_question'], {}), '(extract_question)\n', (15517, 15535), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((15557, 15579), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (15567, 15579), False, 'from operator 
import itemgetter\n'), ((15582, 15613), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_history'], {}), '(extract_history)\n', (15596, 15613), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((11482, 11499), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (11497, 11499), False, 'from langchain.schema.output_parser import StrOutputParser\n'), ((14299, 14320), 'langchain.schema.runnable.RunnablePassthrough', 'RunnablePassthrough', ([], {}), '()\n', (14318, 14320), False, 'from langchain.schema.runnable import RunnableBranch, RunnableParallel, RunnablePassthrough\n'), ((14715, 14741), 'operator.itemgetter', 'itemgetter', (['"""chat_history"""'], {}), "('chat_history')\n", (14725, 14741), False, 'from operator import itemgetter\n'), ((14760, 14782), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (14770, 14782), False, 'from operator import itemgetter\n'), ((14914, 14934), 'operator.itemgetter', 'itemgetter', (['"""prompt"""'], {}), "('prompt')\n", (14924, 14934), False, 'from operator import itemgetter\n'), ((4443, 4465), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (4453, 4465), False, 'from operator import itemgetter\n'), ((4468, 4500), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_question'], {}), '(extract_question)\n', (4482, 4500), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((4526, 4548), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (4536, 4548), False, 'from operator import itemgetter\n'), ((4551, 4582), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_history'], {}), '(extract_history)\n', (4565, 4582), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((6579, 6601), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (6589, 6601), False, 'from operator import itemgetter\n'), ((6604, 6636), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_question'], {}), '(extract_question)\n', (6618, 6636), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((6662, 6684), 'operator.itemgetter', 'itemgetter', (['"""messages"""'], {}), "('messages')\n", (6672, 6684), False, 'from operator import itemgetter\n'), ((6687, 6718), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['extract_history'], {}), '(extract_history)\n', (6701, 6718), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((14456, 14482), 'operator.itemgetter', 'itemgetter', (['"""chat_history"""'], {}), "('chat_history')\n", (14466, 14482), False, 'from operator import itemgetter\n'), ((14501, 14523), 'operator.itemgetter', 'itemgetter', (['"""question"""'], {}), "('question')\n", (14511, 14523), False, 'from operator import itemgetter\n'), ((14551, 14578), 'operator.itemgetter', 'itemgetter', (['"""relevant_docs"""'], {}), "('relevant_docs')\n", (14561, 14578), False, 'from operator import itemgetter\n'), ((14581, 14611), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', (['format_context'], {}), '(format_context)\n', (14595, 14611), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((14628, 14655), 'operator.itemgetter', 'itemgetter', (['"""relevant_docs"""'], {}), "('relevant_docs')\n", (14638, 14655), False, 'from operator import itemgetter\n'), ((14658, 14693), 'langchain.schema.runnable.RunnableLambda', 'RunnableLambda', 
(['extract_source_urls'], {}), '(extract_source_urls)\n', (14672, 14693), False, 'from langchain.schema.runnable import RunnableLambda\n'), ((14405, 14422), 'langchain.schema.output_parser.StrOutputParser', 'StrOutputParser', ([], {}), '()\n', (14420, 14422), False, 'from langchain.schema.output_parser import StrOutputParser\n')] |
import streamlit as st
import dotenv
import langchain
import json
from cassandra.cluster import Session
from cassandra.query import PreparedStatement
from langchain.agents.agent_toolkits import create_retriever_tool, create_conversational_retrieval_agent
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.callbacks import StreamlitCallbackHandler
from langchain.schema import BaseRetriever, Document, SystemMessage
#from langchain.cache import CassandraSemanticCache
from cassandra.cluster import Cluster, Session
from cassandra.auth import PlainTextAuthProvider
# Enable langchain debug mode
#langchain.debug = True
dotenv.load_dotenv(dotenv.find_dotenv())
class AstraProductRetriever(BaseRetriever):
session: Session
embedding: OpenAIEmbeddings
lang: str = "Japanese"
search_statement_en: PreparedStatement = None
search_statement_ja: PreparedStatement = None
class Config:
arbitrary_types_allowed = True
def get_relevant_documents(self, query):
docs = []
embeddingvector = self.embedding.embed_query(query)
if self.lang == "Japanese":
if self.search_statement_ja is None:
self.search_statement_ja = self.session.prepare("""
SELECT
id,
similarity_cosine(sem_vec, ?) as similarity,
title,
author,
publisher,
price,
description
FROM app.book_openai
ORDER BY sem_vec ANN OF ?
LIMIT ?
""")
query = self.search_statement_ja
else:
if self.search_statement_en is None:
self.search_statement_en = self.session.prepare("""
SELECT
id,
similarity_cosine(sem_vec, ?) as similarity,
title,
author,
publisher,
price,
description
FROM app.book_openai_en
ORDER BY sem_vec ANN OF ?
LIMIT ?
""")
query = self.search_statement_en
results = self.session.execute(query, [embeddingvector, embeddingvector, 5])
top_products = results._current_rows
for r in top_products:
if r.similarity > 0.91:
docs.append(Document(
id=r.id,
page_content=r.title,
metadata={"product id": r.id,
"title": r.title,
"author": r.author,
"publisher": r.publisher,
"description": r.description,
"price": r.price
}
))
return docs
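# Hypothetical direct usage (outside the agent), assuming an existing `session`:
#   retriever = AstraProductRetriever(session=session, embedding=OpenAIEmbeddings(), lang="English")
#   docs = retriever.get_relevant_documents("mystery novels under 2000 yen")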
def get_session(scb: str, secrets: str) -> Session:
"""
Connect to Astra DB using secure connect bundle and credentials.
Parameters
----------
scb : str
Path to secure connect bundle.
secrets : str
Path to credentials.
"""
cloud_config = {
'secure_connect_bundle': scb
}
with open(secrets) as f:
secrets = json.load(f)
CLIENT_ID = secrets["clientId"]
CLIENT_SECRET = secrets["secret"]
auth_provider = PlainTextAuthProvider(CLIENT_ID, CLIENT_SECRET)
cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider)
return cluster.connect()
@st.cache_resource
def create_chatbot(lang: str):
print(f"Creating chatbot for {lang}...")
session = get_session(scb='./config/secure-connect-demo.zip',
secrets='./config/demo-token.json')
llm = ChatOpenAI(temperature=0, streaming=True)
embedding = OpenAIEmbeddings()
#langchain.llm_cache = CassandraSemanticCache(session=session,
# keyspace="bookstore",
# embedding=embedding,
# table_name="cass_sem_cache")
retriever = AstraProductRetriever(
session=session, embedding=embedding, lang=lang)
retriever_tool = create_retriever_tool(
retriever, "books_retrevier", "Useful when searching for books from a book store. Prices are in YEN.")
system_message = """
    You are a customer service agent of a book store and you are asked to pick books for a customer.
    You must try to find books related to the given questions first.
    You must use the books_retriever.
    You must not provide any information other than books that you get from books_retriever.
You should behave as a bookstore clerk.
"""
if lang == "Japanese":
system_message = f"{system_message} All the responses should be in Japanese language."
message = SystemMessage(content=system_message)
agent_executor = create_conversational_retrieval_agent(
llm=llm, tools=[retriever_tool], system_message=message, verbose=True)
return agent_executor
if 'history' not in st.session_state:
st.session_state['history'] = {
"English": [],
"Japanese" : []
}
st.set_page_config(layout="wide")
#with st.sidebar:
# lang = st.radio(
# "Chat language",
# ["English", "Japanese"],
# captions=[".", "Experimental", "."])
lang = "Japanese"
chatbot = create_chatbot(lang)
# Display chat messages from history on app rerun
for (query, answer) in st.session_state['history'][lang]:
with st.chat_message("User"):
st.markdown(query)
with st.chat_message("Bot"):
st.markdown(answer)
prompt = st.chat_input(placeholder="Ask chatbot")
if prompt:
# Display user message in chat message container
with st.chat_message("User"):
st.markdown(prompt)
# Display assistant response in chat message container
with st.chat_message("Bot"):
st_callback = StreamlitCallbackHandler(st.container())
result = chatbot.invoke({
"input": prompt,
"chat_history": st.session_state['history'][lang]
}, config={"callbacks": [st_callback]})
st.session_state['history'][lang].append((prompt, result["output"]))
st.markdown(result["output"])
| [
"langchain.chat_models.ChatOpenAI",
"langchain.schema.Document",
"langchain.agents.agent_toolkits.create_conversational_retrieval_agent",
"langchain.agents.agent_toolkits.create_retriever_tool",
"langchain.schema.SystemMessage",
"langchain.embeddings.OpenAIEmbeddings"
] | [((5375, 5408), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""wide"""'}), "(layout='wide')\n", (5393, 5408), True, 'import streamlit as st\n'), ((5847, 5887), 'streamlit.chat_input', 'st.chat_input', ([], {'placeholder': '"""Ask chatbot"""'}), "(placeholder='Ask chatbot')\n", (5860, 5887), True, 'import streamlit as st\n'), ((702, 722), 'dotenv.find_dotenv', 'dotenv.find_dotenv', ([], {}), '()\n', (720, 722), False, 'import dotenv\n'), ((3536, 3583), 'cassandra.auth.PlainTextAuthProvider', 'PlainTextAuthProvider', (['CLIENT_ID', 'CLIENT_SECRET'], {}), '(CLIENT_ID, CLIENT_SECRET)\n', (3557, 3583), False, 'from cassandra.auth import PlainTextAuthProvider\n'), ((3598, 3654), 'cassandra.cluster.Cluster', 'Cluster', ([], {'cloud': 'cloud_config', 'auth_provider': 'auth_provider'}), '(cloud=cloud_config, auth_provider=auth_provider)\n', (3605, 3654), False, 'from cassandra.cluster import Cluster, Session\n'), ((3919, 3960), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'streaming': '(True)'}), '(temperature=0, streaming=True)\n', (3929, 3960), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3977, 3995), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (3993, 3995), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((4404, 4532), 'langchain.agents.agent_toolkits.create_retriever_tool', 'create_retriever_tool', (['retriever', '"""books_retrevier"""', '"""Useful when searching for books from a book store. Prices are in YEN."""'], {}), "(retriever, 'books_retrevier',\n 'Useful when searching for books from a book store. Prices are in YEN.')\n", (4425, 4532), False, 'from langchain.agents.agent_toolkits import create_retriever_tool, create_conversational_retrieval_agent\n'), ((5042, 5079), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_message'}), '(content=system_message)\n', (5055, 5079), False, 'from langchain.schema import BaseRetriever, Document, SystemMessage\n'), ((5101, 5213), 'langchain.agents.agent_toolkits.create_conversational_retrieval_agent', 'create_conversational_retrieval_agent', ([], {'llm': 'llm', 'tools': '[retriever_tool]', 'system_message': 'message', 'verbose': '(True)'}), '(llm=llm, tools=[retriever_tool],\n system_message=message, verbose=True)\n', (5138, 5213), False, 'from langchain.agents.agent_toolkits import create_retriever_tool, create_conversational_retrieval_agent\n'), ((3427, 3439), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3436, 3439), False, 'import json\n'), ((5724, 5747), 'streamlit.chat_message', 'st.chat_message', (['"""User"""'], {}), "('User')\n", (5739, 5747), True, 'import streamlit as st\n'), ((5757, 5775), 'streamlit.markdown', 'st.markdown', (['query'], {}), '(query)\n', (5768, 5775), True, 'import streamlit as st\n'), ((5785, 5807), 'streamlit.chat_message', 'st.chat_message', (['"""Bot"""'], {}), "('Bot')\n", (5800, 5807), True, 'import streamlit as st\n'), ((5817, 5836), 'streamlit.markdown', 'st.markdown', (['answer'], {}), '(answer)\n', (5828, 5836), True, 'import streamlit as st\n'), ((5961, 5984), 'streamlit.chat_message', 'st.chat_message', (['"""User"""'], {}), "('User')\n", (5976, 5984), True, 'import streamlit as st\n'), ((5994, 6013), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (6005, 6013), True, 'import streamlit as st\n'), ((6082, 6104), 'streamlit.chat_message', 'st.chat_message', (['"""Bot"""'], {}), "('Bot')\n", (6097, 6104), True, 'import streamlit as st\n'), 
((6471, 6500), 'streamlit.markdown', 'st.markdown', (["result['output']"], {}), "(result['output'])\n", (6482, 6500), True, 'import streamlit as st\n'), ((6153, 6167), 'streamlit.container', 'st.container', ([], {}), '()\n', (6165, 6167), True, 'import streamlit as st\n'), ((2592, 2782), 'langchain.schema.Document', 'Document', ([], {'id': 'r.id', 'page_content': 'r.title', 'metadata': "{'product id': r.id, 'title': r.title, 'author': r.author, 'publisher': r.\n publisher, 'description': r.description, 'price': r.price}"}), "(id=r.id, page_content=r.title, metadata={'product id': r.id,\n 'title': r.title, 'author': r.author, 'publisher': r.publisher,\n 'description': r.description, 'price': r.price})\n", (2600, 2782), False, 'from langchain.schema import BaseRetriever, Document, SystemMessage\n')] |
# import modules
import telebot
from telebot import *
import logging
import sqlite3
import os
import langchain
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import TextLoader
from langchain.document_loaders import DirectoryLoader
from langchain.vectorstores import Chroma
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
# connect to the database
conn = sqlite3.connect(r"main.db", check_same_thread=False)
cur = conn.cursor()
# start logging
logging.basicConfig(level=logging.INFO, filename="../info.log", filemode='w')
# init a bot with token from file
bot_token_file = open("bot_token.txt", "r")
API_KEY = bot_token_file.readline()
bot_token_file.close()
bot = telebot.TeleBot("7174085128:AAGfMlZh5wUoV3vXfoGOYtb9vkN3SbqOmAE")
# set the openai token
token_file = open("openai_token.txt", "r")
token = token_file.readline().strip()
token_file.close()
os.environ["OPENAI_API_KEY"] = token
docs_k = 65 # const
number_of_goods = 6 # const
goods = ["Philips EP2231/40", "Nivona CafeRomatica NICR 550", # list of goods
"Delonghi ECAM 370.70.B", "Polaris PACM 2065AC",
"Philips EP2030/10", "REDMOND RCM-1517"]
langchain.debug = False # debug is off
# read the vector databases
vectordb_list = []
embedding = OpenAIEmbeddings()
for i in range(number_of_goods):
vectordb_list.append(Chroma(embedding_function=embedding,
persist_directory="../output/"+str(i)))
for vectordb in vectordb_list:
print(vectordb._collection.count())
def get_info(itemID):
question = "Tell us about this coffee machine"
template = """You are a useful AI consultant for our household appliances store selling coffee machines.
Your task is to describe this coffee machine. Talk only about the merits.
Use the following pieces of context (Context) to answer the question (Question) at the end.
If you don't know the answer, just say you don't know, don't try to make up an answer.
First, make sure the attached text is relevant to the question.
If the question does not relate to the text, answer that you cannot answer this question.
Use a maximum of 15 sentences.
Give your answer as clearly as possible, briefly describing all the advantages of this particular coffee machine.
Context: {context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
vectordb = vectordb_list[itemID]
retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": docs_k})
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0,
max_tokens = 250)
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT})
result = qa_chain({"query": question})
return result["result"]
def get_answer(itemID, question):
template = """You are a useful AI consultant for our household appliances store selling coffee machines.
Your task is to clearly answer the buyer's question.
Use the following pieces of context (Context) to answer the question (Question) at the end.
If you don't know the answer, just say you don't know, don't try to make up an answer.
First, make sure the attached text is relevant to the question.
If the question does not relate to the text, answer that you cannot answer this question.
Use a maximum of 15 sentences.
Make your answer as clear as possible. Speak competently.
Context: {context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
vectordb = vectordb_list[itemID]
retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": docs_k})
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0,
max_tokens = 250)
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT})
result = qa_chain({"query": question})
return result["result"]
def check_step(step, id):
cur.execute("SELECT status FROM user WHERE userID = ?", (id,))
fetch_result = cur.fetchone()
    # fetch_result is None if the user has not been registered via /start yet
    return fetch_result is not None and step in fetch_result
def get_itemID(userID):
cur.execute("SELECT itemID FROM user WHERE userID = ?", (userID,))
fetch_result = cur.fetchone()
return fetch_result[0]
@bot.message_handler(commands=["start"])
def start_message(message):
keyboard = types.ReplyKeyboardMarkup(
resize_keyboard = True,
one_time_keyboard=True
)
zero_machine = types.KeyboardButton(text="Philips EP2231/40")
first_machine = types.KeyboardButton(text="Nivona CafeRomatica NICR 550")
second_machine = types.KeyboardButton(text="Delonghi ECAM 370.70.B")
third_machine = types.KeyboardButton(text="Polaris PACM 2065AC")
fourth_machine = types.KeyboardButton(text="Philips EP2030/10")
fifth_machine = types.KeyboardButton(text="REDMOND RCM-1517")
keyboard.row(zero_machine, first_machine)
keyboard.row(second_machine, third_machine)
keyboard.row(fourth_machine, fifth_machine)
bot.send_message(message.chat.id, "Main menu", reply_markup=keyboard)
try:
cur.execute("INSERT INTO user VALUES (?, ?, ?);", (message.chat.id, "menu", 0))
except:
cur.execute("UPDATE user SET status = ? WHERE userID = ?;", ("menu", message.chat.id))
conn.commit()
@bot.message_handler(content_types="text", func=lambda message: check_step("menu", message.chat.id))
def machine_description(message):
if message.text in goods:
keyboard = types.ReplyKeyboardMarkup(
resize_keyboard=True,
one_time_keyboard=True
)
back_to_menu_button = types.KeyboardButton(text="Back to Menu")
keyboard.add(back_to_menu_button)
bot.send_message(message.chat.id, """Request accepted. Wait for a response...\nYou selected -> {}""".format(message.text))
description = get_info(goods.index(message.text))
bot.send_message(message.chat.id, description)
bot.send_message(message.chat.id, """You can now ask questions about this product or return to the main menu to view another one.""", reply_markup=keyboard)
# change user status in db
cur.execute("UPDATE user SET status = ?, itemID = ? WHERE userID = ?;", ("chat",
goods.index(message.text),
message.chat.id))
conn.commit()
else:
bot.send_message(message.chat.id, "Request rejected. You entered wrong product name!")
@bot.message_handler(content_types="text", func= lambda message: check_step("chat", message.chat.id))
def chat_with_ai(message):
keyboard = types.ReplyKeyboardMarkup(
resize_keyboard=True,
one_time_keyboard=True
)
back_to_menu_button = types.KeyboardButton(text="Back to Menu")
keyboard.add(back_to_menu_button)
if message.text == back_to_menu_button.text:
bot.send_message(message.chat.id, "Returning back to Menu")
cur.execute("UPDATE user SET status = ? WHERE userID = ?;", ("menu", message.chat.id))
conn.commit()
keyboard = types.ReplyKeyboardMarkup(
resize_keyboard = True,
one_time_keyboard=True
)
zero_machine = types.KeyboardButton(text="Philips EP2231/40")
first_machine = types.KeyboardButton(text="Nivona CafeRomatica NICR 550")
second_machine = types.KeyboardButton(text="Delonghi ECAM 370.70.B")
third_machine = types.KeyboardButton(text="Polaris PACM 2065AC")
fourth_machine = types.KeyboardButton(text="Philips EP2030/10")
fifth_machine = types.KeyboardButton(text="REDMOND RCM-1517")
keyboard.row(zero_machine, first_machine)
keyboard.row(second_machine, third_machine)
keyboard.row(fourth_machine, fifth_machine)
bot.send_message(message.chat.id, "Main menu", reply_markup=keyboard)
else:
itemID = get_itemID(message.chat.id)
answer = get_answer(itemID, message.text)
bot.send_message(message.chat.id, answer, reply_markup=keyboard)
bot.infinity_polling(timeout=10, long_polling_timeout = 5) | [
"langchain.prompts.PromptTemplate.from_template",
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.chat_models.ChatOpenAI",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((571, 622), 'sqlite3.connect', 'sqlite3.connect', (['"""main.db"""'], {'check_same_thread': '(False)'}), "('main.db', check_same_thread=False)\n", (586, 622), False, 'import sqlite3\n'), ((661, 738), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'filename': '"""../info.log"""', 'filemode': '"""w"""'}), "(level=logging.INFO, filename='../info.log', filemode='w')\n", (680, 738), False, 'import logging\n'), ((883, 948), 'telebot.TeleBot', 'telebot.TeleBot', (['"""7174085128:AAGfMlZh5wUoV3vXfoGOYtb9vkN3SbqOmAE"""'], {}), "('7174085128:AAGfMlZh5wUoV3vXfoGOYtb9vkN3SbqOmAE')\n", (898, 948), False, 'import telebot\n'), ((1436, 1454), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1452, 1454), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2592, 2630), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (2620, 2630), False, 'from langchain.prompts import PromptTemplate\n'), ((2776, 2845), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'max_tokens': '(250)'}), "(model_name='gpt-3.5-turbo', temperature=0, max_tokens=250)\n", (2786, 2845), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2888, 3027), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', (['llm'], {'retriever': 'retriever', 'return_source_documents': '(True)', 'chain_type_kwargs': "{'prompt': QA_CHAIN_PROMPT}"}), "(llm, retriever=retriever,\n return_source_documents=True, chain_type_kwargs={'prompt': QA_CHAIN_PROMPT}\n )\n", (2915, 3027), False, 'from langchain.chains import RetrievalQA\n'), ((3901, 3939), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (3929, 3939), False, 'from langchain.prompts import PromptTemplate\n'), ((4085, 4154), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'max_tokens': '(250)'}), "(model_name='gpt-3.5-turbo', temperature=0, max_tokens=250)\n", (4095, 4154), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4197, 4336), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', (['llm'], {'retriever': 'retriever', 'return_source_documents': '(True)', 'chain_type_kwargs': "{'prompt': QA_CHAIN_PROMPT}"}), "(llm, retriever=retriever,\n return_source_documents=True, chain_type_kwargs={'prompt': QA_CHAIN_PROMPT}\n )\n", (4224, 4336), False, 'from langchain.chains import RetrievalQA\n')] |
# Standard Library Imports
import ast
import json
import os
import re
# Third-Party Imports
import textwrap
from typing import Any, Dict, List, Optional, Type
import langchain
import streamlit as st
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate
from langchain.tools import BaseTool
from openmm import (
AndersenThermostat,
BrownianIntegrator,
LangevinIntegrator,
LangevinMiddleIntegrator,
MonteCarloBarostat,
OpenMMException,
Platform,
VerletIntegrator,
app,
unit,
)
from openmm.app import (
PME,
AllBonds,
CutoffNonPeriodic,
CutoffPeriodic,
DCDReporter,
Ewald,
ForceField,
HAngles,
HBonds,
Modeller,
NoCutoff,
PDBFile,
PDBReporter,
PDBxFile,
Simulation,
StateDataReporter,
)
from openmm.unit import bar, femtoseconds, kelvin, nanometers, picosecond, picoseconds
from pydantic import BaseModel, Field
from mdagent.tools.base_tools.preprocess_tools import CleaningTools
# Local Library/Application Imports
from mdagent.utils import FileType, PathRegistry
# TODO delete files created from the simulation if not needed.
FORCEFIELD_LIST = [
"amber14/DNA.OL15.xml",
"amber14/DNA.bsc1.xml",
"amber14/RNA.OL3.xml",
"amber14/lipid17.xml",
"amber14/protein.ff14SB.xml",
"amber14/protein.ff15ipq.xml",
"amber14/spce.xml",
"amber14/tip3p.xml",
"amber14/tip3pfb.xml",
"amber14/tip4pew.xml",
"amber14/tip4pfb.xml",
"charmm36/spce.xml",
"charmm36/tip3p-pme-b.xml",
"charmm36/tip3p-pme-f.xml",
"charmm36/tip4p2005.xml",
"charmm36/tip4pew.xml",
"charmm36/tip5p.xml",
"charmm36/tip5pew.xml",
"charmm36/water.xml",
"absinth.xml",
"amber03.xml",
"amber03_obc.xml",
"amber10.xml",
"amber10_obc.xml",
"amber14-all",
"amber14-all.xml",
"amber96.xml",
"amber96_obc.xml",
"amber99Test.xml",
"amber99_obc.xml",
"amber99sb.xml",
"amber99sbildn.xml",
"amber99sbnmr.xml",
"amberfb15.xml",
"amoeba2009.xml",
"amoeba2009_gk.xml",
"amoeba2013.xml",
"amoeba2013_gk.xml",
"charmm36.xml",
"charmm_polar_2013.xml",
"hydrogens.xml",
"iamoeba.xml",
"pdbNames.xml",
"residues.xml",
"spce.xml",
"swm4ndp.xml",
"tip3p.xml",
"tip3pfb.xml",
"tip4pew.xml",
"tip4pfb.xml",
"tip5p.xml",
]
class SimulationFunctions:
def __init__(
self,
path_registry,
temperature: float = 0.05,
model_name: str = "gpt-4",
request_timeout: int = 1000,
max_tokens: int = 2000,
):
self.path_registry = path_registry
self.temperature = temperature
self.model_name = model_name
self.request_timeout = request_timeout
self.max_tokens = max_tokens
self.llm = langchain.chat_models.ChatOpenAI(
temperature=self.temperature,
model_name=self.model_name,
request_timeout=self.request_timeout,
            max_tokens=self.max_tokens,
)
#######==================System Congifuration==================########
# System Configuration initialization.
def _create_system(
pdb,
forcefield,
nonbondedMethod="NoCutoff",
nonbondedCutoff=None,
ewaldErrorTolerance=None,
constraints="None",
rigidWater=False,
constraintTolerance=None,
**kwargs,
):
# Create a dictionary to hold system parameters
system_params = {
"nonbondedMethod": nonbondedMethod,
"constraints": constraints,
"rigidWater": rigidWater,
}
# Set nonbondedCutoff if applicable
if (
nonbondedMethod in ["PME", "CutoffNonPeriodic", "CutoffPeriodic"]
and nonbondedCutoff is not None
):
system_params["nonbondedCutoff"] = nonbondedCutoff
# Set ewaldErrorTolerance if PME is used
if nonbondedMethod == "PME" and ewaldErrorTolerance is not None:
system_params["ewaldErrorTolerance"] = ewaldErrorTolerance
# Set constraintTolerance if constraints are used
if constraints in ["HBonds", " AllBonds"] and constraintTolerance is not None:
system_params["constraintTolerance"] = constraintTolerance
elif system_params["rigidWater"] and constraintTolerance is not None:
system_params["constraintTolerance"] = constraintTolerance
# Update system_params with any additional parameters provided
system_params.update(kwargs)
system = forcefield.createSystem(pdb.topology, **system_params)
return system
########==================Integrator==================########
# Integrator
def _define_integrator(
integrator_type="LangevinMiddle",
temperature=300 * kelvin,
friction=1.0 / picoseconds,
timestep=0.004 * picoseconds,
**kwargs,
):
# Create a dictionary to hold integrator parameters
integrator_params = {
"temperature": temperature,
"friction": friction,
"timestep": timestep,
}
# Update integrator_params with any additional parameters provided
integrator_params.update(kwargs)
# Create the integrator
if integrator_type == "LangevinMiddle":
integrator = LangevinMiddleIntegrator(**integrator_params)
elif integrator_type == "Verlet":
integrator = VerletIntegrator(**integrator_params)
elif integrator_type == "Brownian":
integrator = BrownianIntegrator(**integrator_params)
else:
raise Exception("Integrator type not recognized")
return integrator
def _prompt_summary(self, query: str):
prompt_template = """Your input is the original query. Your
        task is to parse through the user query
and provide a summary of the file path input,
the type of preprocessing needed (this is the
same as cleaning the file), the forcefield
used for the simulation,
the ensemble of the simulation, the integrator needed,
the number of steps, the timestep, the temperature,
        and other instructions,
        and follow the format "name: description".
File Path: what is the file path of the file
you are using? it must include a .cif or .pdb extension.
Preprocessing: what preprocessing is needed?
you can choose from the following: standard cleaning,
remove water, add hydrogens, add hydrogens and remove
water. The default is add hydrogens and remove water.
Forcefield: what forcefields are you using?
you can choose from the following: AMBER, CHARMM,
OPLS, GROMACS. Default --> "amber14-all.xml, tip3p.xml".
Ensemble: what ensemble are you using?
you can choose from the following:
NPT, NVT, NVE. Default --> "NVT".
Integrator: what integrator are you using?
you can choose from the following:
Langevin, Verlet, Brownian.
The default depends on the ensemble
(NPT -> Langevin, NVT -> Langevin, NVE -> Verlet).
Number of Steps: how many steps
are you using? The default is 10000.
Timestep: what is the timestep?
Default --> "1 fs".
Temperature: what is the temperature?
Default --> "300 K".
Pressure: What is the pressure?
If NPT ensemble, the default is 1.0 bar, otherwise None.
Friction: what is the friction coefficient?
Default --> "1.0"
record_params: what parameters do you want to record?
you can choose from the following:
step, time, potentialEnergy, kineticEnergy,
totalEnergy, temperature, volume, density,
progress, remainingTime, speed, elapsedTime,
separator, systemMass, totalSteps, append.
Default --> ["step", "potentialEnergy", "temperature"].
Other Instructions: what other instructions do you have?
The default is none.
Example of the final output:
File Path: 1a1p.pdb
Preprocessing: standard cleaning
Forcefield: amber14-all.xml, tip3p.xml
Ensemble: NPT
Integrator: Langevin
Number of Steps: 10000
Timestep: 1 fs
Temperature: 300 K
Pressure: 1.0 bar
Friction: 1.0
record_params: ["step", "potentialEnergy", "temperature"]
Other Instructions: none
If there is not enough information in a category,
you may fill in with the default, but explicitly state so.
Here is the information:{query}"""
prompt = PromptTemplate(template=prompt_template, input_variables=["query"])
llm_chain = LLMChain(prompt=prompt, llm=self.llm)
return llm_chain.run(" ".join(query))
def _save_to_file(self, summary: str, filename: str):
"""Parse the summary string and
save it to a file in JSON format."""
# Split the summary into lines
lines = summary.strip().split("\n")
# Parse each line into a key and a value
summary_dict = {}
for line in lines:
key, value = line.split(":")
summary_dict[key.strip()] = value.strip()
# Save the dictionary to a file
with open(filename, "w") as f:
json.dump(summary_dict, f)
# add filename to registry
file_description = "Simulation Parameters"
self.path_registry.map_path(filename, filename, file_description)
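    # For example, the summary line "Forcefield: amber14-all.xml, tip3p.xml" is stored as
    # {"Forcefield": "amber14-all.xml, tip3p.xml"} in the JSON file written above.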
def _instruction_summary(self, query: str):
summary = self._prompt_summary(query)
self._save_to_file(summary, "simulation_parameters.json")
return summary
def _setup_simulation_from_json(self, file_name):
# Open the json file and load the parameters
with open(file_name, "r") as f:
params = json.load(f)
return params
def _setup_and_run_simulation(self, query):
# Load the force field
# ask for inputs from the user
params = self._setup_simulation_from_json(query)
# forcefield key can be forcefield_files or Forcefield
if "forcefield_files" in params:
params["forcefield_files"] = (
params["forcefield_files"]
.replace("(default)", "")
.replace(" and ", ",")
.strip()
)
Forcefield_files = [
file.strip() for file in params["forcefield_files"].split(",")
]
Forcefield = Forcefield_files[0]
Water_model = Forcefield_files[1]
else:
params["Forcefield"] = (
params["Forcefield"]
.replace("(default)", "")
.replace(" and ", ",")
.strip()
)
Forcefield_files = [
file.strip() for file in params["Forcefield"].split(",")
]
Forcefield = Forcefield_files[0]
Water_model = Forcefield_files[1]
print("Setting up forcefields :", Forcefield, Water_model)
st.markdown("Setting up forcefields", unsafe_allow_html=True)
# check if forcefields end in .xml
if Forcefield.endswith(".xml") and Water_model.endswith(".xml"):
forcefield = ForceField(Forcefield, Water_model)
# adding forcefield to registry
# Load the PDB file
CleaningTools(self.path_registry)
pdbfile = self.path_registry.get_mapped_path(params["File Path"])
name = pdbfile.split(".")[0]
end = pdbfile.split(".")[1]
if end == "pdb":
pdb = PDBFile(pdbfile)
elif end == "cif":
pdb = PDBxFile(pdbfile)
modeller = Modeller(pdb.topology, pdb.positions)
system = forcefield.createSystem(
modeller.topology,
nonbondedMethod=app.PME,
nonbondedCutoff=1.0 * nanometers,
            constraints=app.HBonds,
)
_integrator = params["Integrator"].split(" ")[0].strip()
_temp = params["Temperature"].split(" ")[0].strip()
_friction_coef = params["Friction"].split(" ")[0].strip()
_timestep = params["Timestep"].split(" ")[0].strip()
if _integrator == "Langevin":
print(
"Setting up Langevin integrator with Parameters:",
_temp,
"K",
_friction_coef,
"1/ps",
_timestep,
"fs",
)
st.markdown("Setting up Langevin integrator", unsafe_allow_html=True)
if params["Ensemble"] == "NPT":
_pressure = params["Pressure"].split(" ")[0].strip()
                system.addForce(MonteCarloBarostat(float(_pressure) * bar, float(_temp) * kelvin))
integrator = LangevinIntegrator(
float(_temp) * kelvin,
float(_friction_coef) / picosecond,
float(_timestep) * femtoseconds,
)
elif _integrator == "Verlet":
if params["Ensemble"] == "NPT":
_pressure = params["Pressure"].split(" ")[0].strip()
                system.addForce(AndersenThermostat(float(_temp) * kelvin, 1 / picosecond))
                system.addForce(MonteCarloBarostat(float(_pressure) * bar, float(_temp) * kelvin))
print(
"Setting up Verlet integrator with Parameters:",
_timestep,
"fs",
_temp,
"K",
_pressure,
"bar",
)
print("Setting up Verlet integrator with Parameters:", _timestep, "fs")
st.markdown("Setting up Verlet integrator", unsafe_allow_html=True)
            integrator = VerletIntegrator(float(_timestep) * femtoseconds)
simulation = Simulation(modeller.topology, system, integrator)
simulation.context.setPositions(modeller.positions)
simulation.minimizeEnergy()
# save initial positions to registry
file_name = "initial_positions.pdb"
with open(file_name, "w") as f:
PDBFile.writeFile(
simulation.topology,
simulation.context.getState(getPositions=True).getPositions(),
f,
)
print("Initial Positions saved to initial_positions.pdb")
simulation.reporters.append(PDBReporter(f"{name}.pdb", 1000))
# reporter_args = {"reportInterval": 1000}
reporter_args = {}
params["record_params"] = ast.literal_eval(params["record_params"])
for param in params["record_params"]:
if param in [
"step",
"time",
"potentialEnergy",
"kineticEnergy",
"totalEnergy",
"temperature",
"volume",
"density",
"progress",
"remainingTime",
"speed",
"elapsedTime",
"separator",
"systemMass",
"totalSteps",
"append",
]:
# The params from the json file should be booleans
reporter_args[param] = True
simulation.reporters.append(
StateDataReporter(f"{name}.csv", 1000, **reporter_args)
)
simulation.step(int(params["Number of Steps"].split(" ")[0].strip()))
# add filenames to registry
file_name1 = "simulation_trajectory.pdb"
file_description1 = "Simulation PDB, containing the simulation trajectory"
self.path_registry.map_path(file_name1, f"{name}.pdb", file_description1)
file_name2 = "simulation_data.csv"
file_description2 = (
"Simulation Data, containing step, potential energy, and temperature"
)
self.path_registry.map_path(file_name2, f"{name}.csv", file_description2)
return simulation
def _extract_parameters_path(self):
"""Check directory for parameters.json file."""
# Check if there is a parameters.json file in the directory.
if os.path.exists("simulation_parameters_summary.json"):
return "simulation_parameters_summary.json"
# If there's no exact match, check for
# any JSON file that contains 'parameters' in its name.
else:
for file in os.listdir("."):
if "parameters" in file and file.endswith(".json"):
return file
# If no matching file is found, raise an exception.
raise ValueError("No parameters.json file found in directory.")
class SetUpAndRunTool(BaseTool):
name = "SetUpAndRunTool"
description = """This tool will set up the simulation objects
and run the simulation.
It will ask for the parameters path.
input: json file
"""
path_registry: Optional[PathRegistry]
def __init__(
self,
path_registry: Optional[PathRegistry],
):
super().__init__()
self.path_registry = path_registry
def _run(self, query: str) -> str:
"""Use the tool"""
# find the parameters in the directory
try:
if self.path_registry is None: # this should not happen
return "Registry not initialized"
sim_fxns = SimulationFunctions(path_registry=self.path_registry)
parameters = sim_fxns._extract_parameters_path()
except ValueError as e:
return (
str(e)
+ """\nPlease use the Instruction summary tool with the
to create a parameters.json file in the directory."""
)
self.log("This are the parameters:")
self.log(parameters)
# print the parameters in json file
with open(parameters) as f:
params = json.load(f)
for key, value in params.items():
print(key, ":", value)
self.log("Are you sure you want to run the simulation? (y/n)")
response = input("yes or no: ")
if response.lower() in ["yes", "y"]:
sim_fxns._setup_and_run_simulation(parameters)
else:
return "Simulation interrupted due to human input"
return "Simulation Completed, simulation trajectory and data files saved."
def log(self, text, color="blue"):
if color == "blue":
print("\033[1;34m\t{}\033[00m".format(text))
if color == "red":
print("\033[31m\t{}\033[00m".format(text))
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
#######==================System Configuration==================########
# System Configuration
class SetUpandRunFunctionInput(BaseModel):
pdb_id: str
forcefield_files: List[str]
save: bool = Field(
True,
description=(
(
"Set to 'True' (default) to save the log files and trajectories "
"of the simulation. "
"If set to 'False', "
"the simulation is considered as being in a testing "
"or preliminary scripting stage, utilizing default parameters and "
"results are not saved. "
"This second setting is ideal for initial experimentation or "
"basic script development before customizing the "
"script for final use."
)
),
)
system_params: Dict[str, Any] = Field(
{
"nonbondedMethod": "NoCutoff",
"nonbondedCutoff": "1 * nanometers",
"ewaldErrorTolerance": None,
"constraints": "None",
"rigidWater": False,
"constraintTolerance": None,
"solvate": False,
},
description=(
"Parameters for the openmm system. "
"For nonbondedMethod, you can choose from the following:\n"
"NoCutoff, CutoffNonPeriodic, CutoffPeriodic, Ewald, PME. "
"If anything but NoCutoff is chosen,"
"you have to include a nonbondedCutoff"
"and a constrainTolerance.\n"
"If PME is chosen,"
"you have to include an ewaldErrorTolerance too."
"For constraints, you can choose from the following:\n"
"None, HBonds, AllBonds or OnlyWater."
"For rigidWater, you can choose from the following:\n"
"True, False.\n"
"Finally, if you want to solvate the system, before the simulation,"
"you can set solvate to True.\n"
"Example1:\n"
"{'nonbondedMethod': 'NoCutoff',\n"
"'constraints': 'None',\n"
"'rigidWater': False}\n"
"Example2:\n"
"{'nonbondedMethod': 'CutoffPeriodic',\n"
"'nonbondedCutoff': 1.0,\n"
"'constraints': 'HBonds',\n"
"'rigidWater': True,\n"
"'constraintTolerance': 0.00001,\n"
"'solvate': True} "
),
)
integrator_params: Dict[str, Any] = Field(
{
"integrator_type": "LangevinMiddle",
"Temperature": "300 * kelvin",
"Friction": "1.0 / picoseconds",
"Timestep": "0.002 * picoseconds",
"Pressure": "1.0 * bar",
},
description="""Parameters for the openmm integrator.""",
)
simulation_params: Dict[str, Any] = Field(
{
"Ensemble": "NVT",
"Number of Steps": 5000,
"record_interval_steps": 100,
"record_params": ["step", "potentialEnergy", "temperature"],
},
description="""Parameters for the openmm simulation.
The ensemble can be NPT, NVT or NVE.
The number of steps is the number of steps the simulation will run for.
record_interval_steps is the number of steps between each record:
it should be the number of steps divided by 100.
The record_params is a list of parameters that will
be recorded during the simulation. The options are:
[Step,Time,Speed,Progress,RemainingTime,ElapsedTime,
PotentialEnergy,KineticEnergy,TotalEnergy,
Temperature,Volume,Density]""",
)
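# Illustrative example of the arguments validated by this schema (all values are
# placeholders; pdb_id must be a file ID already registered in the path registry).
# check_system_params/_process_parameters below convert the string values into
# openmm objects before the simulation is built:
# SetUpandRunFunctionInput(
#     pdb_id="1ABC_123456",
#     forcefield_files=["amber14-all.xml", "amber14/tip3pfb.xml"],
#     save=True,
#     system_params={
#         "nonbondedMethod": "PME",
#         "nonbondedCutoff": "1 * nanometers",
#         "ewaldErrorTolerance": 0.0005,
#         "constraints": "HBonds",
#         "rigidWater": True,
#         "constraintTolerance": 0.00001,
#         "solvate": False,
#     },
#     integrator_params={
#         "integrator_type": "LangevinMiddle",
#         "Temperature": "300 * kelvin",
#         "Friction": "1.0 / picoseconds",
#         "Timestep": "0.002 * picoseconds",
#     },
#     simulation_params={
#         "Ensemble": "NVT",
#         "Number of Steps": 5000,
#         "record_interval_steps": 50,
#         "record_params": ["step", "potentialEnergy", "temperature"],
#     },
# )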
#########===================================================================############
class OpenMMSimulation:
def __init__(
self,
input_params: SetUpandRunFunctionInput,
path_registry: PathRegistry,
save: bool,
sim_id: str,
pdb_id: str,
):
self.params = input_params
self.save = save
self.sim_id = sim_id
self.pdb_id = pdb_id
self.int_params = (
self.params.integrator_params
if self.params.integrator_params is not None
else {
"integrator_type": "LangevinMiddle",
"Temperature": 300 * kelvin,
"Friction": 1.0 / picoseconds,
"Timestep": 0.002 * picoseconds,
"Pressure": 1.0 * bar,
}
)
self.sys_params = (
self.params.system_params
if self.params.system_params is not None
else {
"nonbondedMethod": NoCutoff,
"nonbondedCutoff": 1 * nanometers,
"ewaldErrorTolerance": None,
"constraints": AllBonds,
"rigidWater": True,
"constraintTolerance": 0.000001,
"solvate": False,
}
)
self.sim_params = (
self.params.simulation_params
if self.params.simulation_params is not None
else {
"Ensemble": "NVT",
"Number of Steps": 5000,
"record_interval_steps": 100,
"record_params": ["step", "potentialEnergy", "temperature"],
}
)
self.path_registry = path_registry
def setup_system(self):
print("Building system...")
st.markdown("Building system", unsafe_allow_html=True)
self.pdb_id = self.params.pdb_id
self.pdb_path = self.path_registry.get_mapped_path(self.pdb_id)
self.pdb = PDBFile(self.pdb_path)
self.forcefield = ForceField(*self.params.forcefield_files)
self.system = self._create_system(self.pdb, self.forcefield, **self.sys_params)
if self.sys_params.get("nonbondedMethod", None) in [
CutoffPeriodic,
PME,
]:
if self.sim_params["Ensemble"] == "NPT":
self.system.addForce(
MonteCarloBarostat(
self.int_params["Pressure"],
self.int_params["Temperature"],
self.sim_params.get("barostatInterval", 25),
)
)
def setup_integrator(self):
print("Setting up integrator...")
st.markdown("Setting up integrator", unsafe_allow_html=True)
int_params = self.int_params
integrator_type = int_params.get("integrator_type", "LangevinMiddle")
if integrator_type == "LangevinMiddle":
self.integrator = LangevinMiddleIntegrator(
int_params["Temperature"],
int_params["Friction"],
int_params["Timestep"],
)
elif integrator_type == "LangevinIntegrator":
self.integrator = LangevinIntegrator(
int_params["Temperature"],
int_params["Friction"],
int_params["Timestep"],
)
else:
raise ValueError("Invalid integrator type")
self.integrator.setConstraintTolerance(
self.sys_params.get("constraintTolerance", 0.000001)
)
def create_simulation(self):
print("Creating simulation...")
st.markdown("Creating simulation", unsafe_allow_html=True)
self.simulation = Simulation(
self.modeller.topology,
self.system,
self.integrator,
Platform.getPlatformByName("CPU"),
)
self.simulation.context.setPositions(self.modeller.positions)
# TEMPORARY FILE MANAGEMENT OR PATH REGISTRY MAPPING
if self.save:
trajectory_name = self.path_registry.write_file_name(
type=FileType.RECORD,
record_type="TRAJ",
protein_file_id=self.pdb_id,
Sim_id=self.sim_id,
term="dcd",
)
topology_name = self.path_registry.write_file_name(
type=FileType.RECORD,
record_type="TOP",
protein_file_id=self.pdb_id,
Sim_id=self.sim_id,
term="pdb",
)
log_name = self.path_registry.write_file_name(
type=FileType.RECORD,
record_type="LOG",
protein_file_id=self.pdb_id,
Sim_id=self.sim_id,
term="txt",
)
traj_desc = (
f"Simulation trajectory for protein {self.pdb_id}"
f" and simulation {self.sim_id}"
)
top_desc = (
f"Simulation topology for protein"
f"{self.pdb_id} and simulation {self.sim_id}"
)
log_desc = (
f"Simulation state log for protein {self.pdb_id} "
f"and simulation {self.sim_id}"
)
self.simulation.reporters.append(
DCDReporter(
f"{trajectory_name}",
self.sim_params["record_interval_steps"],
)
)
self.simulation.reporters.append(
PDBReporter(
f"{topology_name}",
self.sim_params["record_interval_steps"],
)
)
self.simulation.reporters.append(
StateDataReporter(
f"{log_name}",
self.sim_params["record_interval_steps"],
step=True,
potentialEnergy=True,
temperature=True,
separator="\t",
)
)
self.registry_records = [
("holder", f"files/records/{trajectory_name}", traj_desc),
("holder", f"files/records/{log_name}", log_desc),
("holder", f"files/records/{topology_name}", top_desc),
]
# TODO add checkpoint too?
else:
self.simulation.reporters.append(
DCDReporter(
"temp_trajectory.dcd",
self.sim_params["record_interval_steps"],
)
)
self.simulation.reporters.append(
PDBReporter(
"temp_topology.pdb",
self.sim_params["record_interval_steps"],
)
)
self.simulation.reporters.append(
StateDataReporter(
"temp_log.txt",
self.sim_params["record_interval_steps"],
step=True,
potentialEnergy=True,
temperature=True,
separator="\t",
)
)
def _create_system(
self,
pdb,
forcefield,
nonbondedMethod="NoCutoff",
nonbondedCutoff=None,
ewaldErrorTolerance=None,
constraints="None",
rigidWater=False,
constraintTolerance=None,
solvate=False,
**kwargs,
):
# Create a dictionary to hold system parameters
system_params = {
"nonbondedMethod": nonbondedMethod,
"constraints": constraints,
"rigidWater": rigidWater,
}
# Set nonbondedCutoff if applicable; accept both the string names and the
# openmm objects, since _process_parameters hands back the objects
cutoff_methods = ("PME", "CutoffNonPeriodic", "CutoffPeriodic",
PME, CutoffNonPeriodic, CutoffPeriodic)
if nonbondedMethod in cutoff_methods:
if nonbondedCutoff is not None:
system_params["nonbondedCutoff"] = nonbondedCutoff
# Set ewaldErrorTolerance if PME is used
if nonbondedMethod == "PME" and ewaldErrorTolerance is not None:
system_params["ewaldErrorTolerance"] = ewaldErrorTolerance
# constraintTolerance is not a createSystem kwarg; it is applied to the
# integrator in setup_integrator, so it is intentionally not added to system_params
if constraints in ["HBonds", "AllBonds"] and constraintTolerance is not None:
pass
elif system_params["rigidWater"] and constraintTolerance is not None:
pass
# Update system_params with any additional parameters provided
system_params.update(kwargs)
# if use_constraint_tolerance:
# constraintTolerance = system_params.pop('constraintTolerance')
self.modeller = Modeller(pdb.topology, pdb.positions)
if solvate:
try:
self.modeller.addSolvent(forcefield)
except ValueError as e:
print("Error adding solvent", type(e).__name__, "–", e)
if "No Template for" in str(e):
raise ValueError(str(e))
except AttributeError as e:
print("Error adding solvent: ", type(e).__name__, "–", e)
print("Trying to add solvent with 1 nm padding")
if "NoneType" and "value_in_unit" in str(e):
try:
self.modeller.addSolvent(forcefield, padding=1 * nanometers)
except Exception as e:
print("Error adding solvent", type(e).__name__, "–", e)
raise (e)
system = forcefield.createSystem(self.modeller.topology, **system_params)
else:
system = forcefield.createSystem(self.modeller.topology, **system_params)
return system
def unit_to_string(self, unit):
"""Needed to convert units to strings for the script
Otherwise internal __str()__ method makes the script
not runnable"""
return f"{unit.value_in_unit(unit.unit)}*{unit.unit.get_name()}"
def _construct_script_content(
self,
pdb_path,
forcefield_files,
nonbonded_method,
constraints,
rigid_water,
constraint_tolerance,
nonbonded_cutoff,
ewald_error_tolerance,
hydrogen_mass,
time_step,
temperature,
friction,
ensemble,
pressure,
record_interval_steps,
solvate,
integrator_type,
):
script_content = f"""
# This script was generated by MDagent-Setup.
from openmm import *
from openmm.app import *
from openmm.unit import *
# Input Files
pdb = PDBFile('{pdb_path}')
forcefield = ForceField({forcefield_files})
# System Configuration
nonbondedMethod = {nonbonded_method}
constraints = {constraints}
rigidWater = {rigid_water}
"""
# emit constraintTolerance whenever the generated script will reference it
if constraint_tolerance is not None and (rigid_water or constraints not in ("None", None)):
script_content += f"constraintTolerance = {constraint_tolerance}\n"
# Conditionally add nonbondedCutoff
if nonbonded_method != NoCutoff:
script_content += f"nonbondedCutoff = {nonbonded_cutoff}\n"
if nonbonded_method == PME:
script_content += f"ewaldErrorTolerance = {ewald_error_tolerance}\n"
if hydrogen_mass:
script_content += f"hydrogenMass = {hydrogen_mass}\n"
# ... other configurations ...
script_content += f"""
# Integration Options
dt = {time_step}
temperature = {temperature}
friction = {friction}
"""
if ensemble == "NPT":
script_content += f"""
pressure = {pressure}
barostatInterval = {self.sim_params.get("barostatInterval", 25)}
"""
# ... other integration options ...
script_content += f"""
# Simulation Options
steps = {self.sim_params.get("Number of Steps", record_interval_steps)}
equilibrationSteps = 1000
platform = Platform.getPlatformByName('CPU')
dcdReporter = DCDReporter('trajectory.dcd', 1000)
pdbReporter = PDBReporter('trajectory.pdb', 1000)
dataReporter = StateDataReporter('log.txt', {record_interval_steps},
totalSteps=steps,
step=True, speed=True, progress=True, elapsedTime=True, remainingTime=True,
potentialEnergy=True, temperature=True, volume=True, density=True,
separator='\t')
checkpointReporter = CheckpointReporter('checkpoint.chk', 5000)
# Minimize and Equilibrate
# ... code for minimization and equilibration ...
# Simulate
print('Building system...')
modeller = Modeller(pdb.topology, pdb.positions)
"""
if solvate:
script_content += (
"""modeller.addSolvent(forcefield, padding=1*nanometers)"""
)
if nonbonded_method == NoCutoff:
if hydrogen_mass:
script_content += """
system = forcefield.createSystem(modeller.topology,
nonbondedMethod=nonbondedMethod, constraints=constraints,
rigidWater=rigidWater, hydrogenMass=hydrogenMass)
"""
else:
script_content += """
system = forcefield.createSystem(modeller.topology,
nonbondedMethod=nonbondedMethod, constraints=constraints,
rigidWater=rigidWater)
"""
if nonbonded_method == CutoffNonPeriodic or nonbonded_method == CutoffPeriodic:
if hydrogen_mass:
script_content += """
system = forcefield.createSystem(modeller.topology,
nonbondedMethod=nonbondedMethod, nonbondedCutoff=nonbondedCutoff,
constraints=constraints, rigidWater=rigidWater,
hydrogenMass=hydrogenMass)
"""
else:
script_content += """
system = forcefield.createSystem(modeller.topology,
nonbondedMethod=nonbondedMethod, nonbondedCutoff=nonbondedCutoff,
constraints=constraints, rigidWater=rigidWater)
"""
if nonbonded_method == PME:
if hydrogen_mass:
script_content += """
system = forcefield.createSystem(modeller.topology,
nonbondedMethod=nonbondedMethod,
nonbondedCutoff=nonbondedCutoff, ewaldErrorTolerance=ewaldErrorTolerance,
constraints=constraints, rigidWater=rigidWater, hydrogenMass=hydrogenMass)
"""
else:
script_content += """
system = forcefield.createSystem(modeller.topology,
nonbondedMethod=nonbondedMethod,
nonbondedCutoff=nonbondedCutoff, ewaldErrorTolerance=ewaldErrorTolerance,
constraints=constraints, rigidWater=rigidWater)
"""
if ensemble == "NPT":
script_content += """
system.addForce(MonteCarloBarostat(pressure, temperature, barostatInterval))
"""
if integrator_type == "LangevinMiddle" and constraints != "None":
script_content += """
integrator = LangevinMiddleIntegrator(temperature, friction, dt)
integrator.setConstraintTolerance(constraintTolerance)
simulation = Simulation(modeller.topology, system, integrator, platform)
simulation.context.setPositions(modeller.positions)
"""
if integrator_type == "LangevinMiddle" and constraints == "None":
script_content += """
integrator = LangevinMiddleIntegrator(temperature, friction, dt)
simulation = Simulation(modeller.topology, system, integrator, platform)
simulation.context.setPositions(modeller.positions)
"""
script_content += """
# Minimize and Equilibrate
print('Performing energy minimization...')
simulation.minimizeEnergy()
print('Equilibrating...')
simulation.context.setVelocitiesToTemperature(temperature)
simulation.step(equilibrationSteps)
# Simulate
print('Simulating...')
simulation.reporters.append(dcdReporter)
simulation.reporters.append(pdbReporter)
simulation.reporters.append(dataReporter)
simulation.reporters.append(checkpointReporter)
simulation.currentStep = 0
simulation.step(steps)
"""
return script_content
def write_standalone_script(self, filename="reproduce_simulation.py"):
"""Extracting parameters from the class instance
Inspired by the code snippet provided from openmm-setup
https://github.com/openmm/openmm-setup
"""
pdb_path = self.pdb_path
forcefield_files = ", ".join(
f"'{file}'" for file in self.params["forcefield_files"]
)
nonbonded_method = self.sys_params.get("nonbondedMethod", NoCutoff)
nbCo = self.sys_params.get("nonbondedCutoff", 1 * nanometers)
nonbonded_cutoff = self.unit_to_string(nbCo)
constraints = self.sys_params.get("constraints", "None")
rigid_water = self.sys_params.get("rigidWater", False)
ewald_error_tolerance = self.sys_params.get("ewaldErrorTolerance", 0.0005)
constraint_tolerance = self.sys_params.get("constraintTolerance", None)
hydrogen_mass = self.sys_params.get("hydrogenMass", None)
solvate = self.sys_params.get("solvate", False)
integrator_type = self.int_params.get("integrator_type", "LangevinMiddle")
friction = self.int_params.get("Friction", 1.0 / picoseconds)
friction = f"{friction.value_in_unit(friction.unit)}{friction.unit.get_name()}"
_temp = self.int_params.get("Temperature", 300 * kelvin)
temperature = self.unit_to_string(_temp)
t_step = self.int_params.get("Timestep", 0.004 * picoseconds)
time_step = self.unit_to_string(t_step)
press = self.int_params.get("Pressure", 1.0 * bar)
pressure = self.unit_to_string(press)
ensemble = self.sim_params.get("Ensemble", "NVT")
self.sim_params.get("Number of Steps", 10000)
record_interval_steps = self.sim_params.get("record_interval_steps", 1000)
script_content = self._construct_script_content(
pdb_path,
forcefield_files,
nonbonded_method,
constraints,
rigid_water,
constraint_tolerance,
nonbonded_cutoff,
ewald_error_tolerance,
hydrogen_mass,
time_step,
temperature,
friction,
ensemble,
pressure,
record_interval_steps,
solvate,
integrator_type,
)
# Remove leading spaces for proper formatting
def remove_leading_spaces(text):
lines = text.split("\n")
stripped_lines = [line.lstrip() for line in lines]
return "\n".join(stripped_lines)
script_content = remove_leading_spaces(script_content)
script_content = textwrap.dedent(script_content).strip()
# Write to file
directory = "files/simulations"
if not os.path.exists(directory):
os.makedirs(directory)
with open(f"{directory}/{filename}", "w") as file:
file.write(script_content)
print(f"Standalone simulation script written to {directory}/{filename}")
st.markdown("Standalone simulation script written", unsafe_allow_html=True)
def run(self):
# Minimize and Equilibrate
print("Performing energy minimization...")
st.markdown("Performing energy minimization", unsafe_allow_html=True)
self.simulation.minimizeEnergy()
print("Minimization complete!")
top_name = f"files/pdb/{self.sim_id}_initial_positions.pdb"
top_description = f"Initial positions for simulation {self.sim_id}"
with open(top_name, "w") as f:
PDBFile.writeFile(
self.simulation.topology,
self.simulation.context.getState(getPositions=True).getPositions(),
f,
)
self.path_registry.map_path(f"top_{self.sim_id}", top_name, top_description)
print("Initial Positions saved to initial_positions.pdb")
st.markdown("Minimization complete! Equilibrating...", unsafe_allow_html=True)
print("Equilibrating...")
_temp = self.int_params["Temperature"]
self.simulation.context.setVelocitiesToTemperature(_temp)
_eq_steps = self.sim_params.get("equilibrationSteps", 1000)
self.simulation.step(_eq_steps)
# Simulate
print("Simulating...")
st.markdown("Simulating...", unsafe_allow_html=True)
self.simulation.currentStep = 0
self.simulation.step(self.sim_params["Number of Steps"])
print("Done!")
st.markdown("Done!", unsafe_allow_html=True)
if not self.save:
if os.path.exists("temp_trajectory.dcd"):
os.remove("temp_trajectory.dcd")
if os.path.exists("temp_log.txt"):
os.remove("temp_log.txt")
if os.path.exists("temp_checkpoint.chk"):
os.remove("temp_checkpoint.chk")
return "Simulation done!"
class SetUpandRunFunction(BaseTool):
name: str = "SetUpandRunFunction"
description: str = (
"This tool will set up and run a short simulation of a protein. "
"Then will write a standalone script that can be used "
"to reproduce the simulation or change accordingly for "
"a more elaborate simulation. It only runs short simulations because, "
"if there are errors, you can try again changing the input"
)
args_schema: Type[BaseModel] = SetUpandRunFunctionInput
path_registry: Optional[PathRegistry]
def __init__(self, path_registry: Optional[PathRegistry]):
super().__init__()
self.path_registry = path_registry
def _run(self, **input_args):
if self.path_registry is None:
return "Path registry not initialized"
input = self.check_system_params(input_args)
error = input.get("error", None)
if error:
print(f"error found: {error}")
return error
try:
pdb_id = input["pdb_id"]
# check if pdb_id is in the registry or as 1XYZ_112233 format
if pdb_id not in self.path_registry.list_path_names():
return "No pdb_id found in input, use the file id not the file name"
except KeyError:
return "No pdb_id found in input"
try:
save = input["save"] # either this simulation
# to save or not the output files from this simulation
except KeyError:
save = True
print(
"No 'save' key found in input, setting to True. "
"Record files will be deleted after script is written."
)
try:
file_name = self.path_registry.write_file_name(
type=FileType.SIMULATION,
type_of_sim=input["simulation_params"]["Ensemble"],
protein_file_id=pdb_id,
)
sim_id = self.path_registry.get_fileid(file_name, FileType.SIMULATION)
except Exception as e:
print(f"An exception was found: {str(e)}.")
return f"An exception was found trying to write the filenames: {str(e)}."
try:
# validate into the schema so OpenMMSimulation can use attribute access on params
openmmsim = OpenMMSimulation(
SetUpandRunFunctionInput(**input), self.path_registry, save, sim_id, pdb_id
)
openmmsim.setup_system()
openmmsim.setup_integrator()
openmmsim.create_simulation()
print("simulation set!")
st.markdown("simulation set!", unsafe_allow_html=True)
except ValueError as e:
msg = str(e) + f"This were the inputs {input_args}"
if "No template for" in msg:
msg += (
"This error is likely due to non standard residues "
"in the protein, if you havent done it yet, try "
"cleaning the pdb file using the cleaning tool"
)
return msg
except FileNotFoundError:
return f"File not found, check File id. This were the inputs {input_args}"
except OpenMMException as e:
return f"OpenMM Exception: {str(e)}. This were the inputs {input_args}"
try:
openmmsim.run()
except Exception as e:
return (
f"An exception was found: {str(e)}. Not a problem, thats one "
"purpose of this tool: to run a short simulation to check for correct "
"initialization. "
""
"Try a) with different parameters like "
"nonbondedMethod, constraints, etc \n or\n"
"b) clean file inputs depending on error "
)
try:
openmmsim.write_standalone_script(filename=file_name)
self.path_registry.map_path(
sim_id,
f"files/simulations/{file_name}",
f"Basic Simulation of Protein {pdb_id}",
)
if save:
records = openmmsim.registry_records
# move record files to files/records/
print(os.listdir("."))
if not os.path.exists("files/records"):
os.makedirs("files/records")
for record in records:
os.rename(record[1].split("/")[-1], f"{record[1]}")
for record in records:
record_list = list(record)
record_list[0] = self.path_registry.get_fileid(
record_list[1].split("/")[-1], FileType.RECORD
)
record = tuple(record_list)
self.path_registry.map_path(*record)
return (
"Simulation done! \n Summary: \n"
"Record files written to files/records/ with IDs and descriptions: "
f"{[(record[0],record[2]) for record in records]}\n"
"Standalone script written to files/simulations/ with ID: "
f"{sim_id}.\n"
f"The initial topology file ID is top_{sim_id} saved in files/pdb/"
)
except Exception as e:
print(f"An exception was found: {str(e)}.")
return f"An exception was found trying to write the filenames: {str(e)}."
def _parse_cutoff(self, cutoff):
# Check if cutoff is already an OpenMM Quantity (has a unit)
possible_units = ["nm", "nanometer", "nanometers", "angstrom", "angstroms", "a"]
if isinstance(cutoff, unit.Quantity):
return cutoff
# Convert to string in case it's not (e.g., int or float)
cutoff = str(cutoff)
if cutoff[-1] == "s":
cutoff = cutoff[:-1]
# Remove spaces and convert to lowercase for easier parsing
cutoff = cutoff.replace(" ", "").lower()
if cutoff.endswith("s"):
cutoff = cutoff[:-1]
# Check for multiplication symbol and split if necessary
if "*" in cutoff:
# Split on the '*' and extract the numerical part and the unit part
num_part, unit_part = cutoff.split("*")
# Convert the numerical part to a float
num_value = float(num_part)
else:
# If there is no '*', it's either a number or a string like "1nm"
# Attempt to convert directly to float; if it fails,
# it must have a unit like "nm" or "angstrom"
try:
num_value = float(cutoff)
unit_part = "nm"
except ValueError:
for possible_unit in possible_units:
if possible_unit in cutoff:
num_value = float(cutoff.replace(possible_unit, ""))
unit_part = possible_unit
break
else:
# Use regular expression to extract the
# numerical part and the unit part
match = re.match(r"([+-]?[0-9]*\.?[0-9]+)([a-zA-Z]*)", cutoff)
if match:
num_part, unit_part = match.groups()
raise ValueError(
f"""Unknown unit for nonbondedCutoff
got '{cutoff}'. Try using nm or angstroms as
value * unit."""
)
# Now convert the unit part to an OpenMM unit
if unit_part in ["nm", "nanometer", "nanometers"]:
return num_value * unit.nanometers
elif unit_part in ["angstrom", "angstroms", "a"]:
return num_value * unit.angstroms
else:
# If the unit is not recognized, raise an error
raise ValueError(
f"""Unknown unit for nonbondedCutoff
got {unit_part}. Try using nm or angstroms as
value * unit."""
)
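# Illustrative inputs _parse_cutoff accepts (assuming openmm.unit is imported as `unit`,
# as the isinstance check above implies):
#     _parse_cutoff(1.0)               -> 1.0 * unit.nanometers  (bare numbers default to nm)
#     _parse_cutoff("1 * nanometers")  -> 1.0 * unit.nanometers
#     _parse_cutoff("12 angstroms")    -> 12.0 * unit.angstroms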
def _parse_parameter(self, parameter, default_unit, possible_units):
"""
Parse a parameter and return it as an OpenMM Quantity with the correct unit.
Args:
parameter (float, str, or unit.Quantity): The input parameter value
default_unit (unit.Unit): The default unit to use if none is provided
possible_units (dict): A mapping of strings to their respective unit objects
Returns:
unit.Quantity: The parameter as an OpenMM Quantity with the correct unit.
"""
error_msg = ""
if isinstance(parameter, unit.Quantity):
return parameter, error_msg
# Convert to string in case it's not (e.g., int or float)
parameter_str = str(parameter)
# Remove spaces and convert to lowercase for easier parsing
parameter_str = parameter_str.replace(" ", "").lower()
# Check for multiplication symbol and split if necessary
# e.g. "1*kelvin" or "1*ps^-1"
if "*" in parameter_str:
num_part, unit_part = parameter_str.split("*")
num_value = float(num_part)
elif "poundforce/inch^2" in parameter_str:
num_value = float(parameter_str.replace("poundforce/inch^2", ""))
unit_part = "poundforce/inch^2"
# Check for division symbol and split if necessary
# e.g. "1/ps" or "1/ps^-1"
elif "/" in parameter_str:
num_part, unit_part = parameter_str.split("/")
num_value = float(num_part)
unit_part = "/" + unit_part
elif "^-1" in parameter_str:
parameter_str = parameter_str.replace("^-1", "")
match = re.match(r"^(\d+(?:\.\d+)?)([a-zA-Z]+)$", parameter_str)
num_value = float(match.group(1))
unit_part = "/" + match.group(2)
else:
# Attempt to convert directly to float; if it fails,
# it must have a unit like "K", "ps", etc.
try:
num_value = float(parameter_str)
unit_part = default_unit
except ValueError:
match = re.match(r"([+-]?[0-9]*\.?[0-9]+)([a-zA-Z]*)", parameter_str)
if match:
num_part, unit_part = match.groups()
num_value = float(num_part)
else:
error_msg += f"Invalid format for parameter: '{parameter_str}'."
# Convert the unit part to an OpenMM unit
if unit_part.lower() in possible_units:
return num_value * possible_units[unit_part.lower()], error_msg
else:
# If the unit is not recognized, raise an error
error_msg += f"""Unknown unit '{unit_part}' for parameter.
Valid units include: {list(possible_units.keys())}."""
return parameter, error_msg
def parse_temperature(self, temperature):
possible_units = {
"k": unit.kelvin,
"kelvin": unit.kelvin,
}
return self._parse_parameter(temperature, "k", possible_units)
def parse_friction(self, friction):
possible_units = {
"/ps": (1 / unit.picoseconds),
"/picosecond": (1 / unit.picoseconds),
"/picoseconds": (1 / unit.picoseconds),
"picosecond^-1": (1 / unit.picoseconds),
"picoseconds^-1": (1 / unit.picoseconds),
"/ps^-1": (1 / unit.picoseconds),
"ps^-1": (1 / unit.picoseconds),
"1*ps^-1": (1 / unit.picoseconds),
}
return self._parse_parameter(friction, "1/ps", possible_units)
def parse_timestep(self, timestep):
possible_units = {
"ps": unit.picoseconds,
"picosecond": unit.picoseconds,
"picoseconds": unit.picoseconds,
"fs": unit.femtoseconds,
"femtosecond": unit.femtoseconds,
"femtoseconds": unit.femtoseconds,
"ns": unit.nanoseconds,
"nanosecond": unit.nanoseconds,
"nanoseconds": unit.nanoseconds,
}
return self._parse_parameter(timestep, "ps", possible_units)
def parse_pressure(self, pressure):
possible_units = {
"bar": unit.bar,
"atm": unit.atmospheres,
"atmosphere": unit.atmospheres,
"pascal": unit.pascals,
"pascals": unit.pascals,
"pa": unit.pascals,
"poundforce/inch^2": unit.psi,
"psi": unit.psi,
}
return self._parse_parameter(pressure, "bar", possible_units)
def _process_parameters(self, user_params, param_type="system_params"):
"""
Process user provided parameters,
converting strings to openmm objects if necessary.
"""
error_msg = ""
processed_params = {}
if param_type == "system_params":
for key, value in user_params.items():
if key == "nonbondedMethod" or key == "nonbondedmethod":
if value == "NoCutoff":
processed_params[key] = NoCutoff
elif value == "PME":
processed_params[key] = PME
elif value == "CutoffPeriodic":
processed_params[key] = CutoffPeriodic
elif value == "CutoffNonPeriodic":
processed_params[key] = CutoffNonPeriodic
elif value == "Ewald":
processed_params[key] = Ewald
else:
# Assume it's already an openmm object
processed_params[key] = value
if key == "nonbondedCutoff" or key == "nonbondedcutoff":
try:
processed_params[key] = self._parse_cutoff(value)
except ValueError as e:
error_msg += f"Invalid nonbondedCutoff: {e}. \n"
if key == "ewaldErrorTolerance" or key == "ewalderrortolerance":
try:
processed_params[key] = float(value)
except TypeError as e:
error_msg += (
f"Invalid ewaldErrorTolerance: {e}. "
"If you are using null or None, "
"just dont include it "
"as part of the parameters.\n"
)
if key == "constraints":
try:
if type(value) == str:
if value == "None":
processed_params[key] = None
elif value == "HBonds":
processed_params[key] = HBonds
elif value == "AllBonds":
processed_params[key] = AllBonds
elif value == "HAngles":
processed_params[key] = HAngles
else:
error_msg += (
f"Invalid constraints: Got {value}. "
"Try using None, HBonds, AllBonds or "
"HAngles\n"
)
else:
processed_params[key] = value
except TypeError as e:
error_msg += (
f"Invalid constraints: {e}. If you are using "
"null or None, just dont include as "
"part of the parameters.\n"
)
if key == "rigidWater" or key == "rigidwater":
if type(value) == bool:
processed_params[key] = value
elif value == "True":
processed_params[key] = True
elif value == "False":
processed_params[key] = False
else:
error_msg += (
f"Invalid rigidWater: got {value}. "
"Try using True or False.\n"
)
if key == "constraintTolerance" or key == "constrainttolerance":
try:
processed_params[key] = float(value)
except ValueError as e:
error_msg += f"Invalid constraintTolerance: {e}."
except TypeError as e:
error_msg += (
f"Invalid constraintTolerance: {e}. If "
"constraintTolerance is null or None, "
"just dont include as part of "
"the parameters.\n"
)
if key == "solvate":
try:
if type(value) == bool:
processed_params[key] = value
elif value == "True":
processed_params[key] = True
elif value == "False":
processed_params[key] = False
else:
error_msg += (
f"Invalid solvate: got {value}. "
"Use either True or False.\n"
)
except TypeError as e:
error_msg += (
f"Invalid solvate: {e}. If solvate is null or "
"None, just dont include as part of "
"the parameters.\n"
)
return processed_params, error_msg
if param_type == "integrator_params":
for key, value in user_params.items():
if key == "integrator_type" or key == "integratortype":
if value == "LangevinMiddle" or value == LangevinMiddleIntegrator:
processed_params[key] = "LangevinMiddle"
elif value == "Langevin" or value == LangevinIntegrator:
processed_params[key] = "Langevin"
elif value == "Verlet" or value == VerletIntegrator:
processed_params[key] = "Verlet"
elif value == "Brownian" or value == BrownianIntegrator:
processed_params[key] = "Brownian"
else:
error_msg += (
f"Invalid integrator_type: got {value}. "
"Try using LangevinMiddle, Langevin, "
"Verlet, or Brownian.\n"
)
if key == "Temperature" or key == "temperature":
temperature, msg = self.parse_temperature(value)
processed_params[key] = temperature
error_msg += msg
if key == "Friction" or key == "friction":
friction, msg = self.parse_friction(value)
processed_params[key] = friction
error_msg += msg
if key == "Timestep" or key == "timestep":
timestep, msg = self.parse_timestep(value)
processed_params[key] = timestep
error_msg += msg
if key == "Pressure" or key == "pressure":
pressure, msg = self.parse_pressure(value)
processed_params[key] = pressure
error_msg += msg
return processed_params, error_msg
if param_type == "simulation_params":
for key, value in user_params.items():
if key == "Ensemble" or key == "ensemble":
if value == "NPT":
processed_params[key] = "NPT"
elif value == "NVT":
processed_params[key] = "NVT"
elif value == "NVE":
processed_params[key] = "NVE"
else:
error_msg += (
f"Invalid Ensemble. got {value}. "
"Try using NPT, NVT, or NVE.\n"
)
if key == "Number of Steps" or key == "number of steps":
processed_params[key] = int(value)
if key == "record_interval_steps" or key == "record interval steps":
processed_params[key] = int(value)
if key == "record_params" or key == "record params":
processed_params[key] = value
return processed_params, error_msg
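# Example (illustrative) of the system_params branch:
#     _process_parameters(
#         {"nonbondedMethod": "PME", "constraints": "HBonds",
#          "rigidWater": True, "constraintTolerance": 0.00001},
#         param_type="system_params",
#     )
# returns ({"nonbondedMethod": PME, "constraints": HBonds,
#           "rigidWater": True, "constraintTolerance": 1e-05}, "")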
def check_system_params(cls, values):
"""Check that the system parameters are valid."""
# lowercase all keys in the dictionary
error_msg = ""
values = {k.lower(): v for k, v in values.items()}
system_params = values.get("system_params")
if system_params:
system_params, msg = cls._process_parameters(
system_params, param_type="system_params"
)
if msg != "":
error_msg += msg
else:
system_params = {
"nonbondedMethod": NoCutoff,
"nonbondedCutoff": 1 * nanometers,
"ewaldErrorTolerance": None,
"constraints": AllBonds,
"rigidWater": True,
"constraintTolerance": 0.00001,
"solvate": False,
}
integrator_params = values.get("integrator_params")
if integrator_params:
integrator_params, msg = cls._process_parameters(
integrator_params, param_type="integrator_params"
)
if msg != "":
error_msg += msg
else:
integrator_params = {
"integrator_type": "LangevinMiddle",
"Temperature": 300 * kelvin,
"Friction": 1.0 / picoseconds,
"Timestep": 0.004 * picoseconds,
"Pressure": 1.0 * bar,
}
simulation_params = values.get("simulation_params")
if simulation_params is None:
simulation_params = {
"Ensemble": "NVT",
"Number of Steps": 10000,
"record_interval_steps": 100,
"record_params": ["step", "potentialEnergy", "temperature"],
}
# lowercase all keys in the dictionary
# system_params = {k.lower(): v for k, v in system_params.items()}
# integrator_params = {k.lower(): v for k, v in integrator_params.items()}
# simulation_params = {k.lower(): v for k, v in simulation_params.items()}
nonbondedMethod = system_params.get("nonbondedMethod")
nonbondedCutoff = system_params.get("nonbondedCutoff")
ewaldErrorTolerance = system_params.get("ewaldErrorTolerance")
constraints = system_params.get("constraints")
rigidWater = system_params.get("rigidWater")
constraintTolerance = system_params.get("constraintTolerance")
methods_with_cutoff = {
"PME",
"CutoffNonPeriodic",
"CutoffPeriodic",
"Ewald",
PME,
CutoffNonPeriodic,
CutoffPeriodic,
Ewald,
}
constraints_with_tolerance = {
"HBonds",
"AllBonds",
"OnlyWater",
HBonds,
AllBonds,
}
if nonbondedMethod in methods_with_cutoff and nonbondedCutoff is None:
error_msg += """nonbondedCutoff must be specified if
nonbondedMethod is not NoCutoff\n"""
if nonbondedMethod in {"PME", PME} and ewaldErrorTolerance is None:
error_msg += """ewaldErrorTolerance must be specified when
nonbondedMethod is PME\n"""
if constraints in constraints_with_tolerance and constraintTolerance is None:
error_msg += """constraintTolerance must be specified when
constraints is HBonds or AllBonds"""
if rigidWater and constraintTolerance is None:
error_msg = "constraintTolerance must be specified if rigidWater is True"
"""Checking if the file is in the path"""
pdb_id = values.get("pdb_id")
if not pdb_id:
error_msg += "The pdb id is not present in the inputs"
"""Validating the forcefield files and Integrator"""
integrator_type = integrator_params.get("integrator_type")
if integrator_type not in ["LangevinMiddle", "Verlet", "Brownian"]:
error_msg += """integrator_type must be one of the following:
LangevinMiddle, Verlet, Brownian\n"""
if integrator_type == "LangevinMiddle":
friction = integrator_params.get("Friction")
if friction is None:
error_msg += """friction must be specified when
integrator_type is LangevinMiddle\n"""
timestep = integrator_params.get("Timestep")
if timestep is None:
error_msg += """timestep must be specified when
integrator_type is LangevinMiddle\n"""
temp = integrator_params.get("Temperature")
if temp is None:
error_msg += """temperature must be specified when
integrator_type is LangevinMiddle\n"""
if integrator_type == "Verlet":
timestep = integrator_params.get("Timestep")
if timestep is None:
error_msg += """timestep must be specified when
integrator_type is Verlet\n"""
if integrator_type == "Brownian":
temperature = integrator_params.get("Temperature")
if temperature is None:
error_msg += """temperature must be specified when
integrator_type is Brownian\n"""
# forcefield
forcefield_files = values.get("forcefield_files")
if forcefield_files is None or forcefield_files == []:
print("Setting default forcefields")
st.markdown("Setting default forcefields", unsafe_allow_html=True)
forcefield_files = ["amber14-all.xml", "amber14/tip3pfb.xml"]
elif len(forcefield_files) == 0:
print("Setting default forcefields v2")
st.markdown("Setting default forcefields", unsafe_allow_html=True)
forcefield_files = ["amber14-all.xml", "amber14/tip3pfb.xml"]
else:
for file in forcefield_files:
if file not in FORCEFIELD_LIST:
error_msg += "The forcefield file is not present"
save = values.get("save", True)
if type(save) != bool:
error_msg += "save must be a boolean value"
if error_msg != "":
return {
"error": error_msg
+ "\n Correct this and try again. \n Everthing else is fine"
}
values = {
"pdb_id": pdb_id,
"forcefield_files": forcefield_files,
"save": save,
"system_params": system_params,
"integrator_params": integrator_params,
"simulation_params": simulation_params,
}
# if no error, return the values
return values
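# check_system_params either returns the normalized values dict built above or, when any
# validation message accumulated, {"error": "..."}, which _run surfaces to the agent directly.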
async def _arun(self, query: str) -> str:
"""Use the tool asynchronously."""
raise NotImplementedError("custom_search does not support async")
def create_simulation_input(pdb_path, forcefield_files):
"""
This function takes a PDB file path and a string naming the forcefield files.
It creates and returns a PDBFile and ForceField object.
The string can name one or more files, separated by commas or " and ".
If only one file is provided, it assumes that the file includes
both the forcefield and the water model if needed.
Parameters:
pdb_path (str): The file path to the PDB file.
forcefield_files (str): Comma- or " and "-separated forcefield XML file names.
Returns:
tuple: A tuple containing the PDBFile and ForceField objects.
"""
# Load the PDB file
pdb_path.split(".")[0]
end = pdb_path.split(".")[1]
if end == "pdb":
pdb = PDBFile(pdb_path)
elif end == "cif":
pdb = PDBxFile(pdb_path)
# Clean up forcefield files list and remove any empty strings
forcefield_files = (
forcefield_files.replace("(default)", "").replace(" and ", ",").strip()
)
Forcefield_files = [file.strip() for file in forcefield_files.split(",")]
Forcefield = Forcefield_files[0]
# check if they are part of the list
if Forcefield not in FORCEFIELD_LIST:
    raise Exception("Forcefield not recognized")
if len(Forcefield_files) > 1:
    Water_model = Forcefield_files[1]
    if Water_model not in FORCEFIELD_LIST:
        raise Exception("Water model not recognized")
    forcefield = ForceField(Forcefield, Water_model)
else:
    # a single file is assumed to include the water model if needed
    forcefield = ForceField(Forcefield)
# TODO Not all forcefields require water model
return pdb, forcefield
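# Example (illustrative) call, assuming both files are listed in FORCEFIELD_LIST:
#     pdb, forcefield = create_simulation_input(
#         "files/pdb/protein.pdb", "amber14-all.xml and amber14/tip3pfb.xml (default)"
#     )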
| [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate",
"langchain.chat_models.ChatOpenAI"
] | [((20314, 20720), 'pydantic.Field', 'Field', (['(True)'], {'description': '"""Set to \'True\' (default) to save the log files and trajectories of the simulation. If set to \'False\', the simulation is considered as being in a testing or preliminary scripting stage, utilizing default parameters and results are not saved. This second setting is ideal for initial experimentation or basic script development before customizing the script for final use."""'}), '(True, description=\n "Set to \'True\' (default) to save the log files and trajectories of the simulation. If set to \'False\', the simulation is considered as being in a testing or preliminary scripting stage, utilizing default parameters and results are not saved. This second setting is ideal for initial experimentation or basic script development before customizing the script for final use."\n )\n', (20319, 20720), False, 'from pydantic import BaseModel, Field\n'), ((20979, 22020), 'pydantic.Field', 'Field', (["{'nonbondedMethod': 'NoCutoff', 'nonbondedCutoff': '1 * nanometers',\n 'ewaldErrorTolerance': None, 'constraints': 'None', 'rigidWater': False,\n 'constraintTolerance': None, 'solvate': False}"], {'description': '"""Parameters for the openmm system. For nonbondedMethod, you can choose from the following:\nNoCutoff, CutoffNonPeriodic, CutoffPeriodic, Ewald, PME. If anything but NoCutoff is chosen,you have to include a nonbondedCutoffand a constrainTolerance.\nIf PME is chosen,you have to include an ewaldErrorTolerance too.For constraints, you can choose from the following:\nNone, HBonds, AllBonds or OnlyWater.For rigidWater, you can choose from the following:\nTrue, False.\nFinally, if you want to solvate the system, before the simulation,you can set solvate to True.\nExample1:\n{\'nonbondedMethod\': \'NoCutoff\',\n\'constraints\': \'None\',\n\'rigidWater\': False}\nExample2:\n{\'nonbondedMethod\': \'CutoffPeriodic\',\n\'nonbondedCutoff\': 1.0,\n\'constraints\': \'HBonds\',\n\'rigidWater\': True,\n\'constraintTolerance\': 0.00001,\n\'solvate\': True} """'}), '({\'nonbondedMethod\': \'NoCutoff\', \'nonbondedCutoff\': \'1 * nanometers\',\n \'ewaldErrorTolerance\': None, \'constraints\': \'None\', \'rigidWater\': False,\n \'constraintTolerance\': None, \'solvate\': False}, description=\n """Parameters for the openmm system. For nonbondedMethod, you can choose from the following:\nNoCutoff, CutoffNonPeriodic, CutoffPeriodic, Ewald, PME. 
If anything but NoCutoff is chosen,you have to include a nonbondedCutoffand a constrainTolerance.\nIf PME is chosen,you have to include an ewaldErrorTolerance too.For constraints, you can choose from the following:\nNone, HBonds, AllBonds or OnlyWater.For rigidWater, you can choose from the following:\nTrue, False.\nFinally, if you want to solvate the system, before the simulation,you can set solvate to True.\nExample1:\n{\'nonbondedMethod\': \'NoCutoff\',\n\'constraints\': \'None\',\n\'rigidWater\': False}\nExample2:\n{\'nonbondedMethod\': \'CutoffPeriodic\',\n\'nonbondedCutoff\': 1.0,\n\'constraints\': \'HBonds\',\n\'rigidWater\': True,\n\'constraintTolerance\': 0.00001,\n\'solvate\': True} """\n )\n', (20984, 22020), False, 'from pydantic import BaseModel, Field\n'), ((22557, 22791), 'pydantic.Field', 'Field', (["{'integrator_type': 'LangevinMiddle', 'Temperature': '300 * kelvin',\n 'Friction': '1.0 / picoseconds', 'Timestep': '0.002 * picoseconds',\n 'Pressure': '1.0 * bar'}"], {'description': '"""Parameters for the openmm integrator."""'}), "({'integrator_type': 'LangevinMiddle', 'Temperature': '300 * kelvin',\n 'Friction': '1.0 / picoseconds', 'Timestep': '0.002 * picoseconds',\n 'Pressure': '1.0 * bar'}, description=\n 'Parameters for the openmm integrator.')\n", (22562, 22791), False, 'from pydantic import BaseModel, Field\n'), ((22917, 23653), 'pydantic.Field', 'Field', (["{'Ensemble': 'NVT', 'Number of Steps': 5000, 'record_interval_steps': 100,\n 'record_params': ['step', 'potentialEnergy', 'temperature']}"], {'description': '"""Parameters for the openmm simulation.\n The ensemble can be NPT, NVT or NVE.\n The number of steps is the number of steps the simulation will run for.\n record_interval_steps is the number of steps between each record:\n hould be the number of steps divided by 100.\n The record_params is a list of parameters that will\n be recorded during the simulation The options are:\n [Step,Time,Speed,Progress,RemainingTime,ElapsedTime,\n PotentialEnergy,KineticEnergy,TotalEnergy,\n Temperature,Volume,Density]"""'}), '({\'Ensemble\': \'NVT\', \'Number of Steps\': 5000, \'record_interval_steps\':\n 100, \'record_params\': [\'step\', \'potentialEnergy\', \'temperature\']},\n description=\n """Parameters for the openmm simulation.\n The ensemble can be NPT, NVT or NVE.\n The number of steps is the number of steps the simulation will run for.\n record_interval_steps is the number of steps between each record:\n hould be the number of steps divided by 100.\n The record_params is a list of parameters that will\n be recorded during the simulation The options are:\n [Step,Time,Speed,Progress,RemainingTime,ElapsedTime,\n PotentialEnergy,KineticEnergy,TotalEnergy,\n Temperature,Volume,Density]"""\n )\n', (22922, 23653), False, 'from pydantic import BaseModel, Field\n'), ((74259, 74294), 'openmm.app.ForceField', 'ForceField', (['Forcefield', 'Water_model'], {}), '(Forcefield, Water_model)\n', (74269, 74294), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((2848, 3019), 'langchain.chat_models.ChatOpenAI', 'langchain.chat_models.ChatOpenAI', ([], {'temperature': 'self.temperature', 'model_name': 'self.model_name', 'request_timeout': 'self.request_timeout', 'max_tokens': 'self.request_timeout'}), '(temperature=self.temperature, model_name=\n self.model_name, request_timeout=self.request_timeout, max_tokens=self.\n 
request_timeout)\n', (2880, 3019), False, 'import langchain\n'), ((9952, 10019), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['query']"}), "(template=prompt_template, input_variables=['query'])\n", (9966, 10019), False, 'from langchain.prompts import PromptTemplate\n'), ((10040, 10077), 'langchain.chains.LLMChain', 'LLMChain', ([], {'prompt': 'prompt', 'llm': 'self.llm'}), '(prompt=prompt, llm=self.llm)\n', (10048, 10077), False, 'from langchain.chains import LLMChain\n'), ((12416, 12477), 'streamlit.markdown', 'st.markdown', (['"""Setting up forcefields"""'], {'unsafe_allow_html': '(True)'}), "('Setting up forcefields', unsafe_allow_html=True)\n", (12427, 12477), True, 'import streamlit as st\n'), ((12732, 12765), 'mdagent.tools.base_tools.preprocess_tools.CleaningTools', 'CleaningTools', (['self.path_registry'], {}), '(self.path_registry)\n', (12745, 12765), False, 'from mdagent.tools.base_tools.preprocess_tools import CleaningTools\n'), ((13056, 13093), 'openmm.app.Modeller', 'Modeller', (['pdb.topology', 'pdb.positions'], {}), '(pdb.topology, pdb.positions)\n', (13064, 13093), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((15170, 15219), 'openmm.app.Simulation', 'Simulation', (['modeller.topology', 'system', 'integrator'], {}), '(modeller.topology, system, integrator)\n', (15180, 15219), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((15873, 15914), 'ast.literal_eval', 'ast.literal_eval', (["params['record_params']"], {}), "(params['record_params'])\n", (15889, 15914), False, 'import ast\n'), ((17472, 17524), 'os.path.exists', 'os.path.exists', (['"""simulation_parameters_summary.json"""'], {}), "('simulation_parameters_summary.json')\n", (17486, 17524), False, 'import os\n'), ((25493, 25547), 'streamlit.markdown', 'st.markdown', (['"""Building system"""'], {'unsafe_allow_html': '(True)'}), "('Building system', unsafe_allow_html=True)\n", (25504, 25547), True, 'import streamlit as st\n'), ((25680, 25702), 'openmm.app.PDBFile', 'PDBFile', (['self.pdb_path'], {}), '(self.pdb_path)\n', (25687, 25702), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((25729, 25770), 'openmm.app.ForceField', 'ForceField', (['*self.params.forcefield_files'], {}), '(*self.params.forcefield_files)\n', (25739, 25770), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((26409, 26469), 'streamlit.markdown', 'st.markdown', (['"""Setting up integrator"""'], {'unsafe_allow_html': '(True)'}), "('Setting up integrator', unsafe_allow_html=True)\n", (26420, 26469), True, 'import streamlit as st\n'), ((27344, 27402), 'streamlit.markdown', 'st.markdown', (['"""Creating simulation"""'], {'unsafe_allow_html': '(True)'}), "('Creating simulation', unsafe_allow_html=True)\n", (27355, 27402), True, 'import streamlit as st\n'), ((32351, 32388), 'openmm.app.Modeller', 'Modeller', (['pdb.topology', 
'pdb.positions'], {}), '(pdb.topology, pdb.positions)\n', (32359, 32388), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((43173, 43248), 'streamlit.markdown', 'st.markdown', (['"""Standalone simulation script written"""'], {'unsafe_allow_html': '(True)'}), "('Standalone simulation script written', unsafe_allow_html=True)\n", (43184, 43248), True, 'import streamlit as st\n'), ((43363, 43432), 'streamlit.markdown', 'st.markdown', (['"""Performing energy minimization"""'], {'unsafe_allow_html': '(True)'}), "('Performing energy minimization', unsafe_allow_html=True)\n", (43374, 43432), True, 'import streamlit as st\n'), ((44047, 44125), 'streamlit.markdown', 'st.markdown', (['"""Minimization complete! Equilibrating..."""'], {'unsafe_allow_html': '(True)'}), "('Minimization complete! Equilibrating...', unsafe_allow_html=True)\n", (44058, 44125), True, 'import streamlit as st\n'), ((44439, 44491), 'streamlit.markdown', 'st.markdown', (['"""Simulating..."""'], {'unsafe_allow_html': '(True)'}), "('Simulating...', unsafe_allow_html=True)\n", (44450, 44491), True, 'import streamlit as st\n'), ((44628, 44672), 'streamlit.markdown', 'st.markdown', (['"""Done!"""'], {'unsafe_allow_html': '(True)'}), "('Done!', unsafe_allow_html=True)\n", (44639, 44672), True, 'import streamlit as st\n'), ((73603, 73620), 'openmm.app.PDBFile', 'PDBFile', (['pdb_path'], {}), '(pdb_path)\n', (73610, 73620), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((5414, 5459), 'openmm.LangevinMiddleIntegrator', 'LangevinMiddleIntegrator', ([], {}), '(**integrator_params)\n', (5438, 5459), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((10642, 10668), 'json.dump', 'json.dump', (['summary_dict', 'f'], {}), '(summary_dict, f)\n', (10651, 10668), False, 'import json\n'), ((11183, 11195), 'json.load', 'json.load', (['f'], {}), '(f)\n', (11192, 11195), False, 'import json\n'), ((12619, 12654), 'openmm.app.ForceField', 'ForceField', (['Forcefield', 'Water_model'], {}), '(Forcefield, Water_model)\n', (12629, 12654), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((12956, 12972), 'openmm.app.PDBFile', 'PDBFile', (['pdbfile'], {}), '(pdbfile)\n', (12963, 12972), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((13846, 13915), 'streamlit.markdown', 'st.markdown', (['"""Setting up Langevin integrator"""'], {'unsafe_allow_html': '(True)'}), "('Setting up Langevin integrator', unsafe_allow_html=True)\n", (13857, 13915), True, 'import streamlit as st\n'), ((15727, 15759), 'openmm.app.PDBReporter', 'PDBReporter', (['f"""{name}.pdb"""', '(1000)'], {}), "(f'{name}.pdb', 1000)\n", (15738, 15759), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, 
NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((16625, 16680), 'openmm.app.StateDataReporter', 'StateDataReporter', (['f"""{name}.csv"""', '(1000)'], {}), "(f'{name}.csv', 1000, **reporter_args)\n", (16642, 16680), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((17731, 17746), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (17741, 17746), False, 'import os\n'), ((19272, 19284), 'json.load', 'json.load', (['f'], {}), '(f)\n', (19281, 19284), False, 'import json\n'), ((26664, 26767), 'openmm.LangevinMiddleIntegrator', 'LangevinMiddleIntegrator', (["int_params['Temperature']", "int_params['Friction']", "int_params['Timestep']"], {}), "(int_params['Temperature'], int_params['Friction'],\n int_params['Timestep'])\n", (26688, 26767), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((27543, 27576), 'openmm.Platform.getPlatformByName', 'Platform.getPlatformByName', (['"""CPU"""'], {}), "('CPU')\n", (27569, 27576), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((42922, 42947), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (42936, 42947), False, 'import os\n'), ((42961, 42983), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (42972, 42983), False, 'import os\n'), ((44714, 44751), 'os.path.exists', 'os.path.exists', (['"""temp_trajectory.dcd"""'], {}), "('temp_trajectory.dcd')\n", (44728, 44751), False, 'import os\n'), ((44817, 44847), 'os.path.exists', 'os.path.exists', (['"""temp_log.txt"""'], {}), "('temp_log.txt')\n", (44831, 44847), False, 'import os\n'), ((44906, 44943), 'os.path.exists', 'os.path.exists', (['"""temp_checkpoint.chk"""'], {}), "('temp_checkpoint.chk')\n", (44920, 44943), False, 'import os\n'), ((47535, 47589), 'streamlit.markdown', 'st.markdown', (['"""simulation set!"""'], {'unsafe_allow_html': '(True)'}), "('simulation set!', unsafe_allow_html=True)\n", (47546, 47589), True, 'import streamlit as st\n'), ((71482, 71548), 'streamlit.markdown', 'st.markdown', (['"""Setting default forcefields"""'], {'unsafe_allow_html': '(True)'}), "('Setting default forcefields', unsafe_allow_html=True)\n", (71493, 71548), True, 'import streamlit as st\n'), ((73658, 73676), 'openmm.app.PDBxFile', 'PDBxFile', (['pdb_path'], {}), '(pdb_path)\n', (73666, 73676), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((5527, 5564), 'openmm.VerletIntegrator', 'VerletIntegrator', ([], {}), '(**integrator_params)\n', (5543, 5564), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((13018, 13035), 'openmm.app.PDBxFile', 'PDBxFile', (['pdbfile'], {}), '(pdbfile)\n', (13026, 13035), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, 
Simulation, StateDataReporter\n'), ((15006, 15073), 'streamlit.markdown', 'st.markdown', (['"""Setting up Verlet integrator"""'], {'unsafe_allow_html': '(True)'}), "('Setting up Verlet integrator', unsafe_allow_html=True)\n", (15017, 15073), True, 'import streamlit as st\n'), ((26911, 27008), 'openmm.LangevinIntegrator', 'LangevinIntegrator', (["int_params['Temperature']", "int_params['Friction']", "int_params['Timestep']"], {}), "(int_params['Temperature'], int_params['Friction'],\n int_params['Timestep'])\n", (26929, 27008), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((29047, 29122), 'openmm.app.DCDReporter', 'DCDReporter', (['f"""{trajectory_name}"""', "self.sim_params['record_interval_steps']"], {}), "(f'{trajectory_name}', self.sim_params['record_interval_steps'])\n", (29058, 29122), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((29258, 29331), 'openmm.app.PDBReporter', 'PDBReporter', (['f"""{topology_name}"""', "self.sim_params['record_interval_steps']"], {}), "(f'{topology_name}', self.sim_params['record_interval_steps'])\n", (29269, 29331), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((29467, 29612), 'openmm.app.StateDataReporter', 'StateDataReporter', (['f"""{log_name}"""', "self.sim_params['record_interval_steps']"], {'step': '(True)', 'potentialEnergy': '(True)', 'temperature': '(True)', 'separator': '"""\t"""'}), "(f'{log_name}', self.sim_params['record_interval_steps'],\n step=True, potentialEnergy=True, temperature=True, separator='\\t')\n", (29484, 29612), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((30145, 30221), 'openmm.app.DCDReporter', 'DCDReporter', (['"""temp_trajectory.dcd"""', "self.sim_params['record_interval_steps']"], {}), "('temp_trajectory.dcd', self.sim_params['record_interval_steps'])\n", (30156, 30221), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((30357, 30431), 'openmm.app.PDBReporter', 'PDBReporter', (['"""temp_topology.pdb"""', "self.sim_params['record_interval_steps']"], {}), "('temp_topology.pdb', self.sim_params['record_interval_steps'])\n", (30368, 30431), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((30567, 30713), 'openmm.app.StateDataReporter', 'StateDataReporter', (['"""temp_log.txt"""', "self.sim_params['record_interval_steps']"], {'step': '(True)', 'potentialEnergy': '(True)', 'temperature': '(True)', 'separator': '"""\t"""'}), "('temp_log.txt', self.sim_params['record_interval_steps'],\n step=True, potentialEnergy=True, temperature=True, separator='\\t')\n", (30584, 30713), False, 'from openmm.app import PME, AllBonds, CutoffNonPeriodic, 
CutoffPeriodic, DCDReporter, Ewald, ForceField, HAngles, HBonds, Modeller, NoCutoff, PDBFile, PDBReporter, PDBxFile, Simulation, StateDataReporter\n'), ((33597, 33626), 'openmm.unit.value_in_unit', 'unit.value_in_unit', (['unit.unit'], {}), '(unit.unit)\n', (33615, 33626), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((33629, 33649), 'openmm.unit.unit.get_name', 'unit.unit.get_name', ([], {}), '()\n', (33647, 33649), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((42802, 42833), 'textwrap.dedent', 'textwrap.dedent', (['script_content'], {}), '(script_content)\n', (42817, 42833), False, 'import textwrap\n'), ((44769, 44801), 'os.remove', 'os.remove', (['"""temp_trajectory.dcd"""'], {}), "('temp_trajectory.dcd')\n", (44778, 44801), False, 'import os\n'), ((44865, 44890), 'os.remove', 'os.remove', (['"""temp_log.txt"""'], {}), "('temp_log.txt')\n", (44874, 44890), False, 'import os\n'), ((44961, 44993), 'os.remove', 'os.remove', (['"""temp_checkpoint.chk"""'], {}), "('temp_checkpoint.chk')\n", (44970, 44993), False, 'import os\n'), ((71728, 71794), 'streamlit.markdown', 'st.markdown', (['"""Setting default forcefields"""'], {'unsafe_allow_html': '(True)'}), "('Setting default forcefields', unsafe_allow_html=True)\n", (71739, 71794), True, 'import streamlit as st\n'), ((5634, 5673), 'openmm.BrownianIntegrator', 'BrownianIntegrator', ([], {}), '(**integrator_params)\n', (5652, 5673), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((14061, 14112), 'openmm.MonteCarloBarostat', 'MonteCarloBarostat', (['(_pressure * bar)', '(_temp * kelvin)'], {}), '(_pressure * bar, _temp * kelvin)\n', (14079, 14112), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((49165, 49180), 'os.listdir', 'os.listdir', (['"""."""'], {}), "('.')\n", (49175, 49180), False, 'import os\n'), ((49205, 49236), 'os.path.exists', 'os.path.exists', (['"""files/records"""'], {}), "('files/records')\n", (49219, 49236), False, 'import os\n'), ((49258, 49286), 'os.makedirs', 'os.makedirs', (['"""files/records"""'], {}), "('files/records')\n", (49269, 49286), False, 'import os\n'), ((14496, 14546), 'openmm.AndersenThermostat', 'AndersenThermostat', (['(_temp * kelvin)', '(1 / picosecond)'], {}), '(_temp * kelvin, 1 / picosecond)\n', (14514, 14546), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((14580, 14631), 'openmm.MonteCarloBarostat', 'MonteCarloBarostat', (['(_pressure * bar)', '(_temp * kelvin)'], {}), '(_pressure * bar, _temp * kelvin)\n', (14598, 14631), False, 'from openmm import AndersenThermostat, BrownianIntegrator, LangevinIntegrator, LangevinMiddleIntegrator, MonteCarloBarostat, OpenMMException, Platform, VerletIntegrator, app, unit\n'), ((52042, 52096), 're.match', 're.match', (['"""([+-]?[0-9]*\\\\.?[0-9]+)([a-zA-Z]*)"""', 'cutoff'], {}), "('([+-]?[0-9]*\\\\.?[0-9]+)([a-zA-Z]*)', cutoff)\n", (52050, 
52096), False, 'import re\n'), ((54705, 54763), 're.match', 're.match', (['"""^(\\\\d+(?:\\\\.\\\\d+)?)([a-zA-Z]+)$"""', 'parameter_str'], {}), "('^(\\\\d+(?:\\\\.\\\\d+)?)([a-zA-Z]+)$', parameter_str)\n", (54713, 54763), False, 'import re\n'), ((55149, 55210), 're.match', 're.match', (['"""([+-]?[0-9]*\\\\.?[0-9]+)([a-zA-Z]*)"""', 'parameter_str'], {}), "('([+-]?[0-9]*\\\\.?[0-9]+)([a-zA-Z]*)', parameter_str)\n", (55157, 55210), False, 'import re\n')] |
import langchain
from langchain_openai import AzureChatOpenAI
from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory
from langchain.prompts.chat import MessagesPlaceholder
from tech_agents.command import Command, check_command
from tech_agents.dispatcher import MainDispatcherAgent
from tech_agents.template import default_value
class MainAgent:
llm: AzureChatOpenAI
memory: ConversationBufferMemory
chat_history: MessagesPlaceholder
verbose: bool
"""
MainAgentクラスは、メインのエージェントを表すクラスです。
このクラスは、AzureChatOpenAI、ConversationBufferMemory、MessagesPlaceholderなどの属性を持ちます。
メインエージェントは、指定された入力に対してAgentクラスを実行します。
"""
def __init__(
self,
llm: AzureChatOpenAI = default_value.default_llm,
memory: ConversationBufferMemory = default_value.default_memory,
chat_history: MessagesPlaceholder = default_value.default_chat_history,
verbose: bool = False,
):
"""
MainAgentクラスのコンストラクタです。
デフォルトの引数を使用して、AzureChatOpenAI、ConversationBufferMemory、MessagesPlaceholder、verboseを初期化します。
インスタンス化
------------
main_agent = MainAgent(
llm=あなたの使用したいLLM,
memory=あなたの使用したいメモリ,
chat_history=あなたの使用したい会話履歴,
verbose=デバッグモードを有効にするかどうか
)
実行
------------
message = "こんにちは"
output = main_agent.run(message)
print(output)
"""
        # Initialize arguments
self.llm = llm
self.memory = memory
self.chat_history = chat_history
self.verbose = verbose
        # Make the memory read-only
self.readonly_memory = ReadOnlySharedMemory(memory=self.memory)
        # Configure debug mode
langchain.debug = self.verbose
def run(self, user_message: str) -> str:
"""
メインエージェントを実行するメソッドです。
Agentクラスを生成し、指定された入力を渡して実行します。
"""
param = check_command(user_message)
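        # If the message is a command, delegate it to the Command agent; otherwise fall through to the main dispatcher agent.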
if param.check_command_bool:
CommandAgent = Command(
llm=self.llm,
memory=self.memory,
readonly_memory=self.readonly_memory,
chat_history=self.chat_history,
verbose=self.verbose
)
return CommandAgent.run(param.command, user_message)
main_agent = MainDispatcherAgent(
llm=self.llm,
memory=self.memory,
readonly_memory=self.readonly_memory,
chat_history=self.chat_history,
verbose=self.verbose
)
return main_agent.run(user_message)
| [
"langchain.memory.ReadOnlySharedMemory"
] | [((1669, 1709), 'langchain.memory.ReadOnlySharedMemory', 'ReadOnlySharedMemory', ([], {'memory': 'self.memory'}), '(memory=self.memory)\n', (1689, 1709), False, 'from langchain.memory import ConversationBufferMemory, ReadOnlySharedMemory\n'), ((1926, 1953), 'tech_agents.command.check_command', 'check_command', (['user_message'], {}), '(user_message)\n', (1939, 1953), False, 'from tech_agents.command import Command, check_command\n'), ((2341, 2491), 'tech_agents.dispatcher.MainDispatcherAgent', 'MainDispatcherAgent', ([], {'llm': 'self.llm', 'memory': 'self.memory', 'readonly_memory': 'self.readonly_memory', 'chat_history': 'self.chat_history', 'verbose': 'self.verbose'}), '(llm=self.llm, memory=self.memory, readonly_memory=self.\n readonly_memory, chat_history=self.chat_history, verbose=self.verbose)\n', (2360, 2491), False, 'from tech_agents.dispatcher import MainDispatcherAgent\n'), ((2018, 2156), 'tech_agents.command.Command', 'Command', ([], {'llm': 'self.llm', 'memory': 'self.memory', 'readonly_memory': 'self.readonly_memory', 'chat_history': 'self.chat_history', 'verbose': 'self.verbose'}), '(llm=self.llm, memory=self.memory, readonly_memory=self.\n readonly_memory, chat_history=self.chat_history, verbose=self.verbose)\n', (2025, 2156), False, 'from tech_agents.command import Command, check_command\n')] |
from typing import List, TypedDict
import tiktoken
from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage
from langchain_openai import ChatOpenAI
from app.enums.langchain_enums import LangchainRole
from config import langchain_config, settings
class MessagesType(TypedDict):
role: str
content: str
class LangchainSendChatService:
    def __init__(self, model_name: str, messages: List[MessagesType]):
        self.model_name = model_name
        self.messages = messages
        if self._check_model_name() is False:
            raise ValueError("model_name is invalid.")
def _check_model_name(self) -> bool:
if self.model_name not in langchain_config.USEABLE_MODEL_NAME:
return False
return True
def send_message(
self,
temperature: float = 0.7,
max_tokens: int = 1000,
) -> str:
langchain_messages = self._create_messages()
if self._check_tokens(langchain_messages, max_tokens) is False:
raise ValueError("model_name is invalid.")
parameters = {
"max_tokens": max_tokens,
"model_name": self.model_name,
"openai_api_key": settings.OPENAI_API_KEY,
"temperature": temperature,
}
chat = ChatOpenAI(**parameters)
result = chat.invoke(langchain_messages)
return result.content
def _check_tokens(
self,
langchain_messages: List[BaseMessage],
max_tokens: int,
) -> bool:
"""
        Check that the messages fit within the model's token limit.
"""
if len(langchain_messages) == 0:
return False
encode_name = langchain_config.ENCODE_NAME[self.model_name]
tiktoken_enc = tiktoken.get_encoding(encode_name)
total_tokens = 0
for langchain_message in langchain_messages:
tokens = tiktoken_enc.encode(langchain_message.content)
total_tokens += len(tokens)
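        # The prompt tokens plus the requested completion budget must fit within the model's maximum context size.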
return langchain_config.MAX_TOKEN[self.model_name] > (total_tokens + max_tokens)
def _create_messages(self) -> List[BaseMessage]:
langchain_messages: List[BaseMessage] = []
for message in self.messages:
if message["role"] == LangchainRole.AI.value:
langchain_messages.append(AIMessage(content=message["content"]))
continue
if message["role"] == LangchainRole.HUMAN.value:
langchain_messages.append(HumanMessage(content=message["content"]))
continue
if message["role"] == LangchainRole.SYSTEM.value:
langchain_messages.append(SystemMessage(content=message["content"]))
continue
return langchain_messages
| [
"langchain.schema.AIMessage",
"langchain_openai.ChatOpenAI",
"langchain.schema.SystemMessage",
"langchain.schema.HumanMessage"
] | [((1294, 1318), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {}), '(**parameters)\n', (1304, 1318), False, 'from langchain_openai import ChatOpenAI\n'), ((1726, 1760), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['encode_name'], {}), '(encode_name)\n', (1747, 1760), False, 'import tiktoken\n'), ((2281, 2318), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': "message['content']"}), "(content=message['content'])\n", (2290, 2318), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2449, 2489), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': "message['content']"}), "(content=message['content'])\n", (2461, 2489), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n'), ((2621, 2662), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': "message['content']"}), "(content=message['content'])\n", (2634, 2662), False, 'from langchain.schema import AIMessage, BaseMessage, HumanMessage, SystemMessage\n')] |
# import modules
import telebot
from telebot import types
import logging
import sqlite3
import os
import langchain
from langchain.text_splitter import RecursiveCharacterTextSplitter, CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.document_loaders import TextLoader
from langchain.document_loaders import DirectoryLoader
from langchain.vectorstores import Chroma
from langchain.prompts import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
# connect to the database
conn = sqlite3.connect(r"main.db", check_same_thread=False)
cur = conn.cursor()
# start logging
logging.basicConfig(level=logging.INFO, filename="../info.log", filemode='w')
# init a bot with token from file
bot_token_file = open("bot_token.txt", "r")
API_KEY = bot_token_file.readline()
bot_token_file.close()
os.environ["API_KEY"] = API_KEY
bot = telebot.TeleBot(API_KEY)
# set the openai token
token_file = open("openai_token.txt", "r")
token = token_file.readline()
token_file.close()
os.environ["OPENAI_API_KEY"] = token
docs_k = 65 # const
number_of_goods = 6 # const
goods = ["Philips EP2231/40", "Nivona CafeRomatica NICR 550", # list of goods
"Delonghi ECAM 370.70.B", "Polaris PACM 2065AC",
"Philips EP2030/10", "REDMOND RCM-1517"]
langchain.debug = False # debug is off
# read the vector databases
vectordb_list = []
embedding = OpenAIEmbeddings()
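# Load one persisted Chroma collection per product; index i matches the product's position in the goods list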
for i in range(number_of_goods):
vectordb_list.append(Chroma(embedding_function=embedding,
persist_directory="../output/"+str(i)))
for vectordb in vectordb_list:
print(vectordb._collection.count())
def get_info(itemID):
question = "Расскажи об этой кофемашине"
template = """Ты - полезный ИИ консультант для нашего магазина бытовой техники по продаже кофемашин.
Твое задание - описать данную кофемашину. Говори только о достоинствах.
Используйте следующие фрагменты контекста (Context), чтобы ответить на вопрос в конце (Question).
Если вы не знаете ответа, просто скажите, что не знаете, не пытайтесь придумывать ответ.
Сначала убедитесь, что прикрепленный текст имеет отношение к вопросу.
    Если вопрос не имеет отношения к тексту, ответьте, что вы не можете ответить на данный вопрос.
Используйте максимум 15 предложений.
    Дайте как можно более понятный ответ, рассказывая кратко про все достоинства именно данной кофемашины.
Context: {context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
vectordb = vectordb_list[itemID]
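    # Retrieve the docs_k most similar chunks from this product's vector store to ground the answer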
retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": docs_k})
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0,
max_tokens = 250)
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT})
result = qa_chain({"query": question})
return result["result"]
def get_answer(itemID, question):
template = """Ты - полезный ИИ консультант для нашего магазина бытовой техники по продаже кофемашин.
Твое задание - понятно ответить на вопрос покупателя.
Используйте следующие фрагменты контекста (Context), чтобы ответить на вопрос в конце (Question).
Если вы не знаете ответа, просто скажите, что не знаете, не пытайтесь придумывать ответ.
Сначала убедитесь, что прикрепленный текст имеет отношение к вопросу.
    Если вопрос не имеет отношения к тексту, ответьте, что вы не можете ответить на данный вопрос.
Используйте максимум 15 предложений.
    Дайте как можно более понятный ответ. Говорите грамотно.
Context: {context}
Question: {question}"""
QA_CHAIN_PROMPT = PromptTemplate.from_template(template)
vectordb = vectordb_list[itemID]
retriever = vectordb.as_retriever(search_type="similarity", search_kwargs={"k": docs_k})
llm = ChatOpenAI(
model_name="gpt-3.5-turbo",
temperature=0,
max_tokens = 250)
qa_chain = RetrievalQA.from_chain_type(
llm,
retriever=retriever,
return_source_documents=True,
chain_type_kwargs={"prompt": QA_CHAIN_PROMPT})
result = qa_chain({"query": question})
return result["result"]
def check_step(step, id):
cur.execute("SELECT status FROM user WHERE userID = ?", (id,))
fetch_result = cur.fetchone()
if fetch_result == None:
return False
elif step in fetch_result:
return True
else:
return False
def get_itemID(userID):
cur.execute("SELECT itemID FROM user WHERE userID = ?", (userID,))
fetch_result = cur.fetchone()
return fetch_result[0]
@bot.message_handler(commands=["start"])
def start_message(message):
keyboard = types.ReplyKeyboardMarkup(
resize_keyboard = True,
one_time_keyboard=True
)
zero_machine = types.KeyboardButton(text="Philips EP2231/40")
first_machine = types.KeyboardButton(text="Nivona CafeRomatica NICR 550")
second_machine = types.KeyboardButton(text="Delonghi ECAM 370.70.B")
third_machine = types.KeyboardButton(text="Polaris PACM 2065AC")
fourth_machine = types.KeyboardButton(text="Philips EP2030/10")
fifth_machine = types.KeyboardButton(text="REDMOND RCM-1517")
keyboard.row(zero_machine, first_machine)
keyboard.row(second_machine, third_machine)
keyboard.row(fourth_machine, fifth_machine)
bot.send_message(message.chat.id, "📂 Главное меню")
bot.send_message(message.chat.id, "📝 Выберите интересующий Вас товар или напишите его имя вручную", reply_markup=keyboard)
    try:
        cur.execute("INSERT INTO user VALUES (?, ?, ?);", (message.chat.id, "menu", 0))
    except sqlite3.IntegrityError:
        # user already exists; reset their status to the main menu
        cur.execute("UPDATE user SET status = ? WHERE userID = ?;", ("menu", message.chat.id))
conn.commit()
@bot.message_handler(content_types="text", func=lambda message: check_step("menu", message.chat.id))
def machine_description(message):
if message.text in goods:
keyboard = types.ReplyKeyboardMarkup(
resize_keyboard=True,
one_time_keyboard=True
)
back_to_menu_button = types.KeyboardButton(text="🗃️ Назад в меню")
keyboard.add(back_to_menu_button)
bot.send_message(message.chat.id, """⚙️ Запрос принят. Ожидайте ответа...\nВы выбрали -> {}""".format(message.text))
description = get_info(goods.index(message.text))
bot.send_message(message.chat.id, description)
bot.send_message(message.chat.id, """🔎 Сейчас Вы можете задать вопросы об этом товаре или вернуться в главное меню.""", reply_markup=keyboard)
# change user status in db
cur.execute("UPDATE user SET status = ?, itemID = ? WHERE userID = ?;", ("chat",
goods.index(message.text),
message.chat.id))
conn.commit()
else:
bot.send_message(message.chat.id, "❌ Запрос отклонён. Такого товара не существует!")
@bot.message_handler(content_types="text", func= lambda message: check_step("chat", message.chat.id))
def chat_with_ai(message):
keyboard = types.ReplyKeyboardMarkup(
resize_keyboard=True,
one_time_keyboard=True
)
back_to_menu_button = types.KeyboardButton(text="🗃️ Назад в меню")
keyboard.add(back_to_menu_button)
if message.text == back_to_menu_button.text:
bot.send_message(message.chat.id, "⛓️ Возврат в главное меню")
cur.execute("UPDATE user SET status = ? WHERE userID = ?;", ("menu", message.chat.id))
conn.commit()
keyboard = types.ReplyKeyboardMarkup(
resize_keyboard = True,
one_time_keyboard=True
)
zero_machine = types.KeyboardButton(text="Philips EP2231/40")
first_machine = types.KeyboardButton(text="Nivona CafeRomatica NICR 550")
second_machine = types.KeyboardButton(text="Delonghi ECAM 370.70.B")
third_machine = types.KeyboardButton(text="Polaris PACM 2065AC")
fourth_machine = types.KeyboardButton(text="Philips EP2030/10")
fifth_machine = types.KeyboardButton(text="REDMOND RCM-1517")
keyboard.row(zero_machine, first_machine)
keyboard.row(second_machine, third_machine)
keyboard.row(fourth_machine, fifth_machine)
bot.send_message(message.chat.id, "📂 Главное меню")
bot.send_message(message.chat.id, "📝 Выберите интересующий Вас товар или напишите его имя вручную", reply_markup=keyboard)
else:
itemID = get_itemID(message.chat.id)
answer = get_answer(itemID, message.text)
bot.send_message(message.chat.id, answer, reply_markup=keyboard)
bot.infinity_polling(timeout=10, long_polling_timeout = 5) | [
"langchain.prompts.PromptTemplate.from_template",
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.chat_models.ChatOpenAI",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((571, 622), 'sqlite3.connect', 'sqlite3.connect', (['"""main.db"""'], {'check_same_thread': '(False)'}), "('main.db', check_same_thread=False)\n", (586, 622), False, 'import sqlite3\n'), ((661, 738), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'filename': '"""../info.log"""', 'filemode': '"""w"""'}), "(level=logging.INFO, filename='../info.log', filemode='w')\n", (680, 738), False, 'import logging\n'), ((915, 939), 'telebot.TeleBot', 'telebot.TeleBot', (['API_KEY'], {}), '(API_KEY)\n', (930, 939), False, 'import telebot\n'), ((1427, 1445), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1443, 1445), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2561, 2599), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (2589, 2599), False, 'from langchain.prompts import PromptTemplate\n'), ((2745, 2814), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'max_tokens': '(250)'}), "(model_name='gpt-3.5-turbo', temperature=0, max_tokens=250)\n", (2755, 2814), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2857, 2996), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', (['llm'], {'retriever': 'retriever', 'return_source_documents': '(True)', 'chain_type_kwargs': "{'prompt': QA_CHAIN_PROMPT}"}), "(llm, retriever=retriever,\n return_source_documents=True, chain_type_kwargs={'prompt': QA_CHAIN_PROMPT}\n )\n", (2884, 2996), False, 'from langchain.chains import RetrievalQA\n'), ((3869, 3907), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['template'], {}), '(template)\n', (3897, 3907), False, 'from langchain.prompts import PromptTemplate\n'), ((4053, 4122), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'max_tokens': '(250)'}), "(model_name='gpt-3.5-turbo', temperature=0, max_tokens=250)\n", (4063, 4122), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4165, 4304), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', (['llm'], {'retriever': 'retriever', 'return_source_documents': '(True)', 'chain_type_kwargs': "{'prompt': QA_CHAIN_PROMPT}"}), "(llm, retriever=retriever,\n return_source_documents=True, chain_type_kwargs={'prompt': QA_CHAIN_PROMPT}\n )\n", (4192, 4304), False, 'from langchain.chains import RetrievalQA\n')] |
import langchain as lc
import openai as ai
import datasets as ds
import tiktoken as tk
import os
from langchain_openai import ChatOpenAI
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
# Get the OpenAI API key from the environment variable
openai_api_key = os.getenv("OPENAI_API_KEY")
if openai_api_key is None:
raise ValueError("No OpenAI API key found. Please set it in the .env file.")
# Initialize the ChatOpenAI with the API key
chat = ChatOpenAI(openai_api_key=openai_api_key, model="gpt-3.5-turbo")
from langchain.schema import (
SystemMessage,
HumanMessage,
AIMessage
)
messages = [
SystemMessage(content="You are a helpful assistant."),
HumanMessage(content="Hi AI, how are you today?."),
AIMessage(content="I am great, thank you. How can I help you?"),
HumanMessage(content="I am looking for a restaurant in the center of Berlin."),
]
| [
"langchain.schema.AIMessage",
"langchain_openai.ChatOpenAI",
"langchain.schema.SystemMessage",
"langchain.schema.HumanMessage"
] | [((224, 237), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (235, 237), False, 'from dotenv import load_dotenv\n'), ((311, 338), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (320, 338), False, 'import os\n'), ((501, 563), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'open_api_key': 'openai_api_key', 'model': '"""gpt-3.5-turbo"""'}), "(open_api_key=openai_api_key, model='gpt-3.5-turbo')\n", (511, 563), False, 'from langchain_openai import ChatOpenAI\n'), ((667, 720), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': '"""You are a helpful assistant."""'}), "(content='You are a helpful assistant.')\n", (680, 720), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n'), ((726, 776), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""Hi AI, how are you today?."""'}), "(content='Hi AI, how are you today?.')\n", (738, 776), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n'), ((782, 845), 'langchain.schema.AIMessage', 'AIMessage', ([], {'content': '"""I am great, thank you. How can I help you?"""'}), "(content='I am great, thank you. How can I help you?')\n", (791, 845), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n'), ((851, 929), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': '"""I am looking for a restaurant in the center of Berlin."""'}), "(content='I am looking for a restaurant in the center of Berlin.')\n", (863, 929), False, 'from langchain.schema import SystemMessage, HumanMessage, AIMessage\n')] |
"""Push and pull to the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain.load.dump import dumps
from langchain.load.load import loads
from langchain.utils import get_from_env
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_key: Optional[str] = None) -> Client:
try:
from langchainhub import Client
except ImportError as e:
raise ImportError(
"Could not import langchainhub, please install with `pip install "
"langchainhub`."
) from e
api_url = api_url or get_from_env("api_url", "LANGCHAIN_HUB_API_URL")
api_key = api_key or get_from_env("api_key", "LANGCHAIN_HUB_API_KEY", default="")
api_key = api_key or get_from_env("api_key", "LANGCHAIN_API_KEY")
return Client(api_url, api_key=api_key)
def push(
repo_full_name: str,
object: Any,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
parent_commit_hash: Optional[str] = "latest",
) -> str:
"""
Pushes an object to the hub and returns the URL.
"""
client = _get_client(api_url=api_url, api_key=api_key)
manifest_json = dumps(object)
resp = client.push(
repo_full_name, manifest_json, parent_commit_hash=parent_commit_hash
)
commit_hash: str = resp["commit"]["commit_hash"]
return commit_hash
def pull(
owner_repo_commit: str,
*,
api_url: Optional[str] = None,
api_key: Optional[str] = None,
) -> Any:
"""
Pulls an object from the hub and returns it.
"""
client = _get_client(api_url=api_url, api_key=api_key)
resp: str = client.pull(owner_repo_commit)
return loads(resp)
| [
"langchain.load.load.loads",
"langchainhub.Client",
"langchain.load.dump.dumps",
"langchain.utils.get_from_env"
] | [((862, 894), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (868, 894), False, 'from langchainhub import Client\n'), ((1234, 1247), 'langchain.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1239, 1247), False, 'from langchain.load.dump import dumps\n'), ((1740, 1751), 'langchain.load.load.loads', 'loads', (['resp'], {}), '(resp)\n', (1745, 1751), False, 'from langchain.load.load import loads\n'), ((646, 694), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_url"""', '"""LANGCHAIN_HUB_API_URL"""'], {}), "('api_url', 'LANGCHAIN_HUB_API_URL')\n", (658, 694), False, 'from langchain.utils import get_from_env\n'), ((720, 780), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_HUB_API_KEY"""'], {'default': '""""""'}), "('api_key', 'LANGCHAIN_HUB_API_KEY', default='')\n", (732, 780), False, 'from langchain.utils import get_from_env\n'), ((806, 850), 'langchain.utils.get_from_env', 'get_from_env', (['"""api_key"""', '"""LANGCHAIN_API_KEY"""'], {}), "('api_key', 'LANGCHAIN_API_KEY')\n", (818, 850), False, 'from langchain.utils import get_from_env\n')] |
from datetime import timedelta
import os
import subprocess
import whisper
import tempfile
import argparse
import langchain
from langchain.chat_models import ChatOpenAI, ChatGooglePalm
from langchain.schema import HumanMessage, SystemMessage, AIMessage
from langchain.prompts import (
ChatPromptTemplate,
PromptTemplate,
SystemMessagePromptTemplate,
AIMessagePromptTemplate,
HumanMessagePromptTemplate,
)
from langchain.chains import LLMChain
from langchain.callbacks import get_openai_callback
from tqdm import tqdm
def get_translate_chain(from_lang, to_lang):
template=f"You are a helpful assistant that translates {from_lang} to {to_lang}."
system_message_prompt = SystemMessagePromptTemplate.from_template(template)
human_template="Please translate \"{text}\""+f" from {from_lang} to {to_lang}. Give me the translated {to_lang} directly without saying anything else, do not use \"."
human_message_prompt = HumanMessagePromptTemplate.from_template(human_template)
chat_prompt = ChatPromptTemplate.from_messages([system_message_prompt, human_message_prompt])
# get a chat completion from the formatted messages
chat = ChatOpenAI()
chain = LLMChain(llm=chat, prompt=chat_prompt, verbose=True)
return chain
def gen_srt(video_path, model_name="medium", from_language="English", to_language="Chinese", embed=False, translate=True):
with tempfile.TemporaryDirectory() as temp_dir:
# 1. use ffmpeg to extract audio from video and save it to Temp folder
# Path to the temporary audio file
temp_audio_path = os.path.join(temp_dir, "extracted_audio.wav")
# Use ffmpeg to extract audio from video
print("Extracting audio from video...")
command = f"ffmpeg -i {video_path} -vn -ar 44100 -ac 2 -b:a 192k {temp_audio_path}"
# Execute the command
subprocess.call(command, shell=True)
model = whisper.load_model(model_name)
transcribe = model.transcribe(audio=temp_audio_path, language=from_language)
segments = transcribe['segments']
# 2. Use whisper to transcribe audio and save segments to srt file
if translate:
with get_openai_callback() as cb:
chain = get_translate_chain(from_language, to_language)
for segment in tqdm(segments):
segment['text'] = chain(segment['text'])['text']
print(cb)
# 3. Generate the SRT file
srtFilename = video_path.split(".")[0] + ".srt"
# overwrite the file if it already exists
if os.path.exists(srtFilename):
os.remove(srtFilename)
for segment in segments:
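            # Build SRT timestamps (HH:MM:SS,mmm); whisper segment times are truncated to whole seconds, so milliseconds stay at 000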
startTime = str(0)+str(timedelta(seconds=int(segment['start'])))+',000'
endTime = str(0)+str(timedelta(seconds=int(segment['end'])))+',000'
text = segment['text']
segmentId = segment['id']+1
segment = f"{segmentId}\n{startTime} --> {endTime}\n{text[1:] if text[0] == ' ' else text}\n\n"
with open(srtFilename, 'a', encoding='utf-8') as srtFile:
srtFile.write(segment)
# 4. Use FFMPEG to embed srt file into video
if not embed:
return
output_filename = video_path.split(".")[0] + "_subtitled.mp4"
if os.path.exists(output_filename):
os.remove(output_filename)
embed_command = f"ffmpeg -i {video_path} -vf subtitles={srtFilename} {output_filename}"
subprocess.call(embed_command, shell=True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Process some arguments')
# Add the arguments
parser.add_argument('-i', type=str, required=True, dest='input_file',
help='Input file name')
parser.add_argument('-m', type=str, default='medium', dest='model_name',
help='Model type, default is "medium"')
parser.add_argument('-f', type=str, default='English', dest='from_lang',
help='Translate from language, default is "English"')
parser.add_argument('-t', type=str, default='Chinese', dest='to_lang',
help='Translate to language, default is "Chinese"')
parser.add_argument('--embed', dest='embed', action='store_true',
help='Whether to Embed subtitles, default is False')
parser.add_argument('--translate', dest='translate', action='store_true',
help='Whether to Translate, default is False')
args = parser.parse_args()
gen_srt(args.input_file, model_name=args.model_name, embed=args.embed, translate=args.translate, from_language=args.from_lang, to_language=args.to_lang)
| [
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.callbacks.get_openai_callback",
"langchain.chains.LLMChain",
"langchain.prompts.SystemMessagePromptTemplate.from_template"
] | [((696, 747), 'langchain.prompts.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (737, 747), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((946, 1002), 'langchain.prompts.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['human_template'], {}), '(human_template)\n', (986, 1002), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1021, 1100), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message_prompt, human_message_prompt]'], {}), '([system_message_prompt, human_message_prompt])\n', (1053, 1100), False, 'from langchain.prompts import ChatPromptTemplate, PromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1169, 1181), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (1179, 1181), False, 'from langchain.chat_models import ChatOpenAI, ChatGooglePalm\n'), ((1194, 1246), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'chat', 'prompt': 'chat_prompt', 'verbose': '(True)'}), '(llm=chat, prompt=chat_prompt, verbose=True)\n', (1202, 1246), False, 'from langchain.chains import LLMChain\n'), ((3618, 3679), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process some arguments"""'}), "(description='Process some arguments')\n", (3641, 3679), False, 'import argparse\n'), ((1398, 1427), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (1425, 1427), False, 'import tempfile\n'), ((1589, 1634), 'os.path.join', 'os.path.join', (['temp_dir', '"""extracted_audio.wav"""'], {}), "(temp_dir, 'extracted_audio.wav')\n", (1601, 1634), False, 'import os\n'), ((1880, 1916), 'subprocess.call', 'subprocess.call', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1895, 1916), False, 'import subprocess\n'), ((1934, 1964), 'whisper.load_model', 'whisper.load_model', (['model_name'], {}), '(model_name)\n', (1952, 1964), False, 'import whisper\n'), ((2619, 2646), 'os.path.exists', 'os.path.exists', (['srtFilename'], {}), '(srtFilename)\n', (2633, 2646), False, 'import os\n'), ((3357, 3388), 'os.path.exists', 'os.path.exists', (['output_filename'], {}), '(output_filename)\n', (3371, 3388), False, 'import os\n'), ((3533, 3575), 'subprocess.call', 'subprocess.call', (['embed_command'], {'shell': '(True)'}), '(embed_command, shell=True)\n', (3548, 3575), False, 'import subprocess\n'), ((2660, 2682), 'os.remove', 'os.remove', (['srtFilename'], {}), '(srtFilename)\n', (2669, 2682), False, 'import os\n'), ((3402, 3428), 'os.remove', 'os.remove', (['output_filename'], {}), '(output_filename)\n', (3411, 3428), False, 'import os\n'), ((2215, 2236), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2234, 2236), False, 'from langchain.callbacks import get_openai_callback\n'), ((2347, 2361), 'tqdm.tqdm', 'tqdm', (['segments'], {}), '(segments)\n', (2351, 2361), False, 'from tqdm import tqdm\n')] |
from langchain import OpenAI, LLMChain
from langchain.callbacks import StdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from src.agents.chat_chain import ChatChain
from src.agents.graphdb_traversal_chain import GraphDBTraversalChain, mem_query_template, mem_system_message
from src.memory.triple_modal_memory import TripleModalMemory
import os
from dotenv import load_dotenv
# Set up the cache
import langchain
from langchain.cache import SQLiteCache
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
# initialize the memory
load_dotenv()
uri = os.getenv("NEO4J_URI")
user = os.getenv("NEO4J_USER")
password = os.getenv("NEO4J_PASSWORD")
mem = TripleModalMemory(uri, user, password)
# Create memory from docs or load from file if it exists
ingested = os.path.exists('../data/triple_modal_memory.faiss')
if not ingested:
knowledge_path = r'C:\Users\colli\Documents\AIPapers'
mem.ingest_docs(knowledge_path)
mem.save()
print("Memory initialized and saved.")
else:
mem.load()
print("Memory loaded.")
handler = StdOutCallbackHandler()
llm = ChatOpenAI(
model_name="gpt-4", #"gpt-3.5-turbo"
temperature=0,
verbose=True
)
chain = ChatChain(llm=llm, prompt=mem_query_template, callbacks=[handler], system_message=mem_system_message)
knowledge_base_query_agent = GraphDBTraversalChain(llm_chain=chain, graph_vector_store=mem.vector_store)
# Example Research questions:
# What are different methods of providing language models with additional context to better answer questions?
# How can semantic search be used in conjunction with large language models in order to better answer questions?
# What are some techniques for achieving better general intelligence in language models?
def main_loop():
try:
while True:
question = input("Enter a question: ")
print(knowledge_base_query_agent.run(question))
except KeyboardInterrupt:
print("Shutdown: Saving...")
mem.save()
print("Shutdown: Complete")
else:
print("Completed all tasks.")
if __name__ == '__main__':
main_loop() | [
"langchain.callbacks.StdOutCallbackHandler",
"langchain.chat_models.ChatOpenAI",
"langchain.cache.SQLiteCache"
] | [((495, 537), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (506, 537), False, 'from langchain.cache import SQLiteCache\n'), ((563, 576), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (574, 576), False, 'from dotenv import load_dotenv\n'), ((583, 605), 'os.getenv', 'os.getenv', (['"""NEO4J_URI"""'], {}), "('NEO4J_URI')\n", (592, 605), False, 'import os\n'), ((613, 636), 'os.getenv', 'os.getenv', (['"""NEO4J_USER"""'], {}), "('NEO4J_USER')\n", (622, 636), False, 'import os\n'), ((648, 675), 'os.getenv', 'os.getenv', (['"""NEO4J_PASSWORD"""'], {}), "('NEO4J_PASSWORD')\n", (657, 675), False, 'import os\n'), ((683, 721), 'src.memory.triple_modal_memory.TripleModalMemory', 'TripleModalMemory', (['uri', 'user', 'password'], {}), '(uri, user, password)\n', (700, 721), False, 'from src.memory.triple_modal_memory import TripleModalMemory\n'), ((792, 843), 'os.path.exists', 'os.path.exists', (['"""../data/triple_modal_memory.faiss"""'], {}), "('../data/triple_modal_memory.faiss')\n", (806, 843), False, 'import os\n'), ((1074, 1097), 'langchain.callbacks.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (1095, 1097), False, 'from langchain.callbacks import StdOutCallbackHandler\n'), ((1105, 1164), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-4"""', 'temperature': '(0)', 'verbose': '(True)'}), "(model_name='gpt-4', temperature=0, verbose=True)\n", (1115, 1164), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1204, 1309), 'src.agents.chat_chain.ChatChain', 'ChatChain', ([], {'llm': 'llm', 'prompt': 'mem_query_template', 'callbacks': '[handler]', 'system_message': 'mem_system_message'}), '(llm=llm, prompt=mem_query_template, callbacks=[handler],\n system_message=mem_system_message)\n', (1213, 1309), False, 'from src.agents.chat_chain import ChatChain\n'), ((1335, 1410), 'src.agents.graphdb_traversal_chain.GraphDBTraversalChain', 'GraphDBTraversalChain', ([], {'llm_chain': 'chain', 'graph_vector_store': 'mem.vector_store'}), '(llm_chain=chain, graph_vector_store=mem.vector_store)\n', (1356, 1410), False, 'from src.agents.graphdb_traversal_chain import GraphDBTraversalChain, mem_query_template, mem_system_message\n')] |
from __future__ import annotations
import logging
from functools import lru_cache
from typing import List, Optional
import langchain
from langchain.agents import AgentExecutor, Tool, initialize_agent
from langchain.agents.agent_types import AgentType
from langchain.callbacks import get_openai_callback
from langchain.chat_models import ChatOpenAI
from langchain.chat_models.base import BaseChatModel
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema.messages import BaseMessage
from langchain_experimental.plan_and_execute import (
PlanAndExecute,
load_agent_executor,
load_chat_planner,
)
from expert_gpts.llms.agent import HUMAN_SUFFIX, SYSTEM_PREFIX, ConvoOutputCustomParser
from shared.llm_manager_base import BaseLLMManager, Cost
from shared.llms.openai import GPT_3_5_TURBO, GPT_4, TEXT_ADA_EMBEDDING
from shared.llms.system_prompts import PLANNER_SYSTEM_PROMPT
langchain.debug = True
logger = logging.getLogger(__name__)
COSTS = {
GPT_3_5_TURBO: Cost(prompt=0.0015, completion=0.002),
GPT_4: Cost(prompt=0.03, completion=0.05),
TEXT_ADA_EMBEDDING: Cost(prompt=0.0001, completion=0.0001),
}
class OpenAIApiManager(BaseLLMManager):
_agents = {}
def __init__(self):
super().__init__(COSTS)
def get_agent_executor(
self,
llm,
agent_type: AgentType = AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
memory: Optional[BaseChatMemory] = None,
tools: Optional[List[Tool]] = None,
system_message: Optional[str] = SYSTEM_PREFIX,
human_message: Optional[str] = HUMAN_SUFFIX,
) -> AgentExecutor:
agent_kwargs = {
"output_parser": ConvoOutputCustomParser(),
}
if system_message:
agent_kwargs["system_message"] = system_message
if human_message:
agent_kwargs["human_message"] = human_message
return initialize_agent(
tools=tools,
llm=llm,
agent=agent_type,
memory=memory,
agent_kwargs=agent_kwargs,
)
def create_chat_completion(
self,
messages: List[BaseMessage], # type: ignore
model: str | None = GPT_3_5_TURBO,
temperature: float = 0,
max_tokens: int | None = None,
deployment_id=None,
openai_api_key=None,
) -> str:
llm = self.get_llm(max_tokens, model, temperature)
with get_openai_callback() as cb:
response = llm(messages, callbacks=[self.callbacks_handler])
self.update_cost(cb)
return response.content
def create_chat_completion_with_agent(
self,
user_input: str, # type: ignore
agent_type: AgentType = AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
model: str | None = GPT_3_5_TURBO,
agent_key: str = "default",
temperature: float = 0,
max_tokens: int | None = None,
memory: Optional[BaseChatMemory] = None,
tools: Optional[List[Tool]] = None,
) -> str:
llm = self.get_llm(max_tokens, model, temperature)
if agent_key not in self._agents:
self._agents[agent_key] = self.get_agent_executor(
llm, agent_type, memory, tools
)
agent = self._agents[agent_key]
with get_openai_callback() as cb:
response = agent.run(input=user_input, callbacks=[self.callbacks_handler])
self.update_cost(cb)
return response
def execute_plan(
self,
user_input: str, # type: ignore
model: str | None = GPT_3_5_TURBO,
agent_key: str = "default_plan",
temperature: float = 0,
max_tokens: int | None = None,
tools: Optional[List[Tool]] = None,
) -> str:
llm = self.get_llm(max_tokens, model, temperature)
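        # Build and cache a plan-and-execute agent: the planner drafts a step list, the executor runs each step with the provided tools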
if agent_key not in self._agents:
planner = load_chat_planner(llm, system_prompt=PLANNER_SYSTEM_PROMPT)
executor = load_agent_executor(llm, tools, verbose=True)
agent = PlanAndExecute(planner=planner, executor=executor, verbose=True)
self._agents[agent_key] = agent
agent = self._agents[agent_key]
with get_openai_callback() as cb:
response = agent.run(input=user_input, callbacks=[self.callbacks_handler])
self.update_cost(cb)
return response
@lru_cache
def get_llm(
self, max_tokens, model, temperature, as_predictor: bool = False
) -> BaseChatModel:
llm = ChatOpenAI(
model_name=model,
temperature=temperature,
max_tokens=max_tokens,
)
return llm
| [
"langchain.agents.initialize_agent",
"langchain_experimental.plan_and_execute.PlanAndExecute",
"langchain.chat_models.ChatOpenAI",
"langchain_experimental.plan_and_execute.load_chat_planner",
"langchain.callbacks.get_openai_callback",
"langchain_experimental.plan_and_execute.load_agent_executor"
] | [((946, 973), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (963, 973), False, 'import logging\n'), ((1004, 1041), 'shared.llm_manager_base.Cost', 'Cost', ([], {'prompt': '(0.0015)', 'completion': '(0.002)'}), '(prompt=0.0015, completion=0.002)\n', (1008, 1041), False, 'from shared.llm_manager_base import BaseLLMManager, Cost\n'), ((1054, 1088), 'shared.llm_manager_base.Cost', 'Cost', ([], {'prompt': '(0.03)', 'completion': '(0.05)'}), '(prompt=0.03, completion=0.05)\n', (1058, 1088), False, 'from shared.llm_manager_base import BaseLLMManager, Cost\n'), ((1114, 1152), 'shared.llm_manager_base.Cost', 'Cost', ([], {'prompt': '(0.0001)', 'completion': '(0.0001)'}), '(prompt=0.0001, completion=0.0001)\n', (1118, 1152), False, 'from shared.llm_manager_base import BaseLLMManager, Cost\n'), ((1906, 2008), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'tools': 'tools', 'llm': 'llm', 'agent': 'agent_type', 'memory': 'memory', 'agent_kwargs': 'agent_kwargs'}), '(tools=tools, llm=llm, agent=agent_type, memory=memory,\n agent_kwargs=agent_kwargs)\n', (1922, 2008), False, 'from langchain.agents import AgentExecutor, Tool, initialize_agent\n'), ((4514, 4590), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model', 'temperature': 'temperature', 'max_tokens': 'max_tokens'}), '(model_name=model, temperature=temperature, max_tokens=max_tokens)\n', (4524, 4590), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1683, 1708), 'expert_gpts.llms.agent.ConvoOutputCustomParser', 'ConvoOutputCustomParser', ([], {}), '()\n', (1706, 1708), False, 'from expert_gpts.llms.agent import HUMAN_SUFFIX, SYSTEM_PREFIX, ConvoOutputCustomParser\n'), ((2434, 2455), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2453, 2455), False, 'from langchain.callbacks import get_openai_callback\n'), ((3307, 3328), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (3326, 3328), False, 'from langchain.callbacks import get_openai_callback\n'), ((3890, 3949), 'langchain_experimental.plan_and_execute.load_chat_planner', 'load_chat_planner', (['llm'], {'system_prompt': 'PLANNER_SYSTEM_PROMPT'}), '(llm, system_prompt=PLANNER_SYSTEM_PROMPT)\n', (3907, 3949), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((3973, 4018), 'langchain_experimental.plan_and_execute.load_agent_executor', 'load_agent_executor', (['llm', 'tools'], {'verbose': '(True)'}), '(llm, tools, verbose=True)\n', (3992, 4018), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((4039, 4103), 'langchain_experimental.plan_and_execute.PlanAndExecute', 'PlanAndExecute', ([], {'planner': 'planner', 'executor': 'executor', 'verbose': '(True)'}), '(planner=planner, executor=executor, verbose=True)\n', (4053, 4103), False, 'from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner\n'), ((4201, 4222), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (4220, 4222), False, 'from langchain.callbacks import get_openai_callback\n')] |
import os
import utils
import traceback
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.chains import ConversationChain
from langchain.llms import OpenAI
import langchain
from langchain.chains.conversation.memory import ConversationSummaryBufferMemory,ConversationBufferMemory,ConversationBufferWindowMemory
from langchain.prompts import PromptTemplate
from embeddings import EmbeddingsManager
from flask import Flask, send_from_directory
import json
import time
import threading
import secrets
import string
import hashlib
from flask import request
from langchain.cache import InMemoryCache,SQLiteCache
import re
import requests
from waitress import serve
from translator import Translator
import sys
from query.discoursequery import DiscourseQuery
from query.embeddingsquery import EmbeddingsQuery
from Summary import Summary
import uuid
from langchain.llms import NLPCloud
from langchain.llms import AI21
from langchain.llms import Cohere
from SmartCache import SmartCache
CONFIG=None
QUERIERS=[]
args=sys.argv
confiFile=args[1] if len(args)>1 else "config.json"
print("Use config file", confiFile)
with open(confiFile, "r") as f:
CONFIG=json.load(f)
EmbeddingsManager.init(CONFIG)
Summary.init(CONFIG)
QUERIERS=[
EmbeddingsQuery(CONFIG),
DiscourseQuery(
CONFIG,CONFIG["JME_HUB_URL"],
searchFilter=CONFIG["JME_HUB_SEARCH_FILTER"],
knowledgeCutoff=CONFIG["JME_HUB_KNOWLEDGE_CUTOFF"]
)
]
Translator.init(CONFIG)
def getAffineDocs(question,context,keywords,shortQuestion, wordSalad=None, unitFilter=None,
maxFragmentsToReturn=3, maxFragmentsToSelect=12,merge=False):
affineDocs=[]
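    # Collect relevant fragments from every configured source (embeddings index and Discourse forum)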
for q in QUERIERS:
print("Get affine docs from",q,"using question",question,"with context",context,"and keywords",keywords)
t=time.time()
v=q.getAffineDocs(
question, context, keywords,shortQuestion, wordSalad, unitFilter,
maxFragmentsToReturn=maxFragmentsToReturn,
maxFragmentsToSelect=maxFragmentsToSelect,
merge=merge
)
print("Completed in",time.time()-t,"seconds.")
if v!=None:
affineDocs.extend(v)
return affineDocs
def rewriteError(error):
    if error.startswith("Rate limit reached "):
        return "Rate limit."
    return error
def rewrite(question):
# replace app, applet, game, application with simple application
question=re.sub(r"\b(app|applet|game|application)\b", "simple application", question, flags=re.IGNORECASE)
return question
def createChain():
# Backward compatibility
model_name=CONFIG.get("OPENAI_MODEL","text-davinci-003")
llm_name="openai"
########
llmx=CONFIG.get("LLM_MODEL",None) # "openai:text-davinci-003" "cohere:xlarge"
if llmx!=None:
if ":" in llmx:
llm_name,model_name=llmx.split(":")
else:
llm_name,model_name=llmx.split(".")
template = ""
template_path="prompts/"+llm_name+"."+model_name+".txt"
if not os.path.exists(template_path):
template_path="prompts/openai.text-davinci-003.txt"
with open(template_path, "r") as f:
template=f.read()
prompt = PromptTemplate(
input_variables=[ "history", "question", "summaries"],
template=template
)
llm=None
history_length=700
if llm_name=="openai":
max_tokens=512
temperature=0.0
if model_name=="text-davinci-003":
max_tokens=512
elif model_name=="code-davinci-002":
max_tokens=1024
#history_length=1024
llm=OpenAI(
temperature=temperature,
model_name=model_name,
max_tokens=max_tokens,
)
elif llm_name=="cohere":
llm=Cohere(
model=model_name,
max_tokens=700
)
history_length=200
elif llm_name=="ai21":
llm=AI21(
temperature=0.7,
model=model_name,
)
elif llm_name=="nlpcloud":
llm=NLPCloud(
model_name=model_name,
)
else:
raise Exception("Unknown LLM "+llm_name)
print("Use model ",model_name,"from",llm_name)
memory=ConversationSummaryBufferMemory(llm=llm, max_token_limit=history_length,human_prefix="QUESTION",ai_prefix="ANSWER", memory_key="history", input_key="question")
chain = load_qa_with_sources_chain(
llm,
memory=memory,
prompt=prompt,
verbose=True,
)
return chain
def extractQuestionData(question,wordSalad):
shortQuestion=Summary.summarizeMarkdown(question,min_length=100,max_length=1024,withCodeBlocks=False)
context=Summary.summarizeText(wordSalad,min_length=20,max_length=32)
keywords=[]
keywords.extend(Summary.getKeywords(shortQuestion,2))
keywords.extend(Summary.getKeywords(Summary.summarizeText(wordSalad,min_length=10,max_length=20),3))
return [question,shortQuestion,context,keywords,wordSalad]
def queryChain(chain,question):
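    # Merge the conversation history and the new question into one "word salad" used for summarization, keyword extraction and retrieval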
wordSalad=""
for h in chain.memory.buffer: wordSalad+=h+" "
wordSalad+=" "+question
[question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda :extractQuestionData(question,wordSalad))
affineDocs=utils.enqueue(lambda :getAffineDocs(question,context,keywords,shortQuestion,wordSalad))
print("Found ",len(affineDocs), " affine docs")
print("Q: ", shortQuestion)
output=chain({"input_documents": affineDocs, "question": shortQuestion}, return_only_outputs=True)
print("A :",output)
return output
sessions={}
langchain.llm_cache = SmartCache(CONFIG)#SQLiteCache(database_path=CONFIG["CACHE_PATH"]+"/langchain.db")
def clearSessions():
while True:
time.sleep(60*5)
for session in sessions:
if sessions[session]["timeout"] < time.time():
del sessions[session]
threading.Thread(target=clearSessions, daemon=True).start()
def createSessionSecret():
hex_chars = string.hexdigits
timeHash=hashlib.sha256(str(time.time()).encode("utf-8")).hexdigest()[:12]
return ''.join(secrets.choice(hex_chars) for i in range(64))+timeHash
app = Flask(__name__)
@app.route("/langs")
def langs():
return json.dumps(Translator.getLangs())
@app.route("/session",methods = ['POST'])
def session():
body=request.get_json()
lang=body["lang"] if "lang" in body else "en"
if lang=="auto":
lang="en"
if not "sessionSecret" in body or body["sessionSecret"].strip()=="":
sessionSecret=createSessionSecret()
else:
sessionSecret=body["sessionSecret"]
if sessionSecret not in sessions:
sessions[sessionSecret]={
"chain": createChain(),
"timeout": time.time()+60*30
}
else:
sessions[sessionSecret]["timeout"]=time.time()+60*30
welcomeText=""
welcomeText+=Translator.translate("en", lang,"Hi there! I'm an AI assistant for the open source game engine jMonkeyEngine. I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics.")
welcomeText+="<br><br>"
welcomeText+="<footer><span class=\"material-symbols-outlined\">tips_and_updates</span><span>"+Translator.translate("en", lang,"This chat bot is intended to provide helpful information, but accuracy is not guaranteed.")+"</span></footer>"
return json.dumps( {
"sessionSecret": sessionSecret,
"helloText":Translator.translate("en",lang,"Who are you?"),
"welcomeText":welcomeText
})
@app.route("/query",methods = ['POST'])
def query():
try:
body=request.get_json()
question=rewrite(body["question"])
lang=body["lang"] if "lang" in body else "en"
if lang == "auto":
lang=Translator.detect(question)
if lang!="en":
question=Translator.translate(lang,"en",question)
if len(question)==0:
raise Exception("Question is empty")
sessionSecret=body["sessionSecret"]
if sessionSecret not in sessions:
return json.dumps({"error": "Session expired"})
chain=sessions[sessionSecret]["chain"]
output=queryChain(chain,question)
if lang!="en":
output["output_text"]=Translator.translate("en",lang,output["output_text"])
#print(chain.memory.buffer)
return json.dumps(output)
except Exception as e:
print(e)
print(traceback.format_exc())
errorStr=str(e)
errorStr=rewriteError(errorStr)
return json.dumps({"error": errorStr})
@app.route('/<path:filename>')
def serveFrontend(filename):
return send_from_directory('frontend/', filename)
@app.route('/')
def serveIndex():
return send_from_directory('frontend/', "index.html")
@app.route('/docs', methods=['POST'])
def docs():
body=request.get_json()
question=body["question"]
maxFragmentsToReturn=int(body.get("maxFragmentsToReturn",3))
maxFragmentsToSelect=int(body.get("maxFragmentsToReturn",6))
wordSalad=body.get("context","")+" "+question
[question,shortQuestion,context,keywords,wordSalad]=utils.enqueue(lambda : extractQuestionData(question,wordSalad))
affineDocs=utils.enqueue(lambda : getAffineDocs(
question,context,keywords,shortQuestion,wordSalad,
maxFragmentsToReturn=maxFragmentsToReturn,
maxFragmentsToSelect=maxFragmentsToSelect
))
plainDocs=[
{
"content":doc.page_content,
"metadata":doc.metadata
} for doc in affineDocs
]
return json.dumps(plainDocs)
serve(app, host="0.0.0.0", port=8080, connection_limit=1000)
| [
"langchain.chains.conversation.memory.ConversationSummaryBufferMemory",
"langchain.llms.OpenAI",
"langchain.llms.AI21",
"langchain.llms.Cohere",
"langchain.chains.qa_with_sources.load_qa_with_sources_chain",
"langchain.llms.NLPCloud",
"langchain.prompts.PromptTemplate"
] | [((5785, 5803), 'SmartCache.SmartCache', 'SmartCache', (['CONFIG'], {}), '(CONFIG)\n', (5795, 5803), False, 'from SmartCache import SmartCache\n'), ((6330, 6345), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (6335, 6345), False, 'from flask import Flask, send_from_directory\n'), ((9830, 9890), 'waitress.serve', 'serve', (['app'], {'host': '"""0.0.0.0"""', 'port': '(8080)', 'connection_limit': '(1000)'}), "(app, host='0.0.0.0', port=8080, connection_limit=1000)\n", (9835, 9890), False, 'from waitress import serve\n'), ((1263, 1275), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1272, 1275), False, 'import json\n'), ((1280, 1310), 'embeddings.EmbeddingsManager.init', 'EmbeddingsManager.init', (['CONFIG'], {}), '(CONFIG)\n', (1302, 1310), False, 'from embeddings import EmbeddingsManager\n'), ((1315, 1335), 'Summary.Summary.init', 'Summary.init', (['CONFIG'], {}), '(CONFIG)\n', (1327, 1335), False, 'from Summary import Summary\n'), ((1591, 1614), 'translator.Translator.init', 'Translator.init', (['CONFIG'], {}), '(CONFIG)\n', (1606, 1614), False, 'from translator import Translator\n'), ((2557, 2659), 're.sub', 're.sub', (['"""\\\\b(app|applet|game|application)\\\\b"""', '"""simple application"""', 'question'], {'flags': 're.IGNORECASE'}), "('\\\\b(app|applet|game|application)\\\\b', 'simple application',\n question, flags=re.IGNORECASE)\n", (2563, 2659), False, 'import re\n'), ((3341, 3432), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['history', 'question', 'summaries']", 'template': 'template'}), "(input_variables=['history', 'question', 'summaries'],\n template=template)\n", (3355, 3432), False, 'from langchain.prompts import PromptTemplate\n'), ((4371, 4540), 'langchain.chains.conversation.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'llm': 'llm', 'max_token_limit': 'history_length', 'human_prefix': '"""QUESTION"""', 'ai_prefix': '"""ANSWER"""', 'memory_key': '"""history"""', 'input_key': '"""question"""'}), "(llm=llm, max_token_limit=history_length,\n human_prefix='QUESTION', ai_prefix='ANSWER', memory_key='history',\n input_key='question')\n", (4402, 4540), False, 'from langchain.chains.conversation.memory import ConversationSummaryBufferMemory, ConversationBufferMemory, ConversationBufferWindowMemory\n'), ((4543, 4618), 'langchain.chains.qa_with_sources.load_qa_with_sources_chain', 'load_qa_with_sources_chain', (['llm'], {'memory': 'memory', 'prompt': 'prompt', 'verbose': '(True)'}), '(llm, memory=memory, prompt=prompt, verbose=True)\n', (4569, 4618), False, 'from langchain.chains.qa_with_sources import load_qa_with_sources_chain\n'), ((4748, 4842), 'Summary.Summary.summarizeMarkdown', 'Summary.summarizeMarkdown', (['question'], {'min_length': '(100)', 'max_length': '(1024)', 'withCodeBlocks': '(False)'}), '(question, min_length=100, max_length=1024,\n withCodeBlocks=False)\n', (4773, 4842), False, 'from Summary import Summary\n'), ((4849, 4911), 'Summary.Summary.summarizeText', 'Summary.summarizeText', (['wordSalad'], {'min_length': '(20)', 'max_length': '(32)'}), '(wordSalad, min_length=20, max_length=32)\n', (4870, 4911), False, 'from Summary import Summary\n'), ((6497, 6515), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (6513, 6515), False, 'from flask import request\n'), ((7046, 7280), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""Hi there! I\'m an AI assistant for the open source game engine jMonkeyEngine. 
I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics."""'], {}), '(\'en\', lang,\n "Hi there! I\'m an AI assistant for the open source game engine jMonkeyEngine. I can help you with questions related to the jMonkeyEngine source code, documentation, and other related topics."\n )\n', (7066, 7280), False, 'from translator import Translator\n'), ((8890, 8932), 'flask.send_from_directory', 'send_from_directory', (['"""frontend/"""', 'filename'], {}), "('frontend/', filename)\n", (8909, 8932), False, 'from flask import Flask, send_from_directory\n'), ((8979, 9025), 'flask.send_from_directory', 'send_from_directory', (['"""frontend/"""', '"""index.html"""'], {}), "('frontend/', 'index.html')\n", (8998, 9025), False, 'from flask import Flask, send_from_directory\n'), ((9086, 9104), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (9102, 9104), False, 'from flask import request\n'), ((9806, 9827), 'json.dumps', 'json.dumps', (['plainDocs'], {}), '(plainDocs)\n', (9816, 9827), False, 'import json\n'), ((1359, 1382), 'query.embeddingsquery.EmbeddingsQuery', 'EmbeddingsQuery', (['CONFIG'], {}), '(CONFIG)\n', (1374, 1382), False, 'from query.embeddingsquery import EmbeddingsQuery\n'), ((1392, 1545), 'query.discoursequery.DiscourseQuery', 'DiscourseQuery', (['CONFIG', "CONFIG['JME_HUB_URL']"], {'searchFilter': "CONFIG['JME_HUB_SEARCH_FILTER']", 'knowledgeCutoff': "CONFIG['JME_HUB_KNOWLEDGE_CUTOFF']"}), "(CONFIG, CONFIG['JME_HUB_URL'], searchFilter=CONFIG[\n 'JME_HUB_SEARCH_FILTER'], knowledgeCutoff=CONFIG[\n 'JME_HUB_KNOWLEDGE_CUTOFF'])\n", (1406, 1545), False, 'from query.discoursequery import DiscourseQuery\n'), ((1943, 1954), 'time.time', 'time.time', ([], {}), '()\n', (1952, 1954), False, 'import time\n'), ((3165, 3194), 'os.path.exists', 'os.path.exists', (['template_path'], {}), '(template_path)\n', (3179, 3194), False, 'import os\n'), ((3764, 3841), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': 'temperature', 'model_name': 'model_name', 'max_tokens': 'max_tokens'}), '(temperature=temperature, model_name=model_name, max_tokens=max_tokens)\n', (3770, 3841), False, 'from langchain.llms import OpenAI\n'), ((4946, 4983), 'Summary.Summary.getKeywords', 'Summary.getKeywords', (['shortQuestion', '(2)'], {}), '(shortQuestion, 2)\n', (4965, 4983), False, 'from Summary import Summary\n'), ((5914, 5932), 'time.sleep', 'time.sleep', (['(60 * 5)'], {}), '(60 * 5)\n', (5924, 5932), False, 'import time\n'), ((6061, 6099), 'threading.Thread', 'threading.Thread', ([], {'target': 'clearSessions'}), '(target=clearSessions)\n', (6077, 6099), False, 'import threading\n'), ((6407, 6428), 'translator.Translator.getLangs', 'Translator.getLangs', ([], {}), '()\n', (6426, 6428), False, 'from translator import Translator\n'), ((7801, 7819), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (7817, 7819), False, 'from flask import request\n'), ((8605, 8623), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (8615, 8623), False, 'import json\n'), ((3930, 3970), 'langchain.llms.Cohere', 'Cohere', ([], {'model': 'model_name', 'max_tokens': '(700)'}), '(model=model_name, max_tokens=700)\n', (3936, 3970), False, 'from langchain.llms import Cohere\n'), ((5024, 5086), 'Summary.Summary.summarizeText', 'Summary.summarizeText', (['wordSalad'], {'min_length': '(10)', 'max_length': '(20)'}), '(wordSalad, min_length=10, max_length=20)\n', (5045, 5086), False, 'from Summary import Summary\n'), ((6992, 7003), 'time.time', 
'time.time', ([], {}), '()\n', (7001, 7003), False, 'import time\n'), ((7398, 7532), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""This chat bot is intended to provide helpful information, but accuracy is not guaranteed."""'], {}), "('en', lang,\n 'This chat bot is intended to provide helpful information, but accuracy is not guaranteed.'\n )\n", (7418, 7532), False, 'from translator import Translator\n'), ((7636, 7684), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', '"""Who are you?"""'], {}), "('en', lang, 'Who are you?')\n", (7656, 7684), False, 'from translator import Translator\n'), ((7971, 7998), 'translator.Translator.detect', 'Translator.detect', (['question'], {}), '(question)\n', (7988, 7998), False, 'from translator import Translator\n'), ((8044, 8086), 'translator.Translator.translate', 'Translator.translate', (['lang', '"""en"""', 'question'], {}), "(lang, 'en', question)\n", (8064, 8086), False, 'from translator import Translator\n'), ((8290, 8330), 'json.dumps', 'json.dumps', (["{'error': 'Session expired'}"], {}), "({'error': 'Session expired'})\n", (8300, 8330), False, 'import json\n'), ((8499, 8554), 'translator.Translator.translate', 'Translator.translate', (['"""en"""', 'lang', "output['output_text']"], {}), "('en', lang, output['output_text'])\n", (8519, 8554), False, 'from translator import Translator\n'), ((8785, 8816), 'json.dumps', 'json.dumps', (["{'error': errorStr}"], {}), "({'error': errorStr})\n", (8795, 8816), False, 'import json\n'), ((2241, 2252), 'time.time', 'time.time', ([], {}), '()\n', (2250, 2252), False, 'import time\n'), ((4072, 4111), 'langchain.llms.AI21', 'AI21', ([], {'temperature': '(0.7)', 'model': 'model_name'}), '(temperature=0.7, model=model_name)\n', (4076, 4111), False, 'from langchain.llms import AI21\n'), ((6010, 6021), 'time.time', 'time.time', ([], {}), '()\n', (6019, 6021), False, 'import time\n'), ((6267, 6292), 'secrets.choice', 'secrets.choice', (['hex_chars'], {}), '(hex_chars)\n', (6281, 6292), False, 'import secrets\n'), ((6911, 6922), 'time.time', 'time.time', ([], {}), '()\n', (6920, 6922), False, 'import time\n'), ((8682, 8704), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (8702, 8704), False, 'import traceback\n'), ((4193, 4224), 'langchain.llms.NLPCloud', 'NLPCloud', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (4201, 4224), False, 'from langchain.llms import NLPCloud\n'), ((6201, 6212), 'time.time', 'time.time', ([], {}), '()\n', (6210, 6212), False, 'import time\n')] |
import os
import streamlit as st
from PyPDF2 import PdfReader
import langchain
langchain.verbose = False
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains.question_answering import load_qa_chain
from langchain.llms import OpenAI
from langchain.callbacks import get_openai_callback
import requests
from bs4 import BeautifulSoup
def webscrap(name):
# Replace this URL with the one you want to scrape
url = f'https://www.{name}.com'
response = requests.get(url)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'html.parser')
page_text = soup.get_text()
return page_text
else:
return None
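# Streamlit entry point: scrape the requested site, split the page text into
# fixed-size chunks, embed the chunks into a FAISS index, and answer questions
# with a "stuff" QA chain over the most similar chunks.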
def main():
print(os.getenv('OPENAI_API_KEY'))
st.set_page_config(page_title="Webscrap chatbot")
st.header("Webscrap chatbot")
name = st.text_input("enter website name")
web_data= webscrap(name)
if web_data is not None:
text = web_data
# for page in pdf_reader.pages:
# text += page.extract_text()
max_length = 1800
original_string = text
temp_string = ""
strings_list = []
for character in original_string:
if len(temp_string) < max_length:
temp_string += character
else:
strings_list.append(temp_string)
temp_string = ""
if temp_string:
strings_list.append(temp_string)
#split into chunks
# create embeddings
embeddings = OpenAIEmbeddings()
knowledge_base = FAISS.from_texts(strings_list, embedding=embeddings)
user_question = st.text_input("Ask a question about your PDF")
if user_question:
            docs = knowledge_base.similarity_search(user_question)
            llm = OpenAI(model_name="gpt-3.5-turbo", temperature=0.9)
            chain = load_qa_chain(llm, chain_type="stuff")
            with get_openai_callback() as cb:
                response = chain.run(input_documents=docs, question=user_question)
                print(cb)
            st.write(response)
if __name__ == '__main__':
main()
| [
"langchain.chains.question_answering.load_qa_chain",
"langchain.llms.OpenAI",
"langchain.callbacks.get_openai_callback",
"langchain.vectorstores.FAISS.from_texts",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((583, 600), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (595, 600), False, 'import requests\n'), ((853, 902), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Webscrap chatbot"""'}), "(page_title='Webscrap chatbot')\n", (871, 902), True, 'import streamlit as st\n'), ((907, 936), 'streamlit.header', 'st.header', (['"""Webscrap chatbot"""'], {}), "('Webscrap chatbot')\n", (916, 936), True, 'import streamlit as st\n'), ((949, 984), 'streamlit.text_input', 'st.text_input', (['"""enter website name"""'], {}), "('enter website name')\n", (962, 984), True, 'import streamlit as st\n'), ((653, 696), 'bs4.BeautifulSoup', 'BeautifulSoup', (['response.text', '"""html.parser"""'], {}), "(response.text, 'html.parser')\n", (666, 696), False, 'from bs4 import BeautifulSoup\n'), ((815, 842), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (824, 842), False, 'import os\n'), ((1660, 1678), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1676, 1678), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1704, 1756), 'langchain.vectorstores.FAISS.from_texts', 'FAISS.from_texts', (['strings_list'], {'embedding': 'embeddings'}), '(strings_list, embedding=embeddings)\n', (1720, 1756), False, 'from langchain.vectorstores import FAISS\n'), ((1782, 1828), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about your PDF"""'], {}), "('Ask a question about your PDF')\n", (1795, 1828), True, 'import streamlit as st\n'), ((1954, 2005), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0.9)'}), "(model_name='gpt-3.5-turbo', temperature=0.9)\n", (1960, 2005), False, 'from langchain.llms import OpenAI\n'), ((2027, 2065), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (2040, 2065), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((2238, 2256), 'streamlit.write', 'st.write', (['response'], {}), '(response)\n', (2246, 2256), True, 'import streamlit as st\n'), ((2083, 2104), 'langchain.callbacks.get_openai_callback', 'get_openai_callback', ([], {}), '()\n', (2102, 2104), False, 'from langchain.callbacks import get_openai_callback\n')] |
# Wrapper for Hugging Face APIs for llmlib
from llmlib.base_model_wrapper import BaseModelWrapper
from llama_index import ListIndex, SimpleDirectoryReader
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding, ServiceContext
from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader
from transformers import pipeline
import torch
from langchain.llms.base import LLM
from llama_index import LLMPredictor
from pprint import pprint
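# Minimal LangChain LLM wrapper around a local Hugging Face text-generation
# pipeline; _call() strips the prompt from the generated text so only the newly
# generated tokens are returned.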
class CustomLLM(LLM):
model_name = "facebook/opt-iml-1.3b"
# I am not using a GPU, but you can add device="cuda:0"
# to the pipeline call if you have a local GPU or
# are running this on Google Colab:
pipeline = pipeline("text-generation",
model=model_name,
model_kwargs={"torch_dtype":torch.bfloat16})
def _call(self, prompt, stop = None):
prompt_length = len(prompt)
response = self.pipeline(prompt, max_new_tokens=200)
pprint(response)
first_response = response[0]["generated_text"]
# only return newly generated tokens
returned_text = first_response[prompt_length:]
return returned_text
@property
def _identifying_params(self):
return {"name_of_model": self.model_name}
@property
def _llm_type(self):
return "custom"
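# llmlib wrapper that plugs the custom LLM and Hugging Face embeddings into a
# llama_index ServiceContext, and builds a local ListIndex over a directory of
# documents for querying.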
class HuggingFaceAiWrapper(BaseModelWrapper):
def __init__(self, key=None, embeddings_dir="./db_embeddings"):
super().__init__(embeddings_dir=embeddings_dir)
self.llm_predictor = LLMPredictor(llm=CustomLLM())
self.embed_model = LangchainEmbedding(HuggingFaceEmbeddings())
self.service_context = \
ServiceContext.from_defaults(llm_predictor=self.llm_predictor,
embed_model=self.embed_model)
max_input_size = 512
num_output = 64
max_chunk_overlap = 0 # 10
self.prompt_helper = PromptHelper(max_input_size, num_output,
max_chunk_overlap)
self.pipeline = None
# complete text:
def get_completion(self, prompt, max_tokens=64):
if self.pipeline is None:
self.pipeline = pipeline("text-generation",
model="facebook/opt-iml-1.3b",
model_kwargs={"torch_dtype":torch.bfloat16})
c = self.pipeline(prompt, max_new_tokens=max_tokens)
pprint(c)
try:
return c[0]["generated_text"]
except Exception as e:
print(e)
return ""
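    # Build a llama_index ListIndex from every file in the given directory and
    # expose it as a query engine backed by the local predictor.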
def create_local_embeddings_files_in_dir(self, path):
" path is a directory "
self.documents = SimpleDirectoryReader(path).load_data()
self.index = ListIndex.from_documents(documents=self.documents,
llm_predictor=self.llm_predictor,
prompt_helper=self.prompt_helper)
self.index = self.index.as_query_engine(llm_predictor=self.llm_predictor)
# query local embeddings:
def query_local_embeddings(self, query, n=10):
answer = self.index.query(query)
return answer | [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((735, 830), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': 'model_name', 'model_kwargs': "{'torch_dtype': torch.bfloat16}"}), "('text-generation', model=model_name, model_kwargs={'torch_dtype':\n torch.bfloat16})\n", (743, 830), False, 'from transformers import pipeline\n'), ((1022, 1038), 'pprint.pprint', 'pprint', (['response'], {}), '(response)\n', (1028, 1038), False, 'from pprint import pprint\n'), ((1731, 1828), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'self.llm_predictor', 'embed_model': 'self.embed_model'}), '(llm_predictor=self.llm_predictor, embed_model=\n self.embed_model)\n', (1759, 1828), False, 'from llama_index import LangchainEmbedding, ServiceContext\n'), ((1980, 2039), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (1992, 2039), False, 'from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader\n'), ((2498, 2507), 'pprint.pprint', 'pprint', (['c'], {}), '(c)\n', (2504, 2507), False, 'from pprint import pprint\n'), ((2823, 2946), 'llama_index.ListIndex.from_documents', 'ListIndex.from_documents', ([], {'documents': 'self.documents', 'llm_predictor': 'self.llm_predictor', 'prompt_helper': 'self.prompt_helper'}), '(documents=self.documents, llm_predictor=self.\n llm_predictor, prompt_helper=self.prompt_helper)\n', (2847, 2946), False, 'from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader\n'), ((1663, 1686), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {}), '()\n', (1684, 1686), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((2248, 2357), 'transformers.pipeline', 'pipeline', (['"""text-generation"""'], {'model': '"""facebook/opt-iml-1.3b"""', 'model_kwargs': "{'torch_dtype': torch.bfloat16}"}), "('text-generation', model='facebook/opt-iml-1.3b', model_kwargs={\n 'torch_dtype': torch.bfloat16})\n", (2256, 2357), False, 'from transformers import pipeline\n'), ((2762, 2789), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['path'], {}), '(path)\n', (2783, 2789), False, 'from llama_index import ListIndex, PromptHelper, SimpleDirectoryReader\n')] |
import logging
import ConsoleInterface
import langchain.schema
from langchain.agents import initialize_agent, AgentType #create_pandas_dataframe_agent
logger = logging.getLogger('ConsoleInterface')
'''
def PandasDataframeAgent(llm, Dataframe):
"""
Create a PandasDataframeAgent object.
Parameters:
llm (str): The llm parameter.
Dataframe (pandas.DataFrame): The DataFrame parameter.
Returns:
PandasDataframeAgent: The created PandasDataframeAgent object.
"""
PandasDataframeAgent = create_pandas_dataframe_agent(llm, df=Dataframe, verbose=True)
return PandasDataframeAgent
'''
def RunConversationalAgent(llm, Tools, Memory):
"""
Run the conversational agent.
Args:
llm: The language model used by the agent.
Tools: The tools available to the agent.
Memory: The memory used by the agent.
Returns:
None
"""
Agent = initialize_agent(agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, llm=llm, tools=Tools, memory=Memory, verbose=True)
logger.info("Agent initialized successfully!\n")
while True:
query = input("Enter your query: ")
if query.lower() == "exit" or query.lower() == "quit":
break
try:
Agent.run(str(query))
except langchain.schema.OutputParserException as e:
# Extract the message from the exception
message = str(e)
# The message is in the form "Could not parse LLM output: `...`"
# So, we can split it by the backticks and take the second element
answer = message.split('`')[1]
            logger.warning("\nError occurred while retrieving the answer from the language model. Please check your query and try again. The answer stored in the error message will be printed:\n")
            logger.warning("\nAnswer: %s", answer)
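# Hypothetical wiring sketch (not part of this module): the caller is expected to
# construct the language model, tools, and memory before invoking the agent, e.g.
#
#   from langchain.chat_models import ChatOpenAI
#   from langchain.agents import load_tools
#   from langchain.memory import ConversationBufferMemory
#
#   llm = ChatOpenAI(temperature=0)
#   tools = load_tools(["llm-math"], llm=llm)
#   memory = ConversationBufferMemory(memory_key="chat_history")
#   RunConversationalAgent(llm, tools, memory)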
"langchain.agents.initialize_agent"
] | [((165, 202), 'logging.getLogger', 'logging.getLogger', (['"""ConsoleInterface"""'], {}), "('ConsoleInterface')\n", (182, 202), False, 'import logging\n'), ((946, 1067), 'langchain.agents.initialize_agent', 'initialize_agent', ([], {'agent': 'AgentType.CONVERSATIONAL_REACT_DESCRIPTION', 'llm': 'llm', 'tools': 'Tools', 'memory': 'Memory', 'verbose': '(True)'}), '(agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION, llm=llm,\n tools=Tools, memory=Memory, verbose=True)\n', (962, 1067), False, 'from langchain.agents import initialize_agent, AgentType\n')] |
import csv
from ctypes import Array
from typing import Any, Coroutine, List, Tuple
import io
import time
import re
import os
from fastapi import UploadFile
import asyncio
import langchain
from langchain.chat_models import ChatOpenAI
from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent
from langchain.tools import HumanInputRun, PythonAstREPLTool
from langchain.callbacks.tracers import ConsoleCallbackHandler
from langchain.callbacks import HumanApprovalCallbackHandler
from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory
from langchain import PromptTemplate
import pandas as pd
from langchain.output_parsers import PydanticOutputParser, OutputFixingParser
from util.tools import SessionHumanInputRun
import util.config as config
from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue
import redis
r = redis.from_url(os.environ.get("REDIS_URL"))
#r = redis.from_url('redis://:password@localhost:6379')
class Processor:
def __init__(self, session):
self.session = session
async def extract_csv_description(self, df: UploadFile|str, llm, memory) -> Coroutine[Any, Any, Tuple[pd.DataFrame, str]] :
df = pd.read_csv(df)
agent = create_pandas_dataframe_agent(llm=llm,df=df, agent_executor_kwargs={'handle_parsing_errors':True, 'memory':memory},
early_stopping_method="generate", verbose=True,
temperature=0,agent_type=AgentType.OPENAI_FUNCTIONS,)
descriptions = agent.run("""Describe what is the column name of each of the column table in detail in the following format:
<name of column 1>: <description of column 1>\n
<name of column 2>: <description of column 2>""", callbacks=[ConsoleCallbackHandler()])
return df, descriptions
async def _human_prompt(prompt, session):
r.publish(f'human_prompt_{session}', prompt)
async def _human_input(session):
p = r.pubsub(ignore_subscribe_messages=True)
p.subscribe(f'human_input_{session}')
message = None
while True:
message = p.get_message()
if message and message['type']=='message':
break
print("waiting for human input")
await asyncio.sleep(1)
return message['data'].decode('utf-8')
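    # Human-in-the-loop transport: prompts are published to the Redis channel
    # human_prompt_<session> and answers are polled from human_input_<session>,
    # so a web front end can relay questions to the user and push replies back.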
async def process_files(self, table_file, template_file, file_guid):
table_string = table_file.decode('utf-8')
template_string = template_file.decode('utf-8')
llm = ChatOpenAI(openai_api_key=config.OPENAI_API_KEY, temperature=0, model="gpt-3.5-turbo-0613", )
memory = ConversationSummaryBufferMemory(llm=llm,memory_key="chat_history", return_messages=True, max_token_limit=1500)
table_df, table_descriptions = await self.extract_csv_description(io.StringIO(table_string), llm, memory=memory)
r.publish(f'{self.session}_response', 'table_descriptions')
r.publish(f'{self.session}_response', table_descriptions)
template_df, template_descriptions = await self.extract_csv_description(io.StringIO(template_string), llm, memory=memory)
r.publish(f'{self.session}_response', 'template_descriptions')
r.publish(f'{self.session}_response', template_descriptions)
dfs =[table_df, template_df]
human_tool = SessionHumanInputRun(session=self.session)
human_tool.description = '''
Use this tool to take human input.
If the mapping is ambiguous, ask 'human' a question with options in the following format.
Make the human confirm the mapping by selecting the appropriate number.
- Question: The template column <template column name> should be mapped to which one of the table columns
(1: <table column name 1>, 2: <table column name 2> (Recommended), 3:<table column name 3>, ...)? Select the appropriate number or specify the column name.
'''
human_tool.prompt_func= Processor._human_prompt
human_tool.input_func = Processor._human_input
mappings = await self.get_mappings(llm, table_descriptions, template_descriptions, human_tool)
codes = await self.get_template_formatting_code(llm, table_df, template_df, human_tool, mappings, memory)
new_table_df = table_df.loc[:,[code.table_column for code in codes]]
for code in codes:
            # Assign the transformed column back; apply() alone would discard the result
            new_table_df[code.table_column] = new_table_df[code.table_column].apply(lambda x: self.format_value(x, code=code.code))
r.set(f"{self.session}_{file_guid}", new_table_df.to_msgpack(compress='zlib'))
r.publish(f'{self.session}_response', f'file_guid:{file_guid}')
def format_value(self, source_value, code):
value = TransformValue(source=source_value,destination=source_value)
try:
exec(code, {'value':value})
except Exception as e:
r.publish(f'{self.session}_response',f'ERROR: \nCode: \n {code} \n Failed with error: \n{e}')
print(e)
return value.destination
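    # Ask the LLM (with optional human confirmation) to map every template column
    # to a table column, then repair the raw answer into a TemplateMappingList via
    # the OutputFixingParser.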
async def get_mappings(self,llm, table_descriptions, template_descriptions, human_tool):
parser = PydanticOutputParser(pydantic_object=TemplateMappingList)
new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm)
agent = initialize_agent(
[human_tool],
llm,
agent=AgentType.OPENAI_FUNCTIONS,
handle_parsing_errors=True,
early_stopping_method="force",
temperature=0.3,
output_parser=new_parser,
)
descriptions = await agent.arun("""Map all the columns of the Template descriptions to columns of the table Descriptions:
- Table Descriptions:
""" + table_descriptions + """
- Template Descriptions:
""" + template_descriptions + """
Use the table and template descriptions above to determine the mapping based on similarity, formats and distribution.
If the table column names are ambiguous take human input.
""",callbacks=[ConsoleCallbackHandler()],)
print(descriptions)
mappings = new_parser.parse(descriptions)
return mappings
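    # For every confirmed mapping, ask the agent (armed with a Python REPL tool and
    # sample values) to emit a ```python``` snippet that converts table values into
    # the template's format; the human can edit the snippet before it is accepted.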
async def get_template_formatting_code(self, llm, table_df, template_df, human_tool, mappings: TemplateMappingList, memory):
dfs = []
dfs.append(table_df)
dfs.append(template_df)
df_locals = {}
df_locals[f"table_df"] = table_df
df_locals[f"template_df"] = template_df
parser = PydanticOutputParser(pydantic_object=TemplateMappingCode)
new_parser = OutputFixingParser.from_llm(parser=parser,llm=llm)
codes=[]
#The code should be in the format of a Python function taking as input a string and returning a string.
for mapping in mappings.template_mappings:
human_tool.description = f'''
Use this tool to get human approval. Always show the samples and code. The human can edit the code and approve it.
'''
table_df_samples = table_df[mapping.table_column].sample(5).to_list()
template_df_samples = template_df[mapping.template_column].sample(5).to_list()
agent = initialize_agent(
[PythonAstREPLTool(locals=df_locals)],
llm,
agent=AgentType.OPENAI_FUNCTIONS,
handle_parsing_errors=True,
early_stopping_method="force",
temperature=0.3,
output_parser=new_parser,
memory = memory,
memory_key = 'chat_history'
)
#The AI can determine the format of the column values only after sampling.
#As shown in the output below, generate the code as a Python function taking as input a string and returning a string and also include a call to the generated function.
code = agent.run(f'''Provide the code to bring the format of values in table_df column '{mapping.table_column}'
to the format of values in template_df column '{mapping.template_column}' based off the values, data types and formats.
Additional samples to be used to generate the code:
'{mapping.table_column}' sample values: [{table_df_samples}]
'{mapping.template_column}' samples values: [{template_df_samples}]
The input to the code will be a value object with the following attributes:
- source: The value of the table_df column '{mapping.table_column}'.
- destination: The value of the template_df column '{mapping.template_column}'.
Show the sample values using which the code is generated.
For example, for date columns, they may be in different formats, and it is necessary to change the format from dd.mm.yyyy to mm.dd.yyyy.
Final Answer:
```
```python
def format_value(source_value):
<code to transform source_value into destination_value>
return destination_value
value.destination = format_value(value.source)
```
```
Final Answer should contain the samples and code.
''', callbacks=[ConsoleCallbackHandler(), ])
print(code)
human_code = await human_tool.arun(code + '\nSpecify the code with ```python``` tags.')
regex = r"```python((.|\n|\t)*?)```"
code = human_code if re.match(regex, human_code) else code
matches = re.findall(regex, code)
code = ''
for match in matches:
code = code + '\n'+ '\n'.join(match)
codes.append(TemplateMappingCode(template_column=mapping.template_column,
table_column=mapping.table_column,
code=code))
return codes | [
"langchain.agents.initialize_agent",
"langchain.memory.ConversationSummaryBufferMemory",
"langchain.output_parsers.PydanticOutputParser",
"langchain.tools.PythonAstREPLTool",
"langchain.agents.create_pandas_dataframe_agent",
"langchain.chat_models.ChatOpenAI",
"langchain.callbacks.tracers.ConsoleCallbackHandler",
"langchain.output_parsers.OutputFixingParser.from_llm"
] | [((963, 990), 'os.environ.get', 'os.environ.get', (['"""REDIS_URL"""'], {}), "('REDIS_URL')\n", (977, 990), False, 'import os\n'), ((1270, 1285), 'pandas.read_csv', 'pd.read_csv', (['df'], {}), '(df)\n', (1281, 1285), True, 'import pandas as pd\n'), ((1302, 1537), 'langchain.agents.create_pandas_dataframe_agent', 'create_pandas_dataframe_agent', ([], {'llm': 'llm', 'df': 'df', 'agent_executor_kwargs': "{'handle_parsing_errors': True, 'memory': memory}", 'early_stopping_method': '"""generate"""', 'verbose': '(True)', 'temperature': '(0)', 'agent_type': 'AgentType.OPENAI_FUNCTIONS'}), "(llm=llm, df=df, agent_executor_kwargs={\n 'handle_parsing_errors': True, 'memory': memory}, early_stopping_method\n ='generate', verbose=True, temperature=0, agent_type=AgentType.\n OPENAI_FUNCTIONS)\n", (1331, 1537), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((2683, 2779), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'config.OPENAI_API_KEY', 'temperature': '(0)', 'model': '"""gpt-3.5-turbo-0613"""'}), "(openai_api_key=config.OPENAI_API_KEY, temperature=0, model=\n 'gpt-3.5-turbo-0613')\n", (2693, 2779), False, 'from langchain.chat_models import ChatOpenAI\n'), ((2794, 2909), 'langchain.memory.ConversationSummaryBufferMemory', 'ConversationSummaryBufferMemory', ([], {'llm': 'llm', 'memory_key': '"""chat_history"""', 'return_messages': '(True)', 'max_token_limit': '(1500)'}), "(llm=llm, memory_key='chat_history',\n return_messages=True, max_token_limit=1500)\n", (2825, 2909), False, 'from langchain.memory import ConversationBufferMemory, ConversationSummaryBufferMemory\n'), ((3498, 3540), 'util.tools.SessionHumanInputRun', 'SessionHumanInputRun', ([], {'session': 'self.session'}), '(session=self.session)\n', (3518, 3540), False, 'from util.tools import SessionHumanInputRun\n'), ((4868, 4929), 'util.model.TransformValue', 'TransformValue', ([], {'source': 'source_value', 'destination': 'source_value'}), '(source=source_value, destination=source_value)\n', (4882, 4929), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((5288, 5345), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingList'}), '(pydantic_object=TemplateMappingList)\n', (5308, 5345), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5367, 5418), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (5394, 5418), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((5434, 5614), 'langchain.agents.initialize_agent', 'initialize_agent', (['[human_tool]', 'llm'], {'agent': 'AgentType.OPENAI_FUNCTIONS', 'handle_parsing_errors': '(True)', 'early_stopping_method': '"""force"""', 'temperature': '(0.3)', 'output_parser': 'new_parser'}), "([human_tool], llm, agent=AgentType.OPENAI_FUNCTIONS,\n handle_parsing_errors=True, early_stopping_method='force', temperature=\n 0.3, output_parser=new_parser)\n", (5450, 5614), False, 'from langchain.agents import create_csv_agent, load_tools, initialize_agent, AgentType, create_pandas_dataframe_agent\n'), ((6899, 6956), 'langchain.output_parsers.PydanticOutputParser', 'PydanticOutputParser', ([], {'pydantic_object': 'TemplateMappingCode'}), '(pydantic_object=TemplateMappingCode)\n', (6919, 6956), 
False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((6978, 7029), 'langchain.output_parsers.OutputFixingParser.from_llm', 'OutputFixingParser.from_llm', ([], {'parser': 'parser', 'llm': 'llm'}), '(parser=parser, llm=llm)\n', (7005, 7029), False, 'from langchain.output_parsers import PydanticOutputParser, OutputFixingParser\n'), ((10532, 10555), 're.findall', 're.findall', (['regex', 'code'], {}), '(regex, code)\n', (10542, 10555), False, 'import re\n'), ((2425, 2441), 'asyncio.sleep', 'asyncio.sleep', (['(1)'], {}), '(1)\n', (2438, 2441), False, 'import asyncio\n'), ((2988, 3013), 'io.StringIO', 'io.StringIO', (['table_string'], {}), '(table_string)\n', (2999, 3013), False, 'import io\n'), ((3250, 3278), 'io.StringIO', 'io.StringIO', (['template_string'], {}), '(template_string)\n', (3261, 3278), False, 'import io\n'), ((10459, 10486), 're.match', 're.match', (['regex', 'human_code'], {}), '(regex, human_code)\n', (10467, 10486), False, 'import re\n'), ((10690, 10801), 'util.model.TemplateMappingCode', 'TemplateMappingCode', ([], {'template_column': 'mapping.template_column', 'table_column': 'mapping.table_column', 'code': 'code'}), '(template_column=mapping.template_column, table_column=\n mapping.table_column, code=code)\n', (10709, 10801), False, 'from util.model import TemplateMappingList, TemplateMapping, TemplateMappingCode, TransformValue\n'), ((1905, 1929), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (1927, 1929), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((7662, 7697), 'langchain.tools.PythonAstREPLTool', 'PythonAstREPLTool', ([], {'locals': 'df_locals'}), '(locals=df_locals)\n', (7679, 7697), False, 'from langchain.tools import HumanInputRun, PythonAstREPLTool\n'), ((6408, 6432), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (6430, 6432), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n'), ((10224, 10248), 'langchain.callbacks.tracers.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (10246, 10248), False, 'from langchain.callbacks.tracers import ConsoleCallbackHandler\n')] |
from typing import Dict, List, Optional
from langchain.agents.load_tools import (
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
)
from langflow.custom import customs
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.tools.constants import (
ALL_TOOLS_NAMES,
CUSTOM_TOOLS,
FILE_TOOLS,
OTHER_TOOLS,
)
from langflow.interface.tools.util import get_tool_params
from langflow.settings import settings
from langflow.template.field.base import TemplateField
from langflow.template.template.base import Template
from langflow.utils import util
from langflow.utils.util import build_template_from_class
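# Default template fields for the parameters a tool node can expose; each entry
# is copied and renamed when the tool's signature is built below.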
TOOL_INPUTS = {
"str": TemplateField(
field_type="str",
required=True,
is_list=False,
show=True,
placeholder="",
value="",
),
"llm": TemplateField(
field_type="BaseLanguageModel", required=True, is_list=False, show=True
),
"func": TemplateField(
field_type="function",
required=True,
is_list=False,
show=True,
multiline=True,
),
"code": TemplateField(
field_type="str",
required=True,
is_list=False,
show=True,
value="",
multiline=True,
),
"path": TemplateField(
field_type="file",
required=True,
is_list=False,
show=True,
value="",
suffixes=[".json", ".yaml", ".yml"],
fileTypes=["json", "yaml", "yml"],
),
}
class ToolCreator(LangChainTypeCreator):
type_name: str = "tools"
tools_dict: Optional[Dict] = None
@property
def type_to_loader_dict(self) -> Dict:
if self.tools_dict is None:
all_tools = {}
for tool, tool_fcn in ALL_TOOLS_NAMES.items():
tool_params = get_tool_params(tool_fcn)
tool_name = tool_params.get("name") or tool
if tool_name in settings.tools or settings.dev:
if tool_name == "JsonSpec":
tool_params["path"] = tool_params.pop("dict_") # type: ignore
all_tools[tool_name] = {
"type": tool,
"params": tool_params,
"fcn": tool_fcn,
}
self.tools_dict = all_tools
return self.tools_dict
def get_signature(self, name: str) -> Optional[Dict]:
"""Get the signature of a tool."""
base_classes = ["Tool", "BaseTool"]
fields = []
params = []
tool_params = {}
# Raise error if name is not in tools
if name not in self.type_to_loader_dict.keys():
raise ValueError("Tool not found")
tool_type: str = self.type_to_loader_dict[name]["type"] # type: ignore
# if tool_type in _BASE_TOOLS.keys():
# params = []
if tool_type in _LLM_TOOLS.keys():
params = ["llm"]
elif tool_type in _EXTRA_LLM_TOOLS.keys():
extra_keys = _EXTRA_LLM_TOOLS[tool_type][1]
params = ["llm"] + extra_keys
elif tool_type in _EXTRA_OPTIONAL_TOOLS.keys():
extra_keys = _EXTRA_OPTIONAL_TOOLS[tool_type][1]
params = extra_keys
# elif tool_type == "Tool":
# params = ["name", "description", "func"]
elif tool_type in CUSTOM_TOOLS:
# Get custom tool params
params = self.type_to_loader_dict[name]["params"] # type: ignore
base_classes = ["function"]
if node := customs.get_custom_nodes("tools").get(tool_type):
return node
elif tool_type in FILE_TOOLS:
params = self.type_to_loader_dict[name]["params"] # type: ignore
base_classes += [name]
elif tool_type in OTHER_TOOLS:
tool_dict = build_template_from_class(tool_type, OTHER_TOOLS)
fields = tool_dict["template"]
# Pop unnecessary fields and add name
fields.pop("_type") # type: ignore
fields.pop("return_direct") # type: ignore
fields.pop("verbose") # type: ignore
tool_params = {
"name": fields.pop("name")["value"], # type: ignore
"description": fields.pop("description")["value"], # type: ignore
}
fields = [
TemplateField(name=name, field_type=field["type"], **field)
for name, field in fields.items() # type: ignore
]
base_classes += tool_dict["base_classes"]
# Copy the field and add the name
for param in params:
field = TOOL_INPUTS.get(param, TOOL_INPUTS["str"]).copy()
field.name = param
field.advanced = False
if param == "aiosession":
field.show = False
field.required = False
fields.append(field)
template = Template(fields=fields, type_name=tool_type)
tool_params = {**tool_params, **self.type_to_loader_dict[name]["params"]}
return {
"template": util.format_dict(template.to_dict()),
**tool_params,
"base_classes": base_classes,
}
def to_list(self) -> List[str]:
"""List all load tools"""
return list(self.type_to_loader_dict.keys())
tool_creator = ToolCreator()
| [
"langchain.agents.load_tools._LLM_TOOLS.keys",
"langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys",
"langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys"
] | [((690, 792), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'placeholder': '""""""', 'value': '""""""'}), "(field_type='str', required=True, is_list=False, show=True,\n placeholder='', value='')\n", (703, 792), False, 'from langflow.template.field.base import TemplateField\n'), ((856, 946), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""BaseLanguageModel"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)'}), "(field_type='BaseLanguageModel', required=True, is_list=False,\n show=True)\n", (869, 946), False, 'from langflow.template.field.base import TemplateField\n'), ((970, 1068), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""function"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'multiline': '(True)'}), "(field_type='function', required=True, is_list=False, show=\n True, multiline=True)\n", (983, 1068), False, 'from langflow.template.field.base import TemplateField\n'), ((1124, 1226), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'value': '""""""', 'multiline': '(True)'}), "(field_type='str', required=True, is_list=False, show=True,\n value='', multiline=True)\n", (1137, 1226), False, 'from langflow.template.field.base import TemplateField\n'), ((1291, 1454), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""file"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'value': '""""""', 'suffixes': "['.json', '.yaml', '.yml']", 'fileTypes': "['json', 'yaml', 'yml']"}), "(field_type='file', required=True, is_list=False, show=True,\n value='', suffixes=['.json', '.yaml', '.yml'], fileTypes=['json',\n 'yaml', 'yml'])\n", (1304, 1454), False, 'from langflow.template.field.base import TemplateField\n'), ((4975, 5019), 'langflow.template.template.base.Template', 'Template', ([], {'fields': 'fields', 'type_name': 'tool_type'}), '(fields=fields, type_name=tool_type)\n', (4983, 5019), False, 'from langflow.template.template.base import Template\n'), ((1779, 1802), 'langflow.interface.tools.constants.ALL_TOOLS_NAMES.items', 'ALL_TOOLS_NAMES.items', ([], {}), '()\n', (1800, 1802), False, 'from langflow.interface.tools.constants import ALL_TOOLS_NAMES, CUSTOM_TOOLS, FILE_TOOLS, OTHER_TOOLS\n'), ((2927, 2944), 'langchain.agents.load_tools._LLM_TOOLS.keys', '_LLM_TOOLS.keys', ([], {}), '()\n', (2942, 2944), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((1834, 1859), 'langflow.interface.tools.util.get_tool_params', 'get_tool_params', (['tool_fcn'], {}), '(tool_fcn)\n', (1849, 1859), False, 'from langflow.interface.tools.util import get_tool_params\n'), ((3001, 3024), 'langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys', '_EXTRA_LLM_TOOLS.keys', ([], {}), '()\n', (3022, 3024), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((3150, 3178), 'langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys', '_EXTRA_OPTIONAL_TOOLS.keys', ([], {}), '()\n', (3176, 3178), False, 'from langchain.agents.load_tools import _EXTRA_LLM_TOOLS, _EXTRA_OPTIONAL_TOOLS, _LLM_TOOLS\n'), ((3874, 3923), 'langflow.utils.util.build_template_from_class', 'build_template_from_class', (['tool_type', 'OTHER_TOOLS'], {}), '(tool_type, 
OTHER_TOOLS)\n', (3899, 3923), False, 'from langflow.utils.util import build_template_from_class\n'), ((3582, 3615), 'langflow.custom.customs.get_custom_nodes', 'customs.get_custom_nodes', (['"""tools"""'], {}), "('tools')\n", (3606, 3615), False, 'from langflow.custom import customs\n'), ((4407, 4466), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'name': 'name', 'field_type': "field['type']"}), "(name=name, field_type=field['type'], **field)\n", (4420, 4466), False, 'from langflow.template.field.base import TemplateField\n')] |
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.question_answering import load_qa_chain
from langchain.embeddings.openai import OpenAIEmbeddings
from streamlit_option_menu import option_menu
from deep_translator import GoogleTranslator
from langchain.vectorstores import Pinecone
import streamlit_authenticator as stauth
from yaml.loader import SafeLoader
from langchain.llms import OpenAI
from dotenv import load_dotenv
from langchain import OpenAI
from PyPDF2 import PdfReader
import streamlit as st
import langchain
load_dotenv()
import pinecone
import openai
import time
import yaml
import os
# Initialization
pinecone.init(api_key="db6b2a8c-d59e-48e1-8d5c-4c2704622937",environment="gcp-starter")
llm=OpenAI(model_name="gpt-3.5-turbo-instruct")
chain=load_qa_chain(llm,chain_type="stuff")
index_name="langchainvector"
# Home Page
def home():
st.title("This is my Home page")
# Login Page
def login():
st.title("Login page")
with open('./config.yaml') as file:
config = yaml.load(file, Loader=SafeLoader)
authenticator = stauth.Authenticate(
config['credentials'],
config['cookie']['name'],
config['cookie']['key'],
config['cookie']['expiry_days'],
config['preauthorized']
)
authenticator.login('Login', location = 'main')
if st.session_state["authentication_status"]:
st.title(f'Welcome *{st.session_state["name"]}*')
st.subheader('Click on the Chat to upload document and access AI chatbot')
user_name = st.session_state["name"]
parent = os.getcwd()
path = os.path.join(parent, user_name)
if not os.path.exists(path):
os.mkdir(path)
with st.sidebar:
authenticator.logout("Logout", "sidebar")
elif st.session_state["authentication_status"] is False:
st.error('Username/password is incorrect')
elif st.session_state["authentication_status"] is None:
st.warning('Please enter your username and password')
# Register Page
def register():
st.title("Register page")
with open('./config.yaml') as file:
config = yaml.load(file, Loader=SafeLoader)
authenticator = stauth.Authenticate(
config['credentials'],
config['cookie']['name'],
config['cookie']['key'],
config['cookie']['expiry_days'],
config['preauthorized']
)
if authenticator.register_user('Register user', preauthorization=False):
st.success('User registration successfully')
with open('./config.yaml', 'a') as file:
yaml.dump(config, file, default_flow_style=False)
def forgot_pass():
with open('./config.yaml') as file:
config = yaml.load(file, Loader=SafeLoader)
authenticator = stauth.Authenticate(
config['credentials'],
config['cookie']['name'],
config['cookie']['key'],
config['cookie']['expiry_days'],
config['preauthorized']
)
username_forgot_pw, email, random_password = authenticator.forgot_password('Forgot password')
if username_forgot_pw:
st.success(f'New random password is : {random_password}.. Change it in next login')
elif username_forgot_pw == False:
st.error('Username not found')
with open('./config.yaml', 'w') as file:
yaml.dump(config, file, default_flow_style=False)
def change_pass():
with open('./config.yaml') as file:
config = yaml.load(file, Loader=SafeLoader)
authenticator = stauth.Authenticate(
config['credentials'],
config['cookie']['name'],
config['cookie']['key'],
config['cookie']['expiry_days'],
config['preauthorized']
)
if st.session_state["authentication_status"]:
if authenticator.reset_password(st.session_state["username"], 'Reset password'):
st.success('New password changed')
if not st.session_state["authentication_status"]:
st.subheader('You need to login to change the password')
with open('./config.yaml', 'w') as file:
yaml.dump(config, file, default_flow_style=False)
def update_profile():
with open('./config.yaml') as file:
config = yaml.load(file, Loader=SafeLoader)
authenticator = stauth.Authenticate(
config['credentials'],
config['cookie']['name'],
config['cookie']['key'],
config['cookie']['expiry_days'],
config['preauthorized']
)
if st.session_state["authentication_status"]:
if authenticator.update_user_details(st.session_state["username"], 'Update user details'):
st.success('Entries updated successfully')
if not st.session_state["authentication_status"]:
st.subheader('You need to login to update the profile')
with open('./config.yaml', 'a') as file:
yaml.dump(config, file, default_flow_style=False)
# Translation
def translate_text(text, source='auto', target='hi'):
return GoogleTranslator(source=source, target=target).translate(text)
# Extract document and create embeddings
def process_text():
text = ""
if not os.path.exists(st.session_state.txt_path):
os.mkdir(st.session_state.txt_path)
if st.session_state.doc_type == 'PDF':
for file in st.session_state.upload_folder:
pdfdata = PdfReader(file)
for page in pdfdata.pages:
text += page.extract_text()
else:
        for file in st.session_state.upload_folder:
for line in file:
text += str(line, encoding = 'utf-8')
file = open(st.session_state.txt_path + '/' + 'raw_text.txt' , 'w')
file.write(text)
text_splitter = RecursiveCharacterTextSplitter(
chunk_size=1000,
chunk_overlap=100,
length_function=len
)
chunks = text_splitter.split_text(text)
embeddings = OpenAIEmbeddings()
st.info('Creating OpenAI embeddings with PINECONE.... Please wait', icon="ℹ️")
st.session_state.vector_db = Pinecone.from_texts(chunks,embeddings,index_name=index_name)
st.success('Embeddings generated... Start the conversations', icon="✅")
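# Retrieve the two most similar chunks from Pinecone and run the "stuff" QA chain
# over them to answer the query.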
def query_answer(query):
docs = st.session_state.vector_db.similarity_search(query, k=2)
response = chain.run(input_documents=docs, question=query)
return response
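# Chat UI loop: replay the stored message history, then for each new prompt run
# the QA chain and stream the answer word by word into the placeholder.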
def chatbox():
for message in st.session_state.messages:
with st.chat_message(message['role']):
st.markdown(message['content'])
if prompt := st.chat_input('Ask question about PDF content'):
st.session_state.messages.append({'role' : 'user', 'content' : prompt})
with st.chat_message('user'):
st.markdown(prompt)
with st.chat_message('assistant'):
message_placeholder = st.empty()
            # Query against the English translation of the prompt
            raw_prompt = translate_text(prompt, 'auto', 'en')
            result = query_answer(raw_prompt)
result2 = ""
for chunk in result.split():
result2 += chunk + " "
time.sleep(0.1)
message_placeholder.markdown(result2 + "▌")
st.session_state.messages.append({"role": "assistant", "content": result})
def about(key):
selection = st.session_state[key]
if selection == 'Home':
home()
if selection == 'Login':
login()
if selection == 'Register':
register()
if selection == 'Forgot Password':
forgot_pass()
def tasks():
st.write('Tasks')
def main():
if 'vector_db' not in st.session_state:
st.session_state.vector_db = None
if 'txt_path' not in st.session_state:
st.session_state.txt_path = None
if 'doc_type' not in st.session_state:
st.session_state.doc_type = None
if 'upload_folder' not in st.session_state:
st.session_state.upload_folder = None
if 'messages' not in st.session_state:
st.session_state.messages = []
st.session_state.txt_path = os.path.join(os.getcwd(), 'extract_text')
with st.sidebar:
selected5 = option_menu(None, ["Home", "Login", "Register", 'Forgot Passoword'],
icons=['house', 'login', "register", 'gear'],
on_change=about, key='menu_5', orientation="vertical")
st.session_state.doc_type = st.selectbox('Document type', ('None','PDF','TXT', 'RST','MD'))
st.session_state.upload_folder = st.file_uploader('Upload files', type = ['pdf', 'txt', 'rst','md'], accept_multiple_files=True)
submitBtn = st.button('Submit')
if submitBtn:
process_text()
chatbox()
if __name__ == '__main__':
main()
| [
"langchain.chains.question_answering.load_qa_chain",
"langchain.vectorstores.Pinecone.from_texts",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.OpenAI",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((560, 573), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (571, 573), False, 'from dotenv import load_dotenv\n'), ((656, 749), 'pinecone.init', 'pinecone.init', ([], {'api_key': '"""db6b2a8c-d59e-48e1-8d5c-4c2704622937"""', 'environment': '"""gcp-starter"""'}), "(api_key='db6b2a8c-d59e-48e1-8d5c-4c2704622937', environment=\n 'gcp-starter')\n", (669, 749), False, 'import pinecone\n'), ((748, 791), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-instruct"""'}), "(model_name='gpt-3.5-turbo-instruct')\n", (754, 791), False, 'from langchain import OpenAI\n'), ((798, 836), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (811, 836), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((895, 927), 'streamlit.title', 'st.title', (['"""This is my Home page"""'], {}), "('This is my Home page')\n", (903, 927), True, 'import streamlit as st\n'), ((960, 982), 'streamlit.title', 'st.title', (['"""Login page"""'], {}), "('Login page')\n", (968, 982), True, 'import streamlit as st\n'), ((1096, 1257), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (1115, 1257), True, 'import streamlit_authenticator as stauth\n'), ((2076, 2101), 'streamlit.title', 'st.title', (['"""Register page"""'], {}), "('Register page')\n", (2084, 2101), True, 'import streamlit as st\n'), ((2215, 2376), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (2234, 2376), True, 'import streamlit_authenticator as stauth\n'), ((2780, 2941), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (2799, 2941), True, 'import streamlit_authenticator as stauth\n'), ((3509, 3670), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (3528, 3670), True, 'import streamlit_authenticator as stauth\n'), ((4252, 4413), 'streamlit_authenticator.Authenticate', 'stauth.Authenticate', (["config['credentials']", "config['cookie']['name']", "config['cookie']['key']", "config['cookie']['expiry_days']", "config['preauthorized']"], {}), "(config['credentials'], config['cookie']['name'], config\n ['cookie']['key'], config['cookie']['expiry_days'], config['preauthorized']\n )\n", (4271, 4413), True, 'import streamlit_authenticator as stauth\n'), ((5562, 5653), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 
'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1000)', 'chunk_overlap': '(100)', 'length_function': 'len'}), '(chunk_size=1000, chunk_overlap=100,\n length_function=len)\n', (5592, 5653), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((5715, 5733), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (5731, 5733), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((5735, 5813), 'streamlit.info', 'st.info', (['"""Creating OpenAI embeddings with PINECONE.... Please wait"""'], {'icon': '"""ℹ️"""'}), "('Creating OpenAI embeddings with PINECONE.... Please wait', icon='ℹ️')\n", (5742, 5813), True, 'import streamlit as st\n'), ((5844, 5906), 'langchain.vectorstores.Pinecone.from_texts', 'Pinecone.from_texts', (['chunks', 'embeddings'], {'index_name': 'index_name'}), '(chunks, embeddings, index_name=index_name)\n', (5863, 5906), False, 'from langchain.vectorstores import Pinecone\n'), ((5906, 5977), 'streamlit.success', 'st.success', (['"""Embeddings generated... Start the conversations"""'], {'icon': '"""✅"""'}), "('Embeddings generated... Start the conversations', icon='✅')\n", (5916, 5977), True, 'import streamlit as st\n'), ((6014, 6070), 'streamlit.session_state.vector_db.similarity_search', 'st.session_state.vector_db.similarity_search', (['query'], {'k': '(2)'}), '(query, k=2)\n', (6058, 6070), True, 'import streamlit as st\n'), ((7087, 7104), 'streamlit.write', 'st.write', (['"""Tasks"""'], {}), "('Tasks')\n", (7095, 7104), True, 'import streamlit as st\n'), ((1040, 1074), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (1049, 1074), False, 'import yaml\n'), ((1404, 1453), 'streamlit.title', 'st.title', (['f"""Welcome *{st.session_state[\'name\']}*"""'], {}), '(f"Welcome *{st.session_state[\'name\']}*")\n', (1412, 1453), True, 'import streamlit as st\n'), ((1462, 1536), 'streamlit.subheader', 'st.subheader', (['"""Click on the Chat to upload document and access AI chatbot"""'], {}), "('Click on the Chat to upload document and access AI chatbot')\n", (1474, 1536), True, 'import streamlit as st\n'), ((1599, 1610), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1608, 1610), False, 'import os\n'), ((1626, 1657), 'os.path.join', 'os.path.join', (['parent', 'user_name'], {}), '(parent, user_name)\n', (1638, 1657), False, 'import os\n'), ((2159, 2193), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (2168, 2193), False, 'import yaml\n'), ((2498, 2542), 'streamlit.success', 'st.success', (['"""User registration successfully"""'], {}), "('User registration successfully')\n", (2508, 2542), True, 'import streamlit as st\n'), ((2596, 2645), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (2605, 2645), False, 'import yaml\n'), ((2724, 2758), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (2733, 2758), False, 'import yaml\n'), ((3111, 3199), 'streamlit.success', 'st.success', (['f"""New random password is : {random_password}.. Change it in next login"""'], {}), "(\n f'New random password is : {random_password}.. 
Change it in next login')\n", (3121, 3199), True, 'import streamlit as st\n'), ((3325, 3374), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (3334, 3374), False, 'import yaml\n'), ((3453, 3487), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (3462, 3487), False, 'import yaml\n'), ((3955, 4011), 'streamlit.subheader', 'st.subheader', (['"""You need to login to change the password"""'], {}), "('You need to login to change the password')\n", (3967, 4011), True, 'import streamlit as st\n'), ((4065, 4114), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (4074, 4114), False, 'import yaml\n'), ((4196, 4230), 'yaml.load', 'yaml.load', (['file'], {'Loader': 'SafeLoader'}), '(file, Loader=SafeLoader)\n', (4205, 4230), False, 'import yaml\n'), ((4716, 4771), 'streamlit.subheader', 'st.subheader', (['"""You need to login to update the profile"""'], {}), "('You need to login to update the profile')\n", (4728, 4771), True, 'import streamlit as st\n'), ((4825, 4874), 'yaml.dump', 'yaml.dump', (['config', 'file'], {'default_flow_style': '(False)'}), '(config, file, default_flow_style=False)\n', (4834, 4874), False, 'import yaml\n'), ((5102, 5143), 'os.path.exists', 'os.path.exists', (['st.session_state.txt_path'], {}), '(st.session_state.txt_path)\n', (5116, 5143), False, 'import os\n'), ((5147, 5182), 'os.mkdir', 'os.mkdir', (['st.session_state.txt_path'], {}), '(st.session_state.txt_path)\n', (5155, 5182), False, 'import os\n'), ((6298, 6345), 'streamlit.chat_input', 'st.chat_input', (['"""Ask question about PDF content"""'], {}), "('Ask question about PDF content')\n", (6311, 6345), True, 'import streamlit as st\n'), ((6349, 6418), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (6381, 6418), True, 'import streamlit as st\n'), ((6780, 6854), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': result}"], {}), "({'role': 'assistant', 'content': result})\n", (6812, 6854), True, 'import streamlit as st\n'), ((7545, 7556), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (7554, 7556), False, 'import os\n'), ((7606, 7784), 'streamlit_option_menu.option_menu', 'option_menu', (['None', "['Home', 'Login', 'Register', 'Forgot Passoword']"], {'icons': "['house', 'login', 'register', 'gear']", 'on_change': 'about', 'key': '"""menu_5"""', 'orientation': '"""vertical"""'}), "(None, ['Home', 'Login', 'Register', 'Forgot Passoword'], icons=\n ['house', 'login', 'register', 'gear'], on_change=about, key='menu_5',\n orientation='vertical')\n", (7617, 7784), False, 'from streamlit_option_menu import option_menu\n'), ((7856, 7922), 'streamlit.selectbox', 'st.selectbox', (['"""Document type"""', "('None', 'PDF', 'TXT', 'RST', 'MD')"], {}), "('Document type', ('None', 'PDF', 'TXT', 'RST', 'MD'))\n", (7868, 7922), True, 'import streamlit as st\n'), ((7955, 8053), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload files"""'], {'type': "['pdf', 'txt', 'rst', 'md']", 'accept_multiple_files': '(True)'}), "('Upload files', type=['pdf', 'txt', 'rst', 'md'],\n accept_multiple_files=True)\n", (7971, 8053), True, 'import streamlit as st\n'), ((8065, 8084), 'streamlit.button', 'st.button', (['"""Submit"""'], {}), "('Submit')\n", (8074, 8084), 
True, 'import streamlit as st\n'), ((1673, 1693), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1687, 1693), False, 'import os\n'), ((1707, 1721), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (1715, 1721), False, 'import os\n'), ((1873, 1915), 'streamlit.error', 'st.error', (['"""Username/password is incorrect"""'], {}), "('Username/password is incorrect')\n", (1881, 1915), True, 'import streamlit as st\n'), ((3241, 3271), 'streamlit.error', 'st.error', (['"""Username not found"""'], {}), "('Username not found')\n", (3249, 3271), True, 'import streamlit as st\n'), ((3858, 3892), 'streamlit.success', 'st.success', (['"""New password changed"""'], {}), "('New password changed')\n", (3868, 3892), True, 'import streamlit as st\n'), ((4611, 4653), 'streamlit.success', 'st.success', (['"""Entries updated successfully"""'], {}), "('Entries updated successfully')\n", (4621, 4653), True, 'import streamlit as st\n'), ((4957, 5003), 'deep_translator.GoogleTranslator', 'GoogleTranslator', ([], {'source': 'source', 'target': 'target'}), '(source=source, target=target)\n', (4973, 5003), False, 'from deep_translator import GoogleTranslator\n'), ((5283, 5298), 'PyPDF2.PdfReader', 'PdfReader', (['file'], {}), '(file)\n', (5292, 5298), False, 'from PyPDF2 import PdfReader\n'), ((6215, 6247), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (6230, 6247), True, 'import streamlit as st\n'), ((6252, 6283), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (6263, 6283), True, 'import streamlit as st\n'), ((6428, 6451), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (6443, 6451), True, 'import streamlit as st\n'), ((6456, 6475), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (6467, 6475), True, 'import streamlit as st\n'), ((6483, 6511), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (6498, 6511), True, 'import streamlit as st\n'), ((6538, 6548), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (6546, 6548), True, 'import streamlit as st\n'), ((1984, 2037), 'streamlit.warning', 'st.warning', (['"""Please enter your username and password"""'], {}), "('Please enter your username and password')\n", (1994, 2037), True, 'import streamlit as st\n'), ((6714, 6729), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6724, 6729), False, 'import time\n')] |
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
# invoking custom retriever
from redundant_filter_retriever import RedundantFilterRetriever
from dotenv import load_dotenv
import langchain
langchain.debug = True
load_dotenv()
# create our chat model
chat = ChatOpenAI()
embeddings = OpenAIEmbeddings()
# instance of Chroma used only for similarity search; nothing new is added to the db here
db = Chroma(
persist_directory="emb",
embedding_function=embeddings
)
# set up the RetrievalQA chain in langchain
# retriever -> object that takes in a string & returns relevant docs
# use our custom retriever -> RedundantFilterRetriever instead of db.as_retriever()
retriever = RedundantFilterRetriever(
# pass in customized attributes -> embeddings & chroma
embeddings=embeddings,
chroma=db
)
# retriever = db.as_retriever()
chain = RetrievalQA.from_chain_type(
llm=chat,
retriever=retriever,
chain_type="stuff" # refine -> build an initial response, then give the LLM an opport. to update it with further context
# "map_reduce" -> build a summary of each doc, then feed each summary to final qn
# "stuff" -> take some context from the vector store & "stuff" it into the prompt
# "map_rerank" -> find relevant part of each doc & give it a score of how relevant it is
)
result = chain.run("What is an interesting fact about the English language")
print(result)
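# For reference: a minimal sketch of what redundant_filter_retriever.RedundantFilterRetriever
# could look like (an assumption, not the verified contents of that file). The idea is to
# embed the query ourselves and use Chroma's max-marginal-relevance search so near-duplicate
# chunks are filtered out of the results; the exact BaseRetriever method to override depends
# on the installed langchain version.
#
#   from typing import List
#   from langchain.embeddings.base import Embeddings
#   from langchain.schema import BaseRetriever, Document
#
#   class RedundantFilterRetriever(BaseRetriever):
#       embeddings: Embeddings
#       chroma: Chroma
#
#       def get_relevant_documents(self, query: str) -> List[Document]:
#           emb = self.embeddings.embed_query(query)
#           return self.chroma.max_marginal_relevance_search_by_vector(
#               embedding=emb, lambda_mult=0.8
#           )
#
#       async def aget_relevant_documents(self, query: str) -> List[Document]:
#           return []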
| [
"langchain.vectorstores.Chroma",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.chat_models.ChatOpenAI"
] | [((344, 357), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (355, 357), False, 'from dotenv import load_dotenv\n'), ((392, 404), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n', (402, 404), False, 'from langchain.chat_models import ChatOpenAI\n'), ((418, 436), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (434, 436), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((513, 575), 'langchain.vectorstores.Chroma', 'Chroma', ([], {'persist_directory': '"""emb"""', 'embedding_function': 'embeddings'}), "(persist_directory='emb', embedding_function=embeddings)\n", (519, 575), False, 'from langchain.vectorstores import Chroma\n'), ((792, 850), 'redundant_filter_retriever.RedundantFilterRetriever', 'RedundantFilterRetriever', ([], {'embeddings': 'embeddings', 'chroma': 'db'}), '(embeddings=embeddings, chroma=db)\n', (816, 850), False, 'from redundant_filter_retriever import RedundantFilterRetriever\n'), ((962, 1040), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'chat', 'retriever': 'retriever', 'chain_type': '"""stuff"""'}), "(llm=chat, retriever=retriever, chain_type='stuff')\n", (989, 1040), False, 'from langchain.chains import RetrievalQA\n')] |
import os
import logging
import pickle
import ssl
import dill
import langchain
from langchain.prompts import PromptTemplate
from langchain.llms import OpenAI, GooglePalm
from langchain.chains import LLMChain, RetrievalQAWithSourcesChain, AnalyzeDocumentChain
from langchain.chains.qa_with_sources import load_qa_with_sources_chain
from langchain.document_loaders import TextLoader, UnstructuredURLLoader
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.schema import StrOutputParser
from dotenv import load_dotenv
class Vectorizer():
llm = OpenAI(temperature=0.7, max_tokens=1024)
embeddings = OpenAIEmbeddings()
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
logging.getLogger("httpx").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
def __init__(self, file_path: str):
self.file_path = os.path.join(os.getcwd(), 'vectors', f'{file_path[:-4]}.pkl')
    def vector(self, split_docs: list) -> bool:
self.logger.info('docs: %s', len(split_docs))
        # Use OpenAIEmbeddings to embed each chunk so the resulting FAISS index captures semantic relationships between the documents
vector_index = FAISS.from_documents(split_docs, self.embeddings)
self.logger.info('Vector embedding created')
# Exclude SSLContext from pickling
dill._dill._reverse_typemap[type(ssl.create_default_context())] = None
with open(self.file_path, 'wb') as f:
dill.dump(vector_index, f)
self.logger.info('Vector index saved')
return True
def load_index(self):
if os.path.exists(self.file_path):
with open(self.file_path, 'rb') as f:
vector_index = dill.load(f)
self.logger.info('Vector index loaded')
return vector_index
else:
self.logger.info('Vector index not found at the provided file path')
return False
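if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: assumes OPENAI_API_KEY is set,
    # that a ./vectors directory exists, and "notes.txt" is a placeholder for a real file.
    raw_docs = TextLoader("notes.txt").load()
    splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
    chunks = splitter.split_documents(raw_docs)
    vec = Vectorizer("notes.txt")
    vec.vector(chunks)
    index = vec.load_index()
    if index:
        print(index.similarity_search("What are the key takeaways?", k=2))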
| [
"langchain.vectorstores.FAISS.from_documents",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.llms.OpenAI"
] | [((670, 710), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.7)', 'max_tokens': '(1024)'}), '(temperature=0.7, max_tokens=1024)\n', (676, 710), False, 'from langchain.llms import OpenAI, GooglePalm\n'), ((728, 746), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (744, 746), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((751, 858), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""', 'level': 'logging.INFO'}), "(format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n", (770, 858), False, 'import logging\n'), ((924, 951), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (941, 951), False, 'import logging\n'), ((1371, 1420), 'langchain.vectorstores.FAISS.from_documents', 'FAISS.from_documents', (['split_docs', 'self.embeddings'], {}), '(split_docs, self.embeddings)\n', (1391, 1420), False, 'from langchain.vectorstores import FAISS\n'), ((1821, 1851), 'os.path.exists', 'os.path.exists', (['self.file_path'], {}), '(self.file_path)\n', (1835, 1851), False, 'import os\n'), ((858, 884), 'logging.getLogger', 'logging.getLogger', (['"""httpx"""'], {}), "('httpx')\n", (875, 884), False, 'import logging\n'), ((1035, 1046), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1044, 1046), False, 'import os\n'), ((1672, 1698), 'dill.dump', 'dill.dump', (['vector_index', 'f'], {}), '(vector_index, f)\n', (1681, 1698), False, 'import dill\n'), ((1567, 1595), 'ssl.create_default_context', 'ssl.create_default_context', ([], {}), '()\n', (1593, 1595), False, 'import ssl\n'), ((1934, 1946), 'dill.load', 'dill.load', (['f'], {}), '(f)\n', (1943, 1946), False, 'import dill\n')] |
# imports
import os, shutil, json, re
import pathlib
from langchain.document_loaders.unstructured import UnstructuredFileLoader
from langchain.document_loaders.unstructured import UnstructuredAPIFileLoader
from langchain.document_loaders import UnstructuredURLLoader
from langchain.docstore.document import Document
from google.cloud import storage
import base64
import langchain.text_splitter as text_splitter
from dotenv import load_dotenv
import tempfile
import hashlib
from langchain.schema import Document
import logging
from my_llm.pubsub_manager import PubSubManager
import datetime
from .database import setup_database
from .database import delete_row_from_source
from .database import return_sources_last24
load_dotenv()
def contains_url(message_data):
url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
if url_pattern.search(message_data):
return True
else:
return False
def extract_urls(text):
url_pattern = re.compile(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
urls = url_pattern.findall(text)
return urls
# utility functions
def convert_to_txt(file_path):
file_dir, file_name = os.path.split(file_path)
file_base, file_ext = os.path.splitext(file_name)
txt_file = os.path.join(file_dir, f"{file_base}.txt")
shutil.copyfile(file_path, txt_file)
return txt_file
def compute_sha1_from_file(file_path):
with open(file_path, "rb") as file:
bytes = file.read()
readable_hash = hashlib.sha1(bytes).hexdigest()
return readable_hash
def compute_sha1_from_content(content):
readable_hash = hashlib.sha1(content).hexdigest()
return readable_hash
def add_file_to_gcs(filename: str, vector_name:str, bucket_name: str=None, metadata:dict=None):
storage_client = storage.Client()
bucket_name = bucket_name if bucket_name is not None else os.getenv('GCS_BUCKET', None)
if bucket_name is None:
raise ValueError("No bucket found to upload to: GCS_BUCKET returned None")
if bucket_name.startswith("gs://"):
bucket_name = bucket_name.removeprefix("gs://")
logging.info(f"Bucket_name: {bucket_name}")
bucket = storage_client.get_bucket(bucket_name)
now = datetime.datetime.now()
year = now.strftime("%Y")
month = now.strftime("%m")
day = now.strftime("%d")
hour = now.strftime("%H")
bucket_filepath = f"{vector_name}/{year}/{month}/{day}/{hour}/{os.path.basename(filename)}"
blob = bucket.blob(bucket_filepath)
the_metadata = {
"vector_name": vector_name,
}
if metadata is not None:
the_metadata.update(metadata)
blob.metadata = the_metadata
#TODO: create cloud storage pubsub subscription?
blob.upload_from_filename(filename)
logging.info(f"File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}")
# create pubsub topic and subscription if necessary to receive notifications from cloud storage
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"app_to_pubsub_{vector_name}")
sub_name = f"pubsub_to_store_{vector_name}"
sub_exists = pubsub_manager.subscription_exists(sub_name)
if not sub_exists:
pubsub_manager.create_subscription(sub_name,
push_endpoint=f"/pubsub_to_store/{vector_name}")
setup_database(vector_name)
return f"gs://{bucket_name}/{bucket_filepath}"
def read_url_to_document(url: str, metadata: dict = None):
loader = UnstructuredURLLoader(urls=[url])
docs = loader.load()
if metadata is not None:
for doc in docs:
doc.metadata.update(metadata)
logging.info(f"UnstructuredURLLoader docs: {docs}")
return docs
def read_file_to_document(gs_file: pathlib.Path, split=False, metadata: dict = None):
#file_sha1 = compute_sha1_from_file(gs_file.name)
try:
#TODO: Use UnstructuredAPIFileLoader instead?
logging.info(f"Sending {gs_file} to UnstructuredAPIFileLoader")
loader = UnstructuredAPIFileLoader(gs_file, mode="elements", api_key="FAKE_API_KEY")
if split:
# only supported for some file types
docs = loader.load_and_split()
else:
docs = loader.load()
logging.info(f"Loaded docs for {gs_file} from UnstructuredAPIFileLoader")
except ValueError as e:
logging.info(f"Error for {gs_file} from UnstructuredAPIFileLoader: {str(e)}")
if "file type is not supported in partition" in str(e):
logging.info("trying locally via .txt conversion")
txt_file = None
try:
# Convert the file to .txt and try again
txt_file = convert_to_txt(gs_file)
loader = UnstructuredFileLoader(txt_file, mode="elements")
if split:
docs = loader.load_and_split()
else:
docs = loader.load()
except Exception as inner_e:
raise Exception("An error occurred during txt conversion or loading.") from inner_e
finally:
# Ensure cleanup happens if txt_file was created
if txt_file is not None and os.path.exists(txt_file):
os.remove(txt_file)
else:
raise
except Exception as e:
logging.error(f"An unexpected error occurred for {gs_file}: {str(e)}")
raise
for doc in docs:
#doc.metadata["file_sha1"] = file_sha1
logging.info(f"doc_content: {doc.page_content[:30]}")
if metadata is not None:
doc.metadata.update(metadata)
logging.info(f"gs_file: {gs_file} turned into {len(docs)} documents")
return docs
def choose_splitter(extension: str, chunk_size: int=1024, chunk_overlap:int=0):
if extension == ".py":
return text_splitter.PythonCodeTextSplitter()
elif extension == ".md":
return text_splitter.MarkdownTextSplitter()
return text_splitter.RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap)
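# choose_splitter() examples: ".py" -> PythonCodeTextSplitter, ".md" -> MarkdownTextSplitter;
# any other extension falls back to RecursiveCharacterTextSplitter with 1024-character
# chunks and no overlap.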
def remove_whitespace(page_content: str):
return page_content.replace("\n", " ").replace("\r", " ").replace("\t", " ").replace(" ", " ")
def chunk_doc_to_docs(documents: list, extension: str = ".md"):
"""Turns a Document object into a list of many Document chunks"""
source_chunks = []
for document in documents:
splitter = choose_splitter(extension)
for chunk in splitter.split_text(remove_whitespace(document.page_content)):
source_chunks.append(Document(page_content=chunk, metadata=document.metadata))
return source_chunks
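# Small illustrative helper (not part of the original pipeline): one long markdown Document
# comes back from chunk_doc_to_docs() as several smaller Documents that all inherit the
# source Document's metadata.
def _demo_chunk_doc_to_docs():
    long_doc = Document(
        page_content="# Title\n\n" + ("Some example markdown text. " * 400),
        metadata={"source": "demo.md"},
    )
    pieces = chunk_doc_to_docs([long_doc], extension=".md")
    logging.info(f"demo: 1 document became {len(pieces)} chunks")
    return pieces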
def data_to_embed_pubsub(data: dict, vector_name:str="documents"):
"""Triggered from a message on a Cloud Pub/Sub topic.
Args:
data JSON
"""
#hash = data['message']['data']
message_data = base64.b64decode(data['message']['data']).decode('utf-8')
attributes = data['message'].get('attributes', {})
messageId = data['message'].get('messageId')
publishTime = data['message'].get('publishTime')
logging.info(f"data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}")
logging.info(f"data_to_embed_pubsub data: {message_data}")
# pubsub from a Google Cloud Storage push topic
if attributes.get("eventType", None) is not None and attributes.get("payloadFormat", None) is not None:
eventType = attributes.get("eventType")
payloadFormat = attributes.get("payloadFormat")
if eventType == "OBJECT_FINALIZE" and payloadFormat == "JSON_API_V1":
logging.info("Got valid event from Google Cloud Storage")
the_object = attributes.get("objectId", None)
if the_object is None:
logging.info("No object found")
return attributes
if the_object.endswith("/"):
logging.info("GCS object is a directory only")
return attributes
# https://cloud.google.com/storage/docs/json_api/v1/objects#resource-representations
message_data = 'gs://' + attributes.get("bucketId") + '/' + the_object
if '/' in the_object:
bucket_vector_name = the_object.split('/')[0]
if len(bucket_vector_name) > 0 and vector_name != bucket_vector_name:
logging.info(f"Overwriting vector_name {vector_name} with {bucket_vector_name}")
vector_name = bucket_vector_name
attributes["attrs"] = f"namespace:{vector_name}"
logging.info(f"Constructed message_data: {message_data}")
metadata = attributes
logging.info(f"Found metadata in pubsub: {metadata}")
chunks = []
if message_data.startswith('"gs://'):
message_data = message_data.strip('\"')
if message_data.startswith("gs://"):
logging.info("Detected gs://")
bucket_name, file_name = message_data[5:].split("/", 1)
# Create a client
storage_client = storage.Client()
# Download the file from GCS
bucket = storage_client.get_bucket(bucket_name)
blob = bucket.blob(file_name)
file_name=pathlib.Path(file_name)
with tempfile.TemporaryDirectory() as temp_dir:
tmp_file_path = os.path.join(temp_dir, file_name.name)
blob.download_to_filename(tmp_file_path)
the_metadata = {
"source": message_data,
"type": "file_load_gcs",
"bucket_name": bucket_name
}
metadata.update(the_metadata)
docs = read_file_to_document(tmp_file_path, metadata=metadata)
chunks = chunk_doc_to_docs(docs, file_name.suffix)
logging.info(f"Split {file_name} into {len(chunks)} chunks")
elif message_data.startswith("http"):
logging.info(f"Got http message: {message_data}")
# just in case, extract the URL again
urls = extract_urls(message_data)
docs = []
for url in urls:
metadata["source"] = url
metadata["url"] = url
metadata["type"] = "url_load"
doc = read_url_to_document(url, metadata=metadata)
docs.extend(doc)
chunks = chunk_doc_to_docs(docs)
logging.info(f"Split {url} into {len(chunks)} chunks")
else:
logging.info("No gs:// detected")
the_json = json.loads(message_data)
the_metadata = the_json.get("metadata", {})
metadata.update(the_metadata)
the_content = the_json.get("page_content", None)
if metadata.get("source", None) is not None:
metadata["source"] = "No source embedded"
if the_content is None:
logging.info("No content found")
return {"metadata": "No content found"}
docs = [Document(page_content=the_content, metadata=metadata)]
publish_if_urls(the_content, vector_name)
chunks = chunk_doc_to_docs(docs)
logging.info(f"Split content into {len(chunks)} chunks")
publish_chunks(chunks, vector_name=vector_name)
logging.info(f"data_to_embed_pubsub published chunks with metadata: {metadata}")
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"pubsub_state_messages")
pubsub_manager.publish_message(f"pubsub_chunk - Added doc with metadata: {metadata} to {vector_name}")
return metadata
def publish_if_urls(the_content, vector_name):
"""
Extracts URLs and puts them in a queue for processing on PubSub
"""
if contains_url(the_content):
logging.info("Detected http://")
urls = extract_urls(the_content)
for url in urls:
publish_text(url, vector_name)
def publish_chunks(chunks: list[Document], vector_name: str):
logging.info("Publishing chunks to embed_chunk")
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"embed_chunk_{vector_name}")
sub_name = f"pubsub_chunk_to_store_{vector_name}"
sub_exists = pubsub_manager.subscription_exists(sub_name)
if not sub_exists:
pubsub_manager.create_subscription(sub_name,
push_endpoint=f"/pubsub_chunk_to_store/{vector_name}")
setup_database(vector_name)
for chunk in chunks:
# Convert chunk to string, as Pub/Sub messages must be strings or bytes
chunk_str = chunk.json()
pubsub_manager.publish_message(chunk_str)
def publish_text(text:str, vector_name: str):
logging.info(f"Publishing text to app_to_pubsub_{vector_name}")
pubsub_manager = PubSubManager(vector_name, pubsub_topic=f"app_to_pubsub_{vector_name}")
sub_name = f"pubsub_to_store_{vector_name}"
sub_exists = pubsub_manager.subscription_exists(sub_name)
if not sub_exists:
pubsub_manager.create_subscription(sub_name,
push_endpoint=f"/pubsub_chunk_to_store/{vector_name}")
setup_database(vector_name)
pubsub_manager.publish_message(text)
def delete_source(source:str, vector_name:str):
logging.info(f"Deleting source: {source} from {vector_name}")
delete_row_from_source(source, vector_name)
logging.info(f"Deleted source: {source} from {vector_name}")
def return_sources_last24_(vector_name:str):
logging.info(f"Returning sources last 24")
rows = return_sources_last24(vector_name)
return rows
| [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.document_loaders.UnstructuredURLLoader",
"langchain.document_loaders.unstructured.UnstructuredAPIFileLoader",
"langchain.text_splitter.MarkdownTextSplitter",
"langchain.schema.Document",
"langchain.document_loaders.unstructured.UnstructuredFileLoader",
"langchain.text_splitter.PythonCodeTextSplitter"
] | [((719, 732), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (730, 732), False, 'from dotenv import load_dotenv\n'), ((784, 892), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (794, 892), False, 'import os, shutil, json, re\n'), ((1015, 1123), 're.compile', 're.compile', (['"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""'], {}), "(\n 'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\\\\\\\(\\\\\\\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'\n )\n", (1025, 1123), False, 'import os, shutil, json, re\n'), ((1242, 1266), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (1255, 1266), False, 'import os, shutil, json, re\n'), ((1293, 1320), 'os.path.splitext', 'os.path.splitext', (['file_name'], {}), '(file_name)\n', (1309, 1320), False, 'import os, shutil, json, re\n'), ((1336, 1378), 'os.path.join', 'os.path.join', (['file_dir', 'f"""{file_base}.txt"""'], {}), "(file_dir, f'{file_base}.txt')\n", (1348, 1378), False, 'import os, shutil, json, re\n'), ((1383, 1419), 'shutil.copyfile', 'shutil.copyfile', (['file_path', 'txt_file'], {}), '(file_path, txt_file)\n', (1398, 1419), False, 'import os, shutil, json, re\n'), ((1869, 1885), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (1883, 1885), False, 'from google.cloud import storage\n'), ((2200, 2243), 'logging.info', 'logging.info', (['f"""Bucket_name: {bucket_name}"""'], {}), "(f'Bucket_name: {bucket_name}')\n", (2212, 2243), False, 'import logging\n'), ((2306, 2329), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2327, 2329), False, 'import datetime\n'), ((2853, 2939), 'logging.info', 'logging.info', (['f"""File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}"""'], {}), "(\n f'File {filename} uploaded to gs://{bucket_name}/{bucket_filepath}')\n", (2865, 2939), False, 'import logging\n'), ((3058, 3129), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""app_to_pubsub_{vector_name}"""'}), "(vector_name, pubsub_topic=f'app_to_pubsub_{vector_name}')\n", (3071, 3129), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((3584, 3617), 'langchain.document_loaders.UnstructuredURLLoader', 'UnstructuredURLLoader', ([], {'urls': '[url]'}), '(urls=[url])\n', (3605, 3617), False, 'from langchain.document_loaders import UnstructuredURLLoader\n'), ((3748, 3799), 'logging.info', 'logging.info', (['f"""UnstructuredURLLoader docs: {docs}"""'], {}), "(f'UnstructuredURLLoader docs: {docs}')\n", (3760, 3799), False, 'import logging\n'), ((6112, 6212), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'text_splitter.RecursiveCharacterTextSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size,\n chunk_overlap=chunk_overlap)\n', (6156, 6212), True, 'import langchain.text_splitter as text_splitter\n'), ((7229, 7342), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}"""'], {}), "(\n f'data_to_embed_pubsub was triggered by messageId {messageId} published at {publishTime}'\n )\n", (7241, 7342), False, 'import logging\n'), ((7337, 7395), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub data: {message_data}"""'], {}), "(f'data_to_embed_pubsub data: 
{message_data}')\n", (7349, 7395), False, 'import logging\n'), ((8835, 8888), 'logging.info', 'logging.info', (['f"""Found metadata in pubsub: {metadata}"""'], {}), "(f'Found metadata in pubsub: {metadata}')\n", (8847, 8888), False, 'import logging\n'), ((11324, 11409), 'logging.info', 'logging.info', (['f"""data_to_embed_pubsub published chunks with metadata: {metadata}"""'], {}), "(f'data_to_embed_pubsub published chunks with metadata: {metadata}'\n )\n", (11336, 11409), False, 'import logging\n'), ((11426, 11491), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""pubsub_state_messages"""'}), "(vector_name, pubsub_topic=f'pubsub_state_messages')\n", (11439, 11491), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((12019, 12067), 'logging.info', 'logging.info', (['"""Publishing chunks to embed_chunk"""'], {}), "('Publishing chunks to embed_chunk')\n", (12031, 12067), False, 'import logging\n'), ((12094, 12163), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""embed_chunk_{vector_name}"""'}), "(vector_name, pubsub_topic=f'embed_chunk_{vector_name}')\n", (12107, 12163), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((12749, 12812), 'logging.info', 'logging.info', (['f"""Publishing text to app_to_pubsub_{vector_name}"""'], {}), "(f'Publishing text to app_to_pubsub_{vector_name}')\n", (12761, 12812), False, 'import logging\n'), ((12834, 12905), 'my_llm.pubsub_manager.PubSubManager', 'PubSubManager', (['vector_name'], {'pubsub_topic': 'f"""app_to_pubsub_{vector_name}"""'}), "(vector_name, pubsub_topic=f'app_to_pubsub_{vector_name}')\n", (12847, 12905), False, 'from my_llm.pubsub_manager import PubSubManager\n'), ((13331, 13392), 'logging.info', 'logging.info', (['f"""Deleting source: {source} from {vector_name}"""'], {}), "(f'Deleting source: {source} from {vector_name}')\n", (13343, 13392), False, 'import logging\n'), ((13445, 13505), 'logging.info', 'logging.info', (['f"""Deleted source: {source} from {vector_name}"""'], {}), "(f'Deleted source: {source} from {vector_name}')\n", (13457, 13505), False, 'import logging\n'), ((13557, 13599), 'logging.info', 'logging.info', (['f"""Returning sources last 24"""'], {}), "(f'Returning sources last 24')\n", (13569, 13599), False, 'import logging\n'), ((1949, 1978), 'os.getenv', 'os.getenv', (['"""GCS_BUCKET"""', 'None'], {}), "('GCS_BUCKET', None)\n", (1958, 1978), False, 'import os, shutil, json, re\n'), ((4044, 4107), 'logging.info', 'logging.info', (['f"""Sending {gs_file} to UnstructuredAPIFileLoader"""'], {}), "(f'Sending {gs_file} to UnstructuredAPIFileLoader')\n", (4056, 4107), False, 'import logging\n'), ((4125, 4200), 'langchain.document_loaders.unstructured.UnstructuredAPIFileLoader', 'UnstructuredAPIFileLoader', (['gs_file'], {'mode': '"""elements"""', 'api_key': '"""FAKE_API_KEY"""'}), "(gs_file, mode='elements', api_key='FAKE_API_KEY')\n", (4150, 4200), False, 'from langchain.document_loaders.unstructured import UnstructuredAPIFileLoader\n'), ((5632, 5685), 'logging.info', 'logging.info', (['f"""doc_content: {doc.page_content[:30]}"""'], {}), "(f'doc_content: {doc.page_content[:30]}')\n", (5644, 5685), False, 'import logging\n'), ((5976, 6014), 'langchain.text_splitter.PythonCodeTextSplitter', 'text_splitter.PythonCodeTextSplitter', ([], {}), '()\n', (6012, 6014), True, 'import langchain.text_splitter as text_splitter\n'), ((9047, 9077), 'logging.info', 'logging.info', (['"""Detected gs://"""'], {}), "('Detected 
gs://')\n", (9059, 9077), False, 'import logging\n'), ((9194, 9210), 'google.cloud.storage.Client', 'storage.Client', ([], {}), '()\n', (9208, 9210), False, 'from google.cloud import storage\n'), ((9362, 9385), 'pathlib.Path', 'pathlib.Path', (['file_name'], {}), '(file_name)\n', (9374, 9385), False, 'import pathlib\n'), ((11795, 11827), 'logging.info', 'logging.info', (['"""Detected http://"""'], {}), "('Detected http://')\n", (11807, 11827), False, 'import logging\n'), ((1691, 1712), 'hashlib.sha1', 'hashlib.sha1', (['content'], {}), '(content)\n', (1703, 1712), False, 'import hashlib\n'), ((2518, 2544), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (2534, 2544), False, 'import os, shutil, json, re\n'), ((4379, 4452), 'logging.info', 'logging.info', (['f"""Loaded docs for {gs_file} from UnstructuredAPIFileLoader"""'], {}), "(f'Loaded docs for {gs_file} from UnstructuredAPIFileLoader')\n", (4391, 4452), False, 'import logging\n'), ((6059, 6095), 'langchain.text_splitter.MarkdownTextSplitter', 'text_splitter.MarkdownTextSplitter', ([], {}), '()\n', (6093, 6095), True, 'import langchain.text_splitter as text_splitter\n'), ((7009, 7050), 'base64.b64decode', 'base64.b64decode', (["data['message']['data']"], {}), "(data['message']['data'])\n", (7025, 7050), False, 'import base64\n'), ((7751, 7808), 'logging.info', 'logging.info', (['"""Got valid event from Google Cloud Storage"""'], {}), "('Got valid event from Google Cloud Storage')\n", (7763, 7808), False, 'import logging\n'), ((8741, 8798), 'logging.info', 'logging.info', (['f"""Constructed message_data: {message_data}"""'], {}), "(f'Constructed message_data: {message_data}')\n", (8753, 8798), False, 'import logging\n'), ((9400, 9429), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (9427, 9429), False, 'import tempfile\n'), ((9471, 9509), 'os.path.join', 'os.path.join', (['temp_dir', 'file_name.name'], {}), '(temp_dir, file_name.name)\n', (9483, 9509), False, 'import os, shutil, json, re\n'), ((10036, 10085), 'logging.info', 'logging.info', (['f"""Got http message: {message_data}"""'], {}), "(f'Got http message: {message_data}')\n", (10048, 10085), False, 'import logging\n'), ((10548, 10581), 'logging.info', 'logging.info', (['"""No gs:// detected"""'], {}), "('No gs:// detected')\n", (10560, 10581), False, 'import logging\n'), ((10610, 10634), 'json.loads', 'json.loads', (['message_data'], {}), '(message_data)\n', (10620, 10634), False, 'import os, shutil, json, re\n'), ((1573, 1592), 'hashlib.sha1', 'hashlib.sha1', (['bytes'], {}), '(bytes)\n', (1585, 1592), False, 'import hashlib\n'), ((4643, 4693), 'logging.info', 'logging.info', (['"""trying locally via .txt conversion"""'], {}), "('trying locally via .txt conversion')\n", (4655, 4693), False, 'import logging\n'), ((6705, 6761), 'langchain.schema.Document', 'Document', ([], {'page_content': 'chunk', 'metadata': 'document.metadata'}), '(page_content=chunk, metadata=document.metadata)\n', (6713, 6761), False, 'from langchain.schema import Document\n'), ((7919, 7950), 'logging.info', 'logging.info', (['"""No object found"""'], {}), "('No object found')\n", (7931, 7950), False, 'import logging\n'), ((8055, 8101), 'logging.info', 'logging.info', (['"""GCS object is a directory only"""'], {}), "('GCS object is a directory only')\n", (8067, 8101), False, 'import logging\n'), ((10935, 10967), 'logging.info', 'logging.info', (['"""No content found"""'], {}), "('No content found')\n", (10947, 10967), False, 'import logging\n'), 
((11045, 11098), 'langchain.schema.Document', 'Document', ([], {'page_content': 'the_content', 'metadata': 'metadata'}), '(page_content=the_content, metadata=metadata)\n', (11053, 11098), False, 'from langchain.schema import Document\n'), ((4872, 4921), 'langchain.document_loaders.unstructured.UnstructuredFileLoader', 'UnstructuredFileLoader', (['txt_file'], {'mode': '"""elements"""'}), "(txt_file, mode='elements')\n", (4894, 4921), False, 'from langchain.document_loaders.unstructured import UnstructuredFileLoader\n'), ((8533, 8618), 'logging.info', 'logging.info', (['f"""Overwriting vector_name {vector_name} with {bucket_vector_name}"""'], {}), "(f'Overwriting vector_name {vector_name} with {bucket_vector_name}'\n )\n", (8545, 8618), False, 'import logging\n'), ((5335, 5359), 'os.path.exists', 'os.path.exists', (['txt_file'], {}), '(txt_file)\n', (5349, 5359), False, 'import os, shutil, json, re\n'), ((5381, 5400), 'os.remove', 'os.remove', (['txt_file'], {}), '(txt_file)\n', (5390, 5400), False, 'import os, shutil, json, re\n')] |
from langchain.llms import LlamaCpp
from langchain.chat_models import ChatOpenAI
from langchain.chains.llm import LLMChain
from langchain.prompts import PromptTemplate
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.cache import SQLiteCache
import langchain
import itertools
from utils import setup_logger
from dotenv import load_dotenv
import os
# Load the .env file
load_dotenv()
OPEN_AI_KEY = os.getenv("OPEN_AI_KEY")
os.environ["OPENAI_API_KEY"] = OPEN_AI_KEY
logger = setup_logger('contr_detector_logger', 'app.log')
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm_llama = LlamaCpp(
# model_path="llama-2-7b.Q4_K_M.gguf",
model_path="models/OpenOrca-Platypus2-13B-Q4_K_M.gguf",
temperature=0,
max_tokens=1000,
    top_p=3,  # note: top_p is normally a probability in (0, 1]; a value > 1 effectively disables nucleus sampling
callback_manager=callback_manager,
verbose=True, # Verbose is required to pass to the callback manager
)
# TODO: move the prompt to a file to be configured
prompt_template = """
Statement 1: {doc1}
Statement 2: {doc2}
Question: Are these two statements contradictory? Answer "yes" or "no".
"""
prompt = PromptTemplate.from_template(prompt_template)
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_key=OPEN_AI_KEY)
llm_chain = LLMChain(llm=llm, prompt=prompt)
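# Note: llm_chain({"doc1": ..., "doc2": ...}, return_only_outputs=True) returns a dict of the
# form {"text": "..."}; detect_contradictions() below simply checks for "yes" in that text.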
def detect_contradictions(documents, metadatas, model_type: str):
contrs = []
for doc1, doc2 in itertools.combinations(zip(documents, metadatas), 2):
# print(doc1)
doc1, meta1 = doc1
doc2, meta2 = doc2
if model_type == "openAI":
llm = llm_chain
result = llm_chain({"doc1": doc1, "doc2": doc2}, return_only_outputs=True)
print(result)
if "yes" in result['text'].lower():
logger.info(f"Contradiction: {doc1} {doc2}")
print(f"Contradiction: {doc1} {doc2}")
contrs.append(((doc1, meta1), (doc2, meta2)))
# break # TODO: remove
else:
logger.info(f"No contradiction: {doc1} {doc2}")
print(f"No contradiction: {doc1} {doc2}")
else:
llm = llm_llama
prompt = f"""
Statement 1: {doc1}
Statement 2: {doc2}
Question: Are these two statements contradictory? Answer "yes" or "no".
"""
if "yes" in llm(prompt).lower():
logger.info(f"Contradiction: {doc1} {doc2}")
print(f"Contradiction: {doc1} {doc2}")
contrs.append(((doc1, meta1), (doc2, meta2)))
else:
logger.info(f"No contradiction: {doc1} {doc2}")
print(f"No contradiction: {doc1} {doc2}")
print("Done with checking for contradictions")
print(contrs)
return contrs | [
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
"langchain.chains.llm.LLMChain",
"langchain.chat_models.ChatOpenAI",
"langchain.llms.LlamaCpp",
"langchain.cache.SQLiteCache",
"langchain.prompts.PromptTemplate.from_template"
] | [((476, 489), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (487, 489), False, 'from dotenv import load_dotenv\n'), ((505, 529), 'os.getenv', 'os.getenv', (['"""OPEN_AI_KEY"""'], {}), "('OPEN_AI_KEY')\n", (514, 529), False, 'import os\n'), ((584, 632), 'utils.setup_logger', 'setup_logger', (['"""contr_detector_logger"""', '"""app.log"""'], {}), "('contr_detector_logger', 'app.log')\n", (596, 632), False, 'from utils import setup_logger\n'), ((655, 697), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (666, 697), False, 'from langchain.cache import SQLiteCache\n'), ((783, 946), 'langchain.llms.LlamaCpp', 'LlamaCpp', ([], {'model_path': '"""models/OpenOrca-Platypus2-13B-Q4_K_M.gguf"""', 'temperature': '(0)', 'max_tokens': '(1000)', 'top_p': '(3)', 'callback_manager': 'callback_manager', 'verbose': '(True)'}), "(model_path='models/OpenOrca-Platypus2-13B-Q4_K_M.gguf',\n temperature=0, max_tokens=1000, top_p=3, callback_manager=\n callback_manager, verbose=True)\n", (791, 946), False, 'from langchain.llms import LlamaCpp\n'), ((1340, 1385), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (1368, 1385), False, 'from langchain.prompts import PromptTemplate\n'), ((1393, 1479), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'openai_api_key': 'OPEN_AI_KEY'}), "(temperature=0, model_name='gpt-3.5-turbo', openai_api_key=\n OPEN_AI_KEY)\n", (1403, 1479), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1487, 1519), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1495, 1519), False, 'from langchain.chains.llm import LLMChain\n'), ((736, 768), 'langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler', 'StreamingStdOutCallbackHandler', ([], {}), '()\n', (766, 768), False, 'from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler\n')] |
import streamlit as st
import torch
from transformers import (
AutoTokenizer, AutoModelForCausalLM,
BitsAndBytesConfig,
TextStreamer,
)
import whisper
import os
############ config ############
# general config
whisper_model_names=["tiny", "base", "small", "medium", "large"]
data_root_path = os.path.join('.','data')
file_types = ['pdf','png','jpg','wav']
for filetype in file_types:
if not os.path.exists(os.path.join(data_root_path,filetype)):
os.makedirs(os.path.join(data_root_path,filetype))
# streamlit config
## Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = [{"role": "assistant", "content": "Type a message to start a conversation"}]
############ User Interface ############
# Title
st.title('LLAMA RAG Demo')
st.divider()
st.title('Model name and auth token')
# Configs
model_name = st.text_input('Enter your Hugging Face model name', value="meta-llama/Llama-2-7b-chat-hf")
auth_token = st.text_input('Enter your Hugging Face auth token', value="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")
system_prompt = st.text_area('Enter your system prompt', value="You are a helpful, respectful and honest assistant.")
whisper_model_name = st.selectbox('Select your whisper model',options=whisper_model_names)
use_cuda = st.checkbox('Use CUDA', value=True)
isfile = False
## File uploader
from streamlit import file_uploader
uploadedfile = file_uploader("Choose a \"PDF\" file (currently only PDF is supported)")
if uploadedfile is not None:
isfile = True
with open(os.path.join(data_root_path,'pdf',uploadedfile.name),"wb") as f:
f.write(uploadedfile.getbuffer())
st.success("File uploaded successfully : {}".format(uploadedfile.name))
st.divider()
############ function ############
def clear_chat_history():
st.session_state.messages = [{"role": "assistant", "content": "Type a message to start a conversation"}]
for file in os.listdir(os.path.join(data_root_path,'pdf')):
os.remove(os.path.join(data_root_path,'pdf',file))
st.button('Clear Chat History', on_click=clear_chat_history)
# Load Tokenizer and Model
@st.cache_resource
def get_tokenizer_model():
# Create tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir='./model/', token=auth_token)
# Create model
quantization_config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type='nf4',
bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(model_name,
cache_dir='./model/', token=auth_token,
quantization_config=quantization_config,
# rope_scaling={"type":"dynamic", "factor":2},
max_memory=f'{int(torch.cuda.mem_get_info()[0]/1024**3)-2}GB'
)
return tokenizer, model
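# Note: because get_tokenizer_model() is decorated with @st.cache_resource, the model is
# downloaded and 4-bit (nf4) quantized only once per Streamlit server process rather than
# on every script rerun.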
# RAG engine
def get_rag_queryengine(_tokenizer, model, system_prompt):
from llama_index.prompts.prompts import SimpleInputPrompt
from llama_index.llms import HuggingFaceLLM
system_prompt_ = f"[INST] <<SYS>>\n{system_prompt}\n<</SYS>>\n\n"
query_wrapper_prompt = SimpleInputPrompt("{query_str} [/INST]")
llm = HuggingFaceLLM(context_window=4096,
max_new_tokens=256,
system_prompt=system_prompt_,
query_wrapper_prompt=query_wrapper_prompt,
model=model,
tokenizer=_tokenizer
)
# Create embeddings
from llama_index.embeddings import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
embeddings=LangchainEmbedding(
HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
)
from llama_index import ServiceContext
from llama_index import set_global_service_context
service_context = ServiceContext.from_defaults(
chunk_size=1024,
llm=llm,
embed_model=embeddings
)
set_global_service_context(service_context)
from llama_index import VectorStoreIndex, download_loader
PyMuPDFReader = download_loader("PyMuPDFReader")
loader = PyMuPDFReader()
    # load every uploaded PDF (previously only the last file in the folder was indexed)
    documents = []
    for file in os.listdir(os.path.join(data_root_path,'pdf')):
        documents.extend(loader.load_data(file_path=os.path.join(data_root_path,'pdf',file), metadata=True))
    index = VectorStoreIndex.from_documents(documents)
query_engine = index.as_query_engine()
return query_engine
# whisper
def whisper_stt(*,model, device, audio_path)->str:
# load model
# # model : model name of whisper. default is base
    # # device : argument from args. default is cpu
audio_model = whisper.load_model(model,device)
# stt - audio.wav
result = audio_model.transcribe(audio_path)
    # return result["text"] : transcribed text (str)
return result["text"]
############ main ############
# Load Tokenizer and Model, RAG engine
tokenizer, model = get_tokenizer_model()
if isfile:
engine = get_rag_queryengine(tokenizer, model, system_prompt)
# Display chat messages from history on app rerun
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
prompt = st.chat_input('User: ')
if prompt:
# update(append) chat history
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.write(prompt)
# Here... text streamer does not work as well as I intended with streamlit
# I will try to fix this later
if st.session_state.messages[-1]["role"] == "user":
if isfile:
with st.chat_message("assistant"):
# model inference
output_text = engine.query(prompt)
placeholder = st.empty()
placeholder.markdown(output_text)
st.session_state.messages.append({"role": "assistant", "content": output_text})
else:
with st.chat_message("assistant"):
# model inference
output_text = "Please upload a file first"
placeholder = st.empty()
placeholder.markdown(output_text)
st.session_state.messages.append({"role": "assistant", "content": output_text})
| [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((306, 331), 'os.path.join', 'os.path.join', (['"""."""', '"""data"""'], {}), "('.', 'data')\n", (318, 331), False, 'import os\n'), ((772, 798), 'streamlit.title', 'st.title', (['"""LLAMA RAG Demo"""'], {}), "('LLAMA RAG Demo')\n", (780, 798), True, 'import streamlit as st\n'), ((799, 811), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (809, 811), True, 'import streamlit as st\n'), ((813, 850), 'streamlit.title', 'st.title', (['"""Model name and auth token"""'], {}), "('Model name and auth token')\n", (821, 850), True, 'import streamlit as st\n'), ((874, 969), 'streamlit.text_input', 'st.text_input', (['"""Enter your Hugging Face model name"""'], {'value': '"""meta-llama/Llama-2-7b-chat-hf"""'}), "('Enter your Hugging Face model name', value=\n 'meta-llama/Llama-2-7b-chat-hf')\n", (887, 969), True, 'import streamlit as st\n'), ((978, 1081), 'streamlit.text_input', 'st.text_input', (['"""Enter your Hugging Face auth token"""'], {'value': '"""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"""'}), "('Enter your Hugging Face auth token', value=\n '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')\n", (991, 1081), True, 'import streamlit as st\n'), ((1093, 1199), 'streamlit.text_area', 'st.text_area', (['"""Enter your system prompt"""'], {'value': '"""You are a helpful, respectful and honest assistant."""'}), "('Enter your system prompt', value=\n 'You are a helpful, respectful and honest assistant.')\n", (1105, 1199), True, 'import streamlit as st\n'), ((1216, 1286), 'streamlit.selectbox', 'st.selectbox', (['"""Select your whisper model"""'], {'options': 'whisper_model_names'}), "('Select your whisper model', options=whisper_model_names)\n", (1228, 1286), True, 'import streamlit as st\n'), ((1297, 1332), 'streamlit.checkbox', 'st.checkbox', (['"""Use CUDA"""'], {'value': '(True)'}), "('Use CUDA', value=True)\n", (1308, 1332), True, 'import streamlit as st\n'), ((1416, 1475), 'streamlit.file_uploader', 'file_uploader', (['"""Choose a "PDF" file (now support only pdf)"""'], {}), '(\'Choose a "PDF" file (now support only pdf)\')\n', (1429, 1475), False, 'from streamlit import file_uploader\n'), ((1722, 1734), 'streamlit.divider', 'st.divider', ([], {}), '()\n', (1732, 1734), True, 'import streamlit as st\n'), ((2038, 2098), 'streamlit.button', 'st.button', (['"""Clear Chat History"""'], {'on_click': 'clear_chat_history'}), "('Clear Chat History', on_click=clear_chat_history)\n", (2047, 2098), True, 'import streamlit as st\n'), ((5259, 5282), 'streamlit.chat_input', 'st.chat_input', (['"""User: """'], {}), "('User: ')\n", (5272, 5282), True, 'import streamlit as st\n'), ((2212, 2298), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name'], {'cache_dir': '"""./model/"""', 'token': 'auth_token'}), "(model_name, cache_dir='./model/', token=\n auth_token)\n", (2241, 2298), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer\n'), ((2339, 2445), 'transformers.BitsAndBytesConfig', 'BitsAndBytesConfig', ([], {'load_in_4bit': '(True)', 'bnb_4bit_quant_type': '"""nf4"""', 'bnb_4bit_compute_dtype': 'torch.float16'}), "(load_in_4bit=True, bnb_4bit_quant_type='nf4',\n bnb_4bit_compute_dtype=torch.float16)\n", (2357, 2445), False, 'from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, TextStreamer\n'), ((3074, 3114), 'llama_index.prompts.prompts.SimpleInputPrompt', 'SimpleInputPrompt', (['"""{query_str} [/INST]"""'], {}), "('{query_str} [/INST]')\n", (3091, 3114), False, 'from 
llama_index.prompts.prompts import SimpleInputPrompt\n'), ((3125, 3297), 'llama_index.llms.HuggingFaceLLM', 'HuggingFaceLLM', ([], {'context_window': '(4096)', 'max_new_tokens': '(256)', 'system_prompt': 'system_prompt_', 'query_wrapper_prompt': 'query_wrapper_prompt', 'model': 'model', 'tokenizer': '_tokenizer'}), '(context_window=4096, max_new_tokens=256, system_prompt=\n system_prompt_, query_wrapper_prompt=query_wrapper_prompt, model=model,\n tokenizer=_tokenizer)\n', (3139, 3297), False, 'from llama_index.llms import HuggingFaceLLM\n'), ((3786, 3864), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(1024)', 'llm': 'llm', 'embed_model': 'embeddings'}), '(chunk_size=1024, llm=llm, embed_model=embeddings)\n', (3814, 3864), False, 'from llama_index import ServiceContext\n'), ((3899, 3942), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (3925, 3942), False, 'from llama_index import set_global_service_context\n'), ((4026, 4058), 'llama_index.download_loader', 'download_loader', (['"""PyMuPDFReader"""'], {}), "('PyMuPDFReader')\n", (4041, 4058), False, 'from llama_index import VectorStoreIndex, download_loader\n'), ((4407, 4449), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (4438, 4449), False, 'from llama_index import VectorStoreIndex, download_loader\n'), ((4720, 4753), 'whisper.load_model', 'whisper.load_model', (['model', 'device'], {}), '(model, device)\n', (4738, 4753), False, 'import whisper\n'), ((5332, 5401), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (5364, 5401), True, 'import streamlit as st\n'), ((1937, 1972), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""'], {}), "(data_root_path, 'pdf')\n", (1949, 1972), False, 'import os\n'), ((3607, 3659), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (3628, 3659), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((4115, 4150), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""'], {}), "(data_root_path, 'pdf')\n", (4127, 4150), False, 'import os\n'), ((5175, 5207), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5190, 5207), True, 'import streamlit as st\n'), ((5217, 5248), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (5228, 5248), True, 'import streamlit as st\n'), ((5411, 5434), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (5426, 5434), True, 'import streamlit as st\n'), ((5444, 5460), 'streamlit.write', 'st.write', (['prompt'], {}), '(prompt)\n', (5452, 5460), True, 'import streamlit as st\n'), ((5847, 5926), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': output_text}"], {}), "({'role': 'assistant', 'content': output_text})\n", (5879, 5926), True, 'import streamlit as st\n'), ((6157, 6236), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': output_text}"], {}), "({'role': 'assistant', 'content': output_text})\n", (6189, 6236), True, 'import streamlit as st\n'), ((424, 462), 
'os.path.join', 'os.path.join', (['data_root_path', 'filetype'], {}), '(data_root_path, filetype)\n', (436, 462), False, 'import os\n'), ((484, 522), 'os.path.join', 'os.path.join', (['data_root_path', 'filetype'], {}), '(data_root_path, filetype)\n', (496, 522), False, 'import os\n'), ((1539, 1593), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""', 'uploadedfile.name'], {}), "(data_root_path, 'pdf', uploadedfile.name)\n", (1551, 1593), False, 'import os\n'), ((1992, 2033), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""', 'file'], {}), "(data_root_path, 'pdf', file)\n", (2004, 2033), False, 'import os\n'), ((5648, 5676), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (5663, 5676), True, 'import streamlit as st\n'), ((5782, 5792), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (5790, 5792), True, 'import streamlit as st\n'), ((5951, 5979), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (5966, 5979), True, 'import streamlit as st\n'), ((6092, 6102), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (6100, 6102), True, 'import streamlit as st\n'), ((4339, 4380), 'os.path.join', 'os.path.join', (['data_root_path', '"""pdf"""', 'file'], {}), "(data_root_path, 'pdf', file)\n", (4351, 4380), False, 'import os\n'), ((2716, 2741), 'torch.cuda.mem_get_info', 'torch.cuda.mem_get_info', ([], {}), '()\n', (2739, 2741), False, 'import torch\n')] |
import streamlit as st
import langchain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain import OpenAI, VectorDBQA
from langchain.chains import RetrievalQAWithSourcesChain
import PyPDF2
# This function goes through each uploaded PDF, extracts the text of every page, and returns parallel lists of page texts and their source labels.
def read_and_textify(files):
text_list = []
sources_list = []
for file in files:
pdfReader = PyPDF2.PdfReader(file)
#print("Page Number:", len(pdfReader.pages))
for i in range(len(pdfReader.pages)):
pageObj = pdfReader.pages[i]
text = pageObj.extract_text()
pageObj.clear()
text_list.append(text)
sources_list.append(file.name + "_page_"+str(i))
return [text_list,sources_list]
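# Usage sketch (illustrative only): the two returned lists stay index-aligned, so
# sources_list[i] names the file and page that produced text_list[i], e.g.
#   texts, sources = read_and_textify(uploaded_files)
#   # sources -> ["report.pdf_page_0", "report.pdf_page_1", ...]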
st.set_page_config(layout="centered", page_title="Multidoc_QnA")
st.header("Multidoc_QnA")
st.write("---")
#file uploader
uploaded_files = st.file_uploader("Upload documents",accept_multiple_files=True, type=["txt","pdf"])
st.write("---")
if not uploaded_files:
  # with accept_multiple_files=True, st.file_uploader returns an empty list (not None) when nothing is uploaded
  st.info("Upload files to analyse")
else:
st.write(str(len(uploaded_files)) + " document(s) loaded..")
textify_output = read_and_textify(uploaded_files)
documents = textify_output[0]
sources = textify_output[1]
#extract embeddings
embeddings = OpenAIEmbeddings(openai_api_key = st.secrets["openai_api_key"])
  # vector store with metadata; each chunk's "source" records the file name and page number
vStore = Chroma.from_texts(documents, embeddings, metadatas=[{"source": s} for s in sources])
#deciding model
model_name = "gpt-3.5-turbo"
# model_name = "gpt-4"
retriever = vStore.as_retriever()
retriever.search_kwargs = {'k':2}
#initiate model
llm = OpenAI(model_name=model_name, openai_api_key = st.secrets["openai_api_key"], streaming=True)
model = RetrievalQAWithSourcesChain.from_chain_type(llm=llm, chain_type="stuff", retriever=retriever)
st.header("Ask your data")
user_q = st.text_area("Enter your questions here")
if st.button("Get Response"):
try:
with st.spinner("Model is working on it..."):
result = model({"question":user_q}, return_only_outputs=True)
st.subheader('Your response:')
st.write(result['answer'])
st.subheader('Source pages:')
st.write(result['sources'])
except Exception as e:
st.error(f"An error occurred: {e}")
st.error('Oops, the GPT response resulted in an error :( Please try again with a different question.')
| [
"langchain.chains.RetrievalQAWithSourcesChain.from_chain_type",
"langchain.vectorstores.Chroma.from_texts",
"langchain.OpenAI",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((868, 932), 'streamlit.set_page_config', 'st.set_page_config', ([], {'layout': '"""centered"""', 'page_title': '"""Multidoc_QnA"""'}), "(layout='centered', page_title='Multidoc_QnA')\n", (886, 932), True, 'import streamlit as st\n'), ((933, 958), 'streamlit.header', 'st.header', (['"""Multidoc_QnA"""'], {}), "('Multidoc_QnA')\n", (942, 958), True, 'import streamlit as st\n'), ((959, 974), 'streamlit.write', 'st.write', (['"""---"""'], {}), "('---')\n", (967, 974), True, 'import streamlit as st\n'), ((1010, 1100), 'streamlit.file_uploader', 'st.file_uploader', (['"""Upload documents"""'], {'accept_multiple_files': '(True)', 'type': "['txt', 'pdf']"}), "('Upload documents', accept_multiple_files=True, type=[\n 'txt', 'pdf'])\n", (1026, 1100), True, 'import streamlit as st\n'), ((1094, 1109), 'streamlit.write', 'st.write', (['"""---"""'], {}), "('---')\n", (1102, 1109), True, 'import streamlit as st\n'), ((1140, 1175), 'streamlit.info', 'st.info', (['f"""Upload files to analyse"""'], {}), "(f'Upload files to analyse')\n", (1147, 1175), True, 'import streamlit as st\n'), ((510, 532), 'PyPDF2.PdfReader', 'PyPDF2.PdfReader', (['file'], {}), '(file)\n', (526, 532), False, 'import PyPDF2\n'), ((1424, 1485), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'openai_api_key': "st.secrets['openai_api_key']"}), "(openai_api_key=st.secrets['openai_api_key'])\n", (1440, 1485), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((1557, 1645), 'langchain.vectorstores.Chroma.from_texts', 'Chroma.from_texts', (['documents', 'embeddings'], {'metadatas': "[{'source': s} for s in sources]"}), "(documents, embeddings, metadatas=[{'source': s} for s in\n sources])\n", (1574, 1645), False, 'from langchain.vectorstores import Chroma\n'), ((1816, 1910), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': 'model_name', 'openai_api_key': "st.secrets['openai_api_key']", 'streaming': '(True)'}), "(model_name=model_name, openai_api_key=st.secrets['openai_api_key'],\n streaming=True)\n", (1822, 1910), False, 'from langchain import OpenAI, VectorDBQA\n'), ((1919, 2016), 'langchain.chains.RetrievalQAWithSourcesChain.from_chain_type', 'RetrievalQAWithSourcesChain.from_chain_type', ([], {'llm': 'llm', 'chain_type': '"""stuff"""', 'retriever': 'retriever'}), "(llm=llm, chain_type='stuff',\n retriever=retriever)\n", (1962, 2016), False, 'from langchain.chains import RetrievalQAWithSourcesChain\n'), ((2018, 2044), 'streamlit.header', 'st.header', (['"""Ask your data"""'], {}), "('Ask your data')\n", (2027, 2044), True, 'import streamlit as st\n'), ((2056, 2097), 'streamlit.text_area', 'st.text_area', (['"""Enter your questions here"""'], {}), "('Enter your questions here')\n", (2068, 2097), True, 'import streamlit as st\n'), ((2106, 2131), 'streamlit.button', 'st.button', (['"""Get Response"""'], {}), "('Get Response')\n", (2115, 2131), True, 'import streamlit as st\n'), ((2153, 2192), 'streamlit.spinner', 'st.spinner', (['"""Model is working on it..."""'], {}), "('Model is working on it...')\n", (2163, 2192), True, 'import streamlit as st\n'), ((2272, 2302), 'streamlit.subheader', 'st.subheader', (['"""Your response:"""'], {}), "('Your response:')\n", (2284, 2302), True, 'import streamlit as st\n'), ((2311, 2337), 'streamlit.write', 'st.write', (["result['answer']"], {}), "(result['answer'])\n", (2319, 2337), True, 'import streamlit as st\n'), ((2346, 2375), 'streamlit.subheader', 'st.subheader', (['"""Source pages:"""'], {}), "('Source pages:')\n", (2358, 2375), True, 'import 
streamlit as st\n'), ((2384, 2411), 'streamlit.write', 'st.write', (["result['sources']"], {}), "(result['sources'])\n", (2392, 2411), True, 'import streamlit as st\n'), ((2445, 2480), 'streamlit.error', 'st.error', (['f"""An error occurred: {e}"""'], {}), "(f'An error occurred: {e}')\n", (2453, 2480), True, 'import streamlit as st\n'), ((2487, 2599), 'streamlit.error', 'st.error', (['"""Oops, the GPT response resulted in an error :( Please try again with a different question."""'], {}), "(\n 'Oops, the GPT response resulted in an error :( Please try again with a different question.'\n )\n", (2495, 2599), True, 'import streamlit as st\n')] |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
Type,
TypeVar,
Union,
cast,
)
from uuid import UUID, uuid4
import langchain
from langchain.callbacks.base import (
BaseCallbackHandler,
BaseCallbackManager,
ChainManagerMixin,
LLMManagerMixin,
RunManagerMixin,
ToolManagerMixin,
)
from langchain.callbacks.openai_info import OpenAICallbackHandler
from langchain.callbacks.stdout import StdOutCallbackHandler
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1
from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler
from langchain.callbacks.tracers.wandb import WandbTracer
from langchain.schema import (
AgentAction,
AgentFinish,
BaseMessage,
LLMResult,
get_buffer_string,
)
logger = logging.getLogger(__name__)
Callbacks = Optional[Union[List[BaseCallbackHandler], BaseCallbackManager]]
openai_callback_var: ContextVar[Optional[OpenAICallbackHandler]] = ContextVar(
"openai_callback", default=None
)
tracing_callback_var: ContextVar[
Optional[LangChainTracerV1]
] = ContextVar( # noqa: E501
"tracing_callback", default=None
)
wandb_tracing_callback_var: ContextVar[
Optional[WandbTracer]
] = ContextVar( # noqa: E501
"tracing_wandb_callback", default=None
)
tracing_v2_callback_var: ContextVar[
Optional[LangChainTracer]
] = ContextVar( # noqa: E501
"tracing_callback_v2", default=None
)
def _get_debug() -> bool:
return langchain.debug
@contextmanager
def get_openai_callback() -> Generator[OpenAICallbackHandler, None, None]:
"""Get OpenAI callback handler in a context manager."""
cb = OpenAICallbackHandler()
openai_callback_var.set(cb)
yield cb
openai_callback_var.set(None)
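# Usage sketch (assumes an `llm` callable such as langchain.llms.OpenAI is in scope):
#   with get_openai_callback() as cb:
#       llm("Tell me a joke")
#       print(cb.total_tokens, cb.total_cost)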
@contextmanager
def tracing_enabled(
session_name: str = "default",
) -> Generator[TracerSessionV1, None, None]:
"""Get Tracer in a context manager."""
cb = LangChainTracerV1()
session = cast(TracerSessionV1, cb.load_session(session_name))
tracing_callback_var.set(cb)
yield session
tracing_callback_var.set(None)
@contextmanager
def wandb_tracing_enabled(
session_name: str = "default",
) -> Generator[None, None, None]:
"""Get WandbTracer in a context manager."""
cb = WandbTracer()
wandb_tracing_callback_var.set(cb)
yield None
wandb_tracing_callback_var.set(None)
@contextmanager
def tracing_v2_enabled(
session_name: Optional[str] = None,
*,
example_id: Optional[Union[str, UUID]] = None,
) -> Generator[None, None, None]:
"""Get the experimental tracer handler in a context manager."""
# Issue a warning that this is experimental
warnings.warn(
"The tracing v2 API is in development. "
"This is not yet stable and may change in the future."
)
if isinstance(example_id, str):
example_id = UUID(example_id)
cb = LangChainTracer(
example_id=example_id,
session_name=session_name,
)
tracing_v2_callback_var.set(cb)
yield
tracing_v2_callback_var.set(None)
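# Usage sketch (assumes `chain` is any LangChain chain and the tracing backend is
# configured via the LANGCHAIN_* environment variables):
#   with tracing_v2_enabled(session_name="my-experiments"):
#       chain.run("...")  # runs inside the block are sent to the v2 tracer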
@contextmanager
def trace_as_chain_group(
group_name: str,
*,
session_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
) -> Generator[CallbackManager, None, None]:
"""Get a callback manager for a chain group in a context manager."""
cb = LangChainTracer(
session_name=session_name,
example_id=example_id,
)
cm = CallbackManager.configure(
inheritable_callbacks=[cb],
)
run_manager = cm.on_chain_start({"name": group_name}, {})
yield run_manager.get_child()
run_manager.on_chain_end({})
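# Usage sketch (assumes `chain` is any LangChain chain): pass the yielded manager as
# `callbacks` so every run in the block is grouped under one parent chain trace.
#   with trace_as_chain_group("ingest-docs") as group_manager:
#       chain.run(first_doc, callbacks=group_manager)
#       chain.run(second_doc, callbacks=group_manager)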
@asynccontextmanager
async def atrace_as_chain_group(
group_name: str,
*,
session_name: Optional[str] = None,
example_id: Optional[Union[str, UUID]] = None,
) -> AsyncGenerator[AsyncCallbackManager, None]:
"""Get a callback manager for a chain group in a context manager."""
cb = LangChainTracer(
session_name=session_name,
example_id=example_id,
)
cm = AsyncCallbackManager.configure(
inheritable_callbacks=[cb],
)
run_manager = await cm.on_chain_start({"name": group_name}, {})
try:
yield run_manager.get_child()
finally:
await run_manager.on_chain_end({})
def _handle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for CallbackManager."""
message_strings: Optional[List[str]] = None
for handler in handlers:
try:
if ignore_condition_name is None or not getattr(
handler, ignore_condition_name
):
getattr(handler, event_name)(*args, **kwargs)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
if message_strings is None:
message_strings = [get_buffer_string(m) for m in args[1]]
_handle_event(
[handler],
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
if handler.raise_error:
raise e
logger.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event_for_handler(
handler: BaseCallbackHandler,
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
try:
if ignore_condition_name is None or not getattr(handler, ignore_condition_name):
event = getattr(handler, event_name)
if asyncio.iscoroutinefunction(event):
await event(*args, **kwargs)
else:
await asyncio.get_event_loop().run_in_executor(
None, functools.partial(event, *args, **kwargs)
)
except NotImplementedError as e:
if event_name == "on_chat_model_start":
message_strings = [get_buffer_string(m) for m in args[1]]
await _ahandle_event_for_handler(
handler,
"on_llm_start",
"ignore_llm",
args[0],
message_strings,
*args[2:],
**kwargs,
)
else:
logger.warning(f"Error in {event_name} callback: {e}")
except Exception as e:
if handler.raise_error:
raise e
logger.warning(f"Error in {event_name} callback: {e}")
async def _ahandle_event(
handlers: List[BaseCallbackHandler],
event_name: str,
ignore_condition_name: Optional[str],
*args: Any,
**kwargs: Any,
) -> None:
"""Generic event handler for AsyncCallbackManager."""
await asyncio.gather(
*(
_ahandle_event_for_handler(
handler, event_name, ignore_condition_name, *args, **kwargs
)
for handler in handlers
)
)
BRM = TypeVar("BRM", bound="BaseRunManager")
class BaseRunManager(RunManagerMixin):
"""Base class for run manager (a bound callback manager)."""
def __init__(
self,
run_id: UUID,
handlers: List[BaseCallbackHandler],
inheritable_handlers: List[BaseCallbackHandler],
parent_run_id: Optional[UUID] = None,
) -> None:
"""Initialize run manager."""
self.run_id = run_id
self.handlers = handlers
self.inheritable_handlers = inheritable_handlers
self.parent_run_id = parent_run_id
@classmethod
def get_noop_manager(cls: Type[BRM]) -> BRM:
"""Return a manager that doesn't perform any operations."""
return cls(uuid4(), [], [])
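# Usage sketch: get_noop_manager() is handy in tests or when callbacks are disabled,
# e.g. run_manager = CallbackManagerForLLMRun.get_noop_manager()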
class RunManager(BaseRunManager):
"""Sync Run Manager."""
def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
_handle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncRunManager(BaseRunManager):
"""Async Run Manager."""
async def on_text(
self,
text: str,
**kwargs: Any,
) -> Any:
"""Run when text is received."""
await _ahandle_event(
self.handlers,
"on_text",
None,
text,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForLLMRun(RunManager, LLMManagerMixin):
"""Callback manager for LLM run."""
def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
_handle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token=token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
_handle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
_handle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForLLMRun(AsyncRunManager, LLMManagerMixin):
"""Async callback manager for LLM run."""
async def on_llm_new_token(
self,
token: str,
**kwargs: Any,
) -> None:
"""Run when LLM generates a new token."""
await _ahandle_event(
self.handlers,
"on_llm_new_token",
"ignore_llm",
token,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when LLM ends running."""
await _ahandle_event(
self.handlers,
"on_llm_end",
"ignore_llm",
response,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_llm_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when LLM errors."""
await _ahandle_event(
self.handlers,
"on_llm_error",
"ignore_llm",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForChainRun(RunManager, ChainManagerMixin):
"""Callback manager for chain run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
_handle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
_handle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
_handle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
_handle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForChainRun(AsyncRunManager, ChainManagerMixin):
"""Async callback manager for chain run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_chain_end(self, outputs: Dict[str, Any], **kwargs: Any) -> None:
"""Run when chain ends running."""
await _ahandle_event(
self.handlers,
"on_chain_end",
"ignore_chain",
outputs,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_chain_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when chain errors."""
await _ahandle_event(
self.handlers,
"on_chain_error",
"ignore_chain",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_action(self, action: AgentAction, **kwargs: Any) -> Any:
"""Run when agent action is received."""
await _ahandle_event(
self.handlers,
"on_agent_action",
"ignore_agent",
action,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_agent_finish(self, finish: AgentFinish, **kwargs: Any) -> Any:
"""Run when agent finish is received."""
await _ahandle_event(
self.handlers,
"on_agent_finish",
"ignore_agent",
finish,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManagerForToolRun(RunManager, ToolManagerMixin):
"""Callback manager for tool run."""
def get_child(self) -> CallbackManager:
"""Get a child callback manager."""
manager = CallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
def on_tool_end(
self,
output: str,
**kwargs: Any,
) -> None:
"""Run when tool ends running."""
_handle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
_handle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class AsyncCallbackManagerForToolRun(AsyncRunManager, ToolManagerMixin):
"""Async callback manager for tool run."""
def get_child(self) -> AsyncCallbackManager:
"""Get a child callback manager."""
manager = AsyncCallbackManager([], parent_run_id=self.run_id)
manager.set_handlers(self.inheritable_handlers)
return manager
async def on_tool_end(self, output: str, **kwargs: Any) -> None:
"""Run when tool ends running."""
await _ahandle_event(
self.handlers,
"on_tool_end",
"ignore_agent",
output,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
async def on_tool_error(
self,
error: Union[Exception, KeyboardInterrupt],
**kwargs: Any,
) -> None:
"""Run when tool errors."""
await _ahandle_event(
self.handlers,
"on_tool_error",
"ignore_agent",
error,
run_id=self.run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
class CallbackManager(BaseCallbackManager):
"""Callback manager that can be used to handle callbacks from langchain."""
def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
# Re-use the LLM Run Manager since the outputs are treated
# the same for now
return CallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForChainRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> CallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
_handle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return CallbackManagerForToolRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> CallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
class AsyncCallbackManager(BaseCallbackManager):
"""Async callback manager that can be used to handle callbacks from LangChain."""
@property
def is_async(self) -> bool:
"""Return whether the handler is async."""
return True
async def on_llm_start(
self,
serialized: Dict[str, Any],
prompts: List[str],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForLLMRun:
"""Run when LLM starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_llm_start",
"ignore_llm",
serialized,
prompts,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
async def on_chat_model_start(
self,
serialized: Dict[str, Any],
messages: List[List[BaseMessage]],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> Any:
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chat_model_start",
"ignore_chat_model",
serialized,
messages,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForLLMRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
async def on_chain_start(
self,
serialized: Dict[str, Any],
inputs: Dict[str, Any],
run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForChainRun:
"""Run when chain starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_chain_start",
"ignore_chain",
serialized,
inputs,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForChainRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
async def on_tool_start(
self,
serialized: Dict[str, Any],
input_str: str,
run_id: Optional[UUID] = None,
parent_run_id: Optional[UUID] = None,
**kwargs: Any,
) -> AsyncCallbackManagerForToolRun:
"""Run when tool starts running."""
if run_id is None:
run_id = uuid4()
await _ahandle_event(
self.handlers,
"on_tool_start",
"ignore_agent",
serialized,
input_str,
run_id=run_id,
parent_run_id=self.parent_run_id,
**kwargs,
)
return AsyncCallbackManagerForToolRun(
run_id, self.handlers, self.inheritable_handlers, self.parent_run_id
)
@classmethod
def configure(
cls,
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> AsyncCallbackManager:
"""Configure the callback manager."""
return _configure(cls, inheritable_callbacks, local_callbacks, verbose)
T = TypeVar("T", CallbackManager, AsyncCallbackManager)
def env_var_is_set(env_var: str) -> bool:
"""Check if an environment variable is set."""
return env_var in os.environ and os.environ[env_var] not in (
"",
"0",
"false",
"False",
)
def _configure(
callback_manager_cls: Type[T],
inheritable_callbacks: Callbacks = None,
local_callbacks: Callbacks = None,
verbose: bool = False,
) -> T:
"""Configure the callback manager."""
callback_manager = callback_manager_cls([])
if inheritable_callbacks or local_callbacks:
if isinstance(inheritable_callbacks, list) or inheritable_callbacks is None:
inheritable_callbacks_ = inheritable_callbacks or []
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks_.copy(),
inheritable_handlers=inheritable_callbacks_.copy(),
)
else:
callback_manager = callback_manager_cls(
handlers=inheritable_callbacks.handlers,
inheritable_handlers=inheritable_callbacks.inheritable_handlers,
parent_run_id=inheritable_callbacks.parent_run_id,
)
local_handlers_ = (
local_callbacks
if isinstance(local_callbacks, list)
else (local_callbacks.handlers if local_callbacks else [])
)
for handler in local_handlers_:
callback_manager.add_handler(handler, False)
tracer = tracing_callback_var.get()
wandb_tracer = wandb_tracing_callback_var.get()
open_ai = openai_callback_var.get()
tracing_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING")
or tracer is not None
or env_var_is_set("LANGCHAIN_HANDLER")
)
wandb_tracing_enabled_ = (
env_var_is_set("LANGCHAIN_WANDB_TRACING") or wandb_tracer is not None
)
tracer_v2 = tracing_v2_callback_var.get()
tracing_v2_enabled_ = (
env_var_is_set("LANGCHAIN_TRACING_V2") or tracer_v2 is not None
)
tracer_session = os.environ.get("LANGCHAIN_SESSION")
debug = _get_debug()
if tracer_session is None:
tracer_session = "default"
if (
verbose
or debug
or tracing_enabled_
or tracing_v2_enabled_
or wandb_tracing_enabled_
or open_ai is not None
):
if verbose and not any(
isinstance(handler, StdOutCallbackHandler)
for handler in callback_manager.handlers
):
if debug:
pass
else:
callback_manager.add_handler(StdOutCallbackHandler(), False)
if debug and not any(
isinstance(handler, ConsoleCallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(ConsoleCallbackHandler(), True)
if tracing_enabled_ and not any(
isinstance(handler, LangChainTracerV1)
for handler in callback_manager.handlers
):
if tracer:
callback_manager.add_handler(tracer, True)
else:
handler = LangChainTracerV1()
handler.load_session(tracer_session)
callback_manager.add_handler(handler, True)
if wandb_tracing_enabled_ and not any(
isinstance(handler, WandbTracer) for handler in callback_manager.handlers
):
if wandb_tracer:
callback_manager.add_handler(wandb_tracer, True)
else:
handler = WandbTracer()
callback_manager.add_handler(handler, True)
if tracing_v2_enabled_ and not any(
isinstance(handler, LangChainTracer)
for handler in callback_manager.handlers
):
if tracer_v2:
callback_manager.add_handler(tracer_v2, True)
else:
try:
handler = LangChainTracer(session_name=tracer_session)
callback_manager.add_handler(handler, True)
except Exception as e:
logger.warning(
"Unable to load requested LangChainTracer."
" To disable this warning,"
" unset the LANGCHAIN_TRACING_V2 environment variables.",
e,
)
if open_ai is not None and not any(
isinstance(handler, OpenAICallbackHandler)
for handler in callback_manager.handlers
):
callback_manager.add_handler(open_ai, True)
return callback_manager
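# Usage sketch of the configuration flow (illustrative; `callbacks` and `inputs` are
# whatever the caller provides): a chain typically builds its manager and starts a run with
#   manager = CallbackManager.configure(inheritable_callbacks=callbacks, verbose=True)
#   run_manager = manager.on_chain_start({"name": "my_chain"}, inputs)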
| [
"langchain.schema.get_buffer_string",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.tracers.langchain_v1.LangChainTracerV1"
] | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((1286, 1329), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1296, 1329), False, 'from contextvars import ContextVar\n'), ((1406, 1450), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback"""'], {'default': 'None'}), "('tracing_callback', default=None)\n", (1416, 1450), False, 'from contextvars import ContextVar\n'), ((1541, 1591), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_wandb_callback"""'], {'default': 'None'}), "('tracing_wandb_callback', default=None)\n", (1551, 1591), False, 'from contextvars import ContextVar\n'), ((1684, 1731), 'contextvars.ContextVar', 'ContextVar', (['"""tracing_callback_v2"""'], {'default': 'None'}), "('tracing_callback_v2', default=None)\n", (1694, 1731), False, 'from contextvars import ContextVar\n'), ((7547, 7585), 'typing.TypeVar', 'TypeVar', (['"""BRM"""'], {'bound': '"""BaseRunManager"""'}), "('BRM', bound='BaseRunManager')\n", (7554, 7585), False, 'from typing import Any, AsyncGenerator, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((24466, 24517), 'typing.TypeVar', 'TypeVar', (['"""T"""', 'CallbackManager', 'AsyncCallbackManager'], {}), "('T', CallbackManager, AsyncCallbackManager)\n", (24473, 24517), False, 'from typing import Any, AsyncGenerator, Dict, Generator, List, Optional, Type, TypeVar, Union, cast\n'), ((1969, 1992), 'langchain.callbacks.openai_info.OpenAICallbackHandler', 'OpenAICallbackHandler', ([], {}), '()\n', (1990, 1992), False, 'from langchain.callbacks.openai_info import OpenAICallbackHandler\n'), ((2243, 2262), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (2260, 2262), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((2587, 2600), 'langchain.callbacks.tracers.wandb.WandbTracer', 'WandbTracer', ([], {}), '()\n', (2598, 2600), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((2990, 3107), 'warnings.warn', 'warnings.warn', (['"""The tracing v2 API is in development. This is not yet stable and may change in the future."""'], {}), "(\n 'The tracing v2 API is in development. 
This is not yet stable and may change in the future.'\n )\n", (3003, 3107), False, 'import warnings\n'), ((3206, 3271), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'example_id': 'example_id', 'session_name': 'session_name'}), '(example_id=example_id, session_name=session_name)\n', (3221, 3271), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((3669, 3734), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'session_name', 'example_id': 'example_id'}), '(session_name=session_name, example_id=example_id)\n', (3684, 3734), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((4272, 4337), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'session_name', 'example_id': 'example_id'}), '(session_name=session_name, example_id=example_id)\n', (4287, 4337), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((26539, 26574), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_SESSION"""'], {}), "('LANGCHAIN_SESSION')\n", (26553, 26574), False, 'import os\n'), ((3180, 3196), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (3184, 3196), False, 'from uuid import UUID, uuid4\n'), ((6199, 6233), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['event'], {}), '(event)\n', (6226, 6233), False, 'import asyncio\n'), ((8264, 8271), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (8269, 8271), False, 'from uuid import UUID, uuid4\n'), ((18037, 18044), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18042, 18044), False, 'from uuid import UUID, uuid4\n'), ((18744, 18751), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (18749, 18751), False, 'from uuid import UUID, uuid4\n'), ((19547, 19554), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (19552, 19554), False, 'from uuid import UUID, uuid4\n'), ((20282, 20289), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (20287, 20289), False, 'from uuid import UUID, uuid4\n'), ((21564, 21571), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (21569, 21571), False, 'from uuid import UUID, uuid4\n'), ((22225, 22232), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (22230, 22232), False, 'from uuid import UUID, uuid4\n'), ((22958, 22965), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (22963, 22965), False, 'from uuid import UUID, uuid4\n'), ((23716, 23723), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (23721, 23723), False, 'from uuid import UUID, uuid4\n'), ((27319, 27343), 'langchain.callbacks.tracers.stdout.ConsoleCallbackHandler', 'ConsoleCallbackHandler', ([], {}), '()\n', (27341, 27343), False, 'from langchain.callbacks.tracers.stdout import ConsoleCallbackHandler\n'), ((27633, 27652), 'langchain.callbacks.tracers.langchain_v1.LangChainTracerV1', 'LangChainTracerV1', ([], {}), '()\n', (27650, 27652), False, 'from langchain.callbacks.tracers.langchain_v1 import LangChainTracerV1, TracerSessionV1\n'), ((28048, 28061), 'langchain.callbacks.tracers.wandb.WandbTracer', 'WandbTracer', ([], {}), '()\n', (28059, 28061), False, 'from langchain.callbacks.tracers.wandb import WandbTracer\n'), ((6564, 6584), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (6581, 6584), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((27096, 27119), 'langchain.callbacks.stdout.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (27117, 27119), False, 'from langchain.callbacks.stdout import 
StdOutCallbackHandler\n'), ((28436, 28480), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'session_name': 'tracer_session'}), '(session_name=tracer_session)\n', (28451, 28480), False, 'from langchain.callbacks.tracers.langchain import LangChainTracer\n'), ((6388, 6429), 'functools.partial', 'functools.partial', (['event', '*args'], {}), '(event, *args, **kwargs)\n', (6405, 6429), False, 'import functools\n'), ((5291, 5311), 'langchain.schema.get_buffer_string', 'get_buffer_string', (['m'], {}), '(m)\n', (5308, 5311), False, 'from langchain.schema import AgentAction, AgentFinish, BaseMessage, LLMResult, get_buffer_string\n'), ((6320, 6344), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (6342, 6344), False, 'import asyncio\n')] |
import langchain
from langchain.llms import VertexAI
from langchain.prompts import PromptTemplate, load_prompt
import wandb
from wandb.integration.langchain import WandbTracer
import streamlit as st
from google.oauth2 import service_account
# account_info = dict(st.secrets["GOOGLE_APPLICATION_CREDENTIALS"])
# credentials = service_account.Credentials.from_service_account_info(account_info)
def generate_prd_v3_palm(new_feature, new_feature_desc, wandb_name):
wandb.login(key=st.secrets["WANDB_API_KEY"])
wandb.init(
project="generate_prd_v3_palm",
config={
"model": "text-bison-001",
"temperature": 0.2
},
entity="arihantsheth",
name=wandb_name,
)
# llm = VertexAI(credentials=credentials, max_output_tokens=1024)
llm = VertexAI(project="synap-labs-390404", location="us-central1", credentials=dict(
st.secrets["GOOGLE_APPLICATION_CREDENTIALS"]), max_output_tokens=1024)
prompt_template = load_prompt("prompt_templates/generate_prd_template_v2.json") # For deployment
# prompt_template = load_prompt("../prompt_templates/generate_prd_template_v3.json") # For local testing
prompt = prompt_template.format(
new_feature=new_feature, new_feature_desc=new_feature_desc)
try:
output = llm(prompt, callbacks=[WandbTracer()])
except Exception as e:
print("GCP Authentication error")
print(e)
return
# with open(f"./generated_prds/{new_feature}_prd_v3_palm.md", "w") as f: # For deployment
# # with open(f"../generated_prds/{new_feature}_prd_palm.md", "w") as f: # For local testing
# f.write(output)
wandb.finish()
return output
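# Usage sketch (illustrative arguments; requires valid GCP and W&B credentials):
#   prd_text = generate_prd_v3_palm(
#       new_feature="Dark mode",
#       new_feature_desc="Let users switch between light and dark themes",
#       wandb_name="dark-mode-run",
#   )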
| [
"langchain.prompts.load_prompt"
] | [((469, 513), 'wandb.login', 'wandb.login', ([], {'key': "st.secrets['WANDB_API_KEY']"}), "(key=st.secrets['WANDB_API_KEY'])\n", (480, 513), False, 'import wandb\n'), ((519, 666), 'wandb.init', 'wandb.init', ([], {'project': '"""generate_prd_v3_palm"""', 'config': "{'model': 'text-bison-001', 'temperature': 0.2}", 'entity': '"""arihantsheth"""', 'name': 'wandb_name'}), "(project='generate_prd_v3_palm', config={'model':\n 'text-bison-001', 'temperature': 0.2}, entity='arihantsheth', name=\n wandb_name)\n", (529, 666), False, 'import wandb\n'), ((993, 1054), 'langchain.prompts.load_prompt', 'load_prompt', (['"""prompt_templates/generate_prd_template_v2.json"""'], {}), "('prompt_templates/generate_prd_template_v2.json')\n", (1004, 1054), False, 'from langchain.prompts import PromptTemplate, load_prompt\n'), ((1679, 1693), 'wandb.finish', 'wandb.finish', ([], {}), '()\n', (1691, 1693), False, 'import wandb\n'), ((1339, 1352), 'wandb.integration.langchain.WandbTracer', 'WandbTracer', ([], {}), '()\n', (1350, 1352), False, 'from wandb.integration.langchain import WandbTracer\n')] |
#!/usr/bin/env python
# coding: utf-8
# # LangChain: Agents
#
# ## Outline:
#
# * Using built in LangChain tools: DuckDuckGo search and Wikipedia
# * Defining your own tools
# In[ ]:
import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
import warnings
warnings.filterwarnings("ignore")
# Note: LLMs do not always produce the same results. When executing the code in your notebook, you may get slightly different answers than those in the video.
# In[ ]:
# account for deprecation of LLM model
import datetime
# Get the current date
current_date = datetime.datetime.now().date()
# Define the date after which the model should be set to "gpt-3.5-turbo"
target_date = datetime.date(2024, 6, 12)
# Set the model variable based on the current date
if current_date > target_date:
llm_model = "gpt-3.5-turbo"
else:
llm_model = "gpt-3.5-turbo-0301"
# ## Built-in LangChain tools
# In[ ]:
#!pip install -U wikipedia
# In[ ]:
from langchain.agents.agent_toolkits import create_python_agent
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentType
from langchain.tools.python.tool import PythonREPLTool
from langchain.python import PythonREPL
from langchain.chat_models import ChatOpenAI
# In[ ]:
llm = ChatOpenAI(temperature=0, model=llm_model)
# In[ ]:
tools = load_tools(["llm-math","wikipedia"], llm=llm)
# In[ ]:
agent= initialize_agent(
tools,
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
handle_parsing_errors=True,
verbose = True)
# In[ ]:
agent("What is the 25% of 300?")
# ## Wikipedia example
# In[ ]:
question = "Tom M. Mitchell is an American computer scientist \
and the Founders University Professor at Carnegie Mellon University (CMU) \
what book did he write?"
result = agent(question)
# ## Python Agent
# In[ ]:
agent = create_python_agent(
llm,
tool=PythonREPLTool(),
verbose=True
)
# In[ ]:
customer_list = [["Harrison", "Chase"],
["Lang", "Chain"],
["Dolly", "Too"],
["Elle", "Elem"],
["Geoff","Fusion"],
["Trance","Former"],
["Jen","Ayai"]
]
# In[ ]:
agent.run(f"""Sort these customers by \
last name and then first name \
and print the output: {customer_list}""")
# #### View detailed outputs of the chains
# In[ ]:
import langchain
langchain.debug=True
agent.run(f"""Sort these customers by \
last name and then first name \
and print the output: {customer_list}""")
langchain.debug=False
# ## Define your own tool
# In[ ]:
#!pip install DateTime
# In[ ]:
from langchain.agents import tool
from datetime import date
# In[ ]:
@tool
def time(text: str) -> str:
    """Returns today's date; use this for any \
    questions related to knowing today's date. \
    The input should always be an empty string, \
    and this function will always return today's \
    date - any date mathematics should occur \
    outside this function."""
return str(date.today())
# In[ ]:
agent= initialize_agent(
tools + [time],
llm,
agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION,
handle_parsing_errors=True,
verbose = True)
# **Note**:
#
# The agent will sometimes come to the wrong conclusion (agents are a work in progress!).
#
# If it does, please try running it again.
# In[ ]:
try:
result = agent("whats the date today?")
except:
print("exception on external access")
# Reminder: Download your notebook to you local computer to save your work.
# In[ ]:
| [
"langchain.agents.initialize_agent",
"langchain.tools.python.tool.PythonREPLTool",
"langchain.agents.load_tools",
"langchain.chat_models.ChatOpenAI"
] | [((315, 348), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (338, 348), False, 'import warnings\n'), ((735, 761), 'datetime.date', 'datetime.date', (['(2024)', '(6)', '(12)'], {}), '(2024, 6, 12)\n', (748, 761), False, 'import datetime\n'), ((1324, 1366), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': 'llm_model'}), '(temperature=0, model=llm_model)\n', (1334, 1366), False, 'from langchain.chat_models import ChatOpenAI\n'), ((1388, 1434), 'langchain.agents.load_tools', 'load_tools', (["['llm-math', 'wikipedia']"], {'llm': 'llm'}), "(['llm-math', 'wikipedia'], llm=llm)\n", (1398, 1434), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((1454, 1579), 'langchain.agents.initialize_agent', 'initialize_agent', (['tools', 'llm'], {'agent': 'AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'handle_parsing_errors': '(True)', 'verbose': '(True)'}), '(tools, llm, agent=AgentType.\n CHAT_ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True, verbose=True)\n', (1470, 1579), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((3139, 3273), 'langchain.agents.initialize_agent', 'initialize_agent', (['(tools + [time])', 'llm'], {'agent': 'AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION', 'handle_parsing_errors': '(True)', 'verbose': '(True)'}), '(tools + [time], llm, agent=AgentType.\n CHAT_ZERO_SHOT_REACT_DESCRIPTION, handle_parsing_errors=True, verbose=True)\n', (3155, 3273), False, 'from langchain.agents import load_tools, initialize_agent\n'), ((260, 273), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (271, 273), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((616, 639), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (637, 639), False, 'import datetime\n'), ((1952, 1968), 'langchain.tools.python.tool.PythonREPLTool', 'PythonREPLTool', ([], {}), '()\n', (1966, 1968), False, 'from langchain.tools.python.tool import PythonREPLTool\n'), ((3105, 3117), 'datetime.date.today', 'date.today', ([], {}), '()\n', (3115, 3117), False, 'from datetime import date\n')] |
import sys
import pandas as pd
from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex
from llama_index.indices.base import BaseIndex
from llama_index.storage.docstore import SimpleDocumentStore
from llama_index.storage.index_store import SimpleIndexStore
from llama_index.vector_stores import SimpleVectorStore
from config import (
API_KEY,
DEPLOYMENT_NAME,
MODEL_NAME,
API_BASE,
API_VERSION,
EMBEDDING_MODEL,
EMBEDDING_DEPLOYMENT,
)
class LlamaQueryEngine:
def __init__(
self,
api_key=API_KEY,
deployment_name=DEPLOYMENT_NAME,
model_name=MODEL_NAME,
api_base=API_BASE,
api_version=API_VERSION,
embedding_model=EMBEDDING_MODEL,
embedding_deployment=EMBEDDING_DEPLOYMENT,
):
import openai
import logging
import os
from langchain.embeddings import OpenAIEmbeddings
from llama_index.llms import AzureOpenAI
from llama_index import LangchainEmbedding
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
openai.api_type = "azure"
openai.api_base = api_base
openai.api_version = api_version
os.environ["OPENAI_API_KEY"] = api_key
openai.api_key = os.getenv("OPENAI_API_KEY")
llm = AzureOpenAI(
deployment_name=deployment_name,
model=model_name,
temperature=0,
engine="gpt35",
max_tokens=2048
)
embedding_llm = LangchainEmbedding(
OpenAIEmbeddings(
model=embedding_model,
deployment=embedding_deployment,
openai_api_key=openai.api_key,
openai_api_base=openai.api_base,
openai_api_type=openai.api_type,
openai_api_version=openai.api_version,
),
embed_batch_size=1,
)
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embedding_llm,
)
set_global_service_context(service_context)
# index = VectorStoreIndex.from_documents(documents)
# self.index = index
# self.query_engine = index.as_query_engine()
self.index = None
self.query_engine = None
def load_doc_from_csv(self, csv_path, text_column="decoded_readme", max_docs=20, is_persist=False, has_persist=False, persist_dir="app/data/persist"):
if has_persist:
self.retrieve_index(persist_dir)
return
df = pd.read_csv(csv_path)
text_list = df[text_column].tolist()
text_list = text_list[:max_docs]
documents = [Document(text=t) for t in text_list]
index = VectorStoreIndex.from_documents(documents)
self.index = index
from llama_index.indices.postprocessor import SimilarityPostprocessor
from llama_index.query_engine import RetrieverQueryEngine
from llama_index.indices.vector_store import VectorIndexRetriever
from llama_index import get_response_synthesizer
# configure retriever
retriever = VectorIndexRetriever(
index=index,
similarity_top_k=2,
)
# configure response synthesizer
response_synthesizer = get_response_synthesizer()
# assemble query engine
query_engine = RetrieverQueryEngine(
retriever=retriever,
response_synthesizer=response_synthesizer,
node_postprocessors=[
SimilarityPostprocessor(similarity_cutoff=0.7)
]
)
self.query_engine = query_engine
# self.query_engine = index.as_query_engine()
if is_persist:
self.persist_index(persist_dir)
def retrieve_index(self, persist_dir):
storage_context = StorageContext.from_defaults(
docstore=SimpleDocumentStore.from_persist_dir(persist_dir=persist_dir),
vector_store=SimpleVectorStore.from_persist_dir(persist_dir=persist_dir),
index_store=SimpleIndexStore.from_persist_dir(persist_dir=persist_dir),
)
self.index = load_index_from_storage(storage_context)
self.query_engine = self.index.as_query_engine()
def persist_index(self, persist_dir):
self.index.storage_context.persist(persist_dir=persist_dir)
def query(self, query_text):
if not self.query_engine:
raise Exception("No query engine loaded")
return self.query_engine.query(query_text)
def get_index(self):
return self.index
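# Usage sketch (illustrative path and column name; requires the Azure OpenAI settings from config):
#   engine = LlamaQueryEngine()
#   engine.load_doc_from_csv("app/data/repos.csv", text_column="decoded_readme", max_docs=20)
#   print(engine.query("Which repositories describe a RAG pipeline?"))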
| [
"langchain.embeddings.OpenAIEmbeddings"
] | [((1194, 1252), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (1213, 1252), False, 'import logging\n'), ((1435, 1462), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1444, 1462), False, 'import os\n'), ((1478, 1593), 'llama_index.llms.AzureOpenAI', 'AzureOpenAI', ([], {'deployment_name': 'deployment_name', 'model': 'model_name', 'temperature': '(0)', 'engine': '"""gpt35"""', 'max_tokens': '(2048)'}), "(deployment_name=deployment_name, model=model_name, temperature=\n 0, engine='gpt35', max_tokens=2048)\n", (1489, 1593), False, 'from llama_index.llms import AzureOpenAI\n'), ((2106, 2170), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embedding_llm'}), '(llm=llm, embed_model=embedding_llm)\n', (2134, 2170), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((2215, 2258), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (2241, 2258), False, 'from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex\n'), ((2719, 2740), 'pandas.read_csv', 'pd.read_csv', (['csv_path'], {}), '(csv_path)\n', (2730, 2740), True, 'import pandas as pd\n'), ((2901, 2943), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {}), '(documents)\n', (2932, 2943), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((3298, 3351), 'llama_index.indices.vector_store.VectorIndexRetriever', 'VectorIndexRetriever', ([], {'index': 'index', 'similarity_top_k': '(2)'}), '(index=index, similarity_top_k=2)\n', (3318, 3351), False, 'from llama_index.indices.vector_store import VectorIndexRetriever\n'), ((3460, 3486), 'llama_index.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (3484, 3486), False, 'from llama_index import get_response_synthesizer\n'), ((4321, 4361), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (4344, 4361), False, 'from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex\n'), ((1716, 1932), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'model': 'embedding_model', 'deployment': 'embedding_deployment', 'openai_api_key': 'openai.api_key', 'openai_api_base': 'openai.api_base', 'openai_api_type': 'openai.api_type', 'openai_api_version': 'openai.api_version'}), '(model=embedding_model, deployment=embedding_deployment,\n openai_api_key=openai.api_key, openai_api_base=openai.api_base,\n openai_api_type=openai.api_type, openai_api_version=openai.api_version)\n', (1732, 1932), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2848, 2864), 'llama_index.Document', 'Document', ([], {'text': 't'}), '(text=t)\n', (2856, 2864), False, 'from llama_index import Document, set_global_service_context, StorageContext, load_index_from_storage, VectorStoreIndex\n'), ((4057, 4118), 'llama_index.storage.docstore.SimpleDocumentStore.from_persist_dir', 'SimpleDocumentStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (4093, 4118), False, 'from llama_index.storage.docstore import SimpleDocumentStore\n'), ((4145, 4204), 
'llama_index.vector_stores.SimpleVectorStore.from_persist_dir', 'SimpleVectorStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (4179, 4204), False, 'from llama_index.vector_stores import SimpleVectorStore\n'), ((4230, 4288), 'llama_index.storage.index_store.SimpleIndexStore.from_persist_dir', 'SimpleIndexStore.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (4263, 4288), False, 'from llama_index.storage.index_store import SimpleIndexStore\n'), ((3703, 3749), 'llama_index.indices.postprocessor.SimilarityPostprocessor', 'SimilarityPostprocessor', ([], {'similarity_cutoff': '(0.7)'}), '(similarity_cutoff=0.7)\n', (3726, 3749), False, 'from llama_index.indices.postprocessor import SimilarityPostprocessor\n')] |
import arxiv
import openai
import langchain
import pinecone
from langchain_community.document_loaders import ArxivLoader
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain import PromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.chains.question_answering import load_qa_chain
from langchain import OpenAI
from utils import *
import streamlit as st
import os
from dotenv import load_dotenv
load_dotenv()
openai_api_key = os.getenv('OPENAI_API_KEY')
pinecone_api_key = os.getenv('PINECONE_API_KEY')
environment = os.getenv('PINECONE_ENV')
llm_summary = ChatOpenAI(temperature=0.3, model_name="gpt-3.5-turbo-0125")
llm = OpenAI(model_name="gpt-3.5-turbo-0125", temperature=0.6, api_key=openai_api_key)
if 'summary' not in st.session_state:
st.session_state.summary = None
if 'documents' not in st.session_state:
st.session_state.documents = None
st.title('Arxiv Paper Summarizer and Interactive Q&A')
paper_id_input = st.text_input('Enter Arxiv Paper ID', '')
if st.button('Summarize Paper') and paper_id_input:
with st.spinner('Fetching and summarizing the paper...'):
try:
doc = arxiv_loader(paper_id=paper_id_input)
st.session_state.documents = chunk_data(docs=doc)
# st.write(st.session_state.documents)
chain = load_summarize_chain(
llm=llm_summary,
chain_type='map_reduce',
verbose=False
)
summary = chain.run(st.session_state.documents)
st.subheader('Summary')
st.write(summary)
except Exception as e:
st.error(f"An error occurred: {e}")
def initialize_index(index_name='arxiv-summarizer'):
# documents = chunk_data(docs=doc)
embeddings = OpenAIEmbeddings(api_key=openai_api_key)
    # Initialise the Pinecone client; the environment must match the index's region
pinecone.Pinecone(
api_key=pinecone_api_key,
environment=environment
)
if st.session_state.documents:
index = Pinecone.from_documents(st.session_state.documents, embeddings, index_name=index_name)
else:
index = None
return index
index = initialize_index()
def retrieve_query(query, k=2):
matching_results = index.similarity_search(query, k=k)
return matching_results
def retrieve_answers(query):
chain = load_qa_chain(llm, chain_type='stuff')
doc_search = retrieve_query(query)
print(doc_search)
response = chain.run(input_documents=doc_search, question=query)
return response
if paper_id_input:
user_query = st.text_input("Ask a question about the paper:", '')
if user_query:
if st.button('Get Answer'):
with st.spinner('Retrieving your answer...'):
try:
answer = retrieve_answers(user_query)
st.subheader('Answer')
st.write(answer)
except Exception as e:
st.error(f"An error occurred while retrieving the answer: {e}")
| [
"langchain.chains.question_answering.load_qa_chain",
"langchain.chains.summarize.load_summarize_chain",
"langchain.vectorstores.Pinecone.from_documents",
"langchain.chat_models.ChatOpenAI",
"langchain.OpenAI",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((690, 703), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (701, 703), False, 'from dotenv import load_dotenv\n'), ((722, 749), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (731, 749), False, 'import os\n'), ((769, 798), 'os.getenv', 'os.getenv', (['"""PINECONE_API_KEY"""'], {}), "('PINECONE_API_KEY')\n", (778, 798), False, 'import os\n'), ((813, 838), 'os.getenv', 'os.getenv', (['"""PINECONE_ENV"""'], {}), "('PINECONE_ENV')\n", (822, 838), False, 'import os\n'), ((854, 914), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.3)', 'model_name': '"""gpt-3.5-turbo-0125"""'}), "(temperature=0.3, model_name='gpt-3.5-turbo-0125')\n", (864, 914), False, 'from langchain.chat_models import ChatOpenAI\n'), ((925, 1010), 'langchain.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-0125"""', 'temperature': '(0.6)', 'api_key': 'openai_api_key'}), "(model_name='gpt-3.5-turbo-0125', temperature=0.6, api_key=openai_api_key\n )\n", (931, 1010), False, 'from langchain import OpenAI\n'), ((1162, 1216), 'streamlit.title', 'st.title', (['"""Arxiv Paper Summarizer and Interactive Q&A"""'], {}), "('Arxiv Paper Summarizer and Interactive Q&A')\n", (1170, 1216), True, 'import streamlit as st\n'), ((1235, 1276), 'streamlit.text_input', 'st.text_input', (['"""Enter Arxiv Paper ID"""', '""""""'], {}), "('Enter Arxiv Paper ID', '')\n", (1248, 1276), True, 'import streamlit as st\n'), ((1281, 1309), 'streamlit.button', 'st.button', (['"""Summarize Paper"""'], {}), "('Summarize Paper')\n", (1290, 1309), True, 'import streamlit as st\n'), ((2076, 2116), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'api_key': 'openai_api_key'}), '(api_key=openai_api_key)\n', (2092, 2116), False, 'from langchain.embeddings.openai import OpenAIEmbeddings\n'), ((2239, 2307), 'pinecone.Pinecone', 'pinecone.Pinecone', ([], {'api_key': 'pinecone_api_key', 'environment': 'environment'}), '(api_key=pinecone_api_key, environment=environment)\n', (2256, 2307), False, 'import pinecone\n'), ((2706, 2744), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['llm'], {'chain_type': '"""stuff"""'}), "(llm, chain_type='stuff')\n", (2719, 2744), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((2932, 2984), 'streamlit.text_input', 'st.text_input', (['"""Ask a question about the paper:"""', '""""""'], {}), "('Ask a question about the paper:', '')\n", (2945, 2984), True, 'import streamlit as st\n'), ((1339, 1390), 'streamlit.spinner', 'st.spinner', (['"""Fetching and summarizing the paper..."""'], {}), "('Fetching and summarizing the paper...')\n", (1349, 1390), True, 'import streamlit as st\n'), ((2381, 2472), 'langchain.vectorstores.Pinecone.from_documents', 'Pinecone.from_documents', (['st.session_state.documents', 'embeddings'], {'index_name': 'index_name'}), '(st.session_state.documents, embeddings, index_name=\n index_name)\n', (2404, 2472), False, 'from langchain.vectorstores import Pinecone\n'), ((3020, 3043), 'streamlit.button', 'st.button', (['"""Get Answer"""'], {}), "('Get Answer')\n", (3029, 3043), True, 'import streamlit as st\n'), ((1593, 1670), 'langchain.chains.summarize.load_summarize_chain', 'load_summarize_chain', ([], {'llm': 'llm_summary', 'chain_type': '"""map_reduce"""', 'verbose': '(False)'}), "(llm=llm_summary, chain_type='map_reduce', verbose=False)\n", (1613, 1670), False, 'from langchain.chains.summarize import load_summarize_chain\n'), ((1821, 1844), 'streamlit.subheader', 
'st.subheader', (['"""Summary"""'], {}), "('Summary')\n", (1833, 1844), True, 'import streamlit as st\n'), ((1857, 1874), 'streamlit.write', 'st.write', (['summary'], {}), '(summary)\n', (1865, 1874), True, 'import streamlit as st\n'), ((1918, 1953), 'streamlit.error', 'st.error', (['f"""An error occurred: {e}"""'], {}), "(f'An error occurred: {e}')\n", (1926, 1953), True, 'import streamlit as st\n'), ((3062, 3101), 'streamlit.spinner', 'st.spinner', (['"""Retrieving your answer..."""'], {}), "('Retrieving your answer...')\n", (3072, 3101), True, 'import streamlit as st\n'), ((3202, 3224), 'streamlit.subheader', 'st.subheader', (['"""Answer"""'], {}), "('Answer')\n", (3214, 3224), True, 'import streamlit as st\n'), ((3245, 3261), 'streamlit.write', 'st.write', (['answer'], {}), '(answer)\n', (3253, 3261), True, 'import streamlit as st\n'), ((3321, 3384), 'streamlit.error', 'st.error', (['f"""An error occurred while retrieving the answer: {e}"""'], {}), "(f'An error occurred while retrieving the answer: {e}')\n", (3329, 3384), True, 'import streamlit as st\n')] |
"""Create a ChatVectorDBChain for question/answering."""
from langchain.callbacks.manager import AsyncCallbackManager
from langchain.callbacks.tracers import LangChainTracer
from langchain.chains import ChatVectorDBChain
from langchain.chains.chat_vector_db.prompts import (CONDENSE_QUESTION_PROMPT,
QA_PROMPT)
from langchain.chains.llm import LLMChain
from langchain.chains.question_answering import load_qa_chain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.vectorstores import Pinecone
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts.chat import (
ChatPromptTemplate,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate,
)
# system_template = """Use the following pieces of context to answer the users question.
# If you don't know the answer, just say that you don't know, don't try to make up an answer.
# ----------------
# {context}"""
template = """You are a helpful AI assistant that answers questions about
an e-commerce company called "Sindabad.com" in a friendly and polite
manner. You will be given a context that will represent Sindabad.com's
product inventory. Users might ask about products, they might want to
know your suggestions as well. Most importantly, they might ask about
specific product and its associated product link. If they want to know
about product links, you will provide it accordingly with the help of the
given "Context". Answer the question in your own words as truthfully as
possible from the context given to you. If you do not know the answer to
the question, simply respond with "I don't know. Could you please rephrase
the question?". If questions are asked where there is no relevant information
available in the context, answer the question with your existing knowledge on
that question and "ignore" the "Context" given to you.
----------------
context: {context}"""
messages = [
SystemMessagePromptTemplate.from_template(template),
HumanMessagePromptTemplate.from_template("{question}")
]
prompt = ChatPromptTemplate.from_messages(messages)
def get_chain(
vectorstore: Pinecone,
question_handler,
stream_handler,
tracing: bool = False
) -> ConversationalRetrievalChain:
    """Create a ConversationalRetrievalChain for question/answering."""
# Construct a ChatVectorDBChain with a streaming llm for combine docs
# and a separate, non-streaming llm for question generation
manager = AsyncCallbackManager([])
question_manager = AsyncCallbackManager([question_handler])
stream_manager = AsyncCallbackManager([stream_handler])
if tracing:
tracer = LangChainTracer()
tracer.load_default_session()
manager.add_handler(tracer)
question_manager.add_handler(tracer)
stream_manager.add_handler(tracer)
question_gen_llm = ChatOpenAI(
temperature=0,
verbose=True,
callback_manager=question_manager,
)
streaming_llm = ChatOpenAI(
streaming=True,
callback_manager=stream_manager,
verbose=True,
temperature=0,
)
question_generator = LLMChain(
llm=question_gen_llm,
prompt=CONDENSE_QUESTION_PROMPT,
callback_manager=manager
)
doc_chain = load_qa_chain(
streaming_llm,
chain_type="stuff",
prompt=prompt,
callback_manager=manager
)
# qa = ChatVectorDBChain(
# vectorstore=vectorstore,
# combine_docs_chain=doc_chain,
# question_generator=question_generator,
# callback_manager=manager,
# )
qa = ConversationalRetrievalChain(
retriever=vectorstore.as_retriever(),
combine_docs_chain=doc_chain,
question_generator=question_generator,
callback_manager=manager
)
return qa
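# Usage sketch (hypothetical wiring; `embeddings`, `question_handler`, and
# `stream_handler` are assumed to be created elsewhere in the application,
# the two handlers as AsyncCallbackHandler implementations):
#
#   vectorstore = Pinecone.from_existing_index("sindabad-products", embeddings)
#   qa_chain = get_chain(vectorstore, question_handler, stream_handler)
#   result = await qa_chain.acall({"question": question, "chat_history": []})
#   answer = result["answer"]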
| [
"langchain.chains.question_answering.load_qa_chain",
"langchain.callbacks.tracers.LangChainTracer",
"langchain.callbacks.manager.AsyncCallbackManager",
"langchain.prompts.chat.SystemMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.chains.llm.LLMChain",
"langchain.prompts.chat.HumanMessagePromptTemplate.from_template",
"langchain.prompts.chat.ChatPromptTemplate.from_messages"
] | [((2109, 2151), 'langchain.prompts.chat.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['messages'], {}), '(messages)\n', (2141, 2151), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((1986, 2037), 'langchain.prompts.chat.SystemMessagePromptTemplate.from_template', 'SystemMessagePromptTemplate.from_template', (['template'], {}), '(template)\n', (2027, 2037), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2043, 2097), 'langchain.prompts.chat.HumanMessagePromptTemplate.from_template', 'HumanMessagePromptTemplate.from_template', (['"""{question}"""'], {}), "('{question}')\n", (2083, 2097), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, HumanMessagePromptTemplate\n'), ((2501, 2525), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[]'], {}), '([])\n', (2521, 2525), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2549, 2589), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[question_handler]'], {}), '([question_handler])\n', (2569, 2589), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2611, 2649), 'langchain.callbacks.manager.AsyncCallbackManager', 'AsyncCallbackManager', (['[stream_handler]'], {}), '([stream_handler])\n', (2631, 2649), False, 'from langchain.callbacks.manager import AsyncCallbackManager\n'), ((2887, 2961), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'verbose': '(True)', 'callback_manager': 'question_manager'}), '(temperature=0, verbose=True, callback_manager=question_manager)\n', (2897, 2961), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3013, 3105), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'streaming': '(True)', 'callback_manager': 'stream_manager', 'verbose': '(True)', 'temperature': '(0)'}), '(streaming=True, callback_manager=stream_manager, verbose=True,\n temperature=0)\n', (3023, 3105), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3167, 3260), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'question_gen_llm', 'prompt': 'CONDENSE_QUESTION_PROMPT', 'callback_manager': 'manager'}), '(llm=question_gen_llm, prompt=CONDENSE_QUESTION_PROMPT,\n callback_manager=manager)\n', (3175, 3260), False, 'from langchain.chains.llm import LLMChain\n'), ((3303, 3396), 'langchain.chains.question_answering.load_qa_chain', 'load_qa_chain', (['streaming_llm'], {'chain_type': '"""stuff"""', 'prompt': 'prompt', 'callback_manager': 'manager'}), "(streaming_llm, chain_type='stuff', prompt=prompt,\n callback_manager=manager)\n", (3316, 3396), False, 'from langchain.chains.question_answering import load_qa_chain\n'), ((2683, 2700), 'langchain.callbacks.tracers.LangChainTracer', 'LangChainTracer', ([], {}), '()\n', (2698, 2700), False, 'from langchain.callbacks.tracers import LangChainTracer\n')] |
from llama_index.core import VectorStoreIndex,SimpleDirectoryReader,ServiceContext
print("VectorStoreIndex,SimpleDirectoryReader,ServiceContext imported")
from llama_index.llms.huggingface import HuggingFaceLLM
print("HuggingFaceLLM imported")
from llama_index.core.prompts.prompts import SimpleInputPrompt
print("SimpleInputPrompt imported")
from ctransformers import AutoModelForCausalLM
print("AutoModelForCausalLM imported")
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
print("HuggingFaceEmbeddings imported")
from llama_index.core import ServiceContext
print("ServiceContext imported")
from llama_index.embeddings.langchain import LangchainEmbedding
print("LangchainEmbedding imported")
from langchain_community.document_loaders import PyPDFLoader
print("PyPDFLoader imported")
import json
import torch
import os
from dotenv import load_dotenv
load_dotenv()
HuggingFace_Api = os.environ.get('HF_TOKEN')
documents = SimpleDirectoryReader('./testing/docs').load_data()
print("SimpleDirectoryReader imported")
def get_system_prompt():
'''This function is used to load the system prompt from the prompts.json file'''
with open('prompts.json') as f:
data = json.load(f)
return data['Default']
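# Assumed layout of prompts.json (only the "Default" key is read above; the
# actual file may contain additional named prompts):
#
#   {
#       "Default": "You are a helpful assistant that answers questions about the indexed documents."
#   }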
query_wrapper_prompt=SimpleInputPrompt("<|USER|>{query_str}<|ASSISTANT|>")
def load_model(context_window: int, max_new_tokens: int):
'''This function is used to load the model from the HuggingFaceLLM'''
print(f"""Available Cuda: {torch.cuda.get_device_name()} \n
        Trying to load the model""")
try:
llm = HuggingFaceLLM(context_window=context_window,
max_new_tokens=max_new_tokens,
generate_kwargs={"temperature": 0.0, "do_sample": False},
system_prompt=get_system_prompt(),
query_wrapper_prompt=query_wrapper_prompt,
tokenizer_name="./meta",
model_name="./meta",
device_map="cuda",
# uncomment this if using CUDA to reduce memory usage
model_kwargs={"torch_dtype": torch.float16,"load_in_8bit":True }
)
print("Model Loaded")
return llm
except Exception as e:
print(f"Error: {e}")
return None
def embed_model():
'''This function is used to load the model from the LangchainEmbedding'''
embed = LangchainEmbedding(
HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2"))
service_context=ServiceContext.from_defaults(
chunk_size=1024,
llm=load_model(context_window=4096, max_new_tokens=256),
embed_model=embed
)
return service_context
def get_index():
'''This function is used to load the index from the VectorStoreIndex'''
index=VectorStoreIndex.from_documents(documents,service_context=embed_model())
return index
def main():
    query_engine = get_index().as_query_engine()
    response = query_engine.query("What is this PDF about?")
    print(response)
if __name__ == "__main__":
main() | [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((872, 885), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (883, 885), False, 'from dotenv import load_dotenv\n'), ((905, 931), 'os.environ.get', 'os.environ.get', (['"""HF_TOKEN"""'], {}), "('HF_TOKEN')\n", (919, 931), False, 'import os\n'), ((1262, 1315), 'llama_index.core.prompts.prompts.SimpleInputPrompt', 'SimpleInputPrompt', (['"""<|USER|>{query_str}<|ASSISTANT|>"""'], {}), "('<|USER|>{query_str}<|ASSISTANT|>')\n", (1279, 1315), False, 'from llama_index.core.prompts.prompts import SimpleInputPrompt\n'), ((945, 984), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./testing/docs"""'], {}), "('./testing/docs')\n", (966, 984), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((1200, 1212), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1209, 1212), False, 'import json\n'), ((2523, 2598), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-mpnet-base-v2"""'}), "(model_name='sentence-transformers/all-mpnet-base-v2')\n", (2544, 2598), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1481, 1509), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', ([], {}), '()\n', (1507, 1509), False, 'import torch\n')] |
import itertools
from langchain.cache import InMemoryCache, SQLiteCache
import langchain
import pandas as pd
from certa.utils import merge_sources
import ellmer.models
import ellmer.metrics
from time import sleep, time
import traceback
from tqdm import tqdm
cache = "sqlite"
samples = 2
explanation_granularity = "attribute"
# setup langchain cache
if cache == "memory":
langchain.llm_cache = InMemoryCache()
elif cache == "sqlite":
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
llm_configs = [
{"model_type": "falcon", "model_name": "vilsonrodrigues/falcon-7b-instruct-sharded", "deployment_name": "local", "tag": "falcon"},
]
for llm_config in llm_configs:
pase = llm = ellmer.models.GenericEllmer(explanation_granularity=explanation_granularity, verbose=True,
deployment_name=llm_config['deployment_name'], temperature=0.01,
model_name=llm_config['model_name'], model_type=llm_config['model_type'],
prompts={"pase": "ellmer/prompts/lc_pase_llama2.txt"})
ptsew = ellmer.models.GenericEllmer(explanation_granularity=explanation_granularity, verbose=True,
deployment_name=llm_config['deployment_name'], temperature=0.01,
model_name=llm_config['model_name'], model_type=llm_config['model_type'],
prompts={
"ptse": {"er": "ellmer/prompts/er.txt", "why": "ellmer/prompts/er-why.txt",
"saliency": "ellmer/prompts/er-saliency-lc.txt",
"cf": "ellmer/prompts/er-cf-lc.txt"}})
# for each dataset in deepmatcher datasets
dataset_names = ['abt_buy', 'fodo_zaga', 'walmart_amazon']
base_dir = '/Users/tteofili/dev/cheapER/datasets/'
for d in dataset_names:
print(f'using dataset {d}')
dataset_dir = '/'.join([base_dir, d])
lsource = pd.read_csv(dataset_dir + '/tableA.csv')
rsource = pd.read_csv(dataset_dir + '/tableB.csv')
gt = pd.read_csv(dataset_dir + '/train.csv')
valid = pd.read_csv(dataset_dir + '/valid.csv')
test = pd.read_csv(dataset_dir + '/test.csv')
test_df = merge_sources(test, 'ltable_', 'rtable_', lsource, rsource, ['label'], [])
ellmers = {
"ptsew_" + llm_config['tag']: ptsew,
"pase_" + llm_config['tag']: pase,
}
result_files = []
all_llm_results = dict()
for key, llm in ellmers.items():
print(f'{key} on {d}')
curr_llm_results = []
start_time = time()
# generate predictions and explanations
test_data_df = test_df[:samples]
ranged = range(len(test_data_df))
for idx in tqdm(ranged, disable=False):
try:
rand_row = test_df.iloc[[idx]]
ltuple, rtuple = ellmer.utils.get_tuples(rand_row)
print(f'ltuple:\n{ltuple}\nrtuple:\n{rtuple}')
answer_dictionary = llm.predict_and_explain(ltuple, rtuple)
print(answer_dictionary)
prediction = answer_dictionary['prediction']
saliency = answer_dictionary['saliency']
cfs = [answer_dictionary['cf']]
curr_llm_results.append({"id": idx, "ltuple": ltuple, "rtuple": rtuple, "prediction": prediction,
"label": rand_row['label'].values[0], "saliency": saliency, "cfs": cfs})
except Exception:
traceback.print_exc()
print(f'error, waiting...')
sleep(10)
start_time += 10
| [
"langchain.cache.InMemoryCache",
"langchain.cache.SQLiteCache"
] | [((399, 414), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (412, 414), False, 'from langchain.cache import InMemoryCache, SQLiteCache\n'), ((465, 507), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (476, 507), False, 'from langchain.cache import InMemoryCache, SQLiteCache\n'), ((2112, 2152), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/tableA.csv')"], {}), "(dataset_dir + '/tableA.csv')\n", (2123, 2152), True, 'import pandas as pd\n'), ((2171, 2211), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/tableB.csv')"], {}), "(dataset_dir + '/tableB.csv')\n", (2182, 2211), True, 'import pandas as pd\n'), ((2225, 2264), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/train.csv')"], {}), "(dataset_dir + '/train.csv')\n", (2236, 2264), True, 'import pandas as pd\n'), ((2281, 2320), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/valid.csv')"], {}), "(dataset_dir + '/valid.csv')\n", (2292, 2320), True, 'import pandas as pd\n'), ((2336, 2374), 'pandas.read_csv', 'pd.read_csv', (["(dataset_dir + '/test.csv')"], {}), "(dataset_dir + '/test.csv')\n", (2347, 2374), True, 'import pandas as pd\n'), ((2393, 2467), 'certa.utils.merge_sources', 'merge_sources', (['test', '"""ltable_"""', '"""rtable_"""', 'lsource', 'rsource', "['label']", '[]'], {}), "(test, 'ltable_', 'rtable_', lsource, rsource, ['label'], [])\n", (2406, 2467), False, 'from certa.utils import merge_sources\n'), ((2790, 2796), 'time.time', 'time', ([], {}), '()\n', (2794, 2796), False, 'from time import sleep, time\n'), ((2964, 2991), 'tqdm.tqdm', 'tqdm', (['ranged'], {'disable': '(False)'}), '(ranged, disable=False)\n', (2968, 2991), False, 'from tqdm import tqdm\n'), ((3796, 3817), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (3815, 3817), False, 'import traceback\n'), ((3886, 3895), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (3891, 3895), False, 'from time import sleep, time\n')] |
# TODO speed up by extracting the resume into a structured form and the job description before sending to gpt4
import re
from bs4 import BeautifulSoup
from pyppeteer import launch
import uuid
import time
from PIL import Image
import numpy as np
from fastapi import FastAPI, File, UploadFile, Form
from fastapi import Request
from langchain.prompts import ChatPromptTemplate
import json
from prompts_json import json_schema, system_message_content_without_coverletter, system_message_structurize_json
from langchain.chains.openai_functions import create_structured_output_chain
import asyncio
import concurrent.futures
import threading
from pydantic import BaseModel
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
import PyPDF2
import fitz  # PyMuPDF; needed by get_pdf_content and highlight_words_in_pdf below
import os
import langchain
from langchain.cache import InMemoryCache
from langchain.cache import SQLiteCache
from pathlib import Path
from typing import Optional
from langchain.callbacks.base import AsyncCallbackHandler, BaseCallbackHandler
from typing import Any, Dict, List
from langchain.schema import LLMResult, HumanMessage
# load env variables
from dotenv import load_dotenv
load_dotenv()
# llm = ChatOpenAI(model='gpt-4', temperature=0.1, max_tokens=2000, top_p=1, frequency_penalty=0.0, presence_penalty=0.0, stop=["\n\n", "Human:", "System:"])
# llm = ChatOpenAI(model='gpt-3.5-turbo-16k', temperature=0.1)
#
llm = ChatOpenAI(model='gpt-4-0613', temperature=0.1)
# llm = ChatOpenAI(model='gpt-3.5-turbo-16k-0613', temperature=0.1) # TODO change back to GPT-4
def get_pdf_content(pdf_path):
pdf = fitz.open(pdf_path)
text = ""
for page in pdf:
text += page.get_text()
# TODO disable OCR because of package size
# if not text.strip():
# reader = easyocr.Reader(['en'])
# for page in pdf:
# pix = page.get_pixmap()
# img = Image.frombytes("RGB", [pix.width, pix.height], pix.samples)
# text += ' '.join([t[1] for t in reader.readtext(np.array(img))])
return text
def highlight_words_in_pdf(pdf_path, words_to_highlight):
pdf = fitz.open(pdf_path)
for word in words_to_highlight.split(","):
word = word.strip()
for page in pdf:
text_instances = page.search_for(word)
for inst in text_instances:
highlight = page.add_highlight_annot(inst)
temp_output_path = "/tmp/highlighted_output.pdf"
pdf.save(temp_output_path)
return temp_output_path
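# Usage example (hypothetical inputs): highlight a comma-separated keyword list
# in an uploaded resume and get back the path of the annotated copy.
#   highlighted_path = highlight_words_in_pdf("/tmp/resume.pdf", "Python, FastAPI, Docker")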
def extract_text_from_pdf(file_path):
with open(file_path, "rb") as file:
reader = PyPDF2.PdfReader(file)
text = ""
for page in range(len(reader.pages)):
text += reader.pages[page].extract_text()
return text
def handle_resume_upload(uploaded_file, resume_path):
if uploaded_file is not None:
file_details = {"FileName": uploaded_file.filename,
"FileType": uploaded_file.content_type}
if file_details["FileType"] == "application/pdf":
file_path = resume_path / f"resume_{uuid.uuid4()}.pdf"
with open(file_path, "wb") as f:
f.write(uploaded_file.file.read())
return extract_text_from_pdf(file_path), None
else:
return None, "Please upload a valid PDF file."
return None, None
async def structurize_with_gpt(text, model_name='gpt-3.5-turbo-16k-0613', system_message_content=system_message_structurize_json):
global system_message_structurize_json
response = await generate_response(
system_message_content, text, model_name)
return response
async def do_match(resume, job_desc):
global llm, system_message_content_without_coverletter, json_schema, system_message_structurize_json
# langchain.llm_cache = InMemoryCache()
# We can do the same thing with a SQLite cache
#TODO
langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
# count number of words for resume and job description and print it
# start timer for processing time from now until the end of the function
start = time.time()
print(" Lenth before structurize:")
# counter number of words in resume and job description
print("Length of resume: " + str(len(resume)))
print("Length of job description: " + str(len(job_desc)))
# job_desc = structurize_with_gpt(job_desc)
# resume = structurize_with_gpt(resume)
print(" Lenth after structurize:")
print("Length of resume: " + str(len(resume)))
print("Length of job description: " + str(len(job_desc)))
system_message = SystemMessage(
content=system_message_content_without_coverletter)
human_message = HumanMessage(
content=f"Resume:\n{resume}\njob description:\n{job_desc}")
prompt = ChatPromptTemplate.from_messages(
[
system_message,
human_message
])
print(prompt)
    # structurize is good but makes it extremely slow with gpt4
# chain = create_structured_output_chain(json_schema, llm, prompt, verbose=True)
chain = LLMChain(llm=llm, prompt=prompt)
output = await chain.arun({})
end = time.time()
# print processing time in seconds
print("Processing time: " + str(end - start))
# output = json.loads(output)
# get md first
# convert to json
# print (output)
# # now convert to json
# system_message = SystemMessage(content=system_message_convert_json)
# human_message = HumanMessage( content=f"{output}")
# prompt = ChatPromptTemplate.from_messages(
# [
# system_message,
# human_message
# ] )
# # print (prompt)
# # chain = create_structured_output_chain(json_schema, llm, prompt, verbose=True)
# chain = LLMChain(llm=llm, prompt=prompt)
# output = chain.run({})
return output
# messages = [system_message, human_message]
# result = llm(messages)
# return result.content
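# Usage sketch (placeholder path and URL; do_match is a coroutine, so it has to
# be awaited inside an endpoint or driven with asyncio.run from sync code):
#
#   resume_text = extract_text_from_pdf("/tmp/resume.pdf")
#   job_text = get_clean_text_from_url("https://example.com/job-posting")
#   report = asyncio.run(do_match(resume_text, job_text))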
# URLS
async def get_page_content(url):
browser = await launch(handleSIGINT=False, handleSIGTERM=False, handleSIGHUP=False)
page = await browser.newPage()
await page.setViewport({'width': 1366, 'height': 768})
SAFARI_USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Safari/605.1.15"
await page.setUserAgent(SAFARI_USER_AGENT)
try:
await page.goto(url, waitUntil="domcontentloaded", timeout=60000)
except Exception as e: # It's better to catch a general exception for simplicity here.
print(f"Error: {e}")
await browser.close()
return None
content = await page.content()
await browser.close()
return content
def get_clean_text_from_url(url):
with concurrent.futures.ThreadPoolExecutor() as executor:
future = executor.submit(run_in_thread, url)
# call openai api to extract the relevant parts related to a job desciription and remove the rest
clean_page = future.result()
# if number of characters is greater than 3000: summarize the page
# print size of the clean page message
extracted_job = clean_page
print("Length of clean page: " + str(len(clean_page)))
if len(clean_page) > 3000:
        system_message_content = "You summarize a given page and extract the part related to the job description. Don't make up anything, just extract the relevant parts."
        # generate_response is a coroutine; drive it to completion here, since this
        # sync helper runs outside of any active event loop.
        extracted_job = asyncio.run(generate_response(
            system_message_content, clean_page, "gpt-3.5-turbo-16k"))
print("Length of clean page after Extraction: " +
str(len(extracted_job)))
# print size of the extracted job
return extracted_job
def run_in_thread(url):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(get_clean_text_from_url_async(url))
finally:
loop.close()
async def get_clean_text_from_url_async(url):
content = await get_page_content(url)
if not content:
return None # return None if there was an error
soup = BeautifulSoup(content, 'html.parser')
for script in soup(['script', 'style']):
script.decompose()
clean_text = soup.get_text()
clean_text = re.sub(r'\n+', '\n', clean_text).strip()
return clean_text
# Usage example:
# text_content = get_clean_text_from_url("https://www.example.com")
# print(text_content)
class MyCustomSyncHandler(BaseCallbackHandler):
def on_llm_new_token(self, token: str, **kwargs) -> None:
print(f"Sync handler being called in a `thread_pool_executor`: token: {token}")
class MyCustomAsyncHandler(AsyncCallbackHandler):
"""Async callback handler that can be used to handle callbacks from langchain."""
async def on_llm_start(
self, serialized: Dict[str, Any], prompts: List[str], **kwargs: Any
) -> None:
"""Run when chain starts running."""
print("zzzz....")
await asyncio.sleep(0.3)
class_name = serialized["name"]
print("Hi! I just woke up. Your llm is starting")
async def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
"""Run when chain ends running."""
print("zzzz....")
await asyncio.sleep(0.3)
print("Hi! I just woke up. Your llm is ending")
async def generate_response(system_message_content: str, human_message_content: str, model_name: str = 'gpt-3.5-turbo') -> AIMessage:
"""
Generates a response based on the given system and human messages.
Args:
- system_message_content (str): The content of the system message.
- human_message_content (str): The content of the human message.
- model_name (str): The name of the model to use. Defaults to 'gpt-4'.
Returns:
- AIMessage: The response generated by the LLM.
"""
llm = ChatOpenAI(model=model_name, callbacks=[MyCustomSyncHandler(), MyCustomAsyncHandler()])
# Create SystemMessage
system_message = SystemMessage(content=system_message_content)
# Create HumanMessage
human_message = HumanMessage(content=human_message_content)
# Create messages list
messages = [system_message, human_message]
result = await llm.agenerate([messages])
result = result.generations[0][0].text
    print(result)
# result = llm(messages)
return result
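# Usage example (hypothetical messages; generate_response is a coroutine and
# returns the generated text of the first completion):
#
#   summary = asyncio.run(generate_response(
#       "You summarize web pages.", page_text, model_name="gpt-3.5-turbo-16k"))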
| [
"langchain.schema.HumanMessage",
"langchain.cache.SQLiteCache",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.chat_models.ChatOpenAI",
"langchain.schema.SystemMessage",
"langchain.chains.LLMChain"
] | [((1278, 1291), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1289, 1291), False, 'from dotenv import load_dotenv\n'), ((1523, 1570), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4-0613"""', 'temperature': '(0.1)'}), "(model='gpt-4-0613', temperature=0.1)\n", (1533, 1570), False, 'from langchain.chat_models import ChatOpenAI\n'), ((4022, 4064), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (4033, 4064), False, 'from langchain.cache import SQLiteCache\n'), ((4229, 4240), 'time.time', 'time.time', ([], {}), '()\n', (4238, 4240), False, 'import time\n'), ((4736, 4801), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_message_content_without_coverletter'}), '(content=system_message_content_without_coverletter)\n', (4749, 4801), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((4831, 4904), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'f"""Resume:\n{resume}\njob description:\n{job_desc}"""'}), '(content=f"""Resume:\n{resume}\njob description:\n{job_desc}""")\n', (4843, 4904), False, 'from langchain.schema import LLMResult, HumanMessage\n'), ((4926, 4991), 'langchain.prompts.ChatPromptTemplate.from_messages', 'ChatPromptTemplate.from_messages', (['[system_message, human_message]'], {}), '([system_message, human_message])\n', (4958, 4991), False, 'from langchain.prompts import ChatPromptTemplate\n'), ((5215, 5247), 'langchain.chains.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (5223, 5247), False, 'from langchain.chains import LLMChain\n'), ((5293, 5304), 'time.time', 'time.time', ([], {}), '()\n', (5302, 5304), False, 'import time\n'), ((7925, 7949), 'asyncio.new_event_loop', 'asyncio.new_event_loop', ([], {}), '()\n', (7947, 7949), False, 'import asyncio\n'), ((7954, 7982), 'asyncio.set_event_loop', 'asyncio.set_event_loop', (['loop'], {}), '(loop)\n', (7976, 7982), False, 'import asyncio\n'), ((8281, 8318), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""html.parser"""'], {}), "(content, 'html.parser')\n", (8294, 8318), False, 'from bs4 import BeautifulSoup\n'), ((10172, 10217), 'langchain.schema.SystemMessage', 'SystemMessage', ([], {'content': 'system_message_content'}), '(content=system_message_content)\n', (10185, 10217), False, 'from langchain.schema import AIMessage, HumanMessage, SystemMessage\n'), ((10265, 10308), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'human_message_content'}), '(content=human_message_content)\n', (10277, 10308), False, 'from langchain.schema import LLMResult, HumanMessage\n'), ((2710, 2732), 'PyPDF2.PdfReader', 'PyPDF2.PdfReader', (['file'], {}), '(file)\n', (2726, 2732), False, 'import PyPDF2\n'), ((6148, 6215), 'pyppeteer.launch', 'launch', ([], {'handleSIGINT': '(False)', 'handleSIGTERM': '(False)', 'handleSIGHUP': '(False)'}), '(handleSIGINT=False, handleSIGTERM=False, handleSIGHUP=False)\n', (6154, 6215), False, 'from pyppeteer import launch\n'), ((8443, 8475), 're.sub', 're.sub', (['"""\\\\n+"""', '"""\n"""', 'clean_text'], {}), "('\\\\n+', '\\n', clean_text)\n", (8449, 8475), False, 'import re\n'), ((9158, 9176), 'asyncio.sleep', 'asyncio.sleep', (['(0.3)'], {}), '(0.3)\n', (9171, 9176), False, 'import asyncio\n'), ((9435, 9453), 'asyncio.sleep', 'asyncio.sleep', (['(0.3)'], {}), '(0.3)\n', (9448, 9453), False, 'import asyncio\n'), ((3187, 3199), 'uuid.uuid4', 'uuid.uuid4', 
([], {}), '()\n', (3197, 3199), False, 'import uuid\n')] |