Dataset columns:
- code: string, lengths 161 to 67.2k
- apis: sequence, lengths 1 to 24
- extract_api: string, lengths 164 to 53.3k
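A minimal sketch of loading and inspecting rows with these three columns, assuming the dump is published as a Hugging Face dataset; the "user/llamaindex-api-extractions" path below is a placeholder, not the real dataset name:

from datasets import load_dataset  # pip install datasets

# Placeholder repository id -- substitute the actual dataset path.
ds = load_dataset("user/llamaindex-api-extractions", split="train")

row = ds[0]
print(row["code"][:200])         # the raw source file, stored as one string
print(row["apis"])               # fully qualified llama_index APIs called in that file
print(row["extract_api"][:200])  # serialized (start, end, call, args, kwargs) tuples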
import streamlit as st
import os
import sys
import openai
from streamlit_extras.switch_page_button import switch_page
from llama_index import VectorStoreIndex, GithubRepositoryReader, ServiceContext, set_global_service_context
from llama_index.llms import OpenAI
from database.neo4j_connection import connect_to_db

st.set_page_config(
    page_title="Authentication",
    page_icon="🔐",
)

st.write("# Welcome to AI GitHub Repo reader!")
st.sidebar.success("Fill the details to get started 👉")
st.write("## Enter the details below :")

api_key = st.text_input("Enter your OpenAI API key (https://platform.openai.com/account/api-keys)", type="password")
github_token = st.text_input("Enter your GitHub Token (https://github.com/settings/tokens)", type="password")
repository_link = st.text_input(
    "Enter the link of the repository by selecting a branch (e.g., https://github.com/openai/whisper/tree/main)",
    placeholder="https://github.com/owner/repository/tree/branch",
)

st.session_state['repository_link'] = repository_link
st.session_state['api_key'] = api_key
st.session_state['github_token'] = github_token

if not api_key or not github_token or not repository_link:
    st.sidebar.warning("⚠️ Please enter OpenAI API key, GitHub Token and GitHub Repository link.")
else:
    if st.button("Submit", use_container_width=True):
        os.environ["OPENAI_API_KEY"] = api_key
        openai.api_key = os.environ["OPENAI_API_KEY"]
        os.environ["GITHUB_TOKEN"] = github_token

        llm = OpenAI(
            model="gpt-4",
            temperature=0.5,
        )

        # configure service context
        service_context = ServiceContext.from_defaults(llm=llm)
        set_global_service_context(service_context)

        st.markdown("✅ API Key, Token and Repository link submitted successfully!")

        with st.spinner("Loading documents from GitHub..."):
            try:
                url_parts = repository_link.split('/')
                owner = url_parts[3]
                repo = url_parts[4]
                branch = url_parts[-1].split('/')[-1]
                documents = GithubRepositoryReader(
                    github_token=os.environ["GITHUB_TOKEN"],
                    owner=owner,
                    repo=repo,
                    use_parser=False,
                    verbose=False,
                ).load_data(branch=branch)

                # Create the vector store index and query engine
                connect_to_db(service_context, documents)
                index = VectorStoreIndex.from_documents(documents, service_context=service_context)
                #index = VectorStoreIndex.from_documents(documents)
                #query_engine = index.as_query_engine()
                query_engine = index.as_query_engine(
                    include_text=True,
                    response_mode="tree_summarize",
                    embedding_mode="hybrid",
                    similarity_top_k=5,
                )
                st.session_state['documents'] = documents
                st.session_state['query_engine'] = query_engine
            except Exception as e:
                st.error(f"An error occurred: {str(e)}")

        st.success('Done!', icon="✅")
        st.markdown("Click Next to see Repository Analysis")
        switch_page("repository analysis")
[ "llama_index.set_global_service_context", "llama_index.llms.OpenAI", "llama_index.ServiceContext.from_defaults", "llama_index.GithubRepositoryReader" ]
[((317, 379), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Authentication"""', 'page_icon': '"""πŸ”"""'}), "(page_title='Authentication', page_icon='πŸ”')\n", (335, 379), True, 'import streamlit as st\n'), ((392, 439), 'streamlit.write', 'st.write', (['"""# Welcome to AI GitHub Repo reader!"""'], {}), "('# Welcome to AI GitHub Repo reader!')\n", (400, 439), True, 'import streamlit as st\n'), ((441, 496), 'streamlit.sidebar.success', 'st.sidebar.success', (['"""Fill the details to get started πŸ‘‰"""'], {}), "('Fill the details to get started πŸ‘‰')\n", (459, 496), True, 'import streamlit as st\n'), ((498, 538), 'streamlit.write', 'st.write', (['"""## Enter the details below :"""'], {}), "('## Enter the details below :')\n", (506, 538), True, 'import streamlit as st\n'), ((549, 664), 'streamlit.text_input', 'st.text_input', (['"""Enter your OpenAI API key (https://platform.openai.com/account/api-keys)"""'], {'type': '"""password"""'}), "(\n 'Enter your OpenAI API key (https://platform.openai.com/account/api-keys)',\n type='password')\n", (562, 664), True, 'import streamlit as st\n'), ((671, 769), 'streamlit.text_input', 'st.text_input', (['"""Enter your GitHub Token (https://github.com/settings/tokens)"""'], {'type': '"""password"""'}), "('Enter your GitHub Token (https://github.com/settings/tokens)',\n type='password')\n", (684, 769), True, 'import streamlit as st\n'), ((784, 980), 'streamlit.text_input', 'st.text_input', (['"""Enter the link of the repository by selecting a branch (e.g., https://github.com/openai/whisper/tree/main)"""'], {'placeholder': '"""https://github.com/owner/repository/tree/branch"""'}), "(\n 'Enter the link of the repository by selecting a branch (e.g., https://github.com/openai/whisper/tree/main)'\n , placeholder='https://github.com/owner/repository/tree/branch')\n", (797, 980), True, 'import streamlit as st\n'), ((1176, 1275), 'streamlit.sidebar.warning', 'st.sidebar.warning', (['"""⚠️ Please enter OpenAI API key, GitHub Token and GitHub Repository link."""'], {}), "(\n '⚠️ Please enter OpenAI API key, GitHub Token and GitHub Repository link.')\n", (1194, 1275), True, 'import streamlit as st\n'), ((1284, 1329), 'streamlit.button', 'st.button', (['"""Submit"""'], {'use_container_width': '(True)'}), "('Submit', use_container_width=True)\n", (1293, 1329), True, 'import streamlit as st\n'), ((1497, 1535), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4"""', 'temperature': '(0.5)'}), "(model='gpt-4', temperature=0.5)\n", (1503, 1535), False, 'from llama_index.llms import OpenAI\n'), ((1600, 1637), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm'}), '(llm=llm)\n', (1628, 1637), False, 'from llama_index import VectorStoreIndex, GithubRepositoryReader, ServiceContext, set_global_service_context\n'), ((1646, 1689), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (1672, 1689), False, 'from llama_index import VectorStoreIndex, GithubRepositoryReader, ServiceContext, set_global_service_context\n'), ((1698, 1773), 'streamlit.markdown', 'st.markdown', (['"""βœ… API Key, Token and Repository link submitted successfully!"""'], {}), "('βœ… API Key, Token and Repository link submitted successfully!')\n", (1709, 1773), True, 'import streamlit as st\n'), ((1787, 1833), 'streamlit.spinner', 'st.spinner', (['"""Loading documents from GitHub..."""'], {}), "('Loading documents from GitHub...')\n", (1797, 1833), True, 
'import streamlit as st\n'), ((3304, 3333), 'streamlit.success', 'st.success', (['"""Done!"""'], {'icon': '"""βœ…"""'}), "('Done!', icon='βœ…')\n", (3314, 3333), True, 'import streamlit as st\n'), ((3350, 3402), 'streamlit.markdown', 'st.markdown', (['"""Click Next to see Repository Analysis"""'], {}), "('Click Next to see Repository Analysis')\n", (3361, 3402), True, 'import streamlit as st\n'), ((3419, 3453), 'streamlit_extras.switch_page_button.switch_page', 'switch_page', (['"""repository analysis"""'], {}), "('repository analysis')\n", (3430, 3453), False, 'from streamlit_extras.switch_page_button import switch_page\n'), ((2465, 2506), 'database.neo4j_connection.connect_to_db', 'connect_to_db', (['service_context', 'documents'], {}), '(service_context, documents)\n', (2478, 2506), False, 'from database.neo4j_connection import connect_to_db\n'), ((2086, 2210), 'llama_index.GithubRepositoryReader', 'GithubRepositoryReader', ([], {'github_token': "os.environ['GITHUB_TOKEN']", 'owner': 'owner', 'repo': 'repo', 'use_parser': '(False)', 'verbose': '(False)'}), "(github_token=os.environ['GITHUB_TOKEN'], owner=owner,\n repo=repo, use_parser=False, verbose=False)\n", (2108, 2210), False, 'from llama_index import VectorStoreIndex, GithubRepositoryReader, ServiceContext, set_global_service_context\n')]
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
print("VectorStoreIndex,SimpleDirectoryReader,ServiceContext imported")
from llama_index.llms.huggingface import HuggingFaceLLM
print("HuggingFaceLLM imported")
from llama_index.core.prompts.prompts import SimpleInputPrompt
print("SimpleInputPrompt imported")
from ctransformers import AutoModelForCausalLM
print("AutoModelForCausalLM imported")
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
print("HuggingFaceEmbeddings imported")
from llama_index.core import ServiceContext
print("ServiceContext imported")
from llama_index.embeddings.langchain import LangchainEmbedding
print("LangchainEmbedding imported")
from langchain_community.document_loaders import PyPDFLoader
print("PyPDFLoader imported")
import json
import torch
import os
from dotenv import load_dotenv

load_dotenv()
HuggingFace_Api = os.environ.get('HF_TOKEN')

documents = SimpleDirectoryReader('./testing/docs').load_data()
print("SimpleDirectoryReader imported")


def get_system_prompt():
    '''This function is used to load the system prompt from the prompts.json file'''
    with open('prompts.json') as f:
        data = json.load(f)
    return data['Default']


query_wrapper_prompt = SimpleInputPrompt("<|USER|>{query_str}<|ASSISTANT|>")


def load_model(context_window: int, max_new_tokens: int):
    '''This function is used to load the model from the HuggingFaceLLM'''
    print(f"""Available Cuda: {torch.cuda.get_device_name()} \n Trying to load the model model""")
    try:
        llm = HuggingFaceLLM(
            context_window=context_window,
            max_new_tokens=max_new_tokens,
            generate_kwargs={"temperature": 0.0, "do_sample": False},
            system_prompt=get_system_prompt(),
            query_wrapper_prompt=query_wrapper_prompt,
            tokenizer_name="./meta",
            model_name="./meta",
            device_map="cuda",
            # uncomment this if using CUDA to reduce memory usage
            model_kwargs={"torch_dtype": torch.float16, "load_in_8bit": True},
        )
        print("Model Loaded")
        return llm
    except Exception as e:
        print(f"Error: {e}")
        return None


def embed_model():
    '''This function is used to load the model from the LangchainEmbedding'''
    embed = LangchainEmbedding(
        HuggingFaceEmbeddings(model_name="sentence-transformers/all-mpnet-base-v2"))
    service_context = ServiceContext.from_defaults(
        chunk_size=1024,
        llm=load_model(context_window=4096, max_new_tokens=256),
        embed_model=embed
    )
    return service_context


def get_index():
    '''This function is used to load the index from the VectorStoreIndex'''
    index = VectorStoreIndex.from_documents(documents, service_context=embed_model())
    return index


def main():
    query_engine = get_index().as_query_engine()
    response = query_engine.query("what is this PDF tells about?")
    out = response
    print(response)


if __name__ == "__main__":
    main()
[ "llama_index.core.prompts.prompts.SimpleInputPrompt", "llama_index.core.SimpleDirectoryReader" ]
[((872, 885), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (883, 885), False, 'from dotenv import load_dotenv\n'), ((905, 931), 'os.environ.get', 'os.environ.get', (['"""HF_TOKEN"""'], {}), "('HF_TOKEN')\n", (919, 931), False, 'import os\n'), ((1262, 1315), 'llama_index.core.prompts.prompts.SimpleInputPrompt', 'SimpleInputPrompt', (['"""<|USER|>{query_str}<|ASSISTANT|>"""'], {}), "('<|USER|>{query_str}<|ASSISTANT|>')\n", (1279, 1315), False, 'from llama_index.core.prompts.prompts import SimpleInputPrompt\n'), ((945, 984), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./testing/docs"""'], {}), "('./testing/docs')\n", (966, 984), False, 'from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext\n'), ((1200, 1212), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1209, 1212), False, 'import json\n'), ((2523, 2598), 'langchain.embeddings.huggingface.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""sentence-transformers/all-mpnet-base-v2"""'}), "(model_name='sentence-transformers/all-mpnet-base-v2')\n", (2544, 2598), False, 'from langchain.embeddings.huggingface import HuggingFaceEmbeddings\n'), ((1481, 1509), 'torch.cuda.get_device_name', 'torch.cuda.get_device_name', ([], {}), '()\n', (1507, 1509), False, 'import torch\n')]
import logging
from typing import Any, List, Optional

from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.core.embeddings.base import (
    DEFAULT_EMBED_BATCH_SIZE,
    BaseEmbedding,
    Embedding,
)

logger = logging.getLogger(__name__)

# For bge models that Gradient AI provides, it is suggested to add the instruction for retrieval.
# Reference: https://huggingface.co/BAAI/bge-large-en-v1.5#model-list
QUERY_INSTRUCTION_FOR_RETRIEVAL = (
    "Represent this sentence for searching relevant passages:"
)

GRADIENT_EMBED_BATCH_SIZE: int = 32_768


class GradientEmbedding(BaseEmbedding):
    """GradientAI embedding models.

    This class provides an interface to generate embeddings using a model
    deployed in Gradient AI. At the initialization it requires a model_id
    of the model deployed in the cluster.

    Note:
        Requires `gradientai` package to be available in the PYTHONPATH. It can be installed with
        `pip install gradientai`.
    """

    embed_batch_size: int = Field(default=GRADIENT_EMBED_BATCH_SIZE, gt=0)

    _gradient: Any = PrivateAttr()
    _model: Any = PrivateAttr()

    @classmethod
    def class_name(cls) -> str:
        return "GradientEmbedding"

    def __init__(
        self,
        *,
        embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
        gradient_model_slug: str,
        gradient_access_token: Optional[str] = None,
        gradient_workspace_id: Optional[str] = None,
        gradient_host: Optional[str] = None,
        **kwargs: Any,
    ):
        """Initializes the GradientEmbedding class.

        During the initialization the `gradientai` package is imported. Using the access token,
        workspace id and the slug of the model, the model is fetched from Gradient AI
        and prepared to use.

        Args:
            embed_batch_size (int, optional): The batch size for embedding generation.
                Defaults to 10, must be > 0 and <= 100.
            gradient_model_slug (str): The model slug of the model in the Gradient AI account.
            gradient_access_token (str, optional): The access token of the Gradient AI account,
                if `None` read from the environment variable `GRADIENT_ACCESS_TOKEN`.
            gradient_workspace_id (str, optional): The workspace ID of the Gradient AI account,
                if `None` read from the environment variable `GRADIENT_WORKSPACE_ID`.
            gradient_host (str, optional): The host of the Gradient AI API. Defaults to None,
                which means the default host is used.

        Raises:
            ImportError: If the `gradientai` package is not available in the PYTHONPATH.
            ValueError: If the model cannot be fetched from Gradient AI.
        """
        if embed_batch_size <= 0:
            raise ValueError(f"Embed batch size {embed_batch_size} must be > 0.")

        try:
            import gradientai
        except ImportError:
            raise ImportError("GradientEmbedding requires `pip install gradientai`.")

        self._gradient = gradientai.Gradient(
            access_token=gradient_access_token,
            workspace_id=gradient_workspace_id,
            host=gradient_host,
        )

        try:
            self._model = self._gradient.get_embeddings_model(slug=gradient_model_slug)
        except gradientai.openapi.client.exceptions.UnauthorizedException as e:
            logger.error(f"Error while loading model {gradient_model_slug}.")
            self._gradient.close()
            raise ValueError("Unable to fetch the requested embeddings model") from e

        super().__init__(
            embed_batch_size=embed_batch_size, model_name=gradient_model_slug, **kwargs
        )

    async def _aget_text_embeddings(self, texts: List[str]) -> List[Embedding]:
        """
        Embed the input sequence of text asynchronously.
        """
        inputs = [{"input": text} for text in texts]

        result = await self._model.aembed(inputs=inputs).embeddings

        return [e.embedding for e in result]

    def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]:
        """
        Embed the input sequence of text.
        """
        inputs = [{"input": text} for text in texts]

        result = self._model.embed(inputs=inputs).embeddings

        return [e.embedding for e in result]

    def _get_text_embedding(self, text: str) -> Embedding:
        """Alias for _get_text_embeddings() with single text input."""
        return self._get_text_embeddings([text])[0]

    async def _aget_text_embedding(self, text: str) -> Embedding:
        """Alias for _aget_text_embeddings() with single text input."""
        embedding = await self._aget_text_embeddings([text])
        return embedding[0]

    async def _aget_query_embedding(self, query: str) -> Embedding:
        embedding = await self._aget_text_embeddings(
            [f"{QUERY_INSTRUCTION_FOR_RETRIEVAL} {query}"]
        )
        return embedding[0]

    def _get_query_embedding(self, query: str) -> Embedding:
        return self._get_text_embeddings(
            [f"{QUERY_INSTRUCTION_FOR_RETRIEVAL} {query}"]
        )[0]
[ "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.bridge.pydantic.Field" ]
[((251, 278), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (268, 278), False, 'import logging\n'), ((1040, 1086), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'GRADIENT_EMBED_BATCH_SIZE', 'gt': '(0)'}), '(default=GRADIENT_EMBED_BATCH_SIZE, gt=0)\n', (1045, 1086), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1109, 1122), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1120, 1122), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1141, 1154), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1152, 1154), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3068, 3184), 'gradientai.Gradient', 'gradientai.Gradient', ([], {'access_token': 'gradient_access_token', 'workspace_id': 'gradient_workspace_id', 'host': 'gradient_host'}), '(access_token=gradient_access_token, workspace_id=\n gradient_workspace_id, host=gradient_host)\n', (3087, 3184), False, 'import gradientai\n')]
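A minimal usage sketch for the GradientEmbedding class above, assuming the gradientai package is installed and a Gradient account exists; the model slug, token, workspace id, and import path below are placeholders or assumptions, not values from the source:

from llama_index.legacy.embeddings.gradient import GradientEmbedding  # module path assumed

embed_model = GradientEmbedding(
    gradient_model_slug="bge-large",              # placeholder slug of a deployed embeddings model
    gradient_access_token="YOUR_GRADIENT_TOKEN",  # or set GRADIENT_ACCESS_TOKEN in the environment
    gradient_workspace_id="YOUR_WORKSPACE_ID",    # or set GRADIENT_WORKSPACE_ID in the environment
)

# BaseEmbedding exposes public wrappers around the private methods defined above.
vector = embed_model.get_text_embedding("hello world")
print(len(vector))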
"""Answer inserter.""" from abc import abstractmethod from typing import Any, Dict, List, Optional from llama_index.core.llms.llm import LLM from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate from llama_index.core.prompts.mixin import ( PromptDictType, PromptMixin, PromptMixinType, ) from llama_index.core.query_engine.flare.schema import QueryTask from llama_index.core.service_context import ServiceContext from llama_index.core.settings import Settings, llm_from_settings_or_context class BaseLookaheadAnswerInserter(PromptMixin): """Lookahead answer inserter. These are responsible for insert answers into a lookahead answer template. E.g. lookahead answer: Red is for [Search(What is the meaning of Ghana's flag being red?)], green for forests, and gold for mineral wealth. query: What is the meaning of Ghana's flag being red? query answer: "the blood of those who died in the country's struggle for independence" final answer: Red is for the blood of those who died in the country's struggle for independence, green for forests, and gold for mineral wealth. """ def _get_prompt_modules(self) -> PromptMixinType: """Get prompt sub-modules.""" return {} @abstractmethod def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" DEFAULT_ANSWER_INSERT_PROMPT_TMPL = """ An existing 'lookahead response' is given below. The lookahead response contains `[Search(query)]` tags. Some queries have been executed and the response retrieved. The queries and answers are also given below. Also the previous response (the response before the lookahead response) is given below. Given the lookahead template, previous response, and also queries and answers, please 'fill in' the lookahead template with the appropriate answers. NOTE: Please make sure that the final response grammatically follows the previous response + lookahead template. For example, if the previous response is "New York City has a population of " and the lookahead template is "[Search(What is the population of New York City?)]", then the final response should be "8.4 million". NOTE: the lookahead template may not be a complete sentence and may contain trailing/leading commas, etc. Please preserve the original formatting of the lookahead template if possible. NOTE: NOTE: the exception to the above rule is if the answer to a query is equivalent to "I don't know" or "I don't have an answer". In this case, modify the lookahead template to indicate that the answer is not known. NOTE: the lookahead template may contain multiple `[Search(query)]` tags and only a subset of these queries have been executed. Do not replace the `[Search(query)]` tags that have not been executed. Previous Response: Lookahead Template: Red is for [Search(What is the meaning of Ghana's \ flag being red?)], green for forests, and gold for mineral wealth. Query-Answer Pairs: Query: What is the meaning of Ghana's flag being red? Answer: The red represents the blood of those who died in the country's struggle \ for independence Filled in Answers: Red is for the blood of those who died in the country's struggle for independence, \ green for forests, and gold for mineral wealth. Previous Response: One of the largest cities in the world Lookahead Template: , the city contains a population of [Search(What is the population \ of New York City?)] Query-Answer Pairs: Query: What is the population of New York City? 
Answer: The population of New York City is 8.4 million Synthesized Response: , the city contains a population of 8.4 million Previous Response: the city contains a population of Lookahead Template: [Search(What is the population of New York City?)] Query-Answer Pairs: Query: What is the population of New York City? Answer: The population of New York City is 8.4 million Synthesized Response: 8.4 million Previous Response: {prev_response} Lookahead Template: {lookahead_response} Query-Answer Pairs: {query_answer_pairs} Synthesized Response: """ DEFAULT_ANSWER_INSERT_PROMPT = PromptTemplate(DEFAULT_ANSWER_INSERT_PROMPT_TMPL) class LLMLookaheadAnswerInserter(BaseLookaheadAnswerInserter): """LLM Lookahead answer inserter. Takes in a lookahead response and a list of query tasks, and the lookahead answers, and inserts the answers into the lookahead response. """ def __init__( self, llm: Optional[LLM] = None, service_context: Optional[ServiceContext] = None, answer_insert_prompt: Optional[BasePromptTemplate] = None, ) -> None: """Init params.""" self._llm = llm or llm_from_settings_or_context(Settings, service_context) self._answer_insert_prompt = ( answer_insert_prompt or DEFAULT_ANSWER_INSERT_PROMPT ) def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return { "answer_insert_prompt": self._answer_insert_prompt, } def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" if "answer_insert_prompt" in prompts: self._answer_insert_prompt = prompts["answer_insert_prompt"] def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" prev_response = prev_response or "" query_answer_pairs = "" for query_task, answer in zip(query_tasks, answers): query_answer_pairs += f"Query: {query_task.query_str}\nAnswer: {answer}\n" return self._llm.predict( self._answer_insert_prompt, lookahead_response=response, query_answer_pairs=query_answer_pairs, prev_response=prev_response, ) class DirectLookaheadAnswerInserter(BaseLookaheadAnswerInserter): """Direct lookahead answer inserter. Simple inserter module that directly inserts answers into the [Search(query)] tags in the lookahead response. """ def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" for query_task, answer in zip(query_tasks, answers): response = ( response[: query_task.start_idx] + answer + response[query_task.end_idx + 1 :] ) return response
[ "llama_index.core.settings.llm_from_settings_or_context", "llama_index.core.prompts.base.PromptTemplate" ]
[((4287, 4336), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['DEFAULT_ANSWER_INSERT_PROMPT_TMPL'], {}), '(DEFAULT_ANSWER_INSERT_PROMPT_TMPL)\n', (4301, 4336), False, 'from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate\n'), ((4860, 4915), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (4888, 4915), False, 'from llama_index.core.settings import Settings, llm_from_settings_or_context\n')]
"""Retrieval evaluators.""" from typing import Any, List, Optional, Sequence, Tuple from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.core.base_retriever import BaseRetriever from llama_index.legacy.evaluation.retrieval.base import ( BaseRetrievalEvaluator, RetrievalEvalMode, ) from llama_index.legacy.evaluation.retrieval.metrics_base import ( BaseRetrievalMetric, ) from llama_index.legacy.indices.base_retriever import BaseRetriever from llama_index.legacy.postprocessor.types import BaseNodePostprocessor from llama_index.legacy.schema import ImageNode, TextNode class RetrieverEvaluator(BaseRetrievalEvaluator): """Retriever evaluator. This module will evaluate a retriever using a set of metrics. Args: metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate retriever: Retriever to evaluate. node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval. """ retriever: BaseRetriever = Field(..., description="Retriever to evaluate") node_postprocessors: Optional[List[BaseNodePostprocessor]] = Field( default=None, description="Optional post-processor" ) def __init__( self, metrics: Sequence[BaseRetrievalMetric], retriever: BaseRetriever, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, **kwargs: Any, ) -> None: """Init params.""" super().__init__( metrics=metrics, retriever=retriever, node_postprocessors=node_postprocessors, **kwargs, ) async def _aget_retrieved_ids_and_texts( self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT ) -> Tuple[List[str], List[str]]: """Get retrieved ids and texts, potentially applying a post-processor.""" retrieved_nodes = await self.retriever.aretrieve(query) if self.node_postprocessors: for node_postprocessor in self.node_postprocessors: retrieved_nodes = node_postprocessor.postprocess_nodes( retrieved_nodes, query_str=query ) return ( [node.node.node_id for node in retrieved_nodes], [node.node.text for node in retrieved_nodes], ) class MultiModalRetrieverEvaluator(BaseRetrievalEvaluator): """Retriever evaluator. This module will evaluate a retriever using a set of metrics. Args: metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate retriever: Retriever to evaluate. node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval. 
""" retriever: BaseRetriever = Field(..., description="Retriever to evaluate") node_postprocessors: Optional[List[BaseNodePostprocessor]] = Field( default=None, description="Optional post-processor" ) def __init__( self, metrics: Sequence[BaseRetrievalMetric], retriever: BaseRetriever, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, **kwargs: Any, ) -> None: """Init params.""" super().__init__( metrics=metrics, retriever=retriever, node_postprocessors=node_postprocessors, **kwargs, ) async def _aget_retrieved_ids_texts( self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT ) -> Tuple[List[str], List[str]]: """Get retrieved ids.""" retrieved_nodes = await self.retriever.aretrieve(query) image_nodes: List[ImageNode] = [] text_nodes: List[TextNode] = [] if self.node_postprocessors: for node_postprocessor in self.node_postprocessors: retrieved_nodes = node_postprocessor.postprocess_nodes( retrieved_nodes, query_str=query ) for scored_node in retrieved_nodes: node = scored_node.node if isinstance(node, ImageNode): image_nodes.append(node) if node.text: text_nodes.append(node) if mode == "text": return ( [node.node_id for node in text_nodes], [node.text for node in text_nodes], ) elif mode == "image": return ( [node.node_id for node in image_nodes], [node.text for node in image_nodes], ) else: raise ValueError("Unsupported mode.")
[ "llama_index.legacy.bridge.pydantic.Field" ]
[((1038, 1085), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever to evaluate"""'}), "(..., description='Retriever to evaluate')\n", (1043, 1085), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1151, 1209), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Optional post-processor"""'}), "(default=None, description='Optional post-processor')\n", (1156, 1209), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2787, 2834), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever to evaluate"""'}), "(..., description='Retriever to evaluate')\n", (2792, 2834), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2900, 2958), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Optional post-processor"""'}), "(default=None, description='Optional post-processor')\n", (2905, 2958), False, 'from llama_index.legacy.bridge.pydantic import Field\n')]
from typing import Any, List, Optional

from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.core.embeddings.base import (
    DEFAULT_EMBED_BATCH_SIZE,
    BaseEmbedding,
)
from llama_index.legacy.embeddings.huggingface_utils import (
    format_query,
    format_text,
    get_pooling_mode,
)
from llama_index.legacy.embeddings.pooling import Pooling
from llama_index.legacy.utils import infer_torch_device


class OptimumEmbedding(BaseEmbedding):
    folder_name: str = Field(description="Folder name to load from.")
    max_length: int = Field(description="Maximum length of input.")
    pooling: str = Field(description="Pooling strategy. One of ['cls', 'mean'].")
    normalize: str = Field(default=True, description="Normalize embeddings or not.")
    query_instruction: Optional[str] = Field(
        description="Instruction to prepend to query text."
    )
    text_instruction: Optional[str] = Field(
        description="Instruction to prepend to text."
    )
    cache_folder: Optional[str] = Field(
        description="Cache folder for huggingface files."
    )

    _model: Any = PrivateAttr()
    _tokenizer: Any = PrivateAttr()
    _device: Any = PrivateAttr()

    def __init__(
        self,
        folder_name: str,
        pooling: Optional[str] = None,
        max_length: Optional[int] = None,
        normalize: bool = True,
        query_instruction: Optional[str] = None,
        text_instruction: Optional[str] = None,
        model: Optional[Any] = None,
        tokenizer: Optional[Any] = None,
        embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
        callback_manager: Optional[CallbackManager] = None,
        device: Optional[str] = None,
    ):
        try:
            from optimum.onnxruntime import ORTModelForFeatureExtraction
            from transformers import AutoTokenizer
        except ImportError:
            raise ImportError(
                "OptimumEmbedding requires transformers to be installed.\n"
                "Please install transformers with "
                "`pip install transformers optimum[exporters]`."
            )

        self._model = model or ORTModelForFeatureExtraction.from_pretrained(folder_name)
        self._tokenizer = tokenizer or AutoTokenizer.from_pretrained(folder_name)
        self._device = device or infer_torch_device()

        if max_length is None:
            try:
                max_length = int(self._model.config.max_position_embeddings)
            except Exception:
                raise ValueError(
                    "Unable to find max_length from model config. "
                    "Please provide max_length."
                )

        if not pooling:
            pooling = get_pooling_mode(model)
        try:
            pooling = Pooling(pooling)
        except ValueError as exc:
            raise NotImplementedError(
                f"Pooling {pooling} unsupported, please pick one in"
                f" {[p.value for p in Pooling]}."
            ) from exc

        super().__init__(
            embed_batch_size=embed_batch_size,
            callback_manager=callback_manager,
            folder_name=folder_name,
            max_length=max_length,
            pooling=pooling,
            normalize=normalize,
            query_instruction=query_instruction,
            text_instruction=text_instruction,
        )

    @classmethod
    def class_name(cls) -> str:
        return "OptimumEmbedding"

    @classmethod
    def create_and_save_optimum_model(
        cls,
        model_name_or_path: str,
        output_path: str,
        export_kwargs: Optional[dict] = None,
    ) -> None:
        try:
            from optimum.onnxruntime import ORTModelForFeatureExtraction
            from transformers import AutoTokenizer
        except ImportError:
            raise ImportError(
                "OptimumEmbedding requires transformers to be installed.\n"
                "Please install transformers with "
                "`pip install transformers optimum[exporters]`."
            )

        export_kwargs = export_kwargs or {}
        model = ORTModelForFeatureExtraction.from_pretrained(
            model_name_or_path, export=True, **export_kwargs
        )
        tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)

        model.save_pretrained(output_path)
        tokenizer.save_pretrained(output_path)
        print(
            f"Saved optimum model to {output_path}. Use it with "
            f"`embed_model = OptimumEmbedding(folder_name='{output_path}')`."
        )

    def _mean_pooling(self, model_output: Any, attention_mask: Any) -> Any:
        """Mean Pooling - Take attention mask into account for correct averaging."""
        import torch

        # First element of model_output contains all token embeddings
        token_embeddings = model_output[0]
        input_mask_expanded = (
            attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        )
        return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(
            input_mask_expanded.sum(1), min=1e-9
        )

    def _cls_pooling(self, model_output: list) -> Any:
        """Use the CLS token as the pooling token."""
        return model_output[0][:, 0]

    def _embed(self, sentences: List[str]) -> List[List[float]]:
        """Embed sentences."""
        encoded_input = self._tokenizer(
            sentences,
            padding=True,
            max_length=self.max_length,
            truncation=True,
            return_tensors="pt",
        )

        # pop token_type_ids
        encoded_input.pop("token_type_ids", None)

        model_output = self._model(**encoded_input)

        if self.pooling == "cls":
            embeddings = self._cls_pooling(model_output)
        else:
            embeddings = self._mean_pooling(
                model_output, encoded_input["attention_mask"].to(self._device)
            )

        if self.normalize:
            import torch

            embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)

        return embeddings.tolist()

    def _get_query_embedding(self, query: str) -> List[float]:
        """Get query embedding."""
        query = format_query(query, self.model_name, self.query_instruction)
        return self._embed([query])[0]

    async def _aget_query_embedding(self, query: str) -> List[float]:
        """Get query embedding async."""
        return self._get_query_embedding(query)

    async def _aget_text_embedding(self, text: str) -> List[float]:
        """Get text embedding async."""
        return self._get_text_embedding(text)

    def _get_text_embedding(self, text: str) -> List[float]:
        """Get text embedding."""
        text = format_text(text, self.model_name, self.text_instruction)
        return self._embed([text])[0]

    def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
        """Get text embeddings."""
        texts = [
            format_text(text, self.model_name, self.text_instruction) for text in texts
        ]
        return self._embed(texts)
[ "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.embeddings.huggingface_utils.format_text", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.embeddings.huggingface_utils.get_pooling_mode", "llama_index.legacy.embeddings.pooling.Pooling", "llama_index.legacy.embeddings.huggingface_utils.format_query", "llama_index.legacy.utils.infer_torch_device" ]
[((567, 613), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Folder name to load from."""'}), "(description='Folder name to load from.')\n", (572, 613), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((636, 681), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Maximum length of input."""'}), "(description='Maximum length of input.')\n", (641, 681), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((701, 763), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Pooling strategy. One of [\'cls\', \'mean\']."""'}), '(description="Pooling strategy. One of [\'cls\', \'mean\'].")\n', (706, 763), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((785, 848), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Normalize embeddings or not."""'}), "(default=True, description='Normalize embeddings or not.')\n", (790, 848), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((888, 946), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Instruction to prepend to query text."""'}), "(description='Instruction to prepend to query text.')\n", (893, 946), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((999, 1051), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Instruction to prepend to text."""'}), "(description='Instruction to prepend to text.')\n", (1004, 1051), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1100, 1156), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Cache folder for huggingface files."""'}), "(description='Cache folder for huggingface files.')\n", (1105, 1156), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1190, 1203), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1201, 1203), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1226, 1239), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1237, 1239), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1259, 1272), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1270, 1272), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((4174, 4273), 'optimum.onnxruntime.ORTModelForFeatureExtraction.from_pretrained', 'ORTModelForFeatureExtraction.from_pretrained', (['model_name_or_path'], {'export': '(True)'}), '(model_name_or_path, export=\n True, **export_kwargs)\n', (4218, 4273), False, 'from optimum.onnxruntime import ORTModelForFeatureExtraction\n'), ((4311, 4360), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name_or_path'], {}), '(model_name_or_path)\n', (4340, 4360), False, 'from transformers import AutoTokenizer\n'), ((6290, 6350), 'llama_index.legacy.embeddings.huggingface_utils.format_query', 'format_query', (['query', 'self.model_name', 'self.query_instruction'], {}), '(query, self.model_name, self.query_instruction)\n', (6302, 6350), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n'), ((6816, 6873), 'llama_index.legacy.embeddings.huggingface_utils.format_text', 'format_text', (['text', 
'self.model_name', 'self.text_instruction'], {}), '(text, self.model_name, self.text_instruction)\n', (6827, 6873), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n'), ((2218, 2275), 'optimum.onnxruntime.ORTModelForFeatureExtraction.from_pretrained', 'ORTModelForFeatureExtraction.from_pretrained', (['folder_name'], {}), '(folder_name)\n', (2262, 2275), False, 'from optimum.onnxruntime import ORTModelForFeatureExtraction\n'), ((2315, 2357), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['folder_name'], {}), '(folder_name)\n', (2344, 2357), False, 'from transformers import AutoTokenizer\n'), ((2391, 2411), 'llama_index.legacy.utils.infer_torch_device', 'infer_torch_device', ([], {}), '()\n', (2409, 2411), False, 'from llama_index.legacy.utils import infer_torch_device\n'), ((2784, 2807), 'llama_index.legacy.embeddings.huggingface_utils.get_pooling_mode', 'get_pooling_mode', (['model'], {}), '(model)\n', (2800, 2807), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n'), ((2843, 2859), 'llama_index.legacy.embeddings.pooling.Pooling', 'Pooling', (['pooling'], {}), '(pooling)\n', (2850, 2859), False, 'from llama_index.legacy.embeddings.pooling import Pooling\n'), ((5056, 5108), 'torch.sum', 'torch.sum', (['(token_embeddings * input_mask_expanded)', '(1)'], {}), '(token_embeddings * input_mask_expanded, 1)\n', (5065, 5108), False, 'import torch\n'), ((6085, 6138), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['embeddings'], {'p': '(2)', 'dim': '(1)'}), '(embeddings, p=2, dim=1)\n', (6114, 6138), False, 'import torch\n'), ((7053, 7110), 'llama_index.legacy.embeddings.huggingface_utils.format_text', 'format_text', (['text', 'self.model_name', 'self.text_instruction'], {}), '(text, self.model_name, self.text_instruction)\n', (7064, 7110), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n')]
"""LLM Chains for executing Retrival Augmented Generation.""" import base64 import os from functools import lru_cache from pathlib import Path from typing import TYPE_CHECKING, Generator, List, Optional import torch from langchain.embeddings import HuggingFaceEmbeddings from langchain.llms import HuggingFaceTextGenInference from langchain.text_splitter import SentenceTransformersTokenTextSplitter from llama_index.embeddings import LangchainEmbedding from llama_index import ( Prompt, ServiceContext, VectorStoreIndex, download_loader, set_global_service_context, ) from llama_index.postprocessor.types import BaseNodePostprocessor from llama_index.llms import LangChainLLM from llama_index.node_parser import SimpleNodeParser from llama_index.query_engine import RetrieverQueryEngine from llama_index.response.schema import StreamingResponse, Response from llama_index.schema import MetadataMode from llama_index.utils import globals_helper, get_tokenizer from llama_index.vector_stores import MilvusVectorStore, SimpleVectorStore from chain_server import configuration if TYPE_CHECKING: from llama_index.indices.base_retriever import BaseRetriever from llama_index.indices.query.schema import QueryBundle from llama_index.schema import NodeWithScore from llama_index.types import TokenGen from chain_server.configuration_wizard import ConfigWizard TEXT_SPLITTER_MODEL = "intfloat/e5-large-v2" TEXT_SPLITTER_CHUNCK_SIZE = 510 TEXT_SPLITTER_CHUNCK_OVERLAP = 200 EMBEDDING_MODEL = "intfloat/e5-large-v2" DEFAULT_NUM_TOKENS = 50 DEFAULT_MAX_CONTEXT = 800 LLAMA_CHAT_TEMPLATE = ( "<s>[INST] <<SYS>>" "You are a helpful, respectful and honest assistant." "Always answer as helpfully as possible, while being safe." "Please ensure that your responses are positive in nature." "<</SYS>>" "[/INST] {context_str} </s><s>[INST] {query_str} [/INST]" ) LLAMA_RAG_TEMPLATE = ( "<s>[INST] <<SYS>>" "Use the following context to answer the user's question. If you don't know the answer," "just say that you don't know, don't try to make up an answer." "<</SYS>>" "<s>[INST] Context: {context_str} Question: {query_str} Only return the helpful" " answer below and nothing else. 
Helpful answer:[/INST]" ) class LimitRetrievedNodesLength(BaseNodePostprocessor): """Llama Index chain filter to limit token lengths.""" def _postprocess_nodes( self, nodes: List["NodeWithScore"] = [], query_bundle: Optional["QueryBundle"] = None ) -> List["NodeWithScore"]: """Filter function.""" included_nodes = [] current_length = 0 limit = DEFAULT_MAX_CONTEXT tokenizer = get_tokenizer() for node in nodes: current_length += len( tokenizer( node.get_content(metadata_mode=MetadataMode.LLM) ) ) if current_length > limit: break included_nodes.append(node) return included_nodes @lru_cache def get_config() -> "ConfigWizard": """Parse the application configuration.""" config_file = os.environ.get("APP_CONFIG_FILE", "/dev/null") config = configuration.AppConfig.from_file(config_file) if config: return config raise RuntimeError("Unable to find configuration.") @lru_cache def get_llm() -> LangChainLLM: """Create the LLM connection.""" inference_server_url_local = "http://127.0.0.1:9090/" llm_local = HuggingFaceTextGenInference( inference_server_url=inference_server_url_local, max_new_tokens=100, top_k=10, top_p=0.95, typical_p=0.95, temperature=0.7, repetition_penalty=1.03, streaming=True ) return LangChainLLM(llm=llm_local) @lru_cache def get_embedding_model() -> LangchainEmbedding: """Create the embedding model.""" model_kwargs = {"device": "cpu"} device_str = os.environ.get('EMBEDDING_DEVICE', "cuda:1") if torch.cuda.is_available(): model_kwargs["device"] = device_str encode_kwargs = {"normalize_embeddings": False} hf_embeddings = HuggingFaceEmbeddings( model_name=EMBEDDING_MODEL, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs, ) # Load in a specific embedding model return LangchainEmbedding(hf_embeddings) @lru_cache def get_vector_index() -> VectorStoreIndex: """Create the vector db index.""" config = get_config() vector_store = MilvusVectorStore(uri=config.milvus, dim=1024, overwrite=False) #vector_store = SimpleVectorStore() return VectorStoreIndex.from_vector_store(vector_store) @lru_cache def get_doc_retriever(num_nodes: int = 4) -> "BaseRetriever": """Create the document retriever.""" index = get_vector_index() return index.as_retriever(similarity_top_k=num_nodes) @lru_cache def set_service_context() -> None: """Set the global service context.""" service_context = ServiceContext.from_defaults( llm=get_llm(), embed_model=get_embedding_model() ) set_global_service_context(service_context) def llm_chain( context: str, question: str, num_tokens: int ) -> Generator[str, None, None]: """Execute a simple LLM chain using the components defined above.""" set_service_context() prompt = LLAMA_CHAT_TEMPLATE.format(context_str=context, query_str=question) response = get_llm().complete(prompt, max_new_tokens=num_tokens) for i in range(0, len(response.text), 20): yield response.text[i:i + 20] def llm_chain_streaming( context: str, question: str, num_tokens: int ) -> Generator[str, None, None]: """Execute a simple LLM chain using the components defined above.""" set_service_context() prompt = LLAMA_CHAT_TEMPLATE.format(context_str=context, query_str=question) response = get_llm().stream_complete(prompt, max_new_tokens=num_tokens) gen_response = (resp.delta for resp in response) return gen_response def rag_chain(prompt: str, num_tokens: int) -> "TokenGen": """Execute a Retrieval Augmented Generation chain using the components defined above.""" set_service_context() get_llm().llm.max_new_tokens = num_tokens # type: ignore retriever = get_doc_retriever(num_nodes=4) qa_template = 
Prompt(LLAMA_RAG_TEMPLATE) query_engine = RetrieverQueryEngine.from_args( retriever, text_qa_template=qa_template, node_postprocessors=[LimitRetrievedNodesLength()], streaming=False, ) response = query_engine.query(prompt) # Properly handle an empty response if isinstance(response, Response): for i in range(0, len(response.response), 20): yield response.response[i:i + 20] return Response([]).response # type: ignore def rag_chain_streaming(prompt: str, num_tokens: int) -> "TokenGen": """Execute a Retrieval Augmented Generation chain using the components defined above.""" set_service_context() get_llm().llm.max_new_tokens = num_tokens # type: ignore retriever = get_doc_retriever(num_nodes=4) qa_template = Prompt(LLAMA_RAG_TEMPLATE) query_engine = RetrieverQueryEngine.from_args( retriever, text_qa_template=qa_template, node_postprocessors=[LimitRetrievedNodesLength()], streaming=True, ) response = query_engine.query(prompt) # Properly handle an empty response if isinstance(response, StreamingResponse): return response.response_gen return StreamingResponse([]).response_gen # type: ignore def is_base64_encoded(s: str) -> bool: """Check if a string is base64 encoded.""" try: # Attempt to decode the string as base64 decoded_bytes = base64.b64decode(s) # Encode the decoded bytes back to a string to check if it's valid decoded_str = decoded_bytes.decode("utf-8") # If the original string and the decoded string match, it's base64 encoded return s == base64.b64encode(decoded_str.encode("utf-8")).decode("utf-8") except Exception: # pylint:disable = broad-exception-caught # An exception occurred during decoding, so it's not base64 encoded return False def ingest_docs(data_dir: str, filename: str) -> None: """Ingest documents to the VectorDB.""" unstruct_reader = download_loader("UnstructuredReader") loader = unstruct_reader() documents = loader.load_data(file=Path(data_dir), split_documents=False) encoded_filename = filename[:-4] if not is_base64_encoded(encoded_filename): encoded_filename = base64.b64encode(encoded_filename.encode("utf-8")).decode( "utf-8" ) for document in documents: document.metadata = {"filename": encoded_filename} index = get_vector_index() node_parser = SimpleNodeParser.from_defaults() nodes = node_parser.get_nodes_from_documents(documents) index.insert_nodes(nodes)
[ "llama_index.response.schema.Response", "llama_index.embeddings.LangchainEmbedding", "llama_index.VectorStoreIndex.from_vector_store", "llama_index.llms.LangChainLLM", "llama_index.vector_stores.MilvusVectorStore", "llama_index.Prompt", "llama_index.download_loader", "llama_index.node_parser.SimpleNodeParser.from_defaults", "llama_index.utils.get_tokenizer", "llama_index.response.schema.StreamingResponse", "llama_index.set_global_service_context" ]
[((3156, 3202), 'os.environ.get', 'os.environ.get', (['"""APP_CONFIG_FILE"""', '"""/dev/null"""'], {}), "('APP_CONFIG_FILE', '/dev/null')\n", (3170, 3202), False, 'import os\n'), ((3216, 3262), 'chain_server.configuration.AppConfig.from_file', 'configuration.AppConfig.from_file', (['config_file'], {}), '(config_file)\n', (3249, 3262), False, 'from chain_server import configuration\n'), ((3512, 3713), 'langchain.llms.HuggingFaceTextGenInference', 'HuggingFaceTextGenInference', ([], {'inference_server_url': 'inference_server_url_local', 'max_new_tokens': '(100)', 'top_k': '(10)', 'top_p': '(0.95)', 'typical_p': '(0.95)', 'temperature': '(0.7)', 'repetition_penalty': '(1.03)', 'streaming': '(True)'}), '(inference_server_url=inference_server_url_local,\n max_new_tokens=100, top_k=10, top_p=0.95, typical_p=0.95, temperature=\n 0.7, repetition_penalty=1.03, streaming=True)\n', (3539, 3713), False, 'from langchain.llms import HuggingFaceTextGenInference\n'), ((3787, 3814), 'llama_index.llms.LangChainLLM', 'LangChainLLM', ([], {'llm': 'llm_local'}), '(llm=llm_local)\n', (3799, 3814), False, 'from llama_index.llms import LangChainLLM\n'), ((3969, 4013), 'os.environ.get', 'os.environ.get', (['"""EMBEDDING_DEVICE"""', '"""cuda:1"""'], {}), "('EMBEDDING_DEVICE', 'cuda:1')\n", (3983, 4013), False, 'import os\n'), ((4021, 4046), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4044, 4046), False, 'import torch\n'), ((4165, 4274), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'EMBEDDING_MODEL', 'model_kwargs': 'model_kwargs', 'encode_kwargs': 'encode_kwargs'}), '(model_name=EMBEDDING_MODEL, model_kwargs=model_kwargs,\n encode_kwargs=encode_kwargs)\n', (4186, 4274), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((4355, 4388), 'llama_index.embeddings.LangchainEmbedding', 'LangchainEmbedding', (['hf_embeddings'], {}), '(hf_embeddings)\n', (4373, 4388), False, 'from llama_index.embeddings import LangchainEmbedding\n'), ((4529, 4592), 'llama_index.vector_stores.MilvusVectorStore', 'MilvusVectorStore', ([], {'uri': 'config.milvus', 'dim': '(1024)', 'overwrite': '(False)'}), '(uri=config.milvus, dim=1024, overwrite=False)\n', (4546, 4592), False, 'from llama_index.vector_stores import MilvusVectorStore, SimpleVectorStore\n'), ((4644, 4692), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {}), '(vector_store)\n', (4678, 4692), False, 'from llama_index import Prompt, ServiceContext, VectorStoreIndex, download_loader, set_global_service_context\n'), ((5107, 5150), 'llama_index.set_global_service_context', 'set_global_service_context', (['service_context'], {}), '(service_context)\n', (5133, 5150), False, 'from llama_index import Prompt, ServiceContext, VectorStoreIndex, download_loader, set_global_service_context\n'), ((6333, 6359), 'llama_index.Prompt', 'Prompt', (['LLAMA_RAG_TEMPLATE'], {}), '(LLAMA_RAG_TEMPLATE)\n', (6339, 6359), False, 'from llama_index import Prompt, ServiceContext, VectorStoreIndex, download_loader, set_global_service_context\n'), ((7146, 7172), 'llama_index.Prompt', 'Prompt', (['LLAMA_RAG_TEMPLATE'], {}), '(LLAMA_RAG_TEMPLATE)\n', (7152, 7172), False, 'from llama_index import Prompt, ServiceContext, VectorStoreIndex, download_loader, set_global_service_context\n'), ((8366, 8403), 'llama_index.download_loader', 'download_loader', (['"""UnstructuredReader"""'], {}), "('UnstructuredReader')\n", (8381, 8403), False, 'from llama_index import 
Prompt, ServiceContext, VectorStoreIndex, download_loader, set_global_service_context\n'), ((8856, 8888), 'llama_index.node_parser.SimpleNodeParser.from_defaults', 'SimpleNodeParser.from_defaults', ([], {}), '()\n', (8886, 8888), False, 'from llama_index.node_parser import SimpleNodeParser\n'), ((2703, 2718), 'llama_index.utils.get_tokenizer', 'get_tokenizer', ([], {}), '()\n', (2716, 2718), False, 'from llama_index.utils import globals_helper, get_tokenizer\n'), ((6792, 6804), 'llama_index.response.schema.Response', 'Response', (['[]'], {}), '([])\n', (6800, 6804), False, 'from llama_index.response.schema import StreamingResponse, Response\n'), ((7549, 7570), 'llama_index.response.schema.StreamingResponse', 'StreamingResponse', (['[]'], {}), '([])\n', (7566, 7570), False, 'from llama_index.response.schema import StreamingResponse, Response\n'), ((7769, 7788), 'base64.b64decode', 'base64.b64decode', (['s'], {}), '(s)\n', (7785, 7788), False, 'import base64\n'), ((8473, 8487), 'pathlib.Path', 'Path', (['data_dir'], {}), '(data_dir)\n', (8477, 8487), False, 'from pathlib import Path\n')]
from llama_index import ServiceContext
from llama_index import StorageContext, load_index_from_storage
from omegaconf import DictConfig, OmegaConf
import hydra
from llama_index.evaluation import RetrieverEvaluator
from llama_index.evaluation import (
    EmbeddingQAFinetuneDataset,
)
import pandas as pd


@hydra.main(version_base=None, config_path="../../conf", config_name="config")
def main(cfg: DictConfig):
    index_dir = cfg.retriever.evaluate.index_dir
    test_data_path = cfg.retriever.evaluate.test_data_path
    metrics = cfg.retriever.evaluate.metrics

    service_context = ServiceContext.from_defaults()

    # rebuild storage context
    storage_context = StorageContext.from_defaults(persist_dir=index_dir)
    # load index
    index = load_index_from_storage(storage_context, service_context=service_context)
    retriever = index.as_retriever()

    retriever_evaluator = RetrieverEvaluator.from_metric_names(
        metrics, retriever=retriever
    )

    total_metrics = {m: 0.0 for m in metrics}
    qa_data = EmbeddingQAFinetuneDataset.from_json(test_data_path)
    metric_dicts = []
    for qid, query in list(qa_data.queries.items())[:3]:
        relevant_doc_ids = qa_data.relevant_docs[qid]
        result = retriever_evaluator.evaluate(
            query=query, expected_ids=relevant_doc_ids
        )
        metric_dicts.append(result.metric_vals_dict)

    full_df = pd.DataFrame(metric_dicts)
    for metric in metrics:
        metric_ave_val = full_df[metric].mean()
        print(f"{metric}: {metric_ave_val}")


if __name__ == "__main__":
    main()
[ "llama_index.evaluation.EmbeddingQAFinetuneDataset.from_json", "llama_index.ServiceContext.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.load_index_from_storage", "llama_index.evaluation.RetrieverEvaluator.from_metric_names" ]
[((308, 385), 'hydra.main', 'hydra.main', ([], {'version_base': 'None', 'config_path': '"""../../conf"""', 'config_name': '"""config"""'}), "(version_base=None, config_path='../../conf', config_name='config')\n", (318, 385), False, 'import hydra\n'), ((589, 619), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {}), '()\n', (617, 619), False, 'from llama_index import ServiceContext\n'), ((672, 723), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'index_dir'}), '(persist_dir=index_dir)\n', (700, 723), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((753, 826), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (776, 826), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((890, 956), 'llama_index.evaluation.RetrieverEvaluator.from_metric_names', 'RetrieverEvaluator.from_metric_names', (['metrics'], {'retriever': 'retriever'}), '(metrics, retriever=retriever)\n', (926, 956), False, 'from llama_index.evaluation import RetrieverEvaluator\n'), ((1031, 1083), 'llama_index.evaluation.EmbeddingQAFinetuneDataset.from_json', 'EmbeddingQAFinetuneDataset.from_json', (['test_data_path'], {}), '(test_data_path)\n', (1067, 1083), False, 'from llama_index.evaluation import EmbeddingQAFinetuneDataset\n'), ((1396, 1422), 'pandas.DataFrame', 'pd.DataFrame', (['metric_dicts'], {}), '(metric_dicts)\n', (1408, 1422), True, 'import pandas as pd\n')]
import os import time from typing import Any, Callable, List, Sequence from lib import constants from lib.index.helper import cur_simple_date_time_sec from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback from llama_index.core.base.llms.base import BaseLLM from llama_index.core.llms import CustomLLM from llama_index.core.llms import ChatMessage, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata class MultiLlmFallback(CustomLLM): llm_prio_list: List[BaseLLM] def __init__(self, llm_prio_list: List[BaseLLM]): super().__init__(llm_prio_list=llm_prio_list) def execute_and_fallback_on_error(self, task_func: Callable): exceptions = [] for chosen_index, llm in enumerate(self.llm_prio_list): start_time_ms = int(round(time.time() * 1000)) try: answer = task_func(llm) duration_sec = str((int(round(time.time() * 1000)) - start_time_ms) / 1000) message = f" -- MultiLlmFallback --- --- --- --- --- --- --- --- --- --- -- Successful answer from LLM on list-index {chosen_index} after {duration_sec} sec --- --- --- -- --- --- ---" message += f" ### Exceptions: {exceptions}" if exceptions else "" print(message) return answer except Exception as e: duration_sec = str((int(round(time.time() * 1000)) - start_time_ms) / 1000) print(f" XXX --- MultiLlmFallback --- Exception from LLM on list-index {chosen_index} --- XXX - {e} - XXX ---") exceptions.append(e) message = f" -- MultiLlmFallback --- --- --- --- --- --- --- --- --- --- -- Unsuccessful even with fallback --- --- --- -- --- --- ---" message += f" ### Exceptions: {exceptions}" if exceptions else "" raise Exception(message) @llm_chat_callback() def chat(self, messages, **kwargs): answer = self.execute_and_fallback_on_error(lambda worker: worker.chat(messages, **kwargs)) self.write_to_csv(messages, answer) return answer def write_to_csv(self, messages, answer): clz = self.class_name() filename = f"{constants.data_dir}/{constants.run_start_time_id}_{clz}_chat_log_fallback.csv" import pandas as pd ts = cur_simple_date_time_sec() df = pd.DataFrame({ "time_id": [ts for _ in messages], "role": [m.role for m in messages], "message": [m.content for m in messages], "answer": [answer for _ in messages], }) df.to_csv(filename, mode='a', header=not os.path.exists(filename), index=False) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: return self.execute_and_fallback_on_error(lambda worker: worker.stream_chat(messages, **kwargs)) @llm_completion_callback() def complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponse: return self.execute_and_fallback_on_error(lambda worker: worker.complete(prompt, formatted, **kwargs)) @llm_completion_callback() def stream_complete( self, prompt: str, formatted: bool = False, **kwargs: Any ) -> CompletionResponseGen: return self.execute_and_fallback_on_error(lambda worker: worker.stream_complete(prompt, formatted, **kwargs)) @classmethod def class_name(cls) -> str: return "MultiLlmFallback" @property def metadata(self) -> LLMMetadata: """LLM metadata.""" return self.llm_prio_list[0].metadata
[ "llama_index.core.llms.callbacks.llm_completion_callback", "llama_index.core.llms.callbacks.llm_chat_callback" ]
[((1905, 1924), 'llama_index.core.llms.callbacks.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1922, 1924), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((2715, 2734), 'llama_index.core.llms.callbacks.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (2732, 2734), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((2958, 2983), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (2981, 2983), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((3218, 3243), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3241, 3243), False, 'from llama_index.core.llms.callbacks import llm_chat_callback, llm_completion_callback\n'), ((2352, 2378), 'lib.index.helper.cur_simple_date_time_sec', 'cur_simple_date_time_sec', ([], {}), '()\n', (2376, 2378), False, 'from lib.index.helper import cur_simple_date_time_sec\n'), ((2392, 2565), 'pandas.DataFrame', 'pd.DataFrame', (["{'time_id': [ts for _ in messages], 'role': [m.role for m in messages],\n 'message': [m.content for m in messages], 'answer': [answer for _ in\n messages]}"], {}), "({'time_id': [ts for _ in messages], 'role': [m.role for m in\n messages], 'message': [m.content for m in messages], 'answer': [answer for\n _ in messages]})\n", (2404, 2565), True, 'import pandas as pd\n'), ((2666, 2690), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (2680, 2690), False, 'import os\n'), ((824, 835), 'time.time', 'time.time', ([], {}), '()\n', (833, 835), False, 'import time\n'), ((948, 959), 'time.time', 'time.time', ([], {}), '()\n', (957, 959), False, 'import time\n'), ((1419, 1430), 'time.time', 'time.time', ([], {}), '()\n', (1428, 1430), False, 'import time\n')]
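A minimal usage sketch for the MultiLlmFallback class in the record above, assuming the class is in scope (it lives in a module with project-local `lib` imports), that the `llama-index-llms-openai` connector is installed, and that an OPENAI_API_KEY is set; the two model names are placeholder assumptions.

# Sketch only: wires two LLMs so the second is tried only if the first raises.
from llama_index.llms.openai import OpenAI

primary = OpenAI(model="gpt-4", temperature=0.1)      # placeholder model choice
backup = OpenAI(model="gpt-3.5-turbo", temperature=0.1)  # placeholder fallback model

llm = MultiLlmFallback(llm_prio_list=[primary, backup])
# complete() delegates to `primary` first and falls back to `backup` on any exception.
print(llm.complete("Say hello in one short sentence.").text)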
from llama_index import StorageContext, load_index_from_storage # rebuild storage context storage_context = StorageContext.from_defaults(persist_dir="./storage") # load index index = load_index_from_storage(storage_context)
[ "llama_index.StorageContext.from_defaults", "llama_index.load_index_from_storage" ]
[((109, 162), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (137, 162), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((184, 224), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (207, 224), False, 'from llama_index import StorageContext, load_index_from_storage\n')]
"""Autoretriever prompts.""" from llama_index.legacy.prompts.base import PromptTemplate from llama_index.legacy.prompts.prompt_type import PromptType from llama_index.legacy.vector_stores.types import ( FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec, ) # NOTE: these prompts are inspired from langchain's self-query prompt, # and adapted to our use case. # https://github.com/hwchase17/langchain/tree/main/langchain/chains/query_constructor/prompt.py PREFIX = """\ Your goal is to structure the user's query to match the request schema provided below. << Structured Request Schema >> When responding use a markdown code snippet with a JSON object formatted in the \ following schema: {schema_str} The query string should contain only text that is expected to match the contents of \ documents. Any conditions in the filter should not be mentioned in the query as well. Make sure that filters only refer to attributes that exist in the data source. Make sure that filters take into account the descriptions of attributes. Make sure that filters are only used as needed. If there are no filters that should be \ applied return [] for the filter value.\ If the user's query explicitly mentions number of documents to retrieve, set top_k to \ that number, otherwise do not set top_k. """ example_info_1 = VectorStoreInfo( content_info="Lyrics of a song", metadata_info=[ MetadataInfo(name="artist", type="str", description="Name of the song artist"), MetadataInfo( name="genre", type="str", description='The song genre, one of "pop", "rock" or "rap"', ), ], ) example_query_1 = "What are songs by Taylor Swift or Katy Perry about teenage romance in the dance pop genre" example_output_1 = VectorStoreQuerySpec( query="what songs are about teenager love", filters=[ MetadataFilter(key="artist", value="Taylor Swift"), MetadataFilter(key="artist", value="Katy Perry"), MetadataFilter(key="genre", value="pop"), ], ) example_info_2 = VectorStoreInfo( content_info="Classic literature", metadata_info=[ MetadataInfo(name="author", type="str", description="Author name"), MetadataInfo( name="book_title", type="str", description="Book title", ), MetadataInfo( name="year", type="int", description="Year Published", ), MetadataInfo( name="pages", type="int", description="Number of pages", ), MetadataInfo( name="summary", type="str", description="A short summary of the book", ), ], ) example_query_2 = "What are some books by Jane Austen published after 1813 that explore the theme of marriage for social standing?" example_output_2 = VectorStoreQuerySpec( query="What books related to theme of marriage for social standing?", filters=[ MetadataFilter(key="year", value="1813", operator=FilterOperator.GT), MetadataFilter(key="author", value="Jane Austen"), ], ) EXAMPLES = f"""\ << Example 1. >> Data Source: ```json {example_info_1.json(indent=4)} ``` User Query: {example_query_1} Structured Request: ```json {example_output_1.json()} << Example 2. >> Data Source: ```json {example_info_2.json(indent=4)} ``` User Query: {example_query_2} Structured Request: ```json {example_output_2.json()} ``` """.replace( "{", "{{" ).replace( "}", "}}" ) SUFFIX = """ << Example 3. 
>> Data Source: ```json {info_str} ``` User Query: {query_str} Structured Request: """ DEFAULT_VECTARA_QUERY_PROMPT_TMPL = PREFIX + EXAMPLES + SUFFIX # deprecated, kept for backwards compatibility """Vector store query prompt.""" VectorStoreQueryPrompt = PromptTemplate DEFAULT_VECTARA_QUERY_PROMPT = PromptTemplate( template=DEFAULT_VECTARA_QUERY_PROMPT_TMPL, prompt_type=PromptType.VECTOR_STORE_QUERY, )
[ "llama_index.legacy.vector_stores.types.MetadataInfo", "llama_index.legacy.prompts.base.PromptTemplate", "llama_index.legacy.vector_stores.types.MetadataFilter" ]
[((3927, 4033), 'llama_index.legacy.prompts.base.PromptTemplate', 'PromptTemplate', ([], {'template': 'DEFAULT_VECTARA_QUERY_PROMPT_TMPL', 'prompt_type': 'PromptType.VECTOR_STORE_QUERY'}), '(template=DEFAULT_VECTARA_QUERY_PROMPT_TMPL, prompt_type=\n PromptType.VECTOR_STORE_QUERY)\n', (3941, 4033), False, 'from llama_index.legacy.prompts.base import PromptTemplate\n'), ((1451, 1529), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""artist"""', 'type': '"""str"""', 'description': '"""Name of the song artist"""'}), "(name='artist', type='str', description='Name of the song artist')\n", (1463, 1529), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((1539, 1643), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""genre"""', 'type': '"""str"""', 'description': '"""The song genre, one of "pop", "rock" or "rap\\""""'}), '(name=\'genre\', type=\'str\', description=\n \'The song genre, one of "pop", "rock" or "rap"\')\n', (1551, 1643), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((1919, 1969), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""artist"""', 'value': '"""Taylor Swift"""'}), "(key='artist', value='Taylor Swift')\n", (1933, 1969), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((1979, 2027), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""artist"""', 'value': '"""Katy Perry"""'}), "(key='artist', value='Katy Perry')\n", (1993, 2027), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2037, 2077), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""genre"""', 'value': '"""pop"""'}), "(key='genre', value='pop')\n", (2051, 2077), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2190, 2256), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""author"""', 'type': '"""str"""', 'description': '"""Author name"""'}), "(name='author', type='str', description='Author name')\n", (2202, 2256), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2266, 2335), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""book_title"""', 'type': '"""str"""', 'description': '"""Book title"""'}), "(name='book_title', type='str', description='Book title')\n", (2278, 2335), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2392, 2459), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""year"""', 'type': '"""int"""', 'description': '"""Year Published"""'}), "(name='year', type='int', description='Year Published')\n", (2404, 2459), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2516, 2585), 
'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""pages"""', 'type': '"""int"""', 'description': '"""Number of pages"""'}), "(name='pages', type='int', description='Number of pages')\n", (2528, 2585), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((2642, 2730), 'llama_index.legacy.vector_stores.types.MetadataInfo', 'MetadataInfo', ([], {'name': '"""summary"""', 'type': '"""str"""', 'description': '"""A short summary of the book"""'}), "(name='summary', type='str', description=\n 'A short summary of the book')\n", (2654, 2730), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((3054, 3122), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""year"""', 'value': '"""1813"""', 'operator': 'FilterOperator.GT'}), "(key='year', value='1813', operator=FilterOperator.GT)\n", (3068, 3122), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n'), ((3132, 3181), 'llama_index.legacy.vector_stores.types.MetadataFilter', 'MetadataFilter', ([], {'key': '"""author"""', 'value': '"""Jane Austen"""'}), "(key='author', value='Jane Austen')\n", (3146, 3181), False, 'from llama_index.legacy.vector_stores.types import FilterOperator, MetadataFilter, MetadataInfo, VectorStoreInfo, VectorStoreQuerySpec\n')]
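A short sketch of how the auto-retrieval prompt template defined in the record above can be rendered; the data source below is made up for illustration, and using the query-spec JSON schema for `schema_str` is an assumption about how the template is meant to be filled.

from llama_index.legacy.vector_stores.types import (
    MetadataInfo,
    VectorStoreInfo,
    VectorStoreQuerySpec,
)

# Hypothetical data source used only to fill the template placeholders.
info = VectorStoreInfo(
    content_info="Research paper abstracts",
    metadata_info=[
        MetadataInfo(name="year", type="int", description="Publication year"),
    ],
)

prompt_str = DEFAULT_VECTARA_QUERY_PROMPT.format(
    # assumed: the legacy (pydantic-v1 style) schema dump is what {schema_str} expects
    schema_str=VectorStoreQuerySpec.schema_json(indent=4),
    info_str=info.json(indent=4),
    query_str="papers on retrieval augmentation published after 2020",
)
print(prompt_str[:400])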
import asyncio from abc import abstractmethod from typing import Any, Dict, List, Optional, Sequence, Tuple, cast import pandas as pd from tqdm import tqdm from llama_index.core.async_utils import DEFAULT_NUM_WORKERS, run_jobs from llama_index.core.base.response.schema import PydanticResponse from llama_index.core.bridge.pydantic import BaseModel, Field, ValidationError from llama_index.core.callbacks.base import CallbackManager from llama_index.core.llms.llm import LLM from llama_index.core.node_parser.interface import NodeParser from llama_index.core.schema import BaseNode, Document, IndexNode, TextNode from llama_index.core.utils import get_tqdm_iterable DEFAULT_SUMMARY_QUERY_STR = """\ What is this table about? Give a very concise summary (imagine you are adding a new caption and summary for this table), \ and output the real/existing table title/caption if context provided.\ and output the real/existing table id if context provided.\ and also output whether or not the table should be kept.\ """ class TableColumnOutput(BaseModel): """Output from analyzing a table column.""" col_name: str col_type: str summary: Optional[str] = None def __str__(self) -> str: """Convert to string representation.""" return ( f"Column: {self.col_name}\nType: {self.col_type}\nSummary: {self.summary}" ) class TableOutput(BaseModel): """Output from analyzing a table.""" summary: str table_title: Optional[str] = None table_id: Optional[str] = None columns: List[TableColumnOutput] class Element(BaseModel): """Element object.""" id: str type: str element: Any title_level: Optional[int] = None table_output: Optional[TableOutput] = None table: Optional[pd.DataFrame] = None markdown: Optional[str] = None page_number: Optional[int] = None class Config: arbitrary_types_allowed = True class BaseElementNodeParser(NodeParser): """ Splits a document into Text Nodes and Index Nodes corresponding to embedded objects. Supports text and tables currently. """ callback_manager: CallbackManager = Field( default_factory=CallbackManager, exclude=True ) llm: Optional[LLM] = Field( default=None, description="LLM model to use for summarization." 
) summary_query_str: str = Field( default=DEFAULT_SUMMARY_QUERY_STR, description="Query string to use for summarization.", ) num_workers: int = Field( default=DEFAULT_NUM_WORKERS, description="Num of workers for async jobs.", ) show_progress: bool = Field(default=True, description="Whether to show progress.") nested_node_parser: Optional[NodeParser] = Field( default=None, description="Other types of node parsers to handle some types of nodes.", ) @classmethod def class_name(cls) -> str: return "BaseElementNodeParser" @classmethod def from_defaults( cls, callback_manager: Optional[CallbackManager] = None, **kwargs: Any, ) -> "BaseElementNodeParser": callback_manager = callback_manager or CallbackManager([]) return cls( callback_manager=callback_manager, **kwargs, ) def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any, ) -> List[BaseNode]: all_nodes: List[BaseNode] = [] nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes") for node in nodes_with_progress: nodes = self.get_nodes_from_node(node) all_nodes.extend(nodes) return all_nodes @abstractmethod def get_nodes_from_node(self, node: TextNode) -> List[BaseNode]: """Get nodes from node.""" @abstractmethod def extract_elements(self, text: str, **kwargs: Any) -> List[Element]: """Extract elements from text.""" def get_table_elements(self, elements: List[Element]) -> List[Element]: """Get table elements.""" return [e for e in elements if e.type == "table" or e.type == "table_text"] def get_text_elements(self, elements: List[Element]) -> List[Element]: """Get text elements.""" # TODO: There we should maybe do something with titles # and other elements in the future? return [e for e in elements if e.type != "table"] def extract_table_summaries(self, elements: List[Element]) -> None: """Go through elements, extract out summaries that are tables.""" from llama_index.core.indices.list.base import SummaryIndex from llama_index.core.service_context import ServiceContext if self.llm: llm = self.llm else: try: from llama_index.llms.openai import OpenAI # pants: no-infer-dep except ImportError as e: raise ImportError( "`llama-index-llms-openai` package not found." " Please install with `pip install llama-index-llms-openai`." 
) llm = OpenAI() llm = cast(LLM, llm) service_context = ServiceContext.from_defaults(llm=llm, embed_model=None) table_context_list = [] for idx, element in tqdm(enumerate(elements)): if element.type not in ("table", "table_text"): continue table_context = str(element.element) if idx > 0 and str(elements[idx - 1].element).lower().strip().startswith( "table" ): table_context = str(elements[idx - 1].element) + "\n" + table_context if idx < len(elements) + 1 and str( elements[idx - 1].element ).lower().strip().startswith("table"): table_context += "\n" + str(elements[idx + 1].element) table_context_list.append(table_context) async def _get_table_output(table_context: str, summary_query_str: str) -> Any: index = SummaryIndex.from_documents( [Document(text=table_context)], service_context=service_context ) query_engine = index.as_query_engine(llm=llm, output_cls=TableOutput) try: response = await query_engine.aquery(summary_query_str) return cast(PydanticResponse, response).response except ValidationError: # There was a pydantic validation error, so we will run with text completion # fill in the summary and leave other fields blank query_engine = index.as_query_engine() response_txt = await query_engine.aquery(summary_query_str) return TableOutput(summary=str(response_txt), columns=[]) summary_jobs = [ _get_table_output(table_context, self.summary_query_str) for table_context in table_context_list ] summary_outputs = asyncio.run( run_jobs( summary_jobs, show_progress=self.show_progress, workers=self.num_workers ) ) for element, summary_output in zip(elements, summary_outputs): element.table_output = summary_output def get_base_nodes_and_mappings( self, nodes: List[BaseNode] ) -> Tuple[List[BaseNode], Dict]: """Get base nodes and mappings. Given a list of nodes and IndexNode objects, return the base nodes and a mapping from index id to child nodes (which are excluded from the base nodes). 
""" node_dict = {node.node_id: node for node in nodes} node_mappings = {} base_nodes = [] # first map index nodes to their child nodes nonbase_node_ids = set() for node in nodes: if isinstance(node, IndexNode): node_mappings[node.index_id] = node_dict[node.index_id] nonbase_node_ids.add(node.index_id) else: pass # then add all nodes that are not children of index nodes for node in nodes: if node.node_id not in nonbase_node_ids: base_nodes.append(node) return base_nodes, node_mappings def get_nodes_and_objects( self, nodes: List[BaseNode] ) -> Tuple[List[BaseNode], List[IndexNode]]: base_nodes, node_mappings = self.get_base_nodes_and_mappings(nodes) nodes = [] objects = [] for node in base_nodes: if isinstance(node, IndexNode): node.obj = node_mappings[node.index_id] objects.append(node) else: nodes.append(node) return nodes, objects def _get_nodes_from_buffer( self, buffer: List[str], node_parser: NodeParser ) -> List[BaseNode]: """Get nodes from buffer.""" doc = Document(text="\n\n".join(list(buffer))) return node_parser.get_nodes_from_documents([doc]) def get_nodes_from_elements( self, elements: List[Element], metadata_inherited: Optional[Dict[str, Any]] = None, ) -> List[BaseNode]: """Get nodes and mappings.""" from llama_index.core.node_parser import SentenceSplitter node_parser = self.nested_node_parser or SentenceSplitter() nodes = [] cur_text_el_buffer: List[str] = [] for element in elements: if element.type == "table" or element.type == "table_text": # flush text buffer for table if len(cur_text_el_buffer) > 0: cur_text_nodes = self._get_nodes_from_buffer( cur_text_el_buffer, node_parser ) nodes.extend(cur_text_nodes) cur_text_el_buffer = [] table_output = cast(TableOutput, element.table_output) table_md = "" if element.type == "table": table_df = cast(pd.DataFrame, element.table) # We serialize the table as markdown as it allow better accuracy # We do not use the table_df.to_markdown() method as it generate # a table with a token hungry format. 
table_md = "|" for col_name, col in table_df.items(): table_md += f"{col_name}|" table_md += "\n|" for col_name, col in table_df.items(): table_md += f"---|" table_md += "\n" for row in table_df.itertuples(): table_md += "|" for col in row[1:]: table_md += f"{col}|" table_md += "\n" elif element.type == "table_text": # if the table is non-perfect table, we still want to keep the original text of table table_md = str(element.element) table_id = element.id + "_table" table_ref_id = element.id + "_table_ref" col_schema = "\n\n".join([str(col) for col in table_output.columns]) # We build a summary of the table containing the extracted summary, and a description of the columns table_summary = str(table_output.summary) if table_output.table_title: table_summary += ",\nwith the following table title:\n" table_summary += str(table_output.table_title) table_summary += ",\nwith the following columns:\n" for col in table_output.columns: table_summary += f"- {col.col_name}: {col.summary}\n" index_node = IndexNode( text=table_summary, metadata={"col_schema": col_schema}, excluded_embed_metadata_keys=["col_schema"], id_=table_ref_id, index_id=table_id, ) table_str = table_summary + "\n" + table_md text_node = TextNode( text=table_str, id_=table_id, metadata={ # serialize the table as a dictionary string for dataframe of perfect table "table_df": ( str(table_df.to_dict()) if element.type == "table" else table_md ), # add table summary for retrieval purposes "table_summary": table_summary, }, excluded_embed_metadata_keys=["table_df", "table_summary"], excluded_llm_metadata_keys=["table_df", "table_summary"], ) nodes.extend([index_node, text_node]) else: cur_text_el_buffer.append(str(element.element)) # flush text buffer for the last batch if len(cur_text_el_buffer) > 0: cur_text_nodes = self._get_nodes_from_buffer( cur_text_el_buffer, node_parser ) nodes.extend(cur_text_nodes) cur_text_el_buffer = [] # remove empty nodes and keep node original metadata inherited from parent nodes for node in nodes: if metadata_inherited: node.metadata.update(metadata_inherited) return [node for node in nodes if len(node.text) > 0]
[ "llama_index.core.node_parser.SentenceSplitter", "llama_index.core.callbacks.base.CallbackManager", "llama_index.core.bridge.pydantic.Field", "llama_index.core.service_context.ServiceContext.from_defaults", "llama_index.core.async_utils.run_jobs", "llama_index.core.schema.Document", "llama_index.core.schema.IndexNode", "llama_index.core.utils.get_tqdm_iterable", "llama_index.llms.openai.OpenAI" ]
[((2154, 2206), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)'}), '(default_factory=CallbackManager, exclude=True)\n', (2159, 2206), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, ValidationError\n'), ((2246, 2316), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""LLM model to use for summarization."""'}), "(default=None, description='LLM model to use for summarization.')\n", (2251, 2316), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, ValidationError\n'), ((2360, 2459), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_SUMMARY_QUERY_STR', 'description': '"""Query string to use for summarization."""'}), "(default=DEFAULT_SUMMARY_QUERY_STR, description=\n 'Query string to use for summarization.')\n", (2365, 2459), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, ValidationError\n'), ((2501, 2586), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_NUM_WORKERS', 'description': '"""Num of workers for async jobs."""'}), "(default=DEFAULT_NUM_WORKERS, description='Num of workers for async jobs.'\n )\n", (2506, 2586), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, ValidationError\n'), ((2632, 2692), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Whether to show progress."""'}), "(default=True, description='Whether to show progress.')\n", (2637, 2692), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, ValidationError\n'), ((2741, 2839), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Other types of node parsers to handle some types of nodes."""'}), "(default=None, description=\n 'Other types of node parsers to handle some types of nodes.')\n", (2746, 2839), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, ValidationError\n'), ((3511, 3567), 'llama_index.core.utils.get_tqdm_iterable', 'get_tqdm_iterable', (['nodes', 'show_progress', '"""Parsing nodes"""'], {}), "(nodes, show_progress, 'Parsing nodes')\n", (3528, 3567), False, 'from llama_index.core.utils import get_tqdm_iterable\n'), ((5180, 5194), 'typing.cast', 'cast', (['LLM', 'llm'], {}), '(LLM, llm)\n', (5184, 5194), False, 'from typing import Any, Dict, List, Optional, Sequence, Tuple, cast\n'), ((5222, 5277), 'llama_index.core.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'None'}), '(llm=llm, embed_model=None)\n', (5250, 5277), False, 'from llama_index.core.service_context import ServiceContext\n'), ((3165, 3184), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (3180, 3184), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((5157, 5165), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {}), '()\n', (5163, 5165), False, 'from llama_index.llms.openai import OpenAI\n'), ((7054, 7141), 'llama_index.core.async_utils.run_jobs', 'run_jobs', (['summary_jobs'], {'show_progress': 'self.show_progress', 'workers': 'self.num_workers'}), '(summary_jobs, show_progress=self.show_progress, workers=self.\n num_workers)\n', (7062, 7141), False, 'from llama_index.core.async_utils import DEFAULT_NUM_WORKERS, run_jobs\n'), ((9367, 9385), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (9383, 
9385), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((9917, 9956), 'typing.cast', 'cast', (['TableOutput', 'element.table_output'], {}), '(TableOutput, element.table_output)\n', (9921, 9956), False, 'from typing import Any, Dict, List, Optional, Sequence, Tuple, cast\n'), ((11864, 12021), 'llama_index.core.schema.IndexNode', 'IndexNode', ([], {'text': 'table_summary', 'metadata': "{'col_schema': col_schema}", 'excluded_embed_metadata_keys': "['col_schema']", 'id_': 'table_ref_id', 'index_id': 'table_id'}), "(text=table_summary, metadata={'col_schema': col_schema},\n excluded_embed_metadata_keys=['col_schema'], id_=table_ref_id, index_id\n =table_id)\n", (11873, 12021), False, 'from llama_index.core.schema import BaseNode, Document, IndexNode, TextNode\n'), ((6132, 6160), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'table_context'}), '(text=table_context)\n', (6140, 6160), False, 'from llama_index.core.schema import BaseNode, Document, IndexNode, TextNode\n'), ((6403, 6435), 'typing.cast', 'cast', (['PydanticResponse', 'response'], {}), '(PydanticResponse, response)\n', (6407, 6435), False, 'from typing import Any, Dict, List, Optional, Sequence, Tuple, cast\n'), ((10062, 10095), 'typing.cast', 'cast', (['pd.DataFrame', 'element.table'], {}), '(pd.DataFrame, element.table)\n', (10066, 10095), False, 'from typing import Any, Dict, List, Optional, Sequence, Tuple, cast\n')]
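A sketch of the typical driving pattern for a concrete subclass of the element parser above, assuming `MarkdownElementNodeParser` from the same package, a toy markdown document, and OpenAI credentials (`llama-index-llms-openai` installed and OPENAI_API_KEY set) for the table summaries and the default embeddings.

from llama_index.core import VectorStoreIndex
from llama_index.core.node_parser import MarkdownElementNodeParser
from llama_index.core.schema import Document

# Toy document with one prose block and one markdown table.
docs = [Document(text="# Report\n\nSome prose.\n\n| city | population |\n| --- | --- |\n| Oslo | 709037 |\n")]

parser = MarkdownElementNodeParser(num_workers=2)  # llm not set, so table summaries fall back to OpenAI
raw_nodes = parser.get_nodes_from_documents(docs)

# Split IndexNodes (table references) from plain text nodes, as implemented above,
# then build an index that can recursively resolve the referenced tables.
base_nodes, objects = parser.get_nodes_and_objects(raw_nodes)
index = VectorStoreIndex(nodes=base_nodes, objects=objects)
query_engine = index.as_query_engine(similarity_top_k=2)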
# SPDX-FileCopyrightText: Copyright (c) 2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved. # SPDX-License-Identifier: MIT # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. import os from typing import Any, Callable, Dict, Optional, Sequence from llama_index.bridge.pydantic import Field, PrivateAttr from llama_index.callbacks import CallbackManager from llama_index.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS from llama_index.llms.base import ( ChatMessage, ChatResponse, CompletionResponse, LLMMetadata, llm_chat_callback, llm_completion_callback, ) from llama_index.llms.custom import CustomLLM from llama_index.llms.generic_utils import completion_response_to_chat_response from llama_index.llms.generic_utils import ( messages_to_prompt as generic_messages_to_prompt, ) from transformers import LlamaTokenizer import gc import json import torch import numpy as np from tensorrt_llm.runtime import ModelConfig, SamplingConfig import tensorrt_llm from pathlib import Path import uuid import time EOS_TOKEN = 2 PAD_TOKEN = 2 class TrtLlmAPI(CustomLLM): model_path: Optional[str] = Field( description="The path to the trt engine." ) temperature: float = Field(description="The temperature to use for sampling.") max_new_tokens: int = Field(description="The maximum number of tokens to generate.") context_window: int = Field( description="The maximum number of context tokens for the model." ) messages_to_prompt: Callable = Field( description="The function to convert messages to a prompt.", exclude=True ) completion_to_prompt: Callable = Field( description="The function to convert a completion to a prompt.", exclude=True ) generate_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Kwargs used for generation." ) model_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Kwargs used for model initialization." 
) verbose: bool = Field(description="Whether to print verbose output.") _model: Any = PrivateAttr() _model_config: Any = PrivateAttr() _tokenizer: Any = PrivateAttr() _max_new_tokens = PrivateAttr() _sampling_config = PrivateAttr() _verbose = PrivateAttr() def __init__( self, model_path: Optional[str] = None, engine_name: Optional[str] = None, tokenizer_dir: Optional[str] = None, temperature: float = 0.1, max_new_tokens: int = DEFAULT_NUM_OUTPUTS, context_window: int = DEFAULT_CONTEXT_WINDOW, messages_to_prompt: Optional[Callable] = None, completion_to_prompt: Optional[Callable] = None, callback_manager: Optional[CallbackManager] = None, generate_kwargs: Optional[Dict[str, Any]] = None, model_kwargs: Optional[Dict[str, Any]] = None, verbose: bool = False ) -> None: model_kwargs = model_kwargs or {} model_kwargs.update({"n_ctx": context_window, "verbose": verbose}) self._max_new_tokens = max_new_tokens self._verbose = verbose # check if model is cached if model_path is not None: if not os.path.exists(model_path): raise ValueError( "Provided model path does not exist. " "Please check the path or provide a model_url to download." ) else: engine_dir = model_path engine_dir_path = Path(engine_dir) config_path = engine_dir_path / 'config.json' # config function with open(config_path, 'r') as f: config = json.load(f) use_gpt_attention_plugin = config['plugin_config']['gpt_attention_plugin'] remove_input_padding = config['plugin_config']['remove_input_padding'] tp_size = config['builder_config']['tensor_parallel'] pp_size = config['builder_config']['pipeline_parallel'] world_size = tp_size * pp_size assert world_size == tensorrt_llm.mpi_world_size(), \ f'Engine world size ({world_size}) != Runtime world size ({tensorrt_llm.mpi_world_size()})' num_heads = config['builder_config']['num_heads'] // tp_size hidden_size = config['builder_config']['hidden_size'] // tp_size vocab_size = config['builder_config']['vocab_size'] num_layers = config['builder_config']['num_layers'] num_kv_heads = config['builder_config'].get('num_kv_heads', num_heads) paged_kv_cache = config['plugin_config']['paged_kv_cache'] if config['builder_config'].get('multi_query_mode', False): tensorrt_llm.logger.warning( "`multi_query_mode` config is deprecated. Please rebuild the engine." 
) num_kv_heads = 1 num_kv_heads = (num_kv_heads + tp_size - 1) // tp_size self._model_config = ModelConfig(num_heads=num_heads, num_kv_heads=num_kv_heads, hidden_size=hidden_size, vocab_size=vocab_size, num_layers=num_layers, gpt_attention_plugin=use_gpt_attention_plugin, paged_kv_cache=paged_kv_cache, remove_input_padding=remove_input_padding) assert pp_size == 1, 'Python runtime does not support pipeline parallelism' world_size = tp_size * pp_size runtime_rank = tensorrt_llm.mpi_rank() runtime_mapping = tensorrt_llm.Mapping(world_size, runtime_rank, tp_size=tp_size, pp_size=pp_size) torch.cuda.set_device(runtime_rank % runtime_mapping.gpus_per_node) self._tokenizer = LlamaTokenizer.from_pretrained(tokenizer_dir, legacy=False) self._sampling_config = SamplingConfig(end_id=EOS_TOKEN, pad_id=PAD_TOKEN, num_beams=1, temperature=temperature) serialize_path = engine_dir_path / engine_name with open(serialize_path, 'rb') as f: engine_buffer = f.read() decoder = tensorrt_llm.runtime.GenerationSession(self._model_config, engine_buffer, runtime_mapping, debug_mode=False) self._model = decoder messages_to_prompt = messages_to_prompt or generic_messages_to_prompt completion_to_prompt = completion_to_prompt or (lambda x: x) generate_kwargs = generate_kwargs or {} generate_kwargs.update( {"temperature": temperature, "max_tokens": max_new_tokens} ) super().__init__( model_path=model_path, temperature=temperature, context_window=context_window, max_new_tokens=max_new_tokens, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, callback_manager=callback_manager, generate_kwargs=generate_kwargs, model_kwargs=model_kwargs, verbose=verbose, ) @classmethod def class_name(cls) -> str: """Get class name.""" return "TrtLlmAPI" @property def metadata(self) -> LLMMetadata: """LLM metadata.""" return LLMMetadata( context_window=self.context_window, num_output=self.max_new_tokens, model_name=self.model_path, ) @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: prompt = self.messages_to_prompt(messages) completion_response = self.complete(prompt, formatted=True, **kwargs) return completion_response_to_chat_response(completion_response) @llm_completion_callback() def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse: self.generate_kwargs.update({"stream": False}) is_formatted = kwargs.pop("formatted", False) if not is_formatted: prompt = self.completion_to_prompt(prompt) input_text = prompt input_ids, input_lengths = self.parse_input(input_text, self._tokenizer, EOS_TOKEN, self._model_config) max_input_length = torch.max(input_lengths).item() self._model.setup(input_lengths.size(0), max_input_length, self._max_new_tokens, 1) # beam size is set to 1 if self._verbose: start_time = time.time() output_ids = self._model.decode(input_ids, input_lengths, self._sampling_config) torch.cuda.synchronize() elapsed_time = None if self._verbose: end_time = time.time() elapsed_time = end_time - start_time output_txt, output_token_ids = self.get_output(output_ids, input_lengths, self._max_new_tokens, self._tokenizer) if self._verbose: print(f"Input context length : {input_ids.shape[1]}") print(f"Inference time : {elapsed_time:.2f} seconds") print(f"Output context length : {len(output_token_ids)} ") print(f"Inference token/sec : {(len(output_token_ids) / elapsed_time):2f}") # call garbage collected after inference torch.cuda.empty_cache() gc.collect() return CompletionResponse(text=output_txt, raw=self.generate_completion_dict(output_txt)) 
def parse_input(self, input_text: str, tokenizer, end_id: int, remove_input_padding: bool): input_tokens = [] input_tokens.append( tokenizer.encode(input_text, add_special_tokens=False)) input_lengths = torch.tensor([len(x) for x in input_tokens], dtype=torch.int32, device='cuda') if remove_input_padding: input_ids = np.concatenate(input_tokens) input_ids = torch.tensor(input_ids, dtype=torch.int32, device='cuda').unsqueeze(0) else: input_ids = torch.nested.to_padded_tensor( torch.nested.nested_tensor(input_tokens, dtype=torch.int32), end_id).cuda() return input_ids, input_lengths def remove_extra_eos_ids(self, outputs): outputs.reverse() while outputs and outputs[0] == 2: outputs.pop(0) outputs.reverse() outputs.append(2) return outputs def get_output(self, output_ids, input_lengths, max_output_len, tokenizer): num_beams = output_ids.size(1) output_text = "" outputs = None for b in range(input_lengths.size(0)): for beam in range(num_beams): output_begin = input_lengths[b] output_end = input_lengths[b] + max_output_len outputs = output_ids[b][beam][output_begin:output_end].tolist() outputs = self.remove_extra_eos_ids(outputs) output_text = tokenizer.decode(outputs) return output_text, outputs def generate_completion_dict(self, text_str): """ Generate a dictionary for text completion details. Returns: dict: A dictionary containing completion details. """ completion_id: str = f"cmpl-{str(uuid.uuid4())}" created: int = int(time.time()) model_name: str = self._model if self._model is not None else self.model_path return { "id": completion_id, "object": "text_completion", "created": created, "model": model_name, "choices": [ { "text": text_str, "index": 0, "logprobs": None, "finish_reason": 'stop' } ], "usage": { "prompt_tokens": None, "completion_tokens": None, "total_tokens": None } } @llm_completion_callback() def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponse: pass
[ "llama_index.llms.base.LLMMetadata", "llama_index.bridge.pydantic.Field", "llama_index.llms.base.llm_completion_callback", "llama_index.bridge.pydantic.PrivateAttr", "llama_index.llms.base.llm_chat_callback", "llama_index.llms.generic_utils.completion_response_to_chat_response" ]
[((2151, 2199), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The path to the trt engine."""'}), "(description='The path to the trt engine.')\n", (2156, 2199), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2239, 2296), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (2244, 2296), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2323, 2385), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (2328, 2385), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2412, 2484), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of context tokens for the model."""'}), "(description='The maximum number of context tokens for the model.')\n", (2417, 2484), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2534, 2619), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The function to convert messages to a prompt."""', 'exclude': '(True)'}), "(description='The function to convert messages to a prompt.', exclude=True\n )\n", (2539, 2619), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2666, 2754), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""The function to convert a completion to a prompt."""', 'exclude': '(True)'}), "(description='The function to convert a completion to a prompt.',\n exclude=True)\n", (2671, 2754), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2803, 2873), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Kwargs used for generation."""'}), "(default_factory=dict, description='Kwargs used for generation.')\n", (2808, 2873), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((2923, 3008), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Kwargs used for model initialization."""'}), "(default_factory=dict, description='Kwargs used for model initialization.'\n )\n", (2928, 3008), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3038, 3091), 'llama_index.bridge.pydantic.Field', 'Field', ([], {'description': '"""Whether to print verbose output."""'}), "(description='Whether to print verbose output.')\n", (3043, 3091), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3111, 3124), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3122, 3124), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3150, 3163), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3161, 3163), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3186, 3199), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3197, 3199), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3222, 3235), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3233, 3235), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((3259, 3272), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3270, 3272), False, 'from llama_index.bridge.pydantic import Field, 
PrivateAttr\n'), ((3288, 3301), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3299, 3301), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((9389, 9408), 'llama_index.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9406, 9408), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, CompletionResponse, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((9701, 9726), 'llama_index.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (9724, 9726), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, CompletionResponse, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((14134, 14159), 'llama_index.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (14157, 14159), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, CompletionResponse, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((9228, 9340), 'llama_index.llms.base.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_window', 'num_output': 'self.max_new_tokens', 'model_name': 'self.model_path'}), '(context_window=self.context_window, num_output=self.\n max_new_tokens, model_name=self.model_path)\n', (9239, 9340), False, 'from llama_index.llms.base import ChatMessage, ChatResponse, CompletionResponse, LLMMetadata, llm_chat_callback, llm_completion_callback\n'), ((9637, 9694), 'llama_index.llms.generic_utils.completion_response_to_chat_response', 'completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (9673, 9694), False, 'from llama_index.llms.generic_utils import completion_response_to_chat_response\n'), ((10577, 10601), 'torch.cuda.synchronize', 'torch.cuda.synchronize', ([], {}), '()\n', (10599, 10601), False, 'import torch\n'), ((11367, 11391), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (11389, 11391), False, 'import torch\n'), ((11400, 11412), 'gc.collect', 'gc.collect', ([], {}), '()\n', (11410, 11412), False, 'import gc\n'), ((10467, 10478), 'time.time', 'time.time', ([], {}), '()\n', (10476, 10478), False, 'import time\n'), ((10680, 10691), 'time.time', 'time.time', ([], {}), '()\n', (10689, 10691), False, 'import time\n'), ((11988, 12016), 'numpy.concatenate', 'np.concatenate', (['input_tokens'], {}), '(input_tokens)\n', (12002, 12016), True, 'import numpy as np\n'), ((13479, 13490), 'time.time', 'time.time', ([], {}), '()\n', (13488, 13490), False, 'import time\n'), ((4271, 4297), 'os.path.exists', 'os.path.exists', (['model_path'], {}), '(model_path)\n', (4285, 4297), False, 'import os\n'), ((4582, 4598), 'pathlib.Path', 'Path', (['engine_dir'], {}), '(engine_dir)\n', (4586, 4598), False, 'from pathlib import Path\n'), ((6180, 6445), 'tensorrt_llm.runtime.ModelConfig', 'ModelConfig', ([], {'num_heads': 'num_heads', 'num_kv_heads': 'num_kv_heads', 'hidden_size': 'hidden_size', 'vocab_size': 'vocab_size', 'num_layers': 'num_layers', 'gpt_attention_plugin': 'use_gpt_attention_plugin', 'paged_kv_cache': 'paged_kv_cache', 'remove_input_padding': 'remove_input_padding'}), '(num_heads=num_heads, num_kv_heads=num_kv_heads, hidden_size=\n hidden_size, vocab_size=vocab_size, num_layers=num_layers,\n gpt_attention_plugin=use_gpt_attention_plugin, paged_kv_cache=\n paged_kv_cache, remove_input_padding=remove_input_padding)\n', (6191, 6445), False, 'from tensorrt_llm.runtime import ModelConfig, SamplingConfig\n'), ((6947, 6970), 
'tensorrt_llm.mpi_rank', 'tensorrt_llm.mpi_rank', ([], {}), '()\n', (6968, 6970), False, 'import tensorrt_llm\n'), ((7005, 7090), 'tensorrt_llm.Mapping', 'tensorrt_llm.Mapping', (['world_size', 'runtime_rank'], {'tp_size': 'tp_size', 'pp_size': 'pp_size'}), '(world_size, runtime_rank, tp_size=tp_size, pp_size=pp_size\n )\n', (7025, 7090), False, 'import tensorrt_llm\n'), ((7267, 7334), 'torch.cuda.set_device', 'torch.cuda.set_device', (['(runtime_rank % runtime_mapping.gpus_per_node)'], {}), '(runtime_rank % runtime_mapping.gpus_per_node)\n', (7288, 7334), False, 'import torch\n'), ((7369, 7428), 'transformers.LlamaTokenizer.from_pretrained', 'LlamaTokenizer.from_pretrained', (['tokenizer_dir'], {'legacy': '(False)'}), '(tokenizer_dir, legacy=False)\n', (7399, 7428), False, 'from transformers import LlamaTokenizer\n'), ((7469, 7562), 'tensorrt_llm.runtime.SamplingConfig', 'SamplingConfig', ([], {'end_id': 'EOS_TOKEN', 'pad_id': 'PAD_TOKEN', 'num_beams': '(1)', 'temperature': 'temperature'}), '(end_id=EOS_TOKEN, pad_id=PAD_TOKEN, num_beams=1, temperature\n =temperature)\n', (7483, 7562), False, 'from tensorrt_llm.runtime import ModelConfig, SamplingConfig\n'), ((7912, 8024), 'tensorrt_llm.runtime.GenerationSession', 'tensorrt_llm.runtime.GenerationSession', (['self._model_config', 'engine_buffer', 'runtime_mapping'], {'debug_mode': '(False)'}), '(self._model_config, engine_buffer,\n runtime_mapping, debug_mode=False)\n', (7950, 8024), False, 'import tensorrt_llm\n'), ((10268, 10292), 'torch.max', 'torch.max', (['input_lengths'], {}), '(input_lengths)\n', (10277, 10292), False, 'import torch\n'), ((4775, 4787), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4784, 4787), False, 'import json\n'), ((5192, 5221), 'tensorrt_llm.mpi_world_size', 'tensorrt_llm.mpi_world_size', ([], {}), '()\n', (5219, 5221), False, 'import tensorrt_llm\n'), ((5889, 5992), 'tensorrt_llm.logger.warning', 'tensorrt_llm.logger.warning', (['"""`multi_query_mode` config is deprecated. Please rebuild the engine."""'], {}), "(\n '`multi_query_mode` config is deprecated. Please rebuild the engine.')\n", (5916, 5992), False, 'import tensorrt_llm\n'), ((12041, 12098), 'torch.tensor', 'torch.tensor', (['input_ids'], {'dtype': 'torch.int32', 'device': '"""cuda"""'}), "(input_ids, dtype=torch.int32, device='cuda')\n", (12053, 12098), False, 'import torch\n'), ((13436, 13448), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (13446, 13448), False, 'import uuid\n'), ((5304, 5333), 'tensorrt_llm.mpi_world_size', 'tensorrt_llm.mpi_world_size', ([], {}), '()\n', (5331, 5333), False, 'import tensorrt_llm\n'), ((12234, 12293), 'torch.nested.nested_tensor', 'torch.nested.nested_tensor', (['input_tokens'], {'dtype': 'torch.int32'}), '(input_tokens, dtype=torch.int32)\n', (12260, 12293), False, 'import torch\n')]
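A construction sketch for the TrtLlmAPI wrapper above; every path and the engine file name are placeholders, and a prebuilt TensorRT-LLM engine plus a matching HuggingFace tokenizer directory are required before this will actually run.

# Placeholder paths -- substitute a real engine directory, engine file and tokenizer dir.
llm = TrtLlmAPI(
    model_path="./model/trt_engines/llama2_13b",
    engine_name="llama_float16_tp1_rank0.engine",
    tokenizer_dir="./model/llama2-13b-chat-hf",
    temperature=0.1,
    max_new_tokens=512,
    context_window=3900,
    verbose=True,  # prints input/output lengths and tokens/sec after each call
)

response = llm.complete("Summarize what TensorRT-LLM is in two sentences.")
print(response.text)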
from llama_index.core.callbacks.schema import CBEventType, EventPayload from llama_index.core.llms import ChatMessage, ChatResponse from llama_index.core.schema import NodeWithScore, TextNode import chainlit as cl @cl.on_chat_start async def start(): await cl.Message(content="LlamaIndexCb").send() cb = cl.LlamaIndexCallbackHandler() cb.on_event_start(CBEventType.RETRIEVE, payload={}) await cl.sleep(0.2) cb.on_event_end( CBEventType.RETRIEVE, payload={ EventPayload.NODES: [ NodeWithScore(node=TextNode(text="This is text1"), score=1) ] }, ) cb.on_event_start(CBEventType.LLM) await cl.sleep(0.2) response = ChatResponse(message=ChatMessage(content="This is the LLM response")) cb.on_event_end( CBEventType.LLM, payload={ EventPayload.RESPONSE: response, EventPayload.PROMPT: "This is the LLM prompt", }, )
[ "llama_index.core.schema.TextNode", "llama_index.core.llms.ChatMessage" ]
[((316, 346), 'chainlit.LlamaIndexCallbackHandler', 'cl.LlamaIndexCallbackHandler', ([], {}), '()\n', (344, 346), True, 'import chainlit as cl\n'), ((415, 428), 'chainlit.sleep', 'cl.sleep', (['(0.2)'], {}), '(0.2)\n', (423, 428), True, 'import chainlit as cl\n'), ((691, 704), 'chainlit.sleep', 'cl.sleep', (['(0.2)'], {}), '(0.2)\n', (699, 704), True, 'import chainlit as cl\n'), ((742, 789), 'llama_index.core.llms.ChatMessage', 'ChatMessage', ([], {'content': '"""This is the LLM response"""'}), "(content='This is the LLM response')\n", (753, 789), False, 'from llama_index.core.llms import ChatMessage, ChatResponse\n'), ((264, 298), 'chainlit.Message', 'cl.Message', ([], {'content': '"""LlamaIndexCb"""'}), "(content='LlamaIndexCb')\n", (274, 298), True, 'import chainlit as cl\n'), ((568, 598), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': '"""This is text1"""'}), "(text='This is text1')\n", (576, 598), False, 'from llama_index.core.schema import NodeWithScore, TextNode\n')]
from typing import Optional, Union from llama_index import ServiceContext from llama_index.callbacks import CallbackManager from llama_index.embeddings.utils import EmbedType from llama_index.extractors import ( EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleExtractor, ) from llama_index.llms.utils import LLMType from llama_index.prompts import PromptTemplate from llama_index.prompts.base import BasePromptTemplate from llama_index.text_splitter import SentenceSplitter from autollm.callbacks.cost_calculating import CostCalculatingHandler from autollm.utils.llm_utils import set_default_prompt_template class AutoServiceContext: """AutoServiceContext extends the functionality of LlamaIndex's ServiceContext to include token counting. """ @staticmethod def from_defaults( llm: Optional[LLMType] = "default", embed_model: Optional[EmbedType] = "default", system_prompt: str = None, query_wrapper_prompt: Union[str, BasePromptTemplate] = None, enable_cost_calculator: bool = False, chunk_size: Optional[int] = 512, chunk_overlap: Optional[int] = 100, context_window: Optional[int] = None, enable_title_extractor: bool = False, enable_summary_extractor: bool = False, enable_qa_extractor: bool = False, enable_keyword_extractor: bool = False, enable_entity_extractor: bool = False, **kwargs) -> ServiceContext: """ Create a ServiceContext with default parameters with extended enable_token_counting functionality. If enable_token_counting is True, tracks the number of tokens used by the LLM for each query. Parameters: llm (LLM): The LLM to use for the query engine. Defaults to gpt-3.5-turbo. embed_model (BaseEmbedding): The embedding model to use for the query engine. Defaults to OpenAIEmbedding. system_prompt (str): The system prompt to use for the query engine. query_wrapper_prompt (Union[str, BasePromptTemplate]): The query wrapper prompt to use for the query engine. enable_cost_calculator (bool): Flag to enable cost calculator logging. chunk_size (int): The token chunk size for each chunk. chunk_overlap (int): The token overlap between each chunk. context_window (int): The maximum context size that will get sent to the LLM. enable_title_extractor (bool): Flag to enable title extractor. enable_summary_extractor (bool): Flag to enable summary extractor. enable_qa_extractor (bool): Flag to enable question answering extractor. enable_keyword_extractor (bool): Flag to enable keyword extractor. enable_entity_extractor (bool): Flag to enable entity extractor. **kwargs: Arbitrary keyword arguments. Returns: ServiceContext: The initialized ServiceContext from default parameters with extra token counting functionality. 
""" if not system_prompt and not query_wrapper_prompt: system_prompt, query_wrapper_prompt = set_default_prompt_template() # Convert query_wrapper_prompt to PromptTemplate if it is a string if isinstance(query_wrapper_prompt, str): query_wrapper_prompt = PromptTemplate(template=query_wrapper_prompt) callback_manager: CallbackManager = kwargs.get('callback_manager', CallbackManager()) kwargs.pop( 'callback_manager', None) # Make sure callback_manager is not passed to ServiceContext twice if enable_cost_calculator: llm_model_name = llm.metadata.model_name if not "default" else "gpt-3.5-turbo" callback_manager.add_handler(CostCalculatingHandler(model_name=llm_model_name, verbose=True)) sentence_splitter = SentenceSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap) transformations = [sentence_splitter] if enable_entity_extractor: transformations.append(EntityExtractor()) if enable_keyword_extractor: transformations.append(KeywordExtractor(llm=llm, keywords=5)) if enable_summary_extractor: transformations.append(SummaryExtractor(llm=llm, summaries=["prev", "self"])) if enable_title_extractor: transformations.append(TitleExtractor(llm=llm, nodes=5)) if enable_qa_extractor: transformations.append(QuestionsAnsweredExtractor(llm=llm, questions=5)) service_context = ServiceContext.from_defaults( llm=llm, embed_model=embed_model, transformations=transformations, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, chunk_size=chunk_size, chunk_overlap=chunk_overlap, context_window=context_window, callback_manager=callback_manager, **kwargs) return service_context
[ "llama_index.extractors.TitleExtractor", "llama_index.extractors.QuestionsAnsweredExtractor", "llama_index.ServiceContext.from_defaults", "llama_index.prompts.PromptTemplate", "llama_index.extractors.SummaryExtractor", "llama_index.extractors.EntityExtractor", "llama_index.extractors.KeywordExtractor", "llama_index.callbacks.CallbackManager", "llama_index.text_splitter.SentenceSplitter" ]
[((3952, 4020), 'llama_index.text_splitter.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap'}), '(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n', (3968, 4020), False, 'from llama_index.text_splitter import SentenceSplitter\n'), ((4643, 4954), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'transformations': 'transformations', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt', 'chunk_size': 'chunk_size', 'chunk_overlap': 'chunk_overlap', 'context_window': 'context_window', 'callback_manager': 'callback_manager'}), '(llm=llm, embed_model=embed_model,\n transformations=transformations, system_prompt=system_prompt,\n query_wrapper_prompt=query_wrapper_prompt, chunk_size=chunk_size,\n chunk_overlap=chunk_overlap, context_window=context_window,\n callback_manager=callback_manager, **kwargs)\n', (4671, 4954), False, 'from llama_index import ServiceContext\n'), ((3233, 3262), 'autollm.utils.llm_utils.set_default_prompt_template', 'set_default_prompt_template', ([], {}), '()\n', (3260, 3262), False, 'from autollm.utils.llm_utils import set_default_prompt_template\n'), ((3423, 3468), 'llama_index.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'query_wrapper_prompt'}), '(template=query_wrapper_prompt)\n', (3437, 3468), False, 'from llama_index.prompts import PromptTemplate\n'), ((3545, 3562), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (3560, 3562), False, 'from llama_index.callbacks import CallbackManager\n'), ((3858, 3921), 'autollm.callbacks.cost_calculating.CostCalculatingHandler', 'CostCalculatingHandler', ([], {'model_name': 'llm_model_name', 'verbose': '(True)'}), '(model_name=llm_model_name, verbose=True)\n', (3880, 3921), False, 'from autollm.callbacks.cost_calculating import CostCalculatingHandler\n'), ((4138, 4155), 'llama_index.extractors.EntityExtractor', 'EntityExtractor', ([], {}), '()\n', (4153, 4155), False, 'from llama_index.extractors import EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleExtractor\n'), ((4229, 4266), 'llama_index.extractors.KeywordExtractor', 'KeywordExtractor', ([], {'llm': 'llm', 'keywords': '(5)'}), '(llm=llm, keywords=5)\n', (4245, 4266), False, 'from llama_index.extractors import EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleExtractor\n'), ((4340, 4393), 'llama_index.extractors.SummaryExtractor', 'SummaryExtractor', ([], {'llm': 'llm', 'summaries': "['prev', 'self']"}), "(llm=llm, summaries=['prev', 'self'])\n", (4356, 4393), False, 'from llama_index.extractors import EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleExtractor\n'), ((4465, 4497), 'llama_index.extractors.TitleExtractor', 'TitleExtractor', ([], {'llm': 'llm', 'nodes': '(5)'}), '(llm=llm, nodes=5)\n', (4479, 4497), False, 'from llama_index.extractors import EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleExtractor\n'), ((4566, 4614), 'llama_index.extractors.QuestionsAnsweredExtractor', 'QuestionsAnsweredExtractor', ([], {'llm': 'llm', 'questions': '(5)'}), '(llm=llm, questions=5)\n', (4592, 4614), False, 'from llama_index.extractors import EntityExtractor, KeywordExtractor, QuestionsAnsweredExtractor, SummaryExtractor, TitleExtractor\n')]
import torch from llama_index import WikipediaReader def divide_string(wiki_page, word_limit=50): divided_text = [] for each_page in wiki_page: words = each_page[0].text.split() for i in range(0, len(words), word_limit): chunk = ' '.join(words[i:i+word_limit]) divided_text.append(chunk) return divided_text def wiki_prompter(generator,tokenizer,question): fulltext = "A question is provided below. Given the question, extract " +\ "keywords from the text. Focus on extracting the keywords that we can use " +\ "to best lookup answers to the question. \n" +\ "---------------------\n" +\ "{}\n".format(question) +\ "---------------------\n" +\ "Provide keywords in the following comma-separated format.\nKeywords: " gen_in = tokenizer(fulltext, return_tensors="pt").input_ids.cuda() with torch.no_grad(): generated_ids = generator( gen_in, max_new_tokens=512, use_cache=True, pad_token_id=tokenizer.eos_token_id, num_return_sequences=1, do_sample=True, repetition_penalty=1.1, # 1.0 means 'off'. unfortunately if we penalize it it will not output Sphynx: temperature=0.5, # default: 1.0 top_k=50, # default: 50 top_p=1.0, # default: 1.0 early_stopping=True, ) generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] # for some reason, batch_decode returns an array of one element? text_without_prompt = generated_text[len(fulltext):] response = text_without_prompt response = response.split("===")[0] response.strip() print(response) keywords = response.split(", ") print(keywords) wiki_docs=[] for keyw in keywords: try: wiki_one = WikipediaReader().load_data(pages=[keyw], auto_suggest=False) wiki_docs.append(wiki_one) except: print("No wiki: "+keyw) divided_text = divide_string(wiki_docs, 250) answer_llama="" score_textlist = [0] * len(divided_text) for i, chunk in enumerate(divided_text): for t, keyw in enumerate(keywords): if keyw.lower() in chunk.lower(): score_textlist[i]=score_textlist[i]+1 answer_list=[] divided_text = [item for _, item in sorted(zip(score_textlist, divided_text), reverse=True)] divided_text.append("_") for i, chunk in enumerate(divided_text): if i<4 and not i==int(len(divided_text)-1): fulltext = "Context information is below. \n" +\ "---------------------\n" +\ "{}".format(chunk) +\ "\n---------------------\n" +\ "Given the context information and not prior knowledge, " +\ "answer the question: {}\n".format(question) +\ "Response: " elif i==int(len(divided_text)-1) and len(answer_list)>1 : fulltext = "The original question is as follows: {}\n".format(question) +\ "We have provided existing answers:\n" +\ "------------\n" +\ "{}\n".format(str("\n\n".join(answer_list))) +\ "------------\n" +\ "The best one answer: " else: continue print(fulltext) gen_in = tokenizer(fulltext, return_tensors="pt").input_ids.cuda() with torch.no_grad(): generated_ids = generator( gen_in, max_new_tokens=512, use_cache=True, pad_token_id=tokenizer.eos_token_id, num_return_sequences=1, do_sample=True, repetition_penalty=1.1, # 1.0 means 'off'. unfortunately if we penalize it it will not output Sphynx: temperature=0.5, # default: 1.0 top_k=50, # default: 50 top_p=1.0, # default: 1.0 early_stopping=True, ) generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0] text_without_prompt = generated_text[len(fulltext):] answer_llama = text_without_prompt print() print("\nAnswer: " + answer_llama) print() answer_list.append(answer_llama) return answer_llama
[ "llama_index.WikipediaReader" ]
[((933, 948), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (946, 948), False, 'import torch\n'), ((3638, 3653), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3651, 3653), False, 'import torch\n'), ((1958, 1975), 'llama_index.WikipediaReader', 'WikipediaReader', ([], {}), '()\n', (1973, 1975), False, 'from llama_index import WikipediaReader\n')]
import logging import os from llama_index import ( StorageContext, load_index_from_storage, ) from app.engine.constants import STORAGE_DIR from app.engine.context import create_service_context def get_chat_engine(): service_context = create_service_context() # check if storage already exists if not os.path.exists(STORAGE_DIR): raise Exception( "StorageContext is empty - call 'python app/engine/generate.py' to generate the storage first" ) logger = logging.getLogger("uvicorn") # load the existing index logger.info(f"Loading index from {STORAGE_DIR}...") storage_context = StorageContext.from_defaults(persist_dir=STORAGE_DIR) index = load_index_from_storage(storage_context, service_context=service_context) logger.info(f"Finished loading index from {STORAGE_DIR}") return index.as_chat_engine()
[ "llama_index.StorageContext.from_defaults", "llama_index.load_index_from_storage" ]
[((249, 273), 'app.engine.context.create_service_context', 'create_service_context', ([], {}), '()\n', (271, 273), False, 'from app.engine.context import create_service_context\n'), ((507, 535), 'logging.getLogger', 'logging.getLogger', (['"""uvicorn"""'], {}), "('uvicorn')\n", (524, 535), False, 'import logging\n'), ((644, 697), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'STORAGE_DIR'}), '(persist_dir=STORAGE_DIR)\n', (672, 697), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((710, 783), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (733, 783), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((323, 350), 'os.path.exists', 'os.path.exists', (['STORAGE_DIR'], {}), '(STORAGE_DIR)\n', (337, 350), False, 'import os\n')]
from llama_index import PromptTemplate instruction_str = """\ 1. Convert the query to executable Python code using Pandas. 2. The final line of code should be a Python expression that can be called with the `eval()` function. 3. The code should represent a solution to the query. 4. PRINT ONLY THE EXPRESSION. 5. Do not quote the expression.""" new_prompt = PromptTemplate( """\ You are working with a pandas dataframe in Python. The name of the dataframe is `df`. This is the result of `print(df.head())`: {df_str} Follow these instructions: {instruction_str} Query: {query_str} Expression: """ ) context = """Purpose: The primary role of this agent is to assist users by providing accurate information about world population statistics and details about a country. """
[ "llama_index.PromptTemplate" ]
[((381, 660), 'llama_index.PromptTemplate', 'PromptTemplate', (['""" You are working with a pandas dataframe in Python.\n The name of the dataframe is `df`.\n This is the result of `print(df.head())`:\n {df_str}\n\n Follow these instructions:\n {instruction_str}\n Query: {query_str}\n\n Expression: """'], {}), '(\n """ You are working with a pandas dataframe in Python.\n The name of the dataframe is `df`.\n This is the result of `print(df.head())`:\n {df_str}\n\n Follow these instructions:\n {instruction_str}\n Query: {query_str}\n\n Expression: """\n )\n', (395, 660), False, 'from llama_index import PromptTemplate\n')]
from typing import Union, Optional, List from llama_index.chat_engine.types import BaseChatEngine, ChatMode from llama_index.embeddings.utils import EmbedType from llama_index.chat_engine import ContextChatEngine from llama_index.memory import ChatMemoryBuffer from lyzr.base.llm import LyzrLLMFactory from lyzr.base.service import LyzrService from lyzr.base.vector_store import LyzrVectorStoreIndex from lyzr.base.retrievers import LyzrRetriever from lyzr.utils.document_reading import ( read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents, ) def pdf_chat_( input_dir: Optional[str] = None, input_files: Optional[List] = None, exclude_hidden: bool = True, filename_as_id: bool = True, recursive: bool = True, required_exts: Optional[List[str]] = None, system_prompt: str = None, query_wrapper_prompt: str = None, embed_model: Union[str, EmbedType] = "default", llm_params: dict = None, vector_store_params: dict = None, service_context_params: dict = None, chat_engine_params: dict = None, retriever_params: dict = None, ) -> BaseChatEngine: documents = read_pdf_as_documents( input_dir=input_dir, input_files=input_files, exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive=recursive, required_exts=required_exts, ) llm_params = ( { "model": "gpt-4-0125-preview", "temperature": 0, } if llm_params is None else llm_params ) vector_store_params = ( {"vector_store_type": "WeaviateVectorStore"} if vector_store_params is None else vector_store_params ) service_context_params = ( {} if service_context_params is None else service_context_params ) chat_engine_params = {} if chat_engine_params is None else chat_engine_params retriever_params = ( {"retriever_type": "QueryFusionRetriever"} if retriever_params is None else retriever_params ) llm = LyzrLLMFactory.from_defaults(**llm_params) service_context = LyzrService.from_defaults( llm=llm, embed_model=embed_model, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, **service_context_params, ) vector_store_index = LyzrVectorStoreIndex.from_defaults( **vector_store_params, documents=documents, service_context=service_context ) retriever = LyzrRetriever.from_defaults( **retriever_params, base_index=vector_store_index ) memory = ChatMemoryBuffer.from_defaults(token_limit=4000) chat_engine = ContextChatEngine( llm=llm, memory=memory, retriever=retriever, prefix_messages=list(), **chat_engine_params, ) return chat_engine def txt_chat_( input_dir: Optional[str] = None, input_files: Optional[List] = None, exclude_hidden: bool = True, filename_as_id: bool = True, recursive: bool = True, required_exts: Optional[List[str]] = None, system_prompt: str = None, query_wrapper_prompt: str = None, embed_model: Union[str, EmbedType] = "default", llm_params: dict = None, vector_store_params: dict = None, service_context_params: dict = None, chat_engine_params: dict = None, retriever_params: dict = None, ) -> BaseChatEngine: documents = read_txt_as_documents( input_dir=input_dir, input_files=input_files, exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive=recursive, required_exts=required_exts, ) llm_params = ( { "model": "gpt-4-0125-preview", "temperature": 0, } if llm_params is None else llm_params ) vector_store_params = ( {"vector_store_type": "WeaviateVectorStore"} if vector_store_params is None else vector_store_params ) service_context_params = ( {} if service_context_params is None else service_context_params ) 
chat_engine_params = {} if chat_engine_params is None else chat_engine_params retriever_params = ( {"retriever_type": "QueryFusionRetriever"} if retriever_params is None else retriever_params ) llm = LyzrLLMFactory.from_defaults(**llm_params) service_context = LyzrService.from_defaults( llm=llm, embed_model=embed_model, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, **service_context_params, ) vector_store_index = LyzrVectorStoreIndex.from_defaults( **vector_store_params, documents=documents, service_context=service_context ) retriever = LyzrRetriever.from_defaults( **retriever_params, base_index=vector_store_index ) memory = ChatMemoryBuffer.from_defaults(token_limit=4000) chat_engine = ContextChatEngine( llm=llm, memory=memory, retriever=retriever, prefix_messages=list(), **chat_engine_params, ) return chat_engine def docx_chat_( input_dir: Optional[str] = None, input_files: Optional[List] = None, exclude_hidden: bool = True, filename_as_id: bool = True, recursive: bool = True, required_exts: Optional[List[str]] = None, system_prompt: str = None, query_wrapper_prompt: str = None, embed_model: Union[str, EmbedType] = "default", llm_params: dict = None, vector_store_params: dict = None, service_context_params: dict = None, chat_engine_params: dict = None, retriever_params: dict = None, ) -> BaseChatEngine: documents = read_docx_as_documents( input_dir=input_dir, input_files=input_files, exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive=recursive, required_exts=required_exts, ) llm_params = ( { "model": "gpt-4-0125-preview", "temperature": 0, } if llm_params is None else llm_params ) vector_store_params = ( {"vector_store_type": "WeaviateVectorStore"} if vector_store_params is None else vector_store_params ) service_context_params = ( {} if service_context_params is None else service_context_params ) chat_engine_params = {} if chat_engine_params is None else chat_engine_params retriever_params = ( {"retriever_type": "QueryFusionRetriever"} if retriever_params is None else retriever_params ) llm = LyzrLLMFactory.from_defaults(**llm_params) service_context = LyzrService.from_defaults( llm=llm, embed_model=embed_model, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, **service_context_params, ) vector_store_index = LyzrVectorStoreIndex.from_defaults( **vector_store_params, documents=documents, service_context=service_context ) retriever = LyzrRetriever.from_defaults( **retriever_params, base_index=vector_store_index ) memory = ChatMemoryBuffer.from_defaults(token_limit=4000) chat_engine = ContextChatEngine( llm=llm, memory=memory, retriever=retriever, prefix_messages=list(), **chat_engine_params, ) return chat_engine def webpage_chat_( url: str = None, system_prompt: str = None, query_wrapper_prompt: str = None, embed_model: Union[str, EmbedType] = "default", llm_params: dict = None, vector_store_params: dict = None, service_context_params: dict = None, chat_engine_params: dict = None, retriever_params: dict = None, ) -> BaseChatEngine: documents = read_webpage_as_documents( url=url, ) llm_params = ( { "model": "gpt-4-0125-preview", "temperature": 0, } if llm_params is None else llm_params ) vector_store_params = ( {"vector_store_type": "WeaviateVectorStore"} if vector_store_params is None else vector_store_params ) service_context_params = ( {} if service_context_params is None else service_context_params ) chat_engine_params = {} if chat_engine_params is None else chat_engine_params retriever_params = ( {"retriever_type": 
"QueryFusionRetriever"} if retriever_params is None else retriever_params ) llm = LyzrLLMFactory.from_defaults(**llm_params) service_context = LyzrService.from_defaults( llm=llm, embed_model=embed_model, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, **service_context_params, ) vector_store_index = LyzrVectorStoreIndex.from_defaults( **vector_store_params, documents=documents, service_context=service_context ) retriever = LyzrRetriever.from_defaults( **retriever_params, base_index=vector_store_index ) memory = ChatMemoryBuffer.from_defaults(token_limit=4000) chat_engine = ContextChatEngine( llm=llm, memory=memory, retriever=retriever, prefix_messages=list(), **chat_engine_params, ) return chat_engine def website_chat_( url: str = None, system_prompt: str = None, query_wrapper_prompt: str = None, embed_model: Union[str, EmbedType] = "default", llm_params: dict = None, vector_store_params: dict = None, service_context_params: dict = None, chat_engine_params: dict = None, retriever_params: dict = None, ) -> BaseChatEngine: documents = read_website_as_documents( url=url, ) llm_params = ( { "model": "gpt-4-0125-preview", "temperature": 0, } if llm_params is None else llm_params ) vector_store_params = ( {"vector_store_type": "WeaviateVectorStore"} if vector_store_params is None else vector_store_params ) service_context_params = ( {} if service_context_params is None else service_context_params ) chat_engine_params = {} if chat_engine_params is None else chat_engine_params retriever_params = ( {"retriever_type": "QueryFusionRetriever"} if retriever_params is None else retriever_params ) llm = LyzrLLMFactory.from_defaults(**llm_params) service_context = LyzrService.from_defaults( llm=llm, embed_model=embed_model, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, **service_context_params, ) vector_store_index = LyzrVectorStoreIndex.from_defaults( **vector_store_params, documents=documents, service_context=service_context ) retriever = LyzrRetriever.from_defaults( **retriever_params, base_index=vector_store_index ) memory = ChatMemoryBuffer.from_defaults(token_limit=4000) chat_engine = ContextChatEngine( llm=llm, memory=memory, retriever=retriever, prefix_messages=list(), **chat_engine_params, ) return chat_engine def youtube_chat_( urls: List[str] = None, system_prompt: str = None, query_wrapper_prompt: str = None, embed_model: Union[str, EmbedType] = "default", llm_params: dict = None, vector_store_params: dict = None, service_context_params: dict = None, chat_engine_params: dict = None, retriever_params: dict = None, ) -> BaseChatEngine: documents = read_youtube_as_documents( urls=urls, ) llm_params = ( { "model": "gpt-4-0125-preview", "temperature": 0, } if llm_params is None else llm_params ) vector_store_params = ( {"vector_store_type": "WeaviateVectorStore"} if vector_store_params is None else vector_store_params ) service_context_params = ( {} if service_context_params is None else service_context_params ) chat_engine_params = {} if chat_engine_params is None else chat_engine_params retriever_params = ( {"retriever_type": "QueryFusionRetriever"} if retriever_params is None else retriever_params ) llm = LyzrLLMFactory.from_defaults(**llm_params) service_context = LyzrService.from_defaults( llm=llm, embed_model=embed_model, system_prompt=system_prompt, query_wrapper_prompt=query_wrapper_prompt, **service_context_params, ) vector_store_index = LyzrVectorStoreIndex.from_defaults( **vector_store_params, documents=documents, 
service_context=service_context ) retriever = LyzrRetriever.from_defaults( **retriever_params, base_index=vector_store_index ) memory = ChatMemoryBuffer.from_defaults(token_limit=4000) chat_engine = ContextChatEngine( llm=llm, memory=memory, retriever=retriever, prefix_messages=list(), **chat_engine_params, ) return chat_engine
[ "llama_index.memory.ChatMemoryBuffer.from_defaults" ]
[((1242, 1430), 'lyzr.utils.document_reading.read_pdf_as_documents', 'read_pdf_as_documents', ([], {'input_dir': 'input_dir', 'input_files': 'input_files', 'exclude_hidden': 'exclude_hidden', 'filename_as_id': 'filename_as_id', 'recursive': 'recursive', 'required_exts': 'required_exts'}), '(input_dir=input_dir, input_files=input_files,\n exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive\n =recursive, required_exts=required_exts)\n', (1263, 1430), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((2161, 2203), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (2189, 2203), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((2226, 2393), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (2251, 2393), False, 'from lyzr.base.service import LyzrService\n'), ((2457, 2573), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (2491, 2573), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((2600, 2678), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (2627, 2678), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((2707, 2755), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (2737, 2755), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((3528, 3716), 'lyzr.utils.document_reading.read_txt_as_documents', 'read_txt_as_documents', ([], {'input_dir': 'input_dir', 'input_files': 'input_files', 'exclude_hidden': 'exclude_hidden', 'filename_as_id': 'filename_as_id', 'recursive': 'recursive', 'required_exts': 'required_exts'}), '(input_dir=input_dir, input_files=input_files,\n exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive\n =recursive, required_exts=required_exts)\n', (3549, 3716), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((4447, 4489), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (4475, 4489), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((4512, 4679), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (4537, 4679), False, 'from lyzr.base.service import LyzrService\n'), ((4743, 4859), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 
'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (4777, 4859), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((4886, 4964), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (4913, 4964), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((4993, 5041), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (5023, 5041), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((5815, 6004), 'lyzr.utils.document_reading.read_docx_as_documents', 'read_docx_as_documents', ([], {'input_dir': 'input_dir', 'input_files': 'input_files', 'exclude_hidden': 'exclude_hidden', 'filename_as_id': 'filename_as_id', 'recursive': 'recursive', 'required_exts': 'required_exts'}), '(input_dir=input_dir, input_files=input_files,\n exclude_hidden=exclude_hidden, filename_as_id=filename_as_id, recursive\n =recursive, required_exts=required_exts)\n', (5837, 6004), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((6735, 6777), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (6763, 6777), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((6800, 6967), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (6825, 6967), False, 'from lyzr.base.service import LyzrService\n'), ((7031, 7147), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (7065, 7147), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((7174, 7252), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (7201, 7252), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((7281, 7329), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (7311, 7329), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((7909, 7943), 'lyzr.utils.document_reading.read_webpage_as_documents', 'read_webpage_as_documents', ([], {'url': 'url'}), '(url=url)\n', (7934, 7943), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((8643, 8685), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (8671, 8685), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((8708, 8875), 
'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (8733, 8875), False, 'from lyzr.base.service import LyzrService\n'), ((8939, 9055), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (8973, 9055), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((9082, 9160), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (9109, 9160), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((9189, 9237), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (9219, 9237), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((9817, 9851), 'lyzr.utils.document_reading.read_website_as_documents', 'read_website_as_documents', ([], {'url': 'url'}), '(url=url)\n', (9842, 9851), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((10551, 10593), 'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (10579, 10593), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((10616, 10783), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (10641, 10783), False, 'from lyzr.base.service import LyzrService\n'), ((10847, 10963), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (10881, 10963), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((10990, 11068), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (11017, 11068), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((11097, 11145), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (11127, 11145), False, 'from llama_index.memory import ChatMemoryBuffer\n'), ((11732, 11768), 'lyzr.utils.document_reading.read_youtube_as_documents', 'read_youtube_as_documents', ([], {'urls': 'urls'}), '(urls=urls)\n', (11757, 11768), False, 'from lyzr.utils.document_reading import read_pdf_as_documents, read_docx_as_documents, read_txt_as_documents, read_website_as_documents, read_webpage_as_documents, read_youtube_as_documents\n'), ((12468, 12510), 
'lyzr.base.llm.LyzrLLMFactory.from_defaults', 'LyzrLLMFactory.from_defaults', ([], {}), '(**llm_params)\n', (12496, 12510), False, 'from lyzr.base.llm import LyzrLLMFactory\n'), ((12533, 12700), 'lyzr.base.service.LyzrService.from_defaults', 'LyzrService.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'system_prompt': 'system_prompt', 'query_wrapper_prompt': 'query_wrapper_prompt'}), '(llm=llm, embed_model=embed_model, system_prompt=\n system_prompt, query_wrapper_prompt=query_wrapper_prompt, **\n service_context_params)\n', (12558, 12700), False, 'from lyzr.base.service import LyzrService\n'), ((12764, 12880), 'lyzr.base.vector_store.LyzrVectorStoreIndex.from_defaults', 'LyzrVectorStoreIndex.from_defaults', ([], {'documents': 'documents', 'service_context': 'service_context'}), '(**vector_store_params, documents=\n documents, service_context=service_context)\n', (12798, 12880), False, 'from lyzr.base.vector_store import LyzrVectorStoreIndex\n'), ((12907, 12985), 'lyzr.base.retrievers.LyzrRetriever.from_defaults', 'LyzrRetriever.from_defaults', ([], {'base_index': 'vector_store_index'}), '(**retriever_params, base_index=vector_store_index)\n', (12934, 12985), False, 'from lyzr.base.retrievers import LyzrRetriever\n'), ((13014, 13062), 'llama_index.memory.ChatMemoryBuffer.from_defaults', 'ChatMemoryBuffer.from_defaults', ([], {'token_limit': '(4000)'}), '(token_limit=4000)\n', (13044, 13062), False, 'from llama_index.memory import ChatMemoryBuffer\n')]
"""Agent utils.""" from llama_index.core.agent.types import TaskStep from llama_index.core.base.llms.types import ChatMessage, MessageRole from llama_index.core.memory import BaseMemory def add_user_step_to_memory( step: TaskStep, memory: BaseMemory, verbose: bool = False ) -> None: """Add user step to memory.""" user_message = ChatMessage(content=step.input, role=MessageRole.USER) memory.put(user_message) if verbose: print(f"Added user message to memory: {step.input}")
[ "llama_index.core.base.llms.types.ChatMessage" ]
[((345, 399), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'content': 'step.input', 'role': 'MessageRole.USER'}), '(content=step.input, role=MessageRole.USER)\n', (356, 399), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n')]
"""Agent utils.""" from llama_index.core.agent.types import TaskStep from llama_index.core.base.llms.types import ChatMessage, MessageRole from llama_index.core.memory import BaseMemory def add_user_step_to_memory( step: TaskStep, memory: BaseMemory, verbose: bool = False ) -> None: """Add user step to memory.""" user_message = ChatMessage(content=step.input, role=MessageRole.USER) memory.put(user_message) if verbose: print(f"Added user message to memory: {step.input}")
[ "llama_index.core.base.llms.types.ChatMessage" ]
[((345, 399), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'content': 'step.input', 'role': 'MessageRole.USER'}), '(content=step.input, role=MessageRole.USER)\n', (356, 399), False, 'from llama_index.core.base.llms.types import ChatMessage, MessageRole\n')]
from llama_index.core.tools import FunctionTool def calculate_average(*values): """ Calculates the average of the provided values. """ return sum(values) / len(values) average_tool = FunctionTool.from_defaults( fn=calculate_average )
[ "llama_index.core.tools.FunctionTool.from_defaults" ]
[((200, 248), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'calculate_average'}), '(fn=calculate_average)\n', (226, 248), False, 'from llama_index.core.tools import FunctionTool\n')]
#ingest uploaded documents from global_settings import STORAGE_PATH, INDEX_STORAGE, CACHE_FILE from logging_functions import log_action from llama_index.core import SimpleDirectoryReader, VectorStoreIndex from llama_index.core.ingestion import IngestionPipeline, IngestionCache from llama_index.core.node_parser import TokenTextSplitter from llama_index.core.extractors import SummaryExtractor from llama_index.embeddings.openai import OpenAIEmbedding def ingest_documents(): documents = SimpleDirectoryReader( STORAGE_PATH, filename_as_id = True ).load_data() for doc in documents: print(doc.id_) log_action( f"File '{doc.id_}' uploaded user", action_type="UPLOAD" ) try: cached_hashes = IngestionCache.from_persist_path( CACHE_FILE ) print("Cache file found. Running using cache...") except: cached_hashes = "" print("No cache file found. Running without cache...") pipeline = IngestionPipeline( transformations=[ TokenTextSplitter( chunk_size=1024, chunk_overlap=20 ), SummaryExtractor(summaries=['self']), OpenAIEmbedding() ], cache=cached_hashes ) nodes = pipeline.run(documents=documents) pipeline.cache.persist(CACHE_FILE) return nodes if __name__ == "__main__": embedded_nodes = ingest_documents()
[ "llama_index.core.extractors.SummaryExtractor", "llama_index.core.ingestion.IngestionCache.from_persist_path", "llama_index.core.SimpleDirectoryReader", "llama_index.core.node_parser.TokenTextSplitter", "llama_index.embeddings.openai.OpenAIEmbedding" ]
[((644, 711), 'logging_functions.log_action', 'log_action', (['f"""File \'{doc.id_}\' uploaded user"""'], {'action_type': '"""UPLOAD"""'}), '(f"File \'{doc.id_}\' uploaded user", action_type=\'UPLOAD\')\n', (654, 711), False, 'from logging_functions import log_action\n'), ((786, 830), 'llama_index.core.ingestion.IngestionCache.from_persist_path', 'IngestionCache.from_persist_path', (['CACHE_FILE'], {}), '(CACHE_FILE)\n', (818, 830), False, 'from llama_index.core.ingestion import IngestionPipeline, IngestionCache\n'), ((493, 549), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['STORAGE_PATH'], {'filename_as_id': '(True)'}), '(STORAGE_PATH, filename_as_id=True)\n', (514, 549), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((1089, 1141), 'llama_index.core.node_parser.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(1024)', 'chunk_overlap': '(20)'}), '(chunk_size=1024, chunk_overlap=20)\n', (1106, 1141), False, 'from llama_index.core.node_parser import TokenTextSplitter\n'), ((1202, 1238), 'llama_index.core.extractors.SummaryExtractor', 'SummaryExtractor', ([], {'summaries': "['self']"}), "(summaries=['self'])\n", (1218, 1238), False, 'from llama_index.core.extractors import SummaryExtractor\n'), ((1252, 1269), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (1267, 1269), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n')]
import tiktoken from llama_index.core import TreeIndex, SimpleDirectoryReader, Settings from llama_index.core.llms.mock import MockLLM from llama_index.core.callbacks import CallbackManager, TokenCountingHandler llm = MockLLM(max_tokens=256) token_counter = TokenCountingHandler( tokenizer=tiktoken.encoding_for_model("gpt-3.5-turbo").encode ) callback_manager = CallbackManager([token_counter]) Settings.callback_manager=callback_manager Settings.llm=llm documents = SimpleDirectoryReader("cost_prediction_samples").load_data() index = TreeIndex.from_documents( documents=documents, num_children=2, show_progress=True) print("Total LLM Token Count:", token_counter.total_llm_token_count)
[ "llama_index.core.TreeIndex.from_documents", "llama_index.core.callbacks.CallbackManager", "llama_index.core.SimpleDirectoryReader", "llama_index.core.llms.mock.MockLLM" ]
[((219, 242), 'llama_index.core.llms.mock.MockLLM', 'MockLLM', ([], {'max_tokens': '(256)'}), '(max_tokens=256)\n', (226, 242), False, 'from llama_index.core.llms.mock import MockLLM\n'), ((368, 400), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[token_counter]'], {}), '([token_counter])\n', (383, 400), False, 'from llama_index.core.callbacks import CallbackManager, TokenCountingHandler\n'), ((545, 631), 'llama_index.core.TreeIndex.from_documents', 'TreeIndex.from_documents', ([], {'documents': 'documents', 'num_children': '(2)', 'show_progress': '(True)'}), '(documents=documents, num_children=2, show_progress\n =True)\n', (569, 631), False, 'from llama_index.core import TreeIndex, SimpleDirectoryReader, Settings\n'), ((475, 523), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""cost_prediction_samples"""'], {}), "('cost_prediction_samples')\n", (496, 523), False, 'from llama_index.core import TreeIndex, SimpleDirectoryReader, Settings\n'), ((295, 339), 'tiktoken.encoding_for_model', 'tiktoken.encoding_for_model', (['"""gpt-3.5-turbo"""'], {}), "('gpt-3.5-turbo')\n", (322, 339), False, 'import tiktoken\n')]
"""Llama Dataset Class.""" import asyncio import time from typing import List, Optional from llama_index.core.base.base_query_engine import BaseQueryEngine from llama_index.core.bridge.pydantic import Field from llama_index.core.llama_dataset.base import ( BaseLlamaDataExample, BaseLlamaDataset, BaseLlamaExamplePrediction, BaseLlamaPredictionDataset, CreatedBy, ) from pandas import DataFrame as PandasDataFrame class RagExamplePrediction(BaseLlamaExamplePrediction): """RAG example prediction class. Args: response (str): The response generated by the LLM. contexts (Optional[List[str]]): The retrieved context (text) for generating response. """ response: str = Field( default_factory=str, description="The generated (predicted) response that can be compared to a reference (ground-truth) answer.", ) contexts: Optional[List[str]] = Field( default_factory=None, description="The contexts in raw text form used to generate the response.", ) @property def class_name(self) -> str: """Data example class name.""" return "RagExamplePrediction" class LabelledRagDataExample(BaseLlamaDataExample): """RAG example class. Analogous to traditional ML datasets, this dataset contains the "features" (i.e., query + context) to make a prediction and the "label" (i.e., response) to evaluate the prediction. Args: query (str): The user query query_by (CreatedBy): Query generated by human or ai (model-name) reference_contexts (Optional[List[str]]): The contexts used for response reference_answer ([str]): Reference answer to the query. An answer that would receive full marks upon evaluation. reference_answer_by: The reference answer generated by human or ai (model-name). """ query: str = Field( default_factory=str, description="The user query for the example." ) query_by: Optional[CreatedBy] = Field( default=None, description="What generated the query." ) reference_contexts: Optional[List[str]] = Field( default_factory=None, description="The contexts used to generate the reference answer.", ) reference_answer: str = Field( default_factory=str, description="The reference (ground-truth) answer to the example.", ) reference_answer_by: Optional[CreatedBy] = Field( default=None, description="What generated the reference answer." 
) @property def class_name(self) -> str: """Data example class name.""" return "LabelledRagDataExample" class RagPredictionDataset(BaseLlamaPredictionDataset): """RagDataset class.""" _prediction_type = RagExamplePrediction def to_pandas(self) -> PandasDataFrame: """Create pandas dataframe.""" data = {} if self.predictions: data = { "response": [t.response for t in self.predictions], "contexts": [t.contexts for t in self.predictions], } return PandasDataFrame(data) @property def class_name(self) -> str: """Class name.""" return "RagPredictionDataset" class LabelledRagDataset(BaseLlamaDataset[BaseQueryEngine]): """RagDataset class.""" _example_type = LabelledRagDataExample def to_pandas(self) -> PandasDataFrame: """Create pandas dataframe.""" data = { "query": [t.query for t in self.examples], "reference_contexts": [t.reference_contexts for t in self.examples], "reference_answer": [t.reference_answer for t in self.examples], "reference_answer_by": [str(t.reference_answer_by) for t in self.examples], "query_by": [str(t.query_by) for t in self.examples], } return PandasDataFrame(data) async def _apredict_example( self, predictor: BaseQueryEngine, example: LabelledRagDataExample, sleep_time_in_seconds: int, ) -> RagExamplePrediction: """Async predict RAG example with a query engine.""" await asyncio.sleep(sleep_time_in_seconds) response = await predictor.aquery(example.query) return RagExamplePrediction( response=str(response), contexts=[s.text for s in response.source_nodes] ) def _predict_example( self, predictor: BaseQueryEngine, example: LabelledRagDataExample, sleep_time_in_seconds: int = 0, ) -> RagExamplePrediction: """Predict RAG example with a query engine.""" time.sleep(sleep_time_in_seconds) response = predictor.query(example.query) return RagExamplePrediction( response=str(response), contexts=[s.text for s in response.source_nodes] ) def _construct_prediction_dataset( self, predictions: List[RagExamplePrediction] ) -> RagPredictionDataset: """Construct prediction dataset.""" return RagPredictionDataset(predictions=predictions) @property def class_name(self) -> str: """Class name.""" return "LabelledRagDataset" # British English + American English LabeledRagDataExample = LabelledRagDataExample LabeledRagDataset = LabelledRagDataset
[ "llama_index.core.bridge.pydantic.Field" ]
[((764, 909), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The generated (predicted) response that can be compared to a reference (ground-truth) answer."""'}), "(default_factory=str, description=\n 'The generated (predicted) response that can be compared to a reference (ground-truth) answer.'\n )\n", (769, 909), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((959, 1067), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'None', 'description': '"""The contexts in raw text form used to generate the response."""'}), "(default_factory=None, description=\n 'The contexts in raw text form used to generate the response.')\n", (964, 1067), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1955, 2028), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The user query for the example."""'}), "(default_factory=str, description='The user query for the example.')\n", (1960, 2028), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2079, 2139), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""What generated the query."""'}), "(default=None, description='What generated the query.')\n", (2084, 2139), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2200, 2299), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'None', 'description': '"""The contexts used to generate the reference answer."""'}), "(default_factory=None, description=\n 'The contexts used to generate the reference answer.')\n", (2205, 2299), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2346, 2444), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The reference (ground-truth) answer to the example."""'}), "(default_factory=str, description=\n 'The reference (ground-truth) answer to the example.')\n", (2351, 2444), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2510, 2581), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""What generated the reference answer."""'}), "(default=None, description='What generated the reference answer.')\n", (2515, 2581), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((3172, 3193), 'pandas.DataFrame', 'PandasDataFrame', (['data'], {}), '(data)\n', (3187, 3193), True, 'from pandas import DataFrame as PandasDataFrame\n'), ((3935, 3956), 'pandas.DataFrame', 'PandasDataFrame', (['data'], {}), '(data)\n', (3950, 3956), True, 'from pandas import DataFrame as PandasDataFrame\n'), ((4702, 4735), 'time.sleep', 'time.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (4712, 4735), False, 'import time\n'), ((4224, 4260), 'asyncio.sleep', 'asyncio.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (4237, 4260), False, 'import asyncio\n')]
"""Llama Dataset Class.""" import asyncio import time from typing import List, Optional from llama_index.core.base.base_query_engine import BaseQueryEngine from llama_index.core.bridge.pydantic import Field from llama_index.core.llama_dataset.base import ( BaseLlamaDataExample, BaseLlamaDataset, BaseLlamaExamplePrediction, BaseLlamaPredictionDataset, CreatedBy, ) from pandas import DataFrame as PandasDataFrame class RagExamplePrediction(BaseLlamaExamplePrediction): """RAG example prediction class. Args: response (str): The response generated by the LLM. contexts (Optional[List[str]]): The retrieved context (text) for generating response. """ response: str = Field( default_factory=str, description="The generated (predicted) response that can be compared to a reference (ground-truth) answer.", ) contexts: Optional[List[str]] = Field( default_factory=None, description="The contexts in raw text form used to generate the response.", ) @property def class_name(self) -> str: """Data example class name.""" return "RagExamplePrediction" class LabelledRagDataExample(BaseLlamaDataExample): """RAG example class. Analogous to traditional ML datasets, this dataset contains the "features" (i.e., query + context) to make a prediction and the "label" (i.e., response) to evaluate the prediction. Args: query (str): The user query query_by (CreatedBy): Query generated by human or ai (model-name) reference_contexts (Optional[List[str]]): The contexts used for response reference_answer ([str]): Reference answer to the query. An answer that would receive full marks upon evaluation. reference_answer_by: The reference answer generated by human or ai (model-name). """ query: str = Field( default_factory=str, description="The user query for the example." ) query_by: Optional[CreatedBy] = Field( default=None, description="What generated the query." ) reference_contexts: Optional[List[str]] = Field( default_factory=None, description="The contexts used to generate the reference answer.", ) reference_answer: str = Field( default_factory=str, description="The reference (ground-truth) answer to the example.", ) reference_answer_by: Optional[CreatedBy] = Field( default=None, description="What generated the reference answer." 
) @property def class_name(self) -> str: """Data example class name.""" return "LabelledRagDataExample" class RagPredictionDataset(BaseLlamaPredictionDataset): """RagDataset class.""" _prediction_type = RagExamplePrediction def to_pandas(self) -> PandasDataFrame: """Create pandas dataframe.""" data = {} if self.predictions: data = { "response": [t.response for t in self.predictions], "contexts": [t.contexts for t in self.predictions], } return PandasDataFrame(data) @property def class_name(self) -> str: """Class name.""" return "RagPredictionDataset" class LabelledRagDataset(BaseLlamaDataset[BaseQueryEngine]): """RagDataset class.""" _example_type = LabelledRagDataExample def to_pandas(self) -> PandasDataFrame: """Create pandas dataframe.""" data = { "query": [t.query for t in self.examples], "reference_contexts": [t.reference_contexts for t in self.examples], "reference_answer": [t.reference_answer for t in self.examples], "reference_answer_by": [str(t.reference_answer_by) for t in self.examples], "query_by": [str(t.query_by) for t in self.examples], } return PandasDataFrame(data) async def _apredict_example( self, predictor: BaseQueryEngine, example: LabelledRagDataExample, sleep_time_in_seconds: int, ) -> RagExamplePrediction: """Async predict RAG example with a query engine.""" await asyncio.sleep(sleep_time_in_seconds) response = await predictor.aquery(example.query) return RagExamplePrediction( response=str(response), contexts=[s.text for s in response.source_nodes] ) def _predict_example( self, predictor: BaseQueryEngine, example: LabelledRagDataExample, sleep_time_in_seconds: int = 0, ) -> RagExamplePrediction: """Predict RAG example with a query engine.""" time.sleep(sleep_time_in_seconds) response = predictor.query(example.query) return RagExamplePrediction( response=str(response), contexts=[s.text for s in response.source_nodes] ) def _construct_prediction_dataset( self, predictions: List[RagExamplePrediction] ) -> RagPredictionDataset: """Construct prediction dataset.""" return RagPredictionDataset(predictions=predictions) @property def class_name(self) -> str: """Class name.""" return "LabelledRagDataset" # British English + American English LabeledRagDataExample = LabelledRagDataExample LabeledRagDataset = LabelledRagDataset
[ "llama_index.core.bridge.pydantic.Field" ]
[((764, 909), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The generated (predicted) response that can be compared to a reference (ground-truth) answer."""'}), "(default_factory=str, description=\n 'The generated (predicted) response that can be compared to a reference (ground-truth) answer.'\n )\n", (769, 909), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((959, 1067), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'None', 'description': '"""The contexts in raw text form used to generate the response."""'}), "(default_factory=None, description=\n 'The contexts in raw text form used to generate the response.')\n", (964, 1067), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1955, 2028), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The user query for the example."""'}), "(default_factory=str, description='The user query for the example.')\n", (1960, 2028), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2079, 2139), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""What generated the query."""'}), "(default=None, description='What generated the query.')\n", (2084, 2139), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2200, 2299), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'None', 'description': '"""The contexts used to generate the reference answer."""'}), "(default_factory=None, description=\n 'The contexts used to generate the reference answer.')\n", (2205, 2299), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2346, 2444), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The reference (ground-truth) answer to the example."""'}), "(default_factory=str, description=\n 'The reference (ground-truth) answer to the example.')\n", (2351, 2444), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2510, 2581), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""What generated the reference answer."""'}), "(default=None, description='What generated the reference answer.')\n", (2515, 2581), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((3172, 3193), 'pandas.DataFrame', 'PandasDataFrame', (['data'], {}), '(data)\n', (3187, 3193), True, 'from pandas import DataFrame as PandasDataFrame\n'), ((3935, 3956), 'pandas.DataFrame', 'PandasDataFrame', (['data'], {}), '(data)\n', (3950, 3956), True, 'from pandas import DataFrame as PandasDataFrame\n'), ((4702, 4735), 'time.sleep', 'time.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (4712, 4735), False, 'import time\n'), ((4224, 4260), 'asyncio.sleep', 'asyncio.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (4237, 4260), False, 'import asyncio\n')]
"""Llama Dataset Class.""" import asyncio import time from typing import List, Optional from llama_index.core.base.base_query_engine import BaseQueryEngine from llama_index.core.bridge.pydantic import Field from llama_index.core.llama_dataset.base import ( BaseLlamaDataExample, BaseLlamaDataset, BaseLlamaExamplePrediction, BaseLlamaPredictionDataset, CreatedBy, ) from pandas import DataFrame as PandasDataFrame class RagExamplePrediction(BaseLlamaExamplePrediction): """RAG example prediction class. Args: response (str): The response generated by the LLM. contexts (Optional[List[str]]): The retrieved context (text) for generating response. """ response: str = Field( default_factory=str, description="The generated (predicted) response that can be compared to a reference (ground-truth) answer.", ) contexts: Optional[List[str]] = Field( default_factory=None, description="The contexts in raw text form used to generate the response.", ) @property def class_name(self) -> str: """Data example class name.""" return "RagExamplePrediction" class LabelledRagDataExample(BaseLlamaDataExample): """RAG example class. Analogous to traditional ML datasets, this dataset contains the "features" (i.e., query + context) to make a prediction and the "label" (i.e., response) to evaluate the prediction. Args: query (str): The user query query_by (CreatedBy): Query generated by human or ai (model-name) reference_contexts (Optional[List[str]]): The contexts used for response reference_answer ([str]): Reference answer to the query. An answer that would receive full marks upon evaluation. reference_answer_by: The reference answer generated by human or ai (model-name). """ query: str = Field( default_factory=str, description="The user query for the example." ) query_by: Optional[CreatedBy] = Field( default=None, description="What generated the query." ) reference_contexts: Optional[List[str]] = Field( default_factory=None, description="The contexts used to generate the reference answer.", ) reference_answer: str = Field( default_factory=str, description="The reference (ground-truth) answer to the example.", ) reference_answer_by: Optional[CreatedBy] = Field( default=None, description="What generated the reference answer." 
) @property def class_name(self) -> str: """Data example class name.""" return "LabelledRagDataExample" class RagPredictionDataset(BaseLlamaPredictionDataset): """RagDataset class.""" _prediction_type = RagExamplePrediction def to_pandas(self) -> PandasDataFrame: """Create pandas dataframe.""" data = {} if self.predictions: data = { "response": [t.response for t in self.predictions], "contexts": [t.contexts for t in self.predictions], } return PandasDataFrame(data) @property def class_name(self) -> str: """Class name.""" return "RagPredictionDataset" class LabelledRagDataset(BaseLlamaDataset[BaseQueryEngine]): """RagDataset class.""" _example_type = LabelledRagDataExample def to_pandas(self) -> PandasDataFrame: """Create pandas dataframe.""" data = { "query": [t.query for t in self.examples], "reference_contexts": [t.reference_contexts for t in self.examples], "reference_answer": [t.reference_answer for t in self.examples], "reference_answer_by": [str(t.reference_answer_by) for t in self.examples], "query_by": [str(t.query_by) for t in self.examples], } return PandasDataFrame(data) async def _apredict_example( self, predictor: BaseQueryEngine, example: LabelledRagDataExample, sleep_time_in_seconds: int, ) -> RagExamplePrediction: """Async predict RAG example with a query engine.""" await asyncio.sleep(sleep_time_in_seconds) response = await predictor.aquery(example.query) return RagExamplePrediction( response=str(response), contexts=[s.text for s in response.source_nodes] ) def _predict_example( self, predictor: BaseQueryEngine, example: LabelledRagDataExample, sleep_time_in_seconds: int = 0, ) -> RagExamplePrediction: """Predict RAG example with a query engine.""" time.sleep(sleep_time_in_seconds) response = predictor.query(example.query) return RagExamplePrediction( response=str(response), contexts=[s.text for s in response.source_nodes] ) def _construct_prediction_dataset( self, predictions: List[RagExamplePrediction] ) -> RagPredictionDataset: """Construct prediction dataset.""" return RagPredictionDataset(predictions=predictions) @property def class_name(self) -> str: """Class name.""" return "LabelledRagDataset" # British English + American English LabeledRagDataExample = LabelledRagDataExample LabeledRagDataset = LabelledRagDataset
[ "llama_index.core.bridge.pydantic.Field" ]
[((764, 909), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The generated (predicted) response that can be compared to a reference (ground-truth) answer."""'}), "(default_factory=str, description=\n 'The generated (predicted) response that can be compared to a reference (ground-truth) answer.'\n )\n", (769, 909), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((959, 1067), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'None', 'description': '"""The contexts in raw text form used to generate the response."""'}), "(default_factory=None, description=\n 'The contexts in raw text form used to generate the response.')\n", (964, 1067), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1955, 2028), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The user query for the example."""'}), "(default_factory=str, description='The user query for the example.')\n", (1960, 2028), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2079, 2139), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""What generated the query."""'}), "(default=None, description='What generated the query.')\n", (2084, 2139), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2200, 2299), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'None', 'description': '"""The contexts used to generate the reference answer."""'}), "(default_factory=None, description=\n 'The contexts used to generate the reference answer.')\n", (2205, 2299), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2346, 2444), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str', 'description': '"""The reference (ground-truth) answer to the example."""'}), "(default_factory=str, description=\n 'The reference (ground-truth) answer to the example.')\n", (2351, 2444), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2510, 2581), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""What generated the reference answer."""'}), "(default=None, description='What generated the reference answer.')\n", (2515, 2581), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((3172, 3193), 'pandas.DataFrame', 'PandasDataFrame', (['data'], {}), '(data)\n', (3187, 3193), True, 'from pandas import DataFrame as PandasDataFrame\n'), ((3935, 3956), 'pandas.DataFrame', 'PandasDataFrame', (['data'], {}), '(data)\n', (3950, 3956), True, 'from pandas import DataFrame as PandasDataFrame\n'), ((4702, 4735), 'time.sleep', 'time.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (4712, 4735), False, 'import time\n'), ((4224, 4260), 'asyncio.sleep', 'asyncio.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (4237, 4260), False, 'import asyncio\n')]
from llama_index.core.base.llms.types import ( ChatMessage, ChatResponse, ChatResponseGen, MessageRole, ) from llama_index.core.types import TokenGen def response_gen_from_query_engine(response_gen: TokenGen) -> ChatResponseGen: response_str = "" for token in response_gen: response_str += token yield ChatResponse( message=ChatMessage(role=MessageRole.ASSISTANT, content=response_str), delta=token, )
[ "llama_index.core.base.llms.types.ChatMessage" ]
[((378, 439), 'llama_index.core.base.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response_str'}), '(role=MessageRole.ASSISTANT, content=response_str)\n', (389, 439), False, 'from llama_index.core.base.llms.types import ChatMessage, ChatResponse, ChatResponseGen, MessageRole\n')]
"""DashScope llm api.""" from http import HTTPStatus from typing import Any, Dict, List, Optional, Sequence, Tuple from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole, ) from llama_index.legacy.llms.base import ( llm_chat_callback, llm_completion_callback, ) from llama_index.legacy.llms.custom import CustomLLM from llama_index.legacy.llms.dashscope_utils import ( chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response, ) class DashScopeGenerationModels: """DashScope Qwen serial models.""" QWEN_TURBO = "qwen-turbo" QWEN_PLUS = "qwen-plus" QWEN_MAX = "qwen-max" QWEN_MAX_1201 = "qwen-max-1201" QWEN_MAX_LONGCONTEXT = "qwen-max-longcontext" DASHSCOPE_MODEL_META = { DashScopeGenerationModels.QWEN_TURBO: { "context_window": 1024 * 8, "num_output": 1024 * 8, "is_chat_model": True, }, DashScopeGenerationModels.QWEN_PLUS: { "context_window": 1024 * 32, "num_output": 1024 * 32, "is_chat_model": True, }, DashScopeGenerationModels.QWEN_MAX: { "context_window": 1024 * 8, "num_output": 1024 * 8, "is_chat_model": True, }, DashScopeGenerationModels.QWEN_MAX_1201: { "context_window": 1024 * 8, "num_output": 1024 * 8, "is_chat_model": True, }, DashScopeGenerationModels.QWEN_MAX_LONGCONTEXT: { "context_window": 1024 * 30, "num_output": 1024 * 30, "is_chat_model": True, }, } def call_with_messages( model: str, messages: List[Dict], parameters: Optional[Dict] = None, api_key: Optional[str] = None, **kwargs: Any, ) -> Dict: try: from dashscope import Generation except ImportError: raise ValueError( "DashScope is not installed. Please install it with " "`pip install dashscope`." ) return Generation.call( model=model, messages=messages, api_key=api_key, **parameters ) class DashScope(CustomLLM): """DashScope LLM.""" model_name: str = Field( default=DashScopeGenerationModels.QWEN_MAX, description="The DashScope model to use.", ) max_tokens: Optional[int] = Field( description="The maximum number of tokens to generate.", default=DEFAULT_NUM_OUTPUTS, gt=0, ) incremental_output: Optional[bool] = Field( description="Control stream output, If False, the subsequent \ output will include the content that has been \ output previously.", default=True, ) enable_search: Optional[bool] = Field( description="The model has a built-in Internet search service. \ This parameter controls whether the model refers to \ the Internet search results when generating text.", default=False, ) stop: Optional[Any] = Field( description="str, list of str or token_id, list of token id. It will automatically \ stop when the generated content is about to contain the specified string \ or token_ids, and the generated content does not contain \ the specified content.", default=None, ) temperature: Optional[float] = Field( description="The temperature to use during generation.", default=DEFAULT_TEMPERATURE, gte=0.0, lte=2.0, ) top_k: Optional[int] = Field( description="Sample counter when generate.", default=None ) top_p: Optional[float] = Field( description="Sample probability threshold when generate." 
) seed: Optional[int] = Field( description="Random seed when generate.", default=1234, gte=0 ) repetition_penalty: Optional[float] = Field( description="Penalty for repeated words in generated text; \ 1.0 is no penalty, values greater than 1 discourage \ repetition.", default=None, ) api_key: str = Field( default=None, description="The DashScope API key.", exclude=True ) def __init__( self, model_name: Optional[str] = DashScopeGenerationModels.QWEN_MAX, max_tokens: Optional[int] = DEFAULT_NUM_OUTPUTS, incremental_output: Optional[int] = True, enable_search: Optional[bool] = False, stop: Optional[Any] = None, temperature: Optional[float] = DEFAULT_TEMPERATURE, top_k: Optional[int] = None, top_p: Optional[float] = None, seed: Optional[int] = 1234, api_key: Optional[str] = None, callback_manager: Optional[CallbackManager] = None, **kwargs: Any, ): super().__init__( model_name=model_name, max_tokens=max_tokens, incremental_output=incremental_output, enable_search=enable_search, stop=stop, temperature=temperature, top_k=top_k, top_p=top_p, seed=seed, api_key=api_key, callback_manager=callback_manager, kwargs=kwargs, ) @classmethod def class_name(cls) -> str: return "DashScope_LLM" @property def metadata(self) -> LLMMetadata: DASHSCOPE_MODEL_META[self.model_name]["num_output"] = ( self.max_tokens or DASHSCOPE_MODEL_META[self.model_name]["num_output"] ) return LLMMetadata( model_name=self.model_name, **DASHSCOPE_MODEL_META[self.model_name] ) def _get_default_parameters(self) -> Dict: params: Dict[Any, Any] = {} if self.max_tokens is not None: params["max_tokens"] = self.max_tokens params["incremental_output"] = self.incremental_output params["enable_search"] = self.enable_search if self.stop is not None: params["stop"] = self.stop if self.temperature is not None: params["temperature"] = self.temperature if self.top_k is not None: params["top_k"] = self.top_k if self.top_p is not None: params["top_p"] = self.top_p if self.seed is not None: params["seed"] = self.seed return params def _get_input_parameters( self, prompt: str, **kwargs: Any ) -> Tuple[ChatMessage, Dict]: parameters = self._get_default_parameters() parameters.update(kwargs) parameters["stream"] = False # we only use message response parameters["result_format"] = "message" message = ChatMessage( role=MessageRole.USER.value, content=prompt, ) return message, parameters @llm_completion_callback() def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse: message, parameters = self._get_input_parameters(prompt=prompt, **kwargs) parameters.pop("incremental_output", None) parameters.pop("stream", None) messages = chat_message_to_dashscope_messages([message]) response = call_with_messages( model=self.model_name, messages=messages, api_key=self.api_key, parameters=parameters, ) return dashscope_response_to_completion_response(response) @llm_completion_callback() def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen: message, parameters = self._get_input_parameters(prompt=prompt, kwargs=kwargs) parameters["incremental_output"] = True parameters["stream"] = True responses = call_with_messages( model=self.model_name, messages=chat_message_to_dashscope_messages([message]), api_key=self.api_key, parameters=parameters, ) def gen() -> CompletionResponseGen: content = "" for response in responses: if response.status_code == HTTPStatus.OK: top_choice = response.output.choices[0] incremental_output = top_choice["message"]["content"] if not incremental_output: incremental_output = "" content += incremental_output 
yield CompletionResponse( text=content, delta=incremental_output, raw=response ) else: yield CompletionResponse(text="", raw=response) return return gen() @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: parameters = self._get_default_parameters() parameters.update({**kwargs}) parameters.pop("stream", None) parameters.pop("incremental_output", None) parameters["result_format"] = "message" # only use message format. response = call_with_messages( model=self.model_name, messages=chat_message_to_dashscope_messages(messages), api_key=self.api_key, parameters=parameters, ) return dashscope_response_to_chat_response(response) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: parameters = self._get_default_parameters() parameters.update({**kwargs}) parameters["stream"] = True parameters["incremental_output"] = True parameters["result_format"] = "message" # only use message format. response = call_with_messages( model=self.model_name, messages=chat_message_to_dashscope_messages(messages), api_key=self.api_key, parameters=parameters, ) def gen() -> ChatResponseGen: content = "" for r in response: if r.status_code == HTTPStatus.OK: top_choice = r.output.choices[0] incremental_output = top_choice["message"]["content"] role = top_choice["message"]["role"] content += incremental_output yield ChatResponse( message=ChatMessage(role=role, content=content), delta=incremental_output, raw=r, ) else: yield ChatResponse(message=ChatMessage(), raw=response) return return gen()
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages", "llama_index.legacy.llms.dashscope_utils.dashscope_response_to_chat_response", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.core.llms.types.ChatMessage", "llama_index.legacy.llms.dashscope_utils.dashscope_response_to_completion_response" ]
[((2272, 2350), 'dashscope.Generation.call', 'Generation.call', ([], {'model': 'model', 'messages': 'messages', 'api_key': 'api_key'}), '(model=model, messages=messages, api_key=api_key, **parameters)\n', (2287, 2350), False, 'from dashscope import Generation\n'), ((2443, 2540), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DashScopeGenerationModels.QWEN_MAX', 'description': '"""The DashScope model to use."""'}), "(default=DashScopeGenerationModels.QWEN_MAX, description=\n 'The DashScope model to use.')\n", (2448, 2540), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2591, 2693), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""', 'default': 'DEFAULT_NUM_OUTPUTS', 'gt': '(0)'}), "(description='The maximum number of tokens to generate.', default=\n DEFAULT_NUM_OUTPUTS, gt=0)\n", (2596, 2693), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2761, 3038), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Control stream output, If False, the subsequent output will include the content that has been output previously."""', 'default': '(True)'}), "(description=\n 'Control stream output, If False, the subsequent output will include the content that has been output previously.'\n , default=True)\n", (2766, 3038), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3092, 3409), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The model has a built-in Internet search service. This parameter controls whether the model refers to the Internet search results when generating text."""', 'default': '(False)'}), "(description=\n 'The model has a built-in Internet search service. This parameter controls whether the model refers to the Internet search results when generating text.'\n , default=False)\n", (3097, 3409), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3453, 3855), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""str, list of str or token_id, list of token id. It will automatically stop when the generated content is about to contain the specified string or token_ids, and the generated content does not contain the specified content."""', 'default': 'None'}), "(description=\n 'str, list of str or token_id, list of token id. 
It will automatically stop when the generated content is about to contain the specified string or token_ids, and the generated content does not contain the specified content.'\n , default=None)\n", (3458, 3855), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3910, 4024), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use during generation."""', 'default': 'DEFAULT_TEMPERATURE', 'gte': '(0.0)', 'lte': '(2.0)'}), "(description='The temperature to use during generation.', default=\n DEFAULT_TEMPERATURE, gte=0.0, lte=2.0)\n", (3915, 4024), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4086, 4150), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Sample counter when generate."""', 'default': 'None'}), "(description='Sample counter when generate.', default=None)\n", (4091, 4150), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4194, 4258), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Sample probability threshold when generate."""'}), "(description='Sample probability threshold when generate.')\n", (4199, 4258), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4299, 4367), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Random seed when generate."""', 'default': '(1234)', 'gte': '(0)'}), "(description='Random seed when generate.', default=1234, gte=0)\n", (4304, 4367), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4424, 4700), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Penalty for repeated words in generated text; 1.0 is no penalty, values greater than 1 discourage repetition."""', 'default': 'None'}), "(description=\n 'Penalty for repeated words in generated text; 1.0 is no penalty, values greater than 1 discourage repetition.'\n , default=None)\n", (4429, 4700), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4737, 4808), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The DashScope API key."""', 'exclude': '(True)'}), "(default=None, description='The DashScope API key.', exclude=True)\n", (4742, 4808), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((7440, 7465), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7463, 7465), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8034, 8059), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8057, 8059), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9274, 9293), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9291, 9293), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9921, 9940), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9938, 9940), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6160, 6245), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'model_name': 'self.model_name'}), '(model_name=self.model_name, **DASHSCOPE_MODEL_META[self.model_name]\n )\n', (6171, 6245), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, 
CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((7307, 7363), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER.value', 'content': 'prompt'}), '(role=MessageRole.USER.value, content=prompt)\n', (7318, 7363), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((7731, 7776), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['[message]'], {}), '([message])\n', (7765, 7776), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((7976, 8027), 'llama_index.legacy.llms.dashscope_utils.dashscope_response_to_completion_response', 'dashscope_response_to_completion_response', (['response'], {}), '(response)\n', (8017, 8027), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((9869, 9914), 'llama_index.legacy.llms.dashscope_utils.dashscope_response_to_chat_response', 'dashscope_response_to_chat_response', (['response'], {}), '(response)\n', (9904, 9914), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((8411, 8456), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['[message]'], {}), '([message])\n', (8445, 8456), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((9729, 9773), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['messages'], {}), '(messages)\n', (9763, 9773), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((10394, 10438), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['messages'], {}), '(messages)\n', (10428, 10438), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((9010, 9082), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'content', 'delta': 'incremental_output', 'raw': 'response'}), '(text=content, delta=incremental_output, raw=response)\n', (9028, 9082), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((9177, 9218), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': '""""""', 'raw': 'response'}), "(text='', raw=response)\n", (9195, 9218), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((10971, 11010), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content'}), '(role=role, 
content=content)\n', (10982, 11010), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((11184, 11197), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {}), '()\n', (11195, 11197), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n')]
"""DashScope llm api.""" from http import HTTPStatus from typing import Any, Dict, List, Optional, Sequence, Tuple from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole, ) from llama_index.legacy.llms.base import ( llm_chat_callback, llm_completion_callback, ) from llama_index.legacy.llms.custom import CustomLLM from llama_index.legacy.llms.dashscope_utils import ( chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response, ) class DashScopeGenerationModels: """DashScope Qwen serial models.""" QWEN_TURBO = "qwen-turbo" QWEN_PLUS = "qwen-plus" QWEN_MAX = "qwen-max" QWEN_MAX_1201 = "qwen-max-1201" QWEN_MAX_LONGCONTEXT = "qwen-max-longcontext" DASHSCOPE_MODEL_META = { DashScopeGenerationModels.QWEN_TURBO: { "context_window": 1024 * 8, "num_output": 1024 * 8, "is_chat_model": True, }, DashScopeGenerationModels.QWEN_PLUS: { "context_window": 1024 * 32, "num_output": 1024 * 32, "is_chat_model": True, }, DashScopeGenerationModels.QWEN_MAX: { "context_window": 1024 * 8, "num_output": 1024 * 8, "is_chat_model": True, }, DashScopeGenerationModels.QWEN_MAX_1201: { "context_window": 1024 * 8, "num_output": 1024 * 8, "is_chat_model": True, }, DashScopeGenerationModels.QWEN_MAX_LONGCONTEXT: { "context_window": 1024 * 30, "num_output": 1024 * 30, "is_chat_model": True, }, } def call_with_messages( model: str, messages: List[Dict], parameters: Optional[Dict] = None, api_key: Optional[str] = None, **kwargs: Any, ) -> Dict: try: from dashscope import Generation except ImportError: raise ValueError( "DashScope is not installed. Please install it with " "`pip install dashscope`." ) return Generation.call( model=model, messages=messages, api_key=api_key, **parameters ) class DashScope(CustomLLM): """DashScope LLM.""" model_name: str = Field( default=DashScopeGenerationModels.QWEN_MAX, description="The DashScope model to use.", ) max_tokens: Optional[int] = Field( description="The maximum number of tokens to generate.", default=DEFAULT_NUM_OUTPUTS, gt=0, ) incremental_output: Optional[bool] = Field( description="Control stream output, If False, the subsequent \ output will include the content that has been \ output previously.", default=True, ) enable_search: Optional[bool] = Field( description="The model has a built-in Internet search service. \ This parameter controls whether the model refers to \ the Internet search results when generating text.", default=False, ) stop: Optional[Any] = Field( description="str, list of str or token_id, list of token id. It will automatically \ stop when the generated content is about to contain the specified string \ or token_ids, and the generated content does not contain \ the specified content.", default=None, ) temperature: Optional[float] = Field( description="The temperature to use during generation.", default=DEFAULT_TEMPERATURE, gte=0.0, lte=2.0, ) top_k: Optional[int] = Field( description="Sample counter when generate.", default=None ) top_p: Optional[float] = Field( description="Sample probability threshold when generate." 
) seed: Optional[int] = Field( description="Random seed when generate.", default=1234, gte=0 ) repetition_penalty: Optional[float] = Field( description="Penalty for repeated words in generated text; \ 1.0 is no penalty, values greater than 1 discourage \ repetition.", default=None, ) api_key: str = Field( default=None, description="The DashScope API key.", exclude=True ) def __init__( self, model_name: Optional[str] = DashScopeGenerationModels.QWEN_MAX, max_tokens: Optional[int] = DEFAULT_NUM_OUTPUTS, incremental_output: Optional[int] = True, enable_search: Optional[bool] = False, stop: Optional[Any] = None, temperature: Optional[float] = DEFAULT_TEMPERATURE, top_k: Optional[int] = None, top_p: Optional[float] = None, seed: Optional[int] = 1234, api_key: Optional[str] = None, callback_manager: Optional[CallbackManager] = None, **kwargs: Any, ): super().__init__( model_name=model_name, max_tokens=max_tokens, incremental_output=incremental_output, enable_search=enable_search, stop=stop, temperature=temperature, top_k=top_k, top_p=top_p, seed=seed, api_key=api_key, callback_manager=callback_manager, kwargs=kwargs, ) @classmethod def class_name(cls) -> str: return "DashScope_LLM" @property def metadata(self) -> LLMMetadata: DASHSCOPE_MODEL_META[self.model_name]["num_output"] = ( self.max_tokens or DASHSCOPE_MODEL_META[self.model_name]["num_output"] ) return LLMMetadata( model_name=self.model_name, **DASHSCOPE_MODEL_META[self.model_name] ) def _get_default_parameters(self) -> Dict: params: Dict[Any, Any] = {} if self.max_tokens is not None: params["max_tokens"] = self.max_tokens params["incremental_output"] = self.incremental_output params["enable_search"] = self.enable_search if self.stop is not None: params["stop"] = self.stop if self.temperature is not None: params["temperature"] = self.temperature if self.top_k is not None: params["top_k"] = self.top_k if self.top_p is not None: params["top_p"] = self.top_p if self.seed is not None: params["seed"] = self.seed return params def _get_input_parameters( self, prompt: str, **kwargs: Any ) -> Tuple[ChatMessage, Dict]: parameters = self._get_default_parameters() parameters.update(kwargs) parameters["stream"] = False # we only use message response parameters["result_format"] = "message" message = ChatMessage( role=MessageRole.USER.value, content=prompt, ) return message, parameters @llm_completion_callback() def complete(self, prompt: str, **kwargs: Any) -> CompletionResponse: message, parameters = self._get_input_parameters(prompt=prompt, **kwargs) parameters.pop("incremental_output", None) parameters.pop("stream", None) messages = chat_message_to_dashscope_messages([message]) response = call_with_messages( model=self.model_name, messages=messages, api_key=self.api_key, parameters=parameters, ) return dashscope_response_to_completion_response(response) @llm_completion_callback() def stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen: message, parameters = self._get_input_parameters(prompt=prompt, kwargs=kwargs) parameters["incremental_output"] = True parameters["stream"] = True responses = call_with_messages( model=self.model_name, messages=chat_message_to_dashscope_messages([message]), api_key=self.api_key, parameters=parameters, ) def gen() -> CompletionResponseGen: content = "" for response in responses: if response.status_code == HTTPStatus.OK: top_choice = response.output.choices[0] incremental_output = top_choice["message"]["content"] if not incremental_output: incremental_output = "" content += incremental_output 
yield CompletionResponse( text=content, delta=incremental_output, raw=response ) else: yield CompletionResponse(text="", raw=response) return return gen() @llm_chat_callback() def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: parameters = self._get_default_parameters() parameters.update({**kwargs}) parameters.pop("stream", None) parameters.pop("incremental_output", None) parameters["result_format"] = "message" # only use message format. response = call_with_messages( model=self.model_name, messages=chat_message_to_dashscope_messages(messages), api_key=self.api_key, parameters=parameters, ) return dashscope_response_to_chat_response(response) @llm_chat_callback() def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: parameters = self._get_default_parameters() parameters.update({**kwargs}) parameters["stream"] = True parameters["incremental_output"] = True parameters["result_format"] = "message" # only use message format. response = call_with_messages( model=self.model_name, messages=chat_message_to_dashscope_messages(messages), api_key=self.api_key, parameters=parameters, ) def gen() -> ChatResponseGen: content = "" for r in response: if r.status_code == HTTPStatus.OK: top_choice = r.output.choices[0] incremental_output = top_choice["message"]["content"] role = top_choice["message"]["role"] content += incremental_output yield ChatResponse( message=ChatMessage(role=role, content=content), delta=incremental_output, raw=r, ) else: yield ChatResponse(message=ChatMessage(), raw=response) return return gen()
[ "llama_index.legacy.core.llms.types.CompletionResponse", "llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages", "llama_index.legacy.llms.dashscope_utils.dashscope_response_to_chat_response", "llama_index.legacy.core.llms.types.LLMMetadata", "llama_index.legacy.llms.base.llm_chat_callback", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.llms.base.llm_completion_callback", "llama_index.legacy.core.llms.types.ChatMessage", "llama_index.legacy.llms.dashscope_utils.dashscope_response_to_completion_response" ]
[((2272, 2350), 'dashscope.Generation.call', 'Generation.call', ([], {'model': 'model', 'messages': 'messages', 'api_key': 'api_key'}), '(model=model, messages=messages, api_key=api_key, **parameters)\n', (2287, 2350), False, 'from dashscope import Generation\n'), ((2443, 2540), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DashScopeGenerationModels.QWEN_MAX', 'description': '"""The DashScope model to use."""'}), "(default=DashScopeGenerationModels.QWEN_MAX, description=\n 'The DashScope model to use.')\n", (2448, 2540), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2591, 2693), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""', 'default': 'DEFAULT_NUM_OUTPUTS', 'gt': '(0)'}), "(description='The maximum number of tokens to generate.', default=\n DEFAULT_NUM_OUTPUTS, gt=0)\n", (2596, 2693), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2761, 3038), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Control stream output, If False, the subsequent output will include the content that has been output previously."""', 'default': '(True)'}), "(description=\n 'Control stream output, If False, the subsequent output will include the content that has been output previously.'\n , default=True)\n", (2766, 3038), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3092, 3409), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The model has a built-in Internet search service. This parameter controls whether the model refers to the Internet search results when generating text."""', 'default': '(False)'}), "(description=\n 'The model has a built-in Internet search service. This parameter controls whether the model refers to the Internet search results when generating text.'\n , default=False)\n", (3097, 3409), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3453, 3855), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""str, list of str or token_id, list of token id. It will automatically stop when the generated content is about to contain the specified string or token_ids, and the generated content does not contain the specified content."""', 'default': 'None'}), "(description=\n 'str, list of str or token_id, list of token id. 
It will automatically stop when the generated content is about to contain the specified string or token_ids, and the generated content does not contain the specified content.'\n , default=None)\n", (3458, 3855), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3910, 4024), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use during generation."""', 'default': 'DEFAULT_TEMPERATURE', 'gte': '(0.0)', 'lte': '(2.0)'}), "(description='The temperature to use during generation.', default=\n DEFAULT_TEMPERATURE, gte=0.0, lte=2.0)\n", (3915, 4024), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4086, 4150), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Sample counter when generate."""', 'default': 'None'}), "(description='Sample counter when generate.', default=None)\n", (4091, 4150), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4194, 4258), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Sample probability threshold when generate."""'}), "(description='Sample probability threshold when generate.')\n", (4199, 4258), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4299, 4367), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Random seed when generate."""', 'default': '(1234)', 'gte': '(0)'}), "(description='Random seed when generate.', default=1234, gte=0)\n", (4304, 4367), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4424, 4700), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Penalty for repeated words in generated text; 1.0 is no penalty, values greater than 1 discourage repetition."""', 'default': 'None'}), "(description=\n 'Penalty for repeated words in generated text; 1.0 is no penalty, values greater than 1 discourage repetition.'\n , default=None)\n", (4429, 4700), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((4737, 4808), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The DashScope API key."""', 'exclude': '(True)'}), "(default=None, description='The DashScope API key.', exclude=True)\n", (4742, 4808), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((7440, 7465), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7463, 7465), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8034, 8059), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8057, 8059), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9274, 9293), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9291, 9293), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9921, 9940), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9938, 9940), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6160, 6245), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'model_name': 'self.model_name'}), '(model_name=self.model_name, **DASHSCOPE_MODEL_META[self.model_name]\n )\n', (6171, 6245), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, 
CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((7307, 7363), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.USER.value', 'content': 'prompt'}), '(role=MessageRole.USER.value, content=prompt)\n', (7318, 7363), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((7731, 7776), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['[message]'], {}), '([message])\n', (7765, 7776), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((7976, 8027), 'llama_index.legacy.llms.dashscope_utils.dashscope_response_to_completion_response', 'dashscope_response_to_completion_response', (['response'], {}), '(response)\n', (8017, 8027), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((9869, 9914), 'llama_index.legacy.llms.dashscope_utils.dashscope_response_to_chat_response', 'dashscope_response_to_chat_response', (['response'], {}), '(response)\n', (9904, 9914), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((8411, 8456), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['[message]'], {}), '([message])\n', (8445, 8456), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((9729, 9773), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['messages'], {}), '(messages)\n', (9763, 9773), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((10394, 10438), 'llama_index.legacy.llms.dashscope_utils.chat_message_to_dashscope_messages', 'chat_message_to_dashscope_messages', (['messages'], {}), '(messages)\n', (10428, 10438), False, 'from llama_index.legacy.llms.dashscope_utils import chat_message_to_dashscope_messages, dashscope_response_to_chat_response, dashscope_response_to_completion_response\n'), ((9010, 9082), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'content', 'delta': 'incremental_output', 'raw': 'response'}), '(text=content, delta=incremental_output, raw=response)\n', (9028, 9082), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((9177, 9218), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': '""""""', 'raw': 'response'}), "(text='', raw=response)\n", (9195, 9218), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((10971, 11010), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content'}), '(role=role, 
content=content)\n', (10982, 11010), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((11184, 11197), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {}), '()\n', (11195, 11197), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n')]
"""Relevancy evaluation.""" from __future__ import annotations import asyncio from typing import Any, Optional, Sequence, Union from llama_index.core import ServiceContext from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult from llama_index.core.indices import SummaryIndex from llama_index.core.llms.llm import LLM from llama_index.core.prompts import BasePromptTemplate, PromptTemplate from llama_index.core.prompts.mixin import PromptDictType from llama_index.core.schema import Document from llama_index.core.settings import Settings, llm_from_settings_or_context DEFAULT_EVAL_TEMPLATE = PromptTemplate( "Your task is to evaluate if the response for the query \ is in line with the context information provided.\n" "You have two options to answer. Either YES/ NO.\n" "Answer - YES, if the response for the query \ is in line with context information otherwise NO.\n" "Query and Response: \n {query_str}\n" "Context: \n {context_str}\n" "Answer: " ) DEFAULT_REFINE_TEMPLATE = PromptTemplate( "We want to understand if the following query and response is" "in line with the context information: \n {query_str}\n" "We have provided an existing YES/NO answer: \n {existing_answer}\n" "We have the opportunity to refine the existing answer " "(only if needed) with some more context below.\n" "------------\n" "{context_msg}\n" "------------\n" "If the existing answer was already YES, still answer YES. " "If the information is present in the new context, answer YES. " "Otherwise answer NO.\n" ) class RelevancyEvaluator(BaseEvaluator): """Relenvancy evaluator. Evaluates the relevancy of retrieved contexts and response to a query. This evaluator considers the query string, retrieved contexts, and response string. Args: service_context(Optional[ServiceContext]): The service context to use for evaluation. raise_error(Optional[bool]): Whether to raise an error if the response is invalid. Defaults to False. eval_template(Optional[Union[str, BasePromptTemplate]]): The template to use for evaluation. refine_template(Optional[Union[str, BasePromptTemplate]]): The template to use for refinement. 
""" def __init__( self, llm: Optional[LLM] = None, raise_error: bool = False, eval_template: Optional[Union[str, BasePromptTemplate]] = None, refine_template: Optional[Union[str, BasePromptTemplate]] = None, # deprecated service_context: Optional[ServiceContext] = None, ) -> None: """Init params.""" self._llm = llm or llm_from_settings_or_context(Settings, service_context) self._raise_error = raise_error self._eval_template: BasePromptTemplate if isinstance(eval_template, str): self._eval_template = PromptTemplate(eval_template) else: self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE self._refine_template: BasePromptTemplate if isinstance(refine_template, str): self._refine_template = PromptTemplate(refine_template) else: self._refine_template = refine_template or DEFAULT_REFINE_TEMPLATE def _get_prompts(self) -> PromptDictType: """Get prompts.""" return { "eval_template": self._eval_template, "refine_template": self._refine_template, } def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" if "eval_template" in prompts: self._eval_template = prompts["eval_template"] if "refine_template" in prompts: self._refine_template = prompts["refine_template"] async def aevaluate( self, query: str | None = None, response: str | None = None, contexts: Sequence[str] | None = None, sleep_time_in_seconds: int = 0, **kwargs: Any, ) -> EvaluationResult: """Evaluate whether the contexts and response are relevant to the query.""" del kwargs # Unused if query is None or contexts is None or response is None: raise ValueError("query, contexts, and response must be provided") docs = [Document(text=context) for context in contexts] index = SummaryIndex.from_documents(docs) query_response = f"Question: {query}\nResponse: {response}" await asyncio.sleep(sleep_time_in_seconds) query_engine = index.as_query_engine( llm=self._llm, text_qa_template=self._eval_template, refine_template=self._refine_template, ) response_obj = await query_engine.aquery(query_response) raw_response_txt = str(response_obj) if "yes" in raw_response_txt.lower(): passing = True else: if self._raise_error: raise ValueError("The response is invalid") passing = False return EvaluationResult( query=query, response=response, passing=passing, score=1.0 if passing else 0.0, feedback=raw_response_txt, contexts=contexts, ) QueryResponseEvaluator = RelevancyEvaluator
[ "llama_index.core.prompts.PromptTemplate", "llama_index.core.settings.llm_from_settings_or_context", "llama_index.core.evaluation.base.EvaluationResult", "llama_index.core.schema.Document", "llama_index.core.indices.SummaryIndex.from_documents" ]
[((620, 974), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Your task is to evaluate if the response for the query is in line with the context information provided.\nYou have two options to answer. Either YES/ NO.\nAnswer - YES, if the response for the query is in line with context information otherwise NO.\nQuery and Response: \n {query_str}\nContext: \n {context_str}\nAnswer: """'], {}), '(\n """Your task is to evaluate if the response for the query is in line with the context information provided.\nYou have two options to answer. Either YES/ NO.\nAnswer - YES, if the response for the query is in line with context information otherwise NO.\nQuery and Response: \n {query_str}\nContext: \n {context_str}\nAnswer: """\n )\n', (634, 974), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((1040, 1530), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""We want to understand if the following query and response isin line with the context information: \n {query_str}\nWe have provided an existing YES/NO answer: \n {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. Otherwise answer NO.\n"""'], {}), '(\n """We want to understand if the following query and response isin line with the context information: \n {query_str}\nWe have provided an existing YES/NO answer: \n {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. 
Otherwise answer NO.\n"""\n )\n', (1054, 1530), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((4408, 4441), 'llama_index.core.indices.SummaryIndex.from_documents', 'SummaryIndex.from_documents', (['docs'], {}), '(docs)\n', (4435, 4441), False, 'from llama_index.core.indices import SummaryIndex\n'), ((5085, 5231), 'llama_index.core.evaluation.base.EvaluationResult', 'EvaluationResult', ([], {'query': 'query', 'response': 'response', 'passing': 'passing', 'score': '(1.0 if passing else 0.0)', 'feedback': 'raw_response_txt', 'contexts': 'contexts'}), '(query=query, response=response, passing=passing, score=1.0 if\n passing else 0.0, feedback=raw_response_txt, contexts=contexts)\n', (5101, 5231), False, 'from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult\n'), ((2722, 2777), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (2750, 2777), False, 'from llama_index.core.settings import Settings, llm_from_settings_or_context\n'), ((2944, 2973), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['eval_template'], {}), '(eval_template)\n', (2958, 2973), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((3193, 3224), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['refine_template'], {}), '(refine_template)\n', (3207, 3224), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((4344, 4366), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'context'}), '(text=context)\n', (4352, 4366), False, 'from llama_index.core.schema import Document\n'), ((4526, 4562), 'asyncio.sleep', 'asyncio.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (4539, 4562), False, 'import asyncio\n')]
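A short, hedged usage sketch for the RelevancyEvaluator defined above. The `llama_index.core.evaluation` import path is an assumption, and the call assumes an LLM is already configured on `Settings` (otherwise pass one explicitly via the `llm` argument).

# Hypothetical usage sketch; import path assumed, and Settings must already
# carry a usable LLM because the constructor falls back to it when llm=None.
import asyncio

from llama_index.core.evaluation import RelevancyEvaluator

evaluator = RelevancyEvaluator(raise_error=False)

result = asyncio.run(
    evaluator.aevaluate(
        query="What colour is the sky?",
        response="The sky is blue on a clear day.",
        contexts=["On a clear day the sky appears blue because of Rayleigh scattering."],
    )
)
print(result.passing, result.score, result.feedback)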
"""Relevancy evaluation.""" from __future__ import annotations import asyncio from typing import Any, Optional, Sequence, Union from llama_index.core import ServiceContext from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult from llama_index.core.indices import SummaryIndex from llama_index.core.llms.llm import LLM from llama_index.core.prompts import BasePromptTemplate, PromptTemplate from llama_index.core.prompts.mixin import PromptDictType from llama_index.core.schema import Document from llama_index.core.settings import Settings, llm_from_settings_or_context DEFAULT_EVAL_TEMPLATE = PromptTemplate( "Your task is to evaluate if the response for the query \ is in line with the context information provided.\n" "You have two options to answer. Either YES/ NO.\n" "Answer - YES, if the response for the query \ is in line with context information otherwise NO.\n" "Query and Response: \n {query_str}\n" "Context: \n {context_str}\n" "Answer: " ) DEFAULT_REFINE_TEMPLATE = PromptTemplate( "We want to understand if the following query and response is" "in line with the context information: \n {query_str}\n" "We have provided an existing YES/NO answer: \n {existing_answer}\n" "We have the opportunity to refine the existing answer " "(only if needed) with some more context below.\n" "------------\n" "{context_msg}\n" "------------\n" "If the existing answer was already YES, still answer YES. " "If the information is present in the new context, answer YES. " "Otherwise answer NO.\n" ) class RelevancyEvaluator(BaseEvaluator): """Relenvancy evaluator. Evaluates the relevancy of retrieved contexts and response to a query. This evaluator considers the query string, retrieved contexts, and response string. Args: service_context(Optional[ServiceContext]): The service context to use for evaluation. raise_error(Optional[bool]): Whether to raise an error if the response is invalid. Defaults to False. eval_template(Optional[Union[str, BasePromptTemplate]]): The template to use for evaluation. refine_template(Optional[Union[str, BasePromptTemplate]]): The template to use for refinement. 
""" def __init__( self, llm: Optional[LLM] = None, raise_error: bool = False, eval_template: Optional[Union[str, BasePromptTemplate]] = None, refine_template: Optional[Union[str, BasePromptTemplate]] = None, # deprecated service_context: Optional[ServiceContext] = None, ) -> None: """Init params.""" self._llm = llm or llm_from_settings_or_context(Settings, service_context) self._raise_error = raise_error self._eval_template: BasePromptTemplate if isinstance(eval_template, str): self._eval_template = PromptTemplate(eval_template) else: self._eval_template = eval_template or DEFAULT_EVAL_TEMPLATE self._refine_template: BasePromptTemplate if isinstance(refine_template, str): self._refine_template = PromptTemplate(refine_template) else: self._refine_template = refine_template or DEFAULT_REFINE_TEMPLATE def _get_prompts(self) -> PromptDictType: """Get prompts.""" return { "eval_template": self._eval_template, "refine_template": self._refine_template, } def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" if "eval_template" in prompts: self._eval_template = prompts["eval_template"] if "refine_template" in prompts: self._refine_template = prompts["refine_template"] async def aevaluate( self, query: str | None = None, response: str | None = None, contexts: Sequence[str] | None = None, sleep_time_in_seconds: int = 0, **kwargs: Any, ) -> EvaluationResult: """Evaluate whether the contexts and response are relevant to the query.""" del kwargs # Unused if query is None or contexts is None or response is None: raise ValueError("query, contexts, and response must be provided") docs = [Document(text=context) for context in contexts] index = SummaryIndex.from_documents(docs) query_response = f"Question: {query}\nResponse: {response}" await asyncio.sleep(sleep_time_in_seconds) query_engine = index.as_query_engine( llm=self._llm, text_qa_template=self._eval_template, refine_template=self._refine_template, ) response_obj = await query_engine.aquery(query_response) raw_response_txt = str(response_obj) if "yes" in raw_response_txt.lower(): passing = True else: if self._raise_error: raise ValueError("The response is invalid") passing = False return EvaluationResult( query=query, response=response, passing=passing, score=1.0 if passing else 0.0, feedback=raw_response_txt, contexts=contexts, ) QueryResponseEvaluator = RelevancyEvaluator
[ "llama_index.core.prompts.PromptTemplate", "llama_index.core.settings.llm_from_settings_or_context", "llama_index.core.evaluation.base.EvaluationResult", "llama_index.core.schema.Document", "llama_index.core.indices.SummaryIndex.from_documents" ]
[((620, 974), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""Your task is to evaluate if the response for the query is in line with the context information provided.\nYou have two options to answer. Either YES/ NO.\nAnswer - YES, if the response for the query is in line with context information otherwise NO.\nQuery and Response: \n {query_str}\nContext: \n {context_str}\nAnswer: """'], {}), '(\n """Your task is to evaluate if the response for the query is in line with the context information provided.\nYou have two options to answer. Either YES/ NO.\nAnswer - YES, if the response for the query is in line with context information otherwise NO.\nQuery and Response: \n {query_str}\nContext: \n {context_str}\nAnswer: """\n )\n', (634, 974), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((1040, 1530), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['"""We want to understand if the following query and response isin line with the context information: \n {query_str}\nWe have provided an existing YES/NO answer: \n {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. Otherwise answer NO.\n"""'], {}), '(\n """We want to understand if the following query and response isin line with the context information: \n {query_str}\nWe have provided an existing YES/NO answer: \n {existing_answer}\nWe have the opportunity to refine the existing answer (only if needed) with some more context below.\n------------\n{context_msg}\n------------\nIf the existing answer was already YES, still answer YES. If the information is present in the new context, answer YES. 
Otherwise answer NO.\n"""\n )\n', (1054, 1530), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((4408, 4441), 'llama_index.core.indices.SummaryIndex.from_documents', 'SummaryIndex.from_documents', (['docs'], {}), '(docs)\n', (4435, 4441), False, 'from llama_index.core.indices import SummaryIndex\n'), ((5085, 5231), 'llama_index.core.evaluation.base.EvaluationResult', 'EvaluationResult', ([], {'query': 'query', 'response': 'response', 'passing': 'passing', 'score': '(1.0 if passing else 0.0)', 'feedback': 'raw_response_txt', 'contexts': 'contexts'}), '(query=query, response=response, passing=passing, score=1.0 if\n passing else 0.0, feedback=raw_response_txt, contexts=contexts)\n', (5101, 5231), False, 'from llama_index.core.evaluation.base import BaseEvaluator, EvaluationResult\n'), ((2722, 2777), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (2750, 2777), False, 'from llama_index.core.settings import Settings, llm_from_settings_or_context\n'), ((2944, 2973), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['eval_template'], {}), '(eval_template)\n', (2958, 2973), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((3193, 3224), 'llama_index.core.prompts.PromptTemplate', 'PromptTemplate', (['refine_template'], {}), '(refine_template)\n', (3207, 3224), False, 'from llama_index.core.prompts import BasePromptTemplate, PromptTemplate\n'), ((4344, 4366), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'context'}), '(text=context)\n', (4352, 4366), False, 'from llama_index.core.schema import Document\n'), ((4526, 4562), 'asyncio.sleep', 'asyncio.sleep', (['sleep_time_in_seconds'], {}), '(sleep_time_in_seconds)\n', (4539, 4562), False, 'import asyncio\n')]
"""Base tool spec class.""" import asyncio from inspect import signature from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.tools.function_tool import FunctionTool from llama_index.core.tools.types import ToolMetadata from llama_index.core.tools.utils import create_schema_from_function AsyncCallable = Callable[..., Awaitable[Any]] # TODO: deprecate the Tuple (there's no use for it) SPEC_FUNCTION_TYPE = Union[str, Tuple[str, str]] class BaseToolSpec: """Base tool spec class.""" # list of functions that you'd want to convert to spec spec_functions: List[SPEC_FUNCTION_TYPE] def get_fn_schema_from_fn_name( self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None ) -> Optional[Type[BaseModel]]: """Return map from function name. Return type is Optional, meaning that the schema can be None. In this case, it's up to the downstream tool implementation to infer the schema. """ spec_functions = spec_functions or self.spec_functions for fn in spec_functions: if fn == fn_name: return create_schema_from_function(fn_name, getattr(self, fn_name)) raise ValueError(f"Invalid function name: {fn_name}") def get_metadata_from_fn_name( self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None ) -> Optional[ToolMetadata]: """Return map from function name. Return type is Optional, meaning that the schema can be None. In this case, it's up to the downstream tool implementation to infer the schema. """ try: func = getattr(self, fn_name) except AttributeError: return None name = fn_name docstring = func.__doc__ or "" description = f"{name}{signature(func)}\n{docstring}" fn_schema = self.get_fn_schema_from_fn_name( fn_name, spec_functions=spec_functions ) return ToolMetadata(name=name, description=description, fn_schema=fn_schema) def to_tool_list( self, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None, func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None, ) -> List[FunctionTool]: """Convert tool spec to list of tools.""" spec_functions = spec_functions or self.spec_functions func_to_metadata_mapping = func_to_metadata_mapping or {} tool_list = [] for func_spec in spec_functions: func_sync = None func_async = None if isinstance(func_spec, str): func = getattr(self, func_spec) if asyncio.iscoroutinefunction(func): func_async = func else: func_sync = func metadata = func_to_metadata_mapping.get(func_spec, None) if metadata is None: metadata = self.get_metadata_from_fn_name(func_spec) elif isinstance(func_spec, tuple) and len(func_spec) == 2: func_sync = getattr(self, func_spec[0]) func_async = getattr(self, func_spec[1]) metadata = func_to_metadata_mapping.get(func_spec[0], None) if metadata is None: metadata = func_to_metadata_mapping.get(func_spec[1], None) if metadata is None: metadata = self.get_metadata_from_fn_name(func_spec[0]) else: raise ValueError( "spec_functions must be of type: List[Union[str, Tuple[str, str]]]" ) if func_sync is None: if func_async is not None: func_sync = patch_sync(func_async) else: raise ValueError( f"Could not retrieve a function for spec: {func_spec}" ) tool = FunctionTool.from_defaults( fn=func_sync, async_fn=func_async, tool_metadata=metadata, ) tool_list.append(tool) return tool_list def patch_sync(func_async: AsyncCallable) -> Callable: """Patch sync function from async function.""" def patched_sync(*args: Any, **kwargs: Any) -> Any: loop = asyncio.get_event_loop() return 
loop.run_until_complete(func_async(*args, **kwargs)) return patched_sync
[ "llama_index.core.tools.function_tool.FunctionTool.from_defaults", "llama_index.core.tools.types.ToolMetadata" ]
[((2092, 2161), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': 'name', 'description': 'description', 'fn_schema': 'fn_schema'}), '(name=name, description=description, fn_schema=fn_schema)\n', (2104, 2161), False, 'from llama_index.core.tools.types import ToolMetadata\n'), ((4457, 4481), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4479, 4481), False, 'import asyncio\n'), ((4068, 4158), 'llama_index.core.tools.function_tool.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'func_sync', 'async_fn': 'func_async', 'tool_metadata': 'metadata'}), '(fn=func_sync, async_fn=func_async, tool_metadata\n =metadata)\n', (4094, 4158), False, 'from llama_index.core.tools.function_tool import FunctionTool\n'), ((1932, 1947), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (1941, 1947), False, 'from inspect import signature\n'), ((2783, 2816), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['func'], {}), '(func)\n', (2810, 2816), False, 'import asyncio\n')]
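A hedged sketch of how the spec above is typically consumed: subclass it, list the exposed methods in `spec_functions`, and call `to_tool_list()`. The import path is an assumption about where this module sits in the package.

# Hypothetical usage sketch; the import path is assumed.
from llama_index.core.tools.tool_spec.base import BaseToolSpec


class MathToolSpec(BaseToolSpec):
    """Toy tool spec exposing two plain Python methods as tools."""

    spec_functions = ["add", "multiply"]

    def add(self, a: int, b: int) -> int:
        """Add two integers."""
        return a + b

    def multiply(self, a: int, b: int) -> int:
        """Multiply two integers."""
        return a * b


tools = MathToolSpec().to_tool_list()
for tool in tools:
    # Metadata is derived from each method's name, signature and docstring
    # (see get_metadata_from_fn_name above).
    print(tool.metadata.name, tool.metadata.description)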
"""Base tool spec class.""" import asyncio from inspect import signature from typing import Any, Awaitable, Callable, Dict, List, Optional, Tuple, Type, Union from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.tools.function_tool import FunctionTool from llama_index.core.tools.types import ToolMetadata from llama_index.core.tools.utils import create_schema_from_function AsyncCallable = Callable[..., Awaitable[Any]] # TODO: deprecate the Tuple (there's no use for it) SPEC_FUNCTION_TYPE = Union[str, Tuple[str, str]] class BaseToolSpec: """Base tool spec class.""" # list of functions that you'd want to convert to spec spec_functions: List[SPEC_FUNCTION_TYPE] def get_fn_schema_from_fn_name( self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None ) -> Optional[Type[BaseModel]]: """Return map from function name. Return type is Optional, meaning that the schema can be None. In this case, it's up to the downstream tool implementation to infer the schema. """ spec_functions = spec_functions or self.spec_functions for fn in spec_functions: if fn == fn_name: return create_schema_from_function(fn_name, getattr(self, fn_name)) raise ValueError(f"Invalid function name: {fn_name}") def get_metadata_from_fn_name( self, fn_name: str, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None ) -> Optional[ToolMetadata]: """Return map from function name. Return type is Optional, meaning that the schema can be None. In this case, it's up to the downstream tool implementation to infer the schema. """ try: func = getattr(self, fn_name) except AttributeError: return None name = fn_name docstring = func.__doc__ or "" description = f"{name}{signature(func)}\n{docstring}" fn_schema = self.get_fn_schema_from_fn_name( fn_name, spec_functions=spec_functions ) return ToolMetadata(name=name, description=description, fn_schema=fn_schema) def to_tool_list( self, spec_functions: Optional[List[SPEC_FUNCTION_TYPE]] = None, func_to_metadata_mapping: Optional[Dict[str, ToolMetadata]] = None, ) -> List[FunctionTool]: """Convert tool spec to list of tools.""" spec_functions = spec_functions or self.spec_functions func_to_metadata_mapping = func_to_metadata_mapping or {} tool_list = [] for func_spec in spec_functions: func_sync = None func_async = None if isinstance(func_spec, str): func = getattr(self, func_spec) if asyncio.iscoroutinefunction(func): func_async = func else: func_sync = func metadata = func_to_metadata_mapping.get(func_spec, None) if metadata is None: metadata = self.get_metadata_from_fn_name(func_spec) elif isinstance(func_spec, tuple) and len(func_spec) == 2: func_sync = getattr(self, func_spec[0]) func_async = getattr(self, func_spec[1]) metadata = func_to_metadata_mapping.get(func_spec[0], None) if metadata is None: metadata = func_to_metadata_mapping.get(func_spec[1], None) if metadata is None: metadata = self.get_metadata_from_fn_name(func_spec[0]) else: raise ValueError( "spec_functions must be of type: List[Union[str, Tuple[str, str]]]" ) if func_sync is None: if func_async is not None: func_sync = patch_sync(func_async) else: raise ValueError( f"Could not retrieve a function for spec: {func_spec}" ) tool = FunctionTool.from_defaults( fn=func_sync, async_fn=func_async, tool_metadata=metadata, ) tool_list.append(tool) return tool_list def patch_sync(func_async: AsyncCallable) -> Callable: """Patch sync function from async function.""" def patched_sync(*args: Any, **kwargs: Any) -> Any: loop = asyncio.get_event_loop() return 
loop.run_until_complete(func_async(*args, **kwargs)) return patched_sync
[ "llama_index.core.tools.function_tool.FunctionTool.from_defaults", "llama_index.core.tools.types.ToolMetadata" ]
[((2092, 2161), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': 'name', 'description': 'description', 'fn_schema': 'fn_schema'}), '(name=name, description=description, fn_schema=fn_schema)\n', (2104, 2161), False, 'from llama_index.core.tools.types import ToolMetadata\n'), ((4457, 4481), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4479, 4481), False, 'import asyncio\n'), ((4068, 4158), 'llama_index.core.tools.function_tool.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'func_sync', 'async_fn': 'func_async', 'tool_metadata': 'metadata'}), '(fn=func_sync, async_fn=func_async, tool_metadata\n =metadata)\n', (4094, 4158), False, 'from llama_index.core.tools.function_tool import FunctionTool\n'), ((1932, 1947), 'inspect.signature', 'signature', (['func'], {}), '(func)\n', (1941, 1947), False, 'from inspect import signature\n'), ((2783, 2816), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['func'], {}), '(func)\n', (2810, 2816), False, 'import asyncio\n')]
"""Node parser interface.""" from abc import ABC, abstractmethod from typing import Any, Callable, List, Sequence from llama_index.core.bridge.pydantic import Field, validator from llama_index.core.callbacks import CallbackManager, CBEventType, EventPayload from llama_index.core.node_parser.node_utils import ( build_nodes_from_splits, default_id_func, ) from llama_index.core.schema import ( BaseNode, Document, MetadataMode, NodeRelationship, TransformComponent, ) from llama_index.core.utils import get_tqdm_iterable class NodeParser(TransformComponent, ABC): """Base interface for node parser.""" include_metadata: bool = Field( default=True, description="Whether or not to consider metadata when splitting." ) include_prev_next_rel: bool = Field( default=True, description="Include prev/next node relationships." ) callback_manager: CallbackManager = Field( default_factory=CallbackManager, exclude=True ) id_func: Callable = Field( default=None, description="Function to generate node IDs.", exclude=True, ) class Config: arbitrary_types_allowed = True @validator("id_func", pre=True) def _validate_id_func(cls, v: Any) -> Any: if v is None: return default_id_func return v @abstractmethod def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any, ) -> List[BaseNode]: ... def get_nodes_from_documents( self, documents: Sequence[Document], show_progress: bool = False, **kwargs: Any, ) -> List[BaseNode]: """Parse documents into nodes. Args: documents (Sequence[Document]): documents to parse show_progress (bool): whether to show progress bar """ doc_id_to_document = {doc.id_: doc for doc in documents} with self.callback_manager.event( CBEventType.NODE_PARSING, payload={EventPayload.DOCUMENTS: documents} ) as event: nodes = self._parse_nodes(documents, show_progress=show_progress, **kwargs) for i, node in enumerate(nodes): if ( node.ref_doc_id is not None and node.ref_doc_id in doc_id_to_document ): ref_doc = doc_id_to_document[node.ref_doc_id] start_char_idx = ref_doc.text.find( node.get_content(metadata_mode=MetadataMode.NONE) ) # update start/end char idx if start_char_idx >= 0: node.start_char_idx = start_char_idx node.end_char_idx = start_char_idx + len( node.get_content(metadata_mode=MetadataMode.NONE) ) # update metadata if self.include_metadata: node.metadata.update( doc_id_to_document[node.ref_doc_id].metadata ) if self.include_prev_next_rel: if i > 0: node.relationships[NodeRelationship.PREVIOUS] = nodes[ i - 1 ].as_related_node_info() if i < len(nodes) - 1: node.relationships[NodeRelationship.NEXT] = nodes[ i + 1 ].as_related_node_info() event.on_end({EventPayload.NODES: nodes}) return nodes def __call__(self, nodes: List[BaseNode], **kwargs: Any) -> List[BaseNode]: return self.get_nodes_from_documents(nodes, **kwargs) class TextSplitter(NodeParser): @abstractmethod def split_text(self, text: str) -> List[str]: ... 
def split_texts(self, texts: List[str]) -> List[str]: nested_texts = [self.split_text(text) for text in texts] return [item for sublist in nested_texts for item in sublist] def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any ) -> List[BaseNode]: all_nodes: List[BaseNode] = [] nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes") for node in nodes_with_progress: splits = self.split_text(node.get_content()) all_nodes.extend( build_nodes_from_splits(splits, node, id_func=self.id_func) ) return all_nodes class MetadataAwareTextSplitter(TextSplitter): @abstractmethod def split_text_metadata_aware(self, text: str, metadata_str: str) -> List[str]: ... def split_texts_metadata_aware( self, texts: List[str], metadata_strs: List[str] ) -> List[str]: if len(texts) != len(metadata_strs): raise ValueError("Texts and metadata_strs must have the same length") nested_texts = [ self.split_text_metadata_aware(text, metadata) for text, metadata in zip(texts, metadata_strs) ] return [item for sublist in nested_texts for item in sublist] def _get_metadata_str(self, node: BaseNode) -> str: """Helper function to get the proper metadata str for splitting.""" embed_metadata_str = node.get_metadata_str(mode=MetadataMode.EMBED) llm_metadata_str = node.get_metadata_str(mode=MetadataMode.LLM) # use the longest metadata str for splitting if len(embed_metadata_str) > len(llm_metadata_str): metadata_str = embed_metadata_str else: metadata_str = llm_metadata_str return metadata_str def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any ) -> List[BaseNode]: all_nodes: List[BaseNode] = [] nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes") for node in nodes_with_progress: metadata_str = self._get_metadata_str(node) splits = self.split_text_metadata_aware( node.get_content(metadata_mode=MetadataMode.NONE), metadata_str=metadata_str, ) all_nodes.extend( build_nodes_from_splits(splits, node, id_func=self.id_func) ) return all_nodes
[ "llama_index.core.utils.get_tqdm_iterable", "llama_index.core.node_parser.node_utils.build_nodes_from_splits", "llama_index.core.bridge.pydantic.validator", "llama_index.core.bridge.pydantic.Field" ]
[((668, 759), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Whether or not to consider metadata when splitting."""'}), "(default=True, description=\n 'Whether or not to consider metadata when splitting.')\n", (673, 759), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((803, 875), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Include prev/next node relationships."""'}), "(default=True, description='Include prev/next node relationships.')\n", (808, 875), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((930, 982), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)'}), '(default_factory=CallbackManager, exclude=True)\n', (935, 982), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((1021, 1100), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Function to generate node IDs."""', 'exclude': '(True)'}), "(default=None, description='Function to generate node IDs.', exclude=True)\n", (1026, 1100), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((1196, 1226), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""id_func"""'], {'pre': '(True)'}), "('id_func', pre=True)\n", (1205, 1226), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((4341, 4397), 'llama_index.core.utils.get_tqdm_iterable', 'get_tqdm_iterable', (['nodes', 'show_progress', '"""Parsing nodes"""'], {}), "(nodes, show_progress, 'Parsing nodes')\n", (4358, 4397), False, 'from llama_index.core.utils import get_tqdm_iterable\n'), ((6002, 6058), 'llama_index.core.utils.get_tqdm_iterable', 'get_tqdm_iterable', (['nodes', 'show_progress', '"""Parsing nodes"""'], {}), "(nodes, show_progress, 'Parsing nodes')\n", (6019, 6058), False, 'from llama_index.core.utils import get_tqdm_iterable\n'), ((4543, 4602), 'llama_index.core.node_parser.node_utils.build_nodes_from_splits', 'build_nodes_from_splits', (['splits', 'node'], {'id_func': 'self.id_func'}), '(splits, node, id_func=self.id_func)\n', (4566, 4602), False, 'from llama_index.core.node_parser.node_utils import build_nodes_from_splits, default_id_func\n'), ((6380, 6439), 'llama_index.core.node_parser.node_utils.build_nodes_from_splits', 'build_nodes_from_splits', (['splits', 'node'], {'id_func': 'self.id_func'}), '(splits, node, id_func=self.id_func)\n', (6403, 6439), False, 'from llama_index.core.node_parser.node_utils import build_nodes_from_splits, default_id_func\n')]
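A hedged sketch of the node-parser interface above in use: a toy TextSplitter subclass that splits on blank lines and then relies on `get_nodes_from_documents` for node construction. The re-export path `llama_index.core.node_parser` is an assumption.

# Hypothetical usage sketch; the `llama_index.core.node_parser` re-export of
# TextSplitter is assumed.
from typing import List

from llama_index.core.node_parser import TextSplitter
from llama_index.core.schema import Document


class ParagraphSplitter(TextSplitter):
    """Toy splitter: one chunk per blank-line-separated paragraph."""

    def split_text(self, text: str) -> List[str]:
        return [part.strip() for part in text.split("\n\n") if part.strip()]


splitter = ParagraphSplitter()
docs = [Document(text="First paragraph.\n\nSecond paragraph.")]
for node in splitter.get_nodes_from_documents(docs):
    print(node.node_id, node.get_content())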
"""Node parser interface.""" from abc import ABC, abstractmethod from typing import Any, Callable, List, Sequence from llama_index.core.bridge.pydantic import Field, validator from llama_index.core.callbacks import CallbackManager, CBEventType, EventPayload from llama_index.core.node_parser.node_utils import ( build_nodes_from_splits, default_id_func, ) from llama_index.core.schema import ( BaseNode, Document, MetadataMode, NodeRelationship, TransformComponent, ) from llama_index.core.utils import get_tqdm_iterable class NodeParser(TransformComponent, ABC): """Base interface for node parser.""" include_metadata: bool = Field( default=True, description="Whether or not to consider metadata when splitting." ) include_prev_next_rel: bool = Field( default=True, description="Include prev/next node relationships." ) callback_manager: CallbackManager = Field( default_factory=CallbackManager, exclude=True ) id_func: Callable = Field( default=None, description="Function to generate node IDs.", exclude=True, ) class Config: arbitrary_types_allowed = True @validator("id_func", pre=True) def _validate_id_func(cls, v: Any) -> Any: if v is None: return default_id_func return v @abstractmethod def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any, ) -> List[BaseNode]: ... def get_nodes_from_documents( self, documents: Sequence[Document], show_progress: bool = False, **kwargs: Any, ) -> List[BaseNode]: """Parse documents into nodes. Args: documents (Sequence[Document]): documents to parse show_progress (bool): whether to show progress bar """ doc_id_to_document = {doc.id_: doc for doc in documents} with self.callback_manager.event( CBEventType.NODE_PARSING, payload={EventPayload.DOCUMENTS: documents} ) as event: nodes = self._parse_nodes(documents, show_progress=show_progress, **kwargs) for i, node in enumerate(nodes): if ( node.ref_doc_id is not None and node.ref_doc_id in doc_id_to_document ): ref_doc = doc_id_to_document[node.ref_doc_id] start_char_idx = ref_doc.text.find( node.get_content(metadata_mode=MetadataMode.NONE) ) # update start/end char idx if start_char_idx >= 0: node.start_char_idx = start_char_idx node.end_char_idx = start_char_idx + len( node.get_content(metadata_mode=MetadataMode.NONE) ) # update metadata if self.include_metadata: node.metadata.update( doc_id_to_document[node.ref_doc_id].metadata ) if self.include_prev_next_rel: if i > 0: node.relationships[NodeRelationship.PREVIOUS] = nodes[ i - 1 ].as_related_node_info() if i < len(nodes) - 1: node.relationships[NodeRelationship.NEXT] = nodes[ i + 1 ].as_related_node_info() event.on_end({EventPayload.NODES: nodes}) return nodes def __call__(self, nodes: List[BaseNode], **kwargs: Any) -> List[BaseNode]: return self.get_nodes_from_documents(nodes, **kwargs) class TextSplitter(NodeParser): @abstractmethod def split_text(self, text: str) -> List[str]: ... 
def split_texts(self, texts: List[str]) -> List[str]: nested_texts = [self.split_text(text) for text in texts] return [item for sublist in nested_texts for item in sublist] def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any ) -> List[BaseNode]: all_nodes: List[BaseNode] = [] nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes") for node in nodes_with_progress: splits = self.split_text(node.get_content()) all_nodes.extend( build_nodes_from_splits(splits, node, id_func=self.id_func) ) return all_nodes class MetadataAwareTextSplitter(TextSplitter): @abstractmethod def split_text_metadata_aware(self, text: str, metadata_str: str) -> List[str]: ... def split_texts_metadata_aware( self, texts: List[str], metadata_strs: List[str] ) -> List[str]: if len(texts) != len(metadata_strs): raise ValueError("Texts and metadata_strs must have the same length") nested_texts = [ self.split_text_metadata_aware(text, metadata) for text, metadata in zip(texts, metadata_strs) ] return [item for sublist in nested_texts for item in sublist] def _get_metadata_str(self, node: BaseNode) -> str: """Helper function to get the proper metadata str for splitting.""" embed_metadata_str = node.get_metadata_str(mode=MetadataMode.EMBED) llm_metadata_str = node.get_metadata_str(mode=MetadataMode.LLM) # use the longest metadata str for splitting if len(embed_metadata_str) > len(llm_metadata_str): metadata_str = embed_metadata_str else: metadata_str = llm_metadata_str return metadata_str def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any ) -> List[BaseNode]: all_nodes: List[BaseNode] = [] nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes") for node in nodes_with_progress: metadata_str = self._get_metadata_str(node) splits = self.split_text_metadata_aware( node.get_content(metadata_mode=MetadataMode.NONE), metadata_str=metadata_str, ) all_nodes.extend( build_nodes_from_splits(splits, node, id_func=self.id_func) ) return all_nodes
[ "llama_index.core.utils.get_tqdm_iterable", "llama_index.core.node_parser.node_utils.build_nodes_from_splits", "llama_index.core.bridge.pydantic.validator", "llama_index.core.bridge.pydantic.Field" ]
[((668, 759), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Whether or not to consider metadata when splitting."""'}), "(default=True, description=\n 'Whether or not to consider metadata when splitting.')\n", (673, 759), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((803, 875), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Include prev/next node relationships."""'}), "(default=True, description='Include prev/next node relationships.')\n", (808, 875), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((930, 982), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)'}), '(default_factory=CallbackManager, exclude=True)\n', (935, 982), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((1021, 1100), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Function to generate node IDs."""', 'exclude': '(True)'}), "(default=None, description='Function to generate node IDs.', exclude=True)\n", (1026, 1100), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((1196, 1226), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""id_func"""'], {'pre': '(True)'}), "('id_func', pre=True)\n", (1205, 1226), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((4341, 4397), 'llama_index.core.utils.get_tqdm_iterable', 'get_tqdm_iterable', (['nodes', 'show_progress', '"""Parsing nodes"""'], {}), "(nodes, show_progress, 'Parsing nodes')\n", (4358, 4397), False, 'from llama_index.core.utils import get_tqdm_iterable\n'), ((6002, 6058), 'llama_index.core.utils.get_tqdm_iterable', 'get_tqdm_iterable', (['nodes', 'show_progress', '"""Parsing nodes"""'], {}), "(nodes, show_progress, 'Parsing nodes')\n", (6019, 6058), False, 'from llama_index.core.utils import get_tqdm_iterable\n'), ((4543, 4602), 'llama_index.core.node_parser.node_utils.build_nodes_from_splits', 'build_nodes_from_splits', (['splits', 'node'], {'id_func': 'self.id_func'}), '(splits, node, id_func=self.id_func)\n', (4566, 4602), False, 'from llama_index.core.node_parser.node_utils import build_nodes_from_splits, default_id_func\n'), ((6380, 6439), 'llama_index.core.node_parser.node_utils.build_nodes_from_splits', 'build_nodes_from_splits', (['splits', 'node'], {'id_func': 'self.id_func'}), '(splits, node, id_func=self.id_func)\n', (6403, 6439), False, 'from llama_index.core.node_parser.node_utils import build_nodes_from_splits, default_id_func\n')]
"""Tree Index inserter.""" from typing import Optional, Sequence from llama_index.core.data_structs.data_structs import IndexGraph from llama_index.core.indices.prompt_helper import PromptHelper from llama_index.core.indices.tree.utils import get_numbered_text_from_nodes from llama_index.core.indices.utils import ( extract_numbers_given_response, get_sorted_node_list, ) from llama_index.core.llms.llm import LLM from llama_index.core.prompts.base import BasePromptTemplate from llama_index.core.prompts.default_prompts import ( DEFAULT_INSERT_PROMPT, DEFAULT_SUMMARY_PROMPT, ) from llama_index.core.schema import BaseNode, MetadataMode, TextNode from llama_index.core.service_context import ServiceContext from llama_index.core.settings import ( Settings, llm_from_settings_or_context, ) from llama_index.core.storage.docstore import BaseDocumentStore from llama_index.core.storage.docstore.registry import get_default_docstore class TreeIndexInserter: """LlamaIndex inserter.""" def __init__( self, index_graph: IndexGraph, service_context: Optional[ServiceContext] = None, llm: Optional[LLM] = None, num_children: int = 10, insert_prompt: BasePromptTemplate = DEFAULT_INSERT_PROMPT, summary_prompt: BasePromptTemplate = DEFAULT_SUMMARY_PROMPT, docstore: Optional[BaseDocumentStore] = None, ) -> None: """Initialize with params.""" if num_children < 2: raise ValueError("Invalid number of children.") self.num_children = num_children self.summary_prompt = summary_prompt self.insert_prompt = insert_prompt self.index_graph = index_graph self._llm = llm or llm_from_settings_or_context(Settings, service_context) self._prompt_helper = Settings._prompt_helper or PromptHelper.from_llm_metadata( self._llm.metadata, ) self._docstore = docstore or get_default_docstore() def _insert_under_parent_and_consolidate( self, text_node: BaseNode, parent_node: Optional[BaseNode] ) -> None: """Insert node under parent and consolidate. Consolidation will happen by dividing up child nodes, and creating a new intermediate layer of nodes. """ # perform insertion self.index_graph.insert_under_parent(text_node, parent_node) # if under num_children limit, then we're fine if len(self.index_graph.get_children(parent_node)) <= self.num_children: return else: # perform consolidation cur_graph_node_ids = self.index_graph.get_children(parent_node) cur_graph_nodes = self._docstore.get_node_dict(cur_graph_node_ids) cur_graph_node_list = get_sorted_node_list(cur_graph_nodes) # this layer is all leaf nodes, consolidate and split leaf nodes # consolidate and split leaf nodes in half # TODO: do better splitting (with a GPT prompt etc.) 
half1 = cur_graph_node_list[: len(cur_graph_nodes) // 2] half2 = cur_graph_node_list[len(cur_graph_nodes) // 2 :] truncated_chunks = self._prompt_helper.truncate( prompt=self.summary_prompt, text_chunks=[ node.get_content(metadata_mode=MetadataMode.LLM) for node in half1 ], ) text_chunk1 = "\n".join(truncated_chunks) summary1 = self._llm.predict(self.summary_prompt, context_str=text_chunk1) node1 = TextNode(text=summary1) self.index_graph.insert(node1, children_nodes=half1) truncated_chunks = self._prompt_helper.truncate( prompt=self.summary_prompt, text_chunks=[ node.get_content(metadata_mode=MetadataMode.LLM) for node in half2 ], ) text_chunk2 = "\n".join(truncated_chunks) summary2 = self._llm.predict(self.summary_prompt, context_str=text_chunk2) node2 = TextNode(text=summary2) self.index_graph.insert(node2, children_nodes=half2) # insert half1 and half2 as new children of parent_node # first remove child indices from parent node if parent_node is not None: self.index_graph.node_id_to_children_ids[parent_node.node_id] = [] else: self.index_graph.root_nodes = {} self.index_graph.insert_under_parent( node1, parent_node, new_index=self.index_graph.get_index(node1) ) self._docstore.add_documents([node1], allow_update=False) self.index_graph.insert_under_parent( node2, parent_node, new_index=self.index_graph.get_index(node2) ) self._docstore.add_documents([node2], allow_update=False) def _insert_node( self, node: BaseNode, parent_node: Optional[BaseNode] = None ) -> None: """Insert node.""" cur_graph_node_ids = self.index_graph.get_children(parent_node) cur_graph_nodes = self._docstore.get_node_dict(cur_graph_node_ids) cur_graph_node_list = get_sorted_node_list(cur_graph_nodes) # if cur_graph_nodes is empty (start with empty graph), then insert under # parent (insert new root node) if len(cur_graph_nodes) == 0: self._insert_under_parent_and_consolidate(node, parent_node) # check if leaf nodes, then just insert under parent elif len(self.index_graph.get_children(cur_graph_node_list[0])) == 0: self._insert_under_parent_and_consolidate(node, parent_node) # else try to find the right summary node to insert under else: text_splitter = self._prompt_helper.get_text_splitter_given_prompt( prompt=self.insert_prompt, num_chunks=len(cur_graph_node_list), ) numbered_text = get_numbered_text_from_nodes( cur_graph_node_list, text_splitter=text_splitter ) response = self._llm.predict( self.insert_prompt, new_chunk_text=node.get_content(metadata_mode=MetadataMode.LLM), num_chunks=len(cur_graph_node_list), context_list=numbered_text, ) numbers = extract_numbers_given_response(response) if numbers is None or len(numbers) == 0: # NOTE: if we can't extract a number, then we just insert under parent self._insert_under_parent_and_consolidate(node, parent_node) elif int(numbers[0]) > len(cur_graph_node_list): # NOTE: if number is out of range, then we just insert under parent self._insert_under_parent_and_consolidate(node, parent_node) else: selected_node = cur_graph_node_list[int(numbers[0]) - 1] self._insert_node(node, selected_node) # now we need to update summary for parent node, since we # need to bubble updated summaries up the tree if parent_node is not None: # refetch children cur_graph_node_ids = self.index_graph.get_children(parent_node) cur_graph_nodes = self._docstore.get_node_dict(cur_graph_node_ids) cur_graph_node_list = get_sorted_node_list(cur_graph_nodes) truncated_chunks = self._prompt_helper.truncate( prompt=self.summary_prompt, text_chunks=[ node.get_content(metadata_mode=MetadataMode.LLM) for 
node in cur_graph_node_list ], ) text_chunk = "\n".join(truncated_chunks) new_summary = self._llm.predict(self.summary_prompt, context_str=text_chunk) parent_node.set_content(new_summary) def insert(self, nodes: Sequence[BaseNode]) -> None: """Insert into index_graph.""" for node in nodes: self._insert_node(node)
[ "llama_index.core.indices.tree.utils.get_numbered_text_from_nodes", "llama_index.core.settings.llm_from_settings_or_context", "llama_index.core.storage.docstore.registry.get_default_docstore", "llama_index.core.indices.utils.extract_numbers_given_response", "llama_index.core.schema.TextNode", "llama_index.core.indices.utils.get_sorted_node_list", "llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata" ]
[((5228, 5265), 'llama_index.core.indices.utils.get_sorted_node_list', 'get_sorted_node_list', (['cur_graph_nodes'], {}), '(cur_graph_nodes)\n', (5248, 5265), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n'), ((1733, 1788), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (1761, 1788), False, 'from llama_index.core.settings import Settings, llm_from_settings_or_context\n'), ((1846, 1896), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', (['self._llm.metadata'], {}), '(self._llm.metadata)\n', (1876, 1896), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((1957, 1979), 'llama_index.core.storage.docstore.registry.get_default_docstore', 'get_default_docstore', ([], {}), '()\n', (1977, 1979), False, 'from llama_index.core.storage.docstore.registry import get_default_docstore\n'), ((2786, 2823), 'llama_index.core.indices.utils.get_sorted_node_list', 'get_sorted_node_list', (['cur_graph_nodes'], {}), '(cur_graph_nodes)\n', (2806, 2823), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n'), ((3577, 3600), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'summary1'}), '(text=summary1)\n', (3585, 3600), False, 'from llama_index.core.schema import BaseNode, MetadataMode, TextNode\n'), ((4083, 4106), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'summary2'}), '(text=summary2)\n', (4091, 4106), False, 'from llama_index.core.schema import BaseNode, MetadataMode, TextNode\n'), ((7414, 7451), 'llama_index.core.indices.utils.get_sorted_node_list', 'get_sorted_node_list', (['cur_graph_nodes'], {}), '(cur_graph_nodes)\n', (7434, 7451), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n'), ((6009, 6087), 'llama_index.core.indices.tree.utils.get_numbered_text_from_nodes', 'get_numbered_text_from_nodes', (['cur_graph_node_list'], {'text_splitter': 'text_splitter'}), '(cur_graph_node_list, text_splitter=text_splitter)\n', (6037, 6087), False, 'from llama_index.core.indices.tree.utils import get_numbered_text_from_nodes\n'), ((6410, 6450), 'llama_index.core.indices.utils.extract_numbers_given_response', 'extract_numbers_given_response', (['response'], {}), '(response)\n', (6440, 6450), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n')]
"""Tree Index inserter.""" from typing import Optional, Sequence from llama_index.core.data_structs.data_structs import IndexGraph from llama_index.core.indices.prompt_helper import PromptHelper from llama_index.core.indices.tree.utils import get_numbered_text_from_nodes from llama_index.core.indices.utils import ( extract_numbers_given_response, get_sorted_node_list, ) from llama_index.core.llms.llm import LLM from llama_index.core.prompts.base import BasePromptTemplate from llama_index.core.prompts.default_prompts import ( DEFAULT_INSERT_PROMPT, DEFAULT_SUMMARY_PROMPT, ) from llama_index.core.schema import BaseNode, MetadataMode, TextNode from llama_index.core.service_context import ServiceContext from llama_index.core.settings import ( Settings, llm_from_settings_or_context, ) from llama_index.core.storage.docstore import BaseDocumentStore from llama_index.core.storage.docstore.registry import get_default_docstore class TreeIndexInserter: """LlamaIndex inserter.""" def __init__( self, index_graph: IndexGraph, service_context: Optional[ServiceContext] = None, llm: Optional[LLM] = None, num_children: int = 10, insert_prompt: BasePromptTemplate = DEFAULT_INSERT_PROMPT, summary_prompt: BasePromptTemplate = DEFAULT_SUMMARY_PROMPT, docstore: Optional[BaseDocumentStore] = None, ) -> None: """Initialize with params.""" if num_children < 2: raise ValueError("Invalid number of children.") self.num_children = num_children self.summary_prompt = summary_prompt self.insert_prompt = insert_prompt self.index_graph = index_graph self._llm = llm or llm_from_settings_or_context(Settings, service_context) self._prompt_helper = Settings._prompt_helper or PromptHelper.from_llm_metadata( self._llm.metadata, ) self._docstore = docstore or get_default_docstore() def _insert_under_parent_and_consolidate( self, text_node: BaseNode, parent_node: Optional[BaseNode] ) -> None: """Insert node under parent and consolidate. Consolidation will happen by dividing up child nodes, and creating a new intermediate layer of nodes. """ # perform insertion self.index_graph.insert_under_parent(text_node, parent_node) # if under num_children limit, then we're fine if len(self.index_graph.get_children(parent_node)) <= self.num_children: return else: # perform consolidation cur_graph_node_ids = self.index_graph.get_children(parent_node) cur_graph_nodes = self._docstore.get_node_dict(cur_graph_node_ids) cur_graph_node_list = get_sorted_node_list(cur_graph_nodes) # this layer is all leaf nodes, consolidate and split leaf nodes # consolidate and split leaf nodes in half # TODO: do better splitting (with a GPT prompt etc.) 
half1 = cur_graph_node_list[: len(cur_graph_nodes) // 2] half2 = cur_graph_node_list[len(cur_graph_nodes) // 2 :] truncated_chunks = self._prompt_helper.truncate( prompt=self.summary_prompt, text_chunks=[ node.get_content(metadata_mode=MetadataMode.LLM) for node in half1 ], ) text_chunk1 = "\n".join(truncated_chunks) summary1 = self._llm.predict(self.summary_prompt, context_str=text_chunk1) node1 = TextNode(text=summary1) self.index_graph.insert(node1, children_nodes=half1) truncated_chunks = self._prompt_helper.truncate( prompt=self.summary_prompt, text_chunks=[ node.get_content(metadata_mode=MetadataMode.LLM) for node in half2 ], ) text_chunk2 = "\n".join(truncated_chunks) summary2 = self._llm.predict(self.summary_prompt, context_str=text_chunk2) node2 = TextNode(text=summary2) self.index_graph.insert(node2, children_nodes=half2) # insert half1 and half2 as new children of parent_node # first remove child indices from parent node if parent_node is not None: self.index_graph.node_id_to_children_ids[parent_node.node_id] = [] else: self.index_graph.root_nodes = {} self.index_graph.insert_under_parent( node1, parent_node, new_index=self.index_graph.get_index(node1) ) self._docstore.add_documents([node1], allow_update=False) self.index_graph.insert_under_parent( node2, parent_node, new_index=self.index_graph.get_index(node2) ) self._docstore.add_documents([node2], allow_update=False) def _insert_node( self, node: BaseNode, parent_node: Optional[BaseNode] = None ) -> None: """Insert node.""" cur_graph_node_ids = self.index_graph.get_children(parent_node) cur_graph_nodes = self._docstore.get_node_dict(cur_graph_node_ids) cur_graph_node_list = get_sorted_node_list(cur_graph_nodes) # if cur_graph_nodes is empty (start with empty graph), then insert under # parent (insert new root node) if len(cur_graph_nodes) == 0: self._insert_under_parent_and_consolidate(node, parent_node) # check if leaf nodes, then just insert under parent elif len(self.index_graph.get_children(cur_graph_node_list[0])) == 0: self._insert_under_parent_and_consolidate(node, parent_node) # else try to find the right summary node to insert under else: text_splitter = self._prompt_helper.get_text_splitter_given_prompt( prompt=self.insert_prompt, num_chunks=len(cur_graph_node_list), ) numbered_text = get_numbered_text_from_nodes( cur_graph_node_list, text_splitter=text_splitter ) response = self._llm.predict( self.insert_prompt, new_chunk_text=node.get_content(metadata_mode=MetadataMode.LLM), num_chunks=len(cur_graph_node_list), context_list=numbered_text, ) numbers = extract_numbers_given_response(response) if numbers is None or len(numbers) == 0: # NOTE: if we can't extract a number, then we just insert under parent self._insert_under_parent_and_consolidate(node, parent_node) elif int(numbers[0]) > len(cur_graph_node_list): # NOTE: if number is out of range, then we just insert under parent self._insert_under_parent_and_consolidate(node, parent_node) else: selected_node = cur_graph_node_list[int(numbers[0]) - 1] self._insert_node(node, selected_node) # now we need to update summary for parent node, since we # need to bubble updated summaries up the tree if parent_node is not None: # refetch children cur_graph_node_ids = self.index_graph.get_children(parent_node) cur_graph_nodes = self._docstore.get_node_dict(cur_graph_node_ids) cur_graph_node_list = get_sorted_node_list(cur_graph_nodes) truncated_chunks = self._prompt_helper.truncate( prompt=self.summary_prompt, text_chunks=[ node.get_content(metadata_mode=MetadataMode.LLM) for 
node in cur_graph_node_list ], ) text_chunk = "\n".join(truncated_chunks) new_summary = self._llm.predict(self.summary_prompt, context_str=text_chunk) parent_node.set_content(new_summary) def insert(self, nodes: Sequence[BaseNode]) -> None: """Insert into index_graph.""" for node in nodes: self._insert_node(node)
[ "llama_index.core.indices.tree.utils.get_numbered_text_from_nodes", "llama_index.core.settings.llm_from_settings_or_context", "llama_index.core.storage.docstore.registry.get_default_docstore", "llama_index.core.indices.utils.extract_numbers_given_response", "llama_index.core.schema.TextNode", "llama_index.core.indices.utils.get_sorted_node_list", "llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata" ]
[((5228, 5265), 'llama_index.core.indices.utils.get_sorted_node_list', 'get_sorted_node_list', (['cur_graph_nodes'], {}), '(cur_graph_nodes)\n', (5248, 5265), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n'), ((1733, 1788), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (1761, 1788), False, 'from llama_index.core.settings import Settings, llm_from_settings_or_context\n'), ((1846, 1896), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', (['self._llm.metadata'], {}), '(self._llm.metadata)\n', (1876, 1896), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((1957, 1979), 'llama_index.core.storage.docstore.registry.get_default_docstore', 'get_default_docstore', ([], {}), '()\n', (1977, 1979), False, 'from llama_index.core.storage.docstore.registry import get_default_docstore\n'), ((2786, 2823), 'llama_index.core.indices.utils.get_sorted_node_list', 'get_sorted_node_list', (['cur_graph_nodes'], {}), '(cur_graph_nodes)\n', (2806, 2823), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n'), ((3577, 3600), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'summary1'}), '(text=summary1)\n', (3585, 3600), False, 'from llama_index.core.schema import BaseNode, MetadataMode, TextNode\n'), ((4083, 4106), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'summary2'}), '(text=summary2)\n', (4091, 4106), False, 'from llama_index.core.schema import BaseNode, MetadataMode, TextNode\n'), ((7414, 7451), 'llama_index.core.indices.utils.get_sorted_node_list', 'get_sorted_node_list', (['cur_graph_nodes'], {}), '(cur_graph_nodes)\n', (7434, 7451), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n'), ((6009, 6087), 'llama_index.core.indices.tree.utils.get_numbered_text_from_nodes', 'get_numbered_text_from_nodes', (['cur_graph_node_list'], {'text_splitter': 'text_splitter'}), '(cur_graph_node_list, text_splitter=text_splitter)\n', (6037, 6087), False, 'from llama_index.core.indices.tree.utils import get_numbered_text_from_nodes\n'), ((6410, 6450), 'llama_index.core.indices.utils.extract_numbers_given_response', 'extract_numbers_given_response', (['response'], {}), '(response)\n', (6440, 6450), False, 'from llama_index.core.indices.utils import extract_numbers_given_response, get_sorted_node_list\n')]
"""JSON node parser.""" import json from typing import Any, Dict, Generator, List, Optional, Sequence from llama_index.core.callbacks.base import CallbackManager from llama_index.core.node_parser.interface import NodeParser from llama_index.core.node_parser.node_utils import build_nodes_from_splits from llama_index.core.schema import BaseNode, MetadataMode, TextNode from llama_index.core.utils import get_tqdm_iterable class JSONNodeParser(NodeParser): """JSON node parser. Splits a document into Nodes using custom JSON splitting logic. Args: include_metadata (bool): whether to include metadata in nodes include_prev_next_rel (bool): whether to include prev/next relationships """ @classmethod def from_defaults( cls, include_metadata: bool = True, include_prev_next_rel: bool = True, callback_manager: Optional[CallbackManager] = None, ) -> "JSONNodeParser": callback_manager = callback_manager or CallbackManager([]) return cls( include_metadata=include_metadata, include_prev_next_rel=include_prev_next_rel, callback_manager=callback_manager, ) @classmethod def class_name(cls) -> str: """Get class name.""" return "JSONNodeParser" def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any ) -> List[BaseNode]: all_nodes: List[BaseNode] = [] nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes") for node in nodes_with_progress: nodes = self.get_nodes_from_node(node) all_nodes.extend(nodes) return all_nodes def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]: """Get nodes from document.""" text = node.get_content(metadata_mode=MetadataMode.NONE) try: data = json.loads(text) except json.JSONDecodeError: # Handle invalid JSON input here return [] json_nodes = [] if isinstance(data, dict): lines = [*self._depth_first_yield(data, 0, [])] json_nodes.extend( build_nodes_from_splits(["\n".join(lines)], node, id_func=self.id_func) ) elif isinstance(data, list): for json_object in data: lines = [*self._depth_first_yield(json_object, 0, [])] json_nodes.extend( build_nodes_from_splits( ["\n".join(lines)], node, id_func=self.id_func ) ) else: raise ValueError("JSON is invalid") return json_nodes def _depth_first_yield( self, json_data: Dict, levels_back: int, path: List[str] ) -> Generator[str, None, None]: """Do depth first yield of all of the leaf nodes of a JSON. Combines keys in the JSON tree using spaces. If levels_back is set to 0, prints all levels. """ if isinstance(json_data, dict): for key, value in json_data.items(): new_path = path[:] new_path.append(key) yield from self._depth_first_yield(value, levels_back, new_path) elif isinstance(json_data, list): for _, value in enumerate(json_data): yield from self._depth_first_yield(value, levels_back, path) else: new_path = path[-levels_back:] new_path.append(str(json_data)) yield " ".join(new_path)
[ "llama_index.core.callbacks.base.CallbackManager", "llama_index.core.utils.get_tqdm_iterable" ]
[((1510, 1566), 'llama_index.core.utils.get_tqdm_iterable', 'get_tqdm_iterable', (['nodes', 'show_progress', '"""Parsing nodes"""'], {}), "(nodes, show_progress, 'Parsing nodes')\n", (1527, 1566), False, 'from llama_index.core.utils import get_tqdm_iterable\n'), ((995, 1014), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (1010, 1014), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((1928, 1944), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (1938, 1944), False, 'import json\n')]
"""JSON node parser.""" import json from typing import Any, Dict, Generator, List, Optional, Sequence from llama_index.core.callbacks.base import CallbackManager from llama_index.core.node_parser.interface import NodeParser from llama_index.core.node_parser.node_utils import build_nodes_from_splits from llama_index.core.schema import BaseNode, MetadataMode, TextNode from llama_index.core.utils import get_tqdm_iterable class JSONNodeParser(NodeParser): """JSON node parser. Splits a document into Nodes using custom JSON splitting logic. Args: include_metadata (bool): whether to include metadata in nodes include_prev_next_rel (bool): whether to include prev/next relationships """ @classmethod def from_defaults( cls, include_metadata: bool = True, include_prev_next_rel: bool = True, callback_manager: Optional[CallbackManager] = None, ) -> "JSONNodeParser": callback_manager = callback_manager or CallbackManager([]) return cls( include_metadata=include_metadata, include_prev_next_rel=include_prev_next_rel, callback_manager=callback_manager, ) @classmethod def class_name(cls) -> str: """Get class name.""" return "JSONNodeParser" def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any ) -> List[BaseNode]: all_nodes: List[BaseNode] = [] nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes") for node in nodes_with_progress: nodes = self.get_nodes_from_node(node) all_nodes.extend(nodes) return all_nodes def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]: """Get nodes from document.""" text = node.get_content(metadata_mode=MetadataMode.NONE) try: data = json.loads(text) except json.JSONDecodeError: # Handle invalid JSON input here return [] json_nodes = [] if isinstance(data, dict): lines = [*self._depth_first_yield(data, 0, [])] json_nodes.extend( build_nodes_from_splits(["\n".join(lines)], node, id_func=self.id_func) ) elif isinstance(data, list): for json_object in data: lines = [*self._depth_first_yield(json_object, 0, [])] json_nodes.extend( build_nodes_from_splits( ["\n".join(lines)], node, id_func=self.id_func ) ) else: raise ValueError("JSON is invalid") return json_nodes def _depth_first_yield( self, json_data: Dict, levels_back: int, path: List[str] ) -> Generator[str, None, None]: """Do depth first yield of all of the leaf nodes of a JSON. Combines keys in the JSON tree using spaces. If levels_back is set to 0, prints all levels. """ if isinstance(json_data, dict): for key, value in json_data.items(): new_path = path[:] new_path.append(key) yield from self._depth_first_yield(value, levels_back, new_path) elif isinstance(json_data, list): for _, value in enumerate(json_data): yield from self._depth_first_yield(value, levels_back, path) else: new_path = path[-levels_back:] new_path.append(str(json_data)) yield " ".join(new_path)
[ "llama_index.core.callbacks.base.CallbackManager", "llama_index.core.utils.get_tqdm_iterable" ]
[((1510, 1566), 'llama_index.core.utils.get_tqdm_iterable', 'get_tqdm_iterable', (['nodes', 'show_progress', '"""Parsing nodes"""'], {}), "(nodes, show_progress, 'Parsing nodes')\n", (1527, 1566), False, 'from llama_index.core.utils import get_tqdm_iterable\n'), ((995, 1014), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (1010, 1014), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((1928, 1944), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (1938, 1944), False, 'import json\n')]
"""JSON node parser.""" import json from typing import Any, Dict, Generator, List, Optional, Sequence from llama_index.core.callbacks.base import CallbackManager from llama_index.core.node_parser.interface import NodeParser from llama_index.core.node_parser.node_utils import build_nodes_from_splits from llama_index.core.schema import BaseNode, MetadataMode, TextNode from llama_index.core.utils import get_tqdm_iterable class JSONNodeParser(NodeParser): """JSON node parser. Splits a document into Nodes using custom JSON splitting logic. Args: include_metadata (bool): whether to include metadata in nodes include_prev_next_rel (bool): whether to include prev/next relationships """ @classmethod def from_defaults( cls, include_metadata: bool = True, include_prev_next_rel: bool = True, callback_manager: Optional[CallbackManager] = None, ) -> "JSONNodeParser": callback_manager = callback_manager or CallbackManager([]) return cls( include_metadata=include_metadata, include_prev_next_rel=include_prev_next_rel, callback_manager=callback_manager, ) @classmethod def class_name(cls) -> str: """Get class name.""" return "JSONNodeParser" def _parse_nodes( self, nodes: Sequence[BaseNode], show_progress: bool = False, **kwargs: Any ) -> List[BaseNode]: all_nodes: List[BaseNode] = [] nodes_with_progress = get_tqdm_iterable(nodes, show_progress, "Parsing nodes") for node in nodes_with_progress: nodes = self.get_nodes_from_node(node) all_nodes.extend(nodes) return all_nodes def get_nodes_from_node(self, node: BaseNode) -> List[TextNode]: """Get nodes from document.""" text = node.get_content(metadata_mode=MetadataMode.NONE) try: data = json.loads(text) except json.JSONDecodeError: # Handle invalid JSON input here return [] json_nodes = [] if isinstance(data, dict): lines = [*self._depth_first_yield(data, 0, [])] json_nodes.extend( build_nodes_from_splits(["\n".join(lines)], node, id_func=self.id_func) ) elif isinstance(data, list): for json_object in data: lines = [*self._depth_first_yield(json_object, 0, [])] json_nodes.extend( build_nodes_from_splits( ["\n".join(lines)], node, id_func=self.id_func ) ) else: raise ValueError("JSON is invalid") return json_nodes def _depth_first_yield( self, json_data: Dict, levels_back: int, path: List[str] ) -> Generator[str, None, None]: """Do depth first yield of all of the leaf nodes of a JSON. Combines keys in the JSON tree using spaces. If levels_back is set to 0, prints all levels. """ if isinstance(json_data, dict): for key, value in json_data.items(): new_path = path[:] new_path.append(key) yield from self._depth_first_yield(value, levels_back, new_path) elif isinstance(json_data, list): for _, value in enumerate(json_data): yield from self._depth_first_yield(value, levels_back, path) else: new_path = path[-levels_back:] new_path.append(str(json_data)) yield " ".join(new_path)
[ "llama_index.core.callbacks.base.CallbackManager", "llama_index.core.utils.get_tqdm_iterable" ]
[((1510, 1566), 'llama_index.core.utils.get_tqdm_iterable', 'get_tqdm_iterable', (['nodes', 'show_progress', '"""Parsing nodes"""'], {}), "(nodes, show_progress, 'Parsing nodes')\n", (1527, 1566), False, 'from llama_index.core.utils import get_tqdm_iterable\n'), ((995, 1014), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (1010, 1014), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((1928, 1944), 'json.loads', 'json.loads', (['text'], {}), '(text)\n', (1938, 1944), False, 'import json\n')]
from typing import TYPE_CHECKING, Any, Optional from llama_index.legacy.core.base_query_engine import BaseQueryEngine if TYPE_CHECKING: from llama_index.legacy.langchain_helpers.agents.tools import ( LlamaIndexTool, ) from llama_index.legacy.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput DEFAULT_NAME = "query_engine_tool" DEFAULT_DESCRIPTION = """Useful for running a natural language query against a knowledge base and get back a natural language response. """ class QueryEngineTool(AsyncBaseTool): """Query engine tool. A tool making use of a query engine. Args: query_engine (BaseQueryEngine): A query engine. metadata (ToolMetadata): The associated metadata of the query engine. """ def __init__( self, query_engine: BaseQueryEngine, metadata: ToolMetadata, resolve_input_errors: bool = True, ) -> None: self._query_engine = query_engine self._metadata = metadata self._resolve_input_errors = resolve_input_errors @classmethod def from_defaults( cls, query_engine: BaseQueryEngine, name: Optional[str] = None, description: Optional[str] = None, resolve_input_errors: bool = True, ) -> "QueryEngineTool": name = name or DEFAULT_NAME description = description or DEFAULT_DESCRIPTION metadata = ToolMetadata(name=name, description=description) return cls( query_engine=query_engine, metadata=metadata, resolve_input_errors=resolve_input_errors, ) @property def query_engine(self) -> BaseQueryEngine: return self._query_engine @property def metadata(self) -> ToolMetadata: return self._metadata def call(self, *args: Any, **kwargs: Any) -> ToolOutput: if args is not None and len(args) > 0: query_str = str(args[0]) elif kwargs is not None and "input" in kwargs: # NOTE: this assumes our default function schema of `input` query_str = kwargs["input"] elif kwargs is not None and self._resolve_input_errors: query_str = str(kwargs) else: raise ValueError( "Cannot call query engine without specifying `input` parameter." ) response = self._query_engine.query(query_str) return ToolOutput( content=str(response), tool_name=self.metadata.name, raw_input={"input": query_str}, raw_output=response, ) async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput: if args is not None and len(args) > 0: query_str = str(args[0]) elif kwargs is not None and "input" in kwargs: # NOTE: this assumes our default function schema of `input` query_str = kwargs["input"] elif kwargs is not None and self._resolve_input_errors: query_str = str(kwargs) else: raise ValueError("Cannot call query engine without inputs") response = await self._query_engine.aquery(query_str) return ToolOutput( content=str(response), tool_name=self.metadata.name, raw_input={"input": query_str}, raw_output=response, ) def as_langchain_tool(self) -> "LlamaIndexTool": from llama_index.legacy.langchain_helpers.agents.tools import ( IndexToolConfig, LlamaIndexTool, ) tool_config = IndexToolConfig( query_engine=self.query_engine, name=self.metadata.name, description=self.metadata.description, ) return LlamaIndexTool.from_tool_config(tool_config=tool_config)
[ "llama_index.legacy.langchain_helpers.agents.tools.LlamaIndexTool.from_tool_config", "llama_index.legacy.tools.types.ToolMetadata", "llama_index.legacy.langchain_helpers.agents.tools.IndexToolConfig" ]
[((1408, 1456), 'llama_index.legacy.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': 'name', 'description': 'description'}), '(name=name, description=description)\n', (1420, 1456), False, 'from llama_index.legacy.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput\n'), ((3568, 3683), 'llama_index.legacy.langchain_helpers.agents.tools.IndexToolConfig', 'IndexToolConfig', ([], {'query_engine': 'self.query_engine', 'name': 'self.metadata.name', 'description': 'self.metadata.description'}), '(query_engine=self.query_engine, name=self.metadata.name,\n description=self.metadata.description)\n', (3583, 3683), False, 'from llama_index.legacy.langchain_helpers.agents.tools import IndexToolConfig, LlamaIndexTool\n'), ((3742, 3798), 'llama_index.legacy.langchain_helpers.agents.tools.LlamaIndexTool.from_tool_config', 'LlamaIndexTool.from_tool_config', ([], {'tool_config': 'tool_config'}), '(tool_config=tool_config)\n', (3773, 3798), False, 'from llama_index.legacy.langchain_helpers.agents.tools import IndexToolConfig, LlamaIndexTool\n')]
from llama_index.core.llama_dataset import download_llama_dataset from llama_index.core.llama_pack import download_llama_pack from llama_index.core import VectorStoreIndex async def main(): # DOWNLOAD LLAMADATASET rag_dataset, documents = download_llama_dataset( "EvaluatingLlmSurveyPaperDataset", "./data" ) # BUILD BASIC RAG PIPELINE index = VectorStoreIndex.from_documents(documents=documents) query_engine = index.as_query_engine() # EVALUATE WITH PACK RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack") rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset) ############################################################################ # NOTE: If have a lower tier subscription for OpenAI API like Usage Tier 1 # # then you'll need to use different batch_size and sleep_time_in_seconds. # # For Usage Tier 1, settings that seemed to work well were batch_size=5, # # and sleep_time_in_seconds=15 (as of December 2023.) # ############################################################################ benchmark_df = await rag_evaluator.arun( batch_size=20, # batches the number of openai api calls to make sleep_time_in_seconds=1, # number of seconds sleep before making an api call ) print(benchmark_df) if __name__ == "__main__": main()
[ "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack", "llama_index.core.VectorStoreIndex.from_documents" ]
[((249, 316), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""EvaluatingLlmSurveyPaperDataset"""', '"""./data"""'], {}), "('EvaluatingLlmSurveyPaperDataset', './data')\n", (271, 316), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((375, 427), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (406, 427), False, 'from llama_index.core import VectorStoreIndex\n'), ((520, 569), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack"""'], {}), "('RagEvaluatorPack', './pack')\n", (539, 569), False, 'from llama_index.core.llama_pack import download_llama_pack\n')]
import json import os import warnings from enum import Enum from typing import Any, Callable, Dict, List, Literal, Optional, Sequence from deprecated import deprecated from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks.base import CallbackManager from llama_index.legacy.constants import DEFAULT_EMBED_BATCH_SIZE from llama_index.legacy.core.embeddings.base import BaseEmbedding, Embedding from llama_index.legacy.core.llms.types import ChatMessage from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode class PROVIDERS(str, Enum): AMAZON = "amazon" COHERE = "cohere" class Models(str, Enum): TITAN_EMBEDDING = "amazon.titan-embed-text-v1" TITAN_EMBEDDING_G1_TEXT_02 = "amazon.titan-embed-g1-text-02" COHERE_EMBED_ENGLISH_V3 = "cohere.embed-english-v3" COHERE_EMBED_MULTILINGUAL_V3 = "cohere.embed-multilingual-v3" PROVIDER_SPECIFIC_IDENTIFIERS = { PROVIDERS.AMAZON.value: { "get_embeddings_func": lambda r: r.get("embedding"), }, PROVIDERS.COHERE.value: { "get_embeddings_func": lambda r: r.get("embeddings")[0], }, } class BedrockEmbedding(BaseEmbedding): model: str = Field(description="The modelId of the Bedrock model to use.") profile_name: Optional[str] = Field( description="The name of aws profile to use. If not given, then the default profile is used.", exclude=True, ) aws_access_key_id: Optional[str] = Field( description="AWS Access Key ID to use", exclude=True ) aws_secret_access_key: Optional[str] = Field( description="AWS Secret Access Key to use", exclude=True ) aws_session_token: Optional[str] = Field( description="AWS Session Token to use", exclude=True ) region_name: Optional[str] = Field( description="AWS region name to use. Uses region configured in AWS CLI if not passed", exclude=True, ) botocore_session: Optional[Any] = Field( description="Use this Botocore session instead of creating a new default one.", exclude=True, ) botocore_config: Optional[Any] = Field( description="Custom configuration object to use instead of the default generated one.", exclude=True, ) max_retries: int = Field( default=10, description="The maximum number of API retries.", gt=0 ) timeout: float = Field( default=60.0, description="The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts.", ) additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional kwargs for the bedrock client." 
) _client: Any = PrivateAttr() def __init__( self, model: str = Models.TITAN_EMBEDDING, profile_name: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, region_name: Optional[str] = None, client: Optional[Any] = None, botocore_session: Optional[Any] = None, botocore_config: Optional[Any] = None, additional_kwargs: Optional[Dict[str, Any]] = None, max_retries: int = 10, timeout: float = 60.0, callback_manager: Optional[CallbackManager] = None, # base class system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, **kwargs: Any, ): additional_kwargs = additional_kwargs or {} session_kwargs = { "profile_name": profile_name, "region_name": region_name, "aws_access_key_id": aws_access_key_id, "aws_secret_access_key": aws_secret_access_key, "aws_session_token": aws_session_token, "botocore_session": botocore_session, } config = None try: import boto3 from botocore.config import Config config = ( Config( retries={"max_attempts": max_retries, "mode": "standard"}, connect_timeout=timeout, read_timeout=timeout, ) if botocore_config is None else botocore_config ) session = boto3.Session(**session_kwargs) except ImportError: raise ImportError( "boto3 package not found, install with" "'pip install boto3'" ) # Prior to general availability, custom boto3 wheel files were # distributed that used the bedrock service to invokeModel. # This check prevents any services still using those wheel files # from breaking if client is not None: self._client = client elif "bedrock-runtime" in session.get_available_services(): self._client = session.client("bedrock-runtime", config=config) else: self._client = session.client("bedrock", config=config) super().__init__( model=model, max_retries=max_retries, timeout=timeout, botocore_config=config, profile_name=profile_name, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, region_name=region_name, botocore_session=botocore_session, additional_kwargs=additional_kwargs, callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, **kwargs, ) @staticmethod def list_supported_models() -> Dict[str, List[str]]: list_models = {} for provider in PROVIDERS: list_models[provider.value] = [m.value for m in Models] return list_models @classmethod def class_name(self) -> str: return "BedrockEmbedding" @deprecated( version="0.9.48", reason=( "Use the provided kwargs in the constructor, " "set_credentials will be removed in future releases." ), action="once", ) def set_credentials( self, aws_region: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, aws_profile: Optional[str] = None, ) -> None: aws_region = aws_region or os.getenv("AWS_REGION") aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID") aws_secret_access_key = aws_secret_access_key or os.getenv( "AWS_SECRET_ACCESS_KEY" ) aws_session_token = aws_session_token or os.getenv("AWS_SESSION_TOKEN") if aws_region is None: warnings.warn( "AWS_REGION not found. 
Set environment variable AWS_REGION or set aws_region" ) if aws_access_key_id is None: warnings.warn( "AWS_ACCESS_KEY_ID not found. Set environment variable AWS_ACCESS_KEY_ID or set aws_access_key_id" ) assert aws_access_key_id is not None if aws_secret_access_key is None: warnings.warn( "AWS_SECRET_ACCESS_KEY not found. Set environment variable AWS_SECRET_ACCESS_KEY or set aws_secret_access_key" ) assert aws_secret_access_key is not None if aws_session_token is None: warnings.warn( "AWS_SESSION_TOKEN not found. Set environment variable AWS_SESSION_TOKEN or set aws_session_token" ) assert aws_session_token is not None session_kwargs = { "profile_name": aws_profile, "region_name": aws_region, "aws_access_key_id": aws_access_key_id, "aws_secret_access_key": aws_secret_access_key, "aws_session_token": aws_session_token, } try: import boto3 session = boto3.Session(**session_kwargs) except ImportError: raise ImportError( "boto3 package not found, install with" "'pip install boto3'" ) if "bedrock-runtime" in session.get_available_services(): self._client = session.client("bedrock-runtime") else: self._client = session.client("bedrock") @classmethod @deprecated( version="0.9.48", reason=( "Use the provided kwargs in the constructor, " "set_credentials will be removed in future releases." ), action="once", ) def from_credentials( cls, model_name: str = Models.TITAN_EMBEDDING, aws_region: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, aws_profile: Optional[str] = None, embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, callback_manager: Optional[CallbackManager] = None, verbose: bool = False, ) -> "BedrockEmbedding": """ Instantiate using AWS credentials. Args: model_name (str) : Name of the model aws_access_key_id (str): AWS access key ID aws_secret_access_key (str): AWS secret access key aws_session_token (str): AWS session token aws_region (str): AWS region where the service is located aws_profile (str): AWS profile, when None, default profile is chosen automatically Example: .. 
code-block:: python from llama_index.embeddings import BedrockEmbedding # Define the model name model_name = "your_model_name" embeddings = BedrockEmbedding.from_credentials( model_name, aws_access_key_id, aws_secret_access_key, aws_session_token, aws_region, aws_profile, ) """ session_kwargs = { "profile_name": aws_profile, "region_name": aws_region, "aws_access_key_id": aws_access_key_id, "aws_secret_access_key": aws_secret_access_key, "aws_session_token": aws_session_token, } try: import boto3 session = boto3.Session(**session_kwargs) except ImportError: raise ImportError( "boto3 package not found, install with" "'pip install boto3'" ) if "bedrock-runtime" in session.get_available_services(): client = session.client("bedrock-runtime") else: client = session.client("bedrock") return cls( client=client, model=model_name, embed_batch_size=embed_batch_size, callback_manager=callback_manager, verbose=verbose, ) def _get_embedding(self, payload: str, type: Literal["text", "query"]) -> Embedding: if self._client is None: self.set_credentials() if self._client is None: raise ValueError("Client not set") provider = self.model.split(".")[0] request_body = self._get_request_body(provider, payload, type) response = self._client.invoke_model( body=request_body, modelId=self.model, accept="application/json", contentType="application/json", ) resp = json.loads(response.get("body").read().decode("utf-8")) identifiers = PROVIDER_SPECIFIC_IDENTIFIERS.get(provider, None) if identifiers is None: raise ValueError("Provider not supported") return identifiers["get_embeddings_func"](resp) def _get_query_embedding(self, query: str) -> Embedding: return self._get_embedding(query, "query") def _get_text_embedding(self, text: str) -> Embedding: return self._get_embedding(text, "text") def _get_request_body( self, provider: str, payload: str, type: Literal["text", "query"] ) -> Any: """Build the request body as per the provider. Currently supported providers are amazon, cohere. amazon: Sample Payload of type str "Hello World!" cohere: Sample Payload of type dict of following format { 'texts': ["This is a test document", "This is another document"], 'input_type': 'search_document', 'truncate': 'NONE' } """ if provider == PROVIDERS.AMAZON: request_body = json.dumps({"inputText": payload}) elif provider == PROVIDERS.COHERE: input_types = { "text": "search_document", "query": "search_query", } request_body = json.dumps( { "texts": [payload], "input_type": input_types[type], "truncate": "NONE", } ) else: raise ValueError("Provider not supported") return request_body async def _aget_query_embedding(self, query: str) -> Embedding: return self._get_embedding(query, "query") async def _aget_text_embedding(self, text: str) -> Embedding: return self._get_embedding(text, "text")
[ "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.bridge.pydantic.Field" ]
[((1210, 1271), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The modelId of the Bedrock model to use."""'}), "(description='The modelId of the Bedrock model to use.')\n", (1215, 1271), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1306, 1430), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The name of aws profile to use. If not given, then the default profile is used."""', 'exclude': '(True)'}), "(description=\n 'The name of aws profile to use. If not given, then the default profile is used.'\n , exclude=True)\n", (1311, 1430), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1483, 1542), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Access Key ID to use"""', 'exclude': '(True)'}), "(description='AWS Access Key ID to use', exclude=True)\n", (1488, 1542), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1600, 1663), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Secret Access Key to use"""', 'exclude': '(True)'}), "(description='AWS Secret Access Key to use', exclude=True)\n", (1605, 1663), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1717, 1776), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Session Token to use"""', 'exclude': '(True)'}), "(description='AWS Session Token to use', exclude=True)\n", (1722, 1776), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1824, 1939), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS region name to use. Uses region configured in AWS CLI if not passed"""', 'exclude': '(True)'}), "(description=\n 'AWS region name to use. Uses region configured in AWS CLI if not passed',\n exclude=True)\n", (1829, 1939), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1992, 2100), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Use this Botocore session instead of creating a new default one."""', 'exclude': '(True)'}), "(description=\n 'Use this Botocore session instead of creating a new default one.',\n exclude=True)\n", (1997, 2100), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2152, 2268), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Custom configuration object to use instead of the default generated one."""', 'exclude': '(True)'}), "(description=\n 'Custom configuration object to use instead of the default generated one.',\n exclude=True)\n", (2157, 2268), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2306, 2379), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of API retries."""', 'gt': '(0)'}), "(default=10, description='The maximum number of API retries.', gt=0)\n", (2311, 2379), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2415, 2563), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(60.0)', 'description': '"""The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts."""'}), "(default=60.0, description=\n 'The timeout for the Bedrock API request in seconds. 
It will be used for both connect and read timeouts.'\n )\n", (2420, 2563), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2617, 2706), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the bedrock client."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the bedrock client.')\n", (2622, 2706), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2735, 2748), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2746, 2748), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((6449, 6608), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""0.9.48"""', 'reason': '"""Use the provided kwargs in the constructor, set_credentials will be removed in future releases."""', 'action': '"""once"""'}), "(version='0.9.48', reason=\n 'Use the provided kwargs in the constructor, set_credentials will be removed in future releases.'\n , action='once')\n", (6459, 6608), False, 'from deprecated import deprecated\n'), ((8956, 9115), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""0.9.48"""', 'reason': '"""Use the provided kwargs in the constructor, set_credentials will be removed in future releases."""', 'action': '"""once"""'}), "(version='0.9.48', reason=\n 'Use the provided kwargs in the constructor, set_credentials will be removed in future releases.'\n , action='once')\n", (8966, 9115), False, 'from deprecated import deprecated\n'), ((4611, 4642), 'boto3.Session', 'boto3.Session', ([], {}), '(**session_kwargs)\n', (4624, 4642), False, 'import boto3\n'), ((6994, 7017), 'os.getenv', 'os.getenv', (['"""AWS_REGION"""'], {}), "('AWS_REGION')\n", (7003, 7017), False, 'import os\n'), ((7067, 7097), 'os.getenv', 'os.getenv', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (7076, 7097), False, 'import os\n'), ((7155, 7189), 'os.getenv', 'os.getenv', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (7164, 7189), False, 'import os\n'), ((7261, 7291), 'os.getenv', 'os.getenv', (['"""AWS_SESSION_TOKEN"""'], {}), "('AWS_SESSION_TOKEN')\n", (7270, 7291), False, 'import os\n'), ((7336, 7438), 'warnings.warn', 'warnings.warn', (['"""AWS_REGION not found. Set environment variable AWS_REGION or set aws_region"""'], {}), "(\n 'AWS_REGION not found. Set environment variable AWS_REGION or set aws_region'\n )\n", (7349, 7438), False, 'import warnings\n'), ((7510, 7633), 'warnings.warn', 'warnings.warn', (['"""AWS_ACCESS_KEY_ID not found. Set environment variable AWS_ACCESS_KEY_ID or set aws_access_key_id"""'], {}), "(\n 'AWS_ACCESS_KEY_ID not found. Set environment variable AWS_ACCESS_KEY_ID or set aws_access_key_id'\n )\n", (7523, 7633), False, 'import warnings\n'), ((7758, 7893), 'warnings.warn', 'warnings.warn', (['"""AWS_SECRET_ACCESS_KEY not found. Set environment variable AWS_SECRET_ACCESS_KEY or set aws_secret_access_key"""'], {}), "(\n 'AWS_SECRET_ACCESS_KEY not found. Set environment variable AWS_SECRET_ACCESS_KEY or set aws_secret_access_key'\n )\n", (7771, 7893), False, 'import warnings\n'), ((8018, 8141), 'warnings.warn', 'warnings.warn', (['"""AWS_SESSION_TOKEN not found. Set environment variable AWS_SESSION_TOKEN or set aws_session_token"""'], {}), "(\n 'AWS_SESSION_TOKEN not found. 
Set environment variable AWS_SESSION_TOKEN or set aws_session_token'\n )\n", (8031, 8141), False, 'import warnings\n'), ((8555, 8586), 'boto3.Session', 'boto3.Session', ([], {}), '(**session_kwargs)\n', (8568, 8586), False, 'import boto3\n'), ((11051, 11082), 'boto3.Session', 'boto3.Session', ([], {}), '(**session_kwargs)\n', (11064, 11082), False, 'import boto3\n'), ((13358, 13392), 'json.dumps', 'json.dumps', (["{'inputText': payload}"], {}), "({'inputText': payload})\n", (13368, 13392), False, 'import json\n'), ((4303, 4419), 'botocore.config.Config', 'Config', ([], {'retries': "{'max_attempts': max_retries, 'mode': 'standard'}", 'connect_timeout': 'timeout', 'read_timeout': 'timeout'}), "(retries={'max_attempts': max_retries, 'mode': 'standard'},\n connect_timeout=timeout, read_timeout=timeout)\n", (4309, 4419), False, 'from botocore.config import Config\n'), ((13589, 13678), 'json.dumps', 'json.dumps', (["{'texts': [payload], 'input_type': input_types[type], 'truncate': 'NONE'}"], {}), "({'texts': [payload], 'input_type': input_types[type], 'truncate':\n 'NONE'})\n", (13599, 13678), False, 'import json\n')]
import json import os import warnings from enum import Enum from typing import Any, Callable, Dict, List, Literal, Optional, Sequence from deprecated import deprecated from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks.base import CallbackManager from llama_index.legacy.constants import DEFAULT_EMBED_BATCH_SIZE from llama_index.legacy.core.embeddings.base import BaseEmbedding, Embedding from llama_index.legacy.core.llms.types import ChatMessage from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode class PROVIDERS(str, Enum): AMAZON = "amazon" COHERE = "cohere" class Models(str, Enum): TITAN_EMBEDDING = "amazon.titan-embed-text-v1" TITAN_EMBEDDING_G1_TEXT_02 = "amazon.titan-embed-g1-text-02" COHERE_EMBED_ENGLISH_V3 = "cohere.embed-english-v3" COHERE_EMBED_MULTILINGUAL_V3 = "cohere.embed-multilingual-v3" PROVIDER_SPECIFIC_IDENTIFIERS = { PROVIDERS.AMAZON.value: { "get_embeddings_func": lambda r: r.get("embedding"), }, PROVIDERS.COHERE.value: { "get_embeddings_func": lambda r: r.get("embeddings")[0], }, } class BedrockEmbedding(BaseEmbedding): model: str = Field(description="The modelId of the Bedrock model to use.") profile_name: Optional[str] = Field( description="The name of aws profile to use. If not given, then the default profile is used.", exclude=True, ) aws_access_key_id: Optional[str] = Field( description="AWS Access Key ID to use", exclude=True ) aws_secret_access_key: Optional[str] = Field( description="AWS Secret Access Key to use", exclude=True ) aws_session_token: Optional[str] = Field( description="AWS Session Token to use", exclude=True ) region_name: Optional[str] = Field( description="AWS region name to use. Uses region configured in AWS CLI if not passed", exclude=True, ) botocore_session: Optional[Any] = Field( description="Use this Botocore session instead of creating a new default one.", exclude=True, ) botocore_config: Optional[Any] = Field( description="Custom configuration object to use instead of the default generated one.", exclude=True, ) max_retries: int = Field( default=10, description="The maximum number of API retries.", gt=0 ) timeout: float = Field( default=60.0, description="The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts.", ) additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional kwargs for the bedrock client." 
) _client: Any = PrivateAttr() def __init__( self, model: str = Models.TITAN_EMBEDDING, profile_name: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, region_name: Optional[str] = None, client: Optional[Any] = None, botocore_session: Optional[Any] = None, botocore_config: Optional[Any] = None, additional_kwargs: Optional[Dict[str, Any]] = None, max_retries: int = 10, timeout: float = 60.0, callback_manager: Optional[CallbackManager] = None, # base class system_prompt: Optional[str] = None, messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None, completion_to_prompt: Optional[Callable[[str], str]] = None, pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT, output_parser: Optional[BaseOutputParser] = None, **kwargs: Any, ): additional_kwargs = additional_kwargs or {} session_kwargs = { "profile_name": profile_name, "region_name": region_name, "aws_access_key_id": aws_access_key_id, "aws_secret_access_key": aws_secret_access_key, "aws_session_token": aws_session_token, "botocore_session": botocore_session, } config = None try: import boto3 from botocore.config import Config config = ( Config( retries={"max_attempts": max_retries, "mode": "standard"}, connect_timeout=timeout, read_timeout=timeout, ) if botocore_config is None else botocore_config ) session = boto3.Session(**session_kwargs) except ImportError: raise ImportError( "boto3 package not found, install with" "'pip install boto3'" ) # Prior to general availability, custom boto3 wheel files were # distributed that used the bedrock service to invokeModel. # This check prevents any services still using those wheel files # from breaking if client is not None: self._client = client elif "bedrock-runtime" in session.get_available_services(): self._client = session.client("bedrock-runtime", config=config) else: self._client = session.client("bedrock", config=config) super().__init__( model=model, max_retries=max_retries, timeout=timeout, botocore_config=config, profile_name=profile_name, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, aws_session_token=aws_session_token, region_name=region_name, botocore_session=botocore_session, additional_kwargs=additional_kwargs, callback_manager=callback_manager, system_prompt=system_prompt, messages_to_prompt=messages_to_prompt, completion_to_prompt=completion_to_prompt, pydantic_program_mode=pydantic_program_mode, output_parser=output_parser, **kwargs, ) @staticmethod def list_supported_models() -> Dict[str, List[str]]: list_models = {} for provider in PROVIDERS: list_models[provider.value] = [m.value for m in Models] return list_models @classmethod def class_name(self) -> str: return "BedrockEmbedding" @deprecated( version="0.9.48", reason=( "Use the provided kwargs in the constructor, " "set_credentials will be removed in future releases." ), action="once", ) def set_credentials( self, aws_region: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, aws_profile: Optional[str] = None, ) -> None: aws_region = aws_region or os.getenv("AWS_REGION") aws_access_key_id = aws_access_key_id or os.getenv("AWS_ACCESS_KEY_ID") aws_secret_access_key = aws_secret_access_key or os.getenv( "AWS_SECRET_ACCESS_KEY" ) aws_session_token = aws_session_token or os.getenv("AWS_SESSION_TOKEN") if aws_region is None: warnings.warn( "AWS_REGION not found. 
Set environment variable AWS_REGION or set aws_region" ) if aws_access_key_id is None: warnings.warn( "AWS_ACCESS_KEY_ID not found. Set environment variable AWS_ACCESS_KEY_ID or set aws_access_key_id" ) assert aws_access_key_id is not None if aws_secret_access_key is None: warnings.warn( "AWS_SECRET_ACCESS_KEY not found. Set environment variable AWS_SECRET_ACCESS_KEY or set aws_secret_access_key" ) assert aws_secret_access_key is not None if aws_session_token is None: warnings.warn( "AWS_SESSION_TOKEN not found. Set environment variable AWS_SESSION_TOKEN or set aws_session_token" ) assert aws_session_token is not None session_kwargs = { "profile_name": aws_profile, "region_name": aws_region, "aws_access_key_id": aws_access_key_id, "aws_secret_access_key": aws_secret_access_key, "aws_session_token": aws_session_token, } try: import boto3 session = boto3.Session(**session_kwargs) except ImportError: raise ImportError( "boto3 package not found, install with" "'pip install boto3'" ) if "bedrock-runtime" in session.get_available_services(): self._client = session.client("bedrock-runtime") else: self._client = session.client("bedrock") @classmethod @deprecated( version="0.9.48", reason=( "Use the provided kwargs in the constructor, " "set_credentials will be removed in future releases." ), action="once", ) def from_credentials( cls, model_name: str = Models.TITAN_EMBEDDING, aws_region: Optional[str] = None, aws_access_key_id: Optional[str] = None, aws_secret_access_key: Optional[str] = None, aws_session_token: Optional[str] = None, aws_profile: Optional[str] = None, embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, callback_manager: Optional[CallbackManager] = None, verbose: bool = False, ) -> "BedrockEmbedding": """ Instantiate using AWS credentials. Args: model_name (str) : Name of the model aws_access_key_id (str): AWS access key ID aws_secret_access_key (str): AWS secret access key aws_session_token (str): AWS session token aws_region (str): AWS region where the service is located aws_profile (str): AWS profile, when None, default profile is chosen automatically Example: .. 
code-block:: python from llama_index.embeddings import BedrockEmbedding # Define the model name model_name = "your_model_name" embeddings = BedrockEmbedding.from_credentials( model_name, aws_access_key_id, aws_secret_access_key, aws_session_token, aws_region, aws_profile, ) """ session_kwargs = { "profile_name": aws_profile, "region_name": aws_region, "aws_access_key_id": aws_access_key_id, "aws_secret_access_key": aws_secret_access_key, "aws_session_token": aws_session_token, } try: import boto3 session = boto3.Session(**session_kwargs) except ImportError: raise ImportError( "boto3 package not found, install with" "'pip install boto3'" ) if "bedrock-runtime" in session.get_available_services(): client = session.client("bedrock-runtime") else: client = session.client("bedrock") return cls( client=client, model=model_name, embed_batch_size=embed_batch_size, callback_manager=callback_manager, verbose=verbose, ) def _get_embedding(self, payload: str, type: Literal["text", "query"]) -> Embedding: if self._client is None: self.set_credentials() if self._client is None: raise ValueError("Client not set") provider = self.model.split(".")[0] request_body = self._get_request_body(provider, payload, type) response = self._client.invoke_model( body=request_body, modelId=self.model, accept="application/json", contentType="application/json", ) resp = json.loads(response.get("body").read().decode("utf-8")) identifiers = PROVIDER_SPECIFIC_IDENTIFIERS.get(provider, None) if identifiers is None: raise ValueError("Provider not supported") return identifiers["get_embeddings_func"](resp) def _get_query_embedding(self, query: str) -> Embedding: return self._get_embedding(query, "query") def _get_text_embedding(self, text: str) -> Embedding: return self._get_embedding(text, "text") def _get_request_body( self, provider: str, payload: str, type: Literal["text", "query"] ) -> Any: """Build the request body as per the provider. Currently supported providers are amazon, cohere. amazon: Sample Payload of type str "Hello World!" cohere: Sample Payload of type dict of following format { 'texts': ["This is a test document", "This is another document"], 'input_type': 'search_document', 'truncate': 'NONE' } """ if provider == PROVIDERS.AMAZON: request_body = json.dumps({"inputText": payload}) elif provider == PROVIDERS.COHERE: input_types = { "text": "search_document", "query": "search_query", } request_body = json.dumps( { "texts": [payload], "input_type": input_types[type], "truncate": "NONE", } ) else: raise ValueError("Provider not supported") return request_body async def _aget_query_embedding(self, query: str) -> Embedding: return self._get_embedding(query, "query") async def _aget_text_embedding(self, text: str) -> Embedding: return self._get_embedding(text, "text")
[ "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.bridge.pydantic.Field" ]
[((1210, 1271), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The modelId of the Bedrock model to use."""'}), "(description='The modelId of the Bedrock model to use.')\n", (1215, 1271), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1306, 1430), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The name of aws profile to use. If not given, then the default profile is used."""', 'exclude': '(True)'}), "(description=\n 'The name of aws profile to use. If not given, then the default profile is used.'\n , exclude=True)\n", (1311, 1430), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1483, 1542), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Access Key ID to use"""', 'exclude': '(True)'}), "(description='AWS Access Key ID to use', exclude=True)\n", (1488, 1542), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1600, 1663), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Secret Access Key to use"""', 'exclude': '(True)'}), "(description='AWS Secret Access Key to use', exclude=True)\n", (1605, 1663), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1717, 1776), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Session Token to use"""', 'exclude': '(True)'}), "(description='AWS Session Token to use', exclude=True)\n", (1722, 1776), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1824, 1939), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS region name to use. Uses region configured in AWS CLI if not passed"""', 'exclude': '(True)'}), "(description=\n 'AWS region name to use. Uses region configured in AWS CLI if not passed',\n exclude=True)\n", (1829, 1939), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1992, 2100), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Use this Botocore session instead of creating a new default one."""', 'exclude': '(True)'}), "(description=\n 'Use this Botocore session instead of creating a new default one.',\n exclude=True)\n", (1997, 2100), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2152, 2268), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Custom configuration object to use instead of the default generated one."""', 'exclude': '(True)'}), "(description=\n 'Custom configuration object to use instead of the default generated one.',\n exclude=True)\n", (2157, 2268), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2306, 2379), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of API retries."""', 'gt': '(0)'}), "(default=10, description='The maximum number of API retries.', gt=0)\n", (2311, 2379), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2415, 2563), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(60.0)', 'description': '"""The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts."""'}), "(default=60.0, description=\n 'The timeout for the Bedrock API request in seconds. 
It will be used for both connect and read timeouts.'\n )\n", (2420, 2563), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2617, 2706), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the bedrock client."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the bedrock client.')\n", (2622, 2706), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2735, 2748), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2746, 2748), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((6449, 6608), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""0.9.48"""', 'reason': '"""Use the provided kwargs in the constructor, set_credentials will be removed in future releases."""', 'action': '"""once"""'}), "(version='0.9.48', reason=\n 'Use the provided kwargs in the constructor, set_credentials will be removed in future releases.'\n , action='once')\n", (6459, 6608), False, 'from deprecated import deprecated\n'), ((8956, 9115), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""0.9.48"""', 'reason': '"""Use the provided kwargs in the constructor, set_credentials will be removed in future releases."""', 'action': '"""once"""'}), "(version='0.9.48', reason=\n 'Use the provided kwargs in the constructor, set_credentials will be removed in future releases.'\n , action='once')\n", (8966, 9115), False, 'from deprecated import deprecated\n'), ((4611, 4642), 'boto3.Session', 'boto3.Session', ([], {}), '(**session_kwargs)\n', (4624, 4642), False, 'import boto3\n'), ((6994, 7017), 'os.getenv', 'os.getenv', (['"""AWS_REGION"""'], {}), "('AWS_REGION')\n", (7003, 7017), False, 'import os\n'), ((7067, 7097), 'os.getenv', 'os.getenv', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (7076, 7097), False, 'import os\n'), ((7155, 7189), 'os.getenv', 'os.getenv', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (7164, 7189), False, 'import os\n'), ((7261, 7291), 'os.getenv', 'os.getenv', (['"""AWS_SESSION_TOKEN"""'], {}), "('AWS_SESSION_TOKEN')\n", (7270, 7291), False, 'import os\n'), ((7336, 7438), 'warnings.warn', 'warnings.warn', (['"""AWS_REGION not found. Set environment variable AWS_REGION or set aws_region"""'], {}), "(\n 'AWS_REGION not found. Set environment variable AWS_REGION or set aws_region'\n )\n", (7349, 7438), False, 'import warnings\n'), ((7510, 7633), 'warnings.warn', 'warnings.warn', (['"""AWS_ACCESS_KEY_ID not found. Set environment variable AWS_ACCESS_KEY_ID or set aws_access_key_id"""'], {}), "(\n 'AWS_ACCESS_KEY_ID not found. Set environment variable AWS_ACCESS_KEY_ID or set aws_access_key_id'\n )\n", (7523, 7633), False, 'import warnings\n'), ((7758, 7893), 'warnings.warn', 'warnings.warn', (['"""AWS_SECRET_ACCESS_KEY not found. Set environment variable AWS_SECRET_ACCESS_KEY or set aws_secret_access_key"""'], {}), "(\n 'AWS_SECRET_ACCESS_KEY not found. Set environment variable AWS_SECRET_ACCESS_KEY or set aws_secret_access_key'\n )\n", (7771, 7893), False, 'import warnings\n'), ((8018, 8141), 'warnings.warn', 'warnings.warn', (['"""AWS_SESSION_TOKEN not found. Set environment variable AWS_SESSION_TOKEN or set aws_session_token"""'], {}), "(\n 'AWS_SESSION_TOKEN not found. 
Set environment variable AWS_SESSION_TOKEN or set aws_session_token'\n )\n", (8031, 8141), False, 'import warnings\n'), ((8555, 8586), 'boto3.Session', 'boto3.Session', ([], {}), '(**session_kwargs)\n', (8568, 8586), False, 'import boto3\n'), ((11051, 11082), 'boto3.Session', 'boto3.Session', ([], {}), '(**session_kwargs)\n', (11064, 11082), False, 'import boto3\n'), ((13358, 13392), 'json.dumps', 'json.dumps', (["{'inputText': payload}"], {}), "({'inputText': payload})\n", (13368, 13392), False, 'import json\n'), ((4303, 4419), 'botocore.config.Config', 'Config', ([], {'retries': "{'max_attempts': max_retries, 'mode': 'standard'}", 'connect_timeout': 'timeout', 'read_timeout': 'timeout'}), "(retries={'max_attempts': max_retries, 'mode': 'standard'},\n connect_timeout=timeout, read_timeout=timeout)\n", (4309, 4419), False, 'from botocore.config import Config\n'), ((13589, 13678), 'json.dumps', 'json.dumps', (["{'texts': [payload], 'input_type': input_types[type], 'truncate': 'NONE'}"], {}), "({'texts': [payload], 'input_type': input_types[type], 'truncate':\n 'NONE'})\n", (13599, 13678), False, 'import json\n')]
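A minimal usage sketch building on the `BedrockEmbedding` class defined in the record above. It assumes `boto3` is installed and AWS credentials are already configured; the model id matches the `Models.TITAN_EMBEDDING` value from the record, while the region name is an illustrative placeholder rather than a required value.

```python
# Instantiate against the Titan text model listed in the Models enum above.
# region_name is a placeholder assumption; any valid AWS setup works.
embed_model = BedrockEmbedding(
    model="amazon.titan-embed-text-v1",
    region_name="us-east-1",
)

# BaseEmbedding exposes public get_text_embedding / get_query_embedding wrappers
# around the private _get_text_embedding / _get_query_embedding methods above.
vector = embed_model.get_text_embedding("Amazon Bedrock hosts several embedding models.")
print(len(vector))
```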
from pathlib import Path from llama_index import download_loader from llama_index import SimpleDirectoryReader PDFReader = download_loader("PDFReader") def getdocument(filename : str,filetype:str): if filetype == "pdf": loader = PDFReader() elif filetype == "txt": loader = SimpleDirectoryReader('./example') document = loader.load_data(file=Path(filename)) return document
[ "llama_index.download_loader", "llama_index.SimpleDirectoryReader" ]
[((124, 152), 'llama_index.download_loader', 'download_loader', (['"""PDFReader"""'], {}), "('PDFReader')\n", (139, 152), False, 'from llama_index import download_loader\n'), ((300, 334), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""./example"""'], {}), "('./example')\n", (321, 334), False, 'from llama_index import SimpleDirectoryReader\n'), ((380, 394), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (384, 394), False, 'from pathlib import Path\n')]
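A short usage sketch for the `getdocument` helper defined in the record above; the file name is an illustrative placeholder and assumes a PDF exists at that path.

```python
# "example.pdf" is an illustrative path; the "pdf" flag routes to the Llama Hub PDFReader.
documents = getdocument("example.pdf", "pdf")
print(f"Loaded {len(documents)} document chunk(s)")
```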
"""Utils for jupyter notebook.""" import os from io import BytesIO from typing import Any, Dict, List, Tuple import matplotlib.pyplot as plt import requests from IPython.display import Markdown, display from llama_index.core.base.response.schema import Response from llama_index.core.img_utils import b64_2_img from llama_index.core.schema import ImageNode, MetadataMode, NodeWithScore from llama_index.core.utils import truncate_text from PIL import Image DEFAULT_THUMBNAIL_SIZE = (512, 512) DEFAULT_IMAGE_MATRIX = (3, 3) DEFAULT_SHOW_TOP_K = 3 def display_image(img_str: str, size: Tuple[int, int] = DEFAULT_THUMBNAIL_SIZE) -> None: """Display base64 encoded image str as image for jupyter notebook.""" img = b64_2_img(img_str) img.thumbnail(size) display(img) def display_image_uris( image_paths: List[str], image_matrix: Tuple[int, int] = DEFAULT_IMAGE_MATRIX, top_k: int = DEFAULT_SHOW_TOP_K, ) -> None: """Display base64 encoded image str as image for jupyter notebook.""" images_shown = 0 plt.figure(figsize=(16, 9)) for img_path in image_paths[:top_k]: if os.path.isfile(img_path): image = Image.open(img_path) plt.subplot(image_matrix[0], image_matrix[1], images_shown + 1) plt.imshow(image) plt.xticks([]) plt.yticks([]) images_shown += 1 if images_shown >= image_matrix[0] * image_matrix[1]: break def display_source_node( source_node: NodeWithScore, source_length: int = 100, show_source_metadata: bool = False, metadata_mode: MetadataMode = MetadataMode.NONE, ) -> None: """Display source node for jupyter notebook.""" source_text_fmt = truncate_text( source_node.node.get_content(metadata_mode=metadata_mode).strip(), source_length ) text_md = ( f"**Node ID:** {source_node.node.node_id}<br>" f"**Similarity:** {source_node.score}<br>" f"**Text:** {source_text_fmt}<br>" ) if show_source_metadata: text_md += f"**Metadata:** {source_node.node.metadata}<br>" if isinstance(source_node.node, ImageNode): text_md += "**Image:**" display(Markdown(text_md)) if isinstance(source_node.node, ImageNode) and source_node.node.image is not None: display_image(source_node.node.image) def display_metadata(metadata: Dict[str, Any]) -> None: """Display metadata for jupyter notebook.""" display(metadata) def display_response( response: Response, source_length: int = 100, show_source: bool = False, show_metadata: bool = False, show_source_metadata: bool = False, ) -> None: """Display response for jupyter notebook.""" if response.response is None: response_text = "None" else: response_text = response.response.strip() display(Markdown(f"**`Final Response:`** {response_text}")) if show_source: for ind, source_node in enumerate(response.source_nodes): display(Markdown("---")) display( Markdown(f"**`Source Node {ind + 1}/{len(response.source_nodes)}`**") ) display_source_node( source_node, source_length=source_length, show_source_metadata=show_source_metadata, ) if show_metadata: if response.metadata is not None: display_metadata(response.metadata) def display_query_and_multimodal_response( query_str: str, response: Response, plot_height: int = 2, plot_width: int = 5 ) -> None: """For displaying a query and its multi-modal response.""" if response.metadata: image_nodes = response.metadata["image_nodes"] or [] else: image_nodes = [] num_subplots = len(image_nodes) f, axarr = plt.subplots(1, num_subplots) f.set_figheight(plot_height) f.set_figwidth(plot_width) ix = 0 for ix, scored_img_node in enumerate(image_nodes): img_node = scored_img_node.node image = None if img_node.image_url: img_response = requests.get(img_node.image_url) 
image = Image.open(BytesIO(img_response.content)) elif img_node.image_path: image = Image.open(img_node.image_path).convert("RGB") else: raise ValueError( "A retrieved image must have image_path or image_url specified." ) if num_subplots > 1: axarr[ix].imshow(image) axarr[ix].set_title(f"Retrieved Position: {ix}", pad=10, fontsize=9) else: axarr.imshow(image) axarr.set_title(f"Retrieved Position: {ix}", pad=10, fontsize=9) f.tight_layout() print(f"Query: {query_str}\n=======") print(f"Retrieved Images:\n") plt.show() print("=======") print(f"Response: {response.response}\n=======\n")
[ "llama_index.core.img_utils.b64_2_img" ]
[((723, 741), 'llama_index.core.img_utils.b64_2_img', 'b64_2_img', (['img_str'], {}), '(img_str)\n', (732, 741), False, 'from llama_index.core.img_utils import b64_2_img\n'), ((770, 782), 'IPython.display.display', 'display', (['img'], {}), '(img)\n', (777, 782), False, 'from IPython.display import Markdown, display\n'), ((1042, 1069), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (1052, 1069), True, 'import matplotlib.pyplot as plt\n'), ((2470, 2487), 'IPython.display.display', 'display', (['metadata'], {}), '(metadata)\n', (2477, 2487), False, 'from IPython.display import Markdown, display\n'), ((3831, 3860), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'num_subplots'], {}), '(1, num_subplots)\n', (3843, 3860), True, 'import matplotlib.pyplot as plt\n'), ((4816, 4826), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4824, 4826), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1146), 'os.path.isfile', 'os.path.isfile', (['img_path'], {}), '(img_path)\n', (1136, 1146), False, 'import os\n'), ((2207, 2224), 'IPython.display.Markdown', 'Markdown', (['text_md'], {}), '(text_md)\n', (2215, 2224), False, 'from IPython.display import Markdown, display\n'), ((2868, 2918), 'IPython.display.Markdown', 'Markdown', (['f"""**`Final Response:`** {response_text}"""'], {}), "(f'**`Final Response:`** {response_text}')\n", (2876, 2918), False, 'from IPython.display import Markdown, display\n'), ((1168, 1188), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1178, 1188), False, 'from PIL import Image\n'), ((1202, 1265), 'matplotlib.pyplot.subplot', 'plt.subplot', (['image_matrix[0]', 'image_matrix[1]', '(images_shown + 1)'], {}), '(image_matrix[0], image_matrix[1], images_shown + 1)\n', (1213, 1265), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1295), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1288, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1308, 1322), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1318, 1322), True, 'import matplotlib.pyplot as plt\n'), ((1335, 1349), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1345, 1349), True, 'import matplotlib.pyplot as plt\n'), ((4110, 4142), 'requests.get', 'requests.get', (['img_node.image_url'], {}), '(img_node.image_url)\n', (4122, 4142), False, 'import requests\n'), ((3026, 3041), 'IPython.display.Markdown', 'Markdown', (['"""---"""'], {}), "('---')\n", (3034, 3041), False, 'from IPython.display import Markdown, display\n'), ((4174, 4203), 'io.BytesIO', 'BytesIO', (['img_response.content'], {}), '(img_response.content)\n', (4181, 4203), False, 'from io import BytesIO\n'), ((4259, 4290), 'PIL.Image.open', 'Image.open', (['img_node.image_path'], {}), '(img_node.image_path)\n', (4269, 4290), False, 'from PIL import Image\n')]
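An illustrative sketch of calling the notebook helpers above from a Jupyter cell; the image paths and the `retrieved_nodes` variable are assumptions standing in for real retrieval output.

```python
# Show locally stored thumbnails in the default grid layout (paths are placeholders).
display_image_uris(["./images/photo_1.jpg", "./images/photo_2.jpg"], top_k=2)

# Render one retrieved node as Markdown, truncating its text to 200 characters;
# `retrieved_nodes` is assumed to be a List[NodeWithScore] returned by a retriever.
display_source_node(retrieved_nodes[0], source_length=200, show_source_metadata=True)
```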
"""Utils for jupyter notebook.""" import os from io import BytesIO from typing import Any, Dict, List, Tuple import matplotlib.pyplot as plt import requests from IPython.display import Markdown, display from llama_index.core.base.response.schema import Response from llama_index.core.img_utils import b64_2_img from llama_index.core.schema import ImageNode, MetadataMode, NodeWithScore from llama_index.core.utils import truncate_text from PIL import Image DEFAULT_THUMBNAIL_SIZE = (512, 512) DEFAULT_IMAGE_MATRIX = (3, 3) DEFAULT_SHOW_TOP_K = 3 def display_image(img_str: str, size: Tuple[int, int] = DEFAULT_THUMBNAIL_SIZE) -> None: """Display base64 encoded image str as image for jupyter notebook.""" img = b64_2_img(img_str) img.thumbnail(size) display(img) def display_image_uris( image_paths: List[str], image_matrix: Tuple[int, int] = DEFAULT_IMAGE_MATRIX, top_k: int = DEFAULT_SHOW_TOP_K, ) -> None: """Display base64 encoded image str as image for jupyter notebook.""" images_shown = 0 plt.figure(figsize=(16, 9)) for img_path in image_paths[:top_k]: if os.path.isfile(img_path): image = Image.open(img_path) plt.subplot(image_matrix[0], image_matrix[1], images_shown + 1) plt.imshow(image) plt.xticks([]) plt.yticks([]) images_shown += 1 if images_shown >= image_matrix[0] * image_matrix[1]: break def display_source_node( source_node: NodeWithScore, source_length: int = 100, show_source_metadata: bool = False, metadata_mode: MetadataMode = MetadataMode.NONE, ) -> None: """Display source node for jupyter notebook.""" source_text_fmt = truncate_text( source_node.node.get_content(metadata_mode=metadata_mode).strip(), source_length ) text_md = ( f"**Node ID:** {source_node.node.node_id}<br>" f"**Similarity:** {source_node.score}<br>" f"**Text:** {source_text_fmt}<br>" ) if show_source_metadata: text_md += f"**Metadata:** {source_node.node.metadata}<br>" if isinstance(source_node.node, ImageNode): text_md += "**Image:**" display(Markdown(text_md)) if isinstance(source_node.node, ImageNode) and source_node.node.image is not None: display_image(source_node.node.image) def display_metadata(metadata: Dict[str, Any]) -> None: """Display metadata for jupyter notebook.""" display(metadata) def display_response( response: Response, source_length: int = 100, show_source: bool = False, show_metadata: bool = False, show_source_metadata: bool = False, ) -> None: """Display response for jupyter notebook.""" if response.response is None: response_text = "None" else: response_text = response.response.strip() display(Markdown(f"**`Final Response:`** {response_text}")) if show_source: for ind, source_node in enumerate(response.source_nodes): display(Markdown("---")) display( Markdown(f"**`Source Node {ind + 1}/{len(response.source_nodes)}`**") ) display_source_node( source_node, source_length=source_length, show_source_metadata=show_source_metadata, ) if show_metadata: if response.metadata is not None: display_metadata(response.metadata) def display_query_and_multimodal_response( query_str: str, response: Response, plot_height: int = 2, plot_width: int = 5 ) -> None: """For displaying a query and its multi-modal response.""" if response.metadata: image_nodes = response.metadata["image_nodes"] or [] else: image_nodes = [] num_subplots = len(image_nodes) f, axarr = plt.subplots(1, num_subplots) f.set_figheight(plot_height) f.set_figwidth(plot_width) ix = 0 for ix, scored_img_node in enumerate(image_nodes): img_node = scored_img_node.node image = None if img_node.image_url: img_response = requests.get(img_node.image_url) 
image = Image.open(BytesIO(img_response.content)) elif img_node.image_path: image = Image.open(img_node.image_path).convert("RGB") else: raise ValueError( "A retrieved image must have image_path or image_url specified." ) if num_subplots > 1: axarr[ix].imshow(image) axarr[ix].set_title(f"Retrieved Position: {ix}", pad=10, fontsize=9) else: axarr.imshow(image) axarr.set_title(f"Retrieved Position: {ix}", pad=10, fontsize=9) f.tight_layout() print(f"Query: {query_str}\n=======") print(f"Retrieved Images:\n") plt.show() print("=======") print(f"Response: {response.response}\n=======\n")
[ "llama_index.core.img_utils.b64_2_img" ]
[((723, 741), 'llama_index.core.img_utils.b64_2_img', 'b64_2_img', (['img_str'], {}), '(img_str)\n', (732, 741), False, 'from llama_index.core.img_utils import b64_2_img\n'), ((770, 782), 'IPython.display.display', 'display', (['img'], {}), '(img)\n', (777, 782), False, 'from IPython.display import Markdown, display\n'), ((1042, 1069), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(16, 9)'}), '(figsize=(16, 9))\n', (1052, 1069), True, 'import matplotlib.pyplot as plt\n'), ((2470, 2487), 'IPython.display.display', 'display', (['metadata'], {}), '(metadata)\n', (2477, 2487), False, 'from IPython.display import Markdown, display\n'), ((3831, 3860), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'num_subplots'], {}), '(1, num_subplots)\n', (3843, 3860), True, 'import matplotlib.pyplot as plt\n'), ((4816, 4826), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4824, 4826), True, 'import matplotlib.pyplot as plt\n'), ((1122, 1146), 'os.path.isfile', 'os.path.isfile', (['img_path'], {}), '(img_path)\n', (1136, 1146), False, 'import os\n'), ((2207, 2224), 'IPython.display.Markdown', 'Markdown', (['text_md'], {}), '(text_md)\n', (2215, 2224), False, 'from IPython.display import Markdown, display\n'), ((2868, 2918), 'IPython.display.Markdown', 'Markdown', (['f"""**`Final Response:`** {response_text}"""'], {}), "(f'**`Final Response:`** {response_text}')\n", (2876, 2918), False, 'from IPython.display import Markdown, display\n'), ((1168, 1188), 'PIL.Image.open', 'Image.open', (['img_path'], {}), '(img_path)\n', (1178, 1188), False, 'from PIL import Image\n'), ((1202, 1265), 'matplotlib.pyplot.subplot', 'plt.subplot', (['image_matrix[0]', 'image_matrix[1]', '(images_shown + 1)'], {}), '(image_matrix[0], image_matrix[1], images_shown + 1)\n', (1213, 1265), True, 'import matplotlib.pyplot as plt\n'), ((1278, 1295), 'matplotlib.pyplot.imshow', 'plt.imshow', (['image'], {}), '(image)\n', (1288, 1295), True, 'import matplotlib.pyplot as plt\n'), ((1308, 1322), 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), '([])\n', (1318, 1322), True, 'import matplotlib.pyplot as plt\n'), ((1335, 1349), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (1345, 1349), True, 'import matplotlib.pyplot as plt\n'), ((4110, 4142), 'requests.get', 'requests.get', (['img_node.image_url'], {}), '(img_node.image_url)\n', (4122, 4142), False, 'import requests\n'), ((3026, 3041), 'IPython.display.Markdown', 'Markdown', (['"""---"""'], {}), "('---')\n", (3034, 3041), False, 'from IPython.display import Markdown, display\n'), ((4174, 4203), 'io.BytesIO', 'BytesIO', (['img_response.content'], {}), '(img_response.content)\n', (4181, 4203), False, 'from io import BytesIO\n'), ((4259, 4290), 'PIL.Image.open', 'Image.open', (['img_node.image_path'], {}), '(img_node.image_path)\n', (4269, 4290), False, 'from PIL import Image\n')]
from typing import Optional, Type from llama_index.legacy.download.module import ( LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download, ) from llama_index.legacy.llama_pack.base import BaseLlamaPack def download_llama_pack( llama_pack_class: str, download_dir: str, llama_hub_url: str = LLAMA_HUB_URL, refresh_cache: bool = True, skip_load: bool = False, ) -> Optional[Type[BaseLlamaPack]]: """Download a single LlamaPack from Llama Hub. Args: llama_pack_class: The name of the LlamaPack class you want to download, such as `GmailOpenAIAgentPack`. refresh_cache: If true, the local cache will be skipped and the loader will be fetched directly from the remote repo. download_dir: Custom dirpath to download the pack into. Returns: A Loader. """ pack_cls = download_llama_module( llama_pack_class, llama_hub_url=llama_hub_url, refresh_cache=refresh_cache, custom_path=download_dir, library_path="llama_packs/library.json", disable_library_cache=True, override_path=True, skip_load=skip_load, ) track_download(llama_pack_class, MODULE_TYPE.LLAMAPACK) if pack_cls is None: return None if not issubclass(pack_cls, BaseLlamaPack): raise ValueError(f"Tool class {pack_cls} must be a subclass of BaseToolSpec.") return pack_cls
[ "llama_index.legacy.download.module.download_llama_module", "llama_index.legacy.download.module.track_download" ]
[((887, 1134), 'llama_index.legacy.download.module.download_llama_module', 'download_llama_module', (['llama_pack_class'], {'llama_hub_url': 'llama_hub_url', 'refresh_cache': 'refresh_cache', 'custom_path': 'download_dir', 'library_path': '"""llama_packs/library.json"""', 'disable_library_cache': '(True)', 'override_path': '(True)', 'skip_load': 'skip_load'}), "(llama_pack_class, llama_hub_url=llama_hub_url,\n refresh_cache=refresh_cache, custom_path=download_dir, library_path=\n 'llama_packs/library.json', disable_library_cache=True, override_path=\n True, skip_load=skip_load)\n", (908, 1134), False, 'from llama_index.legacy.download.module import LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download\n'), ((1196, 1251), 'llama_index.legacy.download.module.track_download', 'track_download', (['llama_pack_class', 'MODULE_TYPE.LLAMAPACK'], {}), '(llama_pack_class, MODULE_TYPE.LLAMAPACK)\n', (1210, 1251), False, 'from llama_index.legacy.download.module import LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download\n')]
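A usage sketch for `download_llama_pack`, reusing the `GmailOpenAIAgentPack` name from its own docstring; the download directory is an illustrative choice.

```python
# Fetch the pack class from Llama Hub into a local directory ("./gmail_pack" is illustrative).
pack_cls = download_llama_pack("GmailOpenAIAgentPack", download_dir="./gmail_pack")

# The return type is Optional (e.g. when skip_load=True), so guard before using the class.
if pack_cls is not None:
    print(pack_cls.__name__)
```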
from typing import Any, Dict, List, Optional, Sequence, Tuple from llama_index.core.base.response.schema import RESPONSE_TYPE, Response from llama_index.core.callbacks.base import CallbackManager from llama_index.core.callbacks.schema import CBEventType, EventPayload from llama_index.core.indices.multi_modal import MultiModalVectorIndexRetriever from llama_index.core.indices.query.base import BaseQueryEngine from llama_index.core.indices.query.schema import QueryBundle, QueryType from llama_index.core.multi_modal_llms.base import MultiModalLLM from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.core.prompts import BasePromptTemplate from llama_index.core.prompts.default_prompts import DEFAULT_TEXT_QA_PROMPT from llama_index.core.prompts.mixin import PromptMixinType from llama_index.core.schema import ImageNode, NodeWithScore def _get_image_and_text_nodes( nodes: List[NodeWithScore], ) -> Tuple[List[NodeWithScore], List[NodeWithScore]]: image_nodes = [] text_nodes = [] for res_node in nodes: if isinstance(res_node.node, ImageNode): image_nodes.append(res_node) else: text_nodes.append(res_node) return image_nodes, text_nodes class SimpleMultiModalQueryEngine(BaseQueryEngine): """Simple Multi Modal Retriever query engine. Assumes that retrieved text context fits within context window of LLM, along with images. Args: retriever (MultiModalVectorIndexRetriever): A retriever object. multi_modal_llm (Optional[MultiModalLLM]): MultiModalLLM Models. text_qa_template (Optional[BasePromptTemplate]): Text QA Prompt Template. image_qa_template (Optional[BasePromptTemplate]): Image QA Prompt Template. node_postprocessors (Optional[List[BaseNodePostprocessor]]): Node Postprocessors. callback_manager (Optional[CallbackManager]): A callback manager. """ def __init__( self, retriever: MultiModalVectorIndexRetriever, multi_modal_llm: Optional[MultiModalLLM] = None, text_qa_template: Optional[BasePromptTemplate] = None, image_qa_template: Optional[BasePromptTemplate] = None, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, callback_manager: Optional[CallbackManager] = None, **kwargs: Any, ) -> None: self._retriever = retriever if multi_modal_llm: self._multi_modal_llm = multi_modal_llm else: try: from llama_index.multi_modal_llms.openai import ( OpenAIMultiModal, ) # pants: no-infer-dep self._multi_modal_llm = OpenAIMultiModal( model="gpt-4-vision-preview", max_new_tokens=1000 ) except ImportError as e: raise ImportError( "`llama-index-multi-modal-llms-openai` package cannot be found. 
" "Please install it by using `pip install `llama-index-multi-modal-llms-openai`" ) self._text_qa_template = text_qa_template or DEFAULT_TEXT_QA_PROMPT self._image_qa_template = image_qa_template or DEFAULT_TEXT_QA_PROMPT self._node_postprocessors = node_postprocessors or [] callback_manager = callback_manager or CallbackManager([]) for node_postprocessor in self._node_postprocessors: node_postprocessor.callback_manager = callback_manager super().__init__(callback_manager) def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return {"text_qa_template": self._text_qa_template} def _get_prompt_modules(self) -> PromptMixinType: """Get prompt sub-modules.""" return {} def _apply_node_postprocessors( self, nodes: List[NodeWithScore], query_bundle: QueryBundle ) -> List[NodeWithScore]: for node_postprocessor in self._node_postprocessors: nodes = node_postprocessor.postprocess_nodes( nodes, query_bundle=query_bundle ) return nodes def retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: nodes = self._retriever.retrieve(query_bundle) return self._apply_node_postprocessors(nodes, query_bundle=query_bundle) async def aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: nodes = await self._retriever.aretrieve(query_bundle) return self._apply_node_postprocessors(nodes, query_bundle=query_bundle) def synthesize( self, query_bundle: QueryBundle, nodes: List[NodeWithScore], additional_source_nodes: Optional[Sequence[NodeWithScore]] = None, ) -> RESPONSE_TYPE: image_nodes, text_nodes = _get_image_and_text_nodes(nodes) context_str = "\n\n".join([r.get_content() for r in text_nodes]) fmt_prompt = self._text_qa_template.format( context_str=context_str, query_str=query_bundle.query_str ) llm_response = self._multi_modal_llm.complete( prompt=fmt_prompt, image_documents=[image_node.node for image_node in image_nodes], ) return Response( response=str(llm_response), source_nodes=nodes, metadata={"text_nodes": text_nodes, "image_nodes": image_nodes}, ) def _get_response_with_images( self, prompt_str: str, image_nodes: List[ImageNode], ) -> RESPONSE_TYPE: fmt_prompt = self._image_qa_template.format( query_str=prompt_str, ) llm_response = self._multi_modal_llm.complete( prompt=fmt_prompt, image_documents=[image_node.node for image_node in image_nodes], ) return Response( response=str(llm_response), source_nodes=image_nodes, metadata={"image_nodes": image_nodes}, ) async def asynthesize( self, query_bundle: QueryBundle, nodes: List[NodeWithScore], additional_source_nodes: Optional[Sequence[NodeWithScore]] = None, ) -> RESPONSE_TYPE: image_nodes, text_nodes = _get_image_and_text_nodes(nodes) context_str = "\n\n".join([r.get_content() for r in text_nodes]) fmt_prompt = self._text_qa_template.format( context_str=context_str, query_str=query_bundle.query_str ) llm_response = await self._multi_modal_llm.acomplete( prompt=fmt_prompt, image_documents=[image_node.node for image_node in image_nodes], ) return Response( response=str(llm_response), source_nodes=nodes, metadata={"text_nodes": text_nodes, "image_nodes": image_nodes}, ) def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: """Answer a query.""" with self.callback_manager.event( CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str} ) as query_event: with self.callback_manager.event( CBEventType.RETRIEVE, payload={EventPayload.QUERY_STR: query_bundle.query_str}, ) as retrieve_event: nodes = self.retrieve(query_bundle) retrieve_event.on_end( payload={EventPayload.NODES: nodes}, ) response = 
self.synthesize( query_bundle, nodes=nodes, ) query_event.on_end(payload={EventPayload.RESPONSE: response}) return response def image_query(self, image_path: QueryType, prompt_str: str) -> RESPONSE_TYPE: """Answer a image query.""" with self.callback_manager.event( CBEventType.QUERY, payload={EventPayload.QUERY_STR: str(image_path)} ) as query_event: with self.callback_manager.event( CBEventType.RETRIEVE, payload={EventPayload.QUERY_STR: str(image_path)}, ) as retrieve_event: nodes = self._retriever.image_to_image_retrieve(image_path) retrieve_event.on_end( payload={EventPayload.NODES: nodes}, ) image_nodes, _ = _get_image_and_text_nodes(nodes) response = self._get_response_with_images( prompt_str=prompt_str, image_nodes=image_nodes, ) query_event.on_end(payload={EventPayload.RESPONSE: response}) return response async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: """Answer a query.""" with self.callback_manager.event( CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str} ) as query_event: with self.callback_manager.event( CBEventType.RETRIEVE, payload={EventPayload.QUERY_STR: query_bundle.query_str}, ) as retrieve_event: nodes = await self.aretrieve(query_bundle) retrieve_event.on_end( payload={EventPayload.NODES: nodes}, ) response = await self.asynthesize( query_bundle, nodes=nodes, ) query_event.on_end(payload={EventPayload.RESPONSE: response}) return response @property def retriever(self) -> MultiModalVectorIndexRetriever: """Get the retriever object.""" return self._retriever
[ "llama_index.core.callbacks.base.CallbackManager", "llama_index.multi_modal_llms.openai.OpenAIMultiModal" ]
[((3353, 3372), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (3368, 3372), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((2707, 2774), 'llama_index.multi_modal_llms.openai.OpenAIMultiModal', 'OpenAIMultiModal', ([], {'model': '"""gpt-4-vision-preview"""', 'max_new_tokens': '(1000)'}), "(model='gpt-4-vision-preview', max_new_tokens=1000)\n", (2723, 2774), False, 'from llama_index.multi_modal_llms.openai import OpenAIMultiModal\n')]
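A hedged wiring sketch for the engine above. It assumes an already-built multi-modal vector index (`index`) whose retriever satisfies the `MultiModalVectorIndexRetriever` type the constructor expects, plus OpenAI credentials for the default GPT-4 Vision model; the query strings and image path are placeholders.

```python
# `index` is assumed to be an existing MultiModalVectorStoreIndex.
retriever = index.as_retriever(similarity_top_k=3)

query_engine = SimpleMultiModalQueryEngine(retriever=retriever)

# Text query: the public BaseQueryEngine.query() entry point dispatches to _query() above.
response = query_engine.query("Which landmarks appear in the indexed photos?")
print(response.response)

# Image-to-image query via the image_query() method defined above.
image_response = query_engine.image_query(
    "./query_image.jpg", prompt_str="Describe the retrieved images."
)
```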
import json from typing import Sequence from llama_index.legacy.prompts.base import PromptTemplate from llama_index.legacy.question_gen.types import SubQuestion from llama_index.legacy.tools.types import ToolMetadata # deprecated, kept for backward compatibility SubQuestionPrompt = PromptTemplate def build_tools_text(tools: Sequence[ToolMetadata]) -> str: tools_dict = {} for tool in tools: tools_dict[tool.name] = tool.description return json.dumps(tools_dict, indent=4) PREFIX = """\ Given a user question, and a list of tools, output a list of relevant sub-questions \ in json markdown that when composed can help answer the full user question: """ example_query_str = ( "Compare and contrast the revenue growth and EBITDA of Uber and Lyft for year 2021" ) example_tools = [ ToolMetadata( name="uber_10k", description="Provides information about Uber financials for year 2021", ), ToolMetadata( name="lyft_10k", description="Provides information about Lyft financials for year 2021", ), ] example_tools_str = build_tools_text(example_tools) example_output = [ SubQuestion( sub_question="What is the revenue growth of Uber", tool_name="uber_10k" ), SubQuestion(sub_question="What is the EBITDA of Uber", tool_name="uber_10k"), SubQuestion( sub_question="What is the revenue growth of Lyft", tool_name="lyft_10k" ), SubQuestion(sub_question="What is the EBITDA of Lyft", tool_name="lyft_10k"), ] example_output_str = json.dumps({"items": [x.dict() for x in example_output]}, indent=4) EXAMPLES = f"""\ # Example 1 <Tools> ```json {example_tools_str} ``` <User Question> {example_query_str} <Output> ```json {example_output_str} ``` """.replace( "{", "{{" ).replace( "}", "}}" ) SUFFIX = """\ # Example 2 <Tools> ```json {tools_str} ``` <User Question> {query_str} <Output> """ DEFAULT_SUB_QUESTION_PROMPT_TMPL = PREFIX + EXAMPLES + SUFFIX
[ "llama_index.legacy.question_gen.types.SubQuestion", "llama_index.legacy.tools.types.ToolMetadata" ]
[((465, 497), 'json.dumps', 'json.dumps', (['tools_dict'], {'indent': '(4)'}), '(tools_dict, indent=4)\n', (475, 497), False, 'import json\n'), ((817, 923), 'llama_index.legacy.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': '"""uber_10k"""', 'description': '"""Provides information about Uber financials for year 2021"""'}), "(name='uber_10k', description=\n 'Provides information about Uber financials for year 2021')\n", (829, 923), False, 'from llama_index.legacy.tools.types import ToolMetadata\n'), ((947, 1053), 'llama_index.legacy.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': '"""lyft_10k"""', 'description': '"""Provides information about Lyft financials for year 2021"""'}), "(name='lyft_10k', description=\n 'Provides information about Lyft financials for year 2021')\n", (959, 1053), False, 'from llama_index.legacy.tools.types import ToolMetadata\n'), ((1150, 1239), 'llama_index.legacy.question_gen.types.SubQuestion', 'SubQuestion', ([], {'sub_question': '"""What is the revenue growth of Uber"""', 'tool_name': '"""uber_10k"""'}), "(sub_question='What is the revenue growth of Uber', tool_name=\n 'uber_10k')\n", (1161, 1239), False, 'from llama_index.legacy.question_gen.types import SubQuestion\n'), ((1254, 1330), 'llama_index.legacy.question_gen.types.SubQuestion', 'SubQuestion', ([], {'sub_question': '"""What is the EBITDA of Uber"""', 'tool_name': '"""uber_10k"""'}), "(sub_question='What is the EBITDA of Uber', tool_name='uber_10k')\n", (1265, 1330), False, 'from llama_index.legacy.question_gen.types import SubQuestion\n'), ((1336, 1425), 'llama_index.legacy.question_gen.types.SubQuestion', 'SubQuestion', ([], {'sub_question': '"""What is the revenue growth of Lyft"""', 'tool_name': '"""lyft_10k"""'}), "(sub_question='What is the revenue growth of Lyft', tool_name=\n 'lyft_10k')\n", (1347, 1425), False, 'from llama_index.legacy.question_gen.types import SubQuestion\n'), ((1440, 1516), 'llama_index.legacy.question_gen.types.SubQuestion', 'SubQuestion', ([], {'sub_question': '"""What is the EBITDA of Lyft"""', 'tool_name': '"""lyft_10k"""'}), "(sub_question='What is the EBITDA of Lyft', tool_name='lyft_10k')\n", (1451, 1516), False, 'from llama_index.legacy.question_gen.types import SubQuestion\n')]
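A small sketch showing how the pieces in the record above compose into a final prompt string; the tool names and question are illustrative.

```python
# Illustrative tools; build_tools_text serializes name -> description as JSON.
my_tools = [
    ToolMetadata(name="march_10q", description="Provides information about the March quarterly filing"),
    ToolMetadata(name="june_10q", description="Provides information about the June quarterly filing"),
]

tools_str = build_tools_text(my_tools)

# PREFIX and EXAMPLES have their braces escaped, so str.format only fills the SUFFIX fields.
prompt = DEFAULT_SUB_QUESTION_PROMPT_TMPL.format(
    tools_str=tools_str,
    query_str="Compare revenue growth between March and June",
)
print(prompt)
```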
from typing import Any, List, Optional from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.embeddings.base import ( DEFAULT_EMBED_BATCH_SIZE, BaseEmbedding, ) from llama_index.legacy.embeddings.huggingface_utils import ( DEFAULT_INSTRUCT_MODEL, get_query_instruct_for_model_name, get_text_instruct_for_model_name, ) class InstructorEmbedding(BaseEmbedding): query_instruction: Optional[str] = Field( description="Instruction to prepend to query text." ) text_instruction: Optional[str] = Field( description="Instruction to prepend to text." ) cache_folder: Optional[str] = Field( description="Cache folder for huggingface files." ) _model: Any = PrivateAttr() def __init__( self, model_name: str = DEFAULT_INSTRUCT_MODEL, query_instruction: Optional[str] = None, text_instruction: Optional[str] = None, embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, cache_folder: Optional[str] = None, device: Optional[str] = None, callback_manager: Optional[CallbackManager] = None, ): try: from InstructorEmbedding import INSTRUCTOR except ImportError: raise ImportError( "InstructorEmbedding requires instructor to be installed.\n" "Please install transformers with `pip install InstructorEmbedding`." ) self._model = INSTRUCTOR(model_name, cache_folder=cache_folder, device=device) super().__init__( embed_batch_size=embed_batch_size, callback_manager=callback_manager, model_name=model_name, query_instruction=query_instruction, text_instruction=text_instruction, cache_folder=cache_folder, ) @classmethod def class_name(cls) -> str: return "InstructorEmbedding" def _format_query_text(self, query_text: str) -> List[str]: """Format query text.""" instruction = self.text_instruction if instruction is None: instruction = get_query_instruct_for_model_name(self.model_name) return [instruction, query_text] def _format_text(self, text: str) -> List[str]: """Format text.""" instruction = self.text_instruction if instruction is None: instruction = get_text_instruct_for_model_name(self.model_name) return [instruction, text] def _embed(self, instruct_sentence_pairs: List[List[str]]) -> List[List[float]]: """Embed sentences.""" return self._model.encode(instruct_sentence_pairs).tolist() def _get_query_embedding(self, query: str) -> List[float]: """Get query embedding.""" query_pair = self._format_query_text(query) return self._embed([query_pair])[0] async def _aget_query_embedding(self, query: str) -> List[float]: """Get query embedding async.""" return self._get_query_embedding(query) async def _aget_text_embedding(self, text: str) -> List[float]: """Get text embedding async.""" return self._get_text_embedding(text) def _get_text_embedding(self, text: str) -> List[float]: """Get text embedding.""" text_pair = self._format_text(text) return self._embed([text_pair])[0] def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]: """Get text embeddings.""" text_pairs = [self._format_text(text) for text in texts] return self._embed(text_pairs)
[ "llama_index.legacy.embeddings.huggingface_utils.get_query_instruct_for_model_name", "llama_index.legacy.embeddings.huggingface_utils.get_text_instruct_for_model_name", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.bridge.pydantic.Field" ]
[((520, 578), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Instruction to prepend to query text."""'}), "(description='Instruction to prepend to query text.')\n", (525, 578), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((631, 683), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Instruction to prepend to text."""'}), "(description='Instruction to prepend to text.')\n", (636, 683), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((732, 788), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Cache folder for huggingface files."""'}), "(description='Cache folder for huggingface files.')\n", (737, 788), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((822, 835), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (833, 835), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1549, 1613), 'InstructorEmbedding.INSTRUCTOR', 'INSTRUCTOR', (['model_name'], {'cache_folder': 'cache_folder', 'device': 'device'}), '(model_name, cache_folder=cache_folder, device=device)\n', (1559, 1613), False, 'from InstructorEmbedding import INSTRUCTOR\n'), ((2203, 2253), 'llama_index.legacy.embeddings.huggingface_utils.get_query_instruct_for_model_name', 'get_query_instruct_for_model_name', (['self.model_name'], {}), '(self.model_name)\n', (2236, 2253), False, 'from llama_index.legacy.embeddings.huggingface_utils import DEFAULT_INSTRUCT_MODEL, get_query_instruct_for_model_name, get_text_instruct_for_model_name\n'), ((2479, 2528), 'llama_index.legacy.embeddings.huggingface_utils.get_text_instruct_for_model_name', 'get_text_instruct_for_model_name', (['self.model_name'], {}), '(self.model_name)\n', (2511, 2528), False, 'from llama_index.legacy.embeddings.huggingface_utils import DEFAULT_INSTRUCT_MODEL, get_query_instruct_for_model_name, get_text_instruct_for_model_name\n')]
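A brief usage sketch building on the `InstructorEmbedding` class above; it assumes the `InstructorEmbedding` (instructor) package and its model weights are available locally, and the input sentences are illustrative.

```python
# Falls back to DEFAULT_INSTRUCT_MODEL and the per-model default instructions when
# no explicit query_instruction / text_instruction is supplied.
embed_model = InstructorEmbedding(embed_batch_size=8)

# Public BaseEmbedding wrappers around the private _get_* methods defined above.
text_vec = embed_model.get_text_embedding("Instructor models pair an instruction with each input.")
query_vec = embed_model.get_query_embedding("What do instructor embeddings do?")
print(len(text_vec), len(query_vec))
```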
"""Base retrieval abstractions.""" import asyncio from abc import abstractmethod from enum import Enum from typing import Any, Dict, List, Optional, Tuple from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.evaluation.retrieval.metrics import resolve_metrics from llama_index.core.evaluation.retrieval.metrics_base import ( BaseRetrievalMetric, RetrievalMetricResult, ) from llama_index.core.llama_dataset.legacy.embedding import ( EmbeddingQAFinetuneDataset, ) class RetrievalEvalMode(str, Enum): """Evaluation of retrieval modality.""" TEXT = "text" IMAGE = "image" @classmethod def from_str(cls, label: str) -> "RetrievalEvalMode": if label == "text": return RetrievalEvalMode.TEXT elif label == "image": return RetrievalEvalMode.IMAGE else: raise NotImplementedError class RetrievalEvalResult(BaseModel): """Retrieval eval result. NOTE: this abstraction might change in the future. Attributes: query (str): Query string expected_ids (List[str]): Expected ids retrieved_ids (List[str]): Retrieved ids metric_dict (Dict[str, BaseRetrievalMetric]): \ Metric dictionary for the evaluation """ class Config: arbitrary_types_allowed = True query: str = Field(..., description="Query string") expected_ids: List[str] = Field(..., description="Expected ids") expected_texts: Optional[List[str]] = Field( default=None, description="Expected texts associated with nodes provided in `expected_ids`", ) retrieved_ids: List[str] = Field(..., description="Retrieved ids") retrieved_texts: List[str] = Field(..., description="Retrieved texts") mode: "RetrievalEvalMode" = Field( default=RetrievalEvalMode.TEXT, description="text or image" ) metric_dict: Dict[str, RetrievalMetricResult] = Field( ..., description="Metric dictionary for the evaluation" ) @property def metric_vals_dict(self) -> Dict[str, float]: """Dictionary of metric values.""" return {k: v.score for k, v in self.metric_dict.items()} def __str__(self) -> str: """String representation.""" return f"Query: {self.query}\n" f"Metrics: {self.metric_vals_dict!s}\n" class BaseRetrievalEvaluator(BaseModel): """Base Retrieval Evaluator class.""" metrics: List[BaseRetrievalMetric] = Field( ..., description="List of metrics to evaluate" ) class Config: arbitrary_types_allowed = True @classmethod def from_metric_names( cls, metric_names: List[str], **kwargs: Any ) -> "BaseRetrievalEvaluator": """Create evaluator from metric names. Args: metric_names (List[str]): List of metric names **kwargs: Additional arguments for the evaluator """ metric_types = resolve_metrics(metric_names) return cls(metrics=[metric() for metric in metric_types], **kwargs) @abstractmethod async def _aget_retrieved_ids_and_texts( self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT ) -> Tuple[List[str], List[str]]: """Get retrieved ids and texts.""" raise NotImplementedError def evaluate( self, query: str, expected_ids: List[str], expected_texts: Optional[List[str]] = None, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT, **kwargs: Any, ) -> RetrievalEvalResult: """Run evaluation results with query string and expected ids. 
Args: query (str): Query string expected_ids (List[str]): Expected ids Returns: RetrievalEvalResult: Evaluation result """ return asyncio.run( self.aevaluate( query=query, expected_ids=expected_ids, expected_texts=expected_texts, mode=mode, **kwargs, ) ) # @abstractmethod async def aevaluate( self, query: str, expected_ids: List[str], expected_texts: Optional[List[str]] = None, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT, **kwargs: Any, ) -> RetrievalEvalResult: """Run evaluation with query string, retrieved contexts, and generated response string. Subclasses can override this method to provide custom evaluation logic and take in additional arguments. """ retrieved_ids, retrieved_texts = await self._aget_retrieved_ids_and_texts( query, mode ) metric_dict = {} for metric in self.metrics: eval_result = metric.compute( query, expected_ids, retrieved_ids, expected_texts, retrieved_texts ) metric_dict[metric.metric_name] = eval_result return RetrievalEvalResult( query=query, expected_ids=expected_ids, expected_texts=expected_texts, retrieved_ids=retrieved_ids, retrieved_texts=retrieved_texts, mode=mode, metric_dict=metric_dict, ) async def aevaluate_dataset( self, dataset: EmbeddingQAFinetuneDataset, workers: int = 2, show_progress: bool = False, **kwargs: Any, ) -> List[RetrievalEvalResult]: """Run evaluation with dataset.""" semaphore = asyncio.Semaphore(workers) async def eval_worker( query: str, expected_ids: List[str], mode: RetrievalEvalMode ) -> RetrievalEvalResult: async with semaphore: return await self.aevaluate(query, expected_ids=expected_ids, mode=mode) response_jobs = [] mode = RetrievalEvalMode.from_str(dataset.mode) for query_id, query in dataset.queries.items(): expected_ids = dataset.relevant_docs[query_id] response_jobs.append(eval_worker(query, expected_ids, mode)) if show_progress: from tqdm.asyncio import tqdm_asyncio eval_results = await tqdm_asyncio.gather(*response_jobs) else: eval_results = await asyncio.gather(*response_jobs) return eval_results
[ "llama_index.core.evaluation.retrieval.metrics.resolve_metrics", "llama_index.core.bridge.pydantic.Field" ]
[((1364, 1402), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Query string"""'}), "(..., description='Query string')\n", (1369, 1402), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1433, 1471), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Expected ids"""'}), "(..., description='Expected ids')\n", (1438, 1471), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1514, 1617), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Expected texts associated with nodes provided in `expected_ids`"""'}), "(default=None, description=\n 'Expected texts associated with nodes provided in `expected_ids`')\n", (1519, 1617), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1667, 1706), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retrieved ids"""'}), "(..., description='Retrieved ids')\n", (1672, 1706), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1740, 1781), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retrieved texts"""'}), "(..., description='Retrieved texts')\n", (1745, 1781), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1814, 1880), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'RetrievalEvalMode.TEXT', 'description': '"""text or image"""'}), "(default=RetrievalEvalMode.TEXT, description='text or image')\n", (1819, 1880), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1947, 2009), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Metric dictionary for the evaluation"""'}), "(..., description='Metric dictionary for the evaluation')\n", (1952, 2009), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2474, 2527), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""List of metrics to evaluate"""'}), "(..., description='List of metrics to evaluate')\n", (2479, 2527), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2950, 2979), 'llama_index.core.evaluation.retrieval.metrics.resolve_metrics', 'resolve_metrics', (['metric_names'], {}), '(metric_names)\n', (2965, 2979), False, 'from llama_index.core.evaluation.retrieval.metrics import resolve_metrics\n'), ((5539, 5565), 'asyncio.Semaphore', 'asyncio.Semaphore', (['workers'], {}), '(workers)\n', (5556, 5565), False, 'import asyncio\n'), ((6210, 6245), 'tqdm.asyncio.tqdm_asyncio.gather', 'tqdm_asyncio.gather', (['*response_jobs'], {}), '(*response_jobs)\n', (6229, 6245), False, 'from tqdm.asyncio import tqdm_asyncio\n'), ((6293, 6323), 'asyncio.gather', 'asyncio.gather', (['*response_jobs'], {}), '(*response_jobs)\n', (6307, 6323), False, 'import asyncio\n')]
"""Base retrieval abstractions.""" import asyncio from abc import abstractmethod from enum import Enum from typing import Any, Dict, List, Optional, Tuple from llama_index.core.bridge.pydantic import BaseModel, Field from llama_index.core.evaluation.retrieval.metrics import resolve_metrics from llama_index.core.evaluation.retrieval.metrics_base import ( BaseRetrievalMetric, RetrievalMetricResult, ) from llama_index.core.llama_dataset.legacy.embedding import ( EmbeddingQAFinetuneDataset, ) class RetrievalEvalMode(str, Enum): """Evaluation of retrieval modality.""" TEXT = "text" IMAGE = "image" @classmethod def from_str(cls, label: str) -> "RetrievalEvalMode": if label == "text": return RetrievalEvalMode.TEXT elif label == "image": return RetrievalEvalMode.IMAGE else: raise NotImplementedError class RetrievalEvalResult(BaseModel): """Retrieval eval result. NOTE: this abstraction might change in the future. Attributes: query (str): Query string expected_ids (List[str]): Expected ids retrieved_ids (List[str]): Retrieved ids metric_dict (Dict[str, BaseRetrievalMetric]): \ Metric dictionary for the evaluation """ class Config: arbitrary_types_allowed = True query: str = Field(..., description="Query string") expected_ids: List[str] = Field(..., description="Expected ids") expected_texts: Optional[List[str]] = Field( default=None, description="Expected texts associated with nodes provided in `expected_ids`", ) retrieved_ids: List[str] = Field(..., description="Retrieved ids") retrieved_texts: List[str] = Field(..., description="Retrieved texts") mode: "RetrievalEvalMode" = Field( default=RetrievalEvalMode.TEXT, description="text or image" ) metric_dict: Dict[str, RetrievalMetricResult] = Field( ..., description="Metric dictionary for the evaluation" ) @property def metric_vals_dict(self) -> Dict[str, float]: """Dictionary of metric values.""" return {k: v.score for k, v in self.metric_dict.items()} def __str__(self) -> str: """String representation.""" return f"Query: {self.query}\n" f"Metrics: {self.metric_vals_dict!s}\n" class BaseRetrievalEvaluator(BaseModel): """Base Retrieval Evaluator class.""" metrics: List[BaseRetrievalMetric] = Field( ..., description="List of metrics to evaluate" ) class Config: arbitrary_types_allowed = True @classmethod def from_metric_names( cls, metric_names: List[str], **kwargs: Any ) -> "BaseRetrievalEvaluator": """Create evaluator from metric names. Args: metric_names (List[str]): List of metric names **kwargs: Additional arguments for the evaluator """ metric_types = resolve_metrics(metric_names) return cls(metrics=[metric() for metric in metric_types], **kwargs) @abstractmethod async def _aget_retrieved_ids_and_texts( self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT ) -> Tuple[List[str], List[str]]: """Get retrieved ids and texts.""" raise NotImplementedError def evaluate( self, query: str, expected_ids: List[str], expected_texts: Optional[List[str]] = None, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT, **kwargs: Any, ) -> RetrievalEvalResult: """Run evaluation results with query string and expected ids. 
Args: query (str): Query string expected_ids (List[str]): Expected ids Returns: RetrievalEvalResult: Evaluation result """ return asyncio.run( self.aevaluate( query=query, expected_ids=expected_ids, expected_texts=expected_texts, mode=mode, **kwargs, ) ) # @abstractmethod async def aevaluate( self, query: str, expected_ids: List[str], expected_texts: Optional[List[str]] = None, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT, **kwargs: Any, ) -> RetrievalEvalResult: """Run evaluation with query string, retrieved contexts, and generated response string. Subclasses can override this method to provide custom evaluation logic and take in additional arguments. """ retrieved_ids, retrieved_texts = await self._aget_retrieved_ids_and_texts( query, mode ) metric_dict = {} for metric in self.metrics: eval_result = metric.compute( query, expected_ids, retrieved_ids, expected_texts, retrieved_texts ) metric_dict[metric.metric_name] = eval_result return RetrievalEvalResult( query=query, expected_ids=expected_ids, expected_texts=expected_texts, retrieved_ids=retrieved_ids, retrieved_texts=retrieved_texts, mode=mode, metric_dict=metric_dict, ) async def aevaluate_dataset( self, dataset: EmbeddingQAFinetuneDataset, workers: int = 2, show_progress: bool = False, **kwargs: Any, ) -> List[RetrievalEvalResult]: """Run evaluation with dataset.""" semaphore = asyncio.Semaphore(workers) async def eval_worker( query: str, expected_ids: List[str], mode: RetrievalEvalMode ) -> RetrievalEvalResult: async with semaphore: return await self.aevaluate(query, expected_ids=expected_ids, mode=mode) response_jobs = [] mode = RetrievalEvalMode.from_str(dataset.mode) for query_id, query in dataset.queries.items(): expected_ids = dataset.relevant_docs[query_id] response_jobs.append(eval_worker(query, expected_ids, mode)) if show_progress: from tqdm.asyncio import tqdm_asyncio eval_results = await tqdm_asyncio.gather(*response_jobs) else: eval_results = await asyncio.gather(*response_jobs) return eval_results
[ "llama_index.core.evaluation.retrieval.metrics.resolve_metrics", "llama_index.core.bridge.pydantic.Field" ]
[((1364, 1402), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Query string"""'}), "(..., description='Query string')\n", (1369, 1402), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1433, 1471), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Expected ids"""'}), "(..., description='Expected ids')\n", (1438, 1471), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1514, 1617), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Expected texts associated with nodes provided in `expected_ids`"""'}), "(default=None, description=\n 'Expected texts associated with nodes provided in `expected_ids`')\n", (1519, 1617), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1667, 1706), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retrieved ids"""'}), "(..., description='Retrieved ids')\n", (1672, 1706), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1740, 1781), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retrieved texts"""'}), "(..., description='Retrieved texts')\n", (1745, 1781), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1814, 1880), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'RetrievalEvalMode.TEXT', 'description': '"""text or image"""'}), "(default=RetrievalEvalMode.TEXT, description='text or image')\n", (1819, 1880), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1947, 2009), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Metric dictionary for the evaluation"""'}), "(..., description='Metric dictionary for the evaluation')\n", (1952, 2009), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2474, 2527), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""List of metrics to evaluate"""'}), "(..., description='List of metrics to evaluate')\n", (2479, 2527), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2950, 2979), 'llama_index.core.evaluation.retrieval.metrics.resolve_metrics', 'resolve_metrics', (['metric_names'], {}), '(metric_names)\n', (2965, 2979), False, 'from llama_index.core.evaluation.retrieval.metrics import resolve_metrics\n'), ((5539, 5565), 'asyncio.Semaphore', 'asyncio.Semaphore', (['workers'], {}), '(workers)\n', (5556, 5565), False, 'import asyncio\n'), ((6210, 6245), 'tqdm.asyncio.tqdm_asyncio.gather', 'tqdm_asyncio.gather', (['*response_jobs'], {}), '(*response_jobs)\n', (6229, 6245), False, 'from tqdm.asyncio import tqdm_asyncio\n'), ((6293, 6323), 'asyncio.gather', 'asyncio.gather', (['*response_jobs'], {}), '(*response_jobs)\n', (6307, 6323), False, 'import asyncio\n')]
"""Code splitter.""" from typing import Any, Callable, List, Optional from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks.base import CallbackManager from llama_index.legacy.callbacks.schema import CBEventType, EventPayload from llama_index.legacy.node_parser.interface import TextSplitter from llama_index.legacy.node_parser.node_utils import default_id_func from llama_index.legacy.schema import Document DEFAULT_CHUNK_LINES = 40 DEFAULT_LINES_OVERLAP = 15 DEFAULT_MAX_CHARS = 1500 class CodeSplitter(TextSplitter): """Split code using a AST parser. Thank you to Kevin Lu / SweepAI for suggesting this elegant code splitting solution. https://docs.sweep.dev/blogs/chunking-2m-files """ language: str = Field( description="The programming language of the code being split." ) chunk_lines: int = Field( default=DEFAULT_CHUNK_LINES, description="The number of lines to include in each chunk.", gt=0, ) chunk_lines_overlap: int = Field( default=DEFAULT_LINES_OVERLAP, description="How many lines of code each chunk overlaps with.", gt=0, ) max_chars: int = Field( default=DEFAULT_MAX_CHARS, description="Maximum number of characters per chunk.", gt=0, ) _parser: Any = PrivateAttr() def __init__( self, language: str, chunk_lines: int = DEFAULT_CHUNK_LINES, chunk_lines_overlap: int = DEFAULT_LINES_OVERLAP, max_chars: int = DEFAULT_MAX_CHARS, parser: Any = None, callback_manager: Optional[CallbackManager] = None, include_metadata: bool = True, include_prev_next_rel: bool = True, id_func: Optional[Callable[[int, Document], str]] = None, ) -> None: """Initialize a CodeSplitter.""" from tree_sitter import Parser if parser is None: try: import tree_sitter_languages parser = tree_sitter_languages.get_parser(language) except ImportError: raise ImportError( "Please install tree_sitter_languages to use CodeSplitter." "Or pass in a parser object." ) except Exception: print( f"Could not get parser for language {language}. Check " "https://github.com/grantjenks/py-tree-sitter-languages#license " "for a list of valid languages." 
) raise if not isinstance(parser, Parser): raise ValueError("Parser must be a tree-sitter Parser object.") self._parser = parser callback_manager = callback_manager or CallbackManager([]) id_func = id_func or default_id_func super().__init__( language=language, chunk_lines=chunk_lines, chunk_lines_overlap=chunk_lines_overlap, max_chars=max_chars, callback_manager=callback_manager, include_metadata=include_metadata, include_prev_next_rel=include_prev_next_rel, id_func=id_func, ) @classmethod def from_defaults( cls, language: str, chunk_lines: int = DEFAULT_CHUNK_LINES, chunk_lines_overlap: int = DEFAULT_LINES_OVERLAP, max_chars: int = DEFAULT_MAX_CHARS, callback_manager: Optional[CallbackManager] = None, parser: Any = None, ) -> "CodeSplitter": """Create a CodeSplitter with default values.""" return cls( language=language, chunk_lines=chunk_lines, chunk_lines_overlap=chunk_lines_overlap, max_chars=max_chars, parser=parser, ) @classmethod def class_name(cls) -> str: return "CodeSplitter" def _chunk_node(self, node: Any, text: str, last_end: int = 0) -> List[str]: new_chunks = [] current_chunk = "" for child in node.children: if child.end_byte - child.start_byte > self.max_chars: # Child is too big, recursively chunk the child if len(current_chunk) > 0: new_chunks.append(current_chunk) current_chunk = "" new_chunks.extend(self._chunk_node(child, text, last_end)) elif ( len(current_chunk) + child.end_byte - child.start_byte > self.max_chars ): # Child would make the current chunk too big, so start a new chunk new_chunks.append(current_chunk) current_chunk = text[last_end : child.end_byte] else: current_chunk += text[last_end : child.end_byte] last_end = child.end_byte if len(current_chunk) > 0: new_chunks.append(current_chunk) return new_chunks def split_text(self, text: str) -> List[str]: """Split incoming code and return chunks using the AST.""" with self.callback_manager.event( CBEventType.CHUNKING, payload={EventPayload.CHUNKS: [text]} ) as event: tree = self._parser.parse(bytes(text, "utf-8")) if ( not tree.root_node.children or tree.root_node.children[0].type != "ERROR" ): chunks = [ chunk.strip() for chunk in self._chunk_node(tree.root_node, text) ] event.on_end( payload={EventPayload.CHUNKS: chunks}, ) return chunks else: raise ValueError(f"Could not parse code with language {self.language}.") # TODO: set up auto-language detection using something like https://github.com/yoeo/guesslang.
[ "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.callbacks.base.CallbackManager" ]
[((779, 849), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The programming language of the code being split."""'}), "(description='The programming language of the code being split.')\n", (784, 849), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((887, 993), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CHUNK_LINES', 'description': '"""The number of lines to include in each chunk."""', 'gt': '(0)'}), "(default=DEFAULT_CHUNK_LINES, description=\n 'The number of lines to include in each chunk.', gt=0)\n", (892, 993), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1051, 1162), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_LINES_OVERLAP', 'description': '"""How many lines of code each chunk overlaps with."""', 'gt': '(0)'}), "(default=DEFAULT_LINES_OVERLAP, description=\n 'How many lines of code each chunk overlaps with.', gt=0)\n", (1056, 1162), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1210, 1308), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_MAX_CHARS', 'description': '"""Maximum number of characters per chunk."""', 'gt': '(0)'}), "(default=DEFAULT_MAX_CHARS, description=\n 'Maximum number of characters per chunk.', gt=0)\n", (1215, 1308), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1354, 1367), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1365, 1367), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2786, 2805), 'llama_index.legacy.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2801, 2805), False, 'from llama_index.legacy.callbacks.base import CallbackManager\n'), ((2022, 2064), 'tree_sitter_languages.get_parser', 'tree_sitter_languages.get_parser', (['language'], {}), '(language)\n', (2054, 2064), False, 'import tree_sitter_languages\n')]
import asyncio from llama_index.core.llama_dataset import download_llama_dataset from llama_index.core.llama_pack import download_llama_pack from llama_index.core import VectorStoreIndex from llama_index.llms import OpenAI async def main(): # DOWNLOAD LLAMADATASET rag_dataset, documents = download_llama_dataset( "DocugamiKgRagSec10Q", "./docugami_kg_rag_sec_10_q" ) # BUILD BASIC RAG PIPELINE index = VectorStoreIndex.from_documents(documents=documents) query_engine = index.as_query_engine() # EVALUATE WITH PACK RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack_stuff") judge_llm = OpenAI(model="gpt-3.5-turbo") rag_evaluator = RagEvaluatorPack( query_engine=query_engine, rag_dataset=rag_dataset, judge_llm=judge_llm ) ############################################################################ # NOTE: If have a lower tier subscription for OpenAI API like Usage Tier 1 # # then you'll need to use different batch_size and sleep_time_in_seconds. # # For Usage Tier 1, settings that seemed to work well were batch_size=5, # # and sleep_time_in_seconds=15 (as of December 2023.) # ############################################################################ benchmark_df = await rag_evaluator.arun( batch_size=20, # batches the number of openai api calls to make sleep_time_in_seconds=1, # number of seconds sleep before making an api call ) print(benchmark_df) if __name__ == "__main__": loop = asyncio.get_event_loop() loop.run_until_complete(main)
[ "llama_index.core.llama_dataset.download_llama_dataset", "llama_index.core.llama_pack.download_llama_pack", "llama_index.core.VectorStoreIndex.from_documents", "llama_index.llms.OpenAI" ]
[((301, 376), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""DocugamiKgRagSec10Q"""', '"""./docugami_kg_rag_sec_10_q"""'], {}), "('DocugamiKgRagSec10Q', './docugami_kg_rag_sec_10_q')\n", (323, 376), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((435, 487), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (466, 487), False, 'from llama_index.core import VectorStoreIndex\n'), ((580, 635), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack_stuff"""'], {}), "('RagEvaluatorPack', './pack_stuff')\n", (599, 635), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((652, 681), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (658, 681), False, 'from llama_index.llms import OpenAI\n'), ((1567, 1591), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1589, 1591), False, 'import asyncio\n')]
"""Table node mapping.""" from typing import Any, Dict, Optional, Sequence from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.objects.base_node_mapping import ( DEFAULT_PERSIST_DIR, DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, ) from llama_index.core.schema import BaseNode, TextNode from llama_index.core.utilities.sql_wrapper import SQLDatabase class SQLTableSchema(BaseModel): """Lightweight representation of a SQL table.""" table_name: str context_str: Optional[str] = None class SQLTableNodeMapping(BaseObjectNodeMapping[SQLTableSchema]): """SQL Table node mapping.""" def __init__(self, sql_database: SQLDatabase) -> None: self._sql_database = sql_database @classmethod def from_objects( cls, objs: Sequence[SQLTableSchema], *args: Any, sql_database: Optional[SQLDatabase] = None, **kwargs: Any, ) -> "BaseObjectNodeMapping": """Initialize node mapping.""" if sql_database is None: raise ValueError("Must provide sql_database") # ignore objs, since we are building from sql_database return cls(sql_database) def _add_object(self, obj: SQLTableSchema) -> None: raise NotImplementedError def to_node(self, obj: SQLTableSchema) -> TextNode: """To node.""" # taken from existing schema logic table_text = ( f"Schema of table {obj.table_name}:\n" f"{self._sql_database.get_single_table_info(obj.table_name)}\n" ) metadata = {"name": obj.table_name} if obj.context_str is not None: table_text += f"Context of table {obj.table_name}:\n" table_text += obj.context_str metadata["context"] = obj.context_str return TextNode( text=table_text, metadata=metadata, excluded_embed_metadata_keys=["name", "context"], excluded_llm_metadata_keys=["name", "context"], ) def _from_node(self, node: BaseNode) -> SQLTableSchema: """From node.""" if node.metadata is None: raise ValueError("Metadata must be set") return SQLTableSchema( table_name=node.metadata["name"], context_str=node.metadata.get("context") ) @property def obj_node_mapping(self) -> Dict[int, Any]: """The mapping data structure between node and object.""" raise NotImplementedError("Subclasses should implement this!") def persist( self, persist_dir: str = ..., obj_node_mapping_fname: str = ... ) -> None: """Persist objs.""" raise NotImplementedError("Subclasses should implement this!") @classmethod def from_persist_dir( cls, persist_dir: str = DEFAULT_PERSIST_DIR, obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME, ) -> "SQLTableNodeMapping": raise NotImplementedError( "This object node mapping does not support persist method." )
[ "llama_index.core.schema.TextNode" ]
[((1821, 1968), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'table_text', 'metadata': 'metadata', 'excluded_embed_metadata_keys': "['name', 'context']", 'excluded_llm_metadata_keys': "['name', 'context']"}), "(text=table_text, metadata=metadata, excluded_embed_metadata_keys=[\n 'name', 'context'], excluded_llm_metadata_keys=['name', 'context'])\n", (1829, 1968), False, 'from llama_index.core.schema import BaseNode, TextNode\n')]
"""Table node mapping.""" from typing import Any, Dict, Optional, Sequence from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.objects.base_node_mapping import ( DEFAULT_PERSIST_DIR, DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, ) from llama_index.core.schema import BaseNode, TextNode from llama_index.core.utilities.sql_wrapper import SQLDatabase class SQLTableSchema(BaseModel): """Lightweight representation of a SQL table.""" table_name: str context_str: Optional[str] = None class SQLTableNodeMapping(BaseObjectNodeMapping[SQLTableSchema]): """SQL Table node mapping.""" def __init__(self, sql_database: SQLDatabase) -> None: self._sql_database = sql_database @classmethod def from_objects( cls, objs: Sequence[SQLTableSchema], *args: Any, sql_database: Optional[SQLDatabase] = None, **kwargs: Any, ) -> "BaseObjectNodeMapping": """Initialize node mapping.""" if sql_database is None: raise ValueError("Must provide sql_database") # ignore objs, since we are building from sql_database return cls(sql_database) def _add_object(self, obj: SQLTableSchema) -> None: raise NotImplementedError def to_node(self, obj: SQLTableSchema) -> TextNode: """To node.""" # taken from existing schema logic table_text = ( f"Schema of table {obj.table_name}:\n" f"{self._sql_database.get_single_table_info(obj.table_name)}\n" ) metadata = {"name": obj.table_name} if obj.context_str is not None: table_text += f"Context of table {obj.table_name}:\n" table_text += obj.context_str metadata["context"] = obj.context_str return TextNode( text=table_text, metadata=metadata, excluded_embed_metadata_keys=["name", "context"], excluded_llm_metadata_keys=["name", "context"], ) def _from_node(self, node: BaseNode) -> SQLTableSchema: """From node.""" if node.metadata is None: raise ValueError("Metadata must be set") return SQLTableSchema( table_name=node.metadata["name"], context_str=node.metadata.get("context") ) @property def obj_node_mapping(self) -> Dict[int, Any]: """The mapping data structure between node and object.""" raise NotImplementedError("Subclasses should implement this!") def persist( self, persist_dir: str = ..., obj_node_mapping_fname: str = ... ) -> None: """Persist objs.""" raise NotImplementedError("Subclasses should implement this!") @classmethod def from_persist_dir( cls, persist_dir: str = DEFAULT_PERSIST_DIR, obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME, ) -> "SQLTableNodeMapping": raise NotImplementedError( "This object node mapping does not support persist method." )
[ "llama_index.core.schema.TextNode" ]
[((1821, 1968), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'table_text', 'metadata': 'metadata', 'excluded_embed_metadata_keys': "['name', 'context']", 'excluded_llm_metadata_keys': "['name', 'context']"}), "(text=table_text, metadata=metadata, excluded_embed_metadata_keys=[\n 'name', 'context'], excluded_llm_metadata_keys=['name', 'context'])\n", (1829, 1968), False, 'from llama_index.core.schema import BaseNode, TextNode\n')]
"""Table node mapping.""" from typing import Any, Dict, Optional, Sequence from llama_index.core.bridge.pydantic import BaseModel from llama_index.core.objects.base_node_mapping import ( DEFAULT_PERSIST_DIR, DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, ) from llama_index.core.schema import BaseNode, TextNode from llama_index.core.utilities.sql_wrapper import SQLDatabase class SQLTableSchema(BaseModel): """Lightweight representation of a SQL table.""" table_name: str context_str: Optional[str] = None class SQLTableNodeMapping(BaseObjectNodeMapping[SQLTableSchema]): """SQL Table node mapping.""" def __init__(self, sql_database: SQLDatabase) -> None: self._sql_database = sql_database @classmethod def from_objects( cls, objs: Sequence[SQLTableSchema], *args: Any, sql_database: Optional[SQLDatabase] = None, **kwargs: Any, ) -> "BaseObjectNodeMapping": """Initialize node mapping.""" if sql_database is None: raise ValueError("Must provide sql_database") # ignore objs, since we are building from sql_database return cls(sql_database) def _add_object(self, obj: SQLTableSchema) -> None: raise NotImplementedError def to_node(self, obj: SQLTableSchema) -> TextNode: """To node.""" # taken from existing schema logic table_text = ( f"Schema of table {obj.table_name}:\n" f"{self._sql_database.get_single_table_info(obj.table_name)}\n" ) metadata = {"name": obj.table_name} if obj.context_str is not None: table_text += f"Context of table {obj.table_name}:\n" table_text += obj.context_str metadata["context"] = obj.context_str return TextNode( text=table_text, metadata=metadata, excluded_embed_metadata_keys=["name", "context"], excluded_llm_metadata_keys=["name", "context"], ) def _from_node(self, node: BaseNode) -> SQLTableSchema: """From node.""" if node.metadata is None: raise ValueError("Metadata must be set") return SQLTableSchema( table_name=node.metadata["name"], context_str=node.metadata.get("context") ) @property def obj_node_mapping(self) -> Dict[int, Any]: """The mapping data structure between node and object.""" raise NotImplementedError("Subclasses should implement this!") def persist( self, persist_dir: str = ..., obj_node_mapping_fname: str = ... ) -> None: """Persist objs.""" raise NotImplementedError("Subclasses should implement this!") @classmethod def from_persist_dir( cls, persist_dir: str = DEFAULT_PERSIST_DIR, obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME, ) -> "SQLTableNodeMapping": raise NotImplementedError( "This object node mapping does not support persist method." )
[ "llama_index.core.schema.TextNode" ]
[((1821, 1968), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'table_text', 'metadata': 'metadata', 'excluded_embed_metadata_keys': "['name', 'context']", 'excluded_llm_metadata_keys': "['name', 'context']"}), "(text=table_text, metadata=metadata, excluded_embed_metadata_keys=[\n 'name', 'context'], excluded_llm_metadata_keys=['name', 'context'])\n", (1829, 1968), False, 'from llama_index.core.schema import BaseNode, TextNode\n')]
"""Base query engine.""" import logging from abc import abstractmethod from typing import Any, Dict, List, Optional, Sequence from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.callbacks.base import CallbackManager from llama_index.legacy.core.query_pipeline.query_component import ( ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable, ) from llama_index.legacy.core.response.schema import RESPONSE_TYPE from llama_index.legacy.prompts.mixin import PromptDictType, PromptMixin from llama_index.legacy.schema import NodeWithScore, QueryBundle, QueryType logger = logging.getLogger(__name__) class BaseQueryEngine(ChainableMixin, PromptMixin): """Base query engine.""" def __init__(self, callback_manager: Optional[CallbackManager]) -> None: self.callback_manager = callback_manager or CallbackManager([]) def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" def query(self, str_or_query_bundle: QueryType) -> RESPONSE_TYPE: with self.callback_manager.as_trace("query"): if isinstance(str_or_query_bundle, str): str_or_query_bundle = QueryBundle(str_or_query_bundle) return self._query(str_or_query_bundle) async def aquery(self, str_or_query_bundle: QueryType) -> RESPONSE_TYPE: with self.callback_manager.as_trace("query"): if isinstance(str_or_query_bundle, str): str_or_query_bundle = QueryBundle(str_or_query_bundle) return await self._aquery(str_or_query_bundle) def retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]: raise NotImplementedError( "This query engine does not support retrieve, use query directly" ) def synthesize( self, query_bundle: QueryBundle, nodes: List[NodeWithScore], additional_source_nodes: Optional[Sequence[NodeWithScore]] = None, ) -> RESPONSE_TYPE: raise NotImplementedError( "This query engine does not support synthesize, use query directly" ) async def asynthesize( self, query_bundle: QueryBundle, nodes: List[NodeWithScore], additional_source_nodes: Optional[Sequence[NodeWithScore]] = None, ) -> RESPONSE_TYPE: raise NotImplementedError( "This query engine does not support asynthesize, use aquery directly" ) @abstractmethod def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: pass @abstractmethod async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE: pass def _as_query_component(self, **kwargs: Any) -> QueryComponent: """Return a query component.""" return QueryEngineComponent(query_engine=self) class QueryEngineComponent(QueryComponent): """Query engine component.""" query_engine: BaseQueryEngine = Field(..., description="Query engine") class Config: arbitrary_types_allowed = True def set_callback_manager(self, callback_manager: CallbackManager) -> None: """Set callback manager.""" self.query_engine.callback_manager = callback_manager def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]: """Validate component inputs during run_component.""" # make sure input is a string input["input"] = validate_and_convert_stringable(input["input"]) return input def _run_component(self, **kwargs: Any) -> Any: """Run component.""" output = self.query_engine.query(kwargs["input"]) return {"output": output} async def _arun_component(self, **kwargs: Any) -> Any: """Run component.""" output = await self.query_engine.aquery(kwargs["input"]) return {"output": output} @property def input_keys(self) -> InputKeys: """Input keys.""" return InputKeys.from_keys({"input"}) @property def 
output_keys(self) -> OutputKeys: """Output keys.""" return OutputKeys.from_keys({"output"})
[ "llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys", "llama_index.legacy.callbacks.base.CallbackManager", "llama_index.legacy.schema.QueryBundle", "llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable" ]
[((647, 674), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (664, 674), False, 'import logging\n'), ((3066, 3104), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Query engine"""'}), "(..., description='Query engine')\n", (3071, 3104), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3550, 3597), 'llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['input']"], {}), "(input['input'])\n", (3581, 3597), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((4076, 4106), 'llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys', 'InputKeys.from_keys', (["{'input'}"], {}), "({'input'})\n", (4095, 4106), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((4205, 4237), 'llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (4225, 4237), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((888, 907), 'llama_index.legacy.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (903, 907), False, 'from llama_index.legacy.callbacks.base import CallbackManager\n'), ((1311, 1343), 'llama_index.legacy.schema.QueryBundle', 'QueryBundle', (['str_or_query_bundle'], {}), '(str_or_query_bundle)\n', (1322, 1343), False, 'from llama_index.legacy.schema import NodeWithScore, QueryBundle, QueryType\n'), ((1619, 1651), 'llama_index.legacy.schema.QueryBundle', 'QueryBundle', (['str_or_query_bundle'], {}), '(str_or_query_bundle)\n', (1630, 1651), False, 'from llama_index.legacy.schema import NodeWithScore, QueryBundle, QueryType\n')]
import os from shutil import rmtree from typing import Callable, Dict, List, Optional import tqdm from llama_index.core.base.base_retriever import BaseRetriever from llama_index.core.postprocessor.types import BaseNodePostprocessor from llama_index.core.schema import Document, QueryBundle from llama_index.core.utils import get_cache_dir class BeirEvaluator: """ Refer to: https://github.com/beir-cellar/beir for a full list of supported datasets and a full description of BEIR. """ def __init__(self) -> None: try: pass except ImportError: raise ImportError( "Please install beir to use this feature: " "`pip install beir`", ) def _download_datasets(self, datasets: List[str] = ["nfcorpus"]) -> Dict[str, str]: from beir import util cache_dir = get_cache_dir() dataset_paths = {} for dataset in datasets: dataset_full_path = os.path.join(cache_dir, "datasets", "BeIR__" + dataset) if not os.path.exists(dataset_full_path): url = f"""https://public.ukp.informatik.tu-darmstadt.de/thakur\ /BEIR/datasets/{dataset}.zip""" try: util.download_and_unzip(url, dataset_full_path) except Exception as e: print( "Dataset:", dataset, "not found at:", url, "Removing cached dir" ) rmtree(dataset_full_path) raise ValueError(f"invalid BEIR dataset: {dataset}") from e print("Dataset:", dataset, "downloaded at:", dataset_full_path) dataset_paths[dataset] = os.path.join(dataset_full_path, dataset) return dataset_paths def run( self, create_retriever: Callable[[List[Document]], BaseRetriever], datasets: List[str] = ["nfcorpus"], metrics_k_values: List[int] = [3, 10], node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, ) -> None: from beir.datasets.data_loader import GenericDataLoader from beir.retrieval.evaluation import EvaluateRetrieval dataset_paths = self._download_datasets(datasets) for dataset in datasets: dataset_path = dataset_paths[dataset] print("Evaluating on dataset:", dataset) print("-------------------------------------") corpus, queries, qrels = GenericDataLoader(data_folder=dataset_path).load( split="test" ) documents = [] for id, val in corpus.items(): doc = Document( text=val["text"], metadata={"title": val["title"], "doc_id": id} ) documents.append(doc) retriever = create_retriever(documents) print("Retriever created for: ", dataset) print("Evaluating retriever on questions against qrels") results = {} for key, query in tqdm.tqdm(queries.items()): nodes_with_score = retriever.retrieve(query) node_postprocessors = node_postprocessors or [] for node_postprocessor in node_postprocessors: nodes_with_score = node_postprocessor.postprocess_nodes( nodes_with_score, query_bundle=QueryBundle(query_str=query) ) results[key] = { node.node.metadata["doc_id"]: node.score for node in nodes_with_score } ndcg, map_, recall, precision = EvaluateRetrieval.evaluate( qrels, results, metrics_k_values ) print("Results for:", dataset) for k in metrics_k_values: print( { f"NDCG@{k}": ndcg[f"NDCG@{k}"], f"MAP@{k}": map_[f"MAP@{k}"], f"Recall@{k}": recall[f"Recall@{k}"], f"precision@{k}": precision[f"P@{k}"], } ) print("-------------------------------------")
[ "llama_index.core.utils.get_cache_dir", "llama_index.core.schema.QueryBundle", "llama_index.core.schema.Document" ]
[((861, 876), 'llama_index.core.utils.get_cache_dir', 'get_cache_dir', ([], {}), '()\n', (874, 876), False, 'from llama_index.core.utils import get_cache_dir\n'), ((970, 1025), 'os.path.join', 'os.path.join', (['cache_dir', '"""datasets"""', "('BeIR__' + dataset)"], {}), "(cache_dir, 'datasets', 'BeIR__' + dataset)\n", (982, 1025), False, 'import os\n'), ((1698, 1738), 'os.path.join', 'os.path.join', (['dataset_full_path', 'dataset'], {}), '(dataset_full_path, dataset)\n', (1710, 1738), False, 'import os\n'), ((3642, 3702), 'beir.retrieval.evaluation.EvaluateRetrieval.evaluate', 'EvaluateRetrieval.evaluate', (['qrels', 'results', 'metrics_k_values'], {}), '(qrels, results, metrics_k_values)\n', (3668, 3702), False, 'from beir.retrieval.evaluation import EvaluateRetrieval\n'), ((1045, 1078), 'os.path.exists', 'os.path.exists', (['dataset_full_path'], {}), '(dataset_full_path)\n', (1059, 1078), False, 'import os\n'), ((2652, 2726), 'llama_index.core.schema.Document', 'Document', ([], {'text': "val['text']", 'metadata': "{'title': val['title'], 'doc_id': id}"}), "(text=val['text'], metadata={'title': val['title'], 'doc_id': id})\n", (2660, 2726), False, 'from llama_index.core.schema import Document, QueryBundle\n'), ((1233, 1280), 'beir.util.download_and_unzip', 'util.download_and_unzip', (['url', 'dataset_full_path'], {}), '(url, dataset_full_path)\n', (1256, 1280), False, 'from beir import util\n'), ((2466, 2509), 'beir.datasets.data_loader.GenericDataLoader', 'GenericDataLoader', ([], {'data_folder': 'dataset_path'}), '(data_folder=dataset_path)\n', (2483, 2509), False, 'from beir.datasets.data_loader import GenericDataLoader\n'), ((1478, 1503), 'shutil.rmtree', 'rmtree', (['dataset_full_path'], {}), '(dataset_full_path)\n', (1484, 1503), False, 'from shutil import rmtree\n'), ((3385, 3413), 'llama_index.core.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (3396, 3413), False, 'from llama_index.core.schema import Document, QueryBundle\n')]
import logging from typing import Any, Dict, Generator, List, Optional, Tuple, Type, Union, cast from llama_index.legacy.agent.openai.utils import resolve_tool_choice from llama_index.legacy.llms.llm import LLM from llama_index.legacy.llms.openai import OpenAI from llama_index.legacy.llms.openai_utils import OpenAIToolCall, to_openai_tool from llama_index.legacy.program.llm_prompt_program import BaseLLMFunctionProgram from llama_index.legacy.program.utils import create_list_model from llama_index.legacy.prompts.base import BasePromptTemplate, PromptTemplate from llama_index.legacy.types import Model _logger = logging.getLogger(__name__) def _default_tool_choice( output_cls: Type[Model], allow_multiple: bool = False ) -> Union[str, Dict[str, Any]]: """Default OpenAI tool to choose.""" if allow_multiple: return "auto" else: schema = output_cls.schema() return resolve_tool_choice(schema["title"]) def _get_json_str(raw_str: str, start_idx: int) -> Tuple[Optional[str], int]: """Extract JSON str from raw string and start index.""" raw_str = raw_str[start_idx:] stack_count = 0 for i, c in enumerate(raw_str): if c == "{": stack_count += 1 if c == "}": stack_count -= 1 if stack_count == 0: return raw_str[: i + 1], i + 2 + start_idx return None, start_idx def _parse_tool_calls( tool_calls: List[OpenAIToolCall], output_cls: Type[Model], allow_multiple: bool = False, verbose: bool = False, ) -> Union[Model, List[Model]]: outputs = [] for tool_call in tool_calls: function_call = tool_call.function # validations to get passed mypy assert function_call is not None assert function_call.name is not None assert function_call.arguments is not None if verbose: name = function_call.name arguments_str = function_call.arguments print(f"Function call: {name} with args: {arguments_str}") if isinstance(function_call.arguments, dict): output = output_cls.parse_obj(function_call.arguments) else: output = output_cls.parse_raw(function_call.arguments) outputs.append(output) if allow_multiple: return outputs else: if len(outputs) > 1: _logger.warning( "Multiple outputs found, returning first one. " "If you want to return all outputs, set output_multiple=True." ) return outputs[0] class OpenAIPydanticProgram(BaseLLMFunctionProgram[LLM]): """ An OpenAI-based function that returns a pydantic model. Note: this interface is not yet stable. """ def __init__( self, output_cls: Type[Model], llm: LLM, prompt: BasePromptTemplate, tool_choice: Union[str, Dict[str, Any]], allow_multiple: bool = False, verbose: bool = False, ) -> None: """Init params.""" self._output_cls = output_cls self._llm = llm self._prompt = prompt self._verbose = verbose self._allow_multiple = allow_multiple self._tool_choice = tool_choice @classmethod def from_defaults( cls, output_cls: Type[Model], prompt_template_str: Optional[str] = None, prompt: Optional[PromptTemplate] = None, llm: Optional[LLM] = None, verbose: bool = False, allow_multiple: bool = False, tool_choice: Optional[Union[str, Dict[str, Any]]] = None, **kwargs: Any, ) -> "OpenAIPydanticProgram": llm = llm or OpenAI(model="gpt-3.5-turbo-0613") if not isinstance(llm, OpenAI): raise ValueError( "OpenAIPydanticProgram only supports OpenAI LLMs. " f"Got: {type(llm)}" ) if not llm.metadata.is_function_calling_model: raise ValueError( f"Model name {llm.metadata.model_name} does not support " "function calling API. 
" ) if prompt is None and prompt_template_str is None: raise ValueError("Must provide either prompt or prompt_template_str.") if prompt is not None and prompt_template_str is not None: raise ValueError("Must provide either prompt or prompt_template_str.") if prompt_template_str is not None: prompt = PromptTemplate(prompt_template_str) tool_choice = tool_choice or _default_tool_choice(output_cls, allow_multiple) return cls( output_cls=output_cls, llm=llm, prompt=cast(PromptTemplate, prompt), tool_choice=tool_choice, allow_multiple=allow_multiple, verbose=verbose, ) @property def output_cls(self) -> Type[Model]: return self._output_cls @property def prompt(self) -> BasePromptTemplate: return self._prompt @prompt.setter def prompt(self, prompt: BasePromptTemplate) -> None: self._prompt = prompt def __call__( self, llm_kwargs: Optional[Dict[str, Any]] = None, *args: Any, **kwargs: Any, ) -> Union[Model, List[Model]]: llm_kwargs = llm_kwargs or {} description = self._description_eval(**kwargs) openai_fn_spec = to_openai_tool(self._output_cls, description=description) messages = self._prompt.format_messages(llm=self._llm, **kwargs) chat_response = self._llm.chat( messages=messages, tools=[openai_fn_spec], tool_choice=self._tool_choice, **llm_kwargs, ) message = chat_response.message if "tool_calls" not in message.additional_kwargs: raise ValueError( "Expected tool_calls in ai_message.additional_kwargs, " "but none found." ) tool_calls = message.additional_kwargs["tool_calls"] return _parse_tool_calls( tool_calls, output_cls=self.output_cls, allow_multiple=self._allow_multiple, verbose=self._verbose, ) async def acall( self, llm_kwargs: Optional[Dict[str, Any]] = None, *args: Any, **kwargs: Any, ) -> Union[Model, List[Model]]: llm_kwargs = llm_kwargs or {} description = self._description_eval(**kwargs) openai_fn_spec = to_openai_tool(self._output_cls, description=description) messages = self._prompt.format_messages(llm=self._llm, **kwargs) chat_response = await self._llm.achat( messages=messages, tools=[openai_fn_spec], tool_choice=self._tool_choice, **llm_kwargs, ) message = chat_response.message if "tool_calls" not in message.additional_kwargs: raise ValueError( "Expected function call in ai_message.additional_kwargs, " "but none found." 
) tool_calls = message.additional_kwargs["tool_calls"] return _parse_tool_calls( tool_calls, output_cls=self.output_cls, allow_multiple=self._allow_multiple, verbose=self._verbose, ) def stream_list( self, llm_kwargs: Optional[Dict[str, Any]] = None, *args: Any, **kwargs: Any, ) -> Generator[Model, None, None]: """Streams a list of objects.""" llm_kwargs = llm_kwargs or {} messages = self._prompt.format_messages(llm=self._llm, **kwargs) description = self._description_eval(**kwargs) list_output_cls = create_list_model(self._output_cls) openai_fn_spec = to_openai_tool(list_output_cls, description=description) chat_response_gen = self._llm.stream_chat( messages=messages, tools=[openai_fn_spec], tool_choice=_default_tool_choice(list_output_cls), **llm_kwargs, ) # extract function call arguments # obj_start_idx finds start position (before a new "{" in JSON) obj_start_idx: int = -1 # NOTE: uninitialized for stream_resp in chat_response_gen: kwargs = stream_resp.message.additional_kwargs tool_calls = kwargs["tool_calls"] if len(tool_calls) == 0: continue # NOTE: right now assume only one tool call # TODO: handle parallel tool calls in streaming setting fn_args = kwargs["tool_calls"][0].function.arguments # this is inspired by `get_object` from `MultiTaskBase` in # the openai_function_call repo if fn_args.find("[") != -1: if obj_start_idx == -1: obj_start_idx = fn_args.find("[") + 1 else: # keep going until we find the start position continue new_obj_json_str, obj_start_idx = _get_json_str(fn_args, obj_start_idx) if new_obj_json_str is not None: obj_json_str = new_obj_json_str obj = self._output_cls.parse_raw(obj_json_str) if self._verbose: print(f"Extracted object: {obj.json()}") yield obj def _description_eval(self, **kwargs: Any) -> Optional[str]: description = kwargs.get("description", None) ## __doc__ checks if docstring is provided in the Pydantic Model if not (self._output_cls.__doc__ or description): raise ValueError( "Must provide description for your Pydantic Model. Either provide a docstring or add `description=<your_description>` to the method. Required to convert Pydantic Model to OpenAI Function." ) ## If both docstring and description are provided, raise error if self._output_cls.__doc__ and description: raise ValueError( "Must provide either a docstring or a description, not both." ) return description
[ "llama_index.legacy.agent.openai.utils.resolve_tool_choice", "llama_index.legacy.llms.openai_utils.to_openai_tool", "llama_index.legacy.llms.openai.OpenAI", "llama_index.legacy.prompts.base.PromptTemplate", "llama_index.legacy.program.utils.create_list_model" ]
[((619, 646), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (636, 646), False, 'import logging\n'), ((914, 950), 'llama_index.legacy.agent.openai.utils.resolve_tool_choice', 'resolve_tool_choice', (["schema['title']"], {}), "(schema['title'])\n", (933, 950), False, 'from llama_index.legacy.agent.openai.utils import resolve_tool_choice\n'), ((5395, 5452), 'llama_index.legacy.llms.openai_utils.to_openai_tool', 'to_openai_tool', (['self._output_cls'], {'description': 'description'}), '(self._output_cls, description=description)\n', (5409, 5452), False, 'from llama_index.legacy.llms.openai_utils import OpenAIToolCall, to_openai_tool\n'), ((6503, 6560), 'llama_index.legacy.llms.openai_utils.to_openai_tool', 'to_openai_tool', (['self._output_cls'], {'description': 'description'}), '(self._output_cls, description=description)\n', (6517, 6560), False, 'from llama_index.legacy.llms.openai_utils import OpenAIToolCall, to_openai_tool\n'), ((7740, 7775), 'llama_index.legacy.program.utils.create_list_model', 'create_list_model', (['self._output_cls'], {}), '(self._output_cls)\n', (7757, 7775), False, 'from llama_index.legacy.program.utils import create_list_model\n'), ((7801, 7857), 'llama_index.legacy.llms.openai_utils.to_openai_tool', 'to_openai_tool', (['list_output_cls'], {'description': 'description'}), '(list_output_cls, description=description)\n', (7815, 7857), False, 'from llama_index.legacy.llms.openai_utils import OpenAIToolCall, to_openai_tool\n'), ((3679, 3713), 'llama_index.legacy.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo-0613"""'}), "(model='gpt-3.5-turbo-0613')\n", (3685, 3713), False, 'from llama_index.legacy.llms.openai import OpenAI\n'), ((4460, 4495), 'llama_index.legacy.prompts.base.PromptTemplate', 'PromptTemplate', (['prompt_template_str'], {}), '(prompt_template_str)\n', (4474, 4495), False, 'from llama_index.legacy.prompts.base import BasePromptTemplate, PromptTemplate\n'), ((4679, 4707), 'typing.cast', 'cast', (['PromptTemplate', 'prompt'], {}), '(PromptTemplate, prompt)\n', (4683, 4707), False, 'from typing import Any, Dict, Generator, List, Optional, Tuple, Type, Union, cast\n')]
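For readers skimming this entry, a minimal usage sketch of the OpenAIPydanticProgram class defined in the code field above. The import path of the class, the Song model, and the prompt string are illustrative assumptions (not part of the dataset row), and a valid OPENAI_API_KEY is assumed to be set in the environment:

# Hypothetical usage sketch; the export path and the Song model are assumptions, not part of the dataset row.
from pydantic import BaseModel

from llama_index.legacy.llms.openai import OpenAI
from llama_index.legacy.program import OpenAIPydanticProgram  # assumed export path


class Song(BaseModel):
    """A song, with a title and a length in seconds."""  # docstring is required by _description_eval

    title: str
    length_seconds: int


# from_defaults wraps an OpenAI LLM, builds the prompt template, and resolves the default tool_choice.
program = OpenAIPydanticProgram.from_defaults(
    output_cls=Song,
    prompt_template_str="Write a plausible song about {topic}.",
    llm=OpenAI(model="gpt-3.5-turbo-0613"),  # must be a function-calling-capable OpenAI model
    verbose=True,
)

# __call__ formats the prompt with the keyword arguments, sends a chat request with the
# generated OpenAI tool spec, and parses the returned tool_calls into a Song instance.
song = program(topic="the sea")
print(song.title, song.length_seconds)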
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast import httpx from openai import AsyncOpenAI from openai import OpenAI as SyncOpenAI from openai.types.chat import ChatCompletionMessageParam from openai.types.chat.chat_completion_chunk import ( ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall, ) from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.constants import ( DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE, ) from llama_index.legacy.core.llms.types import ( ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole, ) from llama_index.legacy.llms.generic_utils import ( messages_to_prompt as generic_messages_to_prompt, ) from llama_index.legacy.llms.openai_utils import ( from_openai_message, resolve_openai_credentials, to_openai_message_dicts, ) from llama_index.legacy.multi_modal_llms import ( MultiModalLLM, MultiModalLLMMetadata, ) from llama_index.legacy.multi_modal_llms.openai_utils import ( GPT4V_MODELS, generate_openai_multi_modal_chat_message, ) from llama_index.legacy.schema import ImageDocument class OpenAIMultiModal(MultiModalLLM): model: str = Field(description="The Multi-Modal model to use from OpenAI.") temperature: float = Field(description="The temperature to use for sampling.") max_new_tokens: Optional[int] = Field( description=" The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt", gt=0, ) context_window: Optional[int] = Field( description="The maximum number of context tokens for the model.", gt=0, ) image_detail: str = Field( description="The level of details for image in API calls. Can be low, high, or auto" ) max_retries: int = Field( default=3, description="Maximum number of retries.", gte=0, ) timeout: float = Field( default=60.0, description="The timeout, in seconds, for API requests.", gte=0, ) api_key: str = Field(default=None, description="The OpenAI API key.", exclude=True) api_base: str = Field(default=None, description="The base URL for OpenAI API.") api_version: str = Field(description="The API version for OpenAI API.") additional_kwargs: Dict[str, Any] = Field( default_factory=dict, description="Additional kwargs for the OpenAI API." ) default_headers: Dict[str, str] = Field( default=None, description="The default headers for API requests." 
) _messages_to_prompt: Callable = PrivateAttr() _completion_to_prompt: Callable = PrivateAttr() _client: SyncOpenAI = PrivateAttr() _aclient: AsyncOpenAI = PrivateAttr() _http_client: Optional[httpx.Client] = PrivateAttr() def __init__( self, model: str = "gpt-4-vision-preview", temperature: float = DEFAULT_TEMPERATURE, max_new_tokens: Optional[int] = 300, additional_kwargs: Optional[Dict[str, Any]] = None, context_window: Optional[int] = DEFAULT_CONTEXT_WINDOW, max_retries: int = 3, timeout: float = 60.0, image_detail: str = "low", api_key: Optional[str] = None, api_base: Optional[str] = None, api_version: Optional[str] = None, messages_to_prompt: Optional[Callable] = None, completion_to_prompt: Optional[Callable] = None, callback_manager: Optional[CallbackManager] = None, default_headers: Optional[Dict[str, str]] = None, http_client: Optional[httpx.Client] = None, **kwargs: Any, ) -> None: self._messages_to_prompt = messages_to_prompt or generic_messages_to_prompt self._completion_to_prompt = completion_to_prompt or (lambda x: x) api_key, api_base, api_version = resolve_openai_credentials( api_key=api_key, api_base=api_base, api_version=api_version, ) super().__init__( model=model, temperature=temperature, max_new_tokens=max_new_tokens, additional_kwargs=additional_kwargs or {}, context_window=context_window, image_detail=image_detail, max_retries=max_retries, timeout=timeout, api_key=api_key, api_base=api_base, api_version=api_version, callback_manager=callback_manager, default_headers=default_headers, **kwargs, ) self._http_client = http_client self._client, self._aclient = self._get_clients(**kwargs) def _get_clients(self, **kwargs: Any) -> Tuple[SyncOpenAI, AsyncOpenAI]: client = SyncOpenAI(**self._get_credential_kwargs()) aclient = AsyncOpenAI(**self._get_credential_kwargs()) return client, aclient @classmethod def class_name(cls) -> str: return "openai_multi_modal_llm" @property def metadata(self) -> MultiModalLLMMetadata: """Multi Modal LLM metadata.""" return MultiModalLLMMetadata( num_output=self.max_new_tokens or DEFAULT_NUM_OUTPUTS, model_name=self.model, ) def _get_credential_kwargs(self, **kwargs: Any) -> Dict[str, Any]: return { "api_key": self.api_key, "base_url": self.api_base, "max_retries": self.max_retries, "default_headers": self.default_headers, "http_client": self._http_client, "timeout": self.timeout, **kwargs, } def _get_multi_modal_chat_messages( self, prompt: str, role: str, image_documents: Sequence[ImageDocument], **kwargs: Any, ) -> List[ChatCompletionMessageParam]: return to_openai_message_dicts( [ generate_openai_multi_modal_chat_message( prompt=prompt, role=role, image_documents=image_documents, image_detail=self.image_detail, ) ] ) # Model Params for OpenAI GPT4V model. def _get_model_kwargs(self, **kwargs: Any) -> Dict[str, Any]: if self.model not in GPT4V_MODELS: raise ValueError( f"Invalid model {self.model}. 
" f"Available models are: {list(GPT4V_MODELS.keys())}" ) base_kwargs = {"model": self.model, "temperature": self.temperature, **kwargs} if self.max_new_tokens is not None: # If max_tokens is None, don't include in the payload: # https://platform.openai.com/docs/api-reference/chat # https://platform.openai.com/docs/api-reference/completions base_kwargs["max_tokens"] = self.max_new_tokens return {**base_kwargs, **self.additional_kwargs} def _get_response_token_counts(self, raw_response: Any) -> dict: """Get the token usage reported by the response.""" if not isinstance(raw_response, dict): return {} usage = raw_response.get("usage", {}) # NOTE: other model providers that use the OpenAI client may not report usage if usage is None: return {} return { "prompt_tokens": usage.get("prompt_tokens", 0), "completion_tokens": usage.get("completion_tokens", 0), "total_tokens": usage.get("total_tokens", 0), } def _complete( self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any ) -> CompletionResponse: all_kwargs = self._get_model_kwargs(**kwargs) message_dict = self._get_multi_modal_chat_messages( prompt=prompt, role=MessageRole.USER, image_documents=image_documents ) response = self._client.chat.completions.create( messages=message_dict, stream=False, **all_kwargs, ) return CompletionResponse( text=response.choices[0].message.content, raw=response, additional_kwargs=self._get_response_token_counts(response), ) def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse: all_kwargs = self._get_model_kwargs(**kwargs) message_dicts = to_openai_message_dicts(messages) response = self._client.chat.completions.create( messages=message_dicts, stream=False, **all_kwargs, ) openai_message = response.choices[0].message message = from_openai_message(openai_message) return ChatResponse( message=message, raw=response, additional_kwargs=self._get_response_token_counts(response), ) def _stream_complete( self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any ) -> CompletionResponseGen: all_kwargs = self._get_model_kwargs(**kwargs) message_dict = self._get_multi_modal_chat_messages( prompt=prompt, role=MessageRole.USER, image_documents=image_documents ) def gen() -> CompletionResponseGen: text = "" for response in self._client.chat.completions.create( messages=message_dict, stream=True, **all_kwargs, ): response = cast(ChatCompletionChunk, response) if len(response.choices) > 0: delta = response.choices[0].delta else: delta = ChoiceDelta() # update using deltas content_delta = delta.content or "" text += content_delta yield CompletionResponse( delta=content_delta, text=text, raw=response, additional_kwargs=self._get_response_token_counts(response), ) return gen() def _stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseGen: message_dicts = to_openai_message_dicts(messages) def gen() -> ChatResponseGen: content = "" tool_calls: List[ChoiceDeltaToolCall] = [] is_function = False for response in self._client.chat.completions.create( messages=message_dicts, stream=True, **self._get_model_kwargs(**kwargs), ): response = cast(ChatCompletionChunk, response) if len(response.choices) > 0: delta = response.choices[0].delta else: delta = ChoiceDelta() # check if this chunk is the start of a function call if delta.tool_calls: is_function = True # update using deltas role = delta.role or MessageRole.ASSISTANT content_delta = delta.content or "" content += content_delta additional_kwargs = {} if is_function: tool_calls = 
self._update_tool_calls(tool_calls, delta.tool_calls) additional_kwargs["tool_calls"] = tool_calls yield ChatResponse( message=ChatMessage( role=role, content=content, additional_kwargs=additional_kwargs, ), delta=content_delta, raw=response, additional_kwargs=self._get_response_token_counts(response), ) return gen() def complete( self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any ) -> CompletionResponse: return self._complete(prompt, image_documents, **kwargs) def stream_complete( self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any ) -> CompletionResponseGen: return self._stream_complete(prompt, image_documents, **kwargs) def chat( self, messages: Sequence[ChatMessage], **kwargs: Any, ) -> ChatResponse: return self._chat(messages, **kwargs) def stream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any, ) -> ChatResponseGen: return self._stream_chat(messages, **kwargs) # ===== Async Endpoints ===== async def _acomplete( self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any ) -> CompletionResponse: all_kwargs = self._get_model_kwargs(**kwargs) message_dict = self._get_multi_modal_chat_messages( prompt=prompt, role=MessageRole.USER, image_documents=image_documents ) response = await self._aclient.chat.completions.create( messages=message_dict, stream=False, **all_kwargs, ) return CompletionResponse( text=response.choices[0].message.content, raw=response, additional_kwargs=self._get_response_token_counts(response), ) async def acomplete( self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any ) -> CompletionResponse: return await self._acomplete(prompt, image_documents, **kwargs) async def _astream_complete( self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any ) -> CompletionResponseAsyncGen: all_kwargs = self._get_model_kwargs(**kwargs) message_dict = self._get_multi_modal_chat_messages( prompt=prompt, role=MessageRole.USER, image_documents=image_documents ) async def gen() -> CompletionResponseAsyncGen: text = "" async for response in await self._aclient.chat.completions.create( messages=message_dict, stream=True, **all_kwargs, ): response = cast(ChatCompletionChunk, response) if len(response.choices) > 0: delta = response.choices[0].delta else: delta = ChoiceDelta() # update using deltas content_delta = delta.content or "" text += content_delta yield CompletionResponse( delta=content_delta, text=text, raw=response, additional_kwargs=self._get_response_token_counts(response), ) return gen() async def _achat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponse: all_kwargs = self._get_model_kwargs(**kwargs) message_dicts = to_openai_message_dicts(messages) response = await self._aclient.chat.completions.create( messages=message_dicts, stream=False, **all_kwargs, ) openai_message = response.choices[0].message message = from_openai_message(openai_message) return ChatResponse( message=message, raw=response, additional_kwargs=self._get_response_token_counts(response), ) async def _astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any ) -> ChatResponseAsyncGen: message_dicts = to_openai_message_dicts(messages) async def gen() -> ChatResponseAsyncGen: content = "" tool_calls: List[ChoiceDeltaToolCall] = [] is_function = False async for response in await self._aclient.chat.completions.create( messages=message_dicts, stream=True, **self._get_model_kwargs(**kwargs), ): response = cast(ChatCompletionChunk, response) if len(response.choices) > 0: delta = 
response.choices[0].delta else: delta = ChoiceDelta() # check if this chunk is the start of a function call if delta.tool_calls: is_function = True # update using deltas role = delta.role or MessageRole.ASSISTANT content_delta = delta.content or "" content += content_delta additional_kwargs = {} if is_function: tool_calls = self._update_tool_calls(tool_calls, delta.tool_calls) additional_kwargs["tool_calls"] = tool_calls yield ChatResponse( message=ChatMessage( role=role, content=content, additional_kwargs=additional_kwargs, ), delta=content_delta, raw=response, additional_kwargs=self._get_response_token_counts(response), ) return gen() async def astream_complete( self, prompt: str, image_documents: Sequence[ImageDocument], **kwargs: Any ) -> CompletionResponseAsyncGen: return await self._astream_complete(prompt, image_documents, **kwargs) async def achat( self, messages: Sequence[ChatMessage], **kwargs: Any, ) -> ChatResponse: return await self._achat(messages, **kwargs) async def astream_chat( self, messages: Sequence[ChatMessage], **kwargs: Any, ) -> ChatResponseAsyncGen: return await self._astream_chat(messages, **kwargs)
[ "llama_index.legacy.llms.openai_utils.from_openai_message", "llama_index.legacy.multi_modal_llms.MultiModalLLMMetadata", "llama_index.legacy.multi_modal_llms.openai_utils.generate_openai_multi_modal_chat_message", "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.core.llms.types.ChatMessage", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.llms.openai_utils.to_openai_message_dicts", "llama_index.legacy.multi_modal_llms.openai_utils.GPT4V_MODELS.keys", "llama_index.legacy.llms.openai_utils.resolve_openai_credentials" ]
[((1407, 1469), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Multi-Modal model to use from OpenAI."""'}), "(description='The Multi-Modal model to use from OpenAI.')\n", (1412, 1469), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1495, 1552), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (1500, 1552), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1589, 1713), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '""" The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt"""', 'gt': '(0)'}), "(description=\n ' The maximum numbers of tokens to generate, ignoring the number of tokens in the prompt'\n , gt=0)\n", (1594, 1713), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1763, 1841), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of context tokens for the model."""', 'gt': '(0)'}), "(description='The maximum number of context tokens for the model.', gt=0)\n", (1768, 1841), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1889, 1985), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The level of details for image in API calls. Can be low, high, or auto"""'}), "(description=\n 'The level of details for image in API calls. Can be low, high, or auto')\n", (1894, 1985), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2018, 2083), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(3)', 'description': '"""Maximum number of retries."""', 'gte': '(0)'}), "(default=3, description='Maximum number of retries.', gte=0)\n", (2023, 2083), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2136, 2225), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(60.0)', 'description': '"""The timeout, in seconds, for API requests."""', 'gte': '(0)'}), "(default=60.0, description=\n 'The timeout, in seconds, for API requests.', gte=0)\n", (2141, 2225), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2271, 2339), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The OpenAI API key."""', 'exclude': '(True)'}), "(default=None, description='The OpenAI API key.', exclude=True)\n", (2276, 2339), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2360, 2423), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The base URL for OpenAI API."""'}), "(default=None, description='The base URL for OpenAI API.')\n", (2365, 2423), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2447, 2499), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The API version for OpenAI API."""'}), "(description='The API version for OpenAI API.')\n", (2452, 2499), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2540, 2625), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the OpenAI API."""'}), "(default_factory=dict, description='Additional kwargs for the OpenAI API.'\n )\n", (2545, 2625), False, 
'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2673, 2745), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The default headers for API requests."""'}), "(default=None, description='The default headers for API requests.')\n", (2678, 2745), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2797, 2810), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2808, 2810), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2849, 2862), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2860, 2862), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2889, 2902), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2900, 2902), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2931, 2944), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2942, 2944), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2988, 3001), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2999, 3001), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((4037, 4129), 'llama_index.legacy.llms.openai_utils.resolve_openai_credentials', 'resolve_openai_credentials', ([], {'api_key': 'api_key', 'api_base': 'api_base', 'api_version': 'api_version'}), '(api_key=api_key, api_base=api_base, api_version=\n api_version)\n', (4063, 4129), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((5276, 5379), 'llama_index.legacy.multi_modal_llms.MultiModalLLMMetadata', 'MultiModalLLMMetadata', ([], {'num_output': '(self.max_new_tokens or DEFAULT_NUM_OUTPUTS)', 'model_name': 'self.model'}), '(num_output=self.max_new_tokens or DEFAULT_NUM_OUTPUTS,\n model_name=self.model)\n', (5297, 5379), False, 'from llama_index.legacy.multi_modal_llms import MultiModalLLM, MultiModalLLMMetadata\n'), ((8542, 8575), 'llama_index.legacy.llms.openai_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (8565, 8575), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((8802, 8837), 'llama_index.legacy.llms.openai_utils.from_openai_message', 'from_openai_message', (['openai_message'], {}), '(openai_message)\n', (8821, 8837), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((10361, 10394), 'llama_index.legacy.llms.openai_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (10384, 10394), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((15114, 15147), 'llama_index.legacy.llms.openai_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (15137, 15147), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((15381, 15416), 'llama_index.legacy.llms.openai_utils.from_openai_message', 'from_openai_message', (['openai_message'], {}), '(openai_message)\n', (15400, 15416), False, 'from llama_index.legacy.llms.openai_utils 
import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((15731, 15764), 'llama_index.legacy.llms.openai_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (15754, 15764), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((6070, 6205), 'llama_index.legacy.multi_modal_llms.openai_utils.generate_openai_multi_modal_chat_message', 'generate_openai_multi_modal_chat_message', ([], {'prompt': 'prompt', 'role': 'role', 'image_documents': 'image_documents', 'image_detail': 'self.image_detail'}), '(prompt=prompt, role=role,\n image_documents=image_documents, image_detail=self.image_detail)\n', (6110, 6205), False, 'from llama_index.legacy.multi_modal_llms.openai_utils import GPT4V_MODELS, generate_openai_multi_modal_chat_message\n'), ((9628, 9663), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (9632, 9663), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((10776, 10811), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (10780, 10811), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((14330, 14365), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (14334, 14365), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((16170, 16205), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (16174, 16205), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((9814, 9827), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (9825, 9827), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((10962, 10975), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (10973, 10975), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((14516, 14529), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (14527, 14529), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((16356, 16369), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (16367, 16369), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((6602, 6621), 'llama_index.legacy.multi_modal_llms.openai_utils.GPT4V_MODELS.keys', 'GPT4V_MODELS.keys', ([], {}), '()\n', (6619, 6621), False, 'from llama_index.legacy.multi_modal_llms.openai_utils import GPT4V_MODELS, generate_openai_multi_modal_chat_message\n'), ((11603, 11679), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=role, content=content, additional_kwargs=additional_kwargs)\n', (11614, 11679), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole\n'), ((16997, 17073), 'llama_index.legacy.core.llms.types.ChatMessage', 
'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=role, content=content, additional_kwargs=additional_kwargs)\n', (17008, 17073), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole\n')]
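Similarly, a minimal usage sketch for the OpenAIMultiModal class in the entry above; the module path, the local image file name, and the model choice are assumptions for illustration, and OPENAI_API_KEY is again assumed to be available:

# Hypothetical usage sketch; the import path and image file name are assumptions, not part of the dataset row.
from llama_index.legacy.multi_modal_llms.openai import OpenAIMultiModal  # assumed module path
from llama_index.legacy.schema import ImageDocument

# The constructor resolves OpenAI credentials and builds the sync/async clients internally.
gpt4v = OpenAIMultiModal(
    model="gpt-4-vision-preview",
    max_new_tokens=300,
    image_detail="low",
)

# complete() turns the prompt plus images into a single user chat message
# (via generate_openai_multi_modal_chat_message) and returns a CompletionResponse.
image_docs = [ImageDocument(image_path="example.jpg")]  # hypothetical local image
response = gpt4v.complete(
    prompt="Describe what is shown in this image.",
    image_documents=image_docs,
)
print(response.text)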
import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((15731, 15764), 'llama_index.legacy.llms.openai_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (15754, 15764), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message, resolve_openai_credentials, to_openai_message_dicts\n'), ((6070, 6205), 'llama_index.legacy.multi_modal_llms.openai_utils.generate_openai_multi_modal_chat_message', 'generate_openai_multi_modal_chat_message', ([], {'prompt': 'prompt', 'role': 'role', 'image_documents': 'image_documents', 'image_detail': 'self.image_detail'}), '(prompt=prompt, role=role,\n image_documents=image_documents, image_detail=self.image_detail)\n', (6110, 6205), False, 'from llama_index.legacy.multi_modal_llms.openai_utils import GPT4V_MODELS, generate_openai_multi_modal_chat_message\n'), ((9628, 9663), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (9632, 9663), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((10776, 10811), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (10780, 10811), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((14330, 14365), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (14334, 14365), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((16170, 16205), 'typing.cast', 'cast', (['ChatCompletionChunk', 'response'], {}), '(ChatCompletionChunk, response)\n', (16174, 16205), False, 'from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, cast\n'), ((9814, 9827), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (9825, 9827), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((10962, 10975), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (10973, 10975), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((14516, 14529), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (14527, 14529), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((16356, 16369), 'openai.types.chat.chat_completion_chunk.ChoiceDelta', 'ChoiceDelta', ([], {}), '()\n', (16367, 16369), False, 'from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta, ChoiceDeltaToolCall\n'), ((6602, 6621), 'llama_index.legacy.multi_modal_llms.openai_utils.GPT4V_MODELS.keys', 'GPT4V_MODELS.keys', ([], {}), '()\n', (6619, 6621), False, 'from llama_index.legacy.multi_modal_llms.openai_utils import GPT4V_MODELS, generate_openai_multi_modal_chat_message\n'), ((11603, 11679), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=role, content=content, additional_kwargs=additional_kwargs)\n', (11614, 11679), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole\n'), ((16997, 17073), 'llama_index.legacy.core.llms.types.ChatMessage', 
'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=role, content=content, additional_kwargs=additional_kwargs)\n', (17008, 17073), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole\n')]
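For orientation, here is a minimal, hedged usage sketch of the multi-modal wrapper above. It assumes the class is exported as OpenAIMultiModal from the legacy multi-modal module and that an ImageDocument can be built from a local file path; the model name, image path, and prompt are placeholders, not part of the original file.

# Hedged usage sketch; model name, image path, and prompt are illustrative only.
from llama_index.legacy.multi_modal_llms.openai import OpenAIMultiModal
from llama_index.legacy.schema import ImageDocument

mm_llm = OpenAIMultiModal(model="gpt-4-vision-preview", max_new_tokens=300)
image_docs = [ImageDocument(image_path="photo.jpg")]  # hypothetical local image

# Synchronous completion over text + image, mirroring the complete() endpoint above.
print(mm_llm.complete(prompt="Describe this image.", image_documents=image_docs).text)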
import os
from dotenv import load_dotenv, find_dotenv
import numpy as np

from trulens_eval import (
    Feedback,
    TruLlama,
    OpenAI
)
from trulens_eval.feedback import Groundedness
import nest_asyncio

nest_asyncio.apply()


def get_openai_api_key():
    _ = load_dotenv(find_dotenv())
    return os.getenv("OPENAI_API_KEY")


def get_hf_api_key():
    _ = load_dotenv(find_dotenv())
    return os.getenv("HUGGINGFACE_API_KEY")


openai = OpenAI()

qa_relevance = (
    Feedback(openai.relevance_with_cot_reasons, name="Answer Relevance")
    .on_input_output()
)

qs_relevance = (
    Feedback(openai.relevance_with_cot_reasons, name="Context Relevance")
    .on_input()
    .on(TruLlama.select_source_nodes().node.text)
    .aggregate(np.mean)
)

# grounded = Groundedness(groundedness_provider=openai, summarize_provider=openai)
grounded = Groundedness(groundedness_provider=openai)

groundedness = (
    Feedback(grounded.groundedness_measure_with_cot_reasons, name="Groundedness")
    .on(TruLlama.select_source_nodes().node.text)
    .on_output()
    .aggregate(grounded.grounded_statements_aggregator)
)

feedbacks = [qa_relevance, qs_relevance, groundedness]


def get_trulens_recorder(query_engine, feedbacks, app_id):
    tru_recorder = TruLlama(
        query_engine,
        app_id=app_id,
        feedbacks=feedbacks
    )
    return tru_recorder


def get_prebuilt_trulens_recorder(query_engine, app_id):
    tru_recorder = TruLlama(
        query_engine,
        app_id=app_id,
        feedbacks=feedbacks
    )
    return tru_recorder


from llama_index import ServiceContext, VectorStoreIndex, StorageContext
from llama_index.node_parser import SentenceWindowNodeParser
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index import load_index_from_storage
import os


def build_sentence_window_index(
    document, llm, embed_model="local:BAAI/bge-small-en-v1.5", save_dir="sentence_index"
):
    # create the sentence window node parser w/ default settings
    node_parser = SentenceWindowNodeParser.from_defaults(
        window_size=3,
        window_metadata_key="window",
        original_text_metadata_key="original_text",
    )
    sentence_context = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model,
        node_parser=node_parser,
    )
    if not os.path.exists(save_dir):
        sentence_index = VectorStoreIndex.from_documents(
            [document], service_context=sentence_context
        )
        sentence_index.storage_context.persist(persist_dir=save_dir)
    else:
        sentence_index = load_index_from_storage(
            StorageContext.from_defaults(persist_dir=save_dir),
            service_context=sentence_context,
        )

    return sentence_index


def get_sentence_window_query_engine(
    sentence_index,
    similarity_top_k=6,
    rerank_top_n=2,
):
    # define postprocessors
    postproc = MetadataReplacementPostProcessor(target_metadata_key="window")
    rerank = SentenceTransformerRerank(
        top_n=rerank_top_n, model="BAAI/bge-reranker-base"
    )

    sentence_window_engine = sentence_index.as_query_engine(
        similarity_top_k=similarity_top_k, node_postprocessors=[postproc, rerank]
    )
    return sentence_window_engine


from llama_index.node_parser import HierarchicalNodeParser
from llama_index.node_parser import get_leaf_nodes
from llama_index import StorageContext
from llama_index.retrievers import AutoMergingRetriever
from llama_index.indices.postprocessor import SentenceTransformerRerank
from llama_index.query_engine import RetrieverQueryEngine


def build_automerging_index(
    documents,
    llm,
    embed_model="local:BAAI/bge-small-en-v1.5",
    save_dir="merging_index",
    chunk_sizes=None,
):
    chunk_sizes = chunk_sizes or [2048, 512, 128]
    node_parser = HierarchicalNodeParser.from_defaults(chunk_sizes=chunk_sizes)
    nodes = node_parser.get_nodes_from_documents(documents)
    leaf_nodes = get_leaf_nodes(nodes)
    merging_context = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model,
    )
    storage_context = StorageContext.from_defaults()
    storage_context.docstore.add_documents(nodes)

    if not os.path.exists(save_dir):
        automerging_index = VectorStoreIndex(
            leaf_nodes, storage_context=storage_context, service_context=merging_context
        )
        automerging_index.storage_context.persist(persist_dir=save_dir)
    else:
        automerging_index = load_index_from_storage(
            StorageContext.from_defaults(persist_dir=save_dir),
            service_context=merging_context,
        )
    return automerging_index


def get_automerging_query_engine(
    automerging_index,
    similarity_top_k=12,
    rerank_top_n=2,
):
    base_retriever = automerging_index.as_retriever(similarity_top_k=similarity_top_k)
    retriever = AutoMergingRetriever(
        base_retriever, automerging_index.storage_context, verbose=True
    )
    rerank = SentenceTransformerRerank(
        top_n=rerank_top_n, model="BAAI/bge-reranker-base"
    )
    auto_merging_engine = RetrieverQueryEngine.from_args(
        retriever, node_postprocessors=[rerank]
    )
    return auto_merging_engine
[ "llama_index.VectorStoreIndex.from_documents", "llama_index.retrievers.AutoMergingRetriever", "llama_index.node_parser.get_leaf_nodes", "llama_index.ServiceContext.from_defaults", "llama_index.StorageContext.from_defaults", "llama_index.node_parser.SentenceWindowNodeParser.from_defaults", "llama_index.VectorStoreIndex", "llama_index.node_parser.HierarchicalNodeParser.from_defaults", "llama_index.query_engine.RetrieverQueryEngine.from_args", "llama_index.indices.postprocessor.MetadataReplacementPostProcessor", "llama_index.indices.postprocessor.SentenceTransformerRerank" ]
[((211, 231), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (229, 231), False, 'import nest_asyncio\n'), ((449, 457), 'trulens_eval.OpenAI', 'OpenAI', ([], {}), '()\n', (455, 457), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((854, 896), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'openai'}), '(groundedness_provider=openai)\n', (866, 896), False, 'from trulens_eval.feedback import Groundedness\n'), ((307, 334), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (316, 334), False, 'import os\n'), ((406, 438), 'os.getenv', 'os.getenv', (['"""HUGGINGFACE_API_KEY"""'], {}), "('HUGGINGFACE_API_KEY')\n", (415, 438), False, 'import os\n'), ((1269, 1327), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': 'feedbacks'}), '(query_engine, app_id=app_id, feedbacks=feedbacks)\n', (1277, 1327), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((1459, 1517), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': 'feedbacks'}), '(query_engine, app_id=app_id, feedbacks=feedbacks)\n', (1467, 1517), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((2130, 2262), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(3)', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=3, window_metadata_key=\n 'window', original_text_metadata_key='original_text')\n", (2168, 2262), False, 'from llama_index.node_parser import SentenceWindowNodeParser\n'), ((2312, 2404), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model, node_parser=\n node_parser)\n', (2340, 2404), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((3019, 3081), 'llama_index.indices.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (3051, 3081), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor\n'), ((3095, 3172), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (3120, 3172), False, 'from llama_index.indices.postprocessor import SentenceTransformerRerank\n'), ((3935, 3996), 'llama_index.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': 'chunk_sizes'}), '(chunk_sizes=chunk_sizes)\n', (3971, 3996), False, 'from llama_index.node_parser import HierarchicalNodeParser\n'), ((4074, 4095), 'llama_index.node_parser.get_leaf_nodes', 'get_leaf_nodes', (['nodes'], {}), '(nodes)\n', (4088, 4095), False, 'from llama_index.node_parser import get_leaf_nodes\n'), ((4118, 4180), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (4146, 4180), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((4226, 4256), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (4254, 4256), False, 
'from llama_index import StorageContext\n'), ((4983, 5072), 'llama_index.retrievers.AutoMergingRetriever', 'AutoMergingRetriever', (['base_retriever', 'automerging_index.storage_context'], {'verbose': '(True)'}), '(base_retriever, automerging_index.storage_context,\n verbose=True)\n', (5003, 5072), False, 'from llama_index.retrievers import AutoMergingRetriever\n'), ((5096, 5173), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (5121, 5173), False, 'from llama_index.indices.postprocessor import SentenceTransformerRerank\n'), ((5214, 5285), 'llama_index.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {'node_postprocessors': '[rerank]'}), '(retriever, node_postprocessors=[rerank])\n', (5244, 5285), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((280, 293), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (291, 293), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((379, 392), 'dotenv.find_dotenv', 'find_dotenv', ([], {}), '()\n', (390, 392), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((480, 548), 'trulens_eval.Feedback', 'Feedback', (['openai.relevance_with_cot_reasons'], {'name': '"""Answer Relevance"""'}), "(openai.relevance_with_cot_reasons, name='Answer Relevance')\n", (488, 548), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((2442, 2466), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (2456, 2466), False, 'import os\n'), ((2493, 2570), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['[document]'], {'service_context': 'sentence_context'}), '([document], service_context=sentence_context)\n', (2524, 2570), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((4319, 4343), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4333, 4343), False, 'import os\n'), ((4373, 4471), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['leaf_nodes'], {'storage_context': 'storage_context', 'service_context': 'merging_context'}), '(leaf_nodes, storage_context=storage_context,\n service_context=merging_context)\n', (4389, 4471), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext\n'), ((2734, 2784), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (2762, 2784), False, 'from llama_index import StorageContext\n'), ((4637, 4687), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (4665, 4687), False, 'from llama_index import StorageContext\n'), ((692, 722), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (720, 722), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((596, 665), 'trulens_eval.Feedback', 'Feedback', (['openai.relevance_with_cot_reasons'], {'name': '"""Context Relevance"""'}), "(openai.relevance_with_cot_reasons, name='Context Relevance')\n", (604, 665), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((919, 996), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, 
name='Groundedness')\n", (927, 996), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n'), ((1009, 1039), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (1037, 1039), False, 'from trulens_eval import Feedback, TruLlama, OpenAI\n')]
"""SQL Structured Store.""" from collections import defaultdict from enum import Enum from typing import Any, Optional, Sequence, Union from sqlalchemy import Table from llama_index.legacy.core.base_query_engine import BaseQueryEngine from llama_index.legacy.core.base_retriever import BaseRetriever from llama_index.legacy.data_structs.table import SQLStructTable from llama_index.legacy.indices.common.struct_store.schema import SQLContextContainer from llama_index.legacy.indices.common.struct_store.sql import ( SQLStructDatapointExtractor, ) from llama_index.legacy.indices.struct_store.base import BaseStructStoreIndex from llama_index.legacy.indices.struct_store.container_builder import ( SQLContextContainerBuilder, ) from llama_index.legacy.schema import BaseNode from llama_index.legacy.service_context import ServiceContext from llama_index.legacy.utilities.sql_wrapper import SQLDatabase class SQLQueryMode(str, Enum): SQL = "sql" NL = "nl" class SQLStructStoreIndex(BaseStructStoreIndex[SQLStructTable]): """SQL Struct Store Index. The SQLStructStoreIndex is an index that uses a SQL database under the hood. During index construction, the data can be inferred from unstructured documents given a schema extract prompt, or it can be pre-loaded in the database. During query time, the user can either specify a raw SQL query or a natural language query to retrieve their data. NOTE: this is deprecated. Args: documents (Optional[Sequence[DOCUMENTS_INPUT]]): Documents to index. NOTE: in the SQL index, this is an optional field. sql_database (Optional[SQLDatabase]): SQL database to use, including table names to specify. See :ref:`Ref-Struct-Store` for more details. table_name (Optional[str]): Name of the table to use for extracting data. Either table_name or table must be specified. table (Optional[Table]): SQLAlchemy Table object to use. Specifying the Table object explicitly, instead of the table name, allows you to pass in a view. Either table_name or table must be specified. sql_context_container (Optional[SQLContextContainer]): SQL context container. an be generated from a SQLContextContainerBuilder. See :ref:`Ref-Struct-Store` for more details. """ index_struct_cls = SQLStructTable def __init__( self, nodes: Optional[Sequence[BaseNode]] = None, index_struct: Optional[SQLStructTable] = None, service_context: Optional[ServiceContext] = None, sql_database: Optional[SQLDatabase] = None, table_name: Optional[str] = None, table: Optional[Table] = None, ref_doc_id_column: Optional[str] = None, sql_context_container: Optional[SQLContextContainer] = None, **kwargs: Any, ) -> None: """Initialize params.""" if sql_database is None: raise ValueError("sql_database must be specified") self.sql_database = sql_database # needed here for data extractor self._ref_doc_id_column = ref_doc_id_column self._table_name = table_name self._table = table # if documents aren't specified, pass in a blank [] if index_struct is None: nodes = nodes or [] super().__init__( nodes=nodes, index_struct=index_struct, service_context=service_context, **kwargs, ) # TODO: index_struct context_dict is deprecated, # we're migrating storage of information to here. 
if sql_context_container is None: container_builder = SQLContextContainerBuilder(sql_database) sql_context_container = container_builder.build_context_container() self.sql_context_container = sql_context_container @property def ref_doc_id_column(self) -> Optional[str]: return self._ref_doc_id_column def _build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> SQLStructTable: """Build index from nodes.""" index_struct = self.index_struct_cls() if len(nodes) == 0: return index_struct else: data_extractor = SQLStructDatapointExtractor( self._service_context.llm, self.schema_extract_prompt, self.output_parser, self.sql_database, table_name=self._table_name, table=self._table, ref_doc_id_column=self._ref_doc_id_column, ) # group nodes by ids source_to_node = defaultdict(list) for node in nodes: source_to_node[node.ref_doc_id].append(node) for node_set in source_to_node.values(): data_extractor.insert_datapoint_from_nodes(node_set) return index_struct def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None: """Insert a document.""" data_extractor = SQLStructDatapointExtractor( self._service_context.llm, self.schema_extract_prompt, self.output_parser, self.sql_database, table_name=self._table_name, table=self._table, ref_doc_id_column=self._ref_doc_id_column, ) data_extractor.insert_datapoint_from_nodes(nodes) def as_retriever(self, **kwargs: Any) -> BaseRetriever: raise NotImplementedError("Not supported") def as_query_engine( self, query_mode: Union[str, SQLQueryMode] = SQLQueryMode.NL, **kwargs: Any ) -> BaseQueryEngine: # NOTE: lazy import from llama_index.legacy.indices.struct_store.sql_query import ( NLStructStoreQueryEngine, SQLStructStoreQueryEngine, ) if query_mode == SQLQueryMode.NL: return NLStructStoreQueryEngine(self, **kwargs) elif query_mode == SQLQueryMode.SQL: return SQLStructStoreQueryEngine(self, **kwargs) else: raise ValueError(f"Unknown query mode: {query_mode}") GPTSQLStructStoreIndex = SQLStructStoreIndex
[ "llama_index.legacy.indices.struct_store.container_builder.SQLContextContainerBuilder", "llama_index.legacy.indices.common.struct_store.sql.SQLStructDatapointExtractor", "llama_index.legacy.indices.struct_store.sql_query.NLStructStoreQueryEngine", "llama_index.legacy.indices.struct_store.sql_query.SQLStructStoreQueryEngine" ]
[((5106, 5332), 'llama_index.legacy.indices.common.struct_store.sql.SQLStructDatapointExtractor', 'SQLStructDatapointExtractor', (['self._service_context.llm', 'self.schema_extract_prompt', 'self.output_parser', 'self.sql_database'], {'table_name': 'self._table_name', 'table': 'self._table', 'ref_doc_id_column': 'self._ref_doc_id_column'}), '(self._service_context.llm, self.\n schema_extract_prompt, self.output_parser, self.sql_database,\n table_name=self._table_name, table=self._table, ref_doc_id_column=self.\n _ref_doc_id_column)\n', (5133, 5332), False, 'from llama_index.legacy.indices.common.struct_store.sql import SQLStructDatapointExtractor\n'), ((3747, 3787), 'llama_index.legacy.indices.struct_store.container_builder.SQLContextContainerBuilder', 'SQLContextContainerBuilder', (['sql_database'], {}), '(sql_database)\n', (3773, 3787), False, 'from llama_index.legacy.indices.struct_store.container_builder import SQLContextContainerBuilder\n'), ((4304, 4530), 'llama_index.legacy.indices.common.struct_store.sql.SQLStructDatapointExtractor', 'SQLStructDatapointExtractor', (['self._service_context.llm', 'self.schema_extract_prompt', 'self.output_parser', 'self.sql_database'], {'table_name': 'self._table_name', 'table': 'self._table', 'ref_doc_id_column': 'self._ref_doc_id_column'}), '(self._service_context.llm, self.\n schema_extract_prompt, self.output_parser, self.sql_database,\n table_name=self._table_name, table=self._table, ref_doc_id_column=self.\n _ref_doc_id_column)\n', (4331, 4530), False, 'from llama_index.legacy.indices.common.struct_store.sql import SQLStructDatapointExtractor\n'), ((4706, 4723), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4717, 4723), False, 'from collections import defaultdict\n'), ((5969, 6009), 'llama_index.legacy.indices.struct_store.sql_query.NLStructStoreQueryEngine', 'NLStructStoreQueryEngine', (['self'], {}), '(self, **kwargs)\n', (5993, 6009), False, 'from llama_index.legacy.indices.struct_store.sql_query import NLStructStoreQueryEngine, SQLStructStoreQueryEngine\n'), ((6074, 6115), 'llama_index.legacy.indices.struct_store.sql_query.SQLStructStoreQueryEngine', 'SQLStructStoreQueryEngine', (['self'], {}), '(self, **kwargs)\n', (6099, 6115), False, 'from llama_index.legacy.indices.struct_store.sql_query import NLStructStoreQueryEngine, SQLStructStoreQueryEngine\n')]
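A hedged sketch of how this (deprecated) index might be driven against a table that already holds data; the SQLite URL, table name, and question are illustrative, and it assumes the module is importable under the legacy path shown in the file's own imports.

# Illustrative only: database URL, table name, and query are assumptions.
from sqlalchemy import create_engine
from llama_index.legacy.indices.struct_store.sql import (
    SQLQueryMode,
    SQLStructStoreIndex,
)
from llama_index.legacy.utilities.sql_wrapper import SQLDatabase

engine = create_engine("sqlite:///example.db")
sql_database = SQLDatabase(engine, include_tables=["city_stats"])

# Data is assumed to be pre-loaded in the table, so no nodes are passed in.
index = SQLStructStoreIndex(nodes=[], sql_database=sql_database, table_name="city_stats")

nl_engine = index.as_query_engine(query_mode=SQLQueryMode.NL)
print(nl_engine.query("Which city has the highest population?"))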
"""SQL Structured Store.""" from collections import defaultdict from enum import Enum from typing import Any, Optional, Sequence, Union from sqlalchemy import Table from llama_index.legacy.core.base_query_engine import BaseQueryEngine from llama_index.legacy.core.base_retriever import BaseRetriever from llama_index.legacy.data_structs.table import SQLStructTable from llama_index.legacy.indices.common.struct_store.schema import SQLContextContainer from llama_index.legacy.indices.common.struct_store.sql import ( SQLStructDatapointExtractor, ) from llama_index.legacy.indices.struct_store.base import BaseStructStoreIndex from llama_index.legacy.indices.struct_store.container_builder import ( SQLContextContainerBuilder, ) from llama_index.legacy.schema import BaseNode from llama_index.legacy.service_context import ServiceContext from llama_index.legacy.utilities.sql_wrapper import SQLDatabase class SQLQueryMode(str, Enum): SQL = "sql" NL = "nl" class SQLStructStoreIndex(BaseStructStoreIndex[SQLStructTable]): """SQL Struct Store Index. The SQLStructStoreIndex is an index that uses a SQL database under the hood. During index construction, the data can be inferred from unstructured documents given a schema extract prompt, or it can be pre-loaded in the database. During query time, the user can either specify a raw SQL query or a natural language query to retrieve their data. NOTE: this is deprecated. Args: documents (Optional[Sequence[DOCUMENTS_INPUT]]): Documents to index. NOTE: in the SQL index, this is an optional field. sql_database (Optional[SQLDatabase]): SQL database to use, including table names to specify. See :ref:`Ref-Struct-Store` for more details. table_name (Optional[str]): Name of the table to use for extracting data. Either table_name or table must be specified. table (Optional[Table]): SQLAlchemy Table object to use. Specifying the Table object explicitly, instead of the table name, allows you to pass in a view. Either table_name or table must be specified. sql_context_container (Optional[SQLContextContainer]): SQL context container. an be generated from a SQLContextContainerBuilder. See :ref:`Ref-Struct-Store` for more details. """ index_struct_cls = SQLStructTable def __init__( self, nodes: Optional[Sequence[BaseNode]] = None, index_struct: Optional[SQLStructTable] = None, service_context: Optional[ServiceContext] = None, sql_database: Optional[SQLDatabase] = None, table_name: Optional[str] = None, table: Optional[Table] = None, ref_doc_id_column: Optional[str] = None, sql_context_container: Optional[SQLContextContainer] = None, **kwargs: Any, ) -> None: """Initialize params.""" if sql_database is None: raise ValueError("sql_database must be specified") self.sql_database = sql_database # needed here for data extractor self._ref_doc_id_column = ref_doc_id_column self._table_name = table_name self._table = table # if documents aren't specified, pass in a blank [] if index_struct is None: nodes = nodes or [] super().__init__( nodes=nodes, index_struct=index_struct, service_context=service_context, **kwargs, ) # TODO: index_struct context_dict is deprecated, # we're migrating storage of information to here. 
if sql_context_container is None: container_builder = SQLContextContainerBuilder(sql_database) sql_context_container = container_builder.build_context_container() self.sql_context_container = sql_context_container @property def ref_doc_id_column(self) -> Optional[str]: return self._ref_doc_id_column def _build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> SQLStructTable: """Build index from nodes.""" index_struct = self.index_struct_cls() if len(nodes) == 0: return index_struct else: data_extractor = SQLStructDatapointExtractor( self._service_context.llm, self.schema_extract_prompt, self.output_parser, self.sql_database, table_name=self._table_name, table=self._table, ref_doc_id_column=self._ref_doc_id_column, ) # group nodes by ids source_to_node = defaultdict(list) for node in nodes: source_to_node[node.ref_doc_id].append(node) for node_set in source_to_node.values(): data_extractor.insert_datapoint_from_nodes(node_set) return index_struct def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None: """Insert a document.""" data_extractor = SQLStructDatapointExtractor( self._service_context.llm, self.schema_extract_prompt, self.output_parser, self.sql_database, table_name=self._table_name, table=self._table, ref_doc_id_column=self._ref_doc_id_column, ) data_extractor.insert_datapoint_from_nodes(nodes) def as_retriever(self, **kwargs: Any) -> BaseRetriever: raise NotImplementedError("Not supported") def as_query_engine( self, query_mode: Union[str, SQLQueryMode] = SQLQueryMode.NL, **kwargs: Any ) -> BaseQueryEngine: # NOTE: lazy import from llama_index.legacy.indices.struct_store.sql_query import ( NLStructStoreQueryEngine, SQLStructStoreQueryEngine, ) if query_mode == SQLQueryMode.NL: return NLStructStoreQueryEngine(self, **kwargs) elif query_mode == SQLQueryMode.SQL: return SQLStructStoreQueryEngine(self, **kwargs) else: raise ValueError(f"Unknown query mode: {query_mode}") GPTSQLStructStoreIndex = SQLStructStoreIndex
[ "llama_index.legacy.indices.struct_store.container_builder.SQLContextContainerBuilder", "llama_index.legacy.indices.common.struct_store.sql.SQLStructDatapointExtractor", "llama_index.legacy.indices.struct_store.sql_query.NLStructStoreQueryEngine", "llama_index.legacy.indices.struct_store.sql_query.SQLStructStoreQueryEngine" ]
[((5106, 5332), 'llama_index.legacy.indices.common.struct_store.sql.SQLStructDatapointExtractor', 'SQLStructDatapointExtractor', (['self._service_context.llm', 'self.schema_extract_prompt', 'self.output_parser', 'self.sql_database'], {'table_name': 'self._table_name', 'table': 'self._table', 'ref_doc_id_column': 'self._ref_doc_id_column'}), '(self._service_context.llm, self.\n schema_extract_prompt, self.output_parser, self.sql_database,\n table_name=self._table_name, table=self._table, ref_doc_id_column=self.\n _ref_doc_id_column)\n', (5133, 5332), False, 'from llama_index.legacy.indices.common.struct_store.sql import SQLStructDatapointExtractor\n'), ((3747, 3787), 'llama_index.legacy.indices.struct_store.container_builder.SQLContextContainerBuilder', 'SQLContextContainerBuilder', (['sql_database'], {}), '(sql_database)\n', (3773, 3787), False, 'from llama_index.legacy.indices.struct_store.container_builder import SQLContextContainerBuilder\n'), ((4304, 4530), 'llama_index.legacy.indices.common.struct_store.sql.SQLStructDatapointExtractor', 'SQLStructDatapointExtractor', (['self._service_context.llm', 'self.schema_extract_prompt', 'self.output_parser', 'self.sql_database'], {'table_name': 'self._table_name', 'table': 'self._table', 'ref_doc_id_column': 'self._ref_doc_id_column'}), '(self._service_context.llm, self.\n schema_extract_prompt, self.output_parser, self.sql_database,\n table_name=self._table_name, table=self._table, ref_doc_id_column=self.\n _ref_doc_id_column)\n', (4331, 4530), False, 'from llama_index.legacy.indices.common.struct_store.sql import SQLStructDatapointExtractor\n'), ((4706, 4723), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4717, 4723), False, 'from collections import defaultdict\n'), ((5969, 6009), 'llama_index.legacy.indices.struct_store.sql_query.NLStructStoreQueryEngine', 'NLStructStoreQueryEngine', (['self'], {}), '(self, **kwargs)\n', (5993, 6009), False, 'from llama_index.legacy.indices.struct_store.sql_query import NLStructStoreQueryEngine, SQLStructStoreQueryEngine\n'), ((6074, 6115), 'llama_index.legacy.indices.struct_store.sql_query.SQLStructStoreQueryEngine', 'SQLStructStoreQueryEngine', (['self'], {}), '(self, **kwargs)\n', (6099, 6115), False, 'from llama_index.legacy.indices.struct_store.sql_query import NLStructStoreQueryEngine, SQLStructStoreQueryEngine\n')]
from llama_index.core.tools import FunctionTool
import os

note_file = os.path.join("data", "notes.txt")


def save_note(note):
    if not os.path.exists(note_file):
        open(note_file, "w")

    with open(note_file, "a") as f:
        f.writelines([note + "\n"])

    return "note saved"


note_engine = FunctionTool.from_defaults(
    fn=save_note,
    name="note_saver",
    description="this tool can save a text based note to a file for the user",
)
[ "llama_index.core.tools.FunctionTool.from_defaults" ]
[((71, 104), 'os.path.join', 'os.path.join', (['"""data"""', '"""notes.txt"""'], {}), "('data', 'notes.txt')\n", (83, 104), False, 'import os\n'), ((309, 448), 'llama_index.core.tools.FunctionTool.from_defaults', 'FunctionTool.from_defaults', ([], {'fn': 'save_note', 'name': '"""note_saver"""', 'description': '"""this tool can save a text based note to a file for the user"""'}), "(fn=save_note, name='note_saver', description=\n 'this tool can save a text based note to a file for the user')\n", (335, 448), False, 'from llama_index.core.tools import FunctionTool\n'), ((139, 164), 'os.path.exists', 'os.path.exists', (['note_file'], {}), '(note_file)\n', (153, 164), False, 'import os\n')]
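As a hedged follow-on, a tool like note_engine would normally be handed to an agent; the ReAct agent and model choice below are assumptions, not part of the original file.

# Illustrative only: agent type and model are assumptions.
from llama_index.core.agent import ReActAgent
from llama_index.llms.openai import OpenAI

llm = OpenAI(model="gpt-3.5-turbo")
agent = ReActAgent.from_tools([note_engine], llm=llm, verbose=True)
print(agent.chat("Please save a note that says: buy milk tomorrow."))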
from llama_index.core.node_parser import SentenceWindowNodeParser
from llama_index.readers.file import FlatReader
from pathlib import Path

reader = FlatReader()
document = reader.load_data(Path("files/sample_document1.txt"))

parser = SentenceWindowNodeParser.from_defaults(
    window_size=2,
    window_metadata_key="text_window",
    original_text_metadata_key="original_sentence",
)

nodes = parser.get_nodes_from_documents(document)
for node in nodes:
    print(f"Metadata {node.metadata} \nText: {node.text}\n")
[ "llama_index.readers.file.FlatReader", "llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults" ]
[((149, 161), 'llama_index.readers.file.FlatReader', 'FlatReader', ([], {}), '()\n', (159, 161), False, 'from llama_index.readers.file import FlatReader\n'), ((236, 377), 'llama_index.core.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': '(2)', 'window_metadata_key': '"""text_window"""', 'original_text_metadata_key': '"""original_sentence"""'}), "(window_size=2, window_metadata_key=\n 'text_window', original_text_metadata_key='original_sentence')\n", (274, 377), False, 'from llama_index.core.node_parser import SentenceWindowNodeParser\n'), ((190, 224), 'pathlib.Path', 'Path', (['"""files/sample_document1.txt"""'], {}), "('files/sample_document1.txt')\n", (194, 224), False, 'from pathlib import Path\n')]
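A hedged continuation showing what the custom metadata keys are typically for: at query time, a MetadataReplacementPostProcessor can swap each retrieved sentence for its stored window. The index construction and query string are assumptions and require an embedding model to be configured.

# Illustrative only: assumes a default embedding model is configured.
from llama_index.core import VectorStoreIndex
from llama_index.core.postprocessor import MetadataReplacementPostProcessor

index = VectorStoreIndex(nodes)
query_engine = index.as_query_engine(
    similarity_top_k=2,
    node_postprocessors=[
        MetadataReplacementPostProcessor(target_metadata_key="text_window")
    ],
)
print(query_engine.query("What is the sample document about?"))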
# uses brave (requires api key) for web search then uses ollama for local embedding and inference, for a cost-free web RAG
# requires ollama to be installed and running

import os
import json
import logging
import sys

import requests
from dotenv import load_dotenv
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.core import VectorStoreIndex, Document
from llama_index.tools.brave_search import BraveSearchToolSpec
from llama_index.readers.web import SimpleWebPageReader

# Local Model Setup
from llama_index.core import Settings

Settings.embed_model = OllamaEmbedding(model_name="nomic-embed-text")  # Make sure to run: ollama pull nomic-embed-text

from llama_index.llms.ollama import Ollama

Settings.llm = Ollama(model="mistral", request_timeout=360.0)  # Make sure to run: ollama pull mistral

# Constants
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36'
HEADERS = {'User-Agent': USER_AGENT}
RETRIES = Retry(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])


def setup_logging():
    """
    Initialize logging configuration to output logs to stdout.
    """
    logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))


def load_environment_variables():
    """
    Load environment variables from the .env file.

    :return: The Brave API key.
    """
    load_dotenv()
    return os.getenv('BRAVE_API_KEY')


def perform_search(query, api_key):
    """
    Perform a search using the Brave Search API.

    :param query: The search query.
    :param api_key: The Brave API key.
    :return: The search response.
    """
    tool_spec = BraveSearchToolSpec(api_key=api_key)
    return tool_spec.brave_search(query=query)


def extract_search_results(response):
    """
    Extract search results from the Brave Search API response.

    :param response: The search response.
    :return: A list of search results.
    """
    documents = [doc.text for doc in response]
    search_results = []
    for document in documents:
        response_data = json.loads(document)
        search_results.extend(response_data.get('web', {}).get('results', []))
    return search_results


def scrape_web_pages(search_results):
    """
    Scrape web pages from the URLs obtained from the search results.

    :param search_results: The list of search results.
    :return: A list of scraped documents.
    """
    session = requests.Session()
    session.mount('http://', HTTPAdapter(max_retries=RETRIES))
    session.mount('https://', HTTPAdapter(max_retries=RETRIES))

    all_documents = []
    for result in search_results:
        url = result.get('url')
        try:
            response = session.get(url, headers=HEADERS, timeout=10)
            response.raise_for_status()
            doc = Document(text=response.text, url=url)
            all_documents.append(doc)
        except requests.exceptions.RequestException as e:
            logging.error(f"Failed to scrape {url}: {e}")

    return all_documents


def main():
    """
    Main function to orchestrate the search, scraping, and querying process.
    """
    setup_logging()
    api_key = load_environment_variables()
    my_query = "What is RAG, retrieval augmented generation?"

    response = perform_search(my_query, api_key)
    search_results = extract_search_results(response)
    all_documents = scrape_web_pages(search_results)

    # Load all the scraped documents into the vector store
    index = VectorStoreIndex.from_documents(all_documents)

    # Use the index to query with the language model
    query_engine = index.as_query_engine()
    response = query_engine.query(my_query)
    print(response)


if __name__ == "__main__":
    main()
[ "llama_index.llms.ollama.Ollama", "llama_index.tools.brave_search.BraveSearchToolSpec", "llama_index.embeddings.ollama.OllamaEmbedding", "llama_index.core.VectorStoreIndex.from_documents", "llama_index.core.Document" ]
[((660, 706), 'llama_index.embeddings.ollama.OllamaEmbedding', 'OllamaEmbedding', ([], {'model_name': '"""nomic-embed-text"""'}), "(model_name='nomic-embed-text')\n", (675, 706), False, 'from llama_index.embeddings.ollama import OllamaEmbedding\n'), ((814, 860), 'llama_index.llms.ollama.Ollama', 'Ollama', ([], {'model': '"""mistral"""', 'request_timeout': '(360.0)'}), "(model='mistral', request_timeout=360.0)\n", (820, 860), False, 'from llama_index.llms.ollama import Ollama\n'), ((1082, 1155), 'urllib3.util.retry.Retry', 'Retry', ([], {'total': '(5)', 'backoff_factor': '(0.1)', 'status_forcelist': '[500, 502, 503, 504]'}), '(total=5, backoff_factor=0.1, status_forcelist=[500, 502, 503, 504])\n', (1087, 1155), False, 'from urllib3.util.retry import Retry\n'), ((1261, 1320), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.DEBUG'}), '(stream=sys.stdout, level=logging.DEBUG)\n', (1280, 1320), False, 'import logging\n'), ((1536, 1549), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1547, 1549), False, 'from dotenv import load_dotenv\n'), ((1561, 1587), 'os.getenv', 'os.getenv', (['"""BRAVE_API_KEY"""'], {}), "('BRAVE_API_KEY')\n", (1570, 1587), False, 'import os\n'), ((1815, 1851), 'llama_index.tools.brave_search.BraveSearchToolSpec', 'BraveSearchToolSpec', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (1834, 1851), False, 'from llama_index.tools.brave_search import BraveSearchToolSpec\n'), ((2585, 2603), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2601, 2603), False, 'import requests\n'), ((3642, 3688), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['all_documents'], {}), '(all_documents)\n', (3673, 3688), False, 'from llama_index.core import VectorStoreIndex, Document\n'), ((1356, 1396), 'logging.StreamHandler', 'logging.StreamHandler', ([], {'stream': 'sys.stdout'}), '(stream=sys.stdout)\n', (1377, 1396), False, 'import logging\n'), ((2224, 2244), 'json.loads', 'json.loads', (['document'], {}), '(document)\n', (2234, 2244), False, 'import json\n'), ((2633, 2665), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'RETRIES'}), '(max_retries=RETRIES)\n', (2644, 2665), False, 'from requests.adapters import HTTPAdapter\n'), ((2697, 2729), 'requests.adapters.HTTPAdapter', 'HTTPAdapter', ([], {'max_retries': 'RETRIES'}), '(max_retries=RETRIES)\n', (2708, 2729), False, 'from requests.adapters import HTTPAdapter\n'), ((1325, 1344), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (1342, 1344), False, 'import logging\n'), ((2961, 2998), 'llama_index.core.Document', 'Document', ([], {'text': 'response.text', 'url': 'url'}), '(text=response.text, url=url)\n', (2969, 2998), False, 'from llama_index.core import VectorStoreIndex, Document\n'), ((3107, 3152), 'logging.error', 'logging.error', (['f"""Failed to scrape {url}: {e}"""'], {}), "(f'Failed to scrape {url}: {e}')\n", (3120, 3152), False, 'import logging\n')]
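One possible refinement, sketched under assumptions rather than taken from the original script: persist the vector index so repeated runs can skip re-scraping. The persist directory name and helper function are placeholders.

# Illustrative only: persist_dir and build_or_load_index are hypothetical additions.
import os
from llama_index.core import StorageContext, VectorStoreIndex, load_index_from_storage


def build_or_load_index(all_documents, persist_dir="./web_rag_storage"):
    # Reuse a previously persisted index when one exists on disk.
    if os.path.isdir(persist_dir):
        storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
        return load_index_from_storage(storage_context)
    # Otherwise build it fresh and persist for next time.
    index = VectorStoreIndex.from_documents(all_documents)
    index.storage_context.persist(persist_dir=persist_dir)
    return index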
"""Composability graphs.""" from typing import Any, Dict, List, Optional, Sequence, Type, cast from llama_index.legacy.core.base_query_engine import BaseQueryEngine from llama_index.legacy.data_structs.data_structs import IndexStruct from llama_index.legacy.indices.base import BaseIndex from llama_index.legacy.schema import ( IndexNode, NodeRelationship, ObjectType, RelatedNodeInfo, ) from llama_index.legacy.service_context import ServiceContext from llama_index.legacy.storage.storage_context import StorageContext class ComposableGraph: """Composable graph.""" def __init__( self, all_indices: Dict[str, BaseIndex], root_id: str, storage_context: Optional[StorageContext] = None, ) -> None: """Init params.""" self._all_indices = all_indices self._root_id = root_id self.storage_context = storage_context @property def root_id(self) -> str: return self._root_id @property def all_indices(self) -> Dict[str, BaseIndex]: return self._all_indices @property def root_index(self) -> BaseIndex: return self._all_indices[self._root_id] @property def index_struct(self) -> IndexStruct: return self._all_indices[self._root_id].index_struct @property def service_context(self) -> ServiceContext: return self._all_indices[self._root_id].service_context @classmethod def from_indices( cls, root_index_cls: Type[BaseIndex], children_indices: Sequence[BaseIndex], index_summaries: Optional[Sequence[str]] = None, service_context: Optional[ServiceContext] = None, storage_context: Optional[StorageContext] = None, **kwargs: Any, ) -> "ComposableGraph": # type: ignore """Create composable graph using this index class as the root.""" service_context = service_context or ServiceContext.from_defaults() with service_context.callback_manager.as_trace("graph_construction"): if index_summaries is None: for index in children_indices: if index.index_struct.summary is None: raise ValueError( "Summary must be set for children indices. " "If the index does a summary " "(through index.index_struct.summary), then " "it must be specified with then `index_summaries` " "argument in this function. We will support " "automatically setting the summary in the future." 
) index_summaries = [ index.index_struct.summary for index in children_indices ] else: # set summaries for each index for index, summary in zip(children_indices, index_summaries): index.index_struct.summary = summary if len(children_indices) != len(index_summaries): raise ValueError("indices and index_summaries must have same length!") # construct index nodes index_nodes = [] for index, summary in zip(children_indices, index_summaries): assert isinstance(index.index_struct, IndexStruct) index_node = IndexNode( text=summary, index_id=index.index_id, relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id=index.index_id, node_type=ObjectType.INDEX ) }, ) index_nodes.append(index_node) # construct root index root_index = root_index_cls( nodes=index_nodes, service_context=service_context, storage_context=storage_context, **kwargs, ) # type: ignore all_indices: List[BaseIndex] = [ *cast(List[BaseIndex], children_indices), root_index, ] return cls( all_indices={index.index_id: index for index in all_indices}, root_id=root_index.index_id, storage_context=storage_context, ) def get_index(self, index_struct_id: Optional[str] = None) -> BaseIndex: """Get index from index struct id.""" if index_struct_id is None: index_struct_id = self._root_id return self._all_indices[index_struct_id] def as_query_engine(self, **kwargs: Any) -> BaseQueryEngine: # NOTE: lazy import from llama_index.legacy.query_engine.graph_query_engine import ( ComposableGraphQueryEngine, ) return ComposableGraphQueryEngine(self, **kwargs)
[ "llama_index.legacy.service_context.ServiceContext.from_defaults", "llama_index.legacy.query_engine.graph_query_engine.ComposableGraphQueryEngine", "llama_index.legacy.schema.RelatedNodeInfo" ]
[((4914, 4956), 'llama_index.legacy.query_engine.graph_query_engine.ComposableGraphQueryEngine', 'ComposableGraphQueryEngine', (['self'], {}), '(self, **kwargs)\n', (4940, 4956), False, 'from llama_index.legacy.query_engine.graph_query_engine import ComposableGraphQueryEngine\n'), ((1930, 1960), 'llama_index.legacy.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {}), '()\n', (1958, 1960), False, 'from llama_index.legacy.service_context import ServiceContext\n'), ((4133, 4172), 'typing.cast', 'cast', (['List[BaseIndex]', 'children_indices'], {}), '(List[BaseIndex], children_indices)\n', (4137, 4172), False, 'from typing import Any, Dict, List, Optional, Sequence, Type, cast\n'), ((3584, 3651), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'index.index_id', 'node_type': 'ObjectType.INDEX'}), '(node_id=index.index_id, node_type=ObjectType.INDEX)\n', (3599, 3651), False, 'from llama_index.legacy.schema import IndexNode, NodeRelationship, ObjectType, RelatedNodeInfo\n')]
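A hedged sketch of composing two child indices under a summary-index root with from_indices; the directory paths, summaries, and query are placeholders, and it assumes the legacy namespace re-exports the reader and index classes used below.

# Illustrative only: paths, summaries, and the query are assumptions.
from llama_index.legacy import SimpleDirectoryReader, SummaryIndex, VectorStoreIndex
from llama_index.legacy.indices.composability import ComposableGraph

docs_2022 = SimpleDirectoryReader("./reports/2022").load_data()
docs_2023 = SimpleDirectoryReader("./reports/2023").load_data()
index_2022 = VectorStoreIndex.from_documents(docs_2022)
index_2023 = VectorStoreIndex.from_documents(docs_2023)

graph = ComposableGraph.from_indices(
    SummaryIndex,
    [index_2022, index_2023],
    index_summaries=["2022 annual report", "2023 annual report"],
)
print(graph.as_query_engine().query("Compare the two years' revenue."))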
"""Composability graphs.""" from typing import Any, Dict, List, Optional, Sequence, Type, cast from llama_index.legacy.core.base_query_engine import BaseQueryEngine from llama_index.legacy.data_structs.data_structs import IndexStruct from llama_index.legacy.indices.base import BaseIndex from llama_index.legacy.schema import ( IndexNode, NodeRelationship, ObjectType, RelatedNodeInfo, ) from llama_index.legacy.service_context import ServiceContext from llama_index.legacy.storage.storage_context import StorageContext class ComposableGraph: """Composable graph.""" def __init__( self, all_indices: Dict[str, BaseIndex], root_id: str, storage_context: Optional[StorageContext] = None, ) -> None: """Init params.""" self._all_indices = all_indices self._root_id = root_id self.storage_context = storage_context @property def root_id(self) -> str: return self._root_id @property def all_indices(self) -> Dict[str, BaseIndex]: return self._all_indices @property def root_index(self) -> BaseIndex: return self._all_indices[self._root_id] @property def index_struct(self) -> IndexStruct: return self._all_indices[self._root_id].index_struct @property def service_context(self) -> ServiceContext: return self._all_indices[self._root_id].service_context @classmethod def from_indices( cls, root_index_cls: Type[BaseIndex], children_indices: Sequence[BaseIndex], index_summaries: Optional[Sequence[str]] = None, service_context: Optional[ServiceContext] = None, storage_context: Optional[StorageContext] = None, **kwargs: Any, ) -> "ComposableGraph": # type: ignore """Create composable graph using this index class as the root.""" service_context = service_context or ServiceContext.from_defaults() with service_context.callback_manager.as_trace("graph_construction"): if index_summaries is None: for index in children_indices: if index.index_struct.summary is None: raise ValueError( "Summary must be set for children indices. " "If the index does a summary " "(through index.index_struct.summary), then " "it must be specified with then `index_summaries` " "argument in this function. We will support " "automatically setting the summary in the future." 
) index_summaries = [ index.index_struct.summary for index in children_indices ] else: # set summaries for each index for index, summary in zip(children_indices, index_summaries): index.index_struct.summary = summary if len(children_indices) != len(index_summaries): raise ValueError("indices and index_summaries must have same length!") # construct index nodes index_nodes = [] for index, summary in zip(children_indices, index_summaries): assert isinstance(index.index_struct, IndexStruct) index_node = IndexNode( text=summary, index_id=index.index_id, relationships={ NodeRelationship.SOURCE: RelatedNodeInfo( node_id=index.index_id, node_type=ObjectType.INDEX ) }, ) index_nodes.append(index_node) # construct root index root_index = root_index_cls( nodes=index_nodes, service_context=service_context, storage_context=storage_context, **kwargs, ) # type: ignore all_indices: List[BaseIndex] = [ *cast(List[BaseIndex], children_indices), root_index, ] return cls( all_indices={index.index_id: index for index in all_indices}, root_id=root_index.index_id, storage_context=storage_context, ) def get_index(self, index_struct_id: Optional[str] = None) -> BaseIndex: """Get index from index struct id.""" if index_struct_id is None: index_struct_id = self._root_id return self._all_indices[index_struct_id] def as_query_engine(self, **kwargs: Any) -> BaseQueryEngine: # NOTE: lazy import from llama_index.legacy.query_engine.graph_query_engine import ( ComposableGraphQueryEngine, ) return ComposableGraphQueryEngine(self, **kwargs)
[ "llama_index.legacy.service_context.ServiceContext.from_defaults", "llama_index.legacy.query_engine.graph_query_engine.ComposableGraphQueryEngine", "llama_index.legacy.schema.RelatedNodeInfo" ]
[((4914, 4956), 'llama_index.legacy.query_engine.graph_query_engine.ComposableGraphQueryEngine', 'ComposableGraphQueryEngine', (['self'], {}), '(self, **kwargs)\n', (4940, 4956), False, 'from llama_index.legacy.query_engine.graph_query_engine import ComposableGraphQueryEngine\n'), ((1930, 1960), 'llama_index.legacy.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {}), '()\n', (1958, 1960), False, 'from llama_index.legacy.service_context import ServiceContext\n'), ((4133, 4172), 'typing.cast', 'cast', (['List[BaseIndex]', 'children_indices'], {}), '(List[BaseIndex], children_indices)\n', (4137, 4172), False, 'from typing import Any, Dict, List, Optional, Sequence, Type, cast\n'), ((3584, 3651), 'llama_index.legacy.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': 'index.index_id', 'node_type': 'ObjectType.INDEX'}), '(node_id=index.index_id, node_type=ObjectType.INDEX)\n', (3599, 3651), False, 'from llama_index.legacy.schema import IndexNode, NodeRelationship, ObjectType, RelatedNodeInfo\n')]
from langchain.callbacks import CallbackManager from llama_index import ServiceContext, PromptHelper, LLMPredictor from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler from core.embedding.openai_embedding import OpenAIEmbedding from core.llm.llm_builder import LLMBuilder class IndexBuilder: @classmethod def get_default_service_context(cls, tenant_id: str) -> ServiceContext: # set number of output tokens num_output = 512 # only for verbose callback_manager = CallbackManager([DifyStdOutCallbackHandler()]) llm = LLMBuilder.to_llm( tenant_id=tenant_id, model_name='text-davinci-003', temperature=0, max_tokens=num_output, callback_manager=callback_manager, ) llm_predictor = LLMPredictor(llm=llm) # These parameters here will affect the logic of segmenting the final synthesized response. # The number of refinement iterations in the synthesis process depends # on whether the length of the segmented output exceeds the max_input_size. prompt_helper = PromptHelper( max_input_size=3500, num_output=num_output, max_chunk_overlap=20 ) provider = LLMBuilder.get_default_provider(tenant_id) model_credentials = LLMBuilder.get_model_credentials( tenant_id=tenant_id, model_provider=provider, model_name='text-embedding-ada-002' ) return ServiceContext.from_defaults( llm_predictor=llm_predictor, prompt_helper=prompt_helper, embed_model=OpenAIEmbedding(**model_credentials), ) @classmethod def get_fake_llm_service_context(cls, tenant_id: str) -> ServiceContext: llm = LLMBuilder.to_llm( tenant_id=tenant_id, model_name='fake' ) return ServiceContext.from_defaults( llm_predictor=LLMPredictor(llm=llm), embed_model=OpenAIEmbedding() )
[ "llama_index.PromptHelper", "llama_index.LLMPredictor" ]
[((599, 745), 'core.llm.llm_builder.LLMBuilder.to_llm', 'LLMBuilder.to_llm', ([], {'tenant_id': 'tenant_id', 'model_name': '"""text-davinci-003"""', 'temperature': '(0)', 'max_tokens': 'num_output', 'callback_manager': 'callback_manager'}), "(tenant_id=tenant_id, model_name='text-davinci-003',\n temperature=0, max_tokens=num_output, callback_manager=callback_manager)\n", (616, 745), False, 'from core.llm.llm_builder import LLMBuilder\n'), ((838, 859), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (850, 859), False, 'from llama_index import ServiceContext, PromptHelper, LLMPredictor\n'), ((1148, 1226), 'llama_index.PromptHelper', 'PromptHelper', ([], {'max_input_size': '(3500)', 'num_output': 'num_output', 'max_chunk_overlap': '(20)'}), '(max_input_size=3500, num_output=num_output, max_chunk_overlap=20)\n', (1160, 1226), False, 'from llama_index import ServiceContext, PromptHelper, LLMPredictor\n'), ((1293, 1335), 'core.llm.llm_builder.LLMBuilder.get_default_provider', 'LLMBuilder.get_default_provider', (['tenant_id'], {}), '(tenant_id)\n', (1324, 1335), False, 'from core.llm.llm_builder import LLMBuilder\n'), ((1365, 1485), 'core.llm.llm_builder.LLMBuilder.get_model_credentials', 'LLMBuilder.get_model_credentials', ([], {'tenant_id': 'tenant_id', 'model_provider': 'provider', 'model_name': '"""text-embedding-ada-002"""'}), "(tenant_id=tenant_id, model_provider=\n provider, model_name='text-embedding-ada-002')\n", (1397, 1485), False, 'from core.llm.llm_builder import LLMBuilder\n'), ((1836, 1893), 'core.llm.llm_builder.LLMBuilder.to_llm', 'LLMBuilder.to_llm', ([], {'tenant_id': 'tenant_id', 'model_name': '"""fake"""'}), "(tenant_id=tenant_id, model_name='fake')\n", (1853, 1893), False, 'from core.llm.llm_builder import LLMBuilder\n'), ((554, 581), 'core.callback_handler.std_out_callback_handler.DifyStdOutCallbackHandler', 'DifyStdOutCallbackHandler', ([], {}), '()\n', (579, 581), False, 'from core.callback_handler.std_out_callback_handler import DifyStdOutCallbackHandler\n'), ((1679, 1715), 'core.embedding.openai_embedding.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '(**model_credentials)\n', (1694, 1715), False, 'from core.embedding.openai_embedding import OpenAIEmbedding\n'), ((2000, 2021), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (2012, 2021), False, 'from llama_index import ServiceContext, PromptHelper, LLMPredictor\n'), ((2047, 2064), 'core.embedding.openai_embedding.OpenAIEmbedding', 'OpenAIEmbedding', ([], {}), '()\n', (2062, 2064), False, 'from core.embedding.openai_embedding import OpenAIEmbedding\n')]
"""Default prompt selectors.""" from llama_index.core.prompts import SelectorPromptTemplate from llama_index.core.prompts.chat_prompts import ( CHAT_REFINE_PROMPT, CHAT_REFINE_TABLE_CONTEXT_PROMPT, CHAT_TEXT_QA_PROMPT, CHAT_TREE_SUMMARIZE_PROMPT, ) from llama_index.core.prompts.default_prompts import ( DEFAULT_REFINE_PROMPT, DEFAULT_REFINE_TABLE_CONTEXT_PROMPT, DEFAULT_TEXT_QA_PROMPT, DEFAULT_TREE_SUMMARIZE_PROMPT, ) from llama_index.core.prompts.utils import is_chat_model DEFAULT_TEXT_QA_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_TEXT_QA_PROMPT, conditionals=[(is_chat_model, CHAT_TEXT_QA_PROMPT)], ) DEFAULT_TREE_SUMMARIZE_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_TREE_SUMMARIZE_PROMPT, conditionals=[(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT)], ) DEFAULT_REFINE_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_REFINE_PROMPT, conditionals=[(is_chat_model, CHAT_REFINE_PROMPT)], ) DEFAULT_REFINE_TABLE_CONTEXT_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_REFINE_TABLE_CONTEXT_PROMPT, conditionals=[(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT)], )
[ "llama_index.core.prompts.SelectorPromptTemplate" ]
[((540, 660), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_TEXT_QA_PROMPT', 'conditionals': '[(is_chat_model, CHAT_TEXT_QA_PROMPT)]'}), '(default_template=DEFAULT_TEXT_QA_PROMPT,\n conditionals=[(is_chat_model, CHAT_TEXT_QA_PROMPT)])\n', (562, 660), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n'), ((705, 839), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_TREE_SUMMARIZE_PROMPT', 'conditionals': '[(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT)]'}), '(default_template=DEFAULT_TREE_SUMMARIZE_PROMPT,\n conditionals=[(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT)])\n', (727, 839), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n'), ((876, 995), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_REFINE_PROMPT', 'conditionals': '[(is_chat_model, CHAT_REFINE_PROMPT)]'}), '(default_template=DEFAULT_REFINE_PROMPT, conditionals\n =[(is_chat_model, CHAT_REFINE_PROMPT)])\n', (898, 995), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n'), ((1045, 1191), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_REFINE_TABLE_CONTEXT_PROMPT', 'conditionals': '[(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT)]'}), '(default_template=DEFAULT_REFINE_TABLE_CONTEXT_PROMPT,\n conditionals=[(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT)])\n', (1067, 1191), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n')]
"""Default prompt selectors.""" from llama_index.core.prompts import SelectorPromptTemplate from llama_index.core.prompts.chat_prompts import ( CHAT_REFINE_PROMPT, CHAT_REFINE_TABLE_CONTEXT_PROMPT, CHAT_TEXT_QA_PROMPT, CHAT_TREE_SUMMARIZE_PROMPT, ) from llama_index.core.prompts.default_prompts import ( DEFAULT_REFINE_PROMPT, DEFAULT_REFINE_TABLE_CONTEXT_PROMPT, DEFAULT_TEXT_QA_PROMPT, DEFAULT_TREE_SUMMARIZE_PROMPT, ) from llama_index.core.prompts.utils import is_chat_model DEFAULT_TEXT_QA_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_TEXT_QA_PROMPT, conditionals=[(is_chat_model, CHAT_TEXT_QA_PROMPT)], ) DEFAULT_TREE_SUMMARIZE_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_TREE_SUMMARIZE_PROMPT, conditionals=[(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT)], ) DEFAULT_REFINE_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_REFINE_PROMPT, conditionals=[(is_chat_model, CHAT_REFINE_PROMPT)], ) DEFAULT_REFINE_TABLE_CONTEXT_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_REFINE_TABLE_CONTEXT_PROMPT, conditionals=[(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT)], )
[ "llama_index.core.prompts.SelectorPromptTemplate" ]
[((540, 660), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_TEXT_QA_PROMPT', 'conditionals': '[(is_chat_model, CHAT_TEXT_QA_PROMPT)]'}), '(default_template=DEFAULT_TEXT_QA_PROMPT,\n conditionals=[(is_chat_model, CHAT_TEXT_QA_PROMPT)])\n', (562, 660), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n'), ((705, 839), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_TREE_SUMMARIZE_PROMPT', 'conditionals': '[(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT)]'}), '(default_template=DEFAULT_TREE_SUMMARIZE_PROMPT,\n conditionals=[(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT)])\n', (727, 839), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n'), ((876, 995), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_REFINE_PROMPT', 'conditionals': '[(is_chat_model, CHAT_REFINE_PROMPT)]'}), '(default_template=DEFAULT_REFINE_PROMPT, conditionals\n =[(is_chat_model, CHAT_REFINE_PROMPT)])\n', (898, 995), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n'), ((1045, 1191), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_REFINE_TABLE_CONTEXT_PROMPT', 'conditionals': '[(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT)]'}), '(default_template=DEFAULT_REFINE_TABLE_CONTEXT_PROMPT,\n conditionals=[(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT)])\n', (1067, 1191), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n')]
"""Default prompt selectors.""" from llama_index.core.prompts import SelectorPromptTemplate from llama_index.core.prompts.chat_prompts import ( CHAT_REFINE_PROMPT, CHAT_REFINE_TABLE_CONTEXT_PROMPT, CHAT_TEXT_QA_PROMPT, CHAT_TREE_SUMMARIZE_PROMPT, ) from llama_index.core.prompts.default_prompts import ( DEFAULT_REFINE_PROMPT, DEFAULT_REFINE_TABLE_CONTEXT_PROMPT, DEFAULT_TEXT_QA_PROMPT, DEFAULT_TREE_SUMMARIZE_PROMPT, ) from llama_index.core.prompts.utils import is_chat_model DEFAULT_TEXT_QA_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_TEXT_QA_PROMPT, conditionals=[(is_chat_model, CHAT_TEXT_QA_PROMPT)], ) DEFAULT_TREE_SUMMARIZE_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_TREE_SUMMARIZE_PROMPT, conditionals=[(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT)], ) DEFAULT_REFINE_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_REFINE_PROMPT, conditionals=[(is_chat_model, CHAT_REFINE_PROMPT)], ) DEFAULT_REFINE_TABLE_CONTEXT_PROMPT_SEL = SelectorPromptTemplate( default_template=DEFAULT_REFINE_TABLE_CONTEXT_PROMPT, conditionals=[(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT)], )
[ "llama_index.core.prompts.SelectorPromptTemplate" ]
[((540, 660), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_TEXT_QA_PROMPT', 'conditionals': '[(is_chat_model, CHAT_TEXT_QA_PROMPT)]'}), '(default_template=DEFAULT_TEXT_QA_PROMPT,\n conditionals=[(is_chat_model, CHAT_TEXT_QA_PROMPT)])\n', (562, 660), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n'), ((705, 839), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_TREE_SUMMARIZE_PROMPT', 'conditionals': '[(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT)]'}), '(default_template=DEFAULT_TREE_SUMMARIZE_PROMPT,\n conditionals=[(is_chat_model, CHAT_TREE_SUMMARIZE_PROMPT)])\n', (727, 839), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n'), ((876, 995), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_REFINE_PROMPT', 'conditionals': '[(is_chat_model, CHAT_REFINE_PROMPT)]'}), '(default_template=DEFAULT_REFINE_PROMPT, conditionals\n =[(is_chat_model, CHAT_REFINE_PROMPT)])\n', (898, 995), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n'), ((1045, 1191), 'llama_index.core.prompts.SelectorPromptTemplate', 'SelectorPromptTemplate', ([], {'default_template': 'DEFAULT_REFINE_TABLE_CONTEXT_PROMPT', 'conditionals': '[(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT)]'}), '(default_template=DEFAULT_REFINE_TABLE_CONTEXT_PROMPT,\n conditionals=[(is_chat_model, CHAT_REFINE_TABLE_CONTEXT_PROMPT)])\n', (1067, 1191), False, 'from llama_index.core.prompts import SelectorPromptTemplate\n')]
"""Langchain memory wrapper (for LlamaIndex).""" from typing import Any, Dict, List, Optional from llama_index.core.bridge.langchain import ( AIMessage, BaseChatMemory, BaseMessage, HumanMessage, ) from llama_index.core.bridge.langchain import BaseMemory as Memory from llama_index.core.bridge.pydantic import Field from llama_index.core.indices.base import BaseIndex from llama_index.core.schema import Document from llama_index.core.utils import get_new_id def get_prompt_input_key(inputs: Dict[str, Any], memory_variables: List[str]) -> str: """Get prompt input key. Copied over from langchain. """ # "stop" is a special key that can be passed as input but is not used to # format the prompt. prompt_input_keys = list(set(inputs).difference([*memory_variables, "stop"])) if len(prompt_input_keys) != 1: raise ValueError(f"One input key expected got {prompt_input_keys}") return prompt_input_keys[0] class GPTIndexMemory(Memory): """Langchain memory wrapper (for LlamaIndex). Args: human_prefix (str): Prefix for human input. Defaults to "Human". ai_prefix (str): Prefix for AI output. Defaults to "AI". memory_key (str): Key for memory. Defaults to "history". index (BaseIndex): LlamaIndex instance. query_kwargs (Dict[str, Any]): Keyword arguments for LlamaIndex query. input_key (Optional[str]): Input key. Defaults to None. output_key (Optional[str]): Output key. Defaults to None. """ human_prefix: str = "Human" ai_prefix: str = "AI" memory_key: str = "history" index: BaseIndex query_kwargs: Dict = Field(default_factory=dict) output_key: Optional[str] = None input_key: Optional[str] = None @property def memory_variables(self) -> List[str]: """Return memory variables.""" return [self.memory_key] def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key return prompt_input_key def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Return key-value pairs given the text input to the chain.""" prompt_input_key = self._get_prompt_input_key(inputs) query_str = inputs[prompt_input_key] # TODO: wrap in prompt # TODO: add option to return the raw text # NOTE: currently it's a hack query_engine = self.index.as_query_engine(**self.query_kwargs) response = query_engine.query(query_str) return {self.memory_key: str(response)} def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save the context of this model run to memory.""" prompt_input_key = self._get_prompt_input_key(inputs) if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") output_key = next(iter(outputs.keys())) else: output_key = self.output_key human = f"{self.human_prefix}: " + inputs[prompt_input_key] ai = f"{self.ai_prefix}: " + outputs[output_key] doc_text = f"{human}\n{ai}" doc = Document(text=doc_text) self.index.insert(doc) def clear(self) -> None: """Clear memory contents.""" def __repr__(self) -> str: """Return representation.""" return "GPTIndexMemory()" class GPTIndexChatMemory(BaseChatMemory): """Langchain chat memory wrapper (for LlamaIndex). Args: human_prefix (str): Prefix for human input. Defaults to "Human". ai_prefix (str): Prefix for AI output. Defaults to "AI". memory_key (str): Key for memory. Defaults to "history". index (BaseIndex): LlamaIndex instance. query_kwargs (Dict[str, Any]): Keyword arguments for LlamaIndex query. input_key (Optional[str]): Input key. Defaults to None. 
output_key (Optional[str]): Output key. Defaults to None. """ human_prefix: str = "Human" ai_prefix: str = "AI" memory_key: str = "history" index: BaseIndex query_kwargs: Dict = Field(default_factory=dict) output_key: Optional[str] = None input_key: Optional[str] = None return_source: bool = False id_to_message: Dict[str, BaseMessage] = Field(default_factory=dict) @property def memory_variables(self) -> List[str]: """Return memory variables.""" return [self.memory_key] def _get_prompt_input_key(self, inputs: Dict[str, Any]) -> str: if self.input_key is None: prompt_input_key = get_prompt_input_key(inputs, self.memory_variables) else: prompt_input_key = self.input_key return prompt_input_key def load_memory_variables(self, inputs: Dict[str, Any]) -> Dict[str, str]: """Return key-value pairs given the text input to the chain.""" prompt_input_key = self._get_prompt_input_key(inputs) query_str = inputs[prompt_input_key] query_engine = self.index.as_query_engine(**self.query_kwargs) response_obj = query_engine.query(query_str) if self.return_source: source_nodes = response_obj.source_nodes if self.return_messages: # get source messages from ids source_ids = [sn.node.node_id for sn in source_nodes] source_messages = [ m for id, m in self.id_to_message.items() if id in source_ids ] # NOTE: type List[BaseMessage] response: Any = source_messages else: source_texts = [sn.node.get_content() for sn in source_nodes] response = "\n\n".join(source_texts) else: response = str(response_obj) return {self.memory_key: response} def save_context(self, inputs: Dict[str, Any], outputs: Dict[str, str]) -> None: """Save the context of this model run to memory.""" prompt_input_key = self._get_prompt_input_key(inputs) if self.output_key is None: if len(outputs) != 1: raise ValueError(f"One output key expected, got {outputs.keys()}") output_key = next(iter(outputs.keys())) else: output_key = self.output_key # a bit different than existing langchain implementation # because we want to track id's for messages human_message = HumanMessage(content=inputs[prompt_input_key]) human_message_id = get_new_id(set(self.id_to_message.keys())) ai_message = AIMessage(content=outputs[output_key]) ai_message_id = get_new_id( set(self.id_to_message.keys()).union({human_message_id}) ) self.chat_memory.messages.append(human_message) self.chat_memory.messages.append(ai_message) self.id_to_message[human_message_id] = human_message self.id_to_message[ai_message_id] = ai_message human_txt = f"{self.human_prefix}: " + inputs[prompt_input_key] ai_txt = f"{self.ai_prefix}: " + outputs[output_key] human_doc = Document(text=human_txt, id_=human_message_id) ai_doc = Document(text=ai_txt, id_=ai_message_id) self.index.insert(human_doc) self.index.insert(ai_doc) def clear(self) -> None: """Clear memory contents.""" def __repr__(self) -> str: """Return representation.""" return "GPTIndexMemory()"
[ "llama_index.core.bridge.langchain.AIMessage", "llama_index.core.bridge.langchain.HumanMessage", "llama_index.core.bridge.pydantic.Field", "llama_index.core.schema.Document" ]
[((1663, 1690), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1668, 1690), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((4306, 4333), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (4311, 4333), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((4484, 4511), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (4489, 4511), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((3365, 3388), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'doc_text'}), '(text=doc_text)\n', (3373, 3388), False, 'from llama_index.core.schema import Document\n'), ((6634, 6680), 'llama_index.core.bridge.langchain.HumanMessage', 'HumanMessage', ([], {'content': 'inputs[prompt_input_key]'}), '(content=inputs[prompt_input_key])\n', (6646, 6680), False, 'from llama_index.core.bridge.langchain import AIMessage, BaseChatMemory, BaseMessage, HumanMessage\n'), ((6772, 6810), 'llama_index.core.bridge.langchain.AIMessage', 'AIMessage', ([], {'content': 'outputs[output_key]'}), '(content=outputs[output_key])\n', (6781, 6810), False, 'from llama_index.core.bridge.langchain import AIMessage, BaseChatMemory, BaseMessage, HumanMessage\n'), ((7307, 7353), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'human_txt', 'id_': 'human_message_id'}), '(text=human_txt, id_=human_message_id)\n', (7315, 7353), False, 'from llama_index.core.schema import Document\n'), ((7371, 7411), 'llama_index.core.schema.Document', 'Document', ([], {'text': 'ai_txt', 'id_': 'ai_message_id'}), '(text=ai_txt, id_=ai_message_id)\n', (7379, 7411), False, 'from llama_index.core.schema import Document\n')]
import logging from typing import Any, List, Optional from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.core.embeddings.base import ( DEFAULT_EMBED_BATCH_SIZE, BaseEmbedding, Embedding, ) logger = logging.getLogger(__name__) # For bge models that Gradient AI provides, it is suggested to add the instruction for retrieval. # Reference: https://huggingface.co/BAAI/bge-large-en-v1.5#model-list QUERY_INSTRUCTION_FOR_RETRIEVAL = ( "Represent this sentence for searching relevant passages:" ) GRADIENT_EMBED_BATCH_SIZE: int = 32_768 class GradientEmbedding(BaseEmbedding): """GradientAI embedding models. This class provides an interface to generate embeddings using a model deployed in Gradient AI. At the initialization it requires a model_id of the model deployed in the cluster. Note: Requires `gradientai` package to be available in the PYTHONPATH. It can be installed with `pip install gradientai`. """ embed_batch_size: int = Field(default=GRADIENT_EMBED_BATCH_SIZE, gt=0) _gradient: Any = PrivateAttr() _model: Any = PrivateAttr() @classmethod def class_name(cls) -> str: return "GradientEmbedding" def __init__( self, *, embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, gradient_model_slug: str, gradient_access_token: Optional[str] = None, gradient_workspace_id: Optional[str] = None, gradient_host: Optional[str] = None, **kwargs: Any, ): """Initializes the GradientEmbedding class. During the initialization the `gradientai` package is imported. Using the access token, workspace id and the slug of the model, the model is fetched from Gradient AI and prepared to use. Args: embed_batch_size (int, optional): The batch size for embedding generation. Defaults to 10, must be > 0 and <= 100. gradient_model_slug (str): The model slug of the model in the Gradient AI account. gradient_access_token (str, optional): The access token of the Gradient AI account, if `None` read from the environment variable `GRADIENT_ACCESS_TOKEN`. gradient_workspace_id (str, optional): The workspace ID of the Gradient AI account, if `None` read from the environment variable `GRADIENT_WORKSPACE_ID`. gradient_host (str, optional): The host of the Gradient AI API. Defaults to None, which means the default host is used. Raises: ImportError: If the `gradientai` package is not available in the PYTHONPATH. ValueError: If the model cannot be fetched from Gradient AI. """ if embed_batch_size <= 0: raise ValueError(f"Embed batch size {embed_batch_size} must be > 0.") try: import gradientai except ImportError: raise ImportError("GradientEmbedding requires `pip install gradientai`.") self._gradient = gradientai.Gradient( access_token=gradient_access_token, workspace_id=gradient_workspace_id, host=gradient_host, ) try: self._model = self._gradient.get_embeddings_model(slug=gradient_model_slug) except gradientai.openapi.client.exceptions.UnauthorizedException as e: logger.error(f"Error while loading model {gradient_model_slug}.") self._gradient.close() raise ValueError("Unable to fetch the requested embeddings model") from e super().__init__( embed_batch_size=embed_batch_size, model_name=gradient_model_slug, **kwargs ) async def _aget_text_embeddings(self, texts: List[str]) -> List[Embedding]: """ Embed the input sequence of text asynchronously. """ inputs = [{"input": text} for text in texts] result = await self._model.aembed(inputs=inputs).embeddings return [e.embedding for e in result] def _get_text_embeddings(self, texts: List[str]) -> List[Embedding]: """ Embed the input sequence of text. 
""" inputs = [{"input": text} for text in texts] result = self._model.embed(inputs=inputs).embeddings return [e.embedding for e in result] def _get_text_embedding(self, text: str) -> Embedding: """Alias for _get_text_embeddings() with single text input.""" return self._get_text_embeddings([text])[0] async def _aget_text_embedding(self, text: str) -> Embedding: """Alias for _aget_text_embeddings() with single text input.""" embedding = await self._aget_text_embeddings([text]) return embedding[0] async def _aget_query_embedding(self, query: str) -> Embedding: embedding = await self._aget_text_embeddings( [f"{QUERY_INSTRUCTION_FOR_RETRIEVAL} {query}"] ) return embedding[0] def _get_query_embedding(self, query: str) -> Embedding: return self._get_text_embeddings( [f"{QUERY_INSTRUCTION_FOR_RETRIEVAL} {query}"] )[0]
[ "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.bridge.pydantic.Field" ]
[((251, 278), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (268, 278), False, 'import logging\n'), ((1040, 1086), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'GRADIENT_EMBED_BATCH_SIZE', 'gt': '(0)'}), '(default=GRADIENT_EMBED_BATCH_SIZE, gt=0)\n', (1045, 1086), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1109, 1122), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1120, 1122), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1141, 1154), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1152, 1154), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3068, 3184), 'gradientai.Gradient', 'gradientai.Gradient', ([], {'access_token': 'gradient_access_token', 'workspace_id': 'gradient_workspace_id', 'host': 'gradient_host'}), '(access_token=gradient_access_token, workspace_id=\n gradient_workspace_id, host=gradient_host)\n', (3087, 3184), False, 'import gradientai\n')]
"""Answer inserter.""" from abc import abstractmethod from typing import Any, Dict, List, Optional from llama_index.core.llms.llm import LLM from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate from llama_index.core.prompts.mixin import ( PromptDictType, PromptMixin, PromptMixinType, ) from llama_index.core.query_engine.flare.schema import QueryTask from llama_index.core.service_context import ServiceContext from llama_index.core.settings import Settings, llm_from_settings_or_context class BaseLookaheadAnswerInserter(PromptMixin): """Lookahead answer inserter. These are responsible for insert answers into a lookahead answer template. E.g. lookahead answer: Red is for [Search(What is the meaning of Ghana's flag being red?)], green for forests, and gold for mineral wealth. query: What is the meaning of Ghana's flag being red? query answer: "the blood of those who died in the country's struggle for independence" final answer: Red is for the blood of those who died in the country's struggle for independence, green for forests, and gold for mineral wealth. """ def _get_prompt_modules(self) -> PromptMixinType: """Get prompt sub-modules.""" return {} @abstractmethod def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" DEFAULT_ANSWER_INSERT_PROMPT_TMPL = """ An existing 'lookahead response' is given below. The lookahead response contains `[Search(query)]` tags. Some queries have been executed and the response retrieved. The queries and answers are also given below. Also the previous response (the response before the lookahead response) is given below. Given the lookahead template, previous response, and also queries and answers, please 'fill in' the lookahead template with the appropriate answers. NOTE: Please make sure that the final response grammatically follows the previous response + lookahead template. For example, if the previous response is "New York City has a population of " and the lookahead template is "[Search(What is the population of New York City?)]", then the final response should be "8.4 million". NOTE: the lookahead template may not be a complete sentence and may contain trailing/leading commas, etc. Please preserve the original formatting of the lookahead template if possible. NOTE: NOTE: the exception to the above rule is if the answer to a query is equivalent to "I don't know" or "I don't have an answer". In this case, modify the lookahead template to indicate that the answer is not known. NOTE: the lookahead template may contain multiple `[Search(query)]` tags and only a subset of these queries have been executed. Do not replace the `[Search(query)]` tags that have not been executed. Previous Response: Lookahead Template: Red is for [Search(What is the meaning of Ghana's \ flag being red?)], green for forests, and gold for mineral wealth. Query-Answer Pairs: Query: What is the meaning of Ghana's flag being red? Answer: The red represents the blood of those who died in the country's struggle \ for independence Filled in Answers: Red is for the blood of those who died in the country's struggle for independence, \ green for forests, and gold for mineral wealth. Previous Response: One of the largest cities in the world Lookahead Template: , the city contains a population of [Search(What is the population \ of New York City?)] Query-Answer Pairs: Query: What is the population of New York City? 
Answer: The population of New York City is 8.4 million Synthesized Response: , the city contains a population of 8.4 million Previous Response: the city contains a population of Lookahead Template: [Search(What is the population of New York City?)] Query-Answer Pairs: Query: What is the population of New York City? Answer: The population of New York City is 8.4 million Synthesized Response: 8.4 million Previous Response: {prev_response} Lookahead Template: {lookahead_response} Query-Answer Pairs: {query_answer_pairs} Synthesized Response: """ DEFAULT_ANSWER_INSERT_PROMPT = PromptTemplate(DEFAULT_ANSWER_INSERT_PROMPT_TMPL) class LLMLookaheadAnswerInserter(BaseLookaheadAnswerInserter): """LLM Lookahead answer inserter. Takes in a lookahead response and a list of query tasks, and the lookahead answers, and inserts the answers into the lookahead response. """ def __init__( self, llm: Optional[LLM] = None, service_context: Optional[ServiceContext] = None, answer_insert_prompt: Optional[BasePromptTemplate] = None, ) -> None: """Init params.""" self._llm = llm or llm_from_settings_or_context(Settings, service_context) self._answer_insert_prompt = ( answer_insert_prompt or DEFAULT_ANSWER_INSERT_PROMPT ) def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return { "answer_insert_prompt": self._answer_insert_prompt, } def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" if "answer_insert_prompt" in prompts: self._answer_insert_prompt = prompts["answer_insert_prompt"] def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" prev_response = prev_response or "" query_answer_pairs = "" for query_task, answer in zip(query_tasks, answers): query_answer_pairs += f"Query: {query_task.query_str}\nAnswer: {answer}\n" return self._llm.predict( self._answer_insert_prompt, lookahead_response=response, query_answer_pairs=query_answer_pairs, prev_response=prev_response, ) class DirectLookaheadAnswerInserter(BaseLookaheadAnswerInserter): """Direct lookahead answer inserter. Simple inserter module that directly inserts answers into the [Search(query)] tags in the lookahead response. """ def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" for query_task, answer in zip(query_tasks, answers): response = ( response[: query_task.start_idx] + answer + response[query_task.end_idx + 1 :] ) return response
[ "llama_index.core.settings.llm_from_settings_or_context", "llama_index.core.prompts.base.PromptTemplate" ]
[((4287, 4336), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['DEFAULT_ANSWER_INSERT_PROMPT_TMPL'], {}), '(DEFAULT_ANSWER_INSERT_PROMPT_TMPL)\n', (4301, 4336), False, 'from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate\n'), ((4860, 4915), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (4888, 4915), False, 'from llama_index.core.settings import Settings, llm_from_settings_or_context\n')]
"""Answer inserter.""" from abc import abstractmethod from typing import Any, Dict, List, Optional from llama_index.core.llms.llm import LLM from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate from llama_index.core.prompts.mixin import ( PromptDictType, PromptMixin, PromptMixinType, ) from llama_index.core.query_engine.flare.schema import QueryTask from llama_index.core.service_context import ServiceContext from llama_index.core.settings import Settings, llm_from_settings_or_context class BaseLookaheadAnswerInserter(PromptMixin): """Lookahead answer inserter. These are responsible for insert answers into a lookahead answer template. E.g. lookahead answer: Red is for [Search(What is the meaning of Ghana's flag being red?)], green for forests, and gold for mineral wealth. query: What is the meaning of Ghana's flag being red? query answer: "the blood of those who died in the country's struggle for independence" final answer: Red is for the blood of those who died in the country's struggle for independence, green for forests, and gold for mineral wealth. """ def _get_prompt_modules(self) -> PromptMixinType: """Get prompt sub-modules.""" return {} @abstractmethod def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" DEFAULT_ANSWER_INSERT_PROMPT_TMPL = """ An existing 'lookahead response' is given below. The lookahead response contains `[Search(query)]` tags. Some queries have been executed and the response retrieved. The queries and answers are also given below. Also the previous response (the response before the lookahead response) is given below. Given the lookahead template, previous response, and also queries and answers, please 'fill in' the lookahead template with the appropriate answers. NOTE: Please make sure that the final response grammatically follows the previous response + lookahead template. For example, if the previous response is "New York City has a population of " and the lookahead template is "[Search(What is the population of New York City?)]", then the final response should be "8.4 million". NOTE: the lookahead template may not be a complete sentence and may contain trailing/leading commas, etc. Please preserve the original formatting of the lookahead template if possible. NOTE: NOTE: the exception to the above rule is if the answer to a query is equivalent to "I don't know" or "I don't have an answer". In this case, modify the lookahead template to indicate that the answer is not known. NOTE: the lookahead template may contain multiple `[Search(query)]` tags and only a subset of these queries have been executed. Do not replace the `[Search(query)]` tags that have not been executed. Previous Response: Lookahead Template: Red is for [Search(What is the meaning of Ghana's \ flag being red?)], green for forests, and gold for mineral wealth. Query-Answer Pairs: Query: What is the meaning of Ghana's flag being red? Answer: The red represents the blood of those who died in the country's struggle \ for independence Filled in Answers: Red is for the blood of those who died in the country's struggle for independence, \ green for forests, and gold for mineral wealth. Previous Response: One of the largest cities in the world Lookahead Template: , the city contains a population of [Search(What is the population \ of New York City?)] Query-Answer Pairs: Query: What is the population of New York City? 
Answer: The population of New York City is 8.4 million Synthesized Response: , the city contains a population of 8.4 million Previous Response: the city contains a population of Lookahead Template: [Search(What is the population of New York City?)] Query-Answer Pairs: Query: What is the population of New York City? Answer: The population of New York City is 8.4 million Synthesized Response: 8.4 million Previous Response: {prev_response} Lookahead Template: {lookahead_response} Query-Answer Pairs: {query_answer_pairs} Synthesized Response: """ DEFAULT_ANSWER_INSERT_PROMPT = PromptTemplate(DEFAULT_ANSWER_INSERT_PROMPT_TMPL) class LLMLookaheadAnswerInserter(BaseLookaheadAnswerInserter): """LLM Lookahead answer inserter. Takes in a lookahead response and a list of query tasks, and the lookahead answers, and inserts the answers into the lookahead response. """ def __init__( self, llm: Optional[LLM] = None, service_context: Optional[ServiceContext] = None, answer_insert_prompt: Optional[BasePromptTemplate] = None, ) -> None: """Init params.""" self._llm = llm or llm_from_settings_or_context(Settings, service_context) self._answer_insert_prompt = ( answer_insert_prompt or DEFAULT_ANSWER_INSERT_PROMPT ) def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return { "answer_insert_prompt": self._answer_insert_prompt, } def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" if "answer_insert_prompt" in prompts: self._answer_insert_prompt = prompts["answer_insert_prompt"] def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" prev_response = prev_response or "" query_answer_pairs = "" for query_task, answer in zip(query_tasks, answers): query_answer_pairs += f"Query: {query_task.query_str}\nAnswer: {answer}\n" return self._llm.predict( self._answer_insert_prompt, lookahead_response=response, query_answer_pairs=query_answer_pairs, prev_response=prev_response, ) class DirectLookaheadAnswerInserter(BaseLookaheadAnswerInserter): """Direct lookahead answer inserter. Simple inserter module that directly inserts answers into the [Search(query)] tags in the lookahead response. """ def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" for query_task, answer in zip(query_tasks, answers): response = ( response[: query_task.start_idx] + answer + response[query_task.end_idx + 1 :] ) return response
[ "llama_index.core.settings.llm_from_settings_or_context", "llama_index.core.prompts.base.PromptTemplate" ]
[((4287, 4336), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['DEFAULT_ANSWER_INSERT_PROMPT_TMPL'], {}), '(DEFAULT_ANSWER_INSERT_PROMPT_TMPL)\n', (4301, 4336), False, 'from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate\n'), ((4860, 4915), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (4888, 4915), False, 'from llama_index.core.settings import Settings, llm_from_settings_or_context\n')]
"""Answer inserter.""" from abc import abstractmethod from typing import Any, Dict, List, Optional from llama_index.core.llms.llm import LLM from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate from llama_index.core.prompts.mixin import ( PromptDictType, PromptMixin, PromptMixinType, ) from llama_index.core.query_engine.flare.schema import QueryTask from llama_index.core.service_context import ServiceContext from llama_index.core.settings import Settings, llm_from_settings_or_context class BaseLookaheadAnswerInserter(PromptMixin): """Lookahead answer inserter. These are responsible for insert answers into a lookahead answer template. E.g. lookahead answer: Red is for [Search(What is the meaning of Ghana's flag being red?)], green for forests, and gold for mineral wealth. query: What is the meaning of Ghana's flag being red? query answer: "the blood of those who died in the country's struggle for independence" final answer: Red is for the blood of those who died in the country's struggle for independence, green for forests, and gold for mineral wealth. """ def _get_prompt_modules(self) -> PromptMixinType: """Get prompt sub-modules.""" return {} @abstractmethod def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" DEFAULT_ANSWER_INSERT_PROMPT_TMPL = """ An existing 'lookahead response' is given below. The lookahead response contains `[Search(query)]` tags. Some queries have been executed and the response retrieved. The queries and answers are also given below. Also the previous response (the response before the lookahead response) is given below. Given the lookahead template, previous response, and also queries and answers, please 'fill in' the lookahead template with the appropriate answers. NOTE: Please make sure that the final response grammatically follows the previous response + lookahead template. For example, if the previous response is "New York City has a population of " and the lookahead template is "[Search(What is the population of New York City?)]", then the final response should be "8.4 million". NOTE: the lookahead template may not be a complete sentence and may contain trailing/leading commas, etc. Please preserve the original formatting of the lookahead template if possible. NOTE: NOTE: the exception to the above rule is if the answer to a query is equivalent to "I don't know" or "I don't have an answer". In this case, modify the lookahead template to indicate that the answer is not known. NOTE: the lookahead template may contain multiple `[Search(query)]` tags and only a subset of these queries have been executed. Do not replace the `[Search(query)]` tags that have not been executed. Previous Response: Lookahead Template: Red is for [Search(What is the meaning of Ghana's \ flag being red?)], green for forests, and gold for mineral wealth. Query-Answer Pairs: Query: What is the meaning of Ghana's flag being red? Answer: The red represents the blood of those who died in the country's struggle \ for independence Filled in Answers: Red is for the blood of those who died in the country's struggle for independence, \ green for forests, and gold for mineral wealth. Previous Response: One of the largest cities in the world Lookahead Template: , the city contains a population of [Search(What is the population \ of New York City?)] Query-Answer Pairs: Query: What is the population of New York City? 
Answer: The population of New York City is 8.4 million Synthesized Response: , the city contains a population of 8.4 million Previous Response: the city contains a population of Lookahead Template: [Search(What is the population of New York City?)] Query-Answer Pairs: Query: What is the population of New York City? Answer: The population of New York City is 8.4 million Synthesized Response: 8.4 million Previous Response: {prev_response} Lookahead Template: {lookahead_response} Query-Answer Pairs: {query_answer_pairs} Synthesized Response: """ DEFAULT_ANSWER_INSERT_PROMPT = PromptTemplate(DEFAULT_ANSWER_INSERT_PROMPT_TMPL) class LLMLookaheadAnswerInserter(BaseLookaheadAnswerInserter): """LLM Lookahead answer inserter. Takes in a lookahead response and a list of query tasks, and the lookahead answers, and inserts the answers into the lookahead response. """ def __init__( self, llm: Optional[LLM] = None, service_context: Optional[ServiceContext] = None, answer_insert_prompt: Optional[BasePromptTemplate] = None, ) -> None: """Init params.""" self._llm = llm or llm_from_settings_or_context(Settings, service_context) self._answer_insert_prompt = ( answer_insert_prompt or DEFAULT_ANSWER_INSERT_PROMPT ) def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return { "answer_insert_prompt": self._answer_insert_prompt, } def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" if "answer_insert_prompt" in prompts: self._answer_insert_prompt = prompts["answer_insert_prompt"] def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" prev_response = prev_response or "" query_answer_pairs = "" for query_task, answer in zip(query_tasks, answers): query_answer_pairs += f"Query: {query_task.query_str}\nAnswer: {answer}\n" return self._llm.predict( self._answer_insert_prompt, lookahead_response=response, query_answer_pairs=query_answer_pairs, prev_response=prev_response, ) class DirectLookaheadAnswerInserter(BaseLookaheadAnswerInserter): """Direct lookahead answer inserter. Simple inserter module that directly inserts answers into the [Search(query)] tags in the lookahead response. """ def _get_prompts(self) -> Dict[str, Any]: """Get prompts.""" return {} def _update_prompts(self, prompts: PromptDictType) -> None: """Update prompts.""" def insert( self, response: str, query_tasks: List[QueryTask], answers: List[str], prev_response: Optional[str] = None, ) -> str: """Insert answers into response.""" for query_task, answer in zip(query_tasks, answers): response = ( response[: query_task.start_idx] + answer + response[query_task.end_idx + 1 :] ) return response
[ "llama_index.core.settings.llm_from_settings_or_context", "llama_index.core.prompts.base.PromptTemplate" ]
[((4287, 4336), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['DEFAULT_ANSWER_INSERT_PROMPT_TMPL'], {}), '(DEFAULT_ANSWER_INSERT_PROMPT_TMPL)\n', (4301, 4336), False, 'from llama_index.core.prompts.base import BasePromptTemplate, PromptTemplate\n'), ((4860, 4915), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (4888, 4915), False, 'from llama_index.core.settings import Settings, llm_from_settings_or_context\n')]
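A brief usage sketch for the entry above (an editorial illustration, not a dataset row). It drives DirectLookaheadAnswerInserter, whose insert() splices each answer over the [Search(...)] span. QueryTask's field names follow the attribute access shown in insert() and its import path comes from the entry's own imports; the template text, the computed indices, and the assumption that the classes above are in scope are illustrative only.

# Minimal sketch, assuming the classes defined in the entry above are in scope.
from llama_index.core.query_engine.flare.schema import QueryTask

lookahead = "Red is for [Search(What is the meaning of Ghana's flag being red?)], green for forests."
start_idx = lookahead.index("[Search(")   # first character of the tag
end_idx = lookahead.index(")]") + 1       # last character of the tag (inclusive, since insert() slices end_idx + 1)

task = QueryTask(
    query_str="What is the meaning of Ghana's flag being red?",
    start_idx=start_idx,
    end_idx=end_idx,
)
answer = "the blood of those who died in the country's struggle for independence"

inserter = DirectLookaheadAnswerInserter()
print(inserter.insert(lookahead, [task], [answer]))
# Expected: "Red is for the blood of those who died ... independence, green for forests."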
"""Retrieval evaluators.""" from typing import Any, List, Optional, Sequence, Tuple from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.core.base_retriever import BaseRetriever from llama_index.legacy.evaluation.retrieval.base import ( BaseRetrievalEvaluator, RetrievalEvalMode, ) from llama_index.legacy.evaluation.retrieval.metrics_base import ( BaseRetrievalMetric, ) from llama_index.legacy.indices.base_retriever import BaseRetriever from llama_index.legacy.postprocessor.types import BaseNodePostprocessor from llama_index.legacy.schema import ImageNode, TextNode class RetrieverEvaluator(BaseRetrievalEvaluator): """Retriever evaluator. This module will evaluate a retriever using a set of metrics. Args: metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate retriever: Retriever to evaluate. node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval. """ retriever: BaseRetriever = Field(..., description="Retriever to evaluate") node_postprocessors: Optional[List[BaseNodePostprocessor]] = Field( default=None, description="Optional post-processor" ) def __init__( self, metrics: Sequence[BaseRetrievalMetric], retriever: BaseRetriever, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, **kwargs: Any, ) -> None: """Init params.""" super().__init__( metrics=metrics, retriever=retriever, node_postprocessors=node_postprocessors, **kwargs, ) async def _aget_retrieved_ids_and_texts( self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT ) -> Tuple[List[str], List[str]]: """Get retrieved ids and texts, potentially applying a post-processor.""" retrieved_nodes = await self.retriever.aretrieve(query) if self.node_postprocessors: for node_postprocessor in self.node_postprocessors: retrieved_nodes = node_postprocessor.postprocess_nodes( retrieved_nodes, query_str=query ) return ( [node.node.node_id for node in retrieved_nodes], [node.node.text for node in retrieved_nodes], ) class MultiModalRetrieverEvaluator(BaseRetrievalEvaluator): """Retriever evaluator. This module will evaluate a retriever using a set of metrics. Args: metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate retriever: Retriever to evaluate. node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval. 
""" retriever: BaseRetriever = Field(..., description="Retriever to evaluate") node_postprocessors: Optional[List[BaseNodePostprocessor]] = Field( default=None, description="Optional post-processor" ) def __init__( self, metrics: Sequence[BaseRetrievalMetric], retriever: BaseRetriever, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, **kwargs: Any, ) -> None: """Init params.""" super().__init__( metrics=metrics, retriever=retriever, node_postprocessors=node_postprocessors, **kwargs, ) async def _aget_retrieved_ids_texts( self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT ) -> Tuple[List[str], List[str]]: """Get retrieved ids.""" retrieved_nodes = await self.retriever.aretrieve(query) image_nodes: List[ImageNode] = [] text_nodes: List[TextNode] = [] if self.node_postprocessors: for node_postprocessor in self.node_postprocessors: retrieved_nodes = node_postprocessor.postprocess_nodes( retrieved_nodes, query_str=query ) for scored_node in retrieved_nodes: node = scored_node.node if isinstance(node, ImageNode): image_nodes.append(node) if node.text: text_nodes.append(node) if mode == "text": return ( [node.node_id for node in text_nodes], [node.text for node in text_nodes], ) elif mode == "image": return ( [node.node_id for node in image_nodes], [node.text for node in image_nodes], ) else: raise ValueError("Unsupported mode.")
[ "llama_index.legacy.bridge.pydantic.Field" ]
[((1038, 1085), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever to evaluate"""'}), "(..., description='Retriever to evaluate')\n", (1043, 1085), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1151, 1209), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Optional post-processor"""'}), "(default=None, description='Optional post-processor')\n", (1156, 1209), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2787, 2834), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever to evaluate"""'}), "(..., description='Retriever to evaluate')\n", (2792, 2834), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2900, 2958), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Optional post-processor"""'}), "(default=None, description='Optional post-processor')\n", (2905, 2958), False, 'from llama_index.legacy.bridge.pydantic import Field\n')]
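Another editorial sketch, not a dataset row: one way the evaluator above might be exercised end to end. Note that from_metric_names and evaluate come from the BaseRetrievalEvaluator base class in upstream llama_index rather than from this entry, and the legacy top-level imports, data directory, query, and expected node id are all assumptions.

# Hedged sketch, assuming the RetrieverEvaluator class above is in scope and that
# llama_index.legacy re-exports the classic top-level API as in upstream llama_index.
from llama_index.legacy import SimpleDirectoryReader, VectorStoreIndex

documents = SimpleDirectoryReader("data").load_data()   # hypothetical corpus directory
index = VectorStoreIndex.from_documents(documents)
retriever = index.as_retriever(similarity_top_k=2)

# from_metric_names/evaluate are inherited from BaseRetrievalEvaluator (not shown in this entry).
evaluator = RetrieverEvaluator.from_metric_names(["mrr", "hit_rate"], retriever=retriever)
result = evaluator.evaluate(
    query="What does the corpus say about retrieval?",   # hypothetical query
    expected_ids=["node-id-0"],                          # hypothetical golden node id
)
print(result)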
"""Retrieval evaluators.""" from typing import Any, List, Optional, Sequence, Tuple from llama_index.legacy.bridge.pydantic import Field from llama_index.legacy.core.base_retriever import BaseRetriever from llama_index.legacy.evaluation.retrieval.base import ( BaseRetrievalEvaluator, RetrievalEvalMode, ) from llama_index.legacy.evaluation.retrieval.metrics_base import ( BaseRetrievalMetric, ) from llama_index.legacy.indices.base_retriever import BaseRetriever from llama_index.legacy.postprocessor.types import BaseNodePostprocessor from llama_index.legacy.schema import ImageNode, TextNode class RetrieverEvaluator(BaseRetrievalEvaluator): """Retriever evaluator. This module will evaluate a retriever using a set of metrics. Args: metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate retriever: Retriever to evaluate. node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval. """ retriever: BaseRetriever = Field(..., description="Retriever to evaluate") node_postprocessors: Optional[List[BaseNodePostprocessor]] = Field( default=None, description="Optional post-processor" ) def __init__( self, metrics: Sequence[BaseRetrievalMetric], retriever: BaseRetriever, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, **kwargs: Any, ) -> None: """Init params.""" super().__init__( metrics=metrics, retriever=retriever, node_postprocessors=node_postprocessors, **kwargs, ) async def _aget_retrieved_ids_and_texts( self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT ) -> Tuple[List[str], List[str]]: """Get retrieved ids and texts, potentially applying a post-processor.""" retrieved_nodes = await self.retriever.aretrieve(query) if self.node_postprocessors: for node_postprocessor in self.node_postprocessors: retrieved_nodes = node_postprocessor.postprocess_nodes( retrieved_nodes, query_str=query ) return ( [node.node.node_id for node in retrieved_nodes], [node.node.text for node in retrieved_nodes], ) class MultiModalRetrieverEvaluator(BaseRetrievalEvaluator): """Retriever evaluator. This module will evaluate a retriever using a set of metrics. Args: metrics (List[BaseRetrievalMetric]): Sequence of metrics to evaluate retriever: Retriever to evaluate. node_postprocessors (Optional[List[BaseNodePostprocessor]]): Post-processor to apply after retrieval. 
""" retriever: BaseRetriever = Field(..., description="Retriever to evaluate") node_postprocessors: Optional[List[BaseNodePostprocessor]] = Field( default=None, description="Optional post-processor" ) def __init__( self, metrics: Sequence[BaseRetrievalMetric], retriever: BaseRetriever, node_postprocessors: Optional[List[BaseNodePostprocessor]] = None, **kwargs: Any, ) -> None: """Init params.""" super().__init__( metrics=metrics, retriever=retriever, node_postprocessors=node_postprocessors, **kwargs, ) async def _aget_retrieved_ids_texts( self, query: str, mode: RetrievalEvalMode = RetrievalEvalMode.TEXT ) -> Tuple[List[str], List[str]]: """Get retrieved ids.""" retrieved_nodes = await self.retriever.aretrieve(query) image_nodes: List[ImageNode] = [] text_nodes: List[TextNode] = [] if self.node_postprocessors: for node_postprocessor in self.node_postprocessors: retrieved_nodes = node_postprocessor.postprocess_nodes( retrieved_nodes, query_str=query ) for scored_node in retrieved_nodes: node = scored_node.node if isinstance(node, ImageNode): image_nodes.append(node) if node.text: text_nodes.append(node) if mode == "text": return ( [node.node_id for node in text_nodes], [node.text for node in text_nodes], ) elif mode == "image": return ( [node.node_id for node in image_nodes], [node.text for node in image_nodes], ) else: raise ValueError("Unsupported mode.")
[ "llama_index.legacy.bridge.pydantic.Field" ]
[((1038, 1085), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever to evaluate"""'}), "(..., description='Retriever to evaluate')\n", (1043, 1085), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1151, 1209), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Optional post-processor"""'}), "(default=None, description='Optional post-processor')\n", (1156, 1209), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2787, 2834), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever to evaluate"""'}), "(..., description='Retriever to evaluate')\n", (2792, 2834), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2900, 2958), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Optional post-processor"""'}), "(default=None, description='Optional post-processor')\n", (2905, 2958), False, 'from llama_index.legacy.bridge.pydantic import Field\n')]
from typing import Any, List, Optional from llama_index.legacy.bridge.pydantic import Field, PrivateAttr from llama_index.legacy.callbacks import CallbackManager from llama_index.legacy.core.embeddings.base import ( DEFAULT_EMBED_BATCH_SIZE, BaseEmbedding, ) from llama_index.legacy.embeddings.huggingface_utils import ( format_query, format_text, get_pooling_mode, ) from llama_index.legacy.embeddings.pooling import Pooling from llama_index.legacy.utils import infer_torch_device class OptimumEmbedding(BaseEmbedding): folder_name: str = Field(description="Folder name to load from.") max_length: int = Field(description="Maximum length of input.") pooling: str = Field(description="Pooling strategy. One of ['cls', 'mean'].") normalize: str = Field(default=True, description="Normalize embeddings or not.") query_instruction: Optional[str] = Field( description="Instruction to prepend to query text." ) text_instruction: Optional[str] = Field( description="Instruction to prepend to text." ) cache_folder: Optional[str] = Field( description="Cache folder for huggingface files." ) _model: Any = PrivateAttr() _tokenizer: Any = PrivateAttr() _device: Any = PrivateAttr() def __init__( self, folder_name: str, pooling: Optional[str] = None, max_length: Optional[int] = None, normalize: bool = True, query_instruction: Optional[str] = None, text_instruction: Optional[str] = None, model: Optional[Any] = None, tokenizer: Optional[Any] = None, embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE, callback_manager: Optional[CallbackManager] = None, device: Optional[str] = None, ): try: from optimum.onnxruntime import ORTModelForFeatureExtraction from transformers import AutoTokenizer except ImportError: raise ImportError( "OptimumEmbedding requires transformers to be installed.\n" "Please install transformers with " "`pip install transformers optimum[exporters]`." ) self._model = model or ORTModelForFeatureExtraction.from_pretrained(folder_name) self._tokenizer = tokenizer or AutoTokenizer.from_pretrained(folder_name) self._device = device or infer_torch_device() if max_length is None: try: max_length = int(self._model.config.max_position_embeddings) except Exception: raise ValueError( "Unable to find max_length from model config. " "Please provide max_length." ) if not pooling: pooling = get_pooling_mode(model) try: pooling = Pooling(pooling) except ValueError as exc: raise NotImplementedError( f"Pooling {pooling} unsupported, please pick one in" f" {[p.value for p in Pooling]}." ) from exc super().__init__( embed_batch_size=embed_batch_size, callback_manager=callback_manager, folder_name=folder_name, max_length=max_length, pooling=pooling, normalize=normalize, query_instruction=query_instruction, text_instruction=text_instruction, ) @classmethod def class_name(cls) -> str: return "OptimumEmbedding" @classmethod def create_and_save_optimum_model( cls, model_name_or_path: str, output_path: str, export_kwargs: Optional[dict] = None, ) -> None: try: from optimum.onnxruntime import ORTModelForFeatureExtraction from transformers import AutoTokenizer except ImportError: raise ImportError( "OptimumEmbedding requires transformers to be installed.\n" "Please install transformers with " "`pip install transformers optimum[exporters]`." 
) export_kwargs = export_kwargs or {} model = ORTModelForFeatureExtraction.from_pretrained( model_name_or_path, export=True, **export_kwargs ) tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) model.save_pretrained(output_path) tokenizer.save_pretrained(output_path) print( f"Saved optimum model to {output_path}. Use it with " f"`embed_model = OptimumEmbedding(folder_name='{output_path}')`." ) def _mean_pooling(self, model_output: Any, attention_mask: Any) -> Any: """Mean Pooling - Take attention mask into account for correct averaging.""" import torch # First element of model_output contains all token embeddings token_embeddings = model_output[0] input_mask_expanded = ( attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() ) return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp( input_mask_expanded.sum(1), min=1e-9 ) def _cls_pooling(self, model_output: list) -> Any: """Use the CLS token as the pooling token.""" return model_output[0][:, 0] def _embed(self, sentences: List[str]) -> List[List[float]]: """Embed sentences.""" encoded_input = self._tokenizer( sentences, padding=True, max_length=self.max_length, truncation=True, return_tensors="pt", ) # pop token_type_ids encoded_input.pop("token_type_ids", None) model_output = self._model(**encoded_input) if self.pooling == "cls": embeddings = self._cls_pooling(model_output) else: embeddings = self._mean_pooling( model_output, encoded_input["attention_mask"].to(self._device) ) if self.normalize: import torch embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1) return embeddings.tolist() def _get_query_embedding(self, query: str) -> List[float]: """Get query embedding.""" query = format_query(query, self.model_name, self.query_instruction) return self._embed([query])[0] async def _aget_query_embedding(self, query: str) -> List[float]: """Get query embedding async.""" return self._get_query_embedding(query) async def _aget_text_embedding(self, text: str) -> List[float]: """Get text embedding async.""" return self._get_text_embedding(text) def _get_text_embedding(self, text: str) -> List[float]: """Get text embedding.""" text = format_text(text, self.model_name, self.text_instruction) return self._embed([text])[0] def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]: """Get text embeddings.""" texts = [ format_text(text, self.model_name, self.text_instruction) for text in texts ] return self._embed(texts)
[ "llama_index.legacy.bridge.pydantic.Field", "llama_index.legacy.embeddings.huggingface_utils.format_text", "llama_index.legacy.bridge.pydantic.PrivateAttr", "llama_index.legacy.embeddings.huggingface_utils.get_pooling_mode", "llama_index.legacy.embeddings.pooling.Pooling", "llama_index.legacy.embeddings.huggingface_utils.format_query", "llama_index.legacy.utils.infer_torch_device" ]
[((567, 613), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Folder name to load from."""'}), "(description='Folder name to load from.')\n", (572, 613), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((636, 681), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Maximum length of input."""'}), "(description='Maximum length of input.')\n", (641, 681), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((701, 763), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Pooling strategy. One of [\'cls\', \'mean\']."""'}), '(description="Pooling strategy. One of [\'cls\', \'mean\'].")\n', (706, 763), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((785, 848), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Normalize embeddings or not."""'}), "(default=True, description='Normalize embeddings or not.')\n", (790, 848), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((888, 946), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Instruction to prepend to query text."""'}), "(description='Instruction to prepend to query text.')\n", (893, 946), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((999, 1051), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Instruction to prepend to text."""'}), "(description='Instruction to prepend to text.')\n", (1004, 1051), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1100, 1156), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Cache folder for huggingface files."""'}), "(description='Cache folder for huggingface files.')\n", (1105, 1156), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1190, 1203), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1201, 1203), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1226, 1239), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1237, 1239), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1259, 1272), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1270, 1272), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((4174, 4273), 'optimum.onnxruntime.ORTModelForFeatureExtraction.from_pretrained', 'ORTModelForFeatureExtraction.from_pretrained', (['model_name_or_path'], {'export': '(True)'}), '(model_name_or_path, export=\n True, **export_kwargs)\n', (4218, 4273), False, 'from optimum.onnxruntime import ORTModelForFeatureExtraction\n'), ((4311, 4360), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['model_name_or_path'], {}), '(model_name_or_path)\n', (4340, 4360), False, 'from transformers import AutoTokenizer\n'), ((6290, 6350), 'llama_index.legacy.embeddings.huggingface_utils.format_query', 'format_query', (['query', 'self.model_name', 'self.query_instruction'], {}), '(query, self.model_name, self.query_instruction)\n', (6302, 6350), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n'), ((6816, 6873), 'llama_index.legacy.embeddings.huggingface_utils.format_text', 'format_text', (['text', 
'self.model_name', 'self.text_instruction'], {}), '(text, self.model_name, self.text_instruction)\n', (6827, 6873), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n'), ((2218, 2275), 'optimum.onnxruntime.ORTModelForFeatureExtraction.from_pretrained', 'ORTModelForFeatureExtraction.from_pretrained', (['folder_name'], {}), '(folder_name)\n', (2262, 2275), False, 'from optimum.onnxruntime import ORTModelForFeatureExtraction\n'), ((2315, 2357), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['folder_name'], {}), '(folder_name)\n', (2344, 2357), False, 'from transformers import AutoTokenizer\n'), ((2391, 2411), 'llama_index.legacy.utils.infer_torch_device', 'infer_torch_device', ([], {}), '()\n', (2409, 2411), False, 'from llama_index.legacy.utils import infer_torch_device\n'), ((2784, 2807), 'llama_index.legacy.embeddings.huggingface_utils.get_pooling_mode', 'get_pooling_mode', (['model'], {}), '(model)\n', (2800, 2807), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n'), ((2843, 2859), 'llama_index.legacy.embeddings.pooling.Pooling', 'Pooling', (['pooling'], {}), '(pooling)\n', (2850, 2859), False, 'from llama_index.legacy.embeddings.pooling import Pooling\n'), ((5056, 5108), 'torch.sum', 'torch.sum', (['(token_embeddings * input_mask_expanded)', '(1)'], {}), '(token_embeddings * input_mask_expanded, 1)\n', (5065, 5108), False, 'import torch\n'), ((6085, 6138), 'torch.nn.functional.normalize', 'torch.nn.functional.normalize', (['embeddings'], {'p': '(2)', 'dim': '(1)'}), '(embeddings, p=2, dim=1)\n', (6114, 6138), False, 'import torch\n'), ((7053, 7110), 'llama_index.legacy.embeddings.huggingface_utils.format_text', 'format_text', (['text', 'self.model_name', 'self.text_instruction'], {}), '(text, self.model_name, self.text_instruction)\n', (7064, 7110), False, 'from llama_index.legacy.embeddings.huggingface_utils import format_query, format_text, get_pooling_mode\n')]
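A final editorial sketch, not a dataset row: the export-then-load round trip that create_and_save_optimum_model above is written for. The model id and output folder are placeholders, the class is assumed to be in scope (its module path is not stated in the entry), and transformers plus optimum[exporters] must be installed, as the entry's own error message notes.

# Sketch only: export a Hugging Face embedding model to ONNX once, then serve it
# through the OptimumEmbedding class defined above.
OptimumEmbedding.create_and_save_optimum_model(
    "BAAI/bge-small-en-v1.5",   # hypothetical model choice
    "./bge_onnx",               # placeholder output folder
)

embed_model = OptimumEmbedding(folder_name="./bge_onnx")
embedding = embed_model.get_text_embedding("hello world")   # public wrapper from BaseEmbedding
print(len(embedding))   # dimensionality of the exported model's output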