code (stringlengths 161-233k) | apis (sequencelengths 1-24) | extract_api (stringlengths 162-68.5k) |
---|---|---|
"""FastAPI app creation, logger configuration and main API routes."""
import logging
from fastapi import Depends, FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from injector import Injector
from llama_index.core.callbacks import CallbackManager
from llama_index.core.callbacks.global_handlers import create_global_handler
from llama_index.core.settings import Settings as LlamaIndexSettings
from private_gpt.server.chat.chat_router import chat_router
from private_gpt.server.chunks.chunks_router import chunks_router
from private_gpt.server.completions.completions_router import completions_router
from private_gpt.server.embeddings.embeddings_router import embeddings_router
from private_gpt.server.health.health_router import health_router
from private_gpt.server.ingest.ingest_router import ingest_router
from private_gpt.settings.settings import Settings
logger = logging.getLogger(__name__)
def create_app(root_injector: Injector) -> FastAPI:
# Start the API
async def bind_injector_to_request(request: Request) -> None:
request.state.injector = root_injector
app = FastAPI(dependencies=[Depends(bind_injector_to_request)])
app.include_router(completions_router)
app.include_router(chat_router)
app.include_router(chunks_router)
app.include_router(ingest_router)
app.include_router(embeddings_router)
app.include_router(health_router)
# Add LlamaIndex simple observability
global_handler = create_global_handler("simple")
LlamaIndexSettings.callback_manager = CallbackManager([global_handler])
settings = root_injector.get(Settings)
if settings.server.cors.enabled:
logger.debug("Setting up CORS middleware")
app.add_middleware(
CORSMiddleware,
allow_credentials=settings.server.cors.allow_credentials,
allow_origins=settings.server.cors.allow_origins,
allow_origin_regex=settings.server.cors.allow_origin_regex,
allow_methods=settings.server.cors.allow_methods,
allow_headers=settings.server.cors.allow_headers,
)
if settings.ui.enabled:
logger.debug("Importing the UI module")
try:
from private_gpt.ui.ui import PrivateGptUi
except ImportError as e:
raise ImportError(
"UI dependencies not found, install with `poetry install --extras ui`"
) from e
ui = root_injector.get(PrivateGptUi)
ui.mount_in_app(app, settings.ui.path)
return app
| [
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.callbacks.global_handlers.create_global_handler"
] | [((894, 921), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (911, 921), False, 'import logging\n'), ((1479, 1510), 'llama_index.core.callbacks.global_handlers.create_global_handler', 'create_global_handler', (['"""simple"""'], {}), "('simple')\n", (1500, 1510), False, 'from llama_index.core.callbacks.global_handlers import create_global_handler\n'), ((1553, 1586), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[global_handler]'], {}), '([global_handler])\n', (1568, 1586), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((1143, 1176), 'fastapi.Depends', 'Depends', (['bind_injector_to_request'], {}), '(bind_injector_to_request)\n', (1150, 1176), False, 'from fastapi import Depends, FastAPI, Request\n')] |
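The `create_app` factory above only builds the FastAPI object. Below is a minimal launcher sketch, assuming `uvicorn` is installed and that a hypothetical `build_injector()` helper supplies an `Injector` carrying the bindings that `root_injector.get(Settings)` and the routers expect; the real project wires this elsewhere, so treat the snippet as an illustration rather than the actual entry point.

import uvicorn
from injector import Injector

def build_injector() -> Injector:
    # Placeholder: the real application configures the Injector with modules
    # that bind Settings, the LLM, the vector store, etc.
    return Injector()

if __name__ == "__main__":
    app = create_app(build_injector())  # create_app as defined above
    uvicorn.run(app, host="0.0.0.0", port=8001)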
import json
import os
from typing import Dict, List, Optional, Type
from loguru import logger
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentChunkWithScore,
DocumentMetadataFilter,
Query,
QueryResult,
QueryWithEmbedding,
)
from llama_index.indices.base import BaseGPTIndex
from llama_index.indices.vector_store.base import GPTVectorStoreIndex
from llama_index.indices.query.schema import QueryBundle
from llama_index.response.schema import Response
from llama_index.data_structs.node_v2 import Node, DocumentRelationship, NodeWithScore
from llama_index.indices.registry import INDEX_STRUCT_TYPE_TO_INDEX_CLASS
from llama_index.data_structs.struct_type import IndexStructType
from llama_index.indices.response.builder import ResponseMode
INDEX_STRUCT_TYPE_STR = os.environ.get(
"LLAMA_INDEX_TYPE", IndexStructType.SIMPLE_DICT.value
)
INDEX_JSON_PATH = os.environ.get("LLAMA_INDEX_JSON_PATH", None)
QUERY_KWARGS_JSON_PATH = os.environ.get("LLAMA_QUERY_KWARGS_JSON_PATH", None)
RESPONSE_MODE = os.environ.get("LLAMA_RESPONSE_MODE", ResponseMode.NO_TEXT.value)
EXTERNAL_VECTOR_STORE_INDEX_STRUCT_TYPES = [
IndexStructType.DICT,
IndexStructType.WEAVIATE,
IndexStructType.PINECONE,
IndexStructType.QDRANT,
IndexStructType.CHROMA,
IndexStructType.VECTOR_STORE,
]
def _create_or_load_index(
index_type_str: Optional[str] = None,
index_json_path: Optional[str] = None,
index_type_to_index_cls: Optional[dict[str, Type[BaseGPTIndex]]] = None,
) -> BaseGPTIndex:
"""Create or load index from json path."""
index_json_path = index_json_path or INDEX_JSON_PATH
index_type_to_index_cls = (
index_type_to_index_cls or INDEX_STRUCT_TYPE_TO_INDEX_CLASS
)
index_type_str = index_type_str or INDEX_STRUCT_TYPE_STR
index_type = IndexStructType(index_type_str)
if index_type not in index_type_to_index_cls:
raise ValueError(f"Unknown index type: {index_type}")
if index_type in EXTERNAL_VECTOR_STORE_INDEX_STRUCT_TYPES:
raise ValueError("Please use vector store directly.")
index_cls = index_type_to_index_cls[index_type]
if index_json_path is None:
return index_cls(nodes=[]) # Create empty index
else:
return index_cls.load_from_disk(index_json_path) # Load index from disk
def _create_or_load_query_kwargs(
query_kwargs_json_path: Optional[str] = None,
) -> Optional[dict]:
"""Create or load query kwargs from json path."""
query_kwargs_json_path = query_kwargs_json_path or QUERY_KWARGS_JSON_PATH
    query_kwargs: Optional[dict] = None
    if query_kwargs_json_path is not None:
        with open(query_kwargs_json_path, "r") as f:
            query_kwargs = json.load(f)
    return query_kwargs
def _doc_chunk_to_node(doc_chunk: DocumentChunk, source_doc_id: str) -> Node:
"""Convert document chunk to Node"""
return Node(
doc_id=doc_chunk.id,
text=doc_chunk.text,
embedding=doc_chunk.embedding,
extra_info=doc_chunk.metadata.dict(),
relationships={DocumentRelationship.SOURCE: source_doc_id},
)
def _query_with_embedding_to_query_bundle(query: QueryWithEmbedding) -> QueryBundle:
return QueryBundle(
query_str=query.query,
embedding=query.embedding,
)
def _source_node_to_doc_chunk_with_score(
node_with_score: NodeWithScore,
) -> DocumentChunkWithScore:
node = node_with_score.node
if node.extra_info is not None:
metadata = DocumentChunkMetadata(**node.extra_info)
else:
metadata = DocumentChunkMetadata()
return DocumentChunkWithScore(
id=node.doc_id,
text=node.text,
score=node_with_score.score if node_with_score.score is not None else 1.0,
metadata=metadata,
)
def _response_to_query_result(
response: Response, query: QueryWithEmbedding
) -> QueryResult:
results = [
_source_node_to_doc_chunk_with_score(node) for node in response.source_nodes
]
return QueryResult(
query=query.query,
results=results,
)
class LlamaDataStore(DataStore):
def __init__(
self, index: Optional[BaseGPTIndex] = None, query_kwargs: Optional[dict] = None
):
self._index = index or _create_or_load_index()
self._query_kwargs = query_kwargs or _create_or_load_query_kwargs()
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
        Takes in a dict mapping document ids to lists of document chunks and inserts them into the index.
        Returns a list of document ids.
"""
doc_ids = []
for doc_id, doc_chunks in chunks.items():
logger.debug(f"Upserting {doc_id} with {len(doc_chunks)} chunks")
nodes = [
_doc_chunk_to_node(doc_chunk=doc_chunk, source_doc_id=doc_id)
for doc_chunk in doc_chunks
]
self._index.insert_nodes(nodes)
doc_ids.append(doc_id)
return doc_ids
async def _query(
self,
queries: List[QueryWithEmbedding],
) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and
returns a list of query results with matching document chunks and scores.
"""
query_result_all = []
for query in queries:
if query.filter is not None:
logger.warning("Filters are not supported yet, ignoring for now.")
query_bundle = _query_with_embedding_to_query_bundle(query)
# Setup query kwargs
if self._query_kwargs is not None:
query_kwargs = self._query_kwargs
else:
query_kwargs = {}
# TODO: support top_k for other indices
if isinstance(self._index, GPTVectorStoreIndex):
query_kwargs["similarity_top_k"] = query.top_k
response = await self._index.aquery(
query_bundle, response_mode=RESPONSE_MODE, **query_kwargs
)
query_result = _response_to_query_result(response, query)
query_result_all.append(query_result)
return query_result_all
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""
Removes vectors by ids, filter, or everything in the datastore.
Returns whether the operation was successful.
"""
if delete_all:
logger.warning("Delete all not supported yet.")
return False
if filter is not None:
logger.warning("Filters are not supported yet.")
return False
if ids is not None:
for id_ in ids:
try:
self._index.delete(id_)
except NotImplementedError:
                    # NOTE: some indices do not support delete yet.
logger.warning(f"{type(self._index)} does not support delete yet.")
return False
return True
| [
"llama_index.data_structs.struct_type.IndexStructType",
"llama_index.indices.query.schema.QueryBundle"
] | [((860, 929), 'os.environ.get', 'os.environ.get', (['"""LLAMA_INDEX_TYPE"""', 'IndexStructType.SIMPLE_DICT.value'], {}), "('LLAMA_INDEX_TYPE', IndexStructType.SIMPLE_DICT.value)\n", (874, 929), False, 'import os\n'), ((954, 999), 'os.environ.get', 'os.environ.get', (['"""LLAMA_INDEX_JSON_PATH"""', 'None'], {}), "('LLAMA_INDEX_JSON_PATH', None)\n", (968, 999), False, 'import os\n'), ((1025, 1077), 'os.environ.get', 'os.environ.get', (['"""LLAMA_QUERY_KWARGS_JSON_PATH"""', 'None'], {}), "('LLAMA_QUERY_KWARGS_JSON_PATH', None)\n", (1039, 1077), False, 'import os\n'), ((1094, 1159), 'os.environ.get', 'os.environ.get', (['"""LLAMA_RESPONSE_MODE"""', 'ResponseMode.NO_TEXT.value'], {}), "('LLAMA_RESPONSE_MODE', ResponseMode.NO_TEXT.value)\n", (1108, 1159), False, 'import os\n'), ((1882, 1913), 'llama_index.data_structs.struct_type.IndexStructType', 'IndexStructType', (['index_type_str'], {}), '(index_type_str)\n', (1897, 1913), False, 'from llama_index.data_structs.struct_type import IndexStructType\n'), ((3268, 3329), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query.query', 'embedding': 'query.embedding'}), '(query_str=query.query, embedding=query.embedding)\n', (3279, 3329), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((3655, 3812), 'models.models.DocumentChunkWithScore', 'DocumentChunkWithScore', ([], {'id': 'node.doc_id', 'text': 'node.text', 'score': '(node_with_score.score if node_with_score.score is not None else 1.0)', 'metadata': 'metadata'}), '(id=node.doc_id, text=node.text, score=\n node_with_score.score if node_with_score.score is not None else 1.0,\n metadata=metadata)\n', (3677, 3812), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((4062, 4109), 'models.models.QueryResult', 'QueryResult', ([], {'query': 'query.query', 'results': 'results'}), '(query=query.query, results=results)\n', (4073, 4109), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((3549, 3589), 'models.models.DocumentChunkMetadata', 'DocumentChunkMetadata', ([], {}), '(**node.extra_info)\n', (3570, 3589), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((3619, 3642), 'models.models.DocumentChunkMetadata', 'DocumentChunkMetadata', ([], {}), '()\n', (3640, 3642), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((2779, 2791), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2788, 2791), False, 'import json\n'), ((6623, 6670), 'loguru.logger.warning', 'logger.warning', (['"""Delete all not supported yet."""'], {}), "('Delete all not supported yet.')\n", (6637, 6670), False, 'from loguru import logger\n'), ((6740, 6788), 'loguru.logger.warning', 'logger.warning', (['"""Filters are not supported yet."""'], {}), "('Filters are not supported yet.')\n", (6754, 6788), False, 'from loguru import logger\n'), ((5454, 5520), 'loguru.logger.warning', 'logger.warning', (['"""Filters are not supported yet, ignoring for now."""'], {}), "('Filters are not supported yet, ignoring for now.')\n", (5468, 5520), False, 'from loguru import logger\n')] |
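The datastore above is configured entirely through environment variables that are read at import time, so they must be set before the module is imported. The sketch below shows that configuration plus the pure chunk-to-node conversion helper; the `models.models` import path mirrors the chatgpt-retrieval-plugin layout and the embedding values are illustrative, not meaningful.

import os

# Must happen before importing the datastore module above, since the LLAMA_*
# values are baked into module-level constants at import time.
os.environ["LLAMA_INDEX_TYPE"] = "simple_dict"   # IndexStructType.SIMPLE_DICT.value
os.environ["LLAMA_RESPONSE_MODE"] = "no_text"    # ResponseMode.NO_TEXT.value

from models.models import DocumentChunk, DocumentChunkMetadata  # assumed path

chunk = DocumentChunk(
    id="doc-1_0",
    text="LlamaIndex wraps several vector stores.",
    embedding=[0.1, 0.2, 0.3],
    metadata=DocumentChunkMetadata(),
)
node = _doc_chunk_to_node(chunk, source_doc_id="doc-1")
print(node.doc_id, node.relationships)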
import os
import weaviate
from llama_index.vector_stores import WeaviateVectorStore
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
from llama_index.callbacks.base import CallbackManager
from llama_index import (
LLMPredictor,
ServiceContext,
StorageContext,
VectorStoreIndex,
)
import chainlit as cl
from llama_index.llms import LocalAI
from llama_index.embeddings import HuggingFaceEmbedding
import yaml
# Load the configuration file
with open("config.yaml", "r") as ymlfile:
cfg = yaml.safe_load(ymlfile)
# Get the values from the configuration file or set the default values
temperature = cfg['localAI'].get('temperature', 0)
model_name = cfg['localAI'].get('modelName', "gpt-3.5-turbo")
api_base = cfg['localAI'].get('apiBase', "http://local-ai.default")
api_key = cfg['localAI'].get('apiKey', "stub")
streaming = cfg['localAI'].get('streaming', True)
weaviate_url = cfg['weviate'].get('url', "http://weviate.default")
index_name = cfg['weviate'].get('index', "AIChroma")
query_mode = cfg['query'].get('mode', "hybrid")
topK = cfg['query'].get('topK', 1)
alpha = cfg['query'].get('alpha', 0.0)
embed_model_name = cfg['embedding'].get('model', "BAAI/bge-small-en-v1.5")
chunk_size = cfg['query'].get('chunkSize', 1024)
embed_model = HuggingFaceEmbedding(model_name=embed_model_name)
llm = LocalAI(temperature=temperature, model_name=model_name, api_base=api_base, api_key=api_key, streaming=streaming)
llm.globally_use_chat_completions = True
client = weaviate.Client(weaviate_url)
vector_store = WeaviateVectorStore(weaviate_client=client, index_name=index_name)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
@cl.on_chat_start
async def factory():
llm_predictor = LLMPredictor(
llm=llm
)
service_context = ServiceContext.from_defaults(embed_model=embed_model, callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()]), llm_predictor=llm_predictor, chunk_size=chunk_size)
index = VectorStoreIndex.from_vector_store(
vector_store,
storage_context=storage_context,
service_context=service_context
)
query_engine = index.as_query_engine(vector_store_query_mode=query_mode, similarity_top_k=topK, alpha=alpha, streaming=True)
cl.user_session.set("query_engine", query_engine)
@cl.on_message
async def main(message: cl.Message):
query_engine = cl.user_session.get("query_engine")
response = await cl.make_async(query_engine.query)(message.content)
response_message = cl.Message(content="")
for token in response.response_gen:
await response_message.stream_token(token=token)
if response.response_txt:
response_message.content = response.response_txt
await response_message.send()
| [
"llama_index.LLMPredictor",
"llama_index.StorageContext.from_defaults",
"llama_index.vector_stores.WeaviateVectorStore",
"llama_index.llms.LocalAI",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((1360, 1409), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'embed_model_name'}), '(model_name=embed_model_name)\n', (1380, 1409), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((1418, 1534), 'llama_index.llms.LocalAI', 'LocalAI', ([], {'temperature': 'temperature', 'model_name': 'model_name', 'api_base': 'api_base', 'api_key': 'api_key', 'streaming': 'streaming'}), '(temperature=temperature, model_name=model_name, api_base=api_base,\n api_key=api_key, streaming=streaming)\n', (1425, 1534), False, 'from llama_index.llms import LocalAI\n'), ((1582, 1611), 'weaviate.Client', 'weaviate.Client', (['weaviate_url'], {}), '(weaviate_url)\n', (1597, 1611), False, 'import weaviate\n'), ((1627, 1693), 'llama_index.vector_stores.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'client', 'index_name': 'index_name'}), '(weaviate_client=client, index_name=index_name)\n', (1646, 1693), False, 'from llama_index.vector_stores import WeaviateVectorStore\n'), ((1712, 1767), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1740, 1767), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex\n'), ((604, 627), 'yaml.safe_load', 'yaml.safe_load', (['ymlfile'], {}), '(ymlfile)\n', (618, 627), False, 'import yaml\n'), ((1829, 1850), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1841, 1850), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex\n'), ((2079, 2198), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(vector_store, storage_context=\n storage_context, service_context=service_context)\n', (2113, 2198), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex\n'), ((2359, 2408), 'chainlit.user_session.set', 'cl.user_session.set', (['"""query_engine"""', 'query_engine'], {}), "('query_engine', query_engine)\n", (2378, 2408), True, 'import chainlit as cl\n'), ((2482, 2517), 'chainlit.user_session.get', 'cl.user_session.get', (['"""query_engine"""'], {}), "('query_engine')\n", (2501, 2517), True, 'import chainlit as cl\n'), ((2614, 2636), 'chainlit.Message', 'cl.Message', ([], {'content': '""""""'}), "(content='')\n", (2624, 2636), True, 'import chainlit as cl\n'), ((2539, 2572), 'chainlit.make_async', 'cl.make_async', (['query_engine.query'], {}), '(query_engine.query)\n', (2552, 2572), True, 'import chainlit as cl\n'), ((1980, 2010), 'chainlit.LlamaIndexCallbackHandler', 'cl.LlamaIndexCallbackHandler', ([], {}), '()\n', (2008, 2010), True, 'import chainlit as cl\n')] |
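Everything above is driven by `config.yaml`. The following sketch shows the structure the script expects, written as the Python dict that `yaml.safe_load` would return; the values are simply the `.get(...)` fallbacks from the code, and the `weviate` key is spelled the way the code actually looks it up.

# Equivalent of a config.yaml that yaml.safe_load(ymlfile) would parse into `cfg`.
cfg_example = {
    "localAI": {
        "temperature": 0,
        "modelName": "gpt-3.5-turbo",
        "apiBase": "http://local-ai.default",
        "apiKey": "stub",
        "streaming": True,
    },
    "weviate": {  # note: the code reads the key spelled "weviate"
        "url": "http://weviate.default",
        "index": "AIChroma",
    },
    "query": {
        "mode": "hybrid",
        "topK": 1,
        "alpha": 0.0,
        "chunkSize": 1024,
    },
    "embedding": {
        "model": "BAAI/bge-small-en-v1.5",
    },
}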
import typer
import uuid
from typing import Optional, List, Any
import os
import numpy as np
from memgpt.utils import is_valid_url, printd
from memgpt.data_types import EmbeddingConfig
from memgpt.credentials import MemGPTCredentials
from memgpt.constants import MAX_EMBEDDING_DIM, EMBEDDING_TO_TOKENIZER_MAP, EMBEDDING_TO_TOKENIZER_DEFAULT
# from llama_index.core.base.embeddings import BaseEmbedding
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import Document as LlamaIndexDocument
# from llama_index.core.base.embeddings import BaseEmbedding
# from llama_index.core.embeddings import BaseEmbedding
# from llama_index.core.base.embeddings.base import BaseEmbedding
# from llama_index.bridge.pydantic import PrivateAttr
# from llama_index.embeddings.base import BaseEmbedding
# from llama_index.embeddings.huggingface_utils import format_text
import tiktoken
def parse_and_chunk_text(text: str, chunk_size: int) -> List[str]:
parser = SentenceSplitter(chunk_size=chunk_size)
llama_index_docs = [LlamaIndexDocument(text=text)]
nodes = parser.get_nodes_from_documents(llama_index_docs)
return [n.text for n in nodes]
def truncate_text(text: str, max_length: int, encoding) -> str:
# truncate the text based on max_length and encoding
encoded_text = encoding.encode(text)[:max_length]
return encoding.decode(encoded_text)
def check_and_split_text(text: str, embedding_model: str) -> List[str]:
"""Split text into chunks of max_length tokens or less"""
if embedding_model in EMBEDDING_TO_TOKENIZER_MAP:
encoding = tiktoken.get_encoding(EMBEDDING_TO_TOKENIZER_MAP[embedding_model])
else:
print(f"Warning: couldn't find tokenizer for model {embedding_model}, using default tokenizer {EMBEDDING_TO_TOKENIZER_DEFAULT}")
encoding = tiktoken.get_encoding(EMBEDDING_TO_TOKENIZER_DEFAULT)
num_tokens = len(encoding.encode(text))
# determine max length
if hasattr(encoding, "max_length"):
# TODO(fix) this is broken
max_length = encoding.max_length
else:
# TODO: figure out the real number
printd(f"Warning: couldn't find max_length for tokenizer {embedding_model}, using default max_length 8191")
max_length = 8191
# truncate text if too long
if num_tokens > max_length:
print(f"Warning: text is too long ({num_tokens} tokens), truncating to {max_length} tokens.")
        # format_text is not imported here (its import is commented out above),
        # so truncate the raw text directly
        text = truncate_text(text, max_length, encoding)
return [text]
class EmbeddingEndpoint:
"""Implementation for OpenAI compatible endpoint"""
# """ Based off llama index https://github.com/run-llama/llama_index/blob/a98bdb8ecee513dc2e880f56674e7fd157d1dc3a/llama_index/embeddings/text_embeddings_inference.py """
# _user: str = PrivateAttr()
# _timeout: float = PrivateAttr()
# _base_url: str = PrivateAttr()
def __init__(
self,
model: str,
base_url: str,
user: str,
timeout: float = 60.0,
**kwargs: Any,
):
if not is_valid_url(base_url):
raise ValueError(
f"Embeddings endpoint was provided an invalid URL (set to: '{base_url}'). Make sure embedding_endpoint is set correctly in your MemGPT config."
)
self.model_name = model
self._user = user
self._base_url = base_url
self._timeout = timeout
def _call_api(self, text: str) -> List[float]:
if not is_valid_url(self._base_url):
raise ValueError(
f"Embeddings endpoint does not have a valid URL (set to: '{self._base_url}'). Make sure embedding_endpoint is set correctly in your MemGPT config."
)
import httpx
headers = {"Content-Type": "application/json"}
json_data = {"input": text, "model": self.model_name, "user": self._user}
with httpx.Client() as client:
response = client.post(
f"{self._base_url}/embeddings",
headers=headers,
json=json_data,
timeout=self._timeout,
)
response_json = response.json()
if isinstance(response_json, list):
# embedding directly in response
embedding = response_json
elif isinstance(response_json, dict):
# TEI embedding packaged inside openai-style response
try:
embedding = response_json["data"][0]["embedding"]
except (KeyError, IndexError):
raise TypeError(f"Got back an unexpected payload from text embedding function, response=\n{response_json}")
else:
# unknown response, can't parse
raise TypeError(f"Got back an unexpected payload from text embedding function, response=\n{response_json}")
return embedding
def get_text_embedding(self, text: str) -> List[float]:
return self._call_api(text)
def default_embedding_model():
# default to hugging face model running local
# warning: this is a terrible model
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
os.environ["TOKENIZERS_PARALLELISM"] = "False"
model = "BAAI/bge-small-en-v1.5"
return HuggingFaceEmbedding(model_name=model)
def query_embedding(embedding_model, query_text: str):
"""Generate padded embedding for querying database"""
query_vec = embedding_model.get_text_embedding(query_text)
query_vec = np.array(query_vec)
query_vec = np.pad(query_vec, (0, MAX_EMBEDDING_DIM - query_vec.shape[0]), mode="constant").tolist()
return query_vec
def embedding_model(config: EmbeddingConfig, user_id: Optional[uuid.UUID] = None):
"""Return LlamaIndex embedding model to use for embeddings"""
endpoint_type = config.embedding_endpoint_type
# TODO refactor to pass credentials through args
credentials = MemGPTCredentials.load()
if endpoint_type == "openai":
assert credentials.openai_key is not None
from llama_index.embeddings.openai import OpenAIEmbedding
additional_kwargs = {"user_id": user_id} if user_id else {}
model = OpenAIEmbedding(
api_base=config.embedding_endpoint,
api_key=credentials.openai_key,
additional_kwargs=additional_kwargs,
)
return model
elif endpoint_type == "azure":
assert all(
[
credentials.azure_key is not None,
credentials.azure_embedding_endpoint is not None,
credentials.azure_version is not None,
]
)
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
# https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings
model = "text-embedding-ada-002"
deployment = credentials.azure_embedding_deployment if credentials.azure_embedding_deployment is not None else model
return AzureOpenAIEmbedding(
model=model,
deployment_name=deployment,
api_key=credentials.azure_key,
azure_endpoint=credentials.azure_endpoint,
api_version=credentials.azure_version,
)
elif endpoint_type == "hugging-face":
return EmbeddingEndpoint(
model=config.embedding_model,
base_url=config.embedding_endpoint,
user=user_id,
)
else:
return default_embedding_model()
| [
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.embeddings.azure_openai.AzureOpenAIEmbedding",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.Document",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((981, 1020), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size'}), '(chunk_size=chunk_size)\n', (997, 1020), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((5381, 5419), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'model'}), '(model_name=model)\n', (5401, 5419), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((5614, 5633), 'numpy.array', 'np.array', (['query_vec'], {}), '(query_vec)\n', (5622, 5633), True, 'import numpy as np\n'), ((6035, 6059), 'memgpt.credentials.MemGPTCredentials.load', 'MemGPTCredentials.load', ([], {}), '()\n', (6057, 6059), False, 'from memgpt.credentials import MemGPTCredentials\n'), ((1045, 1074), 'llama_index.core.Document', 'LlamaIndexDocument', ([], {'text': 'text'}), '(text=text)\n', (1063, 1074), True, 'from llama_index.core import Document as LlamaIndexDocument\n'), ((1601, 1667), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['EMBEDDING_TO_TOKENIZER_MAP[embedding_model]'], {}), '(EMBEDDING_TO_TOKENIZER_MAP[embedding_model])\n', (1622, 1667), False, 'import tiktoken\n'), ((1834, 1887), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['EMBEDDING_TO_TOKENIZER_DEFAULT'], {}), '(EMBEDDING_TO_TOKENIZER_DEFAULT)\n', (1855, 1887), False, 'import tiktoken\n'), ((2138, 2255), 'memgpt.utils.printd', 'printd', (['f"""Warning: couldn\'t find max_length for tokenizer {embedding_model}, using default max_length 8191"""'], {}), '(\n f"Warning: couldn\'t find max_length for tokenizer {embedding_model}, using default max_length 8191"\n )\n', (2144, 2255), False, 'from memgpt.utils import is_valid_url, printd\n'), ((6296, 6421), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_base': 'config.embedding_endpoint', 'api_key': 'credentials.openai_key', 'additional_kwargs': 'additional_kwargs'}), '(api_base=config.embedding_endpoint, api_key=credentials.\n openai_key, additional_kwargs=additional_kwargs)\n', (6311, 6421), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((3196, 3218), 'memgpt.utils.is_valid_url', 'is_valid_url', (['base_url'], {}), '(base_url)\n', (3208, 3218), False, 'from memgpt.utils import is_valid_url, printd\n'), ((3615, 3643), 'memgpt.utils.is_valid_url', 'is_valid_url', (['self._base_url'], {}), '(self._base_url)\n', (3627, 3643), False, 'from memgpt.utils import is_valid_url, printd\n'), ((4026, 4040), 'httpx.Client', 'httpx.Client', ([], {}), '()\n', (4038, 4040), False, 'import httpx\n'), ((5650, 5729), 'numpy.pad', 'np.pad', (['query_vec', '(0, MAX_EMBEDDING_DIM - query_vec.shape[0])'], {'mode': '"""constant"""'}), "(query_vec, (0, MAX_EMBEDDING_DIM - query_vec.shape[0]), mode='constant')\n", (5656, 5729), True, 'import numpy as np\n'), ((7100, 7283), 'llama_index.embeddings.azure_openai.AzureOpenAIEmbedding', 'AzureOpenAIEmbedding', ([], {'model': 'model', 'deployment_name': 'deployment', 'api_key': 'credentials.azure_key', 'azure_endpoint': 'credentials.azure_endpoint', 'api_version': 'credentials.azure_version'}), '(model=model, deployment_name=deployment, api_key=\n credentials.azure_key, azure_endpoint=credentials.azure_endpoint,\n api_version=credentials.azure_version)\n', (7120, 7283), False, 'from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n')] |
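A local-only sketch exercising the chunking and truncation helpers above; it assumes `tiktoken` and the llama-index sentence splitter are importable, and the sample text is arbitrary.

import tiktoken

text = "MemGPT splits source text into chunks before embedding. " * 50

# Sentence-aware chunking via llama-index's SentenceSplitter.
chunks = parse_and_chunk_text(text, chunk_size=512)
print(f"{len(chunks)} chunks, first chunk starts with: {chunks[0][:60]}")

# Hard truncation to a token budget with a tiktoken encoding.
encoding = tiktoken.get_encoding("cl100k_base")
short = truncate_text(text, max_length=32, encoding=encoding)
print(len(encoding.encode(short)))  # <= 32 tokens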
import logging
import os
from typing import Optional
from typing import Type
import openai
from langchain.chat_models import ChatOpenAI
from llama_index import VectorStoreIndex, LLMPredictor, ServiceContext
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters
from pydantic import BaseModel, Field
from superagi.config.config import get_config
from superagi.llms.base_llm import BaseLlm
from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory
from superagi.tools.base_tool import BaseTool
from superagi.types.vector_store_types import VectorStoreType
from superagi.vector_store.chromadb import ChromaDB
class QueryResource(BaseModel):
"""Input for QueryResource tool."""
query: str = Field(..., description="the search query to search resources")
class QueryResourceTool(BaseTool):
"""
    Query Resource tool
Attributes:
name : The name.
description : The description.
args_schema : The args schema.
"""
name: str = "QueryResource"
args_schema: Type[BaseModel] = QueryResource
description: str = "Tool searches resources content and extracts relevant information to perform the given task." \
"Tool is given preference over other search/read file tools for relevant data." \
"Resources content is taken from the files: {summary}"
agent_id: int = None
llm: Optional[BaseLlm] = None
def _execute(self, query: str):
openai.api_key = self.llm.get_api_key()
os.environ["OPENAI_API_KEY"] = self.llm.get_api_key()
llm_predictor_chatgpt = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=self.llm.get_model(),
openai_api_key=get_config("OPENAI_API_KEY")))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt)
vector_store_name = VectorStoreType.get_vector_store_type(
self.get_tool_config(key="RESOURCE_VECTOR_STORE") or "Redis")
vector_store_index_name = self.get_tool_config(key="RESOURCE_VECTOR_STORE_INDEX_NAME") or "super-agent-index"
logging.info(f"vector_store_name {vector_store_name}")
logging.info(f"vector_store_index_name {vector_store_index_name}")
vector_store = LlamaVectorStoreFactory(vector_store_name, vector_store_index_name).get_vector_store()
logging.info(f"vector_store {vector_store}")
as_query_engine_args = dict(
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="agent_id",
value=str(self.agent_id)
)
]
)
)
if vector_store_name == VectorStoreType.CHROMA:
as_query_engine_args["chroma_collection"] = ChromaDB.create_collection(
collection_name=vector_store_index_name)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
query_engine = index.as_query_engine(
**as_query_engine_args
)
try:
response = query_engine.query(query)
except ValueError as e:
logging.error(f"ValueError {e}")
response = "Document not found"
return response
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((754, 816), 'pydantic.Field', 'Field', (['...'], {'description': '"""the search query to search resources"""'}), "(..., description='the search query to search resources')\n", (759, 816), False, 'from pydantic import BaseModel, Field\n'), ((1840, 1905), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor_chatgpt'}), '(llm_predictor=llm_predictor_chatgpt)\n', (1868, 1905), False, 'from llama_index import VectorStoreIndex, LLMPredictor, ServiceContext\n'), ((2173, 2227), 'logging.info', 'logging.info', (['f"""vector_store_name {vector_store_name}"""'], {}), "(f'vector_store_name {vector_store_name}')\n", (2185, 2227), False, 'import logging\n'), ((2236, 2302), 'logging.info', 'logging.info', (['f"""vector_store_index_name {vector_store_index_name}"""'], {}), "(f'vector_store_index_name {vector_store_index_name}')\n", (2248, 2302), False, 'import logging\n'), ((2421, 2465), 'logging.info', 'logging.info', (['f"""vector_store {vector_store}"""'], {}), "(f'vector_store {vector_store}')\n", (2433, 2465), False, 'import logging\n'), ((2970, 3068), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (3004, 3068), False, 'from llama_index import VectorStoreIndex, LLMPredictor, ServiceContext\n'), ((2869, 2936), 'superagi.vector_store.chromadb.ChromaDB.create_collection', 'ChromaDB.create_collection', ([], {'collection_name': 'vector_store_index_name'}), '(collection_name=vector_store_index_name)\n', (2895, 2936), False, 'from superagi.vector_store.chromadb import ChromaDB\n'), ((2326, 2393), 'superagi.resource_manager.llama_vector_store_factory.LlamaVectorStoreFactory', 'LlamaVectorStoreFactory', (['vector_store_name', 'vector_store_index_name'], {}), '(vector_store_name, vector_store_index_name)\n', (2349, 2393), False, 'from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory\n'), ((3262, 3294), 'logging.error', 'logging.error', (['f"""ValueError {e}"""'], {}), "(f'ValueError {e}')\n", (3275, 3294), False, 'import logging\n'), ((1782, 1810), 'superagi.config.config.get_config', 'get_config', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1792, 1810), False, 'from superagi.config.config import get_config\n')] |
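The key retrieval detail in the tool above is that every query is scoped to the calling agent through a metadata filter. A minimal standalone sketch of that filter construction, using the same pre-0.10 llama_index imports as the tool; the agent id value is illustrative.

from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters

agent_id = 42  # illustrative value
filters = MetadataFilters(
    filters=[ExactMatchFilter(key="agent_id", value=str(agent_id))]
)

# Passed through index.as_query_engine(filters=filters), this restricts the
# similarity search to chunks whose metadata carries this agent's id.
as_query_engine_args = dict(filters=filters)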
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from typing import Any, List, Optional
from sentence_transformers import CrossEncoder
from typing import Optional, Sequence
from langchain_core.documents import Document
from langchain.callbacks.manager import Callbacks
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from llama_index.bridge.pydantic import Field, PrivateAttr
class LangchainReranker(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
model_name_or_path: str = Field()
_model: Any = PrivateAttr()
top_n: int = Field()
device: str = Field()
max_length: int = Field()
batch_size: int = Field()
# show_progress_bar: bool = None
num_workers: int = Field()
# activation_fct = None
# apply_softmax = False
def __init__(self,
model_name_or_path: str,
top_n: int = 3,
device: str = "cuda",
max_length: int = 1024,
batch_size: int = 32,
# show_progress_bar: bool = None,
num_workers: int = 0,
# activation_fct = None,
# apply_softmax = False,
):
# self.top_n=top_n
# self.model_name_or_path=model_name_or_path
# self.device=device
# self.max_length=max_length
# self.batch_size=batch_size
# self.show_progress_bar=show_progress_bar
# self.num_workers=num_workers
# self.activation_fct=activation_fct
# self.apply_softmax=apply_softmax
        self._model = CrossEncoder(model_name=model_name_or_path, max_length=max_length, device=device)
super().__init__(
top_n=top_n,
model_name_or_path=model_name_or_path,
device=device,
max_length=max_length,
batch_size=batch_size,
# show_progress_bar=show_progress_bar,
num_workers=num_workers,
# activation_fct=activation_fct,
# apply_softmax=apply_softmax
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
        Rerank documents with the local CrossEncoder model and keep the top_n highest-scoring ones.
        Args:
            documents: A sequence of documents to rerank.
            query: The query used to score the documents.
            callbacks: Callbacks to run during the compression process.
        Returns:
            The top_n documents, each annotated with a relevance_score in its metadata.
"""
if len(documents) == 0: # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
sentence_pairs = [[query, _doc] for _doc in _docs]
results = self._model.predict(sentences=sentence_pairs,
batch_size=self.batch_size,
# show_progress_bar=self.show_progress_bar,
num_workers=self.num_workers,
# activation_fct=self.activation_fct,
# apply_softmax=self.apply_softmax,
convert_to_tensor=True
)
top_k = self.top_n if self.top_n < len(results) else len(results)
values, indices = results.topk(top_k)
final_results = []
for value, index in zip(values, indices):
doc = doc_list[index]
doc.metadata["relevance_score"] = value
final_results.append(doc)
return final_results
if __name__ == "__main__":
from configs import (LLM_MODELS,
VECTOR_SEARCH_TOP_K,
SCORE_THRESHOLD,
TEMPERATURE,
USE_RERANKER,
RERANKER_MODEL,
RERANKER_MAX_LENGTH,
MODEL_PATH)
from server.utils import embedding_device
if USE_RERANKER:
reranker_model_path = MODEL_PATH["reranker"].get(RERANKER_MODEL, "BAAI/bge-reranker-large")
print("-----------------model path------------------")
print(reranker_model_path)
reranker_model = LangchainReranker(top_n=3,
device=embedding_device(),
max_length=RERANKER_MAX_LENGTH,
model_name_or_path=reranker_model_path
)
| [
"llama_index.bridge.pydantic.Field",
"llama_index.bridge.pydantic.PrivateAttr"
] | [((602, 609), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (607, 609), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((628, 641), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (639, 641), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((659, 666), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (664, 666), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((685, 692), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (690, 692), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((715, 722), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (720, 722), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((745, 752), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (750, 752), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((813, 820), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (818, 820), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((1674, 1749), 'sentence_transformers.CrossEncoder', 'CrossEncoder', ([], {'model_name': 'model_name_or_path', 'max_length': '(1024)', 'device': 'device'}), '(model_name=model_name_or_path, max_length=1024, device=device)\n', (1686, 1749), False, 'from sentence_transformers import CrossEncoder\n'), ((70, 95), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (85, 95), False, 'import os\n'), ((4520, 4538), 'server.utils.embedding_device', 'embedding_device', ([], {}), '()\n', (4536, 4538), False, 'from server.utils import embedding_device\n')] |
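A usage sketch for the reranker above, assuming a local CPU run; the checkpoint name is illustrative (any sentence-transformers cross-encoder works) and the scores depend on the model chosen.

from langchain_core.documents import Document

reranker = LangchainReranker(
    model_name_or_path="BAAI/bge-reranker-large",  # illustrative checkpoint
    top_n=2,
    device="cpu",
)

docs = [
    Document(page_content="LlamaIndex is a data framework for LLM applications."),
    Document(page_content="The capital of France is Paris."),
    Document(page_content="Rerankers score query/document pairs with a cross-encoder."),
]
top_docs = reranker.compress_documents(docs, query="What does a reranker do?")
for d in top_docs:
    print(d.metadata["relevance_score"], d.page_content)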
'''
Below helper functions are implemented in this script:
build_sentence_window_index - VectorStore index for the sentence-window RAG technique
get_sentence_window_query_engine - query engine for the above index
build_automerging_index - VectorStore index for the auto-merging RAG technique
get_automerging_query_engine - query engine for the above index
Evaluation function:
get_prebuilt_trulens_recorder - evaluation function with all the feedback functions
'''
import os
import numpy as np
from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.retrievers import AutoMergingRetriever
from llama_index.query_engine import RetrieverQueryEngine
from trulens_eval import Feedback, TruLlama
from trulens_eval import OpenAI as fOpenAI
from trulens_eval.feedback import Groundedness
############################################################################## Function 1 ###########################################################
def build_sentence_window_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
sentence_window_size=3,
save_dir="sentence_index",
):
# create the sentence window node parser w/ default settings
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=sentence_window_size,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
sentence_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
node_parser=node_parser,
)
if not os.path.exists(save_dir):
sentence_index = VectorStoreIndex.from_documents(
documents, service_context=sentence_context
)
sentence_index.storage_context.persist(persist_dir=save_dir)
else:
sentence_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=sentence_context,
)
return sentence_index
############################################################################## Function 2 ###########################################################
def get_sentence_window_query_engine(
sentence_index, similarity_top_k=6, rerank_top_n=2
):
# define postprocessors
postproc = MetadataReplacementPostProcessor(target_metadata_key="window")
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model="BAAI/bge-reranker-base"
)
sentence_window_engine = sentence_index.as_query_engine(
similarity_top_k=similarity_top_k, node_postprocessors=[postproc, rerank]
)
return sentence_window_engine
############################################################################## Function 3 ###########################################################
def build_automerging_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
save_dir="merging_index",
chunk_sizes=None
):
# chunk sizes for all the layers (factor of 4)
chunk_sizes = chunk_sizes or [2048, 512, 128]
# Hierarchical node parser to parse the tree nodes (parent and children)
node_parser = HierarchicalNodeParser.from_defaults(chunk_sizes=chunk_sizes)
# getting all intermediate and parent nodes
nodes = node_parser.get_nodes_from_documents(documents)
# getting only the leaf nodes
leaf_nodes = get_leaf_nodes(nodes)
# required service context to initialize both llm and embed model
merging_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model
)
# storage context to store the intermediate and parent nodes in a docstore, because the index is built only on the leaf nodes
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
if not os.path.exists(save_dir):
automerging_index = VectorStoreIndex(
leaf_nodes, storage_context=storage_context, service_context=merging_context
)
automerging_index.storage_context.persist(persist_dir=save_dir)
else:
automerging_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=merging_context
)
return automerging_index
############################################################################## Function 4 ###########################################################
def get_automerging_query_engine(
automerging_index,
similarity_top_k=12,
rerank_top_n=6,
):
# retriever is used to merge the child nodes into the parent nodes
base_retriever = automerging_index.as_retriever(similarity_top_k=similarity_top_k)
retriever = AutoMergingRetriever(
base_retriever, automerging_index.storage_context, verbose=True
)
# Ranking is used to select top k relevant chunks from similarity_top_k
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model='BAAI/bge-reranker-base'
)
    # getting query engine with the above mentioned retriever and reranker
automerging_engine = RetrieverQueryEngine.from_args(
retriever, node_postprocessors=[rerank]
)
return automerging_engine
############################################################################## Function 5 ###########################################################
def get_prebuilt_trulens_recorder(query_engine, app_id):
# Feedback functions
# Answer Relevance
provider = fOpenAI()
f_qa_relevance = Feedback(
provider.relevance_with_cot_reasons,
name="Answer Relevance"
).on_input_output()
# Context Relevance
context_selection = TruLlama.select_source_nodes().node.text
f_qs_relevance = (
Feedback(provider.qs_relevance,
name="Context Relevance")
.on_input()
.on(context_selection)
.aggregate(np.mean)
)
# Groundedness
grounded = Groundedness(groundedness_provider=provider)
f_groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons,
name="Groundedness"
)
.on(context_selection)
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
tru_recorder = TruLlama(
query_engine,
app_id=app_id,
feedbacks = [
f_qa_relevance,
f_qs_relevance,
f_groundedness
]
)
return tru_recorder
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.AutoMergingRetriever",
"llama_index.node_parser.HierarchicalNodeParser.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.indices.postprocessor.SentenceTransformerRerank",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.get_leaf_nodes",
"llama_index.StorageContext.from_defaults",
"llama_index.query_engine.RetrieverQueryEngine.from_args",
"llama_index.indices.postprocessor.MetadataReplacementPostProcessor"
] | [((1446, 1596), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': 'sentence_window_size', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=sentence_window_size,\n window_metadata_key='window', original_text_metadata_key='original_text')\n", (1484, 1596), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((1647, 1739), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model, node_parser=\n node_parser)\n', (1675, 1739), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2493, 2555), 'llama_index.indices.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (2525, 2555), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((2569, 2646), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (2594, 2646), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((3368, 3429), 'llama_index.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': 'chunk_sizes'}), '(chunk_sizes=chunk_sizes)\n', (3404, 3429), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((3591, 3612), 'llama_index.node_parser.get_leaf_nodes', 'get_leaf_nodes', (['nodes'], {}), '(nodes)\n', (3605, 3612), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((3706, 3768), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (3734, 3768), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((3944, 3974), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (3972, 3974), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4938, 5027), 'llama_index.retrievers.AutoMergingRetriever', 'AutoMergingRetriever', (['base_retriever', 'automerging_index.storage_context'], {'verbose': '(True)'}), '(base_retriever, automerging_index.storage_context,\n verbose=True)\n', (4958, 5027), False, 'from llama_index.retrievers import AutoMergingRetriever\n'), ((5128, 5205), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (5153, 5205), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((5322, 5393), 'llama_index.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {'node_postprocessors': '[rerank]'}), '(retriever, 
node_postprocessors=[rerank])\n', (5352, 5393), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((5711, 5720), 'trulens_eval.OpenAI', 'fOpenAI', ([], {}), '()\n', (5718, 5720), True, 'from trulens_eval import OpenAI as fOpenAI\n'), ((6162, 6206), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'provider'}), '(groundedness_provider=provider)\n', (6174, 6206), False, 'from trulens_eval.feedback import Groundedness\n'), ((6488, 6589), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': '[f_qa_relevance, f_qs_relevance, f_groundedness]'}), '(query_engine, app_id=app_id, feedbacks=[f_qa_relevance,\n f_qs_relevance, f_groundedness])\n', (6496, 6589), False, 'from trulens_eval import Feedback, TruLlama\n'), ((1777, 1801), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1791, 1801), False, 'import os\n'), ((1828, 1904), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'sentence_context'}), '(documents, service_context=sentence_context)\n', (1859, 1904), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4037, 4061), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4051, 4061), False, 'import os\n'), ((4091, 4189), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['leaf_nodes'], {'storage_context': 'storage_context', 'service_context': 'merging_context'}), '(leaf_nodes, storage_context=storage_context,\n service_context=merging_context)\n', (4107, 4189), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2068, 2118), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (2096, 2118), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4356, 4406), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (4384, 4406), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((5743, 5813), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasons'], {'name': '"""Answer Relevance"""'}), "(provider.relevance_with_cot_reasons, name='Answer Relevance')\n", (5751, 5813), False, 'from trulens_eval import Feedback, TruLlama\n'), ((5903, 5933), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (5931, 5933), False, 'from trulens_eval import Feedback, TruLlama\n'), ((5972, 6029), 'trulens_eval.Feedback', 'Feedback', (['provider.qs_relevance'], {'name': '"""Context Relevance"""'}), "(provider.qs_relevance, name='Context Relevance')\n", (5980, 6029), False, 'from trulens_eval import Feedback, TruLlama\n'), ((6239, 6316), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, name='Groundedness')\n", (6247, 6316), False, 'from trulens_eval import Feedback, TruLlama\n')] |
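A hedged end-to-end sketch of wiring the helpers above together, using the legacy (pre-0.10) llama_index imports this module already relies on; the `./docs` path and model name are illustrative and an OpenAI key is assumed to be configured for the LLM and default embeddings.

from llama_index import SimpleDirectoryReader
from llama_index.llms import OpenAI

documents = SimpleDirectoryReader("./docs").load_data()  # illustrative path
llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)

# Build (or reload) the sentence-window index, then query it.
sentence_index = build_sentence_window_index(documents, llm, save_dir="sentence_index")
query_engine = get_sentence_window_query_engine(sentence_index)
print(query_engine.query("What is sentence-window retrieval?"))

# Optionally wrap the engine with the TruLens recorder defined above.
tru_recorder = get_prebuilt_trulens_recorder(query_engine, app_id="sentence-window-demo")
with tru_recorder as recording:
    query_engine.query("How does auto-merging retrieval differ?")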
import os
import streamlit as st
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
st.set_page_config(
page_title="Chat with the PDM docs, powered by LlamaIndex",
page_icon="📝",
layout="centered",
initial_sidebar_state="auto",
menu_items=None,
)
st.title("Chat with the PDM docs, powered by LlamaIndex 💬🦙")
st.info(
"PDM - A modern Python package and dependency manager. "
"Check out the full documentation at [PDM docs](https://pdm-project.org).",
icon="📃",
)
Settings.llm = OpenAI(
api_key=st.secrets.get("openai_key"),
api_base=st.secrets.get("openai_base"),
model="gpt-3.5-turbo",
temperature=0.5,
system_prompt="You are an expert on PDM and your job is to answer technical questions. "
"Assume that all questions are related to PDM. Keep your answers technical and based on facts - do not hallucinate features.",
)
Settings.embed_model = OpenAIEmbedding(api_base=st.secrets.get("openai_base"), api_key=st.secrets.get("openai_key"))
DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "docs/docs")
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{
"role": "assistant",
"content": "Ask me a question about PDM!",
}
]
@st.cache_resource(show_spinner=False)
def load_data():
with st.spinner(text="Loading and indexing the PDM docs - hang tight! This should take 1-2 minutes."):
reader = SimpleDirectoryReader(input_dir=DATA_PATH, recursive=True, required_exts=[".md"])
docs = reader.load_data()
index = VectorStoreIndex.from_documents(docs)
return index
index = load_data()
if "chat_engine" not in st.session_state.keys(): # Initialize the chat engine
st.session_state.chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = st.session_state.chat_engine.chat(prompt)
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader"
] | [((215, 384), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with the PDM docs, powered by LlamaIndex"""', 'page_icon': '"""📝"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=\n 'Chat with the PDM docs, powered by LlamaIndex', page_icon='📝', layout=\n 'centered', initial_sidebar_state='auto', menu_items=None)\n", (233, 384), True, 'import streamlit as st\n'), ((398, 458), 'streamlit.title', 'st.title', (['"""Chat with the PDM docs, powered by LlamaIndex 💬🦙"""'], {}), "('Chat with the PDM docs, powered by LlamaIndex 💬🦙')\n", (406, 458), True, 'import streamlit as st\n'), ((459, 616), 'streamlit.info', 'st.info', (['"""PDM - A modern Python package and dependency manager. Check out the full documentation at [PDM docs](https://pdm-project.org)."""'], {'icon': '"""📃"""'}), "(\n 'PDM - A modern Python package and dependency manager. Check out the full documentation at [PDM docs](https://pdm-project.org).'\n , icon='📃')\n", (466, 616), True, 'import streamlit as st\n'), ((1446, 1483), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1463, 1483), True, 'import streamlit as st\n'), ((1230, 1253), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1251, 1253), True, 'import streamlit as st\n'), ((1863, 1886), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1884, 1886), True, 'import streamlit as st\n'), ((2033, 2063), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (2046, 2063), True, 'import streamlit as st\n'), ((2119, 2188), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (2151, 2188), True, 'import streamlit as st\n'), ((660, 688), 'streamlit.secrets.get', 'st.secrets.get', (['"""openai_key"""'], {}), "('openai_key')\n", (674, 688), True, 'import streamlit as st\n'), ((703, 732), 'streamlit.secrets.get', 'st.secrets.get', (['"""openai_base"""'], {}), "('openai_base')\n", (717, 732), True, 'import streamlit as st\n'), ((1056, 1085), 'streamlit.secrets.get', 'st.secrets.get', (['"""openai_base"""'], {}), "('openai_base')\n", (1070, 1085), True, 'import streamlit as st\n'), ((1095, 1123), 'streamlit.secrets.get', 'st.secrets.get', (['"""openai_key"""'], {}), "('openai_key')\n", (1109, 1123), True, 'import streamlit as st\n'), ((1167, 1192), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1182, 1192), False, 'import os\n'), ((1510, 1616), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the PDM docs - hang tight! This should take 1-2 minutes."""'}), "(text=\n 'Loading and indexing the PDM docs - hang tight! 
This should take 1-2 minutes.'\n )\n", (1520, 1616), True, 'import streamlit as st\n'), ((1625, 1711), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'DATA_PATH', 'recursive': '(True)', 'required_exts': "['.md']"}), "(input_dir=DATA_PATH, recursive=True, required_exts=[\n '.md'])\n", (1646, 1711), False, 'from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex\n'), ((1757, 1794), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {}), '(docs)\n', (1788, 1794), False, 'from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex\n'), ((2276, 2308), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2291, 2308), True, 'import streamlit as st\n'), ((2318, 2346), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2326, 2346), True, 'import streamlit as st\n'), ((2479, 2507), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2494, 2507), True, 'import streamlit as st\n'), ((2522, 2547), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (2532, 2547), True, 'import streamlit as st\n'), ((2572, 2613), 'streamlit.session_state.chat_engine.chat', 'st.session_state.chat_engine.chat', (['prompt'], {}), '(prompt)\n', (2605, 2613), True, 'import streamlit as st\n'), ((2626, 2653), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2634, 2653), True, 'import streamlit as st\n'), ((2740, 2781), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (2772, 2781), True, 'import streamlit as st\n')] |
from typing import Callable, List
def split_text_keep_separator(text: str, separator: str) -> List[str]:
"""Split text with separator and keep the separator at the end of each split."""
parts = text.split(separator)
result = [separator + s if i > 0 else s for i, s in enumerate(parts)]
return [s for s in result if s]
def split_by_sep(sep: str, keep_sep: bool = True) -> Callable[[str], List[str]]:
"""Split text by separator."""
if keep_sep:
return lambda text: split_text_keep_separator(text, sep)
else:
return lambda text: text.split(sep)
def split_by_char() -> Callable[[str], List[str]]:
"""Split text by character."""
return lambda text: list(text)
def split_by_sentence_tokenizer() -> Callable[[str], List[str]]:
import os
import nltk
from llama_index.utils import get_cache_dir
cache_dir = get_cache_dir()
nltk_data_dir = os.environ.get("NLTK_DATA", cache_dir)
# update nltk path for nltk so that it finds the data
if nltk_data_dir not in nltk.data.path:
nltk.data.path.append(nltk_data_dir)
try:
nltk.data.find("tokenizers/punkt")
except LookupError:
nltk.download("punkt", download_dir=nltk_data_dir)
tokenizer = nltk.tokenize.PunktSentenceTokenizer()
# get the spans and then return the sentences
# using the start index of each span
# instead of using end, use the start of the next span if available
def split(text: str) -> List[str]:
spans = list(tokenizer.span_tokenize(text))
sentences = []
for i, span in enumerate(spans):
start = span[0]
if i < len(spans) - 1:
end = spans[i + 1][0]
else:
end = len(text)
sentences.append(text[start:end])
return sentences
return split
def split_by_regex(regex: str) -> Callable[[str], List[str]]:
"""Split text by regex."""
import re
return lambda text: re.findall(regex, text)
def split_by_phrase_regex() -> Callable[[str], List[str]]:
"""Split text by phrase regex.
This regular expression will split the sentences into phrases,
where each phrase is a sequence of one or more non-comma,
non-period, and non-semicolon characters, followed by an optional comma,
    period, or semicolon. The trailing delimiter, when present, stays attached
    to the end of its phrase rather than being returned as a separate item.
"""
regex = "[^,.;。]+[,.;。]?"
return split_by_regex(regex)
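# Minimal usage sketch for the splitters above; the sample string is arbitrary
# and chosen only to exercise sentence and phrase boundaries.
if __name__ == "__main__":
    sample = "First sentence. Second sentence, with a phrase; and another one."
    sentences = split_by_sentence_tokenizer()(sample)
    phrases = split_by_phrase_regex()(sample)
    print(sentences)  # one string per sentence, spans preserved from the source text
    print(phrases)  # one string per phrase, each ending with its delimiter if any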
| [
"llama_index.utils.get_cache_dir"
] | [((876, 891), 'llama_index.utils.get_cache_dir', 'get_cache_dir', ([], {}), '()\n', (889, 891), False, 'from llama_index.utils import get_cache_dir\n'), ((912, 950), 'os.environ.get', 'os.environ.get', (['"""NLTK_DATA"""', 'cache_dir'], {}), "('NLTK_DATA', cache_dir)\n", (926, 950), False, 'import os\n'), ((1252, 1290), 'nltk.tokenize.PunktSentenceTokenizer', 'nltk.tokenize.PunktSentenceTokenizer', ([], {}), '()\n', (1288, 1290), False, 'import nltk\n'), ((1062, 1098), 'nltk.data.path.append', 'nltk.data.path.append', (['nltk_data_dir'], {}), '(nltk_data_dir)\n', (1083, 1098), False, 'import nltk\n'), ((1117, 1151), 'nltk.data.find', 'nltk.data.find', (['"""tokenizers/punkt"""'], {}), "('tokenizers/punkt')\n", (1131, 1151), False, 'import nltk\n'), ((1985, 2008), 're.findall', 're.findall', (['regex', 'text'], {}), '(regex, text)\n', (1995, 2008), False, 'import re\n'), ((1184, 1234), 'nltk.download', 'nltk.download', (['"""punkt"""'], {'download_dir': 'nltk_data_dir'}), "('punkt', download_dir=nltk_data_dir)\n", (1197, 1234), False, 'import nltk\n')] |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from sqlalchemy import make_url
from llama_index.vector_stores.postgres import PGVectorStore
# from llama_index.llms.llama_cpp import LlamaCPP
import psycopg2
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
from llama_index.core.schema import NodeWithScore
from typing import Optional
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import QueryBundle
from llama_index.core.retrievers import BaseRetriever
from typing import Any, List
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.vector_stores import VectorStoreQuery
import argparse
def load_vector_database(username, password):
db_name = "example_db"
host = "localhost"
password = password
port = "5432"
user = username
# conn = psycopg2.connect(connection_string)
conn = psycopg2.connect(
dbname="postgres",
host=host,
password=password,
port=port,
user=user,
)
conn.autocommit = True
with conn.cursor() as c:
c.execute(f"DROP DATABASE IF EXISTS {db_name}")
c.execute(f"CREATE DATABASE {db_name}")
vector_store = PGVectorStore.from_params(
database=db_name,
host=host,
password=password,
port=port,
user=user,
table_name="llama2_paper",
        embed_dim=384,  # must match the embedding model (384 for the default BAAI/bge-small-en)
)
return vector_store
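# The embed_dim passed above has to agree with the embedding model used for
# ingestion and querying. A minimal sketch for deriving it at runtime, assuming
# the same HuggingFaceEmbedding model (default "BAAI/bge-small-en") is used:
def get_embedding_dim(model_name: str = "BAAI/bge-small-en") -> int:
    probe_model = HuggingFaceEmbedding(model_name=model_name)
    # Embed a short probe string and read off the vector length.
    return len(probe_model.get_text_embedding("dimension probe"))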
def load_data(data_path):
loader = PyMuPDFReader()
documents = loader.load(file_path=data_path)
text_parser = SentenceSplitter(
chunk_size=1024,
# separator=" ",
)
text_chunks = []
    # maintain relationship with source doc index, to help inject doc metadata below
doc_idxs = []
for doc_idx, doc in enumerate(documents):
cur_text_chunks = text_parser.split_text(doc.text)
text_chunks.extend(cur_text_chunks)
doc_idxs.extend([doc_idx] * len(cur_text_chunks))
from llama_index.core.schema import TextNode
nodes = []
for idx, text_chunk in enumerate(text_chunks):
node = TextNode(
text=text_chunk,
)
src_doc = documents[doc_idxs[idx]]
node.metadata = src_doc.metadata
nodes.append(node)
return nodes
class VectorDBRetriever(BaseRetriever):
"""Retriever over a postgres vector store."""
def __init__(
self,
vector_store: PGVectorStore,
embed_model: Any,
query_mode: str = "default",
similarity_top_k: int = 2,
) -> None:
"""Init params."""
self._vector_store = vector_store
self._embed_model = embed_model
self._query_mode = query_mode
self._similarity_top_k = similarity_top_k
super().__init__()
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve."""
query_embedding = self._embed_model.get_query_embedding(
query_bundle.query_str
)
vector_store_query = VectorStoreQuery(
query_embedding=query_embedding,
similarity_top_k=self._similarity_top_k,
mode=self._query_mode,
)
query_result = self._vector_store.query(vector_store_query)
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append(NodeWithScore(node=node, score=score))
return nodes_with_scores
def completion_to_prompt(completion):
return f"<|system|>\n</s>\n<|user|>\n{completion}</s>\n<|assistant|>\n"
# Transform a list of chat messages into zephyr-specific input
def messages_to_prompt(messages):
prompt = ""
for message in messages:
if message.role == "system":
prompt += f"<|system|>\n{message.content}</s>\n"
elif message.role == "user":
prompt += f"<|user|>\n{message.content}</s>\n"
elif message.role == "assistant":
prompt += f"<|assistant|>\n{message.content}</s>\n"
# ensure we start with a system prompt, insert blank if needed
if not prompt.startswith("<|system|>\n"):
prompt = "<|system|>\n</s>\n" + prompt
# add final assistant prompt
prompt = prompt + "<|assistant|>\n"
return prompt
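# Minimal sketch of the zephyr-style prompt produced above. The _Msg stand-in
# only mimics the .role/.content attributes the formatter reads; any chat
# message object exposing those attributes would work the same way.
def _demo_prompt_format() -> str:
    from collections import namedtuple

    _Msg = namedtuple("_Msg", ["role", "content"])
    demo = [_Msg("system", "You are a helpful assistant."), _Msg("user", "Hi!")]
    # Produces:
    # <|system|>
    # You are a helpful assistant.</s>
    # <|user|>
    # Hi!</s>
    # <|assistant|>
    return messages_to_prompt(demo)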
def main(args):
embed_model = HuggingFaceEmbedding(model_name=args.embedding_model_path)
# Use custom LLM in BigDL
from bigdl.llm.llamaindex.llms import BigdlLLM
llm = BigdlLLM(
model_name=args.model_path,
tokenizer_name=args.model_path,
context_window=512,
max_new_tokens=args.n_predict,
generate_kwargs={"temperature": 0.7, "do_sample": False},
model_kwargs={},
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
device_map="xpu",
)
vector_store = load_vector_database(username=args.user, password=args.password)
nodes = load_data(data_path=args.data)
for node in nodes:
node_embedding = embed_model.get_text_embedding(
node.get_content(metadata_mode="all")
)
node.embedding = node_embedding
vector_store.add(nodes)
# query_str = "Can you tell me about the key concepts for safety finetuning"
query_str = "Explain about the training data for Llama 2"
query_embedding = embed_model.get_query_embedding(query_str)
# construct vector store query
query_mode = "default"
# query_mode = "sparse"
# query_mode = "hybrid"
vector_store_query = VectorStoreQuery(
query_embedding=query_embedding, similarity_top_k=2, mode=query_mode
)
# returns a VectorStoreQueryResult
query_result = vector_store.query(vector_store_query)
# print("Retrieval Results: ")
# print(query_result.nodes[0].get_content())
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append(NodeWithScore(node=node, score=score))
retriever = VectorDBRetriever(
vector_store, embed_model, query_mode="default", similarity_top_k=1
)
query_engine = RetrieverQueryEngine.from_args(retriever, llm=llm)
# query_str = "How does Llama 2 perform compared to other open-source models?"
query_str = args.question
response = query_engine.query(query_str)
print("------------RESPONSE GENERATION---------------------")
print(str(response))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='LlamaIndex BigdlLLM Example')
parser.add_argument('-m','--model-path', type=str, required=True,
help='the path to transformers model')
parser.add_argument('-q', '--question', type=str, default='How does Llama 2 perform compared to other open-source models?',
                        help='question you want to ask.')
parser.add_argument('-d','--data',type=str, default='./data/llama2.pdf',
help="the data used during retrieval")
parser.add_argument('-u', '--user', type=str, required=True,
help="user name in the database postgres")
parser.add_argument('-p','--password', type=str, required=True,
help="the password of the user in the database")
parser.add_argument('-e','--embedding-model-path',default="BAAI/bge-small-en",
help="the path to embedding model path")
parser.add_argument('-n','--n-predict', type=int, default=32,
help='max number of predict tokens')
args = parser.parse_args()
main(args) | [
"llama_index.vector_stores.postgres.PGVectorStore.from_params",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.schema.TextNode",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.schema.NodeWithScore",
"llama_index.core.vector_stores.VectorStoreQuery",
"llama_index.core.query_engine.RetrieverQueryEngine.from_args",
"llama_index.readers.file.PyMuPDFReader"
] | [((1521, 1612), 'psycopg2.connect', 'psycopg2.connect', ([], {'dbname': '"""postgres"""', 'host': 'host', 'password': 'password', 'port': 'port', 'user': 'user'}), "(dbname='postgres', host=host, password=password, port=port,\n user=user)\n", (1537, 1612), False, 'import psycopg2\n'), ((1841, 1982), 'llama_index.vector_stores.postgres.PGVectorStore.from_params', 'PGVectorStore.from_params', ([], {'database': 'db_name', 'host': 'host', 'password': 'password', 'port': 'port', 'user': 'user', 'table_name': '"""llama2_paper"""', 'embed_dim': '(384)'}), "(database=db_name, host=host, password=password,\n port=port, user=user, table_name='llama2_paper', embed_dim=384)\n", (1866, 1982), False, 'from llama_index.vector_stores.postgres import PGVectorStore\n'), ((2137, 2152), 'llama_index.readers.file.PyMuPDFReader', 'PyMuPDFReader', ([], {}), '()\n', (2150, 2152), False, 'from llama_index.readers.file import PyMuPDFReader\n'), ((2222, 2255), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(1024)'}), '(chunk_size=1024)\n', (2238, 2255), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((5116, 5174), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'args.embedding_model_path'}), '(model_name=args.embedding_model_path)\n', (5136, 5174), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((5271, 5583), 'bigdl.llm.llamaindex.llms.BigdlLLM', 'BigdlLLM', ([], {'model_name': 'args.model_path', 'tokenizer_name': 'args.model_path', 'context_window': '(512)', 'max_new_tokens': 'args.n_predict', 'generate_kwargs': "{'temperature': 0.7, 'do_sample': False}", 'model_kwargs': '{}', 'messages_to_prompt': 'messages_to_prompt', 'completion_to_prompt': 'completion_to_prompt', 'device_map': '"""xpu"""'}), "(model_name=args.model_path, tokenizer_name=args.model_path,\n context_window=512, max_new_tokens=args.n_predict, generate_kwargs={\n 'temperature': 0.7, 'do_sample': False}, model_kwargs={},\n messages_to_prompt=messages_to_prompt, completion_to_prompt=\n completion_to_prompt, device_map='xpu')\n", (5279, 5583), False, 'from bigdl.llm.llamaindex.llms import BigdlLLM\n'), ((6353, 6444), 'llama_index.core.vector_stores.VectorStoreQuery', 'VectorStoreQuery', ([], {'query_embedding': 'query_embedding', 'similarity_top_k': '(2)', 'mode': 'query_mode'}), '(query_embedding=query_embedding, similarity_top_k=2, mode=\n query_mode)\n', (6369, 6444), False, 'from llama_index.core.vector_stores import VectorStoreQuery\n'), ((7083, 7133), 'llama_index.core.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {'llm': 'llm'}), '(retriever, llm=llm)\n', (7113, 7133), False, 'from llama_index.core.query_engine import RetrieverQueryEngine\n'), ((7428, 7494), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""LlamaIndex BigdlLLM Example"""'}), "(description='LlamaIndex BigdlLLM Example')\n", (7451, 7494), False, 'import argparse\n'), ((2759, 2784), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'text_chunk'}), '(text=text_chunk)\n', (2767, 2784), False, 'from llama_index.core.schema import TextNode\n'), ((3682, 3800), 'llama_index.core.vector_stores.VectorStoreQuery', 'VectorStoreQuery', ([], {'query_embedding': 'query_embedding', 'similarity_top_k': 'self._similarity_top_k', 'mode': 'self._query_mode'}), '(query_embedding=query_embedding, similarity_top_k=self.\n _similarity_top_k, 
mode=self._query_mode)\n', (3698, 3800), False, 'from llama_index.core.vector_stores import VectorStoreQuery\n'), ((6893, 6930), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'node', 'score': 'score'}), '(node=node, score=score)\n', (6906, 6930), False, 'from llama_index.core.schema import NodeWithScore\n'), ((4191, 4228), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'node', 'score': 'score'}), '(node=node, score=score)\n', (4204, 4228), False, 'from llama_index.core.schema import NodeWithScore\n')] |
import os
import logging
import hashlib
import random
import uuid
import openai
from pathlib import Path
from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage
from llama_index.readers.schema.base import Document
from langchain.chat_models import ChatOpenAI
from azure.cognitiveservices.speech import SpeechConfig, SpeechSynthesizer, ResultReason, CancellationReason, SpeechSynthesisOutputFormat
from azure.cognitiveservices.speech.audio import AudioOutputConfig
from app.fetch_web_post import get_urls, get_youtube_transcript, scrape_website, scrape_website_by_phantomjscloud
from app.prompt import get_prompt_template
from app.util import get_language_code, get_youtube_video_id
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
SPEECH_KEY = os.environ.get('SPEECH_KEY')
SPEECH_REGION = os.environ.get('SPEECH_REGION')
openai.api_key = OPENAI_API_KEY
index_cache_web_dir = Path('/tmp/myGPTReader/cache_web/')
index_cache_file_dir = Path('/data/myGPTReader/file/')
index_cache_voice_dir = Path('/tmp/myGPTReader/voice/')
if not index_cache_web_dir.is_dir():
index_cache_web_dir.mkdir(parents=True, exist_ok=True)
if not index_cache_voice_dir.is_dir():
index_cache_voice_dir.mkdir(parents=True, exist_ok=True)
if not index_cache_file_dir.is_dir():
index_cache_file_dir.mkdir(parents=True, exist_ok=True)
llm_predictor = LLMPredictor(llm=ChatOpenAI(
temperature=0, model_name="gpt-3.5-turbo"))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
web_storage_context = StorageContext.from_defaults()
file_storage_context = StorageContext.from_defaults()
def get_unique_md5(urls):
urls_str = ''.join(sorted(urls))
hashed_str = hashlib.md5(urls_str.encode('utf-8')).hexdigest()
return hashed_str
def format_dialog_messages(messages):
return "\n".join(messages)
def get_document_from_youtube_id(video_id):
if video_id is None:
return None
transcript = get_youtube_transcript(video_id)
if transcript is None:
return None
return Document(transcript)
def remove_prompt_from_text(text):
return text.replace('chatGPT:', '').strip()
def get_documents_from_urls(urls):
documents = []
for url in urls['page_urls']:
document = Document(scrape_website(url))
documents.append(document)
if len(urls['rss_urls']) > 0:
rss_documents = RssReader().load_data(urls['rss_urls'])
documents = documents + rss_documents
if len(urls['phantomjscloud_urls']) > 0:
for url in urls['phantomjscloud_urls']:
document = Document(scrape_website_by_phantomjscloud(url))
documents.append(document)
if len(urls['youtube_urls']) > 0:
for url in urls['youtube_urls']:
video_id = get_youtube_video_id(url)
document = get_document_from_youtube_id(video_id)
if (document is not None):
documents.append(document)
else:
documents.append(Document(f"Can't get transcript from youtube video: {url}"))
return documents
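# The `urls` argument above is the dict returned by get_urls(); a hand-built
# example of the expected shape (the keys come from the code above, the URL
# values are placeholders):
EXAMPLE_COMBINED_URLS = {
    "page_urls": ["https://example.com/article"],
    "rss_urls": [],
    "phantomjscloud_urls": [],
    "youtube_urls": [],
}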
def get_index_from_web_cache(name):
try:
index = load_index_from_storage(web_storage_context, index_id=name)
except Exception as e:
logging.error(e)
return None
return index
def get_index_from_file_cache(name):
try:
index = load_index_from_storage(file_storage_context, index_id=name)
except Exception as e:
logging.error(e)
return None
return index
def get_index_name_from_file(file: str):
file_md5_with_extension = str(Path(file).relative_to(index_cache_file_dir).name)
file_md5 = file_md5_with_extension.split('.')[0]
return file_md5
def get_answer_from_chatGPT(messages):
dialog_messages = format_dialog_messages(messages)
logging.info('=====> Use chatGPT to answer!')
logging.info(dialog_messages)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": dialog_messages}]
)
logging.info(completion.usage)
total_tokens = completion.usage.total_tokens
return completion.choices[0].message.content, total_tokens, None
def get_answer_from_llama_web(messages, urls):
dialog_messages = format_dialog_messages(messages)
lang_code = get_language_code(remove_prompt_from_text(messages[-1]))
combained_urls = get_urls(urls)
logging.info(combained_urls)
index_file_name = get_unique_md5(urls)
index = get_index_from_web_cache(index_file_name)
if index is None:
logging.info(f"=====> Build index from web!")
documents = get_documents_from_urls(combained_urls)
logging.info(documents)
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
index.set_index_id(index_file_name)
index.storage_context.persist()
logging.info(
f"=====> Save index to disk path: {index_cache_web_dir / index_file_name}")
prompt = get_prompt_template(lang_code)
logging.info('=====> Use llama web with chatGPT to answer!')
logging.info('=====> dialog_messages')
logging.info(dialog_messages)
logging.info('=====> text_qa_template')
logging.info(prompt.prompt)
answer = index.as_query_engine(text_qa_template=prompt).query(dialog_messages)
total_llm_model_tokens = llm_predictor.last_token_usage
total_embedding_model_tokens = service_context.embed_model.last_token_usage
return answer, total_llm_model_tokens, total_embedding_model_tokens
def get_answer_from_llama_file(messages, file):
dialog_messages = format_dialog_messages(messages)
lang_code = get_language_code(remove_prompt_from_text(messages[-1]))
index_name = get_index_name_from_file(file)
index = get_index_from_file_cache(index_name)
if index is None:
logging.info(f"=====> Build index from file!")
documents = SimpleDirectoryReader(input_files=[file]).load_data()
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
index.set_index_id(index_name)
index.storage_context.persist()
logging.info(
f"=====> Save index to disk path: {index_cache_file_dir / index_name}")
prompt = get_prompt_template(lang_code)
logging.info('=====> Use llama file with chatGPT to answer!')
logging.info('=====> dialog_messages')
logging.info(dialog_messages)
logging.info('=====> text_qa_template')
logging.info(prompt)
    answer = index.as_query_engine(text_qa_template=prompt).query(dialog_messages)
total_llm_model_tokens = llm_predictor.last_token_usage
total_embedding_model_tokens = service_context.embed_model.last_token_usage
return answer, total_llm_model_tokens, total_embedding_model_tokens
def get_text_from_whisper(voice_file_path):
with open(voice_file_path, "rb") as f:
transcript = openai.Audio.transcribe("whisper-1", f)
return transcript.text
lang_code_voice_map = {
'zh': ['zh-CN-XiaoxiaoNeural', 'zh-CN-XiaohanNeural', 'zh-CN-YunxiNeural', 'zh-CN-YunyangNeural'],
'en': ['en-US-JennyNeural', 'en-US-RogerNeural', 'en-IN-NeerjaNeural', 'en-IN-PrabhatNeural', 'en-AU-AnnetteNeural', 'en-AU-CarlyNeural', 'en-GB-AbbiNeural', 'en-GB-AlfieNeural'],
'ja': ['ja-JP-AoiNeural', 'ja-JP-DaichiNeural'],
'de': ['de-DE-AmalaNeural', 'de-DE-BerndNeural'],
}
def convert_to_ssml(text, voice_name=None):
try:
logging.info("=====> Convert text to ssml!")
logging.info(text)
text = remove_prompt_from_text(text)
lang_code = get_language_code(text)
if voice_name is None:
voice_name = random.choice(lang_code_voice_map[lang_code])
except Exception as e:
logging.warning(f"Error: {e}. Using default voice.")
voice_name = random.choice(lang_code_voice_map['zh'])
ssml = '<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="zh-CN">'
ssml += f'<voice name="{voice_name}">{text}</voice>'
ssml += '</speak>'
return ssml
def get_voice_file_from_text(text, voice_name=None):
speech_config = SpeechConfig(subscription=SPEECH_KEY, region=SPEECH_REGION)
speech_config.set_speech_synthesis_output_format(
SpeechSynthesisOutputFormat.Audio16Khz32KBitRateMonoMp3)
speech_config.speech_synthesis_language = "zh-CN"
    file_name = str(index_cache_voice_dir / f"{uuid.uuid4()}.mp3")
file_config = AudioOutputConfig(filename=file_name)
synthesizer = SpeechSynthesizer(
speech_config=speech_config, audio_config=file_config)
ssml = convert_to_ssml(text, voice_name)
result = synthesizer.speak_ssml_async(ssml).get()
if result.reason == ResultReason.SynthesizingAudioCompleted:
logging.info("Speech synthesized for text [{}], and the audio was saved to [{}]".format(
text, file_name))
elif result.reason == ResultReason.Canceled:
cancellation_details = result.cancellation_details
logging.info("Speech synthesis canceled: {}".format(
cancellation_details.reason))
if cancellation_details.reason == CancellationReason.Error:
logging.error("Error details: {}".format(
cancellation_details.error_details))
return file_name
| [
"llama_index.RssReader",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.readers.schema.base.Document"
] | [((795, 827), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (809, 827), False, 'import os\n'), ((841, 869), 'os.environ.get', 'os.environ.get', (['"""SPEECH_KEY"""'], {}), "('SPEECH_KEY')\n", (855, 869), False, 'import os\n'), ((886, 917), 'os.environ.get', 'os.environ.get', (['"""SPEECH_REGION"""'], {}), "('SPEECH_REGION')\n", (900, 917), False, 'import os\n'), ((973, 1008), 'pathlib.Path', 'Path', (['"""/tmp/myGPTReader/cache_web/"""'], {}), "('/tmp/myGPTReader/cache_web/')\n", (977, 1008), False, 'from pathlib import Path\n'), ((1032, 1063), 'pathlib.Path', 'Path', (['"""/data/myGPTReader/file/"""'], {}), "('/data/myGPTReader/file/')\n", (1036, 1063), False, 'from pathlib import Path\n'), ((1088, 1119), 'pathlib.Path', 'Path', (['"""/tmp/myGPTReader/voice/"""'], {}), "('/tmp/myGPTReader/voice/')\n", (1092, 1119), False, 'from pathlib import Path\n'), ((1530, 1587), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (1558, 1587), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((1611, 1641), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (1639, 1641), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((1665, 1695), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (1693, 1695), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((2026, 2058), 'app.fetch_web_post.get_youtube_transcript', 'get_youtube_transcript', (['video_id'], {}), '(video_id)\n', (2048, 2058), False, 'from app.fetch_web_post import get_urls, get_youtube_transcript, scrape_website, scrape_website_by_phantomjscloud\n'), ((2117, 2137), 'llama_index.readers.schema.base.Document', 'Document', (['transcript'], {}), '(transcript)\n', (2125, 2137), False, 'from llama_index.readers.schema.base import Document\n'), ((3870, 3915), 'logging.info', 'logging.info', (['"""=====> Use chatGPT to answer!"""'], {}), "('=====> Use chatGPT to answer!')\n", (3882, 3915), False, 'import logging\n'), ((3920, 3949), 'logging.info', 'logging.info', (['dialog_messages'], {}), '(dialog_messages)\n', (3932, 3949), False, 'import logging\n'), ((3967, 4079), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': "[{'role': 'user', 'content': dialog_messages}]"}), "(model='gpt-3.5-turbo', messages=[{'role':\n 'user', 'content': dialog_messages}])\n", (3995, 4079), False, 'import openai\n'), ((4102, 4132), 'logging.info', 'logging.info', (['completion.usage'], {}), '(completion.usage)\n', (4114, 4132), False, 'import logging\n'), ((4448, 4462), 'app.fetch_web_post.get_urls', 'get_urls', (['urls'], {}), '(urls)\n', (4456, 4462), False, 'from app.fetch_web_post import get_urls, get_youtube_transcript, scrape_website, scrape_website_by_phantomjscloud\n'), ((4467, 4495), 'logging.info', 'logging.info', (['combained_urls'], {}), '(combained_urls)\n', (4479, 4495), False, 'import logging\n'), ((5063, 5093), 'app.prompt.get_prompt_template', 'get_prompt_template', (['lang_code'], {}), '(lang_code)\n', (5082, 5093), False, 'from 
app.prompt import get_prompt_template\n'), ((5098, 5158), 'logging.info', 'logging.info', (['"""=====> Use llama web with chatGPT to answer!"""'], {}), "('=====> Use llama web with chatGPT to answer!')\n", (5110, 5158), False, 'import logging\n'), ((5163, 5201), 'logging.info', 'logging.info', (['"""=====> dialog_messages"""'], {}), "('=====> dialog_messages')\n", (5175, 5201), False, 'import logging\n'), ((5206, 5235), 'logging.info', 'logging.info', (['dialog_messages'], {}), '(dialog_messages)\n', (5218, 5235), False, 'import logging\n'), ((5240, 5279), 'logging.info', 'logging.info', (['"""=====> text_qa_template"""'], {}), "('=====> text_qa_template')\n", (5252, 5279), False, 'import logging\n'), ((5284, 5311), 'logging.info', 'logging.info', (['prompt.prompt'], {}), '(prompt.prompt)\n', (5296, 5311), False, 'import logging\n'), ((6326, 6356), 'app.prompt.get_prompt_template', 'get_prompt_template', (['lang_code'], {}), '(lang_code)\n', (6345, 6356), False, 'from app.prompt import get_prompt_template\n'), ((6361, 6422), 'logging.info', 'logging.info', (['"""=====> Use llama file with chatGPT to answer!"""'], {}), "('=====> Use llama file with chatGPT to answer!')\n", (6373, 6422), False, 'import logging\n'), ((6427, 6465), 'logging.info', 'logging.info', (['"""=====> dialog_messages"""'], {}), "('=====> dialog_messages')\n", (6439, 6465), False, 'import logging\n'), ((6470, 6499), 'logging.info', 'logging.info', (['dialog_messages'], {}), '(dialog_messages)\n', (6482, 6499), False, 'import logging\n'), ((6504, 6543), 'logging.info', 'logging.info', (['"""=====> text_qa_template"""'], {}), "('=====> text_qa_template')\n", (6516, 6543), False, 'import logging\n'), ((6548, 6568), 'logging.info', 'logging.info', (['prompt'], {}), '(prompt)\n', (6560, 6568), False, 'import logging\n'), ((8212, 8271), 'azure.cognitiveservices.speech.SpeechConfig', 'SpeechConfig', ([], {'subscription': 'SPEECH_KEY', 'region': 'SPEECH_REGION'}), '(subscription=SPEECH_KEY, region=SPEECH_REGION)\n', (8224, 8271), False, 'from azure.cognitiveservices.speech import SpeechConfig, SpeechSynthesizer, ResultReason, CancellationReason, SpeechSynthesisOutputFormat\n'), ((8524, 8561), 'azure.cognitiveservices.speech.audio.AudioOutputConfig', 'AudioOutputConfig', ([], {'filename': 'file_name'}), '(filename=file_name)\n', (8541, 8561), False, 'from azure.cognitiveservices.speech.audio import AudioOutputConfig\n'), ((8580, 8652), 'azure.cognitiveservices.speech.SpeechSynthesizer', 'SpeechSynthesizer', ([], {'speech_config': 'speech_config', 'audio_config': 'file_config'}), '(speech_config=speech_config, audio_config=file_config)\n', (8597, 8652), False, 'from azure.cognitiveservices.speech import SpeechConfig, SpeechSynthesizer, ResultReason, CancellationReason, SpeechSynthesisOutputFormat\n'), ((1451, 1504), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (1461, 1504), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3209, 3268), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['web_storage_context'], {'index_id': 'name'}), '(web_storage_context, index_id=name)\n', (3232, 3268), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((3421, 3481), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['file_storage_context'], {'index_id': 'name'}), 
'(file_storage_context, index_id=name)\n', (3444, 3481), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((4623, 4668), 'logging.info', 'logging.info', (['f"""=====> Build index from web!"""'], {}), "(f'=====> Build index from web!')\n", (4635, 4668), False, 'import logging\n'), ((4737, 4760), 'logging.info', 'logging.info', (['documents'], {}), '(documents)\n', (4749, 4760), False, 'import logging\n'), ((4777, 4855), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (4811, 4855), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((4948, 5041), 'logging.info', 'logging.info', (['f"""=====> Save index to disk path: {index_cache_web_dir / index_file_name}"""'], {}), "(\n f'=====> Save index to disk path: {index_cache_web_dir / index_file_name}')\n", (4960, 5041), False, 'import logging\n'), ((5912, 5958), 'logging.info', 'logging.info', (['f"""=====> Build index from file!"""'], {}), "(f'=====> Build index from file!')\n", (5924, 5958), False, 'import logging\n'), ((6049, 6127), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (6083, 6127), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((6215, 6304), 'logging.info', 'logging.info', (['f"""=====> Save index to disk path: {index_cache_file_dir / index_name}"""'], {}), "(\n f'=====> Save index to disk path: {index_cache_file_dir / index_name}')\n", (6227, 6304), False, 'import logging\n'), ((6982, 7021), 'openai.Audio.transcribe', 'openai.Audio.transcribe', (['"""whisper-1"""', 'f'], {}), "('whisper-1', f)\n", (7005, 7021), False, 'import openai\n'), ((7532, 7576), 'logging.info', 'logging.info', (['"""=====> Convert text to ssml!"""'], {}), "('=====> Convert text to ssml!')\n", (7544, 7576), False, 'import logging\n'), ((7585, 7603), 'logging.info', 'logging.info', (['text'], {}), '(text)\n', (7597, 7603), False, 'import logging\n'), ((7669, 7692), 'app.util.get_language_code', 'get_language_code', (['text'], {}), '(text)\n', (7686, 7692), False, 'from app.util import get_language_code, get_youtube_video_id\n'), ((2339, 2358), 'app.fetch_web_post.scrape_website', 'scrape_website', (['url'], {}), '(url)\n', (2353, 2358), False, 'from app.fetch_web_post import get_urls, get_youtube_transcript, scrape_website, scrape_website_by_phantomjscloud\n'), ((2844, 2869), 'app.util.get_youtube_video_id', 'get_youtube_video_id', (['url'], {}), '(url)\n', (2864, 2869), False, 'from app.util import get_language_code, get_youtube_video_id\n'), ((3304, 3320), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (3317, 3320), False, 'import logging\n'), ((3517, 3533), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (3530, 3533), False, 'import logging\n'), ((7749, 7794), 'random.choice', 'random.choice', (['lang_code_voice_map[lang_code]'], {}), '(lang_code_voice_map[lang_code])\n', (7762, 7794), False, 'import random\n'), ((7830, 7882), 'logging.warning', 'logging.warning', (['f"""Error: {e}. 
Using default voice."""'], {}), "(f'Error: {e}. Using default voice.')\n", (7845, 7882), False, 'import logging\n'), ((7904, 7944), 'random.choice', 'random.choice', (["lang_code_voice_map['zh']"], {}), "(lang_code_voice_map['zh'])\n", (7917, 7944), False, 'import random\n'), ((8487, 8499), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8497, 8499), False, 'import uuid\n'), ((2453, 2464), 'llama_index.RssReader', 'RssReader', ([], {}), '()\n', (2462, 2464), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((2664, 2701), 'app.fetch_web_post.scrape_website_by_phantomjscloud', 'scrape_website_by_phantomjscloud', (['url'], {}), '(url)\n', (2696, 2701), False, 'from app.fetch_web_post import get_urls, get_youtube_transcript, scrape_website, scrape_website_by_phantomjscloud\n'), ((5979, 6020), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file]'}), '(input_files=[file])\n', (6000, 6020), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((3065, 3124), 'llama_index.readers.schema.base.Document', 'Document', (['f"""Can\'t get transcript from youtube video: {url}"""'], {}), '(f"Can\'t get transcript from youtube video: {url}")\n', (3073, 3124), False, 'from llama_index.readers.schema.base import Document\n'), ((3647, 3657), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (3651, 3657), False, 'from pathlib import Path\n')] |
"""Configuration."""
import streamlit as st
import os
### DEFINE BUILDER_LLM #####
## Uncomment the LLM you want to use to construct the meta agent
## OpenAI
from llama_index.llms import OpenAI
# set OpenAI Key - use Streamlit secrets
os.environ["OPENAI_API_KEY"] = st.secrets.openai_key
# load LLM
BUILDER_LLM = OpenAI(model="gpt-4-1106-preview")
# # Anthropic (make sure you `pip install anthropic`)
# from llama_index.llms import Anthropic
# # set Anthropic key
# os.environ["ANTHROPIC_API_KEY"] = st.secrets.anthropic_key
# BUILDER_LLM = Anthropic()
| [
"llama_index.llms.OpenAI"
] | [((316, 350), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (322, 350), False, 'from llama_index.llms import OpenAI\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/4 20:58
@Author : alexanderwu
@File : embedding.py
"""
from llama_index.embeddings.openai import OpenAIEmbedding
from metagpt.config2 import config
def get_embedding() -> OpenAIEmbedding:
llm = config.get_openai_llm()
if llm is None:
raise ValueError("To use OpenAIEmbedding, please ensure that config.llm.api_type is correctly set to 'openai'.")
embedding = OpenAIEmbedding(api_key=llm.api_key, api_base=llm.base_url)
return embedding
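def embed_text(text: str) -> list[float]:
    """Minimal usage sketch: embed a single string with the configured model.

    Assumes config.llm points at an OpenAI-compatible endpoint, as required by
    the check in get_embedding above.
    """
    return get_embedding().get_text_embedding(text)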
| [
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((273, 296), 'metagpt.config2.config.get_openai_llm', 'config.get_openai_llm', ([], {}), '()\n', (294, 296), False, 'from metagpt.config2 import config\n'), ((455, 514), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_key': 'llm.api_key', 'api_base': 'llm.base_url'}), '(api_key=llm.api_key, api_base=llm.base_url)\n', (470, 514), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n')] |
import os
# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""
from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext
from langchain.llms.openai import OpenAI
from llama_index import StorageContext, load_index_from_storage
base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
# This example uses gpt-3.5-turbo by default; feel free to change if desired
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path))
# Configure prompt parameters and initialise helper
max_input_size = 512
num_output = 256
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
# Initialise the service context with the LLM predictor and prompt helper
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir='./storage')
# load index
index = load_index_from_storage(storage_context, service_context=service_context)
documents = SimpleDirectoryReader('data').load_data()
index.refresh(documents)
index.storage_context.persist(persist_dir="./storage") | [
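# Optional follow-up sketch: query the refreshed index. The question text is a
# placeholder; as_query_engine()/query() are the standard calls on this
# (legacy, pre-0.10) llama_index API surface.
query_engine = index.as_query_engine()
print(query_engine.query("What do the documents in the data directory cover?"))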
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.PromptHelper",
"llama_index.load_index_from_storage"
] | [((403, 464), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', '"""http://localhost:8080/v1"""'], {}), "('OPENAI_API_BASE', 'http://localhost:8080/v1')\n", (417, 464), False, 'import os\n'), ((788, 847), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (800, 847), False, 'from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext\n'), ((910, 1001), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (938, 1001), False, 'from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext\n'), ((1042, 1095), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1070, 1095), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((1118, 1191), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1141, 1191), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((579, 655), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'openai_api_base': 'base_path'}), "(temperature=0, model_name='gpt-3.5-turbo', openai_api_base=base_path)\n", (585, 655), False, 'from langchain.llms.openai import OpenAI\n'), ((1213, 1242), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (1234, 1242), False, 'from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext\n')] |
from memgpt.data_types import Passage, Document, EmbeddingConfig, Source
from memgpt.utils import create_uuid_from_string
from memgpt.agent_store.storage import StorageConnector, TableType
from memgpt.embeddings import embedding_model
from memgpt.data_types import Document, Passage
from typing import List, Iterator, Dict, Tuple, Optional
import typer
from llama_index.core import Document as LlamaIndexDocument
class DataConnector:
def generate_documents(self) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Document]:
pass
def generate_passages(self, documents: List[Document], chunk_size: int = 1024) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Passage]:
pass
def load_data(
connector: DataConnector,
source: Source,
embedding_config: EmbeddingConfig,
passage_store: StorageConnector,
document_store: Optional[StorageConnector] = None,
):
"""Load data from a connector (generates documents and passages) into a specified source_id, associatedw with a user_id."""
assert (
source.embedding_model == embedding_config.embedding_model
), f"Source and embedding config models must match, got: {source.embedding_model} and {embedding_config.embedding_model}"
assert (
source.embedding_dim == embedding_config.embedding_dim
), f"Source and embedding config dimensions must match, got: {source.embedding_dim} and {embedding_config.embedding_dim}."
# embedding model
embed_model = embedding_model(embedding_config)
# insert passages/documents
passages = []
passage_count = 0
document_count = 0
for document_text, document_metadata in connector.generate_documents():
# insert document into storage
document = Document(
id=create_uuid_from_string(f"{str(source.id)}_{document_text}"),
text=document_text,
metadata=document_metadata,
data_source=source.name,
user_id=source.user_id,
)
document_count += 1
if document_store:
document_store.insert(document)
# generate passages
for passage_text, passage_metadata in connector.generate_passages([document], chunk_size=embedding_config.embedding_chunk_size):
try:
embedding = embed_model.get_text_embedding(passage_text)
except Exception as e:
typer.secho(
f"Warning: Failed to get embedding for {passage_text} (error: {str(e)}), skipping insert into VectorDB.",
fg=typer.colors.YELLOW,
)
continue
passage = Passage(
id=create_uuid_from_string(f"{str(source.id)}_{passage_text}"),
text=passage_text,
doc_id=document.id,
metadata_=passage_metadata,
user_id=source.user_id,
data_source=source.name,
embedding_dim=source.embedding_dim,
embedding_model=source.embedding_model,
embedding=embedding,
)
passages.append(passage)
if len(passages) >= embedding_config.embedding_chunk_size:
# insert passages into passage store
passage_store.insert_many(passages)
passage_count += len(passages)
passages = []
if len(passages) > 0:
# insert passages into passage store
passage_store.insert_many(passages)
passage_count += len(passages)
return passage_count, document_count
class DirectoryConnector(DataConnector):
def __init__(self, input_files: List[str] = None, input_directory: str = None, recursive: bool = False, extensions: List[str] = None):
self.connector_type = "directory"
self.input_files = input_files
self.input_directory = input_directory
self.recursive = recursive
self.extensions = extensions
        if self.recursive:
assert self.input_directory is not None, "Must provide input directory if recursive is True."
def generate_documents(self) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Document]:
from llama_index.core import SimpleDirectoryReader
if self.input_directory is not None:
reader = SimpleDirectoryReader(
input_dir=self.input_directory,
recursive=self.recursive,
required_exts=[ext.strip() for ext in str(self.extensions).split(",")],
)
else:
assert self.input_files is not None, "Must provide input files if input_dir is None"
reader = SimpleDirectoryReader(input_files=[str(f) for f in self.input_files])
llama_index_docs = reader.load_data(show_progress=True)
for llama_index_doc in llama_index_docs:
# TODO: add additional metadata?
# doc = Document(text=llama_index_doc.text, metadata=llama_index_doc.metadata)
# docs.append(doc)
yield llama_index_doc.text, llama_index_doc.metadata
def generate_passages(self, documents: List[Document], chunk_size: int = 1024) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Passage]:
        # use llama-index's token splitter to chunk each document into passages
# from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.node_parser import TokenTextSplitter
parser = TokenTextSplitter(chunk_size=chunk_size)
for document in documents:
llama_index_docs = [LlamaIndexDocument(text=document.text, metadata=document.metadata)]
nodes = parser.get_nodes_from_documents(llama_index_docs)
for node in nodes:
# passage = Passage(
# text=node.text,
# doc_id=document.id,
# )
yield node.text, None
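# Minimal usage sketch: iterate a connector directly, outside the vector-store
# plumbing in load_data(). The file paths are placeholders.
def preview_files(paths: List[str], max_chars: int = 80) -> None:
    connector = DirectoryConnector(input_files=paths)
    for text, metadata in connector.generate_documents():
        print(metadata, text[:max_chars])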
class WebConnector(DirectoryConnector):
def __init__(self, urls: List[str] = None, html_to_text: bool = True):
self.urls = urls
self.html_to_text = html_to_text
def generate_documents(self) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Document]:
from llama_index.readers.web import SimpleWebPageReader
documents = SimpleWebPageReader(html_to_text=self.html_to_text).load_data(self.urls)
for document in documents:
yield document.text, {"url": document.id_}
class VectorDBConnector(DataConnector):
# NOTE: this class has not been properly tested, so is unlikely to work
# TODO: allow loading multiple tables (1:1 mapping between Document and Table)
def __init__(
self,
name: str,
uri: str,
table_name: str,
text_column: str,
embedding_column: str,
embedding_dim: int,
):
self.name = name
self.uri = uri
self.table_name = table_name
self.text_column = text_column
self.embedding_column = embedding_column
self.embedding_dim = embedding_dim
# connect to db table
from sqlalchemy import create_engine
self.engine = create_engine(uri)
def generate_documents(self) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Document]:
yield self.table_name, None
def generate_passages(self, documents: List[Document], chunk_size: int = 1024) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Passage]:
from sqlalchemy import select, MetaData, Table, Inspector
from pgvector.sqlalchemy import Vector
metadata = MetaData()
# Create an inspector to inspect the database
inspector = Inspector.from_engine(self.engine)
table_names = inspector.get_table_names()
assert self.table_name in table_names, f"Table {self.table_name} not found in database: tables that exist {table_names}."
table = Table(self.table_name, metadata, autoload_with=self.engine)
# Prepare a select statement
select_statement = select(table.c[self.text_column], table.c[self.embedding_column].cast(Vector(self.embedding_dim)))
# Execute the query and fetch the results
# TODO: paginate results
with self.engine.connect() as connection:
result = connection.execute(select_statement).fetchall()
for text, embedding in result:
# assume that embeddings are the same model as in config
# TODO: don't re-compute embedding
yield text, {"embedding": embedding}
| [
"llama_index.core.node_parser.TokenTextSplitter",
"llama_index.readers.web.SimpleWebPageReader",
"llama_index.core.Document"
] | [((1472, 1505), 'memgpt.embeddings.embedding_model', 'embedding_model', (['embedding_config'], {}), '(embedding_config)\n', (1487, 1505), False, 'from memgpt.embeddings import embedding_model\n'), ((5412, 5452), 'llama_index.core.node_parser.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': 'chunk_size'}), '(chunk_size=chunk_size)\n', (5429, 5452), False, 'from llama_index.core.node_parser import TokenTextSplitter\n'), ((7087, 7105), 'sqlalchemy.create_engine', 'create_engine', (['uri'], {}), '(uri)\n', (7100, 7105), False, 'from sqlalchemy import create_engine\n'), ((7506, 7516), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (7514, 7516), False, 'from sqlalchemy import select, MetaData, Table, Inspector\n'), ((7591, 7625), 'sqlalchemy.Inspector.from_engine', 'Inspector.from_engine', (['self.engine'], {}), '(self.engine)\n', (7612, 7625), False, 'from sqlalchemy import select, MetaData, Table, Inspector\n'), ((7823, 7882), 'sqlalchemy.Table', 'Table', (['self.table_name', 'metadata'], {'autoload_with': 'self.engine'}), '(self.table_name, metadata, autoload_with=self.engine)\n', (7828, 7882), False, 'from sqlalchemy import select, MetaData, Table, Inspector\n'), ((5520, 5586), 'llama_index.core.Document', 'LlamaIndexDocument', ([], {'text': 'document.text', 'metadata': 'document.metadata'}), '(text=document.text, metadata=document.metadata)\n', (5538, 5586), True, 'from llama_index.core import Document as LlamaIndexDocument\n'), ((6221, 6272), 'llama_index.readers.web.SimpleWebPageReader', 'SimpleWebPageReader', ([], {'html_to_text': 'self.html_to_text'}), '(html_to_text=self.html_to_text)\n', (6240, 6272), False, 'from llama_index.readers.web import SimpleWebPageReader\n'), ((8018, 8044), 'pgvector.sqlalchemy.Vector', 'Vector', (['self.embedding_dim'], {}), '(self.embedding_dim)\n', (8024, 8044), False, 'from pgvector.sqlalchemy import Vector\n')] |
import os
from llama_index import SimpleDirectoryReader
from sqlalchemy.orm import Session
from superagi.config.config import get_config
from superagi.helper.resource_helper import ResourceHelper
from superagi.lib.logger import logger
from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory
from superagi.types.model_source_types import ModelSourceType
from superagi.types.vector_store_types import VectorStoreType
from superagi.models.agent import Agent
class ResourceManager:
"""
Resource Manager handles creation of resources and saving them to the vector store.
:param agent_id: The agent id to use when saving resources to the vector store.
"""
def __init__(self, agent_id: str = None):
self.agent_id = agent_id
def create_llama_document(self, file_path: str):
"""
        Loads documents from a given file path.
        :param file_path: The file path to load the documents from.
:return: A list of documents.
"""
if file_path is None:
raise Exception("file_path must be provided")
if os.path.exists(file_path):
documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
return documents
def create_llama_document_s3(self, file_path: str):
"""
        Loads documents from a file stored in S3, given its object key.
        :param file_path: The S3 object key of the file to load documents from.
:return: A list of documents.
"""
if file_path is None:
raise Exception("file_path must be provided")
temporary_file_path = ""
try:
import boto3
s3 = boto3.client(
's3',
aws_access_key_id=get_config("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=get_config("AWS_SECRET_ACCESS_KEY"),
)
bucket_name = get_config("BUCKET_NAME")
file = s3.get_object(Bucket=bucket_name, Key=file_path)
file_name = file_path.split("/")[-1]
save_directory = "/"
temporary_file_path = save_directory + file_name
with open(temporary_file_path, "wb") as f:
contents = file['Body'].read()
f.write(contents)
documents = SimpleDirectoryReader(input_files=[temporary_file_path]).load_data()
return documents
except Exception as e:
logger.error("superagi/resource_manager/resource_manager.py - create_llama_document_s3 threw : ", e)
finally:
if os.path.exists(temporary_file_path):
os.remove(temporary_file_path)
def save_document_to_vector_store(self, documents: list, resource_id: str, mode_api_key: str = None,
model_source: str = ""):
"""
Saves a document to the vector store.
:param documents: The documents to save to the vector store.
:param resource_id: The resource id to use when saving the documents to the vector store.
        :param mode_api_key: The model API key to use when creating embeddings for the vector store.
"""
from llama_index import VectorStoreIndex, StorageContext
if ModelSourceType.GooglePalm.value in model_source or ModelSourceType.Replicate.value in model_source:
logger.info("Resource embedding not supported for Google Palm..")
return
import openai
openai.api_key = get_config("OPENAI_API_KEY") or mode_api_key
os.environ["OPENAI_API_KEY"] = get_config("OPENAI_API_KEY", "") or mode_api_key
for docs in documents:
if docs.metadata is None:
docs.metadata = {}
docs.metadata["agent_id"] = str(self.agent_id)
docs.metadata["resource_id"] = resource_id
vector_store = None
storage_context = None
vector_store_name = VectorStoreType.get_vector_store_type(get_config("RESOURCE_VECTOR_STORE") or "Redis")
vector_store_index_name = get_config("RESOURCE_VECTOR_STORE_INDEX_NAME") or "super-agent-index"
try:
vector_store = LlamaVectorStoreFactory(vector_store_name, vector_store_index_name).get_vector_store()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
except ValueError as e:
logger.error(f"Vector store not found{e}")
try:
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
index.set_index_id(f'Agent {self.agent_id}')
except Exception as e:
logger.error("save_document_to_vector_store - unable to create documents from vector", e)
# persisting the data in case of redis
if vector_store_name == VectorStoreType.REDIS:
vector_store.persist(persist_path="")
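# Minimal usage sketch (paths and ids are placeholders): load documents from a
# local file, then push them into the configured vector store.
def index_local_file(file_path: str, agent_id: str, resource_id: str) -> None:
    manager = ResourceManager(agent_id=agent_id)
    documents = manager.create_llama_document(file_path)
    if documents:
        manager.save_document_to_vector_store(documents, resource_id=resource_id)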
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((1132, 1157), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1146, 1157), False, 'import os\n'), ((1917, 1942), 'superagi.config.config.get_config', 'get_config', (['"""BUCKET_NAME"""'], {}), "('BUCKET_NAME')\n", (1927, 1942), False, 'from superagi.config.config import get_config\n'), ((2589, 2624), 'os.path.exists', 'os.path.exists', (['temporary_file_path'], {}), '(temporary_file_path)\n', (2603, 2624), False, 'import os\n'), ((3367, 3432), 'superagi.lib.logger.logger.info', 'logger.info', (['"""Resource embedding not supported for Google Palm.."""'], {}), "('Resource embedding not supported for Google Palm..')\n", (3378, 3432), False, 'from superagi.lib.logger import logger\n'), ((3499, 3527), 'superagi.config.config.get_config', 'get_config', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (3509, 3527), False, 'from superagi.config.config import get_config\n'), ((3583, 3615), 'superagi.config.config.get_config', 'get_config', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (3593, 3615), False, 'from superagi.config.config import get_config\n'), ((4057, 4103), 'superagi.config.config.get_config', 'get_config', (['"""RESOURCE_VECTOR_STORE_INDEX_NAME"""'], {}), "('RESOURCE_VECTOR_STORE_INDEX_NAME')\n", (4067, 4103), False, 'from superagi.config.config import get_config\n'), ((4284, 4339), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (4312, 4339), False, 'from llama_index import VectorStoreIndex, StorageContext\n'), ((4460, 4535), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context'}), '(documents, storage_context=storage_context)\n', (4491, 4535), False, 'from llama_index import VectorStoreIndex, StorageContext\n'), ((2456, 2566), 'superagi.lib.logger.logger.error', 'logger.error', (['"""superagi/resource_manager/resource_manager.py - create_llama_document_s3 threw : """', 'e'], {}), "(\n 'superagi/resource_manager/resource_manager.py - create_llama_document_s3 threw : '\n , e)\n", (2468, 2566), False, 'from superagi.lib.logger import logger\n'), ((2642, 2672), 'os.remove', 'os.remove', (['temporary_file_path'], {}), '(temporary_file_path)\n', (2651, 2672), False, 'import os\n'), ((3975, 4010), 'superagi.config.config.get_config', 'get_config', (['"""RESOURCE_VECTOR_STORE"""'], {}), "('RESOURCE_VECTOR_STORE')\n", (3985, 4010), False, 'from superagi.config.config import get_config\n'), ((4384, 4426), 'superagi.lib.logger.logger.error', 'logger.error', (['f"""Vector store not found{e}"""'], {}), "(f'Vector store not found{e}')\n", (4396, 4426), False, 'from superagi.lib.logger import logger\n'), ((4636, 4735), 'superagi.lib.logger.logger.error', 'logger.error', (['"""save_document_to_vector_store - unable to create documents from vector"""', 'e'], {}), "(\n 'save_document_to_vector_store - unable to create documents from vector', e\n )\n", (4648, 4735), False, 'from superagi.lib.logger import logger\n'), ((1183, 1229), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]'}), '(input_files=[file_path])\n', (1204, 1229), False, 'from llama_index import SimpleDirectoryReader\n'), ((1769, 1800), 'superagi.config.config.get_config', 'get_config', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (1779, 1800), False, 'from superagi.config.config import get_config\n'), ((1840, 1875), 
'superagi.config.config.get_config', 'get_config', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (1850, 1875), False, 'from superagi.config.config import get_config\n'), ((2315, 2371), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[temporary_file_path]'}), '(input_files=[temporary_file_path])\n', (2336, 2371), False, 'from llama_index import SimpleDirectoryReader\n'), ((4167, 4234), 'superagi.resource_manager.llama_vector_store_factory.LlamaVectorStoreFactory', 'LlamaVectorStoreFactory', (['vector_store_name', 'vector_store_index_name'], {}), '(vector_store_name, vector_store_index_name)\n', (4190, 4234), False, 'from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory\n')] |
import os
from argparse import Namespace, _SubParsersAction
from llama_index import SimpleDirectoryReader
from .configuration import load_index, save_index
def add_cli(args: Namespace) -> None:
"""Handle subcommand "add"."""
index = load_index()
for p in args.files:
if not os.path.exists(p):
raise FileNotFoundError(p)
if os.path.isdir(p):
documents = SimpleDirectoryReader(p).load_data()
for document in documents:
index.insert(document)
else:
documents = SimpleDirectoryReader(input_files=[p]).load_data()
for document in documents:
index.insert(document)
save_index(index)
def register_add_cli(subparsers: _SubParsersAction) -> None:
"""Register subcommand "add" to ArgumentParser."""
parser = subparsers.add_parser("add")
parser.add_argument(
"files",
default=".",
nargs="+",
help="Files to add",
)
parser.set_defaults(func=add_cli)
| [
"llama_index.SimpleDirectoryReader"
] | [((368, 384), 'os.path.isdir', 'os.path.isdir', (['p'], {}), '(p)\n', (381, 384), False, 'import os\n'), ((299, 316), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (313, 316), False, 'import os\n'), ((410, 434), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['p'], {}), '(p)\n', (431, 434), False, 'from llama_index import SimpleDirectoryReader\n'), ((563, 601), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[p]'}), '(input_files=[p])\n', (584, 601), False, 'from llama_index import SimpleDirectoryReader\n')] |
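A minimal wiring sketch for the "add" subcommand above; the root parser and `main` entry point are illustrative assumptions, not part of the source module.

from argparse import ArgumentParser

def main() -> None:
    # Hypothetical entry point: build a root parser, register the "add"
    # subcommand defined above, then dispatch to add_cli via args.func.
    parser = ArgumentParser(prog="index-cli")
    subparsers = parser.add_subparsers(dest="command", required=True)
    register_add_cli(subparsers)
    args = parser.parse_args()
    args.func(args)

if __name__ == "__main__":
    main()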
from typing import Dict, List, Type
from llama_index.agent import OpenAIAgent, ReActAgent
from llama_index.agent.types import BaseAgent
from llama_index.llms import Anthropic, OpenAI
from llama_index.llms.llama_utils import messages_to_prompt
from llama_index.llms.llm import LLM
from llama_index.llms.replicate import Replicate
OPENAI_MODELS = [
"text-davinci-003",
"gpt-3.5-turbo-0613",
"gpt-4-0613",
]
ANTHROPIC_MODELS = ["claude-instant-1", "claude-instant-1.2", "claude-2", "claude-2.0"]
LLAMA_MODELS = [
"llama13b-v2-chat",
"llama70b-v2-chat",
]
REPLICATE_MODELS: List[str] = []
ALL_MODELS = OPENAI_MODELS + ANTHROPIC_MODELS + LLAMA_MODELS
AGENTS: Dict[str, Type[BaseAgent]] = {
"react": ReActAgent,
"openai": OpenAIAgent,
}
LLAMA_13B_V2_CHAT = (
"a16z-infra/llama13b-v2-chat:"
"df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5"
)
LLAMA_70B_V2_CHAT = (
"replicate/llama70b-v2-chat:"
"e951f18578850b652510200860fc4ea62b3b16fac280f83ff32282f87bbd2e48"
)
def get_model(model: str) -> LLM:
llm: LLM
if model in OPENAI_MODELS:
llm = OpenAI(model=model)
elif model in ANTHROPIC_MODELS:
llm = Anthropic(model=model)
elif model in LLAMA_MODELS:
model_dict = {
"llama13b-v2-chat": LLAMA_13B_V2_CHAT,
"llama70b-v2-chat": LLAMA_70B_V2_CHAT,
}
replicate_model = model_dict[model]
llm = Replicate(
model=replicate_model,
temperature=0.01,
context_window=4096,
# override message representation for llama 2
messages_to_prompt=messages_to_prompt,
)
else:
raise ValueError(f"Unknown model {model}")
return llm
def is_valid_combination(agent: str, model: str) -> bool:
if agent == "openai" and model not in ["gpt-3.5-turbo-0613", "gpt-4-0613"]:
print(f"{agent} does not work with {model}")
return False
return True
| [
"llama_index.llms.Anthropic",
"llama_index.llms.OpenAI",
"llama_index.llms.replicate.Replicate"
] | [((1116, 1135), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'model'}), '(model=model)\n', (1122, 1135), False, 'from llama_index.llms import Anthropic, OpenAI\n'), ((1186, 1208), 'llama_index.llms.Anthropic', 'Anthropic', ([], {'model': 'model'}), '(model=model)\n', (1195, 1208), False, 'from llama_index.llms import Anthropic, OpenAI\n'), ((1434, 1548), 'llama_index.llms.replicate.Replicate', 'Replicate', ([], {'model': 'replicate_model', 'temperature': '(0.01)', 'context_window': '(4096)', 'messages_to_prompt': 'messages_to_prompt'}), '(model=replicate_model, temperature=0.01, context_window=4096,\n messages_to_prompt=messages_to_prompt)\n', (1443, 1548), False, 'from llama_index.llms.replicate import Replicate\n')] |
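An illustrative pairing of the helpers above (not from the source); the `multiply` tool is a placeholder, and the example assumes the legacy pre-0.10 `llama_index.tools` namespace that this module already uses.

from llama_index.tools import FunctionTool

def multiply(a: int, b: int) -> int:
    """Multiply two integers."""
    return a * b

agent_name, model_name = "openai", "gpt-4-0613"
if is_valid_combination(agent_name, model_name):
    llm = get_model(model_name)
    # Both ReActAgent and OpenAIAgent expose a from_tools constructor.
    agent = AGENTS[agent_name].from_tools(
        tools=[FunctionTool.from_defaults(fn=multiply)],
        llm=llm,
        verbose=True,
    )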
import asyncio
import os
import shutil
from argparse import ArgumentParser
from glob import iglob
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Union, cast
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
)
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.base.response.schema import (
RESPONSE_TYPE,
StreamingResponse,
Response,
)
from llama_index.core.bridge.pydantic import BaseModel, Field, validator
from llama_index.core.chat_engine import CondenseQuestionChatEngine
from llama_index.core.indices.service_context import ServiceContext
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.llms import LLM
from llama_index.core.query_engine import CustomQueryEngine
from llama_index.core.query_pipeline.components.function import FnComponent
from llama_index.core.query_pipeline.query import QueryPipeline
from llama_index.core.readers.base import BaseReader
from llama_index.core.response_synthesizers import CompactAndRefine
from llama_index.core.utils import get_cache_dir
def _try_load_openai_llm():
try:
from llama_index.llms.openai import OpenAI # pants: no-infer-dep
return OpenAI(model="gpt-3.5-turbo", streaming=True)
except ImportError:
raise ImportError(
"`llama-index-llms-openai` package not found, "
"please run `pip install llama-index-llms-openai`"
)
RAG_HISTORY_FILE_NAME = "files_history.txt"
def default_ragcli_persist_dir() -> str:
return str(Path(get_cache_dir()) / "rag_cli")
def query_input(query_str: Optional[str] = None) -> str:
return query_str or ""
class QueryPipelineQueryEngine(CustomQueryEngine):
query_pipeline: QueryPipeline = Field(
description="Query Pipeline to use for Q&A.",
)
def custom_query(self, query_str: str) -> RESPONSE_TYPE:
return self.query_pipeline.run(query_str=query_str)
async def acustom_query(self, query_str: str) -> RESPONSE_TYPE:
return await self.query_pipeline.arun(query_str=query_str)
class RagCLI(BaseModel):
"""
    CLI tool for chatting with the output of an IngestionPipeline via a QueryPipeline.
"""
ingestion_pipeline: IngestionPipeline = Field(
description="Ingestion pipeline to run for RAG ingestion."
)
verbose: bool = Field(
description="Whether to print out verbose information during execution.",
default=False,
)
persist_dir: str = Field(
description="Directory to persist ingestion pipeline.",
default_factory=default_ragcli_persist_dir,
)
llm: LLM = Field(
description="Language model to use for response generation.",
default_factory=lambda: _try_load_openai_llm(),
)
query_pipeline: Optional[QueryPipeline] = Field(
description="Query Pipeline to use for Q&A.",
default=None,
)
chat_engine: Optional[CondenseQuestionChatEngine] = Field(
description="Chat engine to use for chatting.",
default_factory=None,
)
file_extractor: Optional[Dict[str, BaseReader]] = Field(
description="File extractor to use for extracting text from files.",
default=None,
)
class Config:
arbitrary_types_allowed = True
@validator("query_pipeline", always=True)
def query_pipeline_from_ingestion_pipeline(
cls, query_pipeline: Any, values: Dict[str, Any]
) -> Optional[QueryPipeline]:
"""
If query_pipeline is not provided, create one from ingestion_pipeline.
"""
if query_pipeline is not None:
return query_pipeline
ingestion_pipeline = cast(IngestionPipeline, values["ingestion_pipeline"])
if ingestion_pipeline.vector_store is None:
return None
verbose = cast(bool, values["verbose"])
query_component = FnComponent(
fn=query_input, output_key="output", req_params={"query_str"}
)
llm = cast(LLM, values["llm"])
# get embed_model from transformations if possible
embed_model = None
if ingestion_pipeline.transformations is not None:
for transformation in ingestion_pipeline.transformations:
if isinstance(transformation, BaseEmbedding):
embed_model = transformation
break
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=embed_model or "default"
)
retriever = VectorStoreIndex.from_vector_store(
ingestion_pipeline.vector_store, service_context=service_context
).as_retriever(similarity_top_k=8)
response_synthesizer = CompactAndRefine(
service_context=service_context, streaming=True, verbose=verbose
)
# define query pipeline
query_pipeline = QueryPipeline(verbose=verbose)
query_pipeline.add_modules(
{
"query": query_component,
"retriever": retriever,
"summarizer": response_synthesizer,
}
)
query_pipeline.add_link("query", "retriever")
query_pipeline.add_link("retriever", "summarizer", dest_key="nodes")
query_pipeline.add_link("query", "summarizer", dest_key="query_str")
return query_pipeline
@validator("chat_engine", always=True)
def chat_engine_from_query_pipeline(
cls, chat_engine: Any, values: Dict[str, Any]
) -> Optional[CondenseQuestionChatEngine]:
"""
If chat_engine is not provided, create one from query_pipeline.
"""
if chat_engine is not None:
return chat_engine
if values.get("query_pipeline", None) is None:
values["query_pipeline"] = cls.query_pipeline_from_ingestion_pipeline(
query_pipeline=None, values=values
)
query_pipeline = cast(QueryPipeline, values["query_pipeline"])
if query_pipeline is None:
return None
query_engine = QueryPipelineQueryEngine(query_pipeline=query_pipeline) # type: ignore
verbose = cast(bool, values["verbose"])
llm = cast(LLM, values["llm"])
return CondenseQuestionChatEngine.from_defaults(
query_engine=query_engine, llm=llm, verbose=verbose
)
async def handle_cli(
self,
files: Optional[str] = None,
question: Optional[str] = None,
chat: bool = False,
verbose: bool = False,
clear: bool = False,
create_llama: bool = False,
**kwargs: Dict[str, Any],
) -> None:
"""
Entrypoint for local document RAG CLI tool.
"""
if clear:
# delete self.persist_dir directory including all subdirectories and files
if os.path.exists(self.persist_dir):
# Ask for confirmation
response = input(
f"Are you sure you want to delete data within {self.persist_dir}? [y/N] "
)
if response.strip().lower() != "y":
print("Aborted.")
return
os.system(f"rm -rf {self.persist_dir}")
print(f"Successfully cleared {self.persist_dir}")
self.verbose = verbose
ingestion_pipeline = cast(IngestionPipeline, self.ingestion_pipeline)
if self.verbose:
print("Saving/Loading from persist_dir: ", self.persist_dir)
if files is not None:
documents = []
for _file in iglob(files, recursive=True):
_file = os.path.abspath(_file)
if os.path.isdir(_file):
reader = SimpleDirectoryReader(
input_dir=_file,
filename_as_id=True,
file_extractor=self.file_extractor,
)
else:
reader = SimpleDirectoryReader(
input_files=[_file],
filename_as_id=True,
file_extractor=self.file_extractor,
)
documents.extend(reader.load_data(show_progress=verbose))
await ingestion_pipeline.arun(show_progress=verbose, documents=documents)
ingestion_pipeline.persist(persist_dir=self.persist_dir)
# Append the `--files` argument to the history file
with open(f"{self.persist_dir}/{RAG_HISTORY_FILE_NAME}", "a") as f:
f.write(files + "\n")
if create_llama:
if shutil.which("npx") is None:
print(
"`npx` is not installed. Please install it by calling `npm install -g npx`"
)
else:
history_file_path = Path(f"{self.persist_dir}/{RAG_HISTORY_FILE_NAME}")
if not history_file_path.exists():
print(
"No data has been ingested, "
"please specify `--files` to create llama dataset."
)
else:
with open(history_file_path) as f:
stored_paths = {line.strip() for line in f if line.strip()}
if len(stored_paths) == 0:
print(
"No data has been ingested, "
"please specify `--files` to create llama dataset."
)
elif len(stored_paths) > 1:
print(
"Multiple files or folders were ingested, which is not supported by create-llama. "
"Please call `llamaindex-cli rag --clear` to clear the cache first, "
"then call `llamaindex-cli rag --files` again with a single folder or file"
)
else:
path = stored_paths.pop()
if "*" in path:
print(
"Glob pattern is not supported by create-llama. "
"Please call `llamaindex-cli rag --clear` to clear the cache first, "
"then call `llamaindex-cli rag --files` again with a single folder or file."
)
elif not os.path.exists(path):
print(
f"The path {path} does not exist. "
"Please call `llamaindex-cli rag --clear` to clear the cache first, "
"then call `llamaindex-cli rag --files` again with a single folder or file."
)
else:
print(f"Calling create-llama using data from {path} ...")
command_args = [
"npx",
"create-llama@latest",
"--frontend",
"--template",
"streaming",
"--framework",
"fastapi",
"--ui",
"shadcn",
"--vector-db",
"none",
"--engine",
"context",
f"--files {path}",
]
os.system(" ".join(command_args))
if question is not None:
await self.handle_question(question)
if chat:
await self.start_chat_repl()
async def handle_question(self, question: str) -> None:
if self.query_pipeline is None:
raise ValueError("query_pipeline is not defined.")
query_pipeline = cast(QueryPipeline, self.query_pipeline)
query_pipeline.verbose = self.verbose
chat_engine = cast(CondenseQuestionChatEngine, self.chat_engine)
response = chat_engine.chat(question)
if isinstance(response, StreamingResponse):
response.print_response_stream()
else:
response = cast(Response, response)
print(response)
async def start_chat_repl(self) -> None:
"""
Start a REPL for chatting with the agent.
"""
if self.query_pipeline is None:
raise ValueError("query_pipeline is not defined.")
chat_engine = cast(CondenseQuestionChatEngine, self.chat_engine)
chat_engine.streaming_chat_repl()
@classmethod
def add_parser_args(
cls,
parser: Union[ArgumentParser, Any],
instance_generator: Optional[Callable[[], "RagCLI"]],
) -> None:
if instance_generator:
parser.add_argument(
"-q",
"--question",
type=str,
help="The question you want to ask.",
required=False,
)
parser.add_argument(
"-f",
"--files",
type=str,
help=(
"The name of the file or directory you want to ask a question about,"
'such as "file.pdf".'
),
)
parser.add_argument(
"-c",
"--chat",
help="If flag is present, opens a chat REPL.",
action="store_true",
)
parser.add_argument(
"-v",
"--verbose",
help="Whether to print out verbose information during execution.",
action="store_true",
)
parser.add_argument(
"--clear",
help="Clears out all currently embedded data.",
action="store_true",
)
parser.add_argument(
"--create-llama",
help="Create a LlamaIndex application with your embedded data.",
required=False,
action="store_true",
)
parser.set_defaults(
func=lambda args: asyncio.run(
instance_generator().handle_cli(**vars(args))
)
)
def cli(self) -> None:
"""
Entrypoint for CLI tool.
"""
parser = ArgumentParser(description="LlamaIndex RAG Q&A tool.")
subparsers = parser.add_subparsers(
title="commands", dest="command", required=True
)
llamarag_parser = subparsers.add_parser(
"rag", help="Ask a question to a document / a directory of documents."
)
self.add_parser_args(llamarag_parser, lambda: self)
# Parse the command-line arguments
args = parser.parse_args()
# Call the appropriate function based on the command
args.func(args)
| [
"llama_index.llms.openai.OpenAI",
"llama_index.core.bridge.pydantic.validator",
"llama_index.core.VectorStoreIndex.from_vector_store",
"llama_index.core.indices.service_context.ServiceContext.from_defaults",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.query_pipeline.components.function.FnComponent",
"llama_index.core.utils.get_cache_dir",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.query_pipeline.query.QueryPipeline",
"llama_index.core.response_synthesizers.CompactAndRefine",
"llama_index.core.chat_engine.CondenseQuestionChatEngine.from_defaults"
] | [((1789, 1840), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Query Pipeline to use for Q&A."""'}), "(description='Query Pipeline to use for Q&A.')\n", (1794, 1840), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2284, 2349), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Ingestion pipeline to run for RAG ingestion."""'}), "(description='Ingestion pipeline to run for RAG ingestion.')\n", (2289, 2349), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2384, 2488), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Whether to print out verbose information during execution."""', 'default': '(False)'}), "(description=\n 'Whether to print out verbose information during execution.', default=False\n )\n", (2389, 2488), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2525, 2634), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Directory to persist ingestion pipeline."""', 'default_factory': 'default_ragcli_persist_dir'}), "(description='Directory to persist ingestion pipeline.',\n default_factory=default_ragcli_persist_dir)\n", (2530, 2634), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2854, 2919), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Query Pipeline to use for Q&A."""', 'default': 'None'}), "(description='Query Pipeline to use for Q&A.', default=None)\n", (2859, 2919), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2999, 3074), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Chat engine to use for chatting."""', 'default_factory': 'None'}), "(description='Chat engine to use for chatting.', default_factory=None)\n", (3004, 3074), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((3152, 3244), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""File extractor to use for extracting text from files."""', 'default': 'None'}), "(description='File extractor to use for extracting text from files.',\n default=None)\n", (3157, 3244), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((3328, 3368), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""query_pipeline"""'], {'always': '(True)'}), "('query_pipeline', always=True)\n", (3337, 3368), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((5385, 5422), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""chat_engine"""'], {'always': '(True)'}), "('chat_engine', always=True)\n", (5394, 5422), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((1245, 1290), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'streaming': '(True)'}), "(model='gpt-3.5-turbo', streaming=True)\n", (1251, 1290), False, 'from llama_index.llms.openai import OpenAI\n'), ((3714, 3767), 'typing.cast', 'cast', (['IngestionPipeline', "values['ingestion_pipeline']"], {}), "(IngestionPipeline, values['ingestion_pipeline'])\n", (3718, 3767), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((3862, 3891), 'typing.cast', 'cast', (['bool', "values['verbose']"], {}), "(bool, values['verbose'])\n", (3866, 3891), False, 'from typing import Any, Callable, Dict, Optional, 
Union, cast\n'), ((3918, 3992), 'llama_index.core.query_pipeline.components.function.FnComponent', 'FnComponent', ([], {'fn': 'query_input', 'output_key': '"""output"""', 'req_params': "{'query_str'}"}), "(fn=query_input, output_key='output', req_params={'query_str'})\n", (3929, 3992), False, 'from llama_index.core.query_pipeline.components.function import FnComponent\n'), ((4029, 4053), 'typing.cast', 'cast', (['LLM', "values['llm']"], {}), "(LLM, values['llm'])\n", (4033, 4053), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((4434, 4509), 'llama_index.core.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': "(embed_model or 'default')"}), "(llm=llm, embed_model=embed_model or 'default')\n", (4462, 4509), False, 'from llama_index.core.indices.service_context import ServiceContext\n'), ((4739, 4826), 'llama_index.core.response_synthesizers.CompactAndRefine', 'CompactAndRefine', ([], {'service_context': 'service_context', 'streaming': '(True)', 'verbose': 'verbose'}), '(service_context=service_context, streaming=True, verbose=\n verbose)\n', (4755, 4826), False, 'from llama_index.core.response_synthesizers import CompactAndRefine\n'), ((4902, 4932), 'llama_index.core.query_pipeline.query.QueryPipeline', 'QueryPipeline', ([], {'verbose': 'verbose'}), '(verbose=verbose)\n', (4915, 4932), False, 'from llama_index.core.query_pipeline.query import QueryPipeline\n'), ((5958, 6003), 'typing.cast', 'cast', (['QueryPipeline', "values['query_pipeline']"], {}), "(QueryPipeline, values['query_pipeline'])\n", (5962, 6003), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((6176, 6205), 'typing.cast', 'cast', (['bool', "values['verbose']"], {}), "(bool, values['verbose'])\n", (6180, 6205), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((6220, 6244), 'typing.cast', 'cast', (['LLM', "values['llm']"], {}), "(LLM, values['llm'])\n", (6224, 6244), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((6260, 6357), 'llama_index.core.chat_engine.CondenseQuestionChatEngine.from_defaults', 'CondenseQuestionChatEngine.from_defaults', ([], {'query_engine': 'query_engine', 'llm': 'llm', 'verbose': 'verbose'}), '(query_engine=query_engine, llm=llm,\n verbose=verbose)\n', (6300, 6357), False, 'from llama_index.core.chat_engine import CondenseQuestionChatEngine\n'), ((7378, 7426), 'typing.cast', 'cast', (['IngestionPipeline', 'self.ingestion_pipeline'], {}), '(IngestionPipeline, self.ingestion_pipeline)\n', (7382, 7426), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((12054, 12094), 'typing.cast', 'cast', (['QueryPipeline', 'self.query_pipeline'], {}), '(QueryPipeline, self.query_pipeline)\n', (12058, 12094), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((12163, 12213), 'typing.cast', 'cast', (['CondenseQuestionChatEngine', 'self.chat_engine'], {}), '(CondenseQuestionChatEngine, self.chat_engine)\n', (12167, 12213), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((12693, 12743), 'typing.cast', 'cast', (['CondenseQuestionChatEngine', 'self.chat_engine'], {}), '(CondenseQuestionChatEngine, self.chat_engine)\n', (12697, 12743), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((14601, 14655), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""LlamaIndex RAG Q&A tool."""'}), "(description='LlamaIndex RAG Q&A tool.')\n", 
(14615, 14655), False, 'from argparse import ArgumentParser\n'), ((6863, 6895), 'os.path.exists', 'os.path.exists', (['self.persist_dir'], {}), '(self.persist_dir)\n', (6877, 6895), False, 'import os\n'), ((7607, 7635), 'glob.iglob', 'iglob', (['files'], {'recursive': '(True)'}), '(files, recursive=True)\n', (7612, 7635), False, 'from glob import iglob\n'), ((12395, 12419), 'typing.cast', 'cast', (['Response', 'response'], {}), '(Response, response)\n', (12399, 12419), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((1584, 1599), 'llama_index.core.utils.get_cache_dir', 'get_cache_dir', ([], {}), '()\n', (1597, 1599), False, 'from llama_index.core.utils import get_cache_dir\n'), ((4552, 4656), 'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['ingestion_pipeline.vector_store'], {'service_context': 'service_context'}), '(ingestion_pipeline.vector_store,\n service_context=service_context)\n', (4586, 4656), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((7215, 7254), 'os.system', 'os.system', (['f"""rm -rf {self.persist_dir}"""'], {}), "(f'rm -rf {self.persist_dir}')\n", (7224, 7254), False, 'import os\n'), ((7661, 7683), 'os.path.abspath', 'os.path.abspath', (['_file'], {}), '(_file)\n', (7676, 7683), False, 'import os\n'), ((7703, 7723), 'os.path.isdir', 'os.path.isdir', (['_file'], {}), '(_file)\n', (7716, 7723), False, 'import os\n'), ((8646, 8665), 'shutil.which', 'shutil.which', (['"""npx"""'], {}), "('npx')\n", (8658, 8665), False, 'import shutil\n'), ((8866, 8917), 'pathlib.Path', 'Path', (['f"""{self.persist_dir}/{RAG_HISTORY_FILE_NAME}"""'], {}), "(f'{self.persist_dir}/{RAG_HISTORY_FILE_NAME}')\n", (8870, 8917), False, 'from pathlib import Path\n'), ((7754, 7854), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '_file', 'filename_as_id': '(True)', 'file_extractor': 'self.file_extractor'}), '(input_dir=_file, filename_as_id=True, file_extractor=\n self.file_extractor)\n', (7775, 7854), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((7996, 8099), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[_file]', 'filename_as_id': '(True)', 'file_extractor': 'self.file_extractor'}), '(input_files=[_file], filename_as_id=True,\n file_extractor=self.file_extractor)\n', (8017, 8099), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((10477, 10497), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (10491, 10497), False, 'import os\n')] |
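A hedged construction sketch for RagCLI as defined above. The Chroma-backed vector store and OpenAI embedding are assumptions (they require the chromadb, llama-index-vector-stores-chroma, and llama-index-embeddings-openai packages) and are not part of this module.

import chromadb
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore

# In-memory Chroma collection; a persistent client could be used instead.
client = chromadb.EphemeralClient()
collection = client.get_or_create_collection("rag_cli_demo")
vector_store = ChromaVectorStore(chroma_collection=collection)

# Supplying a vector store lets query_pipeline_from_ingestion_pipeline build a retriever.
ingestion_pipeline = IngestionPipeline(
    transformations=[SentenceSplitter(), OpenAIEmbedding()],
    vector_store=vector_store,
)

rag_cli = RagCLI(ingestion_pipeline=ingestion_pipeline)

if __name__ == "__main__":
    rag_cli.cli()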
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, List, Optional
if TYPE_CHECKING:
from llama_index.core.service_context import ServiceContext
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.callbacks.base import BaseCallbackHandler, CallbackManager
from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.llms import LLM
from llama_index.core.llms.utils import LLMType, resolve_llm
from llama_index.core.node_parser import NodeParser, SentenceSplitter
from llama_index.core.schema import TransformComponent
from llama_index.core.types import PydanticProgramMode
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
@dataclass
class _Settings:
"""Settings for the Llama Index, lazily initialized."""
# lazy initialization
_llm: Optional[LLM] = None
_embed_model: Optional[BaseEmbedding] = None
_callback_manager: Optional[CallbackManager] = None
_tokenizer: Optional[Callable[[str], List[Any]]] = None
_node_parser: Optional[NodeParser] = None
_prompt_helper: Optional[PromptHelper] = None
_transformations: Optional[List[TransformComponent]] = None
# ---- LLM ----
@property
def llm(self) -> LLM:
"""Get the LLM."""
if self._llm is None:
self._llm = resolve_llm("default")
if self._callback_manager is not None:
self._llm.callback_manager = self._callback_manager
return self._llm
@llm.setter
def llm(self, llm: LLMType) -> None:
"""Set the LLM."""
self._llm = resolve_llm(llm)
@property
def pydantic_program_mode(self) -> PydanticProgramMode:
"""Get the pydantic program mode."""
return self.llm.pydantic_program_mode
@pydantic_program_mode.setter
def pydantic_program_mode(self, pydantic_program_mode: PydanticProgramMode) -> None:
"""Set the pydantic program mode."""
self.llm.pydantic_program_mode = pydantic_program_mode
# ---- Embedding ----
@property
def embed_model(self) -> BaseEmbedding:
"""Get the embedding model."""
if self._embed_model is None:
self._embed_model = resolve_embed_model("default")
if self._callback_manager is not None:
self._embed_model.callback_manager = self._callback_manager
return self._embed_model
@embed_model.setter
def embed_model(self, embed_model: EmbedType) -> None:
"""Set the embedding model."""
self._embed_model = resolve_embed_model(embed_model)
# ---- Callbacks ----
@property
def global_handler(self) -> Optional[BaseCallbackHandler]:
"""Get the global handler."""
import llama_index.core
# TODO: deprecated?
return llama_index.core.global_handler
@global_handler.setter
def global_handler(self, eval_mode: str, **eval_params: Any) -> None:
"""Set the global handler."""
from llama_index.core import set_global_handler
# TODO: deprecated?
set_global_handler(eval_mode, **eval_params)
@property
def callback_manager(self) -> CallbackManager:
"""Get the callback manager."""
if self._callback_manager is None:
self._callback_manager = CallbackManager()
return self._callback_manager
@callback_manager.setter
def callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set the callback manager."""
self._callback_manager = callback_manager
# ---- Tokenizer ----
@property
def tokenizer(self) -> Callable[[str], List[Any]]:
"""Get the tokenizer."""
import llama_index.core
if llama_index.core.global_tokenizer is None:
return get_tokenizer()
# TODO: deprecated?
return llama_index.core.global_tokenizer
@tokenizer.setter
def tokenizer(self, tokenizer: Callable[[str], List[Any]]) -> None:
"""Set the tokenizer."""
try:
from transformers import PreTrainedTokenizerBase # pants: no-infer-dep
if isinstance(tokenizer, PreTrainedTokenizerBase):
from functools import partial
tokenizer = partial(tokenizer.encode, add_special_tokens=False)
except ImportError:
pass
# TODO: deprecated?
set_global_tokenizer(tokenizer)
# ---- Node parser ----
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
if self._node_parser is None:
self._node_parser = SentenceSplitter()
if self._callback_manager is not None:
self._node_parser.callback_manager = self._callback_manager
return self._node_parser
@node_parser.setter
def node_parser(self, node_parser: NodeParser) -> None:
"""Set the node parser."""
self._node_parser = node_parser
@property
def chunk_size(self) -> int:
"""Get the chunk size."""
if hasattr(self.node_parser, "chunk_size"):
return self.node_parser.chunk_size
else:
raise ValueError("Configured node parser does not have chunk size.")
@chunk_size.setter
def chunk_size(self, chunk_size: int) -> None:
"""Set the chunk size."""
if hasattr(self.node_parser, "chunk_size"):
self.node_parser.chunk_size = chunk_size
else:
raise ValueError("Configured node parser does not have chunk size.")
@property
def chunk_overlap(self) -> int:
"""Get the chunk overlap."""
if hasattr(self.node_parser, "chunk_overlap"):
return self.node_parser.chunk_overlap
else:
raise ValueError("Configured node parser does not have chunk overlap.")
@chunk_overlap.setter
def chunk_overlap(self, chunk_overlap: int) -> None:
"""Set the chunk overlap."""
if hasattr(self.node_parser, "chunk_overlap"):
self.node_parser.chunk_overlap = chunk_overlap
else:
raise ValueError("Configured node parser does not have chunk overlap.")
# ---- Node parser alias ----
@property
def text_splitter(self) -> NodeParser:
"""Get the text splitter."""
return self.node_parser
@text_splitter.setter
def text_splitter(self, text_splitter: NodeParser) -> None:
"""Set the text splitter."""
self.node_parser = text_splitter
@property
def prompt_helper(self) -> PromptHelper:
"""Get the prompt helper."""
if self._llm is not None and self._prompt_helper is None:
self._prompt_helper = PromptHelper.from_llm_metadata(self._llm.metadata)
elif self._prompt_helper is None:
self._prompt_helper = PromptHelper()
return self._prompt_helper
@prompt_helper.setter
def prompt_helper(self, prompt_helper: PromptHelper) -> None:
"""Set the prompt helper."""
self._prompt_helper = prompt_helper
@property
def num_output(self) -> int:
"""Get the number of outputs."""
return self.prompt_helper.num_output
@num_output.setter
def num_output(self, num_output: int) -> None:
"""Set the number of outputs."""
self.prompt_helper.num_output = num_output
@property
def context_window(self) -> int:
"""Get the context window."""
return self.prompt_helper.context_window
@context_window.setter
def context_window(self, context_window: int) -> None:
"""Set the context window."""
self.prompt_helper.context_window = context_window
# ---- Transformations ----
@property
def transformations(self) -> List[TransformComponent]:
"""Get the transformations."""
if self._transformations is None:
self._transformations = [self.node_parser]
return self._transformations
@transformations.setter
def transformations(self, transformations: List[TransformComponent]) -> None:
"""Set the transformations."""
self._transformations = transformations
# Singleton
Settings = _Settings()
# -- Helper functions for deprecation/migration --
def llm_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> LLM:
"""Get settings from either settings or context."""
if context is not None:
return context.llm
return settings.llm
def embed_model_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> BaseEmbedding:
"""Get settings from either settings or context."""
if context is not None:
return context.embed_model
return settings.embed_model
def callback_manager_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> CallbackManager:
"""Get settings from either settings or context."""
if context is not None:
return context.callback_manager
return settings.callback_manager
def node_parser_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> NodeParser:
"""Get settings from either settings or context."""
if context is not None:
return context.node_parser
return settings.node_parser
def transformations_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> List[TransformComponent]:
"""Get settings from either settings or context."""
if context is not None:
return context.transformations
return settings.transformations
| [
"llama_index.core.llms.utils.resolve_llm",
"llama_index.core.utils.get_tokenizer",
"llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.core.embeddings.utils.resolve_embed_model",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.core.set_global_handler",
"llama_index.core.indices.prompt_helper.PromptHelper",
"llama_index.core.utils.set_global_tokenizer"
] | [((1701, 1717), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (1712, 1717), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((2647, 2679), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (2666, 2679), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((3164, 3208), 'llama_index.core.set_global_handler', 'set_global_handler', (['eval_mode'], {}), '(eval_mode, **eval_params)\n', (3182, 3208), False, 'from llama_index.core import set_global_handler\n'), ((4474, 4505), 'llama_index.core.utils.set_global_tokenizer', 'set_global_tokenizer', (['tokenizer'], {}), '(tokenizer)\n', (4494, 4505), False, 'from llama_index.core.utils import get_tokenizer, set_global_tokenizer\n'), ((1435, 1457), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['"""default"""'], {}), "('default')\n", (1446, 1457), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((2311, 2341), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['"""default"""'], {}), "('default')\n", (2330, 2341), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((3395, 3412), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (3410, 3412), False, 'from llama_index.core.callbacks.base import BaseCallbackHandler, CallbackManager\n'), ((3882, 3897), 'llama_index.core.utils.get_tokenizer', 'get_tokenizer', ([], {}), '()\n', (3895, 3897), False, 'from llama_index.core.utils import get_tokenizer, set_global_tokenizer\n'), ((4696, 4714), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (4712, 4714), False, 'from llama_index.core.node_parser import NodeParser, SentenceSplitter\n'), ((6766, 6816), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', (['self._llm.metadata'], {}), '(self._llm.metadata)\n', (6796, 6816), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((4340, 4391), 'functools.partial', 'partial', (['tokenizer.encode'], {'add_special_tokens': '(False)'}), '(tokenizer.encode, add_special_tokens=False)\n', (4347, 4391), False, 'from functools import partial\n'), ((6893, 6907), 'llama_index.core.indices.prompt_helper.PromptHelper', 'PromptHelper', ([], {}), '()\n', (6905, 6907), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n')] |
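A short sketch of configuring the Settings singleton defined above; the OpenAI model and embedding choices are placeholders and assume the optional llama-index-llms-openai / llama-index-embeddings-openai packages are installed.

from llama_index.core import Settings
from llama_index.llms.openai import OpenAI
from llama_index.embeddings.openai import OpenAIEmbedding

Settings.llm = OpenAI(model="gpt-3.5-turbo")
Settings.embed_model = OpenAIEmbedding()

# chunk_size / chunk_overlap are proxied onto the default SentenceSplitter node parser.
Settings.chunk_size = 512
Settings.chunk_overlap = 20

# Falls back to the default tokenizer when no global tokenizer has been set.
print(len(Settings.tokenizer("hello world")))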
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset("CovidQaDataset", "./data")
# BUILD BASIC RAG PIPELINE
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
# EVALUATE WITH PACK
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack")
rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)
############################################################################
    # NOTE: If you have a lower tier subscription for the OpenAI API, such as  #
    # Usage Tier 1, you'll need to use a different batch_size and              #
    # sleep_time_in_seconds. For Usage Tier 1, settings that seemed to work    #
    # well were batch_size=5 and sleep_time_in_seconds=15 (as of Dec. 2023).   #
############################################################################
benchmark_df = await rag_evaluator.arun(
batch_size=40, # batches the number of openai api calls to make
sleep_time_in_seconds=1, # number of seconds sleep before making an api call
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack"
] | [((265, 315), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""CovidQaDataset"""', '"""./data"""'], {}), "('CovidQaDataset', './data')\n", (287, 315), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((360, 412), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (391, 412), False, 'from llama_index.core import VectorStoreIndex\n'), ((505, 554), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack"""'], {}), "('RagEvaluatorPack', './pack')\n", (524, 554), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1405, 1429), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1427, 1429), False, 'import asyncio\n')] |
from typing import Any, Callable, Optional, Sequence
from llama_index.core.base.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llms.callbacks import llm_completion_callback
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.types import PydanticProgramMode
class MockLLM(CustomLLM):
max_tokens: Optional[int]
def __init__(
self,
max_tokens: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
) -> None:
super().__init__(
max_tokens=max_tokens,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
)
@classmethod
def class_name(cls) -> str:
return "MockLLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(num_output=self.max_tokens or -1)
def _generate_text(self, length: int) -> str:
return " ".join(["text" for _ in range(length)])
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
response_text = (
self._generate_text(self.max_tokens) if self.max_tokens else prompt
)
return CompletionResponse(
text=response_text,
)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
def gen_prompt() -> CompletionResponseGen:
for ch in prompt:
yield CompletionResponse(
text=prompt,
delta=ch,
)
def gen_response(max_tokens: int) -> CompletionResponseGen:
for i in range(max_tokens):
response_text = self._generate_text(i)
yield CompletionResponse(
text=response_text,
delta="text ",
)
return gen_response(self.max_tokens) if self.max_tokens else gen_prompt()
| [
"llama_index.core.llms.callbacks.llm_completion_callback",
"llama_index.core.base.llms.types.LLMMetadata",
"llama_index.core.base.llms.types.CompletionResponse"
] | [((1532, 1557), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1555, 1557), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((1871, 1896), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1894, 1896), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((1372, 1417), 'llama_index.core.base.llms.types.LLMMetadata', 'LLMMetadata', ([], {'num_output': '(self.max_tokens or -1)'}), '(num_output=self.max_tokens or -1)\n', (1383, 1417), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((1803, 1841), 'llama_index.core.base.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response_text'}), '(text=response_text)\n', (1821, 1841), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2123, 2164), 'llama_index.core.base.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'prompt', 'delta': 'ch'}), '(text=prompt, delta=ch)\n', (2141, 2164), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2410, 2463), 'llama_index.core.base.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response_text', 'delta': '"""text """'}), "(text=response_text, delta='text ')\n", (2428, 2463), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n')] |
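A quick exercise of MockLLM as defined above; the token count and prompts are arbitrary.

llm = MockLLM(max_tokens=5)

# complete() returns filler text whose length is governed by max_tokens.
print(llm.complete("ignored prompt"))  # -> "text text text text text"

# stream_complete() yields incremental CompletionResponse chunks with a delta.
for chunk in llm.stream_complete("ignored prompt"):
    print(chunk.delta, end="")
print()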
from enum import Enum
from typing import Any, AsyncGenerator, Generator, Optional, Union, List
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS
class MessageRole(str, Enum):
"""Message role."""
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
FUNCTION = "function"
TOOL = "tool"
CHATBOT = "chatbot"
MODEL = "model"
# ===== Generic Model Input - Chat =====
class ChatMessage(BaseModel):
"""Chat message."""
role: MessageRole = MessageRole.USER
content: Optional[Any] = ""
additional_kwargs: dict = Field(default_factory=dict)
def __str__(self) -> str:
return f"{self.role.value}: {self.content}"
@classmethod
def from_str(
cls,
content: str,
role: Union[MessageRole, str] = MessageRole.USER,
**kwargs: Any,
) -> "ChatMessage":
if isinstance(role, str):
role = MessageRole(role)
return cls(role=role, content=content, **kwargs)
class LogProb(BaseModel):
"""LogProb of a token."""
token: str = Field(default_factory=str)
logprob: float = Field(default_factory=float)
bytes: List[int] = Field(default_factory=list)
# ===== Generic Model Output - Chat =====
class ChatResponse(BaseModel):
"""Chat response."""
message: ChatMessage
raw: Optional[dict] = None
delta: Optional[str] = None
logprobs: Optional[List[List[LogProb]]] = None
additional_kwargs: dict = Field(default_factory=dict)
def __str__(self) -> str:
return str(self.message)
ChatResponseGen = Generator[ChatResponse, None, None]
ChatResponseAsyncGen = AsyncGenerator[ChatResponse, None]
# ===== Generic Model Output - Completion =====
class CompletionResponse(BaseModel):
"""
Completion response.
Fields:
text: Text content of the response if not streaming, or if streaming,
the current extent of streamed text.
        additional_kwargs: Additional information on the response (i.e. token
counts, function calling information).
raw: Optional raw JSON that was parsed to populate text, if relevant.
delta: New text that just streamed in (only relevant when streaming).
"""
text: str
additional_kwargs: dict = Field(default_factory=dict)
raw: Optional[dict] = None
delta: Optional[str] = None
def __str__(self) -> str:
return self.text
CompletionResponseGen = Generator[CompletionResponse, None, None]
CompletionResponseAsyncGen = AsyncGenerator[CompletionResponse, None]
class LLMMetadata(BaseModel):
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description=(
"Total number of tokens the model can be input and output for one response."
),
)
num_output: int = Field(
default=DEFAULT_NUM_OUTPUTS,
description="Number of tokens the model can output when generating a response.",
)
is_chat_model: bool = Field(
default=False,
description=(
"Set True if the model exposes a chat interface (i.e. can be passed a"
" sequence of messages, rather than text), like OpenAI's"
" /v1/chat/completions endpoint."
),
)
is_function_calling_model: bool = Field(
default=False,
# SEE: https://openai.com/blog/function-calling-and-other-api-updates
description=(
"Set True if the model supports function calling messages, similar to"
" OpenAI's function calling API. For example, converting 'Email Anya to"
" see if she wants to get coffee next Friday' to a function call like"
" `send_email(to: string, body: string)`."
),
)
model_name: str = Field(
default="unknown",
description=(
"The model's name used for logging, testing, and sanity checking. For some"
" models this can be automatically discerned. For other models, like"
" locally loaded models, this must be manually specified."
),
)
system_role: MessageRole = Field(
default=MessageRole.SYSTEM,
        description="The role this specific LLM provider "
"expects for system prompt. E.g. 'SYSTEM' for OpenAI, 'CHATBOT' for Cohere",
)
| [
"llama_index.core.bridge.pydantic.Field"
] | [((655, 682), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (660, 682), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1146, 1172), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str'}), '(default_factory=str)\n', (1151, 1172), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1194, 1222), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'float'}), '(default_factory=float)\n', (1199, 1222), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1246, 1273), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (1251, 1273), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1544, 1571), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1549, 1571), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2347, 2374), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (2352, 2374), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2690, 2827), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CONTEXT_WINDOW', 'description': '"""Total number of tokens the model can be input and output for one response."""'}), "(default=DEFAULT_CONTEXT_WINDOW, description=\n 'Total number of tokens the model can be input and output for one response.'\n )\n", (2695, 2827), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2887, 3007), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_NUM_OUTPUTS', 'description': '"""Number of tokens the model can output when generating a response."""'}), "(default=DEFAULT_NUM_OUTPUTS, description=\n 'Number of tokens the model can output when generating a response.')\n", (2892, 3007), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3052, 3252), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Set True if the model exposes a chat interface (i.e. can be passed a sequence of messages, rather than text), like OpenAI\'s /v1/chat/completions endpoint."""'}), '(default=False, description=\n "Set True if the model exposes a chat interface (i.e. can be passed a sequence of messages, rather than text), like OpenAI\'s /v1/chat/completions endpoint."\n )\n', (3057, 3252), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3358, 3650), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Set True if the model supports function calling messages, similar to OpenAI\'s function calling API. For example, converting \'Email Anya to see if she wants to get coffee next Friday\' to a function call like `send_email(to: string, body: string)`."""'}), '(default=False, description=\n "Set True if the model supports function calling messages, similar to OpenAI\'s function calling API. 
For example, converting \'Email Anya to see if she wants to get coffee next Friday\' to a function call like `send_email(to: string, body: string)`."\n )\n', (3363, 3650), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3833, 4079), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '"""unknown"""', 'description': '"""The model\'s name used for logging, testing, and sanity checking. For some models this can be automatically discerned. For other models, like locally loaded models, this must be manually specified."""'}), '(default=\'unknown\', description=\n "The model\'s name used for logging, testing, and sanity checking. For some models this can be automatically discerned. For other models, like locally loaded models, this must be manually specified."\n )\n', (3838, 4079), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4178, 4345), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'MessageRole.SYSTEM', 'description': '"""The role this specific LLM providerexpects for system prompt. E.g. \'SYSTEM\' for OpenAI, \'CHATBOT\' for Cohere"""'}), '(default=MessageRole.SYSTEM, description=\n "The role this specific LLM providerexpects for system prompt. E.g. \'SYSTEM\' for OpenAI, \'CHATBOT\' for Cohere"\n )\n', (4183, 4345), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n')] |
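A small usage sketch for the message and metadata types above; all values are illustrative.

msg = ChatMessage.from_str("Hello!", role="user")
print(msg)  # -> "user: Hello!"

reply = ChatResponse(
    message=ChatMessage(role=MessageRole.ASSISTANT, content="Hi there."),
)
print(reply)  # -> "assistant: Hi there."

metadata = LLMMetadata(
    context_window=4096,
    num_output=256,
    is_chat_model=True,
    model_name="example-model",
)
print(metadata.system_role)  # MessageRole.SYSTEM by default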
"""Base agent type."""
import uuid
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.base.response.schema import RESPONSE_TYPE, Response
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.callbacks import CallbackManager, trace_method
from llama_index.core.chat_engine.types import (
BaseChatEngine,
StreamingAgentChatResponse,
)
from llama_index.core.memory.types import BaseMemory
from llama_index.core.prompts.mixin import PromptDictType, PromptMixin, PromptMixinType
from llama_index.core.schema import QueryBundle
class BaseAgent(BaseChatEngine, BaseQueryEngine):
"""Base Agent."""
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
# TODO: the ReAct agent does not explicitly specify prompts, would need a
# refactor to expose those prompts
return {}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
# ===== Query Engine Interface =====
@trace_method("query")
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
agent_response = self.chat(
query_bundle.query_str,
chat_history=[],
)
return Response(
response=str(agent_response), source_nodes=agent_response.source_nodes
)
@trace_method("query")
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
agent_response = await self.achat(
query_bundle.query_str,
chat_history=[],
)
return Response(
response=str(agent_response), source_nodes=agent_response.source_nodes
)
def stream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
raise NotImplementedError("stream_chat not implemented")
async def astream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
raise NotImplementedError("astream_chat not implemented")
class TaskStep(BaseModel):
"""Agent task step.
Represents a single input step within the execution run ("Task") of an agent
given a user input.
The output is returned as a `TaskStepOutput`.
"""
    task_id: str = Field(..., description="Task ID")
step_id: str = Field(..., description="Step ID")
input: Optional[str] = Field(default=None, description="User input")
# memory: BaseMemory = Field(
# ..., type=BaseMemory, description="Conversational Memory"
# )
step_state: Dict[str, Any] = Field(
default_factory=dict, description="Additional state for a given step."
)
# NOTE: the state below may change throughout the course of execution
# this tracks the relationships to other steps
next_steps: Dict[str, "TaskStep"] = Field(
default_factory=dict, description="Next steps to be executed."
)
prev_steps: Dict[str, "TaskStep"] = Field(
default_factory=dict,
description="Previous steps that were dependencies for this step.",
)
is_ready: bool = Field(
default=True, description="Is this step ready to be executed?"
)
def get_next_step(
self,
step_id: str,
input: Optional[str] = None,
step_state: Optional[Dict[str, Any]] = None,
) -> "TaskStep":
"""Convenience function to get next step.
Preserve task_id, memory, step_state.
"""
return TaskStep(
task_id=self.task_id,
step_id=step_id,
input=input,
# memory=self.memory,
step_state=step_state or self.step_state,
)
def link_step(
self,
next_step: "TaskStep",
) -> None:
"""Link to next step.
Add link from this step to next, and from next step to current.
"""
self.next_steps[next_step.step_id] = next_step
next_step.prev_steps[self.step_id] = self
class TaskStepOutput(BaseModel):
"""Agent task step output."""
output: Any = Field(..., description="Task step output")
task_step: TaskStep = Field(..., description="Task step input")
next_steps: List[TaskStep] = Field(..., description="Next steps to be executed.")
is_last: bool = Field(default=False, description="Is this the last step?")
def __str__(self) -> str:
"""String representation."""
return str(self.output)
class Task(BaseModel):
"""Agent Task.
Represents a "run" of an agent given a user input.
"""
class Config:
arbitrary_types_allowed = True
task_id: str = Field(
default_factory=lambda: str(uuid.uuid4()), type=str, description="Task ID"
)
input: str = Field(..., type=str, description="User input")
# NOTE: this is state that may be modified throughout the course of execution of the task
memory: BaseMemory = Field(
...,
type=BaseMemory,
description=(
"Conversational Memory. Maintains state before execution of this task."
),
)
callback_manager: CallbackManager = Field(
default_factory=CallbackManager,
exclude=True,
description="Callback manager for the task.",
)
extra_state: Dict[str, Any] = Field(
default_factory=dict,
description=(
"Additional user-specified state for a given task. "
"Can be modified throughout the execution of a task."
),
)
class BaseAgentWorker(PromptMixin):
"""Base agent worker."""
class Config:
arbitrary_types_allowed = True
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
# TODO: the ReAct agent does not explicitly specify prompts, would need a
# refactor to expose those prompts
return {}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
@abstractmethod
def initialize_step(self, task: Task, **kwargs: Any) -> TaskStep:
"""Initialize step from task."""
@abstractmethod
def run_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput:
"""Run step."""
@abstractmethod
async def arun_step(
self, step: TaskStep, task: Task, **kwargs: Any
) -> TaskStepOutput:
"""Run step (async)."""
raise NotImplementedError
@abstractmethod
def stream_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput:
"""Run step (stream)."""
# TODO: figure out if we need a different type for TaskStepOutput
raise NotImplementedError
@abstractmethod
async def astream_step(
self, step: TaskStep, task: Task, **kwargs: Any
) -> TaskStepOutput:
"""Run step (async stream)."""
raise NotImplementedError
@abstractmethod
def finalize_task(self, task: Task, **kwargs: Any) -> None:
"""Finalize task, after all the steps are completed."""
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
# TODO: make this abstractmethod (right now will break some agent impls)
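# --- Hedged usage sketch (not part of the original module) ---
# Shows how the pieces above are meant to fit together: a driver initializes a
# step from a task, runs steps until one is marked as last, then finalizes.
# "worker" must be a concrete BaseAgentWorker implementation supplied elsewhere;
# the function name below is illustrative, not library API.
def _run_task_to_completion(worker: BaseAgentWorker, task: Task) -> TaskStepOutput:
    """Drive a worker over a task until the final step output is produced."""
    step = worker.initialize_step(task)
    while True:
        step_output = worker.run_step(step, task)
        if step_output.is_last:
            worker.finalize_task(task)
            return step_output
        # continue with the first of the queued next steps
        step = step_output.next_steps[0]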
| [
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.callbacks.trace_method"
] | [((1275, 1296), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1287, 1296), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((1598, 1619), 'llama_index.core.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1610, 1619), False, 'from llama_index.core.callbacks import CallbackManager, trace_method\n'), ((2578, 2612), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'diescription': '"""Task ID"""'}), "(..., diescription='Task ID')\n", (2583, 2612), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2632, 2665), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Step ID"""'}), "(..., description='Step ID')\n", (2637, 2665), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2693, 2738), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""User input"""'}), "(default=None, description='User input')\n", (2698, 2738), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2882, 2959), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional state for a given step."""'}), "(default_factory=dict, description='Additional state for a given step.')\n", (2887, 2959), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3140, 3209), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Next steps to be executed."""'}), "(default_factory=dict, description='Next steps to be executed.')\n", (3145, 3209), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3264, 3364), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Previous steps that were dependencies for this step."""'}), "(default_factory=dict, description=\n 'Previous steps that were dependencies for this step.')\n", (3269, 3364), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3404, 3473), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Is this step ready to be executed?"""'}), "(default=True, description='Is this step ready to be executed?')\n", (3409, 3473), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4369, 4411), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step output"""'}), "(..., description='Task step output')\n", (4374, 4411), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4438, 4479), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step input"""'}), "(..., description='Task step input')\n", (4443, 4479), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4513, 4565), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Next steps to be executed."""'}), "(..., description='Next steps to be executed.')\n", (4518, 4565), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4586, 4644), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Is this the last step?"""'}), "(default=False, description='Is this the last step?')\n", (4591, 4644), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5045, 5091), 
'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'type': 'str', 'description': '"""User input"""'}), "(..., type=str, description='User input')\n", (5050, 5091), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5212, 5329), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'type': 'BaseMemory', 'description': '"""Conversational Memory. Maintains state before execution of this task."""'}), "(..., type=BaseMemory, description=\n 'Conversational Memory. Maintains state before execution of this task.')\n", (5217, 5329), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5421, 5524), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)', 'description': '"""Callback manager for the task."""'}), "(default_factory=CallbackManager, exclude=True, description=\n 'Callback manager for the task.')\n", (5426, 5524), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((5586, 5740), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional user-specified state for a given task. Can be modified throughout the execution of a task."""'}), "(default_factory=dict, description=\n 'Additional user-specified state for a given task. Can be modified throughout the execution of a task.'\n )\n", (5591, 5740), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4975, 4987), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (4985, 4987), False, 'import uuid\n')] |
import json
from abc import abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, Optional, Type
if TYPE_CHECKING:
from llama_index.core.bridge.langchain import StructuredTool, Tool
from deprecated import deprecated
from llama_index.core.bridge.pydantic import BaseModel
class DefaultToolFnSchema(BaseModel):
"""Default tool function Schema."""
input: str
@dataclass
class ToolMetadata:
description: str
name: Optional[str] = None
fn_schema: Optional[Type[BaseModel]] = DefaultToolFnSchema
def get_parameters_dict(self) -> dict:
if self.fn_schema is None:
parameters = {
"type": "object",
"properties": {
"input": {"title": "input query string", "type": "string"},
},
"required": ["input"],
}
else:
parameters = self.fn_schema.schema()
parameters = {
k: v
for k, v in parameters.items()
if k in ["type", "properties", "required", "definitions"]
}
return parameters
@property
def fn_schema_str(self) -> str:
"""Get fn schema as string."""
if self.fn_schema is None:
raise ValueError("fn_schema is None.")
parameters = self.get_parameters_dict()
return json.dumps(parameters)
def get_name(self) -> str:
"""Get name."""
if self.name is None:
raise ValueError("name is None.")
return self.name
@deprecated(
"Deprecated in favor of `to_openai_tool`, which should be used instead."
)
def to_openai_function(self) -> Dict[str, Any]:
"""Deprecated and replaced by `to_openai_tool`.
The name and arguments of a function that should be called, as generated by the
model.
"""
return {
"name": self.name,
"description": self.description,
"parameters": self.get_parameters_dict(),
}
def to_openai_tool(self) -> Dict[str, Any]:
"""To OpenAI tool."""
return {
"type": "function",
"function": {
"name": self.name,
"description": self.description,
"parameters": self.get_parameters_dict(),
},
}
class ToolOutput(BaseModel):
"""Tool output."""
content: str
tool_name: str
raw_input: Dict[str, Any]
raw_output: Any
def __str__(self) -> str:
"""String."""
return str(self.content)
class BaseTool:
@property
@abstractmethod
def metadata(self) -> ToolMetadata:
pass
@abstractmethod
def __call__(self, input: Any) -> ToolOutput:
pass
def _process_langchain_tool_kwargs(
self,
langchain_tool_kwargs: Any,
) -> Dict[str, Any]:
"""Process langchain tool kwargs."""
if "name" not in langchain_tool_kwargs:
langchain_tool_kwargs["name"] = self.metadata.name or ""
if "description" not in langchain_tool_kwargs:
langchain_tool_kwargs["description"] = self.metadata.description
if "fn_schema" not in langchain_tool_kwargs:
langchain_tool_kwargs["args_schema"] = self.metadata.fn_schema
return langchain_tool_kwargs
def to_langchain_tool(
self,
**langchain_tool_kwargs: Any,
) -> "Tool":
"""To langchain tool."""
from llama_index.core.bridge.langchain import Tool
langchain_tool_kwargs = self._process_langchain_tool_kwargs(
langchain_tool_kwargs
)
return Tool.from_function(
func=self.__call__,
**langchain_tool_kwargs,
)
def to_langchain_structured_tool(
self,
**langchain_tool_kwargs: Any,
) -> "StructuredTool":
"""To langchain structured tool."""
from llama_index.core.bridge.langchain import StructuredTool
langchain_tool_kwargs = self._process_langchain_tool_kwargs(
langchain_tool_kwargs
)
return StructuredTool.from_function(
func=self.__call__,
**langchain_tool_kwargs,
)
class AsyncBaseTool(BaseTool):
"""
Base-level tool class that is backwards compatible with the old tool spec but also
supports async.
"""
def __call__(self, *args: Any, **kwargs: Any) -> ToolOutput:
return self.call(*args, **kwargs)
@abstractmethod
def call(self, input: Any) -> ToolOutput:
"""
This is the method that should be implemented by the tool developer.
"""
@abstractmethod
async def acall(self, input: Any) -> ToolOutput:
"""
This is the async version of the call method.
Should also be implemented by the tool developer as an
async-compatible implementation.
"""
class BaseToolAsyncAdapter(AsyncBaseTool):
"""
Adapter class that allows a synchronous tool to be used as an async tool.
"""
def __init__(self, tool: BaseTool):
self.base_tool = tool
@property
def metadata(self) -> ToolMetadata:
return self.base_tool.metadata
def call(self, input: Any) -> ToolOutput:
return self.base_tool(input)
async def acall(self, input: Any) -> ToolOutput:
return self.call(input)
def adapt_to_async_tool(tool: BaseTool) -> AsyncBaseTool:
"""
Converts a synchronous tool to an async tool.
"""
if isinstance(tool, AsyncBaseTool):
return tool
else:
return BaseToolAsyncAdapter(tool)
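# --- Hedged usage sketch (not part of the original module) ---
# A toy synchronous tool, defined only to illustrate the adapter above; the
# "echo" name and the class itself are assumptions, not library API.
class _EchoTool(BaseTool):
    """Minimal synchronous tool that echoes its input."""
    @property
    def metadata(self) -> ToolMetadata:
        return ToolMetadata(name="echo", description="Echo the input string.")
    def __call__(self, input: Any) -> ToolOutput:
        return ToolOutput(
            content=str(input),
            tool_name="echo",
            raw_input={"input": input},
            raw_output=input,
        )
# echo_async = adapt_to_async_tool(_EchoTool())  # wrapped in BaseToolAsyncAdapter
# await echo_async.acall("hello")                # -> ToolOutput(content="hello", ...)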
| [
"llama_index.core.bridge.langchain.Tool.from_function",
"llama_index.core.bridge.langchain.StructuredTool.from_function"
] | [((1581, 1670), 'deprecated.deprecated', 'deprecated', (['"""Deprecated in favor of `to_openai_tool`, which should be used instead."""'], {}), "(\n 'Deprecated in favor of `to_openai_tool`, which should be used instead.')\n", (1591, 1670), False, 'from deprecated import deprecated\n'), ((1395, 1417), 'json.dumps', 'json.dumps', (['parameters'], {}), '(parameters)\n', (1405, 1417), False, 'import json\n'), ((3690, 3753), 'llama_index.core.bridge.langchain.Tool.from_function', 'Tool.from_function', ([], {'func': 'self.__call__'}), '(func=self.__call__, **langchain_tool_kwargs)\n', (3708, 3753), False, 'from llama_index.core.bridge.langchain import Tool\n'), ((4149, 4222), 'llama_index.core.bridge.langchain.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'func': 'self.__call__'}), '(func=self.__call__, **langchain_tool_kwargs)\n', (4177, 4222), False, 'from llama_index.core.bridge.langchain import StructuredTool\n')] |
"""Generate SQL queries using LlamaIndex."""
import argparse
import json
import logging
import os
import re
from typing import Any, cast
from llama_index import LLMPredictor, SQLDatabase
from llama_index.indices import SQLStructStoreIndex
from llama_index.llms.openai import OpenAI
from sqlalchemy import create_engine, text
from tqdm import tqdm
logging.getLogger("root").setLevel(logging.WARNING)
_spaces = re.compile(r"\s+")
_newlines = re.compile(r"\n+")
def _generate_sql(
llama_index: SQLStructStoreIndex,
nl_query_text: str,
) -> str:
"""Generate SQL query for the given NL query text."""
query_engine = llama_index.as_query_engine()
response = query_engine.query(nl_query_text)
if (
response.metadata is None
or "sql_query" not in response.metadata
or response.metadata["sql_query"] is None
):
raise RuntimeError("No SQL query generated.")
query = response.metadata["sql_query"]
# Remove newlines and extra spaces.
query = _newlines.sub(" ", query)
query = _spaces.sub(" ", query)
return query.strip()
def generate_sql(llama_indexes: dict, examples: list, output_file: str) -> None:
"""Generate SQL queries for the given examples and write them to the output file."""
with open(output_file, "w") as f:
for example in tqdm(examples, desc=f"Generating {output_file}"):
db_name = example["db_id"]
nl_query_text = example["question"]
try:
sql_query = _generate_sql(llama_indexes[db_name], nl_query_text)
except Exception as e:
print(
f"Failed to generate SQL query for question: "
f"{example['question']} on database: {example['db_id']}."
)
print(e)
sql_query = "ERROR"
f.write(sql_query + "\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Generate SQL queries using LlamaIndex."
)
parser.add_argument(
"--input", type=str, required=True, help="Path to the spider dataset directory."
)
parser.add_argument(
"--output",
type=str,
required=True,
help="Path to the output directory of generated SQL files,"
" one query on each line, "
"to be compared with the *_gold.sql files in the input directory.",
)
parser.add_argument(
"--model",
type=str,
choices=["gpt-4", "gpt-3.5-turbo", "text-davinci-003", "code-davinci-002"],
required=True,
help="The model to use for generating SQL queries.",
)
args = parser.parse_args()
# Create the output directory if it does not exist.
if not os.path.exists(args.output):
os.makedirs(args.output)
# Load the Spider dataset from the input directory.
with open(os.path.join(args.input, "train_spider.json")) as f:
train_spider = json.load(f)
with open(os.path.join(args.input, "train_others.json")) as f:
train_others = json.load(f)
with open(os.path.join(args.input, "dev.json")) as f:
dev = json.load(f)
# Create all necessary SQL database objects.
databases = {}
for db in train_spider + train_others + dev:
db_name = db["db_id"]
if db_name in databases:
continue
db_path = os.path.join(args.input, "database", db_name, db_name + ".sqlite")
engine = create_engine("sqlite:///" + db_path)
databases[db_name] = (SQLDatabase(engine=engine), engine)
# Create the LlamaIndexes for all databases.
llm = OpenAI(model=args.model, temperature=0)
llm_predictor = LLMPredictor(llm=llm)
llm_indexes = {}
for db_name, (db, engine) in databases.items():
# Get the name of the first table in the database.
# This is a hack to get a table name for the index, which can use any
# table in the database.
with engine.connect() as connection:
table_name = cast(
Any,
connection.execute(
text("select name from sqlite_master where type = 'table'")
).fetchone(),
)[0]
llm_indexes[db_name] = SQLStructStoreIndex.from_documents(
documents=[],
llm_predictor=llm_predictor,
sql_database=db,
table_name=table_name,
)
# Generate SQL queries.
generate_sql(
llama_indexes=llm_indexes,
examples=train_spider + train_others,
output_file=os.path.join(args.output, "train_pred.sql"),
)
generate_sql(
llama_indexes=llm_indexes,
examples=dev,
output_file=os.path.join(args.output, "dev_pred.sql"),
)
| [
"llama_index.llms.openai.OpenAI",
"llama_index.indices.SQLStructStoreIndex.from_documents",
"llama_index.SQLDatabase",
"llama_index.LLMPredictor"
] | [((413, 431), 're.compile', 're.compile', (['"""\\\\s+"""'], {}), "('\\\\s+')\n", (423, 431), False, 'import re\n'), ((444, 462), 're.compile', 're.compile', (['"""\\\\n+"""'], {}), "('\\\\n+')\n", (454, 462), False, 'import re\n'), ((1926, 2003), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate SQL queries using LlamaIndex."""'}), "(description='Generate SQL queries using LlamaIndex.')\n", (1949, 2003), False, 'import argparse\n'), ((3623, 3662), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': 'args.model', 'temperature': '(0)'}), '(model=args.model, temperature=0)\n', (3629, 3662), False, 'from llama_index.llms.openai import OpenAI\n'), ((3683, 3704), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (3695, 3704), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((349, 374), 'logging.getLogger', 'logging.getLogger', (['"""root"""'], {}), "('root')\n", (366, 374), False, 'import logging\n'), ((1329, 1377), 'tqdm.tqdm', 'tqdm', (['examples'], {'desc': 'f"""Generating {output_file}"""'}), "(examples, desc=f'Generating {output_file}')\n", (1333, 1377), False, 'from tqdm import tqdm\n'), ((2745, 2772), 'os.path.exists', 'os.path.exists', (['args.output'], {}), '(args.output)\n', (2759, 2772), False, 'import os\n'), ((2782, 2806), 'os.makedirs', 'os.makedirs', (['args.output'], {}), '(args.output)\n', (2793, 2806), False, 'import os\n'), ((2954, 2966), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2963, 2966), False, 'import json\n'), ((3057, 3069), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3066, 3069), False, 'import json\n'), ((3142, 3154), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3151, 3154), False, 'import json\n'), ((3375, 3441), 'os.path.join', 'os.path.join', (['args.input', '"""database"""', 'db_name', "(db_name + '.sqlite')"], {}), "(args.input, 'database', db_name, db_name + '.sqlite')\n", (3387, 3441), False, 'import os\n'), ((3459, 3496), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + db_path)"], {}), "('sqlite:///' + db_path)\n", (3472, 3496), False, 'from sqlalchemy import create_engine, text\n'), ((2878, 2923), 'os.path.join', 'os.path.join', (['args.input', '"""train_spider.json"""'], {}), "(args.input, 'train_spider.json')\n", (2890, 2923), False, 'import os\n'), ((2981, 3026), 'os.path.join', 'os.path.join', (['args.input', '"""train_others.json"""'], {}), "(args.input, 'train_others.json')\n", (2993, 3026), False, 'import os\n'), ((3084, 3120), 'os.path.join', 'os.path.join', (['args.input', '"""dev.json"""'], {}), "(args.input, 'dev.json')\n", (3096, 3120), False, 'import os\n'), ((3527, 3553), 'llama_index.SQLDatabase', 'SQLDatabase', ([], {'engine': 'engine'}), '(engine=engine)\n', (3538, 3553), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((4243, 4365), 'llama_index.indices.SQLStructStoreIndex.from_documents', 'SQLStructStoreIndex.from_documents', ([], {'documents': '[]', 'llm_predictor': 'llm_predictor', 'sql_database': 'db', 'table_name': 'table_name'}), '(documents=[], llm_predictor=\n llm_predictor, sql_database=db, table_name=table_name)\n', (4277, 4365), False, 'from llama_index.indices import SQLStructStoreIndex\n'), ((4588, 4631), 'os.path.join', 'os.path.join', (['args.output', '"""train_pred.sql"""'], {}), "(args.output, 'train_pred.sql')\n", (4600, 4631), False, 'import os\n'), ((4734, 4775), 'os.path.join', 'os.path.join', (['args.output', '"""dev_pred.sql"""'], {}), "(args.output, 'dev_pred.sql')\n", (4746, 
4775), False, 'import os\n'), ((4101, 4160), 'sqlalchemy.text', 'text', (['"""select name from sqlite_master where type = \'table\'"""'], {}), '("select name from sqlite_master where type = \'table\'")\n', (4105, 4160), False, 'from sqlalchemy import create_engine, text\n')] |
"""Utilities for Spider module."""
import json
import os
from typing import Dict, Tuple
from llama_index import LLMPredictor, SQLDatabase
from llama_index.indices import SQLStructStoreIndex
from llama_index.llms.openai import OpenAI
from sqlalchemy import create_engine, text
def load_examples(spider_dir: str) -> Tuple[list, list]:
"""Load examples."""
with open(os.path.join(spider_dir, "train_spider.json")) as f:
train_spider = json.load(f)
with open(os.path.join(spider_dir, "train_others.json")) as f:
train_others = json.load(f)
with open(os.path.join(spider_dir, "dev.json")) as f:
dev = json.load(f)
return train_spider + train_others, dev
def create_indexes(spider_dir: str, llm: OpenAI) -> Dict[str, SQLStructStoreIndex]:
"""Create indexes for all databases."""
# Create all necessary SQL database objects.
databases = {}
for db_name in os.listdir(os.path.join(spider_dir, "database")):
db_path = os.path.join(spider_dir, "database", db_name, db_name + ".sqlite")
if not os.path.exists(db_path):
continue
engine = create_engine("sqlite:///" + db_path)
databases[db_name] = SQLDatabase(engine=engine)
# Test connection.
with engine.connect() as connection:
connection.execute(
text("select name from sqlite_master where type = 'table'")
).fetchone()
llm_predictor = LLMPredictor(llm=llm)
llm_indexes = {}
for db_name, db in databases.items():
llm_indexes[db_name] = SQLStructStoreIndex(
llm_predictor=llm_predictor,
sql_database=db,
)
return llm_indexes
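if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: the dataset path
    # and model name below are illustrative placeholders, not values taken
    # from the original file.
    train_examples, dev_examples = load_examples("./spider")
    indexes = create_indexes("./spider", OpenAI(model="gpt-3.5-turbo", temperature=0))
    print(f"{len(train_examples)} train examples, {len(indexes)} databases indexed")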
| [
"llama_index.indices.SQLStructStoreIndex",
"llama_index.SQLDatabase",
"llama_index.LLMPredictor"
] | [((1447, 1468), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1459, 1468), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((452, 464), 'json.load', 'json.load', (['f'], {}), '(f)\n', (461, 464), False, 'import json\n'), ((555, 567), 'json.load', 'json.load', (['f'], {}), '(f)\n', (564, 567), False, 'import json\n'), ((640, 652), 'json.load', 'json.load', (['f'], {}), '(f)\n', (649, 652), False, 'import json\n'), ((925, 961), 'os.path.join', 'os.path.join', (['spider_dir', '"""database"""'], {}), "(spider_dir, 'database')\n", (937, 961), False, 'import os\n'), ((982, 1048), 'os.path.join', 'os.path.join', (['spider_dir', '"""database"""', 'db_name', "(db_name + '.sqlite')"], {}), "(spider_dir, 'database', db_name, db_name + '.sqlite')\n", (994, 1048), False, 'import os\n'), ((1127, 1164), 'sqlalchemy.create_engine', 'create_engine', (["('sqlite:///' + db_path)"], {}), "('sqlite:///' + db_path)\n", (1140, 1164), False, 'from sqlalchemy import create_engine, text\n'), ((1194, 1220), 'llama_index.SQLDatabase', 'SQLDatabase', ([], {'engine': 'engine'}), '(engine=engine)\n', (1205, 1220), False, 'from llama_index import LLMPredictor, SQLDatabase\n'), ((1563, 1628), 'llama_index.indices.SQLStructStoreIndex', 'SQLStructStoreIndex', ([], {'llm_predictor': 'llm_predictor', 'sql_database': 'db'}), '(llm_predictor=llm_predictor, sql_database=db)\n', (1582, 1628), False, 'from llama_index.indices import SQLStructStoreIndex\n'), ((376, 421), 'os.path.join', 'os.path.join', (['spider_dir', '"""train_spider.json"""'], {}), "(spider_dir, 'train_spider.json')\n", (388, 421), False, 'import os\n'), ((479, 524), 'os.path.join', 'os.path.join', (['spider_dir', '"""train_others.json"""'], {}), "(spider_dir, 'train_others.json')\n", (491, 524), False, 'import os\n'), ((582, 618), 'os.path.join', 'os.path.join', (['spider_dir', '"""dev.json"""'], {}), "(spider_dir, 'dev.json')\n", (594, 618), False, 'import os\n'), ((1064, 1087), 'os.path.exists', 'os.path.exists', (['db_path'], {}), '(db_path)\n', (1078, 1087), False, 'import os\n'), ((1341, 1400), 'sqlalchemy.text', 'text', (['"""select name from sqlite_master where type = \'table\'"""'], {}), '("select name from sqlite_master where type = \'table\'")\n', (1345, 1400), False, 'from sqlalchemy import create_engine, text\n')] |
from collections import ChainMap
from typing import (
Any,
Dict,
List,
Optional,
Protocol,
Sequence,
get_args,
runtime_checkable,
)
from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator
from llama_index.legacy.callbacks import CBEventType, EventPayload
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponseAsyncGen,
CompletionResponseGen,
MessageRole,
)
from llama_index.legacy.core.query_pipeline.query_component import (
InputKeys,
OutputKeys,
QueryComponent,
StringableInput,
validate_and_convert_stringable,
)
from llama_index.legacy.llms.base import BaseLLM
from llama_index.legacy.llms.generic_utils import (
messages_to_prompt as generic_messages_to_prompt,
)
from llama_index.legacy.llms.generic_utils import (
prompt_to_messages,
)
from llama_index.legacy.prompts import BasePromptTemplate, PromptTemplate
from llama_index.legacy.types import (
BaseOutputParser,
PydanticProgramMode,
TokenAsyncGen,
TokenGen,
)
# NOTE: These two protocols are needed to appease mypy
@runtime_checkable
class MessagesToPromptType(Protocol):
def __call__(self, messages: Sequence[ChatMessage]) -> str:
pass
@runtime_checkable
class CompletionToPromptType(Protocol):
def __call__(self, prompt: str) -> str:
pass
def stream_completion_response_to_tokens(
completion_response_gen: CompletionResponseGen,
) -> TokenGen:
"""Convert a stream completion response to a stream of tokens."""
def gen() -> TokenGen:
for response in completion_response_gen:
yield response.delta or ""
return gen()
def stream_chat_response_to_tokens(
chat_response_gen: ChatResponseGen,
) -> TokenGen:
"""Convert a stream completion response to a stream of tokens."""
def gen() -> TokenGen:
for response in chat_response_gen:
yield response.delta or ""
return gen()
async def astream_completion_response_to_tokens(
completion_response_gen: CompletionResponseAsyncGen,
) -> TokenAsyncGen:
"""Convert a stream completion response to a stream of tokens."""
async def gen() -> TokenAsyncGen:
async for response in completion_response_gen:
yield response.delta or ""
return gen()
async def astream_chat_response_to_tokens(
chat_response_gen: ChatResponseAsyncGen,
) -> TokenAsyncGen:
"""Convert a stream completion response to a stream of tokens."""
async def gen() -> TokenAsyncGen:
async for response in chat_response_gen:
yield response.delta or ""
return gen()
def default_completion_to_prompt(prompt: str) -> str:
return prompt
class LLM(BaseLLM):
system_prompt: Optional[str] = Field(
default=None, description="System prompt for LLM calls."
)
messages_to_prompt: MessagesToPromptType = Field(
description="Function to convert a list of messages to an LLM prompt.",
default=generic_messages_to_prompt,
exclude=True,
)
completion_to_prompt: CompletionToPromptType = Field(
description="Function to convert a completion to an LLM prompt.",
default=default_completion_to_prompt,
exclude=True,
)
output_parser: Optional[BaseOutputParser] = Field(
description="Output parser to parse, validate, and correct errors programmatically.",
default=None,
exclude=True,
)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT
# deprecated
query_wrapper_prompt: Optional[BasePromptTemplate] = Field(
description="Query wrapper prompt for LLM calls.",
default=None,
exclude=True,
)
@validator("messages_to_prompt", pre=True)
def set_messages_to_prompt(
cls, messages_to_prompt: Optional[MessagesToPromptType]
) -> MessagesToPromptType:
return messages_to_prompt or generic_messages_to_prompt
@validator("completion_to_prompt", pre=True)
def set_completion_to_prompt(
cls, completion_to_prompt: Optional[CompletionToPromptType]
) -> CompletionToPromptType:
return completion_to_prompt or default_completion_to_prompt
def _log_template_data(
self, prompt: BasePromptTemplate, **prompt_args: Any
) -> None:
template_vars = {
k: v
for k, v in ChainMap(prompt.kwargs, prompt_args).items()
if k in prompt.template_vars
}
with self.callback_manager.event(
CBEventType.TEMPLATING,
payload={
EventPayload.TEMPLATE: prompt.get_template(llm=self),
EventPayload.TEMPLATE_VARS: template_vars,
EventPayload.SYSTEM_PROMPT: self.system_prompt,
EventPayload.QUERY_WRAPPER_PROMPT: self.query_wrapper_prompt,
},
):
pass
def _get_prompt(self, prompt: BasePromptTemplate, **prompt_args: Any) -> str:
formatted_prompt = prompt.format(
llm=self,
messages_to_prompt=self.messages_to_prompt,
completion_to_prompt=self.completion_to_prompt,
**prompt_args,
)
if self.output_parser is not None:
formatted_prompt = self.output_parser.format(formatted_prompt)
return self._extend_prompt(formatted_prompt)
def _get_messages(
self, prompt: BasePromptTemplate, **prompt_args: Any
) -> List[ChatMessage]:
messages = prompt.format_messages(llm=self, **prompt_args)
if self.output_parser is not None:
messages = self.output_parser.format_messages(messages)
return self._extend_messages(messages)
def structured_predict(
self,
output_cls: BaseModel,
prompt: PromptTemplate,
**prompt_args: Any,
) -> BaseModel:
from llama_index.legacy.program.utils import get_program_for_llm
program = get_program_for_llm(
output_cls,
prompt,
self,
pydantic_program_mode=self.pydantic_program_mode,
)
return program(**prompt_args)
async def astructured_predict(
self,
output_cls: BaseModel,
prompt: PromptTemplate,
**prompt_args: Any,
) -> BaseModel:
from llama_index.legacy.program.utils import get_program_for_llm
program = get_program_for_llm(
output_cls,
prompt,
self,
pydantic_program_mode=self.pydantic_program_mode,
)
return await program.acall(**prompt_args)
def _parse_output(self, output: str) -> str:
if self.output_parser is not None:
return str(self.output_parser.parse(output))
return output
def predict(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> str:
"""Predict."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = self.chat(messages)
output = chat_response.message.content or ""
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
response = self.complete(formatted_prompt, formatted=True)
output = response.text
return self._parse_output(output)
def stream(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> TokenGen:
"""Stream."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = self.stream_chat(messages)
stream_tokens = stream_chat_response_to_tokens(chat_response)
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
stream_response = self.stream_complete(formatted_prompt, formatted=True)
stream_tokens = stream_completion_response_to_tokens(stream_response)
if prompt.output_parser is not None or self.output_parser is not None:
raise NotImplementedError("Output parser is not supported for streaming.")
return stream_tokens
async def apredict(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> str:
"""Async predict."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = await self.achat(messages)
output = chat_response.message.content or ""
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
response = await self.acomplete(formatted_prompt, formatted=True)
output = response.text
return self._parse_output(output)
async def astream(
self,
prompt: BasePromptTemplate,
**prompt_args: Any,
) -> TokenAsyncGen:
"""Async stream."""
self._log_template_data(prompt, **prompt_args)
if self.metadata.is_chat_model:
messages = self._get_messages(prompt, **prompt_args)
chat_response = await self.astream_chat(messages)
stream_tokens = await astream_chat_response_to_tokens(chat_response)
else:
formatted_prompt = self._get_prompt(prompt, **prompt_args)
stream_response = await self.astream_complete(
formatted_prompt, formatted=True
)
stream_tokens = await astream_completion_response_to_tokens(stream_response)
if prompt.output_parser is not None or self.output_parser is not None:
raise NotImplementedError("Output parser is not supported for streaming.")
return stream_tokens
def _extend_prompt(
self,
formatted_prompt: str,
) -> str:
"""Add system and query wrapper prompts to base prompt."""
extended_prompt = formatted_prompt
if self.system_prompt:
extended_prompt = self.system_prompt + "\n\n" + extended_prompt
if self.query_wrapper_prompt:
extended_prompt = self.query_wrapper_prompt.format(
query_str=extended_prompt
)
return extended_prompt
def _extend_messages(self, messages: List[ChatMessage]) -> List[ChatMessage]:
"""Add system prompt to chat message list."""
if self.system_prompt:
messages = [
ChatMessage(role=MessageRole.SYSTEM, content=self.system_prompt),
*messages,
]
return messages
def _as_query_component(self, **kwargs: Any) -> QueryComponent:
"""Return query component."""
if self.metadata.is_chat_model:
return LLMChatComponent(llm=self, **kwargs)
else:
return LLMCompleteComponent(llm=self, **kwargs)
class BaseLLMComponent(QueryComponent):
"""Base LLM component."""
llm: LLM = Field(..., description="LLM")
streaming: bool = Field(default=False, description="Streaming mode")
class Config:
arbitrary_types_allowed = True
def set_callback_manager(self, callback_manager: Any) -> None:
"""Set callback manager."""
self.llm.callback_manager = callback_manager
class LLMCompleteComponent(BaseLLMComponent):
"""LLM completion component."""
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
if "prompt" not in input:
raise ValueError("Prompt must be in input dict.")
# do special check to see if prompt is a list of chat messages
if isinstance(input["prompt"], get_args(List[ChatMessage])):
input["prompt"] = self.llm.messages_to_prompt(input["prompt"])
input["prompt"] = validate_and_convert_stringable(input["prompt"])
else:
input["prompt"] = validate_and_convert_stringable(input["prompt"])
input["prompt"] = self.llm.completion_to_prompt(input["prompt"])
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
prompt = kwargs["prompt"]
# ignore all other kwargs for now
if self.streaming:
response = self.llm.stream_complete(prompt, formatted=True)
else:
response = self.llm.complete(prompt, formatted=True)
return {"output": response}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
prompt = kwargs["prompt"]
# ignore all other kwargs for now
response = await self.llm.acomplete(prompt, formatted=True)
return {"output": response}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# TODO: support only complete for now
return InputKeys.from_keys({"prompt"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
class LLMChatComponent(BaseLLMComponent):
"""LLM chat component."""
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
if "messages" not in input:
raise ValueError("Messages must be in input dict.")
# if `messages` is a string, convert to a list of chat message
if isinstance(input["messages"], get_args(StringableInput)):
input["messages"] = validate_and_convert_stringable(input["messages"])
input["messages"] = prompt_to_messages(str(input["messages"]))
for message in input["messages"]:
if not isinstance(message, ChatMessage):
raise ValueError("Messages must be a list of ChatMessage")
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
messages = kwargs["messages"]
if self.streaming:
response = self.llm.stream_chat(messages)
else:
response = self.llm.chat(messages)
return {"output": response}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component."""
# TODO: support only complete for now
# non-trivial to figure how to support chat/complete/etc.
messages = kwargs["messages"]
if self.streaming:
response = await self.llm.astream_chat(messages)
else:
response = await self.llm.achat(messages)
return {"output": response}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
# TODO: support only complete for now
return InputKeys.from_keys({"messages"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
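# --- Hedged usage note (not part of the original module) ---
# Any concrete LLM subclass inherits the prompt-level helpers defined above:
#   llm.predict(prompt_template, **vars) formats the template (as chat messages
#   when metadata.is_chat_model is True, as a plain completion prompt otherwise),
#   calls the model, and applies the optional output_parser to the result.
#   llm.as_query_component() returns LLMChatComponent or LLMCompleteComponent
#   accordingly, so the LLM can be wired into a query pipeline.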
| [
"llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys",
"llama_index.legacy.core.llms.types.ChatMessage",
"llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable",
"llama_index.legacy.program.utils.get_program_for_llm",
"llama_index.legacy.bridge.pydantic.validator"
] | [((2828, 2891), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""System prompt for LLM calls."""'}), "(default=None, description='System prompt for LLM calls.')\n", (2833, 2891), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((2953, 3090), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Function to convert a list of messages to an LLM prompt."""', 'default': 'generic_messages_to_prompt', 'exclude': '(True)'}), "(description=\n 'Function to convert a list of messages to an LLM prompt.', default=\n generic_messages_to_prompt, exclude=True)\n", (2958, 3090), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((3163, 3290), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Function to convert a completion to an LLM prompt."""', 'default': 'default_completion_to_prompt', 'exclude': '(True)'}), "(description='Function to convert a completion to an LLM prompt.',\n default=default_completion_to_prompt, exclude=True)\n", (3168, 3290), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((3366, 3494), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Output parser to parse, validate, and correct errors programmatically."""', 'default': 'None', 'exclude': '(True)'}), "(description=\n 'Output parser to parse, validate, and correct errors programmatically.',\n default=None, exclude=True)\n", (3371, 3494), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((3669, 3757), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Query wrapper prompt for LLM calls."""', 'default': 'None', 'exclude': '(True)'}), "(description='Query wrapper prompt for LLM calls.', default=None,\n exclude=True)\n", (3674, 3757), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((3791, 3832), 'llama_index.legacy.bridge.pydantic.validator', 'validator', (['"""messages_to_prompt"""'], {'pre': '(True)'}), "('messages_to_prompt', pre=True)\n", (3800, 3832), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((4030, 4073), 'llama_index.legacy.bridge.pydantic.validator', 'validator', (['"""completion_to_prompt"""'], {'pre': '(True)'}), "('completion_to_prompt', pre=True)\n", (4039, 4073), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((11157, 11186), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""LLM"""'}), "(..., description='LLM')\n", (11162, 11186), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((11209, 11259), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Streaming mode"""'}), "(default=False, description='Streaming mode')\n", (11214, 11259), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field, validator\n'), ((6014, 6114), 'llama_index.legacy.program.utils.get_program_for_llm', 'get_program_for_llm', (['output_cls', 'prompt', 'self'], {'pydantic_program_mode': 'self.pydantic_program_mode'}), '(output_cls, prompt, self, pydantic_program_mode=self.\n pydantic_program_mode)\n', (6033, 6114), False, 'from llama_index.legacy.program.utils import get_program_for_llm\n'), ((6461, 6561), 'llama_index.legacy.program.utils.get_program_for_llm', 'get_program_for_llm', 
(['output_cls', 'prompt', 'self'], {'pydantic_program_mode': 'self.pydantic_program_mode'}), '(output_cls, prompt, self, pydantic_program_mode=self.\n pydantic_program_mode)\n', (6480, 6561), False, 'from llama_index.legacy.program.utils import get_program_for_llm\n'), ((13294, 13325), 'llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys', 'InputKeys.from_keys', (["{'prompt'}"], {}), "({'prompt'})\n", (13313, 13325), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((13424, 13456), 'llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (13444, 13456), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((15250, 15283), 'llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys', 'InputKeys.from_keys', (["{'messages'}"], {}), "({'messages'})\n", (15269, 15283), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((15382, 15414), 'llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (15402, 15414), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((11912, 11939), 'typing.get_args', 'get_args', (['List[ChatMessage]'], {}), '(List[ChatMessage])\n', (11920, 11939), False, 'from typing import Any, Dict, List, Optional, Protocol, Sequence, get_args, runtime_checkable\n'), ((12047, 12095), 'llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['prompt']"], {}), "(input['prompt'])\n", (12078, 12095), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((12140, 12188), 'llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['prompt']"], {}), "(input['prompt'])\n", (12171, 12188), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((13890, 13915), 'typing.get_args', 'get_args', (['StringableInput'], {}), '(StringableInput)\n', (13898, 13915), False, 'from typing import Any, Dict, List, Optional, Protocol, Sequence, get_args, runtime_checkable\n'), ((13950, 14000), 'llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['messages']"], {}), "(input['messages'])\n", (13981, 14000), False, 'from llama_index.legacy.core.query_pipeline.query_component import InputKeys, OutputKeys, QueryComponent, StringableInput, validate_and_convert_stringable\n'), ((10661, 10725), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.SYSTEM', 'content': 'self.system_prompt'}), '(role=MessageRole.SYSTEM, content=self.system_prompt)\n', (10672, 10725), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponseAsyncGen, 
ChatResponseGen, CompletionResponseAsyncGen, CompletionResponseGen, MessageRole\n'), ((4449, 4485), 'collections.ChainMap', 'ChainMap', (['prompt.kwargs', 'prompt_args'], {}), '(prompt.kwargs, prompt_args)\n', (4457, 4485), False, 'from collections import ChainMap\n')] |
"""Base reader class."""
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Iterable, List
if TYPE_CHECKING:
from llama_index.core.bridge.langchain import Document as LCDocument
from llama_index.core.bridge.pydantic import Field
from llama_index.core.schema import BaseComponent, Document
class BaseReader(ABC):
"""Utilities for loading data from a directory."""
def lazy_load_data(self, *args: Any, **load_kwargs: Any) -> Iterable[Document]:
"""Load data from the input directory lazily."""
raise NotImplementedError(
f"{self.__class__.__name__} does not provide lazy_load_data method currently"
)
def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
"""Load data from the input directory."""
return list(self.lazy_load_data(*args, **load_kwargs))
def load_langchain_documents(self, **load_kwargs: Any) -> List["LCDocument"]:
"""Load data in LangChain document format."""
docs = self.load_data(**load_kwargs)
return [d.to_langchain_format() for d in docs]
class BasePydanticReader(BaseReader, BaseComponent):
"""Serialiable Data Loader with Pydantic."""
is_remote: bool = Field(
default=False,
description="Whether the data is loaded from a remote API or a local file.",
)
class Config:
arbitrary_types_allowed = True
class ReaderConfig(BaseComponent):
"""Represents a reader and it's input arguments."""
reader: BasePydanticReader = Field(..., description="Reader to use.")
reader_args: List[Any] = Field(default_factory=list, description="Reader args.")
reader_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Reader kwargs."
)
class Config:
arbitrary_types_allowed = True
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "ReaderConfig"
def to_dict(self, **kwargs: Any) -> Dict[str, Any]:
"""Convert the class to a dictionary."""
return {
"loader": self.reader.to_dict(**kwargs),
"reader_args": self.reader_args,
"reader_kwargs": self.reader_kwargs,
"class_name": self.class_name(),
}
def read(self) -> List[Document]:
"""Call the loader with the given arguments."""
return self.reader.load_data(*self.reader_args, **self.reader_kwargs)
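# --- Hedged usage sketch (not part of the original module) ---
# A minimal in-memory reader illustrating how BasePydanticReader subclasses
# plug into ReaderConfig; the class name and "texts" field are assumptions,
# not library API.
class InMemoryReader(BasePydanticReader):
    """Toy reader that wraps a list of raw strings."""
    texts: List[str] = Field(default_factory=list, description="Raw texts to wrap.")
    def lazy_load_data(self, *args: Any, **load_kwargs: Any) -> Iterable[Document]:
        for text in self.texts:
            yield Document(text=text)
# config = ReaderConfig(reader=InMemoryReader(texts=["hello world"]))
# config.read()  # -> [Document(text="hello world")]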
| [
"llama_index.core.bridge.pydantic.Field"
] | [((1219, 1321), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the data is loaded from a remote API or a local file."""'}), "(default=False, description=\n 'Whether the data is loaded from a remote API or a local file.')\n", (1224, 1321), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1525, 1565), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Reader to use."""'}), "(..., description='Reader to use.')\n", (1530, 1565), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1595, 1650), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Reader args."""'}), "(default_factory=list, description='Reader args.')\n", (1600, 1650), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1687, 1744), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Reader kwargs."""'}), "(default_factory=dict, description='Reader kwargs.')\n", (1692, 1744), False, 'from llama_index.core.bridge.pydantic import Field\n')] |
"""Base object types."""
import pickle
import warnings
from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.query_pipeline.query import (
ChainableMixin,
InputKeys,
OutputKeys,
QueryComponent,
validate_and_convert_stringable,
)
from llama_index.core.bridge.pydantic import Field
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.vector_store.base import VectorStoreIndex
from llama_index.core.objects.base_node_mapping import (
DEFAULT_PERSIST_FNAME,
BaseObjectNodeMapping,
SimpleObjectNodeMapping,
)
from llama_index.core.schema import QueryType
from llama_index.core.storage.storage_context import (
DEFAULT_PERSIST_DIR,
StorageContext,
)
OT = TypeVar("OT")
class ObjectRetriever(ChainableMixin, Generic[OT]):
"""Object retriever."""
def __init__(
self, retriever: BaseRetriever, object_node_mapping: BaseObjectNodeMapping[OT]
):
self._retriever = retriever
self._object_node_mapping = object_node_mapping
@property
def retriever(self) -> BaseRetriever:
"""Retriever."""
return self._retriever
def retrieve(self, str_or_query_bundle: QueryType) -> List[OT]:
nodes = self._retriever.retrieve(str_or_query_bundle)
return [self._object_node_mapping.from_node(node.node) for node in nodes]
async def aretrieve(self, str_or_query_bundle: QueryType) -> List[OT]:
nodes = await self._retriever.aretrieve(str_or_query_bundle)
return [self._object_node_mapping.from_node(node.node) for node in nodes]
def _as_query_component(self, **kwargs: Any) -> QueryComponent:
"""As query component."""
return ObjectRetrieverComponent(retriever=self)
class ObjectRetrieverComponent(QueryComponent):
"""Object retriever component."""
retriever: ObjectRetriever = Field(..., description="Retriever.")
class Config:
arbitrary_types_allowed = True
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
self.retriever.retriever.callback_manager = callback_manager
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
# make sure input is a string
input["input"] = validate_and_convert_stringable(input["input"])
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
output = self.retriever.retrieve(kwargs["input"])
return {"output": output}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component (async)."""
output = await self.retriever.aretrieve(kwargs["input"])
return {"output": output}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
return InputKeys.from_keys({"input"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
class ObjectIndex(Generic[OT]):
"""Object index."""
def __init__(
self, index: BaseIndex, object_node_mapping: BaseObjectNodeMapping
) -> None:
self._index = index
self._object_node_mapping = object_node_mapping
@classmethod
def from_objects(
cls,
objects: Sequence[OT],
object_mapping: Optional[BaseObjectNodeMapping] = None,
index_cls: Type[BaseIndex] = VectorStoreIndex,
**index_kwargs: Any,
) -> "ObjectIndex":
if object_mapping is None:
object_mapping = SimpleObjectNodeMapping.from_objects(objects)
nodes = object_mapping.to_nodes(objects)
index = index_cls(nodes, **index_kwargs)
return cls(index, object_mapping)
def insert_object(self, obj: Any) -> None:
self._object_node_mapping.add_object(obj)
node = self._object_node_mapping.to_node(obj)
self._index.insert_nodes([node])
def as_retriever(self, **kwargs: Any) -> ObjectRetriever:
return ObjectRetriever(
retriever=self._index.as_retriever(**kwargs),
object_node_mapping=self._object_node_mapping,
)
def as_node_retriever(self, **kwargs: Any) -> BaseRetriever:
return self._index.as_retriever(**kwargs)
def persist(
self,
persist_dir: str = DEFAULT_PERSIST_DIR,
obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME,
) -> None:
# try to persist object node mapping
try:
self._object_node_mapping.persist(
persist_dir=persist_dir, obj_node_mapping_fname=obj_node_mapping_fname
)
except (NotImplementedError, pickle.PickleError) as err:
warnings.warn(
(
"Unable to persist ObjectNodeMapping. You will need to "
"reconstruct the same object node mapping to build this ObjectIndex"
),
stacklevel=2,
)
self._index._storage_context.persist(persist_dir=persist_dir)
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
object_node_mapping: Optional[BaseObjectNodeMapping] = None,
) -> "ObjectIndex":
from llama_index.core.indices import load_index_from_storage
storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
index = load_index_from_storage(storage_context)
if object_node_mapping:
return cls(index=index, object_node_mapping=object_node_mapping)
else:
# try to load object_node_mapping
# assume SimpleObjectNodeMapping for simplicity as its only subclass
# that supports this method
try:
object_node_mapping = SimpleObjectNodeMapping.from_persist_dir(
persist_dir=persist_dir
)
except Exception as err:
raise Exception(
"Unable to load from persist dir. The object_node_mapping cannot be loaded."
) from err
else:
return cls(index=index, object_node_mapping=object_node_mapping)
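# --- Hedged usage sketch (not part of the original module) ---
# Illustrative flow only; a real run also needs an embedding model configured
# for the underlying VectorStoreIndex.
#   obj_index = ObjectIndex.from_objects(["alpha", "beta"])
#   retriever = obj_index.as_retriever(similarity_top_k=1)
#   retriever.retrieve("alpha")  # -> ["alpha"]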
| [
"llama_index.core.indices.load_index_from_storage",
"llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects",
"llama_index.core.base.query_pipeline.query.InputKeys.from_keys",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.base.query_pipeline.query.validate_and_convert_stringable",
"llama_index.core.storage.storage_context.StorageContext.from_defaults"
] | [((897, 910), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (904, 910), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar\n'), ((2032, 2068), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever."""'}), "(..., description='Retriever.')\n", (2037, 2068), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((2521, 2568), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['input']"], {}), "(input['input'])\n", (2552, 2568), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3055, 3085), 'llama_index.core.base.query_pipeline.query.InputKeys.from_keys', 'InputKeys.from_keys', (["{'input'}"], {}), "({'input'})\n", (3074, 3085), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3184, 3216), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (3204, 3216), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((5570, 5623), 'llama_index.core.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (5598, 5623), False, 'from llama_index.core.storage.storage_context import DEFAULT_PERSIST_DIR, StorageContext\n'), ((5640, 5680), 'llama_index.core.indices.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (5663, 5680), False, 'from llama_index.core.indices import load_index_from_storage\n'), ((3788, 3833), 'llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects', 'SimpleObjectNodeMapping.from_objects', (['objects'], {}), '(objects)\n', (3824, 3833), False, 'from llama_index.core.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n'), ((4944, 5105), 'warnings.warn', 'warnings.warn', (['"""Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex"""'], {'stacklevel': '(2)'}), "(\n 'Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex'\n , stacklevel=2)\n", (4957, 5105), False, 'import warnings\n'), ((6026, 6091), 'llama_index.core.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir', 'SimpleObjectNodeMapping.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (6066, 6091), False, 'from llama_index.core.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n')] |
from typing import Any, Callable, Optional, Sequence
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.core.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import llm_completion_callback
from llama_index.legacy.llms.custom import CustomLLM
from llama_index.legacy.types import PydanticProgramMode
class MockLLM(CustomLLM):
max_tokens: Optional[int]
def __init__(
self,
max_tokens: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
) -> None:
super().__init__(
max_tokens=max_tokens,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
)
@classmethod
def class_name(cls) -> str:
return "MockLLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(num_output=self.max_tokens or -1)
def _generate_text(self, length: int) -> str:
return " ".join(["text" for _ in range(length)])
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
response_text = (
self._generate_text(self.max_tokens) if self.max_tokens else prompt
)
return CompletionResponse(
text=response_text,
)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
def gen_prompt() -> CompletionResponseGen:
for ch in prompt:
yield CompletionResponse(
text=prompt,
delta=ch,
)
def gen_response(max_tokens: int) -> CompletionResponseGen:
for i in range(max_tokens):
response_text = self._generate_text(i)
yield CompletionResponse(
text=response_text,
delta="text ",
)
return gen_response(self.max_tokens) if self.max_tokens else gen_prompt()
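# Usage sketch (illustrative only): MockLLM needs no API key, so it can be
# exercised directly; the prompt string below is an arbitrary placeholder.
if __name__ == "__main__":
    mock = MockLLM(max_tokens=5)
    # With max_tokens set, complete() returns that many "text" tokens.
    print(mock.complete("ignored prompt").text)  # -> "text text text text text"
    # stream_complete() yields chunks whose delta is "text ".
    for chunk in mock.stream_complete("ignored prompt"):
        print(chunk.delta, end="")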
| [
"llama_index.legacy.core.llms.types.CompletionResponse",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.core.llms.types.LLMMetadata"
] | [((1537, 1562), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1560, 1562), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((1876, 1901), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1899, 1901), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((1377, 1422), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'num_output': '(self.max_tokens or -1)'}), '(num_output=self.max_tokens or -1)\n', (1388, 1422), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((1808, 1846), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response_text'}), '(text=response_text)\n', (1826, 1846), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2128, 2169), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'prompt', 'delta': 'ch'}), '(text=prompt, delta=ch)\n', (2146, 2169), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2415, 2468), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response_text', 'delta': '"""text """'}), "(text=response_text, delta='text ')\n", (2433, 2468), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n')] |
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset("MiniCovidQaDataset", "./data")
# BUILD BASIC RAG PIPELINE
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
# EVALUATE WITH PACK
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack")
rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)
############################################################################
    # NOTE: If you have a lower tier OpenAI API subscription like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
benchmark_df = await rag_evaluator.arun(
batch_size=40, # batches the number of openai api calls to make
sleep_time_in_seconds=1, # number of seconds sleep before making an api call
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack"
] | [((265, 319), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MiniCovidQaDataset"""', '"""./data"""'], {}), "('MiniCovidQaDataset', './data')\n", (287, 319), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((364, 416), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (395, 416), False, 'from llama_index.core import VectorStoreIndex\n'), ((509, 558), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack"""'], {}), "('RagEvaluatorPack', './pack')\n", (528, 558), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1409, 1433), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1431, 1433), False, 'import asyncio\n')] |
"""Palm API."""
import os
from typing import Any, Callable, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS
from llama_index.legacy.core.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import llm_completion_callback
from llama_index.legacy.llms.custom import CustomLLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
DEFAULT_PALM_MODEL = "models/text-bison-001"
class PaLM(CustomLLM):
"""PaLM LLM."""
model_name: str = Field(
default=DEFAULT_PALM_MODEL, description="The PaLM model to use."
)
num_output: int = Field(
default=DEFAULT_NUM_OUTPUTS,
description="The number of tokens to generate.",
gt=0,
)
generate_kwargs: dict = Field(
default_factory=dict, description="Kwargs for generation."
)
_model: Any = PrivateAttr()
def __init__(
self,
api_key: Optional[str] = None,
model_name: Optional[str] = DEFAULT_PALM_MODEL,
num_output: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**generate_kwargs: Any,
) -> None:
"""Initialize params."""
try:
import google.generativeai as palm
except ImportError:
raise ValueError(
"PaLM is not installed. "
"Please install it with `pip install google-generativeai`."
)
api_key = api_key or os.environ.get("PALM_API_KEY")
palm.configure(api_key=api_key)
models = palm.list_models()
models_dict = {m.name: m for m in models}
if model_name not in models_dict:
raise ValueError(
f"Model name {model_name} not found in {models_dict.keys()}"
)
self._model = models_dict[model_name]
# get num_output
num_output = num_output or self._model.output_token_limit
generate_kwargs = generate_kwargs or {}
super().__init__(
model_name=model_name,
num_output=num_output,
generate_kwargs=generate_kwargs,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "PaLM_llm"
@property
def metadata(self) -> LLMMetadata:
"""Get LLM metadata."""
# TODO: google palm actually separates input and output token limits
total_tokens = self._model.input_token_limit + self.num_output
return LLMMetadata(
context_window=total_tokens,
num_output=self.num_output,
model_name=self.model_name,
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
"""Predict the answer to a query.
Args:
prompt (str): Prompt to use for prediction.
Returns:
Tuple[str, str]: Tuple of the predicted answer and the formatted prompt.
"""
import google.generativeai as palm
completion = palm.generate_text(
model=self.model_name,
prompt=prompt,
**kwargs,
)
return CompletionResponse(text=completion.result, raw=completion.candidates[0])
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
"""Stream the answer to a query.
NOTE: this is a beta feature. Will try to build or use
better abstractions about response handling.
Args:
prompt (str): Prompt to use for prediction.
Returns:
str: The predicted answer.
"""
raise NotImplementedError(
"PaLM does not support streaming completion in LlamaIndex currently."
)
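# Usage sketch (illustrative only): assumes the `google-generativeai` package is
# installed and a valid PALM_API_KEY is set in the environment; the prompt is a
# made-up example.
if __name__ == "__main__":
    palm_llm = PaLM()  # defaults to models/text-bison-001, key from PALM_API_KEY
    print(palm_llm.metadata.context_window)
    print(palm_llm.complete("Write a one-line greeting.").text)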
| [
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.core.llms.types.CompletionResponse"
] | [((708, 779), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_PALM_MODEL', 'description': '"""The PaLM model to use."""'}), "(default=DEFAULT_PALM_MODEL, description='The PaLM model to use.')\n", (713, 779), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((816, 910), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_NUM_OUTPUTS', 'description': '"""The number of tokens to generate."""', 'gt': '(0)'}), "(default=DEFAULT_NUM_OUTPUTS, description=\n 'The number of tokens to generate.', gt=0)\n", (821, 910), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((965, 1030), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Kwargs for generation."""'}), "(default_factory=dict, description='Kwargs for generation.')\n", (970, 1030), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1064, 1077), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1075, 1077), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3465, 3490), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3488, 3490), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((4106, 4131), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (4129, 4131), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((2045, 2076), 'google.generativeai.configure', 'palm.configure', ([], {'api_key': 'api_key'}), '(api_key=api_key)\n', (2059, 2076), True, 'import google.generativeai as palm\n'), ((2095, 2113), 'google.generativeai.list_models', 'palm.list_models', ([], {}), '()\n', (2111, 2113), True, 'import google.generativeai as palm\n'), ((3315, 3415), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'total_tokens', 'num_output': 'self.num_output', 'model_name': 'self.model_name'}), '(context_window=total_tokens, num_output=self.num_output,\n model_name=self.model_name)\n', (3326, 3415), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((3898, 3964), 'google.generativeai.generate_text', 'palm.generate_text', ([], {'model': 'self.model_name', 'prompt': 'prompt'}), '(model=self.model_name, prompt=prompt, **kwargs)\n', (3916, 3964), True, 'import google.generativeai as palm\n'), ((4027, 4099), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'completion.result', 'raw': 'completion.candidates[0]'}), '(text=completion.result, raw=completion.candidates[0])\n', (4045, 4099), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2006, 2036), 'os.environ.get', 'os.environ.get', (['"""PALM_API_KEY"""'], {}), "('PALM_API_KEY')\n", (2020, 2036), False, 'import os\n')] |
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.ai21_utils import ai21_model_to_context_size
from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.legacy.llms.custom import CustomLLM
from llama_index.legacy.llms.generic_utils import (
completion_to_chat_decorator,
get_from_param_or_env,
)
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
class AI21(CustomLLM):
"""AI21 Labs LLM."""
model: str = Field(description="The AI21 model to use.")
maxTokens: int = Field(description="The maximum number of tokens to generate.")
temperature: float = Field(description="The temperature to use for sampling.")
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the anthropic API."
)
_api_key = PrivateAttr()
def __init__(
self,
api_key: Optional[str] = None,
model: Optional[str] = "j2-mid",
maxTokens: Optional[int] = 512,
temperature: Optional[float] = 0.1,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
"""Initialize params."""
try:
import ai21 as _ # noqa
except ImportError as e:
raise ImportError(
"You must install the `ai21` package to use AI21."
"Please `pip install ai21`"
) from e
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
api_key = get_from_param_or_env("api_key", api_key, "AI21_API_KEY")
self._api_key = api_key
super().__init__(
model=model,
maxTokens=maxTokens,
temperature=temperature,
additional_kwargs=additional_kwargs,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
    def class_name(cls) -> str:
"""Get Class Name."""
return "AI21_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=ai21_model_to_context_size(self.model),
num_output=self.maxTokens,
model_name=self.model,
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"maxTokens": self.maxTokens,
"temperature": self.temperature,
}
return {**base_kwargs, **self.additional_kwargs}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
import ai21
ai21.api_key = self._api_key
response = ai21.Completion.execute(**all_kwargs, prompt=prompt)
return CompletionResponse(
text=response["completions"][0]["data"]["text"], raw=response.__dict__
)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError(
"AI21 does not currently support streaming completion."
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
chat_fn = completion_to_chat_decorator(self.complete)
return chat_fn(messages, **all_kwargs)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
raise NotImplementedError("AI21 does not Currently Support Streaming Chat.")
| [
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.llms.generic_utils.get_from_param_or_env",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.callbacks.CallbackManager",
"llama_index.legacy.llms.ai21_utils.ai21_model_to_context_size",
"llama_index.legacy.core.llms.types.CompletionResponse",
"llama_index.legacy.llms.generic_utils.completion_to_chat_decorator"
] | [((827, 870), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The AI21 model to use."""'}), "(description='The AI21 model to use.')\n", (832, 870), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((892, 954), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (897, 954), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((980, 1037), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (985, 1037), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1079, 1167), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the anthropic API."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the anthropic API.')\n", (1084, 1167), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1193, 1206), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1204, 1206), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3625, 3650), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3648, 3650), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4083, 4108), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (4106, 4108), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4351, 4370), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (4368, 4370), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4623, 4642), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (4640, 4642), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2296, 2353), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_key"""', 'api_key', '"""AI21_API_KEY"""'], {}), "('api_key', api_key, 'AI21_API_KEY')\n", (2317, 2353), False, 'from llama_index.legacy.llms.generic_utils import completion_to_chat_decorator, get_from_param_or_env\n'), ((3895, 3947), 'ai21.Completion.execute', 'ai21.Completion.execute', ([], {'prompt': 'prompt'}), '(**all_kwargs, prompt=prompt)\n', (3918, 3947), False, 'import ai21\n'), ((3964, 4059), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': "response['completions'][0]['data']['text']", 'raw': 'response.__dict__'}), "(text=response['completions'][0]['data']['text'], raw=\n response.__dict__)\n", (3982, 4059), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((4525, 4568), 'llama_index.legacy.llms.generic_utils.completion_to_chat_decorator', 'completion_to_chat_decorator', (['self.complete'], {}), '(self.complete)\n', (4553, 4568), False, 'from llama_index.legacy.llms.generic_utils import completion_to_chat_decorator, get_from_param_or_env\n'), ((2257, 2276), 
'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2272, 2276), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((3075, 3113), 'llama_index.legacy.llms.ai21_utils.ai21_model_to_context_size', 'ai21_model_to_context_size', (['self.model'], {}), '(self.model)\n', (3101, 3113), False, 'from llama_index.legacy.llms.ai21_utils import ai21_model_to_context_size\n')] |
import json
from typing import Any, Callable, Dict, List, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.legacy.llms.generic_utils import (
completion_response_to_chat_response,
stream_completion_response_to_chat_response,
)
from llama_index.legacy.llms.generic_utils import (
messages_to_prompt as generic_messages_to_prompt,
)
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.llms.vllm_utils import get_response, post_http_request
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
class Vllm(LLM):
model: Optional[str] = Field(description="The HuggingFace Model to use.")
temperature: float = Field(description="The temperature to use for sampling.")
tensor_parallel_size: Optional[int] = Field(
default=1,
description="The number of GPUs to use for distributed execution with tensor parallelism.",
)
trust_remote_code: Optional[bool] = Field(
default=True,
description="Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer.",
)
n: int = Field(
default=1,
description="Number of output sequences to return for the given prompt.",
)
best_of: Optional[int] = Field(
default=None,
description="Number of output sequences that are generated from the prompt.",
)
presence_penalty: float = Field(
default=0.0,
description="Float that penalizes new tokens based on whether they appear in the generated text so far.",
)
frequency_penalty: float = Field(
default=0.0,
description="Float that penalizes new tokens based on their frequency in the generated text so far.",
)
top_p: float = Field(
default=1.0,
description="Float that controls the cumulative probability of the top tokens to consider.",
)
top_k: int = Field(
default=-1,
description="Integer that controls the number of top tokens to consider.",
)
use_beam_search: bool = Field(
default=False, description="Whether to use beam search instead of sampling."
)
stop: Optional[List[str]] = Field(
default=None,
description="List of strings that stop the generation when they are generated.",
)
ignore_eos: bool = Field(
default=False,
description="Whether to ignore the EOS token and continue generating tokens after the EOS token is generated.",
)
max_new_tokens: int = Field(
default=512,
description="Maximum number of tokens to generate per output sequence.",
)
logprobs: Optional[int] = Field(
default=None,
description="Number of log probabilities to return per output token.",
)
dtype: str = Field(
default="auto",
description="The data type for the model weights and activations.",
)
download_dir: Optional[str] = Field(
default=None,
description="Directory to download and load the weights. (Default to the default cache dir of huggingface)",
)
vllm_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Holds any model parameters valid for `vllm.LLM` call not explicitly specified.",
)
api_url: str = Field(description="The api url for vllm server")
_client: Any = PrivateAttr()
def __init__(
self,
model: str = "facebook/opt-125m",
temperature: float = 1.0,
tensor_parallel_size: int = 1,
trust_remote_code: bool = True,
n: int = 1,
best_of: Optional[int] = None,
presence_penalty: float = 0.0,
frequency_penalty: float = 0.0,
top_p: float = 1.0,
top_k: int = -1,
use_beam_search: bool = False,
stop: Optional[List[str]] = None,
ignore_eos: bool = False,
max_new_tokens: int = 512,
logprobs: Optional[int] = None,
dtype: str = "auto",
download_dir: Optional[str] = None,
vllm_kwargs: Dict[str, Any] = {},
api_url: Optional[str] = "",
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
try:
from vllm import LLM as VLLModel
except ImportError:
raise ImportError(
"Could not import vllm python package. "
"Please install it with `pip install vllm`."
)
if model != "":
self._client = VLLModel(
model=model,
tensor_parallel_size=tensor_parallel_size,
trust_remote_code=trust_remote_code,
dtype=dtype,
download_dir=download_dir,
**vllm_kwargs
)
else:
self._client = None
callback_manager = callback_manager or CallbackManager([])
super().__init__(
model=model,
temperature=temperature,
n=n,
best_of=best_of,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
top_p=top_p,
top_k=top_k,
use_beam_search=use_beam_search,
stop=stop,
ignore_eos=ignore_eos,
max_new_tokens=max_new_tokens,
logprobs=logprobs,
dtype=dtype,
download_dir=download_dir,
vllm_kwargs=vllm_kwargs,
api_url=api_url,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "Vllm"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(model_name=self.model)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"temperature": self.temperature,
"max_tokens": self.max_new_tokens,
"n": self.n,
"frequency_penalty": self.frequency_penalty,
"presence_penalty": self.presence_penalty,
"use_beam_search": self.use_beam_search,
"best_of": self.best_of,
"ignore_eos": self.ignore_eos,
"stop": self.stop,
"logprobs": self.logprobs,
"top_k": self.top_k,
"top_p": self.top_p,
"stop": self.stop,
}
return {**base_kwargs}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
kwargs = kwargs if kwargs else {}
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, **kwargs)
return completion_response_to_chat_response(completion_response)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
kwargs = kwargs if kwargs else {}
params = {**self._model_kwargs, **kwargs}
from vllm import SamplingParams
# build sampling parameters
sampling_params = SamplingParams(**params)
outputs = self._client.generate([prompt], sampling_params)
return CompletionResponse(text=outputs[0].outputs[0].text)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
raise (ValueError("Not Implemented"))
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise (ValueError("Not Implemented"))
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
kwargs = kwargs if kwargs else {}
return self.chat(messages, **kwargs)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
raise (ValueError("Not Implemented"))
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
raise (ValueError("Not Implemented"))
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise (ValueError("Not Implemented"))
class VllmServer(Vllm):
def __init__(
self,
model: str = "facebook/opt-125m",
api_url: str = "http://localhost:8000",
temperature: float = 1.0,
tensor_parallel_size: Optional[int] = 1,
trust_remote_code: Optional[bool] = True,
n: int = 1,
best_of: Optional[int] = None,
presence_penalty: float = 0.0,
frequency_penalty: float = 0.0,
top_p: float = 1.0,
top_k: int = -1,
use_beam_search: bool = False,
stop: Optional[List[str]] = None,
ignore_eos: bool = False,
max_new_tokens: int = 512,
logprobs: Optional[int] = None,
dtype: str = "auto",
download_dir: Optional[str] = None,
messages_to_prompt: Optional[Callable] = None,
completion_to_prompt: Optional[Callable] = None,
vllm_kwargs: Dict[str, Any] = {},
callback_manager: Optional[CallbackManager] = None,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
self._client = None
messages_to_prompt = messages_to_prompt or generic_messages_to_prompt
completion_to_prompt = completion_to_prompt or (lambda x: x)
callback_manager = callback_manager or CallbackManager([])
model = ""
super().__init__(
model=model,
temperature=temperature,
n=n,
best_of=best_of,
presence_penalty=presence_penalty,
frequency_penalty=frequency_penalty,
top_p=top_p,
top_k=top_k,
use_beam_search=use_beam_search,
stop=stop,
ignore_eos=ignore_eos,
max_new_tokens=max_new_tokens,
logprobs=logprobs,
dtype=dtype,
download_dir=download_dir,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
vllm_kwargs=vllm_kwargs,
api_url=api_url,
callback_manager=callback_manager,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "VllmServer"
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
    ) -> CompletionResponse:
kwargs = kwargs if kwargs else {}
params = {**self._model_kwargs, **kwargs}
from vllm import SamplingParams
# build sampling parameters
sampling_params = SamplingParams(**params).__dict__
sampling_params["prompt"] = prompt
response = post_http_request(self.api_url, sampling_params, stream=False)
output = get_response(response)
return CompletionResponse(text=output[0])
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
kwargs = kwargs if kwargs else {}
params = {**self._model_kwargs, **kwargs}
from vllm import SamplingParams
# build sampling parameters
sampling_params = SamplingParams(**params).__dict__
sampling_params["prompt"] = prompt
response = post_http_request(self.api_url, sampling_params, stream=True)
def gen() -> CompletionResponseGen:
for chunk in response.iter_lines(
chunk_size=8192, decode_unicode=False, delimiter=b"\0"
):
if chunk:
data = json.loads(chunk.decode("utf-8"))
yield CompletionResponse(text=data["text"][0])
return gen()
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
kwargs = kwargs if kwargs else {}
return self.complete(prompt, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
kwargs = kwargs if kwargs else {}
params = {**self._model_kwargs, **kwargs}
from vllm import SamplingParams
# build sampling parameters
sampling_params = SamplingParams(**params).__dict__
sampling_params["prompt"] = prompt
async def gen() -> CompletionResponseAsyncGen:
for message in self.stream_complete(prompt, **kwargs):
yield message
return gen()
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
prompt = self.messages_to_prompt(messages)
completion_response = self.stream_complete(prompt, **kwargs)
return stream_completion_response_to_chat_response(completion_response)
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
return self.stream_chat(messages, **kwargs)
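# Usage sketch (illustrative only): Vllm runs the model in-process (requires the
# `vllm` package and a GPU), while VllmServer posts to an already running vLLM
# API server. The URL below is just the class default; adjust it to wherever
# your server actually listens.
if __name__ == "__main__":
    remote_llm = VllmServer(api_url="http://localhost:8000", max_new_tokens=64)
    print(remote_llm.complete("San Francisco is a").text)
    # In-process alternative (downloads the model on first use):
    # local_llm = Vllm(model="facebook/opt-125m", max_new_tokens=64)
    # print(local_llm.complete("San Francisco is a").text)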
| [
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.llms.vllm_utils.get_response",
"llama_index.legacy.llms.generic_utils.completion_response_to_chat_response",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.callbacks.CallbackManager",
"llama_index.legacy.core.llms.types.CompletionResponse",
"llama_index.legacy.llms.vllm_utils.post_http_request"
] | [((1015, 1065), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The HuggingFace Model to use."""'}), "(description='The HuggingFace Model to use.')\n", (1020, 1065), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1092, 1149), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (1097, 1149), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1193, 1311), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(1)', 'description': '"""The number of GPUs to use for distributed execution with tensor parallelism."""'}), "(default=1, description=\n 'The number of GPUs to use for distributed execution with tensor parallelism.'\n )\n", (1198, 1311), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1366, 1495), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer."""'}), "(default=True, description=\n 'Trust remote code (e.g., from HuggingFace) when downloading the model and tokenizer.'\n )\n", (1371, 1495), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1523, 1618), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(1)', 'description': '"""Number of output sequences to return for the given prompt."""'}), "(default=1, description=\n 'Number of output sequences to return for the given prompt.')\n", (1528, 1618), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1667, 1769), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Number of output sequences that are generated from the prompt."""'}), "(default=None, description=\n 'Number of output sequences that are generated from the prompt.')\n", (1672, 1769), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1819, 1953), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(0.0)', 'description': '"""Float that penalizes new tokens based on whether they appear in the generated text so far."""'}), "(default=0.0, description=\n 'Float that penalizes new tokens based on whether they appear in the generated text so far.'\n )\n", (1824, 1953), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1999, 2129), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(0.0)', 'description': '"""Float that penalizes new tokens based on their frequency in the generated text so far."""'}), "(default=0.0, description=\n 'Float that penalizes new tokens based on their frequency in the generated text so far.'\n )\n", (2004, 2129), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2163, 2284), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(1.0)', 'description': '"""Float that controls the cumulative probability of the top tokens to consider."""'}), "(default=1.0, description=\n 'Float that controls the cumulative probability of the top tokens to consider.'\n )\n", (2168, 2284), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2316, 2413), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(-1)', 'description': '"""Integer that controls the number of 
top tokens to consider."""'}), "(default=-1, description=\n 'Integer that controls the number of top tokens to consider.')\n", (2321, 2413), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2461, 2549), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to use beam search instead of sampling."""'}), "(default=False, description=\n 'Whether to use beam search instead of sampling.')\n", (2466, 2549), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2592, 2697), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""List of strings that stop the generation when they are generated."""'}), "(default=None, description=\n 'List of strings that stop the generation when they are generated.')\n", (2597, 2697), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2740, 2882), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to ignore the EOS token and continue generating tokens after the EOS token is generated."""'}), "(default=False, description=\n 'Whether to ignore the EOS token and continue generating tokens after the EOS token is generated.'\n )\n", (2745, 2882), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2923, 3019), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(512)', 'description': '"""Maximum number of tokens to generate per output sequence."""'}), "(default=512, description=\n 'Maximum number of tokens to generate per output sequence.')\n", (2928, 3019), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3069, 3164), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Number of log probabilities to return per output token."""'}), "(default=None, description=\n 'Number of log probabilities to return per output token.')\n", (3074, 3164), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3201, 3295), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '"""auto"""', 'description': '"""The data type for the model weights and activations."""'}), "(default='auto', description=\n 'The data type for the model weights and activations.')\n", (3206, 3295), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3349, 3487), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Directory to download and load the weights. (Default to the default cache dir of huggingface)"""'}), "(default=None, description=\n 'Directory to download and load the weights. 
(Default to the default cache dir of huggingface)'\n )\n", (3354, 3487), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3536, 3667), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Holds any model parameters valid for `vllm.LLM` call not explicitly specified."""'}), "(default_factory=dict, description=\n 'Holds any model parameters valid for `vllm.LLM` call not explicitly specified.'\n )\n", (3541, 3667), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3701, 3749), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The api url for vllm server"""'}), "(description='The api url for vllm server')\n", (3706, 3749), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3770, 3783), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (3781, 3783), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((7427, 7446), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (7444, 7446), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((7765, 7790), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7788, 7790), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8265, 8284), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (8282, 8284), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8445, 8470), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8468, 8470), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8646, 8665), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (8663, 8665), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8864, 8889), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8887, 8889), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9062, 9081), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9079, 9081), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9254, 9279), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (9277, 9279), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((11620, 11645), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (11643, 11645), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((12217, 12242), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (12240, 12242), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((13080, 13105), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (13103, 13105), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((13321, 13346), 
'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (13344, 13346), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((13936, 13955), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (13953, 13955), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((14270, 14289), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (14287, 14289), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6582, 6616), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'model_name': 'self.model'}), '(model_name=self.model)\n', (6593, 6616), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((7701, 7758), 'llama_index.legacy.llms.generic_utils.completion_response_to_chat_response', 'completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (7737, 7758), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((8100, 8124), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (8114, 8124), False, 'from vllm import SamplingParams\n'), ((8207, 8258), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'outputs[0].outputs[0].text'}), '(text=outputs[0].outputs[0].text)\n', (8225, 8258), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((12057, 12119), 'llama_index.legacy.llms.vllm_utils.post_http_request', 'post_http_request', (['self.api_url', 'sampling_params'], {'stream': '(False)'}), '(self.api_url, sampling_params, stream=False)\n', (12074, 12119), False, 'from llama_index.legacy.llms.vllm_utils import get_response, post_http_request\n'), ((12137, 12159), 'llama_index.legacy.llms.vllm_utils.get_response', 'get_response', (['response'], {}), '(response)\n', (12149, 12159), False, 'from llama_index.legacy.llms.vllm_utils import get_response, post_http_request\n'), ((12176, 12210), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'output[0]'}), '(text=output[0])\n', (12194, 12210), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((12658, 12719), 'llama_index.legacy.llms.vllm_utils.post_http_request', 'post_http_request', (['self.api_url', 'sampling_params'], {'stream': '(True)'}), '(self.api_url, sampling_params, stream=True)\n', (12675, 12719), False, 'from llama_index.legacy.llms.vllm_utils import get_response, post_http_request\n'), ((14199, 14263), 'llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response', 'stream_completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (14242, 14263), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((5219, 5384), 'vllm.LLM', 'VLLModel', ([], 
{'model': 'model', 'tensor_parallel_size': 'tensor_parallel_size', 'trust_remote_code': 'trust_remote_code', 'dtype': 'dtype', 'download_dir': 'download_dir'}), '(model=model, tensor_parallel_size=tensor_parallel_size,\n trust_remote_code=trust_remote_code, dtype=dtype, download_dir=\n download_dir, **vllm_kwargs)\n', (5227, 5384), True, 'from vllm import LLM as VLLModel\n'), ((5579, 5598), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (5594, 5598), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((10705, 10724), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (10720, 10724), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((11961, 11985), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (11975, 11985), False, 'from vllm import SamplingParams\n'), ((12562, 12586), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (12576, 12586), False, 'from vllm import SamplingParams\n'), ((13678, 13702), 'vllm.SamplingParams', 'SamplingParams', ([], {}), '(**params)\n', (13692, 13702), False, 'from vllm import SamplingParams\n'), ((13011, 13051), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': "data['text'][0]"}), "(text=data['text'][0])\n", (13029, 13051), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n')] |
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset("Llama2PaperDataset", "./data")
# BUILD BASIC RAG PIPELINE
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
# EVALUATE WITH PACK
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack_stuff")
rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)
############################################################################
    # NOTE: If you have a lower tier OpenAI API subscription like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
benchmark_df = await rag_evaluator.arun(
batch_size=20, # batches the number of openai api calls to make
sleep_time_in_seconds=1, # number of seconds sleep before making an api call
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack"
] | [((265, 319), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""Llama2PaperDataset"""', '"""./data"""'], {}), "('Llama2PaperDataset', './data')\n", (287, 319), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((364, 416), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (395, 416), False, 'from llama_index.core import VectorStoreIndex\n'), ((509, 564), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack_stuff"""'], {}), "('RagEvaluatorPack', './pack_stuff')\n", (528, 564), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1415, 1439), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1437, 1439), False, 'import asyncio\n')] |
"""Prompts."""
from abc import ABC, abstractmethod
from copy import deepcopy
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
)
from llama_index.core.bridge.pydantic import Field
if TYPE_CHECKING:
from llama_index.core.bridge.langchain import (
BasePromptTemplate as LangchainTemplate,
)
# pants: no-infer-dep
from llama_index.core.bridge.langchain import (
ConditionalPromptSelector as LangchainSelector,
)
from llama_index.core.base.llms.types import ChatMessage
from llama_index.core.base.query_pipeline.query import (
ChainableMixin,
InputKeys,
OutputKeys,
QueryComponent,
validate_and_convert_stringable,
)
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.base.llms.base import BaseLLM
from llama_index.core.base.llms.generic_utils import (
messages_to_prompt as default_messages_to_prompt,
)
from llama_index.core.base.llms.generic_utils import (
prompt_to_messages,
)
from llama_index.core.prompts.prompt_type import PromptType
from llama_index.core.prompts.utils import get_template_vars
from llama_index.core.types import BaseOutputParser
class BasePromptTemplate(ChainableMixin, BaseModel, ABC):
metadata: Dict[str, Any]
template_vars: List[str]
kwargs: Dict[str, str]
output_parser: Optional[BaseOutputParser]
template_var_mappings: Optional[Dict[str, Any]] = Field(
default_factory=dict, description="Template variable mappings (Optional)."
)
function_mappings: Optional[Dict[str, Callable]] = Field(
default_factory=dict,
description=(
"Function mappings (Optional). This is a mapping from template "
"variable names to functions that take in the current kwargs and "
"return a string."
),
)
def _map_template_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""For keys in template_var_mappings, swap in the right keys."""
template_var_mappings = self.template_var_mappings or {}
return {template_var_mappings.get(k, k): v for k, v in kwargs.items()}
def _map_function_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""For keys in function_mappings, compute values and combine w/ kwargs.
Users can pass in functions instead of fixed values as format variables.
For each function, we call the function with the current kwargs,
get back the value, and then use that value in the template
for the corresponding format variable.
"""
function_mappings = self.function_mappings or {}
# first generate the values for the functions
new_kwargs = {}
for k, v in function_mappings.items():
# TODO: figure out what variables to pass into each function
# is it the kwargs specified during query time? just the fixed kwargs?
# all kwargs?
new_kwargs[k] = v(**kwargs)
# then, add the fixed variables only if not in new_kwargs already
# (implying that function mapping will override fixed variables)
for k, v in kwargs.items():
if k not in new_kwargs:
new_kwargs[k] = v
return new_kwargs
def _map_all_vars(self, kwargs: Dict[str, Any]) -> Dict[str, Any]:
"""Map both template and function variables.
We (1) first call function mappings to compute functions,
and then (2) call the template_var_mappings.
"""
# map function
new_kwargs = self._map_function_vars(kwargs)
# map template vars (to point to existing format vars in string template)
return self._map_template_vars(new_kwargs)
class Config:
arbitrary_types_allowed = True
@abstractmethod
def partial_format(self, **kwargs: Any) -> "BasePromptTemplate":
...
@abstractmethod
def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str:
...
@abstractmethod
def format_messages(
self, llm: Optional[BaseLLM] = None, **kwargs: Any
) -> List[ChatMessage]:
...
@abstractmethod
def get_template(self, llm: Optional[BaseLLM] = None) -> str:
...
def _as_query_component(
self, llm: Optional[BaseLLM] = None, **kwargs: Any
) -> QueryComponent:
"""As query component."""
return PromptComponent(prompt=self, format_messages=False, llm=llm)
class PromptTemplate(BasePromptTemplate):
template: str
def __init__(
self,
template: str,
prompt_type: str = PromptType.CUSTOM,
output_parser: Optional[BaseOutputParser] = None,
metadata: Optional[Dict[str, Any]] = None,
template_var_mappings: Optional[Dict[str, Any]] = None,
function_mappings: Optional[Dict[str, Callable]] = None,
**kwargs: Any,
) -> None:
if metadata is None:
metadata = {}
metadata["prompt_type"] = prompt_type
template_vars = get_template_vars(template)
super().__init__(
template=template,
template_vars=template_vars,
kwargs=kwargs,
metadata=metadata,
output_parser=output_parser,
template_var_mappings=template_var_mappings,
function_mappings=function_mappings,
)
def partial_format(self, **kwargs: Any) -> "PromptTemplate":
"""Partially format the prompt."""
# NOTE: this is a hack to get around deepcopy failing on output parser
output_parser = self.output_parser
self.output_parser = None
# get function and fixed kwargs, and add that to a copy
# of the current prompt object
prompt = deepcopy(self)
prompt.kwargs.update(kwargs)
# NOTE: put the output parser back
prompt.output_parser = output_parser
self.output_parser = output_parser
return prompt
def format(
self,
llm: Optional[BaseLLM] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
**kwargs: Any,
) -> str:
"""Format the prompt into a string."""
del llm # unused
all_kwargs = {
**self.kwargs,
**kwargs,
}
mapped_all_kwargs = self._map_all_vars(all_kwargs)
prompt = self.template.format(**mapped_all_kwargs)
if self.output_parser is not None:
prompt = self.output_parser.format(prompt)
if completion_to_prompt is not None:
prompt = completion_to_prompt(prompt)
return prompt
def format_messages(
self, llm: Optional[BaseLLM] = None, **kwargs: Any
) -> List[ChatMessage]:
"""Format the prompt into a list of chat messages."""
del llm # unused
prompt = self.format(**kwargs)
return prompt_to_messages(prompt)
def get_template(self, llm: Optional[BaseLLM] = None) -> str:
return self.template
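# --- Illustrative usage sketch (not part of the original module). ---
# A minimal example of how PromptTemplate and its template_var_mappings might
# be used; the template text, variable names, and values below are hypothetical.
def _example_prompt_template_usage() -> str:
    qa_prompt = PromptTemplate(
        "Context: {my_context}\nQuestion: {my_query}\nAnswer:",
        # callers pass context_str/query_str; map them onto the template variables
        template_var_mappings={"context_str": "my_context", "query_str": "my_query"},
    )
    # pin the context now, supply the query at format time
    partial_prompt = qa_prompt.partial_format(context_str="LlamaIndex is a data framework.")
    return partial_prompt.format(query_str="What is LlamaIndex?")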
class ChatPromptTemplate(BasePromptTemplate):
message_templates: List[ChatMessage]
def __init__(
self,
message_templates: List[ChatMessage],
prompt_type: str = PromptType.CUSTOM,
output_parser: Optional[BaseOutputParser] = None,
metadata: Optional[Dict[str, Any]] = None,
template_var_mappings: Optional[Dict[str, Any]] = None,
function_mappings: Optional[Dict[str, Callable]] = None,
**kwargs: Any,
):
if metadata is None:
metadata = {}
metadata["prompt_type"] = prompt_type
template_vars = []
for message_template in message_templates:
template_vars.extend(get_template_vars(message_template.content or ""))
super().__init__(
message_templates=message_templates,
kwargs=kwargs,
metadata=metadata,
output_parser=output_parser,
template_vars=template_vars,
template_var_mappings=template_var_mappings,
function_mappings=function_mappings,
)
@classmethod
def from_messages(
cls,
message_templates: Union[List[Tuple[str, str]], List[ChatMessage]],
**kwargs: Any,
) -> "ChatPromptTemplate":
"""From messages."""
if isinstance(message_templates[0], tuple):
message_templates = [
ChatMessage.from_str(role=role, content=content)
for role, content in message_templates
]
return cls(message_templates=message_templates, **kwargs)
def partial_format(self, **kwargs: Any) -> "ChatPromptTemplate":
prompt = deepcopy(self)
prompt.kwargs.update(kwargs)
return prompt
def format(
self,
llm: Optional[BaseLLM] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
**kwargs: Any,
) -> str:
del llm # unused
messages = self.format_messages(**kwargs)
if messages_to_prompt is not None:
return messages_to_prompt(messages)
return default_messages_to_prompt(messages)
def format_messages(
self, llm: Optional[BaseLLM] = None, **kwargs: Any
) -> List[ChatMessage]:
del llm # unused
"""Format the prompt into a list of chat messages."""
all_kwargs = {
**self.kwargs,
**kwargs,
}
mapped_all_kwargs = self._map_all_vars(all_kwargs)
messages: List[ChatMessage] = []
for message_template in self.message_templates:
template_vars = get_template_vars(message_template.content or "")
relevant_kwargs = {
k: v for k, v in mapped_all_kwargs.items() if k in template_vars
}
content_template = message_template.content or ""
# if there's mappings specified, make sure those are used
content = content_template.format(**relevant_kwargs)
message: ChatMessage = message_template.copy()
message.content = content
messages.append(message)
if self.output_parser is not None:
messages = self.output_parser.format_messages(messages)
return messages
def get_template(self, llm: Optional[BaseLLM] = None) -> str:
return default_messages_to_prompt(self.message_templates)
def _as_query_component(
self, llm: Optional[BaseLLM] = None, **kwargs: Any
) -> QueryComponent:
"""As query component."""
return PromptComponent(prompt=self, format_messages=True, llm=llm)
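# --- Illustrative usage sketch (not part of the original module). ---
# A minimal example of building a ChatPromptTemplate from (role, content)
# tuples; the roles and message text below are hypothetical.
def _example_chat_prompt_template() -> "ChatPromptTemplate":
    chat_prompt = ChatPromptTemplate.from_messages(
        [
            ("system", "You are a helpful assistant that talks about {topic}."),
            ("user", "{question}"),
        ]
    )
    # partial_format pins `topic`; `question` is supplied later via format_messages()
    return chat_prompt.partial_format(topic="prompt templates")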
class SelectorPromptTemplate(BasePromptTemplate):
default_template: BasePromptTemplate
conditionals: Optional[
List[Tuple[Callable[[BaseLLM], bool], BasePromptTemplate]]
] = None
def __init__(
self,
default_template: BasePromptTemplate,
conditionals: Optional[
List[Tuple[Callable[[BaseLLM], bool], BasePromptTemplate]]
] = None,
):
metadata = default_template.metadata
kwargs = default_template.kwargs
template_vars = default_template.template_vars
output_parser = default_template.output_parser
super().__init__(
default_template=default_template,
conditionals=conditionals,
metadata=metadata,
kwargs=kwargs,
template_vars=template_vars,
output_parser=output_parser,
)
def select(self, llm: Optional[BaseLLM] = None) -> BasePromptTemplate:
# ensure output parser is up to date
self.default_template.output_parser = self.output_parser
if llm is None:
return self.default_template
if self.conditionals is not None:
for condition, prompt in self.conditionals:
if condition(llm):
# ensure output parser is up to date
prompt.output_parser = self.output_parser
return prompt
return self.default_template
def partial_format(self, **kwargs: Any) -> "SelectorPromptTemplate":
default_template = self.default_template.partial_format(**kwargs)
if self.conditionals is None:
conditionals = None
else:
conditionals = [
(condition, prompt.partial_format(**kwargs))
for condition, prompt in self.conditionals
]
return SelectorPromptTemplate(
default_template=default_template, conditionals=conditionals
)
def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str:
"""Format the prompt into a string."""
prompt = self.select(llm=llm)
return prompt.format(**kwargs)
def format_messages(
self, llm: Optional[BaseLLM] = None, **kwargs: Any
) -> List[ChatMessage]:
"""Format the prompt into a list of chat messages."""
prompt = self.select(llm=llm)
return prompt.format_messages(**kwargs)
def get_template(self, llm: Optional[BaseLLM] = None) -> str:
prompt = self.select(llm=llm)
return prompt.get_template(llm=llm)
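# --- Illustrative usage sketch (not part of the original module). ---
# A minimal example of a SelectorPromptTemplate that switches to a chat-style
# prompt when the provided LLM reports itself as a chat model; both templates
# below are hypothetical.
def _example_selector_prompt_template() -> SelectorPromptTemplate:
    text_prompt = PromptTemplate("Summarize the following text:\n{text}")
    chat_prompt = ChatPromptTemplate.from_messages(
        [("system", "You are a concise summarizer."), ("user", "{text}")]
    )
    return SelectorPromptTemplate(
        default_template=text_prompt,
        conditionals=[(lambda llm: llm.metadata.is_chat_model, chat_prompt)],
    )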
class LangchainPromptTemplate(BasePromptTemplate):
selector: Any
requires_langchain_llm: bool = False
def __init__(
self,
template: Optional["LangchainTemplate"] = None,
selector: Optional["LangchainSelector"] = None,
output_parser: Optional[BaseOutputParser] = None,
prompt_type: str = PromptType.CUSTOM,
metadata: Optional[Dict[str, Any]] = None,
template_var_mappings: Optional[Dict[str, Any]] = None,
function_mappings: Optional[Dict[str, Callable]] = None,
requires_langchain_llm: bool = False,
) -> None:
try:
from llama_index.core.bridge.langchain import (
ConditionalPromptSelector as LangchainSelector,
)
except ImportError:
raise ImportError(
"Must install `llama_index[langchain]` to use LangchainPromptTemplate."
)
if selector is None:
if template is None:
raise ValueError("Must provide either template or selector.")
selector = LangchainSelector(default_prompt=template)
else:
if template is not None:
raise ValueError("Must provide either template or selector.")
selector = selector
kwargs = selector.default_prompt.partial_variables
template_vars = selector.default_prompt.input_variables
if metadata is None:
metadata = {}
metadata["prompt_type"] = prompt_type
super().__init__(
selector=selector,
metadata=metadata,
kwargs=kwargs,
template_vars=template_vars,
output_parser=output_parser,
template_var_mappings=template_var_mappings,
function_mappings=function_mappings,
requires_langchain_llm=requires_langchain_llm,
)
def partial_format(self, **kwargs: Any) -> "BasePromptTemplate":
"""Partially format the prompt."""
from llama_index.core.bridge.langchain import (
ConditionalPromptSelector as LangchainSelector,
)
mapped_kwargs = self._map_all_vars(kwargs)
default_prompt = self.selector.default_prompt.partial(**mapped_kwargs)
conditionals = [
(condition, prompt.partial(**mapped_kwargs))
for condition, prompt in self.selector.conditionals
]
lc_selector = LangchainSelector(
default_prompt=default_prompt, conditionals=conditionals
)
# copy full prompt object, replace selector
lc_prompt = deepcopy(self)
lc_prompt.selector = lc_selector
return lc_prompt
def format(self, llm: Optional[BaseLLM] = None, **kwargs: Any) -> str:
"""Format the prompt into a string."""
from llama_index.llms.langchain import LangChainLLM # pants: no-infer-dep
if llm is not None:
# if llamaindex LLM is provided, and we require a langchain LLM,
# then error. but otherwise if `requires_langchain_llm` is False,
# then we can just use the default prompt
if not isinstance(llm, LangChainLLM) and self.requires_langchain_llm:
raise ValueError("Must provide a LangChainLLM.")
elif not isinstance(llm, LangChainLLM):
lc_template = self.selector.default_prompt
else:
lc_template = self.selector.get_prompt(llm=llm.llm)
else:
lc_template = self.selector.default_prompt
# if there's mappings specified, make sure those are used
mapped_kwargs = self._map_all_vars(kwargs)
return lc_template.format(**mapped_kwargs)
def format_messages(
self, llm: Optional[BaseLLM] = None, **kwargs: Any
) -> List[ChatMessage]:
"""Format the prompt into a list of chat messages."""
from llama_index.llms.langchain import LangChainLLM # pants: no-infer-dep
from llama_index.llms.langchain.utils import (
from_lc_messages,
) # pants: no-infer-dep
if llm is not None:
# if llamaindex LLM is provided, and we require a langchain LLM,
# then error. but otherwise if `requires_langchain_llm` is False,
# then we can just use the default prompt
if not isinstance(llm, LangChainLLM) and self.requires_langchain_llm:
raise ValueError("Must provide a LangChainLLM.")
elif not isinstance(llm, LangChainLLM):
lc_template = self.selector.default_prompt
else:
lc_template = self.selector.get_prompt(llm=llm.llm)
else:
lc_template = self.selector.default_prompt
# if there's mappings specified, make sure those are used
mapped_kwargs = self._map_all_vars(kwargs)
lc_prompt_value = lc_template.format_prompt(**mapped_kwargs)
lc_messages = lc_prompt_value.to_messages()
return from_lc_messages(lc_messages)
def get_template(self, llm: Optional[BaseLLM] = None) -> str:
from llama_index.llms.langchain import LangChainLLM # pants: no-infer-dep
if llm is not None:
# if llamaindex LLM is provided, and we require a langchain LLM,
# then error. but otherwise if `requires_langchain_llm` is False,
# then we can just use the default prompt
if not isinstance(llm, LangChainLLM) and self.requires_langchain_llm:
raise ValueError("Must provide a LangChainLLM.")
elif not isinstance(llm, LangChainLLM):
lc_template = self.selector.default_prompt
else:
lc_template = self.selector.get_prompt(llm=llm.llm)
else:
lc_template = self.selector.default_prompt
try:
return str(lc_template.template) # type: ignore
except AttributeError:
return str(lc_template)
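# --- Illustrative usage sketch (not part of the original module). ---
# A minimal example of wrapping a LangChain prompt; it assumes the optional
# LangChain integration packages are installed, and the template text is a
# hypothetical example.
def _example_langchain_prompt_template() -> "LangchainPromptTemplate":
    from langchain.prompts import PromptTemplate as LCPromptTemplate
    lc_template = LCPromptTemplate.from_template("Tell me a joke about {subject}.")
    return LangchainPromptTemplate(template=lc_template)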
# NOTE: only for backwards compatibility
Prompt = PromptTemplate
class PromptComponent(QueryComponent):
"""Prompt component."""
prompt: BasePromptTemplate = Field(..., description="Prompt")
llm: Optional[BaseLLM] = Field(
default=None, description="LLM to use for formatting prompt."
)
format_messages: bool = Field(
default=False,
description="Whether to format the prompt into a list of chat messages.",
)
class Config:
arbitrary_types_allowed = True
def set_callback_manager(self, callback_manager: Any) -> None:
"""Set callback manager."""
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
keys = list(input.keys())
for k in keys:
input[k] = validate_and_convert_stringable(input[k])
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
if self.format_messages:
output: Union[str, List[ChatMessage]] = self.prompt.format_messages(
llm=self.llm, **kwargs
)
else:
output = self.prompt.format(llm=self.llm, **kwargs)
return {"prompt": output}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component."""
# NOTE: no native async for prompt
return self._run_component(**kwargs)
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
return InputKeys.from_keys(
set(self.prompt.template_vars) - set(self.prompt.kwargs)
)
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"prompt"})
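# --- Illustrative usage sketch (not part of the original module). ---
# A minimal sketch of running a PromptComponent on its own, assuming the
# standard QueryComponent.run_component entry point; the prompt text and input
# value below are hypothetical.
def _example_prompt_component() -> dict:
    component = PromptComponent(
        prompt=PromptTemplate("Write a haiku about {topic}."),
        format_messages=False,
    )
    # expected to return {"prompt": "<formatted prompt string>"}
    return component.run_component(topic="prompt pipelines")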
| [
"llama_index.llms.langchain.utils.from_lc_messages",
"llama_index.core.base.query_pipeline.query.OutputKeys.from_keys",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.bridge.langchain.ConditionalPromptSelector",
"llama_index.core.base.query_pipeline.query.validate_and_convert_stringable",
"llama_index.core.base.llms.generic_utils.messages_to_prompt",
"llama_index.core.base.llms.types.ChatMessage.from_str",
"llama_index.core.prompts.utils.get_template_vars",
"llama_index.core.base.llms.generic_utils.prompt_to_messages"
] | [((1473, 1559), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Template variable mappings (Optional)."""'}), "(default_factory=dict, description=\n 'Template variable mappings (Optional).')\n", (1478, 1559), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((1624, 1819), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Function mappings (Optional). This is a mapping from template variable names to functions that take in the current kwargs and return a string."""'}), "(default_factory=dict, description=\n 'Function mappings (Optional). This is a mapping from template variable names to functions that take in the current kwargs and return a string.'\n )\n", (1629, 1819), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((19377, 19409), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Prompt"""'}), "(..., description='Prompt')\n", (19382, 19409), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((19439, 19507), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""LLM to use for formatting prompt."""'}), "(default=None, description='LLM to use for formatting prompt.')\n", (19444, 19507), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((19550, 19649), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether to format the prompt into a list of chat messages."""'}), "(default=False, description=\n 'Whether to format the prompt into a list of chat messages.')\n", (19555, 19649), False, 'from llama_index.core.bridge.pydantic import Field\n'), ((5075, 5102), 'llama_index.core.prompts.utils.get_template_vars', 'get_template_vars', (['template'], {}), '(template)\n', (5092, 5102), False, 'from llama_index.core.prompts.utils import get_template_vars\n'), ((5803, 5817), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (5811, 5817), False, 'from copy import deepcopy\n'), ((6932, 6958), 'llama_index.core.base.llms.generic_utils.prompt_to_messages', 'prompt_to_messages', (['prompt'], {}), '(prompt)\n', (6950, 6958), False, 'from llama_index.core.base.llms.generic_utils import prompt_to_messages\n'), ((8719, 8733), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (8727, 8733), False, 'from copy import deepcopy\n'), ((9169, 9205), 'llama_index.core.base.llms.generic_utils.messages_to_prompt', 'default_messages_to_prompt', (['messages'], {}), '(messages)\n', (9195, 9205), True, 'from llama_index.core.base.llms.generic_utils import messages_to_prompt as default_messages_to_prompt\n'), ((10403, 10453), 'llama_index.core.base.llms.generic_utils.messages_to_prompt', 'default_messages_to_prompt', (['self.message_templates'], {}), '(self.message_templates)\n', (10429, 10453), True, 'from llama_index.core.base.llms.generic_utils import messages_to_prompt as default_messages_to_prompt\n'), ((15675, 15750), 'llama_index.core.bridge.langchain.ConditionalPromptSelector', 'LangchainSelector', ([], {'default_prompt': 'default_prompt', 'conditionals': 'conditionals'}), '(default_prompt=default_prompt, conditionals=conditionals)\n', (15692, 15750), True, 'from llama_index.core.bridge.langchain import ConditionalPromptSelector as LangchainSelector\n'), ((15846, 15860), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (15854, 15860), False, 'from copy import deepcopy\n'), ((18234, 18263), 
'llama_index.llms.langchain.utils.from_lc_messages', 'from_lc_messages', (['lc_messages'], {}), '(lc_messages)\n', (18250, 18263), False, 'from llama_index.llms.langchain.utils import from_lc_messages\n'), ((20950, 20982), 'llama_index.core.base.query_pipeline.query.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'prompt'}"], {}), "({'prompt'})\n", (20970, 20982), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((9674, 9723), 'llama_index.core.prompts.utils.get_template_vars', 'get_template_vars', (["(message_template.content or '')"], {}), "(message_template.content or '')\n", (9691, 9723), False, 'from llama_index.core.prompts.utils import get_template_vars\n'), ((14324, 14366), 'llama_index.core.bridge.langchain.ConditionalPromptSelector', 'LangchainSelector', ([], {'default_prompt': 'template'}), '(default_prompt=template)\n', (14341, 14366), True, 'from llama_index.core.bridge.langchain import ConditionalPromptSelector as LangchainSelector\n'), ((20056, 20097), 'llama_index.core.base.query_pipeline.query.validate_and_convert_stringable', 'validate_and_convert_stringable', (['input[k]'], {}), '(input[k])\n', (20087, 20097), False, 'from llama_index.core.base.query_pipeline.query import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((7750, 7799), 'llama_index.core.prompts.utils.get_template_vars', 'get_template_vars', (["(message_template.content or '')"], {}), "(message_template.content or '')\n", (7767, 7799), False, 'from llama_index.core.prompts.utils import get_template_vars\n'), ((8448, 8496), 'llama_index.core.base.llms.types.ChatMessage.from_str', 'ChatMessage.from_str', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (8468, 8496), False, 'from llama_index.core.base.llms.types import ChatMessage\n')] |
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
from llama_index.llms import OpenAI
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset(
"Uber10KDataset2021", "./uber10k_2021_dataset"
)
# BUILD BASIC RAG PIPELINE
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
# EVALUATE WITH PACK
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack_stuff")
judge_llm = OpenAI(model="gpt-3.5-turbo")
rag_evaluator = RagEvaluatorPack(
query_engine=query_engine, rag_dataset=rag_dataset, judge_llm=judge_llm
)
############################################################################
    # NOTE: If you have a lower tier OpenAI API subscription like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
benchmark_df = await rag_evaluator.arun(
batch_size=20, # batches the number of openai api calls to make
sleep_time_in_seconds=1, # number of seconds sleep before making an api call
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.llms.OpenAI",
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack"
] | [((301, 371), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""Uber10KDataset2021"""', '"""./uber10k_2021_dataset"""'], {}), "('Uber10KDataset2021', './uber10k_2021_dataset')\n", (323, 371), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((430, 482), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (461, 482), False, 'from llama_index.core import VectorStoreIndex\n'), ((575, 630), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack_stuff"""'], {}), "('RagEvaluatorPack', './pack_stuff')\n", (594, 630), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((647, 676), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""'}), "(model='gpt-3.5-turbo')\n", (653, 676), False, 'from llama_index.llms import OpenAI\n'), ((1562, 1586), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1584, 1586), False, 'import asyncio\n')] |
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core.evaluation import PairwiseComparisonEvaluator
from llama_index.llms import OpenAI, Gemini
from llama_index.core import ServiceContext
import pandas as pd
async def main():
# DOWNLOAD LLAMADATASET
pairwise_evaluator_dataset, _ = download_llama_dataset(
"MtBenchHumanJudgementDataset", "./mt_bench_data"
)
# DEFINE EVALUATORS
gpt_4_context = ServiceContext.from_defaults(
llm=OpenAI(temperature=0, model="gpt-4"),
)
gpt_3p5_context = ServiceContext.from_defaults(
llm=OpenAI(temperature=0, model="gpt-3.5-turbo"),
)
gemini_pro_context = ServiceContext.from_defaults(
llm=Gemini(model="models/gemini-pro", temperature=0)
)
evaluators = {
"gpt-4": PairwiseComparisonEvaluator(service_context=gpt_4_context),
"gpt-3.5": PairwiseComparisonEvaluator(service_context=gpt_3p5_context),
"gemini-pro": PairwiseComparisonEvaluator(service_context=gemini_pro_context),
}
# EVALUATE WITH PACK
############################################################################
    # NOTE: If you have a lower tier OpenAI API subscription like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
EvaluatorBenchmarkerPack = download_llama_pack("EvaluatorBenchmarkerPack", "./pack")
evaluator_benchmarker = EvaluatorBenchmarkerPack(
evaluator=evaluators["gpt-3.5"],
eval_dataset=pairwise_evaluator_dataset,
show_progress=True,
)
gpt_3p5_benchmark_df = await evaluator_benchmarker.arun(
batch_size=100, sleep_time_in_seconds=0
)
evaluator_benchmarker = EvaluatorBenchmarkerPack(
evaluator=evaluators["gpt-4"],
eval_dataset=pairwise_evaluator_dataset,
show_progress=True,
)
gpt_4_benchmark_df = await evaluator_benchmarker.arun(
batch_size=100, sleep_time_in_seconds=0
)
evaluator_benchmarker = EvaluatorBenchmarkerPack(
evaluator=evaluators["gemini-pro"],
eval_dataset=pairwise_evaluator_dataset,
show_progress=True,
)
gemini_pro_benchmark_df = await evaluator_benchmarker.arun(
batch_size=5, sleep_time_in_seconds=0.5
)
benchmark_df = pd.concat(
[
gpt_3p5_benchmark_df,
gpt_4_benchmark_df,
gemini_pro_benchmark_df,
],
axis=0,
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"llama_index.core.llama_pack.download_llama_pack",
"llama_index.core.evaluation.PairwiseComparisonEvaluator",
"llama_index.llms.Gemini",
"llama_index.llms.OpenAI",
"llama_index.core.llama_dataset.download_llama_dataset"
] | [((402, 475), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MtBenchHumanJudgementDataset"""', '"""./mt_bench_data"""'], {}), "('MtBenchHumanJudgementDataset', './mt_bench_data')\n", (424, 475), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((1675, 1732), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""EvaluatorBenchmarkerPack"""', '"""./pack"""'], {}), "('EvaluatorBenchmarkerPack', './pack')\n", (1694, 1732), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((2636, 2726), 'pandas.concat', 'pd.concat', (['[gpt_3p5_benchmark_df, gpt_4_benchmark_df, gemini_pro_benchmark_df]'], {'axis': '(0)'}), '([gpt_3p5_benchmark_df, gpt_4_benchmark_df,\n gemini_pro_benchmark_df], axis=0)\n', (2645, 2726), True, 'import pandas as pd\n'), ((2857, 2881), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2879, 2881), False, 'import asyncio\n'), ((898, 956), 'llama_index.core.evaluation.PairwiseComparisonEvaluator', 'PairwiseComparisonEvaluator', ([], {'service_context': 'gpt_4_context'}), '(service_context=gpt_4_context)\n', (925, 956), False, 'from llama_index.core.evaluation import PairwiseComparisonEvaluator\n'), ((977, 1037), 'llama_index.core.evaluation.PairwiseComparisonEvaluator', 'PairwiseComparisonEvaluator', ([], {'service_context': 'gpt_3p5_context'}), '(service_context=gpt_3p5_context)\n', (1004, 1037), False, 'from llama_index.core.evaluation import PairwiseComparisonEvaluator\n'), ((1061, 1124), 'llama_index.core.evaluation.PairwiseComparisonEvaluator', 'PairwiseComparisonEvaluator', ([], {'service_context': 'gemini_pro_context'}), '(service_context=gemini_pro_context)\n', (1088, 1124), False, 'from llama_index.core.evaluation import PairwiseComparisonEvaluator\n'), ((577, 613), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (583, 613), False, 'from llama_index.llms import OpenAI, Gemini\n'), ((686, 730), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-3.5-turbo"""'}), "(temperature=0, model='gpt-3.5-turbo')\n", (692, 730), False, 'from llama_index.llms import OpenAI, Gemini\n'), ((806, 854), 'llama_index.llms.Gemini', 'Gemini', ([], {'model': '"""models/gemini-pro"""', 'temperature': '(0)'}), "(model='models/gemini-pro', temperature=0)\n", (812, 854), False, 'from llama_index.llms import OpenAI, Gemini\n')] |
from abc import abstractmethod
from typing import (
Any,
Sequence,
)
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.core.base.query_pipeline.query import (
ChainableMixin,
)
from llama_index.core.bridge.pydantic import Field, validator
from llama_index.core.callbacks import CallbackManager
from llama_index.core.schema import BaseComponent
class BaseLLM(ChainableMixin, BaseComponent):
"""LLM interface."""
callback_manager: CallbackManager = Field(
default_factory=CallbackManager, exclude=True
)
class Config:
arbitrary_types_allowed = True
@validator("callback_manager", pre=True)
def _validate_callback_manager(cls, v: CallbackManager) -> CallbackManager:
if v is None:
return CallbackManager([])
return v
@property
@abstractmethod
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
@abstractmethod
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
"""Chat endpoint for LLM."""
@abstractmethod
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
"""Completion endpoint for LLM."""
@abstractmethod
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
"""Streaming chat endpoint for LLM."""
@abstractmethod
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
"""Streaming completion endpoint for LLM."""
# ===== Async Endpoints =====
@abstractmethod
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
"""Async chat endpoint for LLM."""
@abstractmethod
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
"""Async completion endpoint for LLM."""
@abstractmethod
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
"""Async streaming chat endpoint for LLM."""
@abstractmethod
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
"""Async streaming completion endpoint for LLM."""
| [
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.bridge.pydantic.validator"
] | [((669, 721), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)'}), '(default_factory=CallbackManager, exclude=True)\n', (674, 721), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((800, 839), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""callback_manager"""'], {'pre': '(True)'}), "('callback_manager', pre=True)\n", (809, 839), False, 'from llama_index.core.bridge.pydantic import Field, validator\n'), ((961, 980), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (976, 980), False, 'from llama_index.core.callbacks import CallbackManager\n')] |
from typing import Any, Sequence
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
)
from llama_index.legacy.llms.base import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.legacy.llms.generic_utils import (
completion_response_to_chat_response,
stream_completion_response_to_chat_response,
)
from llama_index.legacy.llms.llm import LLM
class CustomLLM(LLM):
"""Simple abstract base class for custom LLMs.
    Subclasses must implement the `metadata`, `complete`,
    and `stream_complete` methods.
"""
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
prompt = self.messages_to_prompt(messages)
completion_response_gen = self.stream_complete(prompt, formatted=True, **kwargs)
return stream_completion_response_to_chat_response(completion_response_gen)
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
return self.chat(messages, **kwargs)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
async def gen() -> ChatResponseAsyncGen:
for message in self.stream_chat(messages, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return self.complete(prompt, formatted=formatted, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
async def gen() -> CompletionResponseAsyncGen:
for message in self.stream_complete(prompt, formatted=formatted, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
@classmethod
def class_name(cls) -> str:
return "custom_llm"
| [
"llama_index.legacy.llms.generic_utils.completion_response_to_chat_response",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response"
] | [((710, 729), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (727, 729), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1022, 1041), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1039, 1041), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1380, 1399), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1397, 1399), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1573, 1592), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (1590, 1592), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1955, 1980), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1978, 1980), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2175, 2200), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (2198, 2200), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((958, 1015), 'llama_index.legacy.llms.generic_utils.completion_response_to_chat_response', 'completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (994, 1015), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((1305, 1373), 'llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response', 'stream_completion_response_to_chat_response', (['completion_response_gen'], {}), '(completion_response_gen)\n', (1348, 1373), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n')] |
from abc import abstractmethod
from typing import Any, List, Sequence, Union
from llama_index.core.base.query_pipeline.query import (
ChainableMixin,
QueryComponent,
)
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.prompts.mixin import PromptMixin, PromptMixinType
from llama_index.core.schema import QueryBundle, QueryType
from llama_index.core.tools.types import ToolMetadata
MetadataType = Union[str, ToolMetadata]
class SingleSelection(BaseModel):
"""A single selection of a choice."""
index: int
reason: str
class MultiSelection(BaseModel):
"""A multi-selection of choices."""
selections: List[SingleSelection]
@property
def ind(self) -> int:
if len(self.selections) != 1:
raise ValueError(
f"There are {len(self.selections)} selections, " "please use .inds."
)
return self.selections[0].index
@property
def reason(self) -> str:
if len(self.reasons) != 1:
raise ValueError(
f"There are {len(self.reasons)} selections, " "please use .reasons."
)
return self.selections[0].reason
@property
def inds(self) -> List[int]:
return [x.index for x in self.selections]
@property
def reasons(self) -> List[str]:
return [x.reason for x in self.selections]
# separate name for clarity and to not confuse function calling model
SelectorResult = MultiSelection
def _wrap_choice(choice: MetadataType) -> ToolMetadata:
if isinstance(choice, ToolMetadata):
return choice
elif isinstance(choice, str):
return ToolMetadata(description=choice)
else:
raise ValueError(f"Unexpected type: {type(choice)}")
def _wrap_query(query: QueryType) -> QueryBundle:
if isinstance(query, QueryBundle):
return query
elif isinstance(query, str):
return QueryBundle(query_str=query)
else:
raise ValueError(f"Unexpected type: {type(query)}")
class BaseSelector(PromptMixin, ChainableMixin):
"""Base selector."""
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt sub-modules."""
return {}
def select(
self, choices: Sequence[MetadataType], query: QueryType
) -> SelectorResult:
metadatas = [_wrap_choice(choice) for choice in choices]
query_bundle = _wrap_query(query)
return self._select(choices=metadatas, query=query_bundle)
async def aselect(
self, choices: Sequence[MetadataType], query: QueryType
) -> SelectorResult:
metadatas = [_wrap_choice(choice) for choice in choices]
query_bundle = _wrap_query(query)
return await self._aselect(choices=metadatas, query=query_bundle)
@abstractmethod
def _select(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
pass
@abstractmethod
async def _aselect(
self, choices: Sequence[ToolMetadata], query: QueryBundle
) -> SelectorResult:
pass
def _as_query_component(self, **kwargs: Any) -> QueryComponent:
"""As query component."""
from llama_index.core.query_pipeline.components.router import (
SelectorComponent,
)
return SelectorComponent(selector=self)
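# --- Illustrative usage sketch (not part of the original module). ---
# A minimal example of how a selector result is typically consumed; the choices
# and the hard-coded selection below are hypothetical.
def _example_selector_result() -> str:
    choices = ["vector index for semantic search", "keyword index for exact lookups"]
    result = SelectorResult(
        selections=[SingleSelection(index=0, reason="the query is semantic")]
    )
    # .ind/.reason are only valid for single selections; use .inds/.reasons otherwise
    return f"chose {choices[result.ind]!r} because {result.reason}"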
| [
"llama_index.core.query_pipeline.components.router.SelectorComponent",
"llama_index.core.tools.types.ToolMetadata",
"llama_index.core.schema.QueryBundle"
] | [((3300, 3332), 'llama_index.core.query_pipeline.components.router.SelectorComponent', 'SelectorComponent', ([], {'selector': 'self'}), '(selector=self)\n', (3317, 3332), False, 'from llama_index.core.query_pipeline.components.router import SelectorComponent\n'), ((1653, 1685), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', ([], {'description': 'choice'}), '(description=choice)\n', (1665, 1685), False, 'from llama_index.core.tools.types import ToolMetadata\n'), ((1917, 1945), 'llama_index.core.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (1928, 1945), False, 'from llama_index.core.schema import QueryBundle, QueryType\n')] |
"""Base agent type."""
import uuid
from abc import abstractmethod
from typing import Any, Dict, List, Optional
from llama_index.legacy.bridge.pydantic import BaseModel, Field
from llama_index.legacy.callbacks import CallbackManager, trace_method
from llama_index.legacy.chat_engine.types import (
BaseChatEngine,
StreamingAgentChatResponse,
)
from llama_index.legacy.core.base_query_engine import BaseQueryEngine
from llama_index.legacy.core.llms.types import ChatMessage
from llama_index.legacy.core.response.schema import RESPONSE_TYPE, Response
from llama_index.legacy.memory.types import BaseMemory
from llama_index.legacy.prompts.mixin import (
PromptDictType,
PromptMixin,
PromptMixinType,
)
from llama_index.legacy.schema import QueryBundle
class BaseAgent(BaseChatEngine, BaseQueryEngine):
"""Base Agent."""
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
# TODO: the ReAct agent does not explicitly specify prompts, would need a
# refactor to expose those prompts
return {}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
# ===== Query Engine Interface =====
@trace_method("query")
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
agent_response = self.chat(
query_bundle.query_str,
chat_history=[],
)
return Response(
response=str(agent_response), source_nodes=agent_response.source_nodes
)
@trace_method("query")
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
agent_response = await self.achat(
query_bundle.query_str,
chat_history=[],
)
return Response(
response=str(agent_response), source_nodes=agent_response.source_nodes
)
def stream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
raise NotImplementedError("stream_chat not implemented")
async def astream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
raise NotImplementedError("astream_chat not implemented")
class TaskStep(BaseModel):
"""Agent task step.
Represents a single input step within the execution run ("Task") of an agent
given a user input.
The output is returned as a `TaskStepOutput`.
"""
    task_id: str = Field(..., description="Task ID")
step_id: str = Field(..., description="Step ID")
input: Optional[str] = Field(default=None, description="User input")
# memory: BaseMemory = Field(
# ..., type=BaseMemory, description="Conversational Memory"
# )
step_state: Dict[str, Any] = Field(
default_factory=dict, description="Additional state for a given step."
)
# NOTE: the state below may change throughout the course of execution
# this tracks the relationships to other steps
next_steps: Dict[str, "TaskStep"] = Field(
default_factory=dict, description="Next steps to be executed."
)
prev_steps: Dict[str, "TaskStep"] = Field(
default_factory=dict,
description="Previous steps that were dependencies for this step.",
)
is_ready: bool = Field(
default=True, description="Is this step ready to be executed?"
)
def get_next_step(
self,
step_id: str,
input: Optional[str] = None,
step_state: Optional[Dict[str, Any]] = None,
) -> "TaskStep":
"""Convenience function to get next step.
Preserve task_id, memory, step_state.
"""
return TaskStep(
task_id=self.task_id,
step_id=step_id,
input=input,
# memory=self.memory,
step_state=step_state or self.step_state,
)
def link_step(
self,
next_step: "TaskStep",
) -> None:
"""Link to next step.
Add link from this step to next, and from next step to current.
"""
self.next_steps[next_step.step_id] = next_step
next_step.prev_steps[self.step_id] = self
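# --- Illustrative usage sketch (not part of the original module). ---
# A minimal example of chaining two steps of an agent task; the ids and input
# strings below are hypothetical.
def _example_task_steps() -> "TaskStep":
    first = TaskStep(task_id="task-1", step_id="step-1", input="What is 2 + 2?")
    second = first.get_next_step(step_id="step-2", input="Explain the answer.")
    # record the dependency in both directions
    first.link_step(second)
    return second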
class TaskStepOutput(BaseModel):
"""Agent task step output."""
output: Any = Field(..., description="Task step output")
task_step: TaskStep = Field(..., description="Task step input")
next_steps: List[TaskStep] = Field(..., description="Next steps to be executed.")
is_last: bool = Field(default=False, description="Is this the last step?")
def __str__(self) -> str:
"""String representation."""
return str(self.output)
class Task(BaseModel):
"""Agent Task.
Represents a "run" of an agent given a user input.
"""
class Config:
arbitrary_types_allowed = True
task_id: str = Field(
default_factory=lambda: str(uuid.uuid4()), type=str, description="Task ID"
)
input: str = Field(..., type=str, description="User input")
# NOTE: this is state that may be modified throughout the course of execution of the task
memory: BaseMemory = Field(
...,
type=BaseMemory,
description=(
"Conversational Memory. Maintains state before execution of this task."
),
)
callback_manager: CallbackManager = Field(
default_factory=CallbackManager,
exclude=True,
description="Callback manager for the task.",
)
extra_state: Dict[str, Any] = Field(
default_factory=dict,
description=(
"Additional user-specified state for a given task. "
"Can be modified throughout the execution of a task."
),
)
class BaseAgentWorker(PromptMixin):
"""Base agent worker."""
class Config:
arbitrary_types_allowed = True
def _get_prompts(self) -> PromptDictType:
"""Get prompts."""
# TODO: the ReAct agent does not explicitly specify prompts, would need a
# refactor to expose those prompts
return {}
def _get_prompt_modules(self) -> PromptMixinType:
"""Get prompt modules."""
return {}
def _update_prompts(self, prompts: PromptDictType) -> None:
"""Update prompts."""
@abstractmethod
def initialize_step(self, task: Task, **kwargs: Any) -> TaskStep:
"""Initialize step from task."""
@abstractmethod
def run_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput:
"""Run step."""
@abstractmethod
async def arun_step(
self, step: TaskStep, task: Task, **kwargs: Any
) -> TaskStepOutput:
"""Run step (async)."""
raise NotImplementedError
@abstractmethod
def stream_step(self, step: TaskStep, task: Task, **kwargs: Any) -> TaskStepOutput:
"""Run step (stream)."""
# TODO: figure out if we need a different type for TaskStepOutput
raise NotImplementedError
@abstractmethod
async def astream_step(
self, step: TaskStep, task: Task, **kwargs: Any
) -> TaskStepOutput:
"""Run step (async stream)."""
raise NotImplementedError
@abstractmethod
def finalize_task(self, task: Task, **kwargs: Any) -> None:
"""Finalize task, after all the steps are completed."""
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
# TODO: make this abstractmethod (right now will break some agent impls)
| [
"llama_index.legacy.callbacks.trace_method",
"llama_index.legacy.bridge.pydantic.Field"
] | [((1310, 1331), 'llama_index.legacy.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1322, 1331), False, 'from llama_index.legacy.callbacks import CallbackManager, trace_method\n'), ((1633, 1654), 'llama_index.legacy.callbacks.trace_method', 'trace_method', (['"""query"""'], {}), "('query')\n", (1645, 1654), False, 'from llama_index.legacy.callbacks import CallbackManager, trace_method\n'), ((2613, 2647), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'diescription': '"""Task ID"""'}), "(..., diescription='Task ID')\n", (2618, 2647), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((2667, 2700), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Step ID"""'}), "(..., description='Step ID')\n", (2672, 2700), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((2728, 2773), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""User input"""'}), "(default=None, description='User input')\n", (2733, 2773), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((2917, 2994), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional state for a given step."""'}), "(default_factory=dict, description='Additional state for a given step.')\n", (2922, 2994), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((3175, 3244), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Next steps to be executed."""'}), "(default_factory=dict, description='Next steps to be executed.')\n", (3180, 3244), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((3299, 3399), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Previous steps that were dependencies for this step."""'}), "(default_factory=dict, description=\n 'Previous steps that were dependencies for this step.')\n", (3304, 3399), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((3439, 3508), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': '"""Is this step ready to be executed?"""'}), "(default=True, description='Is this step ready to be executed?')\n", (3444, 3508), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4404, 4446), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step output"""'}), "(..., description='Task step output')\n", (4409, 4446), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4473, 4514), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Task step input"""'}), "(..., description='Task step input')\n", (4478, 4514), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4548, 4600), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Next steps to be executed."""'}), "(..., description='Next steps to be executed.')\n", (4553, 4600), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((4621, 4679), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Is this the last step?"""'}), "(default=False, description='Is this the last step?')\n", (4626, 4679), False, 'from 
llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5080, 5126), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'type': 'str', 'description': '"""User input"""'}), "(..., type=str, description='User input')\n", (5085, 5126), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5247, 5364), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'type': 'BaseMemory', 'description': '"""Conversational Memory. Maintains state before execution of this task."""'}), "(..., type=BaseMemory, description=\n 'Conversational Memory. Maintains state before execution of this task.')\n", (5252, 5364), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5456, 5559), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'CallbackManager', 'exclude': '(True)', 'description': '"""Callback manager for the task."""'}), "(default_factory=CallbackManager, exclude=True, description=\n 'Callback manager for the task.')\n", (5461, 5559), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5621, 5775), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional user-specified state for a given task. Can be modified throughout the execution of a task."""'}), "(default_factory=dict, description=\n 'Additional user-specified state for a given task. Can be modified throughout the execution of a task.'\n )\n", (5626, 5775), False, 'from llama_index.legacy.bridge.pydantic import BaseModel, Field\n'), ((5010, 5022), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (5020, 5022), False, 'import uuid\n')] |
import json
from typing import Any, Dict, Sequence, Tuple
import httpx
from httpx import Timeout
from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
MessageRole,
)
from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.legacy.llms.custom import CustomLLM
DEFAULT_REQUEST_TIMEOUT = 30.0
def get_addtional_kwargs(
response: Dict[str, Any], exclude: Tuple[str, ...]
) -> Dict[str, Any]:
return {k: v for k, v in response.items() if k not in exclude}
class Ollama(CustomLLM):
base_url: str = Field(
default="http://localhost:11434",
description="Base url the model is hosted under.",
)
model: str = Field(description="The Ollama model to use.")
temperature: float = Field(
default=0.75,
description="The temperature to use for sampling.",
gte=0.0,
lte=1.0,
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model.",
gt=0,
)
request_timeout: float = Field(
default=DEFAULT_REQUEST_TIMEOUT,
description="The timeout for making http request to Ollama API server",
)
prompt_key: str = Field(
default="prompt", description="The key to use for the prompt in API calls."
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional model parameters for the Ollama API.",
)
@classmethod
def class_name(cls) -> str:
return "Ollama_llm"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=self.context_window,
num_output=DEFAULT_NUM_OUTPUTS,
model_name=self.model,
is_chat_model=True, # Ollama supports chat API for all models
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"temperature": self.temperature,
"num_ctx": self.context_window,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
payload = {
"model": self.model,
"messages": [
{
"role": message.role.value,
"content": message.content,
**message.additional_kwargs,
}
for message in messages
],
"options": self._model_kwargs,
"stream": False,
**kwargs,
}
with httpx.Client(timeout=Timeout(self.request_timeout)) as client:
response = client.post(
url=f"{self.base_url}/api/chat",
json=payload,
)
response.raise_for_status()
raw = response.json()
message = raw["message"]
return ChatResponse(
message=ChatMessage(
content=message.get("content"),
role=MessageRole(message.get("role")),
additional_kwargs=get_addtional_kwargs(
message, ("content", "role")
),
),
raw=raw,
additional_kwargs=get_addtional_kwargs(raw, ("message",)),
)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
payload = {
"model": self.model,
"messages": [
{
"role": message.role.value,
"content": message.content,
**message.additional_kwargs,
}
for message in messages
],
"options": self._model_kwargs,
"stream": True,
**kwargs,
}
with httpx.Client(timeout=Timeout(self.request_timeout)) as client:
with client.stream(
method="POST",
url=f"{self.base_url}/api/chat",
json=payload,
) as response:
response.raise_for_status()
text = ""
for line in response.iter_lines():
if line:
chunk = json.loads(line)
if "done" in chunk and chunk["done"]:
break
message = chunk["message"]
delta = message.get("content")
text += delta
yield ChatResponse(
message=ChatMessage(
content=text,
role=MessageRole(message.get("role")),
additional_kwargs=get_addtional_kwargs(
message, ("content", "role")
),
),
delta=delta,
raw=chunk,
additional_kwargs=get_addtional_kwargs(chunk, ("message",)),
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
payload = {
self.prompt_key: prompt,
"model": self.model,
"options": self._model_kwargs,
"stream": False,
**kwargs,
}
with httpx.Client(timeout=Timeout(self.request_timeout)) as client:
response = client.post(
url=f"{self.base_url}/api/generate",
json=payload,
)
response.raise_for_status()
raw = response.json()
text = raw.get("response")
return CompletionResponse(
text=text,
raw=raw,
additional_kwargs=get_addtional_kwargs(raw, ("response",)),
)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
payload = {
self.prompt_key: prompt,
"model": self.model,
"options": self._model_kwargs,
"stream": True,
**kwargs,
}
with httpx.Client(timeout=Timeout(self.request_timeout)) as client:
with client.stream(
method="POST",
url=f"{self.base_url}/api/generate",
json=payload,
) as response:
response.raise_for_status()
text = ""
for line in response.iter_lines():
if line:
chunk = json.loads(line)
delta = chunk.get("response")
text += delta
yield CompletionResponse(
delta=delta,
text=text,
raw=chunk,
additional_kwargs=get_addtional_kwargs(
chunk, ("response",)
),
)
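# --- Illustrative usage sketch (not part of the original module). ---
# A minimal example of calling a locally running Ollama server; the model name
# and prompt are hypothetical, and the call assumes a server is reachable at
# the default base_url.
def _example_ollama_complete() -> str:
    llm = Ollama(model="llama2", request_timeout=60.0, temperature=0.1)
    response = llm.complete("Why is the sky blue?")
    return response.text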
| [
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field"
] | [((816, 911), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '"""http://localhost:11434"""', 'description': '"""Base url the model is hosted under."""'}), "(default='http://localhost:11434', description=\n 'Base url the model is hosted under.')\n", (821, 911), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((947, 992), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Ollama model to use."""'}), "(description='The Ollama model to use.')\n", (952, 992), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1018, 1112), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(0.75)', 'description': '"""The temperature to use for sampling."""', 'gte': '(0.0)', 'lte': '(1.0)'}), "(default=0.75, description='The temperature to use for sampling.', gte\n =0.0, lte=1.0)\n", (1023, 1112), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1173, 1288), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CONTEXT_WINDOW', 'description': '"""The maximum number of context tokens for the model."""', 'gt': '(0)'}), "(default=DEFAULT_CONTEXT_WINDOW, description=\n 'The maximum number of context tokens for the model.', gt=0)\n", (1178, 1288), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1344, 1459), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_REQUEST_TIMEOUT', 'description': '"""The timeout for making http request to Ollama API server"""'}), "(default=DEFAULT_REQUEST_TIMEOUT, description=\n 'The timeout for making http request to Ollama API server')\n", (1349, 1459), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1500, 1587), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '"""prompt"""', 'description': '"""The key to use for the prompt in API calls."""'}), "(default='prompt', description=\n 'The key to use for the prompt in API calls.')\n", (1505, 1587), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1637, 1732), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional model parameters for the Ollama API."""'}), "(default_factory=dict, description=\n 'Additional model parameters for the Ollama API.')\n", (1642, 1732), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2434, 2453), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (2451, 2453), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3730, 3749), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (3747, 3749), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5575, 5600), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5598, 5600), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6418, 6443), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (6441, 6443), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((1926, 2053), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_window', 'num_output': 'DEFAULT_NUM_OUTPUTS', 'model_name': 'self.model', 'is_chat_model': '(True)'}), 
'(context_window=self.context_window, num_output=\n DEFAULT_NUM_OUTPUTS, model_name=self.model, is_chat_model=True)\n', (1937, 2053), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((2992, 3021), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (2999, 3021), False, 'from httpx import Timeout\n'), ((4311, 4340), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (4318, 4340), False, 'from httpx import Timeout\n'), ((5943, 5972), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (5950, 5972), False, 'from httpx import Timeout\n'), ((6795, 6824), 'httpx.Timeout', 'Timeout', (['self.request_timeout'], {}), '(self.request_timeout)\n', (6802, 6824), False, 'from httpx import Timeout\n'), ((4704, 4720), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (4714, 4720), False, 'import json\n'), ((7192, 7208), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (7202, 7208), False, 'import json\n')] |
import warnings
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
LLMMetadata,
MessageRole,
)
from llama_index.legacy.llms.base import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.legacy.llms.cohere_utils import (
CHAT_MODELS,
acompletion_with_retry,
cohere_modelname_to_contextsize,
completion_with_retry,
messages_to_cohere_history,
)
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
class Cohere(LLM):
model: str = Field(description="The cohere model to use.")
temperature: float = Field(description="The temperature to use for sampling.")
max_retries: int = Field(
default=10, description="The maximum number of API retries."
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the Cohere API."
)
max_tokens: int = Field(description="The maximum number of tokens to generate.")
_client: Any = PrivateAttr()
_aclient: Any = PrivateAttr()
def __init__(
self,
model: str = "command",
temperature: float = 0.5,
max_tokens: int = 512,
timeout: Optional[float] = None,
max_retries: int = 10,
api_key: Optional[str] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
try:
import cohere
except ImportError as e:
raise ImportError(
"You must install the `cohere` package to use Cohere."
"Please `pip install cohere`"
) from e
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
self._client = cohere.Client(api_key, client_name="llama_index")
self._aclient = cohere.AsyncClient(api_key, client_name="llama_index")
super().__init__(
temperature=temperature,
additional_kwargs=additional_kwargs,
timeout=timeout,
max_retries=max_retries,
model=model,
callback_manager=callback_manager,
max_tokens=max_tokens,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Cohere_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=cohere_modelname_to_contextsize(self.model),
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
system_role=MessageRole.CHATBOT,
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"temperature": self.temperature,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
history = messages_to_cohere_history(messages[:-1])
prompt = messages[-1].content
all_kwargs = self._get_all_kwargs(**kwargs)
if all_kwargs["model"] not in CHAT_MODELS:
raise ValueError(f"{all_kwargs['model']} not supported for chat")
if "stream" in all_kwargs:
warnings.warn(
"Parameter `stream` is not supported by the `chat` method."
"Use the `stream_chat` method instead"
)
response = completion_with_retry(
client=self._client,
max_retries=self.max_retries,
chat=True,
message=prompt,
chat_history=history,
**all_kwargs,
)
return ChatResponse(
message=ChatMessage(role=MessageRole.ASSISTANT, content=response.text),
raw=response.__dict__,
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
if "stream" in all_kwargs:
warnings.warn(
"Parameter `stream` is not supported by the `chat` method."
"Use the `stream_chat` method instead"
)
response = completion_with_retry(
client=self._client,
max_retries=self.max_retries,
chat=False,
prompt=prompt,
**all_kwargs,
)
return CompletionResponse(
text=response.generations[0].text,
raw=response.__dict__,
)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
history = messages_to_cohere_history(messages[:-1])
prompt = messages[-1].content
all_kwargs = self._get_all_kwargs(**kwargs)
all_kwargs["stream"] = True
if all_kwargs["model"] not in CHAT_MODELS:
raise ValueError(f"{all_kwargs['model']} not supported for chat")
response = completion_with_retry(
client=self._client,
max_retries=self.max_retries,
chat=True,
message=prompt,
chat_history=history,
**all_kwargs,
)
def gen() -> ChatResponseGen:
content = ""
role = MessageRole.ASSISTANT
for r in response:
if "text" in r.__dict__:
content_delta = r.text
else:
content_delta = ""
content += content_delta
yield ChatResponse(
message=ChatMessage(role=role, content=content),
delta=content_delta,
raw=r.__dict__,
)
return gen()
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
all_kwargs = self._get_all_kwargs(**kwargs)
all_kwargs["stream"] = True
response = completion_with_retry(
client=self._client,
max_retries=self.max_retries,
chat=False,
prompt=prompt,
**all_kwargs,
)
def gen() -> CompletionResponseGen:
content = ""
for r in response:
content_delta = r.text
content += content_delta
yield CompletionResponse(
text=content, delta=content_delta, raw=r._asdict()
)
return gen()
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
history = messages_to_cohere_history(messages[:-1])
prompt = messages[-1].content
all_kwargs = self._get_all_kwargs(**kwargs)
if all_kwargs["model"] not in CHAT_MODELS:
raise ValueError(f"{all_kwargs['model']} not supported for chat")
if "stream" in all_kwargs:
warnings.warn(
"Parameter `stream` is not supported by the `chat` method."
"Use the `stream_chat` method instead"
)
response = await acompletion_with_retry(
aclient=self._aclient,
max_retries=self.max_retries,
chat=True,
message=prompt,
chat_history=history,
**all_kwargs,
)
return ChatResponse(
message=ChatMessage(role=MessageRole.ASSISTANT, content=response.text),
raw=response.__dict__,
)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
if "stream" in all_kwargs:
warnings.warn(
"Parameter `stream` is not supported by the `chat` method."
"Use the `stream_chat` method instead"
)
response = await acompletion_with_retry(
aclient=self._aclient,
max_retries=self.max_retries,
chat=False,
prompt=prompt,
**all_kwargs,
)
return CompletionResponse(
text=response.generations[0].text,
raw=response.__dict__,
)
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
history = messages_to_cohere_history(messages[:-1])
prompt = messages[-1].content
all_kwargs = self._get_all_kwargs(**kwargs)
all_kwargs["stream"] = True
if all_kwargs["model"] not in CHAT_MODELS:
raise ValueError(f"{all_kwargs['model']} not supported for chat")
response = await acompletion_with_retry(
aclient=self._aclient,
max_retries=self.max_retries,
chat=True,
message=prompt,
chat_history=history,
**all_kwargs,
)
async def gen() -> ChatResponseAsyncGen:
content = ""
role = MessageRole.ASSISTANT
async for r in response:
if "text" in r.__dict__:
content_delta = r.text
else:
content_delta = ""
content += content_delta
yield ChatResponse(
message=ChatMessage(role=role, content=content),
delta=content_delta,
raw=r.__dict__,
)
return gen()
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
all_kwargs = self._get_all_kwargs(**kwargs)
all_kwargs["stream"] = True
response = await acompletion_with_retry(
aclient=self._aclient,
max_retries=self.max_retries,
chat=False,
prompt=prompt,
**all_kwargs,
)
async def gen() -> CompletionResponseAsyncGen:
content = ""
async for r in response:
content_delta = r.text
content += content_delta
yield CompletionResponse(
text=content, delta=content_delta, raw=r._asdict()
)
return gen()
| [
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.core.llms.types.ChatMessage",
"llama_index.legacy.llms.cohere_utils.cohere_modelname_to_contextsize",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.callbacks.CallbackManager",
"llama_index.legacy.llms.cohere_utils.messages_to_cohere_history",
"llama_index.legacy.core.llms.types.CompletionResponse",
"llama_index.legacy.llms.cohere_utils.acompletion_with_retry",
"llama_index.legacy.llms.cohere_utils.completion_with_retry"
] | [((897, 942), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The cohere model to use."""'}), "(description='The cohere model to use.')\n", (902, 942), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((968, 1025), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (973, 1025), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1049, 1116), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of API retries."""'}), "(default=10, description='The maximum number of API retries.')\n", (1054, 1116), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1171, 1256), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the Cohere API."""'}), "(default_factory=dict, description='Additional kwargs for the Cohere API.'\n )\n", (1176, 1256), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1288, 1350), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (1293, 1350), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1371, 1384), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1382, 1384), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1405, 1418), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1416, 1418), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((4032, 4051), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (4049, 4051), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5025, 5050), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5048, 5050), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5762, 5781), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5779, 5781), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6993, 7018), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7016, 7018), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((7775, 7794), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (7792, 7794), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8799, 8824), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8822, 8824), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9552, 9571), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9569, 9571), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((10821, 10846), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (10844, 
10846), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2518, 2567), 'cohere.Client', 'cohere.Client', (['api_key'], {'client_name': '"""llama_index"""'}), "(api_key, client_name='llama_index')\n", (2531, 2567), False, 'import cohere\n'), ((2592, 2646), 'cohere.AsyncClient', 'cohere.AsyncClient', (['api_key'], {'client_name': '"""llama_index"""'}), "(api_key, client_name='llama_index')\n", (2610, 2646), False, 'import cohere\n'), ((4154, 4195), 'llama_index.legacy.llms.cohere_utils.messages_to_cohere_history', 'messages_to_cohere_history', (['messages[:-1]'], {}), '(messages[:-1])\n', (4180, 4195), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((4642, 4781), 'llama_index.legacy.llms.cohere_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'max_retries': 'self.max_retries', 'chat': '(True)', 'message': 'prompt', 'chat_history': 'history'}), '(client=self._client, max_retries=self.max_retries,\n chat=True, message=prompt, chat_history=history, **all_kwargs)\n', (4663, 4781), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((5443, 5560), 'llama_index.legacy.llms.cohere_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'max_retries': 'self.max_retries', 'chat': '(False)', 'prompt': 'prompt'}), '(client=self._client, max_retries=self.max_retries,\n chat=False, prompt=prompt, **all_kwargs)\n', (5464, 5560), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((5644, 5720), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response.generations[0].text', 'raw': 'response.__dict__'}), '(text=response.generations[0].text, raw=response.__dict__)\n', (5662, 5720), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((5908, 5949), 'llama_index.legacy.llms.cohere_utils.messages_to_cohere_history', 'messages_to_cohere_history', (['messages[:-1]'], {}), '(messages[:-1])\n', (5934, 5949), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((6224, 6363), 'llama_index.legacy.llms.cohere_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'max_retries': 'self.max_retries', 'chat': '(True)', 'message': 'prompt', 'chat_history': 'history'}), '(client=self._client, max_retries=self.max_retries,\n chat=True, message=prompt, chat_history=history, **all_kwargs)\n', (6245, 6363), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((7250, 7367), 'llama_index.legacy.llms.cohere_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'max_retries': 'self.max_retries', 'chat': '(False)', 'prompt': 'prompt'}), '(client=self._client, max_retries=self.max_retries,\n 
chat=False, prompt=prompt, **all_kwargs)\n', (7271, 7367), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((7918, 7959), 'llama_index.legacy.llms.cohere_utils.messages_to_cohere_history', 'messages_to_cohere_history', (['messages[:-1]'], {}), '(messages[:-1])\n', (7944, 7959), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((9434, 9510), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response.generations[0].text', 'raw': 'response.__dict__'}), '(text=response.generations[0].text, raw=response.__dict__)\n', (9452, 9510), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((9710, 9751), 'llama_index.legacy.llms.cohere_utils.messages_to_cohere_history', 'messages_to_cohere_history', (['messages[:-1]'], {}), '(messages[:-1])\n', (9736, 9751), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((2474, 2493), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2489, 2493), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((4463, 4583), 'warnings.warn', 'warnings.warn', (['"""Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead"""'], {}), "(\n 'Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead'\n )\n", (4476, 4583), False, 'import warnings\n'), ((5263, 5383), 'warnings.warn', 'warnings.warn', (['"""Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead"""'], {}), "(\n 'Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead'\n )\n", (5276, 5383), False, 'import warnings\n'), ((8226, 8346), 'warnings.warn', 'warnings.warn', (['"""Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead"""'], {}), "(\n 'Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead'\n )\n", (8239, 8346), False, 'import warnings\n'), ((8412, 8554), 'llama_index.legacy.llms.cohere_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'aclient': 'self._aclient', 'max_retries': 'self.max_retries', 'chat': '(True)', 'message': 'prompt', 'chat_history': 'history'}), '(aclient=self._aclient, max_retries=self.max_retries,\n chat=True, message=prompt, chat_history=history, **all_kwargs)\n', (8434, 8554), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((9044, 9164), 'warnings.warn', 'warnings.warn', (['"""Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead"""'], {}), "(\n 'Parameter `stream` is not supported by the `chat` method.Use the `stream_chat` method instead'\n )\n", (9057, 9164), False, 'import warnings\n'), ((9230, 9350), 'llama_index.legacy.llms.cohere_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'aclient': 
'self._aclient', 'max_retries': 'self.max_retries', 'chat': '(False)', 'prompt': 'prompt'}), '(aclient=self._aclient, max_retries=self.max_retries,\n chat=False, prompt=prompt, **all_kwargs)\n', (9252, 9350), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((10032, 10174), 'llama_index.legacy.llms.cohere_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'aclient': 'self._aclient', 'max_retries': 'self.max_retries', 'chat': '(True)', 'message': 'prompt', 'chat_history': 'history'}), '(aclient=self._aclient, max_retries=self.max_retries,\n chat=True, message=prompt, chat_history=history, **all_kwargs)\n', (10054, 10174), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((11096, 11216), 'llama_index.legacy.llms.cohere_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'aclient': 'self._aclient', 'max_retries': 'self.max_retries', 'chat': '(False)', 'prompt': 'prompt'}), '(aclient=self._aclient, max_retries=self.max_retries,\n chat=False, prompt=prompt, **all_kwargs)\n', (11118, 11216), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((3405, 3448), 'llama_index.legacy.llms.cohere_utils.cohere_modelname_to_contextsize', 'cohere_modelname_to_contextsize', (['self.model'], {}), '(self.model)\n', (3436, 3448), False, 'from llama_index.legacy.llms.cohere_utils import CHAT_MODELS, acompletion_with_retry, cohere_modelname_to_contextsize, completion_with_retry, messages_to_cohere_history\n'), ((4910, 4972), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response.text'}), '(role=MessageRole.ASSISTANT, content=response.text)\n', (4921, 4972), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((8684, 8746), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response.text'}), '(role=MessageRole.ASSISTANT, content=response.text)\n', (8695, 8746), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((6829, 6868), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (6840, 6868), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((10657, 10696), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (10668, 10696), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n')] |
from abc import abstractmethod
from typing import List
from llama_index.core.indices.query.schema import QueryBundle, QueryType
from llama_index.core.prompts.mixin import PromptMixin
from llama_index.core.schema import NodeWithScore
class BaseImageRetriever(PromptMixin):
"""Base Image Retriever Abstraction."""
def text_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""Retrieve image nodes given query or single image input.
Args:
str_or_query_bundle (QueryType): a query text
string or a QueryBundle object.
"""
if isinstance(str_or_query_bundle, str):
str_or_query_bundle = QueryBundle(query_str=str_or_query_bundle)
return self._text_to_image_retrieve(str_or_query_bundle)
@abstractmethod
def _text_to_image_retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
"""Retrieve image nodes or documents given query text.
Implemented by the user.
"""
def image_to_image_retrieve(
self, str_or_query_bundle: QueryType
) -> List[NodeWithScore]:
"""Retrieve image nodes given single image input.
Args:
            str_or_query_bundle (QueryType): an image path
string or a QueryBundle object.
"""
if isinstance(str_or_query_bundle, str):
# leave query_str as empty since we are using image_path for image retrieval
str_or_query_bundle = QueryBundle(
query_str="", image_path=str_or_query_bundle
)
return self._image_to_image_retrieve(str_or_query_bundle)
@abstractmethod
def _image_to_image_retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
"""Retrieve image nodes or documents given image.
Implemented by the user.
"""
# Async Methods
async def atext_to_image_retrieve(
self,
str_or_query_bundle: QueryType,
) -> List[NodeWithScore]:
if isinstance(str_or_query_bundle, str):
str_or_query_bundle = QueryBundle(query_str=str_or_query_bundle)
return await self._atext_to_image_retrieve(str_or_query_bundle)
@abstractmethod
async def _atext_to_image_retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
"""Async retrieve image nodes or documents given query text.
Implemented by the user.
"""
async def aimage_to_image_retrieve(
self,
str_or_query_bundle: QueryType,
) -> List[NodeWithScore]:
if isinstance(str_or_query_bundle, str):
# leave query_str as empty since we are using image_path for image retrieval
str_or_query_bundle = QueryBundle(
query_str="", image_path=str_or_query_bundle
)
return await self._aimage_to_image_retrieve(str_or_query_bundle)
@abstractmethod
async def _aimage_to_image_retrieve(
self,
query_bundle: QueryBundle,
) -> List[NodeWithScore]:
"""Async retrieve image nodes or documents given image.
Implemented by the user.
"""
| [
"llama_index.core.indices.query.schema.QueryBundle"
] | [((706, 748), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'str_or_query_bundle'}), '(query_str=str_or_query_bundle)\n', (717, 748), False, 'from llama_index.core.indices.query.schema import QueryBundle, QueryType\n'), ((1525, 1582), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': '""""""', 'image_path': 'str_or_query_bundle'}), "(query_str='', image_path=str_or_query_bundle)\n", (1536, 1582), False, 'from llama_index.core.indices.query.schema import QueryBundle, QueryType\n'), ((2145, 2187), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'str_or_query_bundle'}), '(query_str=str_or_query_bundle)\n', (2156, 2187), False, 'from llama_index.core.indices.query.schema import QueryBundle, QueryType\n'), ((2813, 2870), 'llama_index.core.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': '""""""', 'image_path': 'str_or_query_bundle'}), "(query_str='', image_path=str_or_query_bundle)\n", (2824, 2870), False, 'from llama_index.core.indices.query.schema import QueryBundle, QueryType\n')] |
import json
from abc import abstractmethod
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Dict, Optional, Type
if TYPE_CHECKING:
from llama_index.legacy.bridge.langchain import StructuredTool, Tool
from deprecated import deprecated
from llama_index.legacy.bridge.pydantic import BaseModel
class DefaultToolFnSchema(BaseModel):
"""Default tool function Schema."""
input: str
@dataclass
class ToolMetadata:
description: str
name: Optional[str] = None
fn_schema: Optional[Type[BaseModel]] = DefaultToolFnSchema
def get_parameters_dict(self) -> dict:
if self.fn_schema is None:
parameters = {
"type": "object",
"properties": {
"input": {"title": "input query string", "type": "string"},
},
"required": ["input"],
}
else:
parameters = self.fn_schema.schema()
parameters = {
k: v
for k, v in parameters.items()
if k in ["type", "properties", "required", "definitions"]
}
return parameters
@property
def fn_schema_str(self) -> str:
"""Get fn schema as string."""
if self.fn_schema is None:
raise ValueError("fn_schema is None.")
parameters = self.get_parameters_dict()
return json.dumps(parameters)
def get_name(self) -> str:
"""Get name."""
if self.name is None:
raise ValueError("name is None.")
return self.name
@deprecated(
"Deprecated in favor of `to_openai_tool`, which should be used instead."
)
def to_openai_function(self) -> Dict[str, Any]:
"""Deprecated and replaced by `to_openai_tool`.
The name and arguments of a function that should be called, as generated by the
model.
"""
return {
"name": self.name,
"description": self.description,
"parameters": self.get_parameters_dict(),
}
def to_openai_tool(self) -> Dict[str, Any]:
"""To OpenAI tool."""
return {
"type": "function",
"function": {
"name": self.name,
"description": self.description,
"parameters": self.get_parameters_dict(),
},
}
class ToolOutput(BaseModel):
"""Tool output."""
content: str
tool_name: str
raw_input: Dict[str, Any]
raw_output: Any
def __str__(self) -> str:
"""String."""
return str(self.content)
class BaseTool:
@property
@abstractmethod
def metadata(self) -> ToolMetadata:
pass
@abstractmethod
def __call__(self, input: Any) -> ToolOutput:
pass
def _process_langchain_tool_kwargs(
self,
langchain_tool_kwargs: Any,
) -> Dict[str, Any]:
"""Process langchain tool kwargs."""
if "name" not in langchain_tool_kwargs:
langchain_tool_kwargs["name"] = self.metadata.name or ""
if "description" not in langchain_tool_kwargs:
langchain_tool_kwargs["description"] = self.metadata.description
if "fn_schema" not in langchain_tool_kwargs:
langchain_tool_kwargs["args_schema"] = self.metadata.fn_schema
return langchain_tool_kwargs
def to_langchain_tool(
self,
**langchain_tool_kwargs: Any,
) -> "Tool":
"""To langchain tool."""
from llama_index.legacy.bridge.langchain import Tool
langchain_tool_kwargs = self._process_langchain_tool_kwargs(
langchain_tool_kwargs
)
return Tool.from_function(
func=self.__call__,
**langchain_tool_kwargs,
)
def to_langchain_structured_tool(
self,
**langchain_tool_kwargs: Any,
) -> "StructuredTool":
"""To langchain structured tool."""
from llama_index.legacy.bridge.langchain import StructuredTool
langchain_tool_kwargs = self._process_langchain_tool_kwargs(
langchain_tool_kwargs
)
return StructuredTool.from_function(
func=self.__call__,
**langchain_tool_kwargs,
)
class AsyncBaseTool(BaseTool):
"""
Base-level tool class that is backwards compatible with the old tool spec but also
supports async.
"""
def __call__(self, *args: Any, **kwargs: Any) -> ToolOutput:
return self.call(*args, **kwargs)
@abstractmethod
def call(self, input: Any) -> ToolOutput:
"""
This is the method that should be implemented by the tool developer.
"""
@abstractmethod
async def acall(self, input: Any) -> ToolOutput:
"""
This is the async version of the call method.
Should also be implemented by the tool developer as an
async-compatible implementation.
"""
class BaseToolAsyncAdapter(AsyncBaseTool):
"""
Adapter class that allows a synchronous tool to be used as an async tool.
"""
def __init__(self, tool: BaseTool):
self.base_tool = tool
@property
def metadata(self) -> ToolMetadata:
return self.base_tool.metadata
def call(self, input: Any) -> ToolOutput:
return self.base_tool(input)
async def acall(self, input: Any) -> ToolOutput:
return self.call(input)
def adapt_to_async_tool(tool: BaseTool) -> AsyncBaseTool:
"""
Converts a synchronous tool to an async tool.
"""
if isinstance(tool, AsyncBaseTool):
return tool
else:
return BaseToolAsyncAdapter(tool)
| [
"llama_index.legacy.bridge.langchain.Tool.from_function",
"llama_index.legacy.bridge.langchain.StructuredTool.from_function"
] | [((1586, 1675), 'deprecated.deprecated', 'deprecated', (['"""Deprecated in favor of `to_openai_tool`, which should be used instead."""'], {}), "(\n 'Deprecated in favor of `to_openai_tool`, which should be used instead.')\n", (1596, 1675), False, 'from deprecated import deprecated\n'), ((1400, 1422), 'json.dumps', 'json.dumps', (['parameters'], {}), '(parameters)\n', (1410, 1422), False, 'import json\n'), ((3697, 3760), 'llama_index.legacy.bridge.langchain.Tool.from_function', 'Tool.from_function', ([], {'func': 'self.__call__'}), '(func=self.__call__, **langchain_tool_kwargs)\n', (3715, 3760), False, 'from llama_index.legacy.bridge.langchain import Tool\n'), ((4158, 4231), 'llama_index.legacy.bridge.langchain.StructuredTool.from_function', 'StructuredTool.from_function', ([], {'func': 'self.__call__'}), '(func=self.__call__, **langchain_tool_kwargs)\n', (4186, 4231), False, 'from llama_index.legacy.bridge.langchain import StructuredTool\n')] |
import logging
from dataclasses import dataclass
from typing import Any, List, Optional, cast
from deprecated import deprecated
import llama_index.core
from llama_index.core.bridge.pydantic import BaseModel
from llama_index.core.callbacks.base import CallbackManager
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.service_context_elements.llm_predictor import (
LLMPredictor,
BaseLLMPredictor,
)
from llama_index.core.base.llms.types import LLMMetadata
from llama_index.core.llms.llm import LLM
from llama_index.core.llms.utils import LLMType, resolve_llm
from llama_index.core.service_context_elements.llama_logger import LlamaLogger
from llama_index.core.node_parser.interface import NodeParser, TextSplitter
from llama_index.core.node_parser.text.sentence import (
DEFAULT_CHUNK_SIZE,
SENTENCE_CHUNK_OVERLAP,
SentenceSplitter,
)
from llama_index.core.prompts.base import BasePromptTemplate
from llama_index.core.schema import TransformComponent
from llama_index.core.types import PydanticProgramMode
logger = logging.getLogger(__name__)
def _get_default_node_parser(
chunk_size: int = DEFAULT_CHUNK_SIZE,
chunk_overlap: int = SENTENCE_CHUNK_OVERLAP,
callback_manager: Optional[CallbackManager] = None,
) -> NodeParser:
"""Get default node parser."""
return SentenceSplitter(
chunk_size=chunk_size,
chunk_overlap=chunk_overlap,
callback_manager=callback_manager or CallbackManager(),
)
def _get_default_prompt_helper(
llm_metadata: LLMMetadata,
context_window: Optional[int] = None,
num_output: Optional[int] = None,
) -> PromptHelper:
"""Get default prompt helper."""
if context_window is not None:
llm_metadata.context_window = context_window
if num_output is not None:
llm_metadata.num_output = num_output
return PromptHelper.from_llm_metadata(llm_metadata=llm_metadata)
class ServiceContextData(BaseModel):
llm: dict
llm_predictor: dict
prompt_helper: dict
embed_model: dict
transformations: List[dict]
@dataclass
class ServiceContext:
"""Service Context container.
The service context container is a utility container for LlamaIndex
index and query classes. It contains the following:
- llm_predictor: BaseLLMPredictor
- prompt_helper: PromptHelper
- embed_model: BaseEmbedding
- node_parser: NodeParser
- llama_logger: LlamaLogger (deprecated)
- callback_manager: CallbackManager
"""
llm_predictor: BaseLLMPredictor
prompt_helper: PromptHelper
embed_model: BaseEmbedding
transformations: List[TransformComponent]
llama_logger: LlamaLogger
callback_manager: CallbackManager
@classmethod
@deprecated(
version="0.10.0",
reason="ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.",
)
def from_defaults(
cls,
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# pydantic program mode (used if output_cls is specified)
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Create a ServiceContext from defaults.
If an argument is specified, then use the argument value provided for that
parameter. If an argument is not specified, then use the default value.
        You can change the base defaults by setting llama_index.core.global_service_context
to a ServiceContext object with your desired settings.
Args:
llm_predictor (Optional[BaseLLMPredictor]): LLMPredictor
prompt_helper (Optional[PromptHelper]): PromptHelper
embed_model (Optional[BaseEmbedding]): BaseEmbedding
or "local" (use local model)
node_parser (Optional[NodeParser]): NodeParser
llama_logger (Optional[LlamaLogger]): LlamaLogger (deprecated)
chunk_size (Optional[int]): chunk_size
callback_manager (Optional[CallbackManager]): CallbackManager
system_prompt (Optional[str]): System-wide prompt to be prepended
to all input prompts, used to guide system "decision making"
query_wrapper_prompt (Optional[BasePromptTemplate]): A format to wrap
passed-in input queries.
Deprecated Args:
chunk_size_limit (Optional[int]): renamed to chunk_size
"""
from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
logger.warning(
"chunk_size_limit is deprecated, please specify chunk_size instead"
)
chunk_size = chunk_size_limit
if llama_index.core.global_service_context is not None:
return cls.from_service_context(
llama_index.core.global_service_context,
llm=llm,
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
node_parser=node_parser,
text_splitter=text_splitter,
llama_logger=llama_logger,
callback_manager=callback_manager,
context_window=context_window,
chunk_size=chunk_size,
chunk_size_limit=chunk_size_limit,
chunk_overlap=chunk_overlap,
num_output=num_output,
system_prompt=system_prompt,
query_wrapper_prompt=query_wrapper_prompt,
transformations=transformations,
)
callback_manager = callback_manager or CallbackManager([])
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm.system_prompt = llm.system_prompt or system_prompt
llm.query_wrapper_prompt = llm.query_wrapper_prompt or query_wrapper_prompt
llm.pydantic_program_mode = (
llm.pydantic_program_mode or pydantic_program_mode
)
if llm_predictor is not None:
print("LLMPredictor is deprecated, please use LLM instead.")
llm_predictor = llm_predictor or LLMPredictor(
llm=llm, pydantic_program_mode=pydantic_program_mode
)
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# NOTE: embed model should be a transformation, but the way the service
# context works, we can't put in there yet.
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or [node_parser]
llama_logger = llama_logger or LlamaLogger()
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@classmethod
def from_service_context(
cls,
service_context: "ServiceContext",
llm_predictor: Optional[BaseLLMPredictor] = None,
llm: Optional[LLMType] = "default",
prompt_helper: Optional[PromptHelper] = None,
embed_model: Optional[Any] = "default",
node_parser: Optional[NodeParser] = None,
text_splitter: Optional[TextSplitter] = None,
transformations: Optional[List[TransformComponent]] = None,
llama_logger: Optional[LlamaLogger] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
query_wrapper_prompt: Optional[BasePromptTemplate] = None,
# node parser kwargs
chunk_size: Optional[int] = None,
chunk_overlap: Optional[int] = None,
# prompt helper kwargs
context_window: Optional[int] = None,
num_output: Optional[int] = None,
# deprecated kwargs
chunk_size_limit: Optional[int] = None,
) -> "ServiceContext":
"""Instantiate a new service context using a previous as the defaults."""
from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model
embed_model = cast(EmbedType, embed_model)
if chunk_size_limit is not None and chunk_size is None:
            logger.warning(
                "chunk_size_limit is deprecated, please specify chunk_size instead"
            )
chunk_size = chunk_size_limit
callback_manager = callback_manager or service_context.callback_manager
if llm != "default":
if llm_predictor is not None:
raise ValueError("Cannot specify both llm and llm_predictor")
llm = resolve_llm(llm)
llm_predictor = LLMPredictor(llm=llm)
llm_predictor = llm_predictor or service_context.llm_predictor
if isinstance(llm_predictor, LLMPredictor):
llm_predictor.llm.callback_manager = callback_manager
if system_prompt:
llm_predictor.system_prompt = system_prompt
if query_wrapper_prompt:
llm_predictor.query_wrapper_prompt = query_wrapper_prompt
# NOTE: the embed_model isn't used in all indices
# default to using the embed model passed from the service context
if embed_model == "default":
embed_model = service_context.embed_model
embed_model = resolve_embed_model(embed_model)
embed_model.callback_manager = callback_manager
prompt_helper = prompt_helper or service_context.prompt_helper
if context_window is not None or num_output is not None:
prompt_helper = _get_default_prompt_helper(
llm_metadata=llm_predictor.metadata,
context_window=context_window,
num_output=num_output,
)
transformations = transformations or []
node_parser_found = False
for transform in service_context.transformations:
if isinstance(transform, NodeParser):
node_parser_found = True
node_parser = transform
break
if text_splitter is not None and node_parser is not None:
raise ValueError("Cannot specify both text_splitter and node_parser")
if not node_parser_found:
node_parser = (
text_splitter # text splitter extends node parser
or node_parser
or _get_default_node_parser(
chunk_size=chunk_size or DEFAULT_CHUNK_SIZE,
chunk_overlap=chunk_overlap or SENTENCE_CHUNK_OVERLAP,
callback_manager=callback_manager,
)
)
transformations = transformations or service_context.transformations
llama_logger = llama_logger or service_context.llama_logger
return cls(
llm_predictor=llm_predictor,
embed_model=embed_model,
prompt_helper=prompt_helper,
transformations=transformations,
llama_logger=llama_logger, # deprecated
callback_manager=callback_manager,
)
@property
def llm(self) -> LLM:
return self.llm_predictor.llm
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
for transform in self.transformations:
if isinstance(transform, NodeParser):
return transform
raise ValueError("No node parser found.")
def to_dict(self) -> dict:
"""Convert service context to dict."""
llm_dict = self.llm_predictor.llm.to_dict()
llm_predictor_dict = self.llm_predictor.to_dict()
embed_model_dict = self.embed_model.to_dict()
prompt_helper_dict = self.prompt_helper.to_dict()
        transform_list_dict = [x.to_dict() for x in self.transformations]
        return ServiceContextData(
            llm=llm_dict,
            llm_predictor=llm_predictor_dict,
            prompt_helper=prompt_helper_dict,
            embed_model=embed_model_dict,
            transformations=transform_list_dict,
).dict()
@classmethod
def from_dict(cls, data: dict) -> "ServiceContext":
from llama_index.core.embeddings.loading import load_embed_model
from llama_index.core.extractors.loading import load_extractor
from llama_index.core.node_parser.loading import load_parser
from llama_index.core.service_context_elements.llm_predictor import (
load_predictor,
)
service_context_data = ServiceContextData.parse_obj(data)
llm_predictor = load_predictor(service_context_data.llm_predictor)
embed_model = load_embed_model(service_context_data.embed_model)
prompt_helper = PromptHelper.from_dict(service_context_data.prompt_helper)
transformations: List[TransformComponent] = []
for transform in service_context_data.transformations:
try:
transformations.append(load_parser(transform))
except ValueError:
transformations.append(load_extractor(transform))
return cls.from_defaults(
llm_predictor=llm_predictor,
prompt_helper=prompt_helper,
embed_model=embed_model,
transformations=transformations,
)
def set_global_service_context(service_context: Optional[ServiceContext]) -> None:
"""Helper function to set the global service context."""
llama_index.core.global_service_context = service_context
if service_context is not None:
from llama_index.core.settings import Settings
Settings.llm = service_context.llm
Settings.embed_model = service_context.embed_model
Settings.prompt_helper = service_context.prompt_helper
Settings.transformations = service_context.transformations
Settings.node_parser = service_context.node_parser
Settings.callback_manager = service_context.callback_manager
| [
"llama_index.core.llms.utils.resolve_llm",
"llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.core.indices.prompt_helper.PromptHelper.from_dict",
"llama_index.core.embeddings.utils.resolve_embed_model",
"llama_index.core.embeddings.loading.load_embed_model",
"llama_index.core.service_context_elements.llm_predictor.LLMPredictor",
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.core.node_parser.loading.load_parser",
"llama_index.core.extractors.loading.load_extractor",
"llama_index.core.service_context_elements.llama_logger.LlamaLogger",
"llama_index.core.service_context_elements.llm_predictor.load_predictor"
] | [((1138, 1165), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1155, 1165), False, 'import logging\n'), ((1940, 1997), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', ([], {'llm_metadata': 'llm_metadata'}), '(llm_metadata=llm_metadata)\n', (1970, 1997), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((2817, 2947), 'deprecated.deprecated', 'deprecated', ([], {'version': '"""0.10.0"""', 'reason': '"""ServiceContext is deprecated, please use `llama_index.settings.Settings` instead."""'}), "(version='0.10.0', reason=\n 'ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.'\n )\n", (2827, 2947), False, 'from deprecated import deprecated\n'), ((5458, 5486), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (5462, 5486), False, 'from typing import Any, List, Optional, cast\n'), ((7915, 7947), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (7934, 7947), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((10364, 10392), 'typing.cast', 'cast', (['EmbedType', 'embed_model'], {}), '(EmbedType, embed_model)\n', (10368, 10392), False, 'from typing import Any, List, Optional, cast\n'), ((11608, 11640), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (11627, 11640), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((14844, 14894), 'llama_index.core.service_context_elements.llm_predictor.load_predictor', 'load_predictor', (['service_context_data.llm_predictor'], {}), '(service_context_data.llm_predictor)\n', (14858, 14894), False, 'from llama_index.core.service_context_elements.llm_predictor import load_predictor\n'), ((14918, 14968), 'llama_index.core.embeddings.loading.load_embed_model', 'load_embed_model', (['service_context_data.embed_model'], {}), '(service_context_data.embed_model)\n', (14934, 14968), False, 'from llama_index.core.embeddings.loading import load_embed_model\n'), ((14994, 15052), 'llama_index.core.indices.prompt_helper.PromptHelper.from_dict', 'PromptHelper.from_dict', (['service_context_data.prompt_helper'], {}), '(service_context_data.prompt_helper)\n', (15016, 15052), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((6659, 6678), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6674, 6678), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((6846, 6862), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (6857, 6862), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((7294, 7360), 'llama_index.core.service_context_elements.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm', 'pydantic_program_mode': 'pydantic_program_mode'}), '(llm=llm, pydantic_program_mode=pydantic_program_mode)\n', (7306, 7360), False, 'from llama_index.core.service_context_elements.llm_predictor import LLMPredictor, BaseLLMPredictor\n'), ((8823, 8836), 'llama_index.core.service_context_elements.llama_logger.LlamaLogger', 'LlamaLogger', ([], {}), '()\n', (8834, 8836), False, 'from llama_index.core.service_context_elements.llama_logger import LlamaLogger\n'), ((10903, 10919), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', 
(['llm'], {}), '(llm)\n', (10914, 10919), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((10948, 10969), 'llama_index.core.service_context_elements.llm_predictor.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (10960, 10969), False, 'from llama_index.core.service_context_elements.llm_predictor import LLMPredictor, BaseLLMPredictor\n'), ((1539, 1556), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (1554, 1556), False, 'from llama_index.core.callbacks.base import CallbackManager\n'), ((15228, 15250), 'llama_index.core.node_parser.loading.load_parser', 'load_parser', (['transform'], {}), '(transform)\n', (15239, 15250), False, 'from llama_index.core.node_parser.loading import load_parser\n'), ((15322, 15347), 'llama_index.core.extractors.loading.load_extractor', 'load_extractor', (['transform'], {}), '(transform)\n', (15336, 15347), False, 'from llama_index.core.extractors.loading import load_extractor\n')] |
import json
from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union
from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
LLMMetadata,
MessageRole,
)
from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
DEFAULT_RUNGPT_MODEL = "rungpt"
DEFAULT_RUNGPT_TEMP = 0.75
class RunGptLLM(LLM):
"""The opengpt of Jina AI models."""
model: Optional[str] = Field(
default=DEFAULT_RUNGPT_MODEL, description="The rungpt model to use."
)
    endpoint: str = Field(description="The address of the serving endpoint.")
temperature: float = Field(
default=DEFAULT_RUNGPT_TEMP,
description="The temperature to use for sampling.",
gte=0.0,
lte=1.0,
)
max_tokens: int = Field(
default=DEFAULT_NUM_OUTPUTS,
description="Max tokens model generates.",
gt=0,
)
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model.",
gt=0,
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the Replicate API."
)
base_url: str = Field(
description="The address of your target model served by rungpt."
)
def __init__(
self,
model: Optional[str] = DEFAULT_RUNGPT_MODEL,
endpoint: str = "0.0.0.0:51002",
temperature: float = DEFAULT_RUNGPT_TEMP,
max_tokens: Optional[int] = DEFAULT_NUM_OUTPUTS,
context_window: int = DEFAULT_CONTEXT_WINDOW,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
):
if endpoint.startswith("http://"):
base_url = endpoint
else:
base_url = "http://" + endpoint
super().__init__(
model=model,
endpoint=endpoint,
temperature=temperature,
max_tokens=max_tokens,
context_window=context_window,
additional_kwargs=additional_kwargs or {},
callback_manager=callback_manager or CallbackManager([]),
base_url=base_url,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "RunGptLLM"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_tokens,
            model_name=self.model,
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
try:
import requests
except ImportError:
raise ImportError(
"Could not import requests library."
"Please install requests with `pip install requests`"
)
response_gpt = requests.post(
self.base_url + "/generate",
json=self._request_pack("complete", prompt, **kwargs),
stream=False,
).json()
return CompletionResponse(
text=response_gpt["choices"][0]["text"],
additional_kwargs=response_gpt["usage"],
raw=response_gpt,
)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
try:
import requests
except ImportError:
raise ImportError(
"Could not import requests library."
"Please install requests with `pip install requests`"
)
response_gpt = requests.post(
self.base_url + "/generate_stream",
json=self._request_pack("complete", prompt, **kwargs),
stream=True,
)
try:
import sseclient
except ImportError:
raise ImportError(
"Could not import sseclient-py library."
"Please install requests with `pip install sseclient-py`"
)
client = sseclient.SSEClient(response_gpt)
response_iter = client.events()
def gen() -> CompletionResponseGen:
text = ""
for item in response_iter:
item_dict = json.loads(json.dumps(eval(item.data)))
delta = item_dict["choices"][0]["text"]
additional_kwargs = item_dict["usage"]
text = text + self._space_handler(delta)
yield CompletionResponse(
text=text,
delta=delta,
raw=item_dict,
additional_kwargs=additional_kwargs,
)
return gen()
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
message_list = self._message_wrapper(messages)
try:
import requests
except ImportError:
raise ImportError(
"Could not import requests library."
"Please install requests with `pip install requests`"
)
response_gpt = requests.post(
self.base_url + "/chat",
json=self._request_pack("chat", message_list, **kwargs),
stream=False,
).json()
chat_message, _ = self._message_unpacker(response_gpt)
return ChatResponse(message=chat_message, raw=response_gpt)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
message_list = self._message_wrapper(messages)
try:
import requests
except ImportError:
raise ImportError(
"Could not import requests library."
"Please install requests with `pip install requests`"
)
response_gpt = requests.post(
self.base_url + "/chat_stream",
json=self._request_pack("chat", message_list, **kwargs),
stream=True,
)
try:
import sseclient
except ImportError:
raise ImportError(
"Could not import sseclient-py library."
"Please install requests with `pip install sseclient-py`"
)
client = sseclient.SSEClient(response_gpt)
chat_iter = client.events()
def gen() -> ChatResponseGen:
content = ""
for item in chat_iter:
item_dict = json.loads(json.dumps(eval(item.data)))
chat_message, delta = self._message_unpacker(item_dict)
content = content + self._space_handler(delta)
chat_message.content = content
yield ChatResponse(message=chat_message, raw=item_dict, delta=delta)
return gen()
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
return self.chat(messages, **kwargs)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
async def gen() -> ChatResponseAsyncGen:
for message in self.stream_chat(messages, **kwargs):
yield message
# NOTE: convert generator to async generator
return gen()
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return self.complete(prompt, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
async def gen() -> CompletionResponseAsyncGen:
for message in self.stream_complete(prompt, **kwargs):
yield message
return gen()
def _message_wrapper(self, messages: Sequence[ChatMessage]) -> List[Dict[str, Any]]:
message_list = []
for message in messages:
role = message.role.value
content = message.content
message_list.append({"role": role, "content": content})
return message_list
def _message_unpacker(
self, response_gpt: Dict[str, Any]
) -> Tuple[ChatMessage, str]:
message = response_gpt["choices"][0]["message"]
additional_kwargs = response_gpt["usage"]
role = message["role"]
content = message["content"]
key = MessageRole.SYSTEM
for r in MessageRole:
if r.value == role:
key = r
chat_message = ChatMessage(
role=key, content=content, additional_kwargs=additional_kwargs
)
return chat_message, content
def _request_pack(
self, mode: str, prompt: Union[str, List[Dict[str, Any]]], **kwargs: Any
) -> Optional[Dict[str, Any]]:
if mode == "complete":
return {
"prompt": prompt,
"max_tokens": kwargs.pop("max_tokens", self.max_tokens),
"temperature": kwargs.pop("temperature", self.temperature),
"top_k": kwargs.pop("top_k", 50),
"top_p": kwargs.pop("top_p", 0.95),
"repetition_penalty": kwargs.pop("repetition_penalty", 1.2),
"do_sample": kwargs.pop("do_sample", False),
"echo": kwargs.pop("echo", True),
"n": kwargs.pop("n", 1),
"stop": kwargs.pop("stop", "."),
}
elif mode == "chat":
return {
"messages": prompt,
"max_tokens": kwargs.pop("max_tokens", self.max_tokens),
"temperature": kwargs.pop("temperature", self.temperature),
"top_k": kwargs.pop("top_k", 50),
"top_p": kwargs.pop("top_p", 0.95),
"repetition_penalty": kwargs.pop("repetition_penalty", 1.2),
"do_sample": kwargs.pop("do_sample", False),
"echo": kwargs.pop("echo", True),
"n": kwargs.pop("n", 1),
"stop": kwargs.pop("stop", "."),
}
return None
def _space_handler(self, word: str) -> str:
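        # Prepend a space to purely alphanumeric tokens when stitching streamed
        # deltas back together; non-alphanumeric chunks are passed through as-is.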
if word.isalnum():
return " " + word
return word
| [
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.core.llms.types.ChatResponse",
"llama_index.legacy.core.llms.types.ChatMessage",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.callbacks.CallbackManager",
"llama_index.legacy.core.llms.types.CompletionResponse"
] | [((893, 968), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_RUNGPT_MODEL', 'description': '"""The rungpt model to use."""'}), "(default=DEFAULT_RUNGPT_MODEL, description='The rungpt model to use.')\n", (898, 968), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1003, 1056), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The endpoint of serving address."""'}), "(description='The endpoint of serving address.')\n", (1008, 1056), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1082, 1191), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_RUNGPT_TEMP', 'description': '"""The temperature to use for sampling."""', 'gte': '(0.0)', 'lte': '(1.0)'}), "(default=DEFAULT_RUNGPT_TEMP, description=\n 'The temperature to use for sampling.', gte=0.0, lte=1.0)\n", (1087, 1191), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1248, 1336), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_NUM_OUTPUTS', 'description': '"""Max tokens model generates."""', 'gt': '(0)'}), "(default=DEFAULT_NUM_OUTPUTS, description=\n 'Max tokens model generates.', gt=0)\n", (1253, 1336), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1389, 1504), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CONTEXT_WINDOW', 'description': '"""The maximum number of context tokens for the model."""', 'gt': '(0)'}), "(default=DEFAULT_CONTEXT_WINDOW, description=\n 'The maximum number of context tokens for the model.', gt=0)\n", (1394, 1504), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1571, 1659), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the Replicate API."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the Replicate API.')\n", (1576, 1659), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1689, 1760), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The address of your target model served by rungpt."""'}), "(description='The address of your target model served by rungpt.')\n", (1694, 1760), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((3597, 3622), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3620, 3622), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4350, 4375), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (4373, 4375), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5847, 5866), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5864, 5866), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6567, 6586), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6584, 6586), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((7968, 7987), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (7985, 7987), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8161, 8180), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', 
(8178, 8180), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8543, 8568), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8566, 8568), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8742, 8767), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8765, 8767), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3444, 3547), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_window', 'num_output': 'self.max_tokens', 'model_name': 'self._model'}), '(context_window=self.context_window, num_output=self.max_tokens,\n model_name=self._model)\n', (3455, 3547), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((4178, 4300), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': "response_gpt['choices'][0]['text']", 'additional_kwargs': "response_gpt['usage']", 'raw': 'response_gpt'}), "(text=response_gpt['choices'][0]['text'],\n additional_kwargs=response_gpt['usage'], raw=response_gpt)\n", (4196, 4300), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((5187, 5220), 'sseclient.SSEClient', 'sseclient.SSEClient', (['response_gpt'], {}), '(response_gpt)\n', (5206, 5220), False, 'import sseclient\n'), ((6508, 6560), 'llama_index.legacy.core.llms.types.ChatResponse', 'ChatResponse', ([], {'message': 'chat_message', 'raw': 'response_gpt'}), '(message=chat_message, raw=response_gpt)\n', (6520, 6560), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((7436, 7469), 'sseclient.SSEClient', 'sseclient.SSEClient', (['response_gpt'], {}), '(response_gpt)\n', (7455, 7469), False, 'import sseclient\n'), ((9819, 9894), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'key', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=key, content=content, additional_kwargs=additional_kwargs)\n', (9830, 9894), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((2963, 2982), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2978, 2982), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((5625, 5724), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'text', 'delta': 'delta', 'raw': 'item_dict', 'additional_kwargs': 'additional_kwargs'}), '(text=text, delta=delta, raw=item_dict, additional_kwargs\n =additional_kwargs)\n', (5643, 5724), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), 
((7877, 7939), 'llama_index.legacy.core.llms.types.ChatResponse', 'ChatResponse', ([], {'message': 'chat_message', 'raw': 'item_dict', 'delta': 'delta'}), '(message=chat_message, raw=item_dict, delta=delta)\n', (7889, 7939), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n')] |
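A minimal usage sketch for the RunGptLLM class in the record above. The import path and the running rungpt server are assumptions, not part of the record.

# Usage sketch (assumes a rungpt server listening on 0.0.0.0:51002 and that
# RunGptLLM is importable from llama_index.legacy.llms.rungpt -- both assumed).
from llama_index.legacy.core.llms.types import ChatMessage, MessageRole
from llama_index.legacy.llms.rungpt import RunGptLLM

llm = RunGptLLM(endpoint="0.0.0.0:51002", temperature=0.75, max_tokens=256)

# Plain completion against the /generate route used by complete().
print(llm.complete("Write one sentence about rungpt.").text)

# Chat call built from the same ChatMessage type the class consumes.
messages = [
    ChatMessage(role=MessageRole.SYSTEM, content="You are a helpful assistant."),
    ChatMessage(role=MessageRole.USER, content="Say hello."),
]
print(llm.chat(messages).message.content)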
from typing import List, Optional
import fsspec
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.schema import BaseNode
from llama_index.core.storage.docstore.utils import doc_to_json, json_to_doc
from llama_index.core.storage.kvstore import (
SimpleKVStore as SimpleCache,
)
from llama_index.core.storage.kvstore.types import (
BaseKVStore as BaseCache,
)
DEFAULT_CACHE_NAME = "llama_cache"
class IngestionCache(BaseModel):
class Config:
arbitrary_types_allowed = True
nodes_key = "nodes"
collection: str = Field(
default=DEFAULT_CACHE_NAME, description="Collection name of the cache."
)
cache: BaseCache = Field(default_factory=SimpleCache, description="Cache to use.")
# TODO: add async get/put methods?
def put(
self, key: str, nodes: List[BaseNode], collection: Optional[str] = None
) -> None:
"""Put a value into the cache."""
collection = collection or self.collection
val = {self.nodes_key: [doc_to_json(node) for node in nodes]}
self.cache.put(key, val, collection=collection)
def get(
self, key: str, collection: Optional[str] = None
) -> Optional[List[BaseNode]]:
"""Get a value from the cache."""
collection = collection or self.collection
node_dicts = self.cache.get(key, collection=collection)
if node_dicts is None:
return None
return [json_to_doc(node_dict) for node_dict in node_dicts[self.nodes_key]]
def clear(self, collection: Optional[str] = None) -> None:
"""Clear the cache."""
collection = collection or self.collection
data = self.cache.get_all(collection=collection)
for key in data:
self.cache.delete(key, collection=collection)
def persist(
self, persist_path: str, fs: Optional[fsspec.AbstractFileSystem] = None
) -> None:
"""Persist the cache to a directory, if possible."""
if isinstance(self.cache, SimpleCache):
self.cache.persist(persist_path, fs=fs)
else:
print("Warning: skipping persist, only needed for SimpleCache.")
@classmethod
def from_persist_path(
cls,
persist_path: str,
collection: str = DEFAULT_CACHE_NAME,
fs: Optional[fsspec.AbstractFileSystem] = None,
) -> "IngestionCache":
"""Create a IngestionCache from a persist directory."""
return cls(
collection=collection,
cache=SimpleCache.from_persist_path(persist_path, fs=fs),
)
__all__ = ["SimpleCache", "BaseCache"]
| [
"llama_index.core.storage.kvstore.SimpleKVStore.from_persist_path",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.storage.docstore.utils.json_to_doc",
"llama_index.core.storage.docstore.utils.doc_to_json"
] | [((577, 655), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CACHE_NAME', 'description': '"""Collection name of the cache."""'}), "(default=DEFAULT_CACHE_NAME, description='Collection name of the cache.')\n", (582, 655), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((693, 756), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'SimpleCache', 'description': '"""Cache to use."""'}), "(default_factory=SimpleCache, description='Cache to use.')\n", (698, 756), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1461, 1483), 'llama_index.core.storage.docstore.utils.json_to_doc', 'json_to_doc', (['node_dict'], {}), '(node_dict)\n', (1472, 1483), False, 'from llama_index.core.storage.docstore.utils import doc_to_json, json_to_doc\n'), ((1031, 1048), 'llama_index.core.storage.docstore.utils.doc_to_json', 'doc_to_json', (['node'], {}), '(node)\n', (1042, 1048), False, 'from llama_index.core.storage.docstore.utils import doc_to_json, json_to_doc\n'), ((2531, 2581), 'llama_index.core.storage.kvstore.SimpleKVStore.from_persist_path', 'SimpleCache.from_persist_path', (['persist_path'], {'fs': 'fs'}), '(persist_path, fs=fs)\n', (2560, 2581), True, 'from llama_index.core.storage.kvstore import SimpleKVStore as SimpleCache\n')] |
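A short usage sketch for the IngestionCache record above; TextNode is used as a concrete BaseNode, and the llama_index.core.ingestion import path is assumed.

# Usage sketch: put/get/persist on the default in-memory SimpleKVStore backend.
from llama_index.core.ingestion import IngestionCache  # import path assumed
from llama_index.core.schema import TextNode

cache = IngestionCache()  # SimpleCache backend, "llama_cache" collection

nodes = [TextNode(text="hello"), TextNode(text="world")]
cache.put("transform-hash-123", nodes)

cached = cache.get("transform-hash-123")
assert cached is not None and len(cached) == 2

# Only SimpleCache supports persistence; other BaseCache backends just warn.
cache.persist("./llama_cache.json")
restored = IngestionCache.from_persist_path("./llama_cache.json")
print(len(restored.get("transform-hash-123") or []))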
"""Base object types."""
import pickle
import warnings
from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar
from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.callbacks.base import CallbackManager
from llama_index.legacy.core.base_retriever import BaseRetriever
from llama_index.legacy.core.query_pipeline.query_component import (
ChainableMixin,
InputKeys,
OutputKeys,
QueryComponent,
validate_and_convert_stringable,
)
from llama_index.legacy.indices.base import BaseIndex
from llama_index.legacy.indices.vector_store.base import VectorStoreIndex
from llama_index.legacy.objects.base_node_mapping import (
DEFAULT_PERSIST_FNAME,
BaseObjectNodeMapping,
SimpleObjectNodeMapping,
)
from llama_index.legacy.schema import QueryType
from llama_index.legacy.storage.storage_context import (
DEFAULT_PERSIST_DIR,
StorageContext,
)
OT = TypeVar("OT")
class ObjectRetriever(ChainableMixin, Generic[OT]):
"""Object retriever."""
def __init__(
self, retriever: BaseRetriever, object_node_mapping: BaseObjectNodeMapping[OT]
):
self._retriever = retriever
self._object_node_mapping = object_node_mapping
@property
def retriever(self) -> BaseRetriever:
"""Retriever."""
return self._retriever
def retrieve(self, str_or_query_bundle: QueryType) -> List[OT]:
nodes = self._retriever.retrieve(str_or_query_bundle)
return [self._object_node_mapping.from_node(node.node) for node in nodes]
async def aretrieve(self, str_or_query_bundle: QueryType) -> List[OT]:
nodes = await self._retriever.aretrieve(str_or_query_bundle)
return [self._object_node_mapping.from_node(node.node) for node in nodes]
def _as_query_component(self, **kwargs: Any) -> QueryComponent:
"""As query component."""
return ObjectRetrieverComponent(retriever=self)
class ObjectRetrieverComponent(QueryComponent):
"""Object retriever component."""
retriever: ObjectRetriever = Field(..., description="Retriever.")
class Config:
arbitrary_types_allowed = True
def set_callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set callback manager."""
self.retriever.retriever.callback_manager = callback_manager
def _validate_component_inputs(self, input: Dict[str, Any]) -> Dict[str, Any]:
"""Validate component inputs during run_component."""
# make sure input is a string
input["input"] = validate_and_convert_stringable(input["input"])
return input
def _run_component(self, **kwargs: Any) -> Any:
"""Run component."""
output = self.retriever.retrieve(kwargs["input"])
return {"output": output}
async def _arun_component(self, **kwargs: Any) -> Any:
"""Run component (async)."""
output = await self.retriever.aretrieve(kwargs["input"])
return {"output": output}
@property
def input_keys(self) -> InputKeys:
"""Input keys."""
return InputKeys.from_keys({"input"})
@property
def output_keys(self) -> OutputKeys:
"""Output keys."""
return OutputKeys.from_keys({"output"})
class ObjectIndex(Generic[OT]):
"""Object index."""
def __init__(
self, index: BaseIndex, object_node_mapping: BaseObjectNodeMapping
) -> None:
self._index = index
self._object_node_mapping = object_node_mapping
@classmethod
def from_objects(
cls,
objects: Sequence[OT],
object_mapping: Optional[BaseObjectNodeMapping] = None,
index_cls: Type[BaseIndex] = VectorStoreIndex,
**index_kwargs: Any,
) -> "ObjectIndex":
if object_mapping is None:
object_mapping = SimpleObjectNodeMapping.from_objects(objects)
nodes = object_mapping.to_nodes(objects)
index = index_cls(nodes, **index_kwargs)
return cls(index, object_mapping)
def insert_object(self, obj: Any) -> None:
self._object_node_mapping.add_object(obj)
node = self._object_node_mapping.to_node(obj)
self._index.insert_nodes([node])
def as_retriever(self, **kwargs: Any) -> ObjectRetriever:
return ObjectRetriever(
retriever=self._index.as_retriever(**kwargs),
object_node_mapping=self._object_node_mapping,
)
def as_node_retriever(self, **kwargs: Any) -> BaseRetriever:
return self._index.as_retriever(**kwargs)
def persist(
self,
persist_dir: str = DEFAULT_PERSIST_DIR,
obj_node_mapping_fname: str = DEFAULT_PERSIST_FNAME,
) -> None:
# try to persist object node mapping
try:
self._object_node_mapping.persist(
persist_dir=persist_dir, obj_node_mapping_fname=obj_node_mapping_fname
)
except (NotImplementedError, pickle.PickleError) as err:
warnings.warn(
(
"Unable to persist ObjectNodeMapping. You will need to "
"reconstruct the same object node mapping to build this ObjectIndex"
),
stacklevel=2,
)
self._index._storage_context.persist(persist_dir=persist_dir)
@classmethod
def from_persist_dir(
cls,
persist_dir: str = DEFAULT_PERSIST_DIR,
object_node_mapping: Optional[BaseObjectNodeMapping] = None,
) -> "ObjectIndex":
from llama_index.legacy.indices import load_index_from_storage
storage_context = StorageContext.from_defaults(persist_dir=persist_dir)
index = load_index_from_storage(storage_context)
if object_node_mapping:
return cls(index=index, object_node_mapping=object_node_mapping)
else:
# try to load object_node_mapping
            # assume SimpleObjectNodeMapping for simplicity, as it is the only
            # subclass that supports this method
try:
object_node_mapping = SimpleObjectNodeMapping.from_persist_dir(
persist_dir=persist_dir
)
except Exception as err:
raise Exception(
"Unable to load from persist dir. The object_node_mapping cannot be loaded."
) from err
else:
return cls(index=index, object_node_mapping=object_node_mapping)
| [
"llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys",
"llama_index.legacy.indices.load_index_from_storage",
"llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys",
"llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects",
"llama_index.legacy.storage.storage_context.StorageContext.from_defaults",
"llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable"
] | [((925, 938), 'typing.TypeVar', 'TypeVar', (['"""OT"""'], {}), "('OT')\n", (932, 938), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar\n'), ((2060, 2096), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Retriever."""'}), "(..., description='Retriever.')\n", (2065, 2096), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2549, 2596), 'llama_index.legacy.core.query_pipeline.query_component.validate_and_convert_stringable', 'validate_and_convert_stringable', (["input['input']"], {}), "(input['input'])\n", (2580, 2596), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3083, 3113), 'llama_index.legacy.core.query_pipeline.query_component.InputKeys.from_keys', 'InputKeys.from_keys', (["{'input'}"], {}), "({'input'})\n", (3102, 3113), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((3212, 3244), 'llama_index.legacy.core.query_pipeline.query_component.OutputKeys.from_keys', 'OutputKeys.from_keys', (["{'output'}"], {}), "({'output'})\n", (3232, 3244), False, 'from llama_index.legacy.core.query_pipeline.query_component import ChainableMixin, InputKeys, OutputKeys, QueryComponent, validate_and_convert_stringable\n'), ((5600, 5653), 'llama_index.legacy.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (5628, 5653), False, 'from llama_index.legacy.storage.storage_context import DEFAULT_PERSIST_DIR, StorageContext\n'), ((5670, 5710), 'llama_index.legacy.indices.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {}), '(storage_context)\n', (5693, 5710), False, 'from llama_index.legacy.indices import load_index_from_storage\n'), ((3816, 3861), 'llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_objects', 'SimpleObjectNodeMapping.from_objects', (['objects'], {}), '(objects)\n', (3852, 3861), False, 'from llama_index.legacy.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n'), ((4972, 5133), 'warnings.warn', 'warnings.warn', (['"""Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex"""'], {'stacklevel': '(2)'}), "(\n 'Unable to persist ObjectNodeMapping. You will need to reconstruct the same object node mapping to build this ObjectIndex'\n , stacklevel=2)\n", (4985, 5133), False, 'import warnings\n'), ((6056, 6121), 'llama_index.legacy.objects.base_node_mapping.SimpleObjectNodeMapping.from_persist_dir', 'SimpleObjectNodeMapping.from_persist_dir', ([], {'persist_dir': 'persist_dir'}), '(persist_dir=persist_dir)\n', (6096, 6121), False, 'from llama_index.legacy.objects.base_node_mapping import DEFAULT_PERSIST_FNAME, BaseObjectNodeMapping, SimpleObjectNodeMapping\n')] |
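A sketch of how the ObjectIndex/ObjectRetriever pair above is typically wired together; it assumes an embedding model is already configured for the default VectorStoreIndex and that both classes are importable from llama_index.legacy.objects.

# Usage sketch: index arbitrary Python objects and retrieve them by similarity.
# An embedding model (e.g. an OpenAI key in the environment) is assumed.
from llama_index.legacy.objects import ObjectIndex, SimpleObjectNodeMapping  # paths assumed

fruits = ["a crisp green apple", "a ripe yellow banana", "a sour lemon"]
mapping = SimpleObjectNodeMapping.from_objects(fruits)

obj_index = ObjectIndex.from_objects(fruits, object_mapping=mapping)

retriever = obj_index.as_retriever(similarity_top_k=1)
print(retriever.retrieve("something citrusy"))

# Persist the index and the node mapping side by side, then reload both.
obj_index.persist(persist_dir="./obj_index_storage")
restored = ObjectIndex.from_persist_dir(persist_dir="./obj_index_storage")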
"""Base reader class."""
from abc import ABC
from typing import TYPE_CHECKING, Any, Dict, Iterable, List
if TYPE_CHECKING:
from llama_index.legacy.bridge.langchain import Document as LCDocument
from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.schema import BaseComponent, Document
class BaseReader(ABC):
"""Utilities for loading data from a directory."""
def lazy_load_data(self, *args: Any, **load_kwargs: Any) -> Iterable[Document]:
"""Load data from the input directory lazily."""
raise NotImplementedError(
f"{self.__class__.__name__} does not provide lazy_load_data method currently"
)
def load_data(self, *args: Any, **load_kwargs: Any) -> List[Document]:
"""Load data from the input directory."""
return list(self.lazy_load_data(*args, **load_kwargs))
def load_langchain_documents(self, **load_kwargs: Any) -> List["LCDocument"]:
"""Load data in LangChain document format."""
docs = self.load_data(**load_kwargs)
return [d.to_langchain_format() for d in docs]
class BasePydanticReader(BaseReader, BaseComponent):
"""Serialiable Data Loader with Pydatnic."""
is_remote: bool = Field(
default=False,
description="Whether the data is loaded from a remote API or a local file.",
)
class Config:
arbitrary_types_allowed = True
class ReaderConfig(BaseComponent):
"""Represents a reader and it's input arguments."""
reader: BasePydanticReader = Field(..., description="Reader to use.")
reader_args: List[Any] = Field(default_factory=list, description="Reader args.")
reader_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Reader kwargs."
)
class Config:
arbitrary_types_allowed = True
@classmethod
def class_name(cls) -> str:
"""Get the name identifier of the class."""
return "ReaderConfig"
def to_dict(self, **kwargs: Any) -> Dict[str, Any]:
"""Convert the class to a dictionary."""
return {
"loader": self.reader.to_dict(**kwargs),
"reader_args": self.reader_args,
"reader_kwargs": self.reader_kwargs,
"class_name": self.class_name(),
}
def read(self) -> List[Document]:
"""Call the loader with the given arguments."""
return self.reader.load_data(*self.reader_args, **self.reader_kwargs)
| [
"llama_index.legacy.bridge.pydantic.Field"
] | [((1225, 1327), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the data is loaded from a remote API or a local file."""'}), "(default=False, description=\n 'Whether the data is loaded from a remote API or a local file.')\n", (1230, 1327), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1531, 1571), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""Reader to use."""'}), "(..., description='Reader to use.')\n", (1536, 1571), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1601, 1656), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': '"""Reader args."""'}), "(default_factory=list, description='Reader args.')\n", (1606, 1656), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1693, 1750), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Reader kwargs."""'}), "(default_factory=dict, description='Reader kwargs.')\n", (1698, 1750), False, 'from llama_index.legacy.bridge.pydantic import Field\n')] |
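A small, self-contained reader built on the BaseReader interface from the record above; the llama_index.legacy.readers.base import path is assumed.

# Usage sketch: only lazy_load_data is implemented; load_data() and
# load_langchain_documents() are inherited from BaseReader.
from typing import Any, Iterable, List

from llama_index.legacy.readers.base import BaseReader  # path assumed
from llama_index.legacy.schema import Document


class InMemoryReader(BaseReader):
    """Yields one Document per string held in memory."""

    def __init__(self, texts: List[str]) -> None:
        self._texts = texts

    def lazy_load_data(self, *args: Any, **load_kwargs: Any) -> Iterable[Document]:
        for text in self._texts:
            yield Document(text=text)


docs = InMemoryReader(["first doc", "second doc"]).load_data()
print(len(docs))  # 2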
"""
Portkey integration with Llama_index for enhanced monitoring.
"""
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union, cast
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.legacy.llms.custom import CustomLLM
from llama_index.legacy.llms.generic_utils import (
chat_to_completion_decorator,
completion_to_chat_decorator,
stream_chat_to_completion_decorator,
stream_completion_to_chat_decorator,
)
from llama_index.legacy.llms.portkey_utils import (
IMPORT_ERROR_MESSAGE,
generate_llm_metadata,
get_llm,
is_chat_model,
)
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
if TYPE_CHECKING:
from portkey import (
LLMOptions,
Modes,
ModesLiteral,
PortkeyResponse,
)
DEFAULT_PORTKEY_MODEL = "gpt-3.5-turbo"
class Portkey(CustomLLM):
"""_summary_.
Args:
LLM (_type_): _description_
"""
mode: Optional[Union["Modes", "ModesLiteral"]] = Field(
description="The mode for using the Portkey integration"
)
model: Optional[str] = Field(default=DEFAULT_PORTKEY_MODEL)
llm: "LLMOptions" = Field(description="LLM parameter", default_factory=dict)
llms: List["LLMOptions"] = Field(description="LLM parameters", default_factory=list)
_client: Any = PrivateAttr()
def __init__(
self,
*,
mode: Union["Modes", "ModesLiteral"],
api_key: Optional[str] = None,
base_url: Optional[str] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
"""
Initialize a Portkey instance.
Args:
            mode (Union[Modes, ModesLiteral]): The mode for using the Portkey
                integration (e.g. single, fallback or load-balance).
            api_key (Optional[str]): The API key to authenticate with Portkey.
            base_url (Optional[str]): The base URL of a self-hosted Rubeus \
                (the open-source version of Portkey) or any other self-hosted server.
"""
try:
import portkey
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
super().__init__(
base_url=base_url,
api_key=api_key,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
if api_key is not None:
portkey.api_key = api_key
if base_url is not None:
portkey.base_url = base_url
portkey.mode = mode
self._client = portkey
self.model = None
self.mode = mode
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return generate_llm_metadata(self.llms[0])
def add_llms(
self, llm_params: Union["LLMOptions", List["LLMOptions"]]
) -> "Portkey":
"""
Adds the specified LLM parameters to the list of LLMs. This may be used for
fallbacks or load-balancing as specified in the mode.
Args:
llm_params (Union[LLMOptions, List[LLMOptions]]): A single LLM parameter \
set or a list of LLM parameter sets. Each set should be an instance of \
LLMOptions with
the specified attributes.
> provider: Optional[ProviderTypes]
> model: str
> temperature: float
> max_tokens: Optional[int]
> max_retries: int
> trace_id: Optional[str]
> cache_status: Optional[CacheType]
> cache: Optional[bool]
> metadata: Dict[str, Any]
> weight: Optional[float]
> **kwargs : Other additional parameters that are supported by \
LLMOptions in portkey-ai
NOTE: User may choose to pass additional params as well.
Returns:
self
"""
try:
from portkey import LLMOptions
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
if isinstance(llm_params, LLMOptions):
llm_params = [llm_params]
self.llms.extend(llm_params)
if self.model is None:
self.model = self.llms[0].model
return self
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
"""Completion endpoint for LLM."""
if self._is_chat_model:
complete_fn = chat_to_completion_decorator(self._chat)
else:
complete_fn = self._complete
return complete_fn(prompt, **kwargs)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
if self._is_chat_model:
chat_fn = self._chat
else:
chat_fn = completion_to_chat_decorator(self._complete)
return chat_fn(messages, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
"""Completion endpoint for LLM."""
if self._is_chat_model:
complete_fn = stream_chat_to_completion_decorator(self._stream_chat)
else:
complete_fn = self._stream_complete
return complete_fn(prompt, **kwargs)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if self._is_chat_model:
stream_chat_fn = self._stream_chat
else:
stream_chat_fn = stream_completion_to_chat_decorator(self._stream_complete)
return stream_chat_fn(messages, **kwargs)
def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
try:
from portkey import Config, Message
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
_messages = cast(
List[Message],
[{"role": i.role.value, "content": i.content} for i in messages],
)
config = Config(llms=self.llms)
response = self._client.ChatCompletions.create(
messages=_messages, config=config
)
self.llm = self._get_llm(response)
message = response.choices[0].message
return ChatResponse(message=message, raw=response)
def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
try:
from portkey import Config
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
config = Config(llms=self.llms)
response = self._client.Completions.create(prompt=prompt, config=config)
text = response.choices[0].text
return CompletionResponse(text=text, raw=response)
def _stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
try:
from portkey import Config, Message
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
_messages = cast(
List[Message],
[{"role": i.role.value, "content": i.content} for i in messages],
)
config = Config(llms=self.llms)
response = self._client.ChatCompletions.create(
messages=_messages, config=config, stream=True, **kwargs
)
def gen() -> ChatResponseGen:
content = ""
function_call: Optional[dict] = {}
for resp in response:
if resp.choices is None:
continue
delta = resp.choices[0].delta
role = delta.get("role", "assistant")
content_delta = delta.get("content", "") or ""
content += content_delta
function_call_delta = delta.get("function_call", None)
if function_call_delta is not None:
if function_call is None:
function_call = function_call_delta
# ensure we do not add a blank function call
if (
function_call
and function_call.get("function_name", "") is None
):
del function_call["function_name"]
else:
function_call["arguments"] += function_call_delta["arguments"]
additional_kwargs = {}
if function_call is not None:
additional_kwargs["function_call"] = function_call
yield ChatResponse(
message=ChatMessage(
role=role,
content=content,
additional_kwargs=additional_kwargs,
),
delta=content_delta,
raw=resp,
)
return gen()
def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
try:
from portkey import Config
except ImportError as exc:
raise ImportError(IMPORT_ERROR_MESSAGE) from exc
config = Config(llms=self.llms)
response = self._client.Completions.create(
prompt=prompt, config=config, stream=True, **kwargs
)
def gen() -> CompletionResponseGen:
text = ""
for resp in response:
delta = resp.choices[0].text or ""
text += delta
yield CompletionResponse(
delta=delta,
text=text,
raw=resp,
)
return gen()
@property
def _is_chat_model(self) -> bool:
"""Check if a given model is a chat-based language model.
Returns:
bool: True if the provided model is a chat-based language model,
False otherwise.
"""
return is_chat_model(self.model or "")
def _get_llm(self, response: "PortkeyResponse") -> "LLMOptions":
return get_llm(response, self.llms)
| [
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.core.llms.types.ChatResponse",
"llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator",
"llama_index.legacy.llms.portkey_utils.is_chat_model",
"llama_index.legacy.core.llms.types.ChatMessage",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.llms.generic_utils.completion_to_chat_decorator",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.core.llms.types.CompletionResponse",
"llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator",
"llama_index.legacy.llms.generic_utils.chat_to_completion_decorator",
"llama_index.legacy.llms.portkey_utils.generate_llm_metadata",
"llama_index.legacy.llms.portkey_utils.get_llm"
] | [((1284, 1347), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The mode for using the Portkey integration"""'}), "(description='The mode for using the Portkey integration')\n", (1289, 1347), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1390, 1426), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_PORTKEY_MODEL'}), '(default=DEFAULT_PORTKEY_MODEL)\n', (1395, 1426), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1451, 1507), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""LLM parameter"""', 'default_factory': 'dict'}), "(description='LLM parameter', default_factory=dict)\n", (1456, 1507), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1540, 1597), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""LLM parameters"""', 'default_factory': 'list'}), "(description='LLM parameters', default_factory=list)\n", (1545, 1597), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1618, 1631), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1629, 1631), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((5009, 5034), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5032, 5034), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5396, 5415), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5413, 5415), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5695, 5720), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5718, 5720), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6113, 6132), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6130, 6132), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3423, 3458), 'llama_index.legacy.llms.portkey_utils.generate_llm_metadata', 'generate_llm_metadata', (['self.llms[0]'], {}), '(self.llms[0])\n', (3444, 3458), False, 'from llama_index.legacy.llms.portkey_utils import IMPORT_ERROR_MESSAGE, generate_llm_metadata, get_llm, is_chat_model\n'), ((6735, 6824), 'typing.cast', 'cast', (['List[Message]', "[{'role': i.role.value, 'content': i.content} for i in messages]"], {}), "(List[Message], [{'role': i.role.value, 'content': i.content} for i in\n messages])\n", (6739, 6824), False, 'from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union, cast\n'), ((6873, 6895), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (6879, 6895), False, 'from portkey import Config\n'), ((7113, 7156), 'llama_index.legacy.core.llms.types.ChatResponse', 'ChatResponse', ([], {'message': 'message', 'raw': 'response'}), '(message=message, raw=response)\n', (7125, 7156), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((7399, 7421), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (7405, 7421), False, 'from portkey import Config\n'), ((7558, 7601), 'llama_index.legacy.core.llms.types.CompletionResponse', 
'CompletionResponse', ([], {'text': 'text', 'raw': 'response'}), '(text=text, raw=response)\n', (7576, 7601), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((7889, 7978), 'typing.cast', 'cast', (['List[Message]', "[{'role': i.role.value, 'content': i.content} for i in messages]"], {}), "(List[Message], [{'role': i.role.value, 'content': i.content} for i in\n messages])\n", (7893, 7978), False, 'from typing import TYPE_CHECKING, Any, Callable, List, Optional, Sequence, Union, cast\n'), ((8027, 8049), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (8033, 8049), False, 'from portkey import Config\n'), ((10014, 10036), 'portkey.Config', 'Config', ([], {'llms': 'self.llms'}), '(llms=self.llms)\n', (10020, 10036), False, 'from portkey import Config\n'), ((10791, 10822), 'llama_index.legacy.llms.portkey_utils.is_chat_model', 'is_chat_model', (["(self.model or '')"], {}), "(self.model or '')\n", (10804, 10822), False, 'from llama_index.legacy.llms.portkey_utils import IMPORT_ERROR_MESSAGE, generate_llm_metadata, get_llm, is_chat_model\n'), ((10908, 10936), 'llama_index.legacy.llms.portkey_utils.get_llm', 'get_llm', (['response', 'self.llms'], {}), '(response, self.llms)\n', (10915, 10936), False, 'from llama_index.legacy.llms.portkey_utils import IMPORT_ERROR_MESSAGE, generate_llm_metadata, get_llm, is_chat_model\n'), ((5249, 5289), 'llama_index.legacy.llms.generic_utils.chat_to_completion_decorator', 'chat_to_completion_decorator', (['self._chat'], {}), '(self._chat)\n', (5277, 5289), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((5601, 5645), 'llama_index.legacy.llms.generic_utils.completion_to_chat_decorator', 'completion_to_chat_decorator', (['self._complete'], {}), '(self._complete)\n', (5629, 5645), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((5945, 5999), 'llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator', 'stream_chat_to_completion_decorator', (['self._stream_chat'], {}), '(self._stream_chat)\n', (5980, 5999), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((6363, 6421), 'llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator', 'stream_completion_to_chat_decorator', (['self._stream_complete'], {}), '(self._stream_complete)\n', (6398, 6421), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((10367, 10419), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'delta': 'delta', 'text': 'text', 'raw': 'resp'}), '(delta=delta, text=text, raw=resp)\n', (10385, 10419), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((9478, 9554), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), 
'(role=role, content=content, additional_kwargs=additional_kwargs)\n', (9489, 9554), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n')] |
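A sketch of driving the Portkey wrapper above in fallback mode. The portkey SDK objects (Modes, LLMOptions) and their member/field names, the import path for Portkey, and the API key in the environment are all assumptions; LLMOptions fields follow the add_llms docstring in the record.

# Usage sketch: assumes the `portkey` SDK is installed and PORTKEY_API_KEY is set.
import os

from portkey import LLMOptions, Modes  # member names assumed

from llama_index.legacy.core.llms.types import ChatMessage, MessageRole
from llama_index.legacy.llms.portkey import Portkey  # import path assumed

pk = Portkey(mode=Modes.FALLBACK, api_key=os.environ["PORTKEY_API_KEY"])

# First entry is tried first; the second acts as the fallback.
pk.add_llms(
    [
        LLMOptions(provider="openai", model="gpt-4"),
        LLMOptions(provider="anthropic", model="claude-2"),
    ]
)

messages = [ChatMessage(role=MessageRole.USER, content="Say hello.")]
print(pk.chat(messages).message.content)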
"""Query plan tool."""
from typing import Any, Dict, List, Optional
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.response_synthesizers import (
BaseSynthesizer,
get_response_synthesizer,
)
from llama_index.core.schema import NodeWithScore, TextNode
from llama_index.core.tools.types import BaseTool, ToolMetadata, ToolOutput
from llama_index.core.utils import print_text
DEFAULT_NAME = "query_plan_tool"
QUERYNODE_QUERY_STR_DESC = """\
Question we are asking. This is the query string that will be executed. \
"""
QUERYNODE_TOOL_NAME_DESC = """\
Name of the tool to execute the `query_str`. \
Should NOT be specified if there are subquestions to be specified, in which \
case child_nodes should be nonempty instead.\
"""
QUERYNODE_DEPENDENCIES_DESC = """\
List of sub-questions that need to be answered in order \
to answer the question given by `query_str`.\
Should be blank if there are no sub-questions to be specified, in which case \
`tool_name` is specified.\
"""
class QueryNode(BaseModel):
"""Query node.
A query node represents a query (query_str) that must be answered.
    It can either be answered by a tool (tool_name), or by first answering the
    sub-questions listed in `dependencies`.
    The tool_name and dependencies fields are mutually exclusive.
"""
# NOTE: inspired from https://github.com/jxnl/openai_function_call/pull/3/files
id: int = Field(..., description="ID of the query node.")
query_str: str = Field(..., description=QUERYNODE_QUERY_STR_DESC)
tool_name: Optional[str] = Field(
default=None, description="Name of the tool to execute the `query_str`."
)
dependencies: List[int] = Field(
default_factory=list, description=QUERYNODE_DEPENDENCIES_DESC
)
class QueryPlan(BaseModel):
"""Query plan.
Contains a list of QueryNode objects (which is a recursive object).
Out of the list of QueryNode objects, one of them must be the root node.
The root node is the one that isn't a dependency of any other node.
"""
nodes: List[QueryNode] = Field(
...,
description="The original question we are asking.",
)
DEFAULT_DESCRIPTION_PREFIX = """\
This is a query plan tool that takes in a list of tools and executes a \
query plan over these tools to answer a query. The query plan is a DAG of query nodes.
Given a list of tool names and the query plan schema, you \
can choose to generate a query plan to answer a question.
The tool names and descriptions are as follows:
"""
class QueryPlanTool(BaseTool):
"""Query plan tool.
A tool that takes in a list of tools and executes a query plan.
"""
def __init__(
self,
query_engine_tools: List[BaseTool],
response_synthesizer: BaseSynthesizer,
name: str,
description_prefix: str,
) -> None:
"""Initialize."""
self._query_tools_dict = {t.metadata.name: t for t in query_engine_tools}
self._response_synthesizer = response_synthesizer
self._name = name
self._description_prefix = description_prefix
@classmethod
def from_defaults(
cls,
query_engine_tools: List[BaseTool],
response_synthesizer: Optional[BaseSynthesizer] = None,
name: Optional[str] = None,
description_prefix: Optional[str] = None,
) -> "QueryPlanTool":
"""Initialize from defaults."""
name = name or DEFAULT_NAME
description_prefix = description_prefix or DEFAULT_DESCRIPTION_PREFIX
response_synthesizer = response_synthesizer or get_response_synthesizer()
return cls(
query_engine_tools=query_engine_tools,
response_synthesizer=response_synthesizer,
name=name,
description_prefix=description_prefix,
)
@property
def metadata(self) -> ToolMetadata:
"""Metadata."""
tools_description = "\n\n".join(
[
f"Tool Name: {tool.metadata.name}\n"
+ f"Tool Description: {tool.metadata.description} "
for tool in self._query_tools_dict.values()
]
)
# TODO: fill in description with query engine tools.
description = f"""\
{self._description_prefix}\n\n
{tools_description}
"""
return ToolMetadata(description, self._name, fn_schema=QueryPlan)
def _execute_node(
self, node: QueryNode, nodes_dict: Dict[int, QueryNode]
) -> ToolOutput:
"""Execute node."""
print_text(f"Executing node {node.json()}\n", color="blue")
if len(node.dependencies) > 0:
print_text(
f"Executing {len(node.dependencies)} child nodes\n", color="pink"
)
child_query_nodes: List[QueryNode] = [
nodes_dict[dep] for dep in node.dependencies
]
# execute the child nodes first
child_responses: List[ToolOutput] = [
self._execute_node(child, nodes_dict) for child in child_query_nodes
]
# form the child Node/NodeWithScore objects
child_nodes = []
for child_query_node, child_response in zip(
child_query_nodes, child_responses
):
node_text = (
f"Query: {child_query_node.query_str}\n"
f"Response: {child_response!s}\n"
)
child_node = TextNode(text=node_text)
child_nodes.append(child_node)
# use response synthesizer to combine results
child_nodes_with_scores = [
NodeWithScore(node=n, score=1.0) for n in child_nodes
]
response_obj = self._response_synthesizer.synthesize(
query=node.query_str,
nodes=child_nodes_with_scores,
)
response = ToolOutput(
content=str(response_obj),
tool_name=node.query_str,
raw_input={"query": node.query_str},
raw_output=response_obj,
)
else:
# this is a leaf request, execute the query string using the specified tool
tool = self._query_tools_dict[node.tool_name]
print_text(f"Selected Tool: {tool.metadata}\n", color="pink")
response = tool(node.query_str)
print_text(
"Executed query, got response.\n"
f"Query: {node.query_str}\n"
f"Response: {response!s}\n",
color="blue",
)
return response
def _find_root_nodes(self, nodes_dict: Dict[int, QueryNode]) -> List[QueryNode]:
"""Find root node."""
# the root node is the one that isn't a dependency of any other node
node_counts = {node_id: 0 for node_id in nodes_dict}
for node in nodes_dict.values():
for dep in node.dependencies:
node_counts[dep] += 1
root_node_ids = [
node_id for node_id, count in node_counts.items() if count == 0
]
return [nodes_dict[node_id] for node_id in root_node_ids]
def __call__(self, *args: Any, **kwargs: Any) -> ToolOutput:
"""Call."""
# the kwargs represented as a JSON object
# should be a QueryPlan object
query_plan = QueryPlan(**kwargs)
nodes_dict = {node.id: node for node in query_plan.nodes}
root_nodes = self._find_root_nodes(nodes_dict)
if len(root_nodes) > 1:
raise ValueError("Query plan should have exactly one root node.")
return self._execute_node(root_nodes[0], nodes_dict)
| [
"llama_index.core.schema.TextNode",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.tools.types.ToolMetadata",
"llama_index.core.schema.NodeWithScore",
"llama_index.core.response_synthesizers.get_response_synthesizer",
"llama_index.core.utils.print_text"
] | [((1418, 1465), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""ID of the query node."""'}), "(..., description='ID of the query node.')\n", (1423, 1465), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1487, 1535), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': 'QUERYNODE_QUERY_STR_DESC'}), '(..., description=QUERYNODE_QUERY_STR_DESC)\n', (1492, 1535), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1567, 1646), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""Name of the tool to execute the `query_str`."""'}), "(default=None, description='Name of the tool to execute the `query_str`.')\n", (1572, 1646), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1691, 1759), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list', 'description': 'QUERYNODE_DEPENDENCIES_DESC'}), '(default_factory=list, description=QUERYNODE_DEPENDENCIES_DESC)\n', (1696, 1759), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2084, 2146), 'llama_index.core.bridge.pydantic.Field', 'Field', (['...'], {'description': '"""The original question we are asking."""'}), "(..., description='The original question we are asking.')\n", (2089, 2146), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4353, 4411), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', (['description', 'self._name'], {'fn_schema': 'QueryPlan'}), '(description, self._name, fn_schema=QueryPlan)\n', (4365, 4411), False, 'from llama_index.core.tools.types import BaseTool, ToolMetadata, ToolOutput\n'), ((6429, 6549), 'llama_index.core.utils.print_text', 'print_text', (['f"""Executed query, got response.\nQuery: {node.query_str}\nResponse: {response!s}\n"""'], {'color': '"""blue"""'}), '(\n f"""Executed query, got response.\nQuery: {node.query_str}\nResponse: {response!s}\n"""\n , color=\'blue\')\n', (6439, 6549), False, 'from llama_index.core.utils import print_text\n'), ((3593, 3619), 'llama_index.core.response_synthesizers.get_response_synthesizer', 'get_response_synthesizer', ([], {}), '()\n', (3617, 3619), False, 'from llama_index.core.response_synthesizers import BaseSynthesizer, get_response_synthesizer\n'), ((6315, 6376), 'llama_index.core.utils.print_text', 'print_text', (['f"""Selected Tool: {tool.metadata}\n"""'], {'color': '"""pink"""'}), "(f'Selected Tool: {tool.metadata}\\n', color='pink')\n", (6325, 6376), False, 'from llama_index.core.utils import print_text\n'), ((5495, 5519), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'node_text'}), '(text=node_text)\n', (5503, 5519), False, 'from llama_index.core.schema import NodeWithScore, TextNode\n'), ((5681, 5713), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'n', 'score': '(1.0)'}), '(node=n, score=1.0)\n', (5694, 5713), False, 'from llama_index.core.schema import NodeWithScore, TextNode\n')] |
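A sketch of wiring the QueryPlanTool above to two existing query engines and executing a hand-built plan; in practice an agent generates the plan from the tool's fn_schema. The query engines, the import paths, and the configured LLM behind the default response synthesizer are assumptions.

# Usage sketch: `sept_engine` and `june_engine` are assumed to be existing
# query engines; an LLM is assumed to be configured for response synthesis.
from llama_index.core.tools import QueryEngineTool
from llama_index.core.tools.query_plan import QueryNode, QueryPlan, QueryPlanTool  # path assumed

sept_tool = QueryEngineTool.from_defaults(
    query_engine=sept_engine, name="sept_2022", description="September 2022 data"
)
june_tool = QueryEngineTool.from_defaults(
    query_engine=june_engine, name="june_2022", description="June 2022 data"
)

query_plan_tool = QueryPlanTool.from_defaults(
    query_engine_tools=[sept_tool, june_tool]
)

# Nodes 1 and 2 are leaves answered by tools; node 3 is the single root and is
# synthesized from its two dependencies.
plan = QueryPlan(
    nodes=[
        QueryNode(id=1, query_str="What happened in September 2022?", tool_name="sept_2022"),
        QueryNode(id=2, query_str="What happened in June 2022?", tool_name="june_2022"),
        QueryNode(
            id=3,
            query_str="Compare June 2022 with September 2022.",
            dependencies=[1, 2],
        ),
    ]
)

# __call__ rebuilds the QueryPlan from kwargs, finds the single root node and
# executes the DAG bottom-up.
print(query_plan_tool(**plan.dict()))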
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.legacy.llms.generic_utils import (
completion_to_chat_decorator,
stream_completion_to_chat_decorator,
)
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.llms.watsonx_utils import (
WATSONX_MODELS,
get_from_param_or_env_without_error,
watsonx_model_to_context_size,
)
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
class WatsonX(LLM):
"""IBM WatsonX LLM."""
model_id: str = Field(description="The Model to use.")
max_new_tokens: int = Field(description="The maximum number of tokens to generate.")
temperature: float = Field(description="The temperature to use for sampling.")
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional Kwargs for the WatsonX model"
)
model_info: Dict[str, Any] = Field(
default_factory=dict, description="Details about the selected model"
)
_model = PrivateAttr()
def __init__(
self,
credentials: Dict[str, Any],
model_id: Optional[str] = "ibm/mpt-7b-instruct2",
project_id: Optional[str] = None,
space_id: Optional[str] = None,
max_new_tokens: Optional[int] = 512,
temperature: Optional[float] = 0.1,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
"""Initialize params."""
if model_id not in WATSONX_MODELS:
raise ValueError(
f"Model name {model_id} not found in {WATSONX_MODELS.keys()}"
)
try:
from ibm_watson_machine_learning.foundation_models.model import Model
except ImportError as e:
raise ImportError(
"You must install the `ibm_watson_machine_learning` package to use WatsonX"
"please `pip install ibm_watson_machine_learning`"
) from e
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
project_id = get_from_param_or_env_without_error(
project_id, "IBM_WATSONX_PROJECT_ID"
)
space_id = get_from_param_or_env_without_error(space_id, "IBM_WATSONX_SPACE_ID")
if project_id is not None or space_id is not None:
self._model = Model(
model_id=model_id,
credentials=credentials,
project_id=project_id,
space_id=space_id,
)
else:
raise ValueError(
f"Did not find `project_id` or `space_id`, Please pass them as named parameters"
f" or as environment variables, `IBM_WATSONX_PROJECT_ID` or `IBM_WATSONX_SPACE_ID`."
)
super().__init__(
model_id=model_id,
temperature=temperature,
max_new_tokens=max_new_tokens,
additional_kwargs=additional_kwargs,
model_info=self._model.get_details(),
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
    def class_name(cls) -> str:
"""Get Class Name."""
return "WatsonX_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=watsonx_model_to_context_size(self.model_id),
num_output=self.max_new_tokens,
model_name=self.model_id,
)
@property
def sample_model_kwargs(self) -> Dict[str, Any]:
"""Get a sample of Model kwargs that a user can pass to the model."""
try:
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames
except ImportError as e:
raise ImportError(
"You must install the `ibm_watson_machine_learning` package to use WatsonX"
"please `pip install ibm_watson_machine_learning`"
) from e
params = GenTextParamsMetaNames().get_example_values()
params.pop("return_options")
return params
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"max_new_tokens": self.max_new_tokens,
"temperature": self.temperature,
}
return {**base_kwargs, **self.additional_kwargs}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {**self._model_kwargs, **kwargs}
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
response = self._model.generate_text(prompt=prompt, params=all_kwargs)
return CompletionResponse(text=response)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
all_kwargs = self._get_all_kwargs(**kwargs)
stream_response = self._model.generate_text_stream(
prompt=prompt, params=all_kwargs
)
def gen() -> CompletionResponseGen:
content = ""
for stream_delta in stream_response:
content += stream_delta
yield CompletionResponse(text=content, delta=stream_delta)
return gen()
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
all_kwargs = self._get_all_kwargs(**kwargs)
chat_fn = completion_to_chat_decorator(self.complete)
return chat_fn(messages, **all_kwargs)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
all_kwargs = self._get_all_kwargs(**kwargs)
chat_stream_fn = stream_completion_to_chat_decorator(self.stream_complete)
return chat_stream_fn(messages, **all_kwargs)
# Async Functions
# IBM Watson Machine Learning Package currently does not have Support for Async calls
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
raise NotImplementedError
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
raise NotImplementedError
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
raise NotImplementedError
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise NotImplementedError
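# Usage sketch for the class above. The credential values, project id, and
# prompt are placeholders (assumptions), and `ibm_watson_machine_learning`
# must be installed; the constructor and `complete()` calls mirror the API defined above.
#
#   llm = WatsonX(
#       credentials={"url": "<watsonx endpoint>", "apikey": "<api key>"},
#       project_id="<project id>",
#       model_id="ibm/mpt-7b-instruct2",
#       max_new_tokens=256,
#   )
#   print(llm.complete("Summarize what IBM watsonx offers.").text)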
| [
"llama_index.legacy.llms.watsonx_utils.WATSONX_MODELS.keys",
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.llms.watsonx_utils.get_from_param_or_env_without_error",
"llama_index.legacy.callbacks.CallbackManager",
"llama_index.legacy.core.llms.types.CompletionResponse",
"llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator",
"llama_index.legacy.llms.generic_utils.completion_to_chat_decorator",
"llama_index.legacy.llms.watsonx_utils.watsonx_model_to_context_size"
] | [((968, 1006), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Model to use."""'}), "(description='The Model to use.')\n", (973, 1006), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1033, 1095), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (1038, 1095), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1121, 1178), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (1126, 1178), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1219, 1306), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional Kwargs for the WatsonX model"""'}), "(default_factory=dict, description=\n 'Additional Kwargs for the WatsonX model')\n", (1224, 1306), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1349, 1424), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Details about the selected model"""'}), "(default_factory=dict, description='Details about the selected model')\n", (1354, 1424), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1453, 1466), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1464, 1466), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((5496, 5521), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5519, 5521), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5823, 5848), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5846, 5848), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6402, 6421), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6419, 6421), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6674, 6693), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6691, 6693), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2920, 2993), 'llama_index.legacy.llms.watsonx_utils.get_from_param_or_env_without_error', 'get_from_param_or_env_without_error', (['project_id', '"""IBM_WATSONX_PROJECT_ID"""'], {}), "(project_id, 'IBM_WATSONX_PROJECT_ID')\n", (2955, 2993), False, 'from llama_index.legacy.llms.watsonx_utils import WATSONX_MODELS, get_from_param_or_env_without_error, watsonx_model_to_context_size\n'), ((3035, 3104), 'llama_index.legacy.llms.watsonx_utils.get_from_param_or_env_without_error', 'get_from_param_or_env_without_error', (['space_id', '"""IBM_WATSONX_SPACE_ID"""'], {}), "(space_id, 'IBM_WATSONX_SPACE_ID')\n", (3070, 3104), False, 'from llama_index.legacy.llms.watsonx_utils import WATSONX_MODELS, get_from_param_or_env_without_error, watsonx_model_to_context_size\n'), ((5783, 5816), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response'}), '(text=response)\n', (5801, 5816), False, 'from 
llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((6576, 6619), 'llama_index.legacy.llms.generic_utils.completion_to_chat_decorator', 'completion_to_chat_decorator', (['self.complete'], {}), '(self.complete)\n', (6604, 6619), False, 'from llama_index.legacy.llms.generic_utils import completion_to_chat_decorator, stream_completion_to_chat_decorator\n'), ((6879, 6936), 'llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator', 'stream_completion_to_chat_decorator', (['self.stream_complete'], {}), '(self.stream_complete)\n', (6914, 6936), False, 'from llama_index.legacy.llms.generic_utils import completion_to_chat_decorator, stream_completion_to_chat_decorator\n'), ((2878, 2897), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (2893, 2897), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((3191, 3286), 'ibm_watson_machine_learning.foundation_models.model.Model', 'Model', ([], {'model_id': 'model_id', 'credentials': 'credentials', 'project_id': 'project_id', 'space_id': 'space_id'}), '(model_id=model_id, credentials=credentials, project_id=project_id,\n space_id=space_id)\n', (3196, 3286), False, 'from ibm_watson_machine_learning.foundation_models.model import Model\n'), ((4376, 4420), 'llama_index.legacy.llms.watsonx_utils.watsonx_model_to_context_size', 'watsonx_model_to_context_size', (['self.model_id'], {}), '(self.model_id)\n', (4405, 4420), False, 'from llama_index.legacy.llms.watsonx_utils import WATSONX_MODELS, get_from_param_or_env_without_error, watsonx_model_to_context_size\n'), ((5020, 5044), 'ibm_watson_machine_learning.metanames.GenTextParamsMetaNames', 'GenTextParamsMetaNames', ([], {}), '()\n', (5042, 5044), False, 'from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames\n'), ((6321, 6373), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'content', 'delta': 'stream_delta'}), '(text=content, delta=stream_delta)\n', (6339, 6373), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((2400, 2421), 'llama_index.legacy.llms.watsonx_utils.WATSONX_MODELS.keys', 'WATSONX_MODELS.keys', ([], {}), '()\n', (2419, 2421), False, 'from llama_index.legacy.llms.watsonx_utils import WATSONX_MODELS, get_from_param_or_env_without_error, watsonx_model_to_context_size\n')] |
from typing import Any, Awaitable, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_TEMPERATURE
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.legacy.llms.generic_utils import (
achat_to_completion_decorator,
acompletion_to_chat_decorator,
astream_chat_to_completion_decorator,
astream_completion_to_chat_decorator,
chat_to_completion_decorator,
completion_to_chat_decorator,
stream_chat_to_completion_decorator,
stream_completion_to_chat_decorator,
)
from llama_index.legacy.llms.litellm_utils import (
acompletion_with_retry,
completion_with_retry,
from_litellm_message,
is_function_calling_model,
openai_modelname_to_contextsize,
to_openai_message_dicts,
validate_litellm_api_key,
)
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
DEFAULT_LITELLM_MODEL = "gpt-3.5-turbo"
class LiteLLM(LLM):
model: str = Field(
default=DEFAULT_LITELLM_MODEL,
description=(
"The LiteLLM model to use. "
"For complete list of providers https://docs.litellm.ai/docs/providers"
),
)
temperature: float = Field(
default=DEFAULT_TEMPERATURE,
description="The temperature to use during generation.",
gte=0.0,
lte=1.0,
)
max_tokens: Optional[int] = Field(
description="The maximum number of tokens to generate.",
gt=0,
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional kwargs for the LLM API.",
# for all inputs https://docs.litellm.ai/docs/completion/input
)
max_retries: int = Field(
default=10, description="The maximum number of API retries."
)
def __init__(
self,
model: str = DEFAULT_LITELLM_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: Optional[int] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
api_key: Optional[str] = None,
api_type: Optional[str] = None,
api_base: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> None:
if "custom_llm_provider" in kwargs:
if (
kwargs["custom_llm_provider"] != "ollama"
and kwargs["custom_llm_provider"] != "vllm"
): # don't check keys for local models
validate_litellm_api_key(api_key, api_type)
else: # by default assume it's a hosted endpoint
validate_litellm_api_key(api_key, api_type)
additional_kwargs = additional_kwargs or {}
if api_key is not None:
additional_kwargs["api_key"] = api_key
if api_type is not None:
additional_kwargs["api_type"] = api_type
if api_base is not None:
additional_kwargs["api_base"] = api_base
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
**kwargs,
)
def _get_model_name(self) -> str:
model_name = self.model
if "ft-" in model_name: # legacy fine-tuning
model_name = model_name.split(":")[0]
elif model_name.startswith("ft:"):
model_name = model_name.split(":")[1]
return model_name
@classmethod
def class_name(cls) -> str:
return "litellm_llm"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=openai_modelname_to_contextsize(self._get_model_name()),
num_output=self.max_tokens or -1,
is_chat_model=True,
is_function_calling_model=is_function_calling_model(self._get_model_name()),
model_name=self.model,
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
if self._is_chat_model:
chat_fn = self._chat
else:
chat_fn = completion_to_chat_decorator(self._complete)
return chat_fn(messages, **kwargs)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if self._is_chat_model:
stream_chat_fn = self._stream_chat
else:
stream_chat_fn = stream_completion_to_chat_decorator(self._stream_complete)
return stream_chat_fn(messages, **kwargs)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
# litellm assumes all llms are chat llms
if self._is_chat_model:
complete_fn = chat_to_completion_decorator(self._chat)
else:
complete_fn = self._complete
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
if self._is_chat_model:
stream_complete_fn = stream_chat_to_completion_decorator(self._stream_chat)
else:
stream_complete_fn = self._stream_complete
return stream_complete_fn(prompt, **kwargs)
@property
def _is_chat_model(self) -> bool:
# litellm assumes all llms are chat llms
return True
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
def _chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
if not self._is_chat_model:
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
if "max_tokens" in all_kwargs and all_kwargs["max_tokens"] is None:
all_kwargs.pop(
"max_tokens"
) # don't send max_tokens == None, this throws errors for Non OpenAI providers
response = completion_with_retry(
is_chat_model=self._is_chat_model,
max_retries=self.max_retries,
messages=message_dicts,
stream=False,
**all_kwargs,
)
message_dict = response["choices"][0]["message"]
message = from_litellm_message(message_dict)
return ChatResponse(
message=message,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
def _stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
if not self._is_chat_model:
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
if "max_tokens" in all_kwargs and all_kwargs["max_tokens"] is None:
all_kwargs.pop(
"max_tokens"
) # don't send max_tokens == None, this throws errors for Non OpenAI providers
def gen() -> ChatResponseGen:
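            # Accumulate streamed text and any function-call fragments so every
            # yielded ChatResponse carries the full message so far plus this delta.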
content = ""
function_call: Optional[dict] = None
for response in completion_with_retry(
is_chat_model=self._is_chat_model,
max_retries=self.max_retries,
messages=message_dicts,
stream=True,
**all_kwargs,
):
delta = response["choices"][0]["delta"]
role = delta.get("role", "assistant")
content_delta = delta.get("content", "") or ""
content += content_delta
function_call_delta = delta.get("function_call", None)
if function_call_delta is not None:
if function_call is None:
function_call = function_call_delta
## ensure we do not add a blank function call
if function_call.get("function_name", "") is None:
del function_call["function_name"]
else:
function_call["arguments"] += function_call_delta["arguments"]
additional_kwargs = {}
if function_call is not None:
additional_kwargs["function_call"] = function_call
yield ChatResponse(
message=ChatMessage(
role=role,
content=content,
additional_kwargs=additional_kwargs,
),
delta=content_delta,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
def _complete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
raise NotImplementedError("litellm assumes all llms are chat llms.")
def _stream_complete(self, prompt: str, **kwargs: Any) -> CompletionResponseGen:
raise NotImplementedError("litellm assumes all llms are chat llms.")
def _get_max_token_for_prompt(self, prompt: str) -> int:
try:
import tiktoken
except ImportError:
raise ImportError(
"Please install tiktoken to use the max_tokens=None feature."
)
context_window = self.metadata.context_window
try:
encoding = tiktoken.encoding_for_model(self._get_model_name())
except KeyError:
            encoding = tiktoken.get_encoding(
                "cl100k_base"
            )  # fall back to the cl100k_base encoding
tokens = encoding.encode(prompt)
max_token = context_window - len(tokens)
if max_token <= 0:
raise ValueError(
f"The prompt is too long for the model. "
f"Please use a prompt that is less than {context_window} tokens."
)
return max_token
def _get_response_token_counts(self, raw_response: Any) -> dict:
"""Get the token usage reported by the response."""
if not isinstance(raw_response, dict):
return {}
usage = raw_response.get("usage", {})
return {
"prompt_tokens": usage.get("prompt_tokens", 0),
"completion_tokens": usage.get("completion_tokens", 0),
"total_tokens": usage.get("total_tokens", 0),
}
# ===== Async Endpoints =====
@llm_chat_callback()
async def achat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponse:
achat_fn: Callable[..., Awaitable[ChatResponse]]
if self._is_chat_model:
achat_fn = self._achat
else:
achat_fn = acompletion_to_chat_decorator(self._acomplete)
return await achat_fn(messages, **kwargs)
@llm_chat_callback()
async def astream_chat(
self,
messages: Sequence[ChatMessage],
**kwargs: Any,
) -> ChatResponseAsyncGen:
astream_chat_fn: Callable[..., Awaitable[ChatResponseAsyncGen]]
if self._is_chat_model:
astream_chat_fn = self._astream_chat
else:
astream_chat_fn = astream_completion_to_chat_decorator(
self._astream_complete
)
return await astream_chat_fn(messages, **kwargs)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
if self._is_chat_model:
acomplete_fn = achat_to_completion_decorator(self._achat)
else:
acomplete_fn = self._acomplete
return await acomplete_fn(prompt, **kwargs)
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
if self._is_chat_model:
astream_complete_fn = astream_chat_to_completion_decorator(
self._astream_chat
)
else:
astream_complete_fn = self._astream_complete
return await astream_complete_fn(prompt, **kwargs)
async def _achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
if not self._is_chat_model:
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
response = await acompletion_with_retry(
is_chat_model=self._is_chat_model,
max_retries=self.max_retries,
messages=message_dicts,
stream=False,
**all_kwargs,
)
message_dict = response["choices"][0]["message"]
message = from_litellm_message(message_dict)
return ChatResponse(
message=message,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
async def _astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
if not self._is_chat_model:
raise ValueError("This model is not a chat model.")
message_dicts = to_openai_message_dicts(messages)
all_kwargs = self._get_all_kwargs(**kwargs)
async def gen() -> ChatResponseAsyncGen:
content = ""
function_call: Optional[dict] = None
async for response in await acompletion_with_retry(
is_chat_model=self._is_chat_model,
max_retries=self.max_retries,
messages=message_dicts,
stream=True,
**all_kwargs,
):
delta = response["choices"][0]["delta"]
role = delta.get("role", "assistant")
content_delta = delta.get("content", "") or ""
content += content_delta
function_call_delta = delta.get("function_call", None)
if function_call_delta is not None:
if function_call is None:
function_call = function_call_delta
## ensure we do not add a blank function call
if function_call.get("function_name", "") is None:
del function_call["function_name"]
else:
function_call["arguments"] += function_call_delta["arguments"]
additional_kwargs = {}
if function_call is not None:
additional_kwargs["function_call"] = function_call
yield ChatResponse(
message=ChatMessage(
role=role,
content=content,
additional_kwargs=additional_kwargs,
),
delta=content_delta,
raw=response,
additional_kwargs=self._get_response_token_counts(response),
)
return gen()
async def _acomplete(self, prompt: str, **kwargs: Any) -> CompletionResponse:
raise NotImplementedError("litellm assumes all llms are chat llms.")
async def _astream_complete(
self, prompt: str, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise NotImplementedError("litellm assumes all llms are chat llms.")
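# Usage sketch for the class above. The model name and prompt are illustrative
# (assumptions); an API key for the chosen provider must be supplied via the
# environment or the `api_key` argument, as handled in `__init__` above.
#
#   llm = LiteLLM(model="gpt-3.5-turbo", temperature=0.1, max_tokens=256)
#   print(llm.complete("Say hello in French.").text)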
| [
"llama_index.legacy.llms.generic_utils.astream_chat_to_completion_decorator",
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.llms.litellm_utils.completion_with_retry",
"llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator",
"llama_index.legacy.llms.litellm_utils.acompletion_with_retry",
"llama_index.legacy.core.llms.types.ChatMessage",
"llama_index.legacy.llms.litellm_utils.validate_litellm_api_key",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.llms.generic_utils.astream_completion_to_chat_decorator",
"llama_index.legacy.llms.generic_utils.achat_to_completion_decorator",
"llama_index.legacy.llms.litellm_utils.to_openai_message_dicts",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.llms.generic_utils.acompletion_to_chat_decorator",
"llama_index.legacy.llms.generic_utils.completion_to_chat_decorator",
"llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator",
"llama_index.legacy.llms.generic_utils.chat_to_completion_decorator",
"llama_index.legacy.llms.litellm_utils.from_litellm_message"
] | [((1378, 1535), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_LITELLM_MODEL', 'description': '"""The LiteLLM model to use. For complete list of providers https://docs.litellm.ai/docs/providers"""'}), "(default=DEFAULT_LITELLM_MODEL, description=\n 'The LiteLLM model to use. For complete list of providers https://docs.litellm.ai/docs/providers'\n )\n", (1383, 1535), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1613, 1727), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_TEMPERATURE', 'description': '"""The temperature to use during generation."""', 'gte': '(0.0)', 'lte': '(1.0)'}), "(default=DEFAULT_TEMPERATURE, description=\n 'The temperature to use during generation.', gte=0.0, lte=1.0)\n", (1618, 1727), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1794, 1862), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""', 'gt': '(0)'}), "(description='The maximum number of tokens to generate.', gt=0)\n", (1799, 1862), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1926, 2003), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the LLM API."""'}), "(default_factory=dict, description='Additional kwargs for the LLM API.')\n", (1931, 2003), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((2121, 2188), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of API retries."""'}), "(default=10, description='The maximum number of API retries.')\n", (2126, 2188), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((5024, 5043), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5041, 5043), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5323, 5342), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5340, 5342), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((5688, 5713), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (5711, 5713), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6082, 6107), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (6105, 6107), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((12053, 12072), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (12070, 12072), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((12459, 12478), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (12476, 12478), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((12967, 12992), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (12990, 12992), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((13330, 13355), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (13353, 13355), False, 'from llama_index.legacy.llms.base import llm_chat_callback, 
llm_completion_callback\n'), ((7262, 7295), 'llama_index.legacy.llms.litellm_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (7285, 7295), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((7593, 7736), 'llama_index.legacy.llms.litellm_utils.completion_with_retry', 'completion_with_retry', ([], {'is_chat_model': 'self._is_chat_model', 'max_retries': 'self.max_retries', 'messages': 'message_dicts', 'stream': '(False)'}), '(is_chat_model=self._is_chat_model, max_retries=self.\n max_retries, messages=message_dicts, stream=False, **all_kwargs)\n', (7614, 7736), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((7878, 7912), 'llama_index.legacy.llms.litellm_utils.from_litellm_message', 'from_litellm_message', (['message_dict'], {}), '(message_dict)\n', (7898, 7912), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((8316, 8349), 'llama_index.legacy.llms.litellm_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (8339, 8349), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((14006, 14039), 'llama_index.legacy.llms.litellm_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (14029, 14039), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((14403, 14437), 'llama_index.legacy.llms.litellm_utils.from_litellm_message', 'from_litellm_message', (['message_dict'], {}), '(message_dict)\n', (14423, 14437), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((14853, 14886), 'llama_index.legacy.llms.litellm_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (14876, 14886), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((3380, 3423), 'llama_index.legacy.llms.litellm_utils.validate_litellm_api_key', 'validate_litellm_api_key', (['api_key', 'api_type'], {}), '(api_key, api_type)\n', (3404, 3423), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((5229, 5273), 
'llama_index.legacy.llms.generic_utils.completion_to_chat_decorator', 'completion_to_chat_decorator', (['self._complete'], {}), '(self._complete)\n', (5257, 5273), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((5573, 5631), 'llama_index.legacy.llms.generic_utils.stream_completion_to_chat_decorator', 'stream_completion_to_chat_decorator', (['self._stream_complete'], {}), '(self._stream_complete)\n', (5608, 5631), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((5934, 5974), 'llama_index.legacy.llms.generic_utils.chat_to_completion_decorator', 'chat_to_completion_decorator', (['self._chat'], {}), '(self._chat)\n', (5962, 5974), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((6296, 6350), 'llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator', 'stream_chat_to_completion_decorator', (['self._stream_chat'], {}), '(self._stream_chat)\n', (6331, 6350), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((8768, 8910), 'llama_index.legacy.llms.litellm_utils.completion_with_retry', 'completion_with_retry', ([], {'is_chat_model': 'self._is_chat_model', 'max_retries': 'self.max_retries', 'messages': 'message_dicts', 'stream': '(True)'}), '(is_chat_model=self._is_chat_model, max_retries=self.\n max_retries, messages=message_dicts, stream=True, **all_kwargs)\n', (8789, 8910), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((12356, 12402), 'llama_index.legacy.llms.generic_utils.acompletion_to_chat_decorator', 'acompletion_to_chat_decorator', (['self._acomplete'], {}), '(self._acomplete)\n', (12385, 12402), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((12813, 12873), 'llama_index.legacy.llms.generic_utils.astream_completion_to_chat_decorator', 'astream_completion_to_chat_decorator', (['self._astream_complete'], {}), '(self._astream_complete)\n', (12849, 12873), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, 
astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((13172, 13214), 'llama_index.legacy.llms.generic_utils.achat_to_completion_decorator', 'achat_to_completion_decorator', (['self._achat'], {}), '(self._achat)\n', (13201, 13214), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((13557, 13613), 'llama_index.legacy.llms.generic_utils.astream_chat_to_completion_decorator', 'astream_chat_to_completion_decorator', (['self._astream_chat'], {}), '(self._astream_chat)\n', (13593, 13613), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, acompletion_to_chat_decorator, astream_chat_to_completion_decorator, astream_completion_to_chat_decorator, chat_to_completion_decorator, completion_to_chat_decorator, stream_chat_to_completion_decorator, stream_completion_to_chat_decorator\n'), ((14117, 14261), 'llama_index.legacy.llms.litellm_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'is_chat_model': 'self._is_chat_model', 'max_retries': 'self.max_retries', 'messages': 'message_dicts', 'stream': '(False)'}), '(is_chat_model=self._is_chat_model, max_retries=self.\n max_retries, messages=message_dicts, stream=False, **all_kwargs)\n', (14139, 14261), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((3266, 3309), 'llama_index.legacy.llms.litellm_utils.validate_litellm_api_key', 'validate_litellm_api_key', (['api_key', 'api_type'], {}), '(api_key, api_type)\n', (3290, 3309), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((11130, 11166), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['"""cl100k_base"""'], {}), "('cl100k_base')\n", (11151, 11166), False, 'import tiktoken\n'), ((15103, 15246), 'llama_index.legacy.llms.litellm_utils.acompletion_with_retry', 'acompletion_with_retry', ([], {'is_chat_model': 'self._is_chat_model', 'max_retries': 'self.max_retries', 'messages': 'message_dicts', 'stream': '(True)'}), '(is_chat_model=self._is_chat_model, max_retries=self.\n max_retries, messages=message_dicts, stream=True, **all_kwargs)\n', (15125, 15246), False, 'from llama_index.legacy.llms.litellm_utils import acompletion_with_retry, completion_with_retry, from_litellm_message, is_function_calling_model, openai_modelname_to_contextsize, to_openai_message_dicts, validate_litellm_api_key\n'), ((9990, 10066), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=role, content=content, additional_kwargs=additional_kwargs)\n', (10001, 10066), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, 
LLMMetadata\n'), ((16326, 16402), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content', 'additional_kwargs': 'additional_kwargs'}), '(role=role, content=content, additional_kwargs=additional_kwargs)\n', (16337, 16402), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n')] |
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset("MiniTruthfulQADataset", "./data")
# BUILD BASIC RAG PIPELINE
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
# EVALUATE WITH PACK
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack")
rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)
############################################################################
    # NOTE: If you have a lower tier OpenAI API subscription like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
benchmark_df = await rag_evaluator.arun(
batch_size=20, # batches the number of openai api calls to make
sleep_time_in_seconds=1, # number of seconds sleep before making an api call
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack"
] | [((265, 322), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MiniTruthfulQADataset"""', '"""./data"""'], {}), "('MiniTruthfulQADataset', './data')\n", (287, 322), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((367, 419), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (398, 419), False, 'from llama_index.core import VectorStoreIndex\n'), ((512, 561), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack"""'], {}), "('RagEvaluatorPack', './pack')\n", (531, 561), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1412, 1436), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1434, 1436), False, 'import asyncio\n')] |
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_TEMPERATURE
# from mistralai.models.chat_completion import ChatMessage
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
LLMMetadata,
MessageRole,
)
from llama_index.legacy.llms.base import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.legacy.llms.generic_utils import (
achat_to_completion_decorator,
astream_chat_to_completion_decorator,
chat_to_completion_decorator,
get_from_param_or_env,
stream_chat_to_completion_decorator,
)
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.llms.mistralai_utils import (
mistralai_modelname_to_contextsize,
)
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
DEFAULT_MISTRALAI_MODEL = "mistral-tiny"
DEFAULT_MISTRALAI_ENDPOINT = "https://api.mistral.ai"
DEFAULT_MISTRALAI_MAX_TOKENS = 512
class MistralAI(LLM):
model: str = Field(
default=DEFAULT_MISTRALAI_MODEL, description="The mistralai model to use."
)
temperature: float = Field(
default=DEFAULT_TEMPERATURE,
description="The temperature to use for sampling.",
gte=0.0,
lte=1.0,
)
max_tokens: int = Field(
default=DEFAULT_MISTRALAI_MAX_TOKENS,
description="The maximum number of tokens to generate.",
gt=0,
)
timeout: float = Field(
default=120, description="The timeout to use in seconds.", gte=0
)
max_retries: int = Field(
default=5, description="The maximum number of API retries.", gte=0
)
safe_mode: bool = Field(
default=False,
description="The parameter to enforce guardrails in chat generations.",
)
    random_seed: Optional[int] = Field(
default=None, description="The random seed to use for sampling."
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the MistralAI API."
)
_client: Any = PrivateAttr()
_aclient: Any = PrivateAttr()
def __init__(
self,
model: str = DEFAULT_MISTRALAI_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_MISTRALAI_MAX_TOKENS,
timeout: int = 120,
max_retries: int = 5,
safe_mode: bool = False,
random_seed: Optional[int] = None,
api_key: Optional[str] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
try:
from mistralai.async_client import MistralAsyncClient
from mistralai.client import MistralClient
except ImportError as e:
raise ImportError(
"You must install the `mistralai` package to use mistralai."
"Please `pip install mistralai`"
) from e
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
api_key = get_from_param_or_env("api_key", api_key, "MISTRAL_API_KEY", "")
if not api_key:
raise ValueError(
"You must provide an API key to use mistralai. "
"You can either pass it in as an argument or set it `MISTRAL_API_KEY`."
)
self._client = MistralClient(
api_key=api_key,
endpoint=DEFAULT_MISTRALAI_ENDPOINT,
timeout=timeout,
max_retries=max_retries,
)
self._aclient = MistralAsyncClient(
api_key=api_key,
endpoint=DEFAULT_MISTRALAI_ENDPOINT,
timeout=timeout,
max_retries=max_retries,
)
super().__init__(
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs,
timeout=timeout,
max_retries=max_retries,
safe_mode=safe_mode,
random_seed=random_seed,
model=model,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "MistralAI_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=mistralai_modelname_to_contextsize(self.model),
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
safe_mode=self.safe_mode,
random_seed=self.random_seed,
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"temperature": self.temperature,
"max_tokens": self.max_tokens,
"random_seed": self.random_seed,
"safe_mode": self.safe_mode,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
# convert messages to mistral ChatMessage
from mistralai.client import ChatMessage as mistral_chatmessage
messages = [
mistral_chatmessage(role=x.role, content=x.content) for x in messages
]
all_kwargs = self._get_all_kwargs(**kwargs)
response = self._client.chat(messages=messages, **all_kwargs)
return ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT, content=response.choices[0].message.content
),
raw=dict(response),
)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
complete_fn = chat_to_completion_decorator(self.chat)
return complete_fn(prompt, **kwargs)
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
# convert messages to mistral ChatMessage
from mistralai.client import ChatMessage as mistral_chatmessage
messages = [
mistral_chatmessage(role=message.role, content=message.content)
for message in messages
]
all_kwargs = self._get_all_kwargs(**kwargs)
response = self._client.chat_stream(messages=messages, **all_kwargs)
def gen() -> ChatResponseGen:
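            # Skip chunks whose delta has no content and fold the rest into the
            # running assistant message yielded with each incremental response.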
content = ""
role = MessageRole.ASSISTANT
for chunk in response:
content_delta = chunk.choices[0].delta.content
if content_delta is None:
continue
content += content_delta
yield ChatResponse(
message=ChatMessage(role=role, content=content),
delta=content_delta,
raw=chunk,
)
return gen()
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
stream_complete_fn = stream_chat_to_completion_decorator(self.stream_chat)
return stream_complete_fn(prompt, **kwargs)
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
# convert messages to mistral ChatMessage
from mistralai.client import ChatMessage as mistral_chatmessage
messages = [
mistral_chatmessage(role=message.role, content=message.content)
for message in messages
]
all_kwargs = self._get_all_kwargs(**kwargs)
response = await self._aclient.chat(messages=messages, **all_kwargs)
return ChatResponse(
message=ChatMessage(
role=MessageRole.ASSISTANT, content=response.choices[0].message.content
),
raw=dict(response),
)
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
acomplete_fn = achat_to_completion_decorator(self.achat)
return await acomplete_fn(prompt, **kwargs)
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
# convert messages to mistral ChatMessage
from mistralai.client import ChatMessage as mistral_chatmessage
messages = [
mistral_chatmessage(role=x.role, content=x.content) for x in messages
]
all_kwargs = self._get_all_kwargs(**kwargs)
response = await self._aclient.chat_stream(messages=messages, **all_kwargs)
async def gen() -> ChatResponseAsyncGen:
content = ""
role = MessageRole.ASSISTANT
async for chunk in response:
content_delta = chunk.choices[0].delta.content
if content_delta is None:
continue
content += content_delta
yield ChatResponse(
message=ChatMessage(role=role, content=content),
delta=content_delta,
raw=chunk,
)
return gen()
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
astream_complete_fn = astream_chat_to_completion_decorator(self.astream_chat)
return await astream_complete_fn(prompt, **kwargs)
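# Usage sketch for the class above. The model name and prompt are illustrative
# (assumptions); MISTRAL_API_KEY must be set or `api_key` passed explicitly,
# as enforced in `__init__` above.
#
#   llm = MistralAI(model="mistral-tiny", max_tokens=256)
#   print(llm.complete("What does Mistral AI build?").text)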
| [
"llama_index.legacy.llms.generic_utils.astream_chat_to_completion_decorator",
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.llms.generic_utils.get_from_param_or_env",
"llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator",
"llama_index.legacy.core.llms.types.ChatMessage",
"llama_index.legacy.llms.mistralai_utils.mistralai_modelname_to_contextsize",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.llms.generic_utils.achat_to_completion_decorator",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.callbacks.CallbackManager",
"llama_index.legacy.llms.generic_utils.chat_to_completion_decorator"
] | [((1271, 1357), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_MISTRALAI_MODEL', 'description': '"""The mistralai model to use."""'}), "(default=DEFAULT_MISTRALAI_MODEL, description=\n 'The mistralai model to use.')\n", (1276, 1357), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1392, 1501), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_TEMPERATURE', 'description': '"""The temperature to use for sampling."""', 'gte': '(0.0)', 'lte': '(1.0)'}), "(default=DEFAULT_TEMPERATURE, description=\n 'The temperature to use for sampling.', gte=0.0, lte=1.0)\n", (1397, 1501), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1558, 1669), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_MISTRALAI_MAX_TOKENS', 'description': '"""The maximum number of tokens to generate."""', 'gt': '(0)'}), "(default=DEFAULT_MISTRALAI_MAX_TOKENS, description=\n 'The maximum number of tokens to generate.', gt=0)\n", (1563, 1669), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1718, 1789), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(120)', 'description': '"""The timeout to use in seconds."""', 'gte': '(0)'}), "(default=120, description='The timeout to use in seconds.', gte=0)\n", (1723, 1789), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1827, 1900), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(5)', 'description': '"""The maximum number of API retries."""', 'gte': '(0)'}), "(default=5, description='The maximum number of API retries.', gte=0)\n", (1832, 1900), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1937, 2034), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""The parameter to enforce guardrails in chat generations."""'}), "(default=False, description=\n 'The parameter to enforce guardrails in chat generations.')\n", (1942, 2034), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2076, 2147), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'None', 'description': '"""The random seed to use for sampling."""'}), "(default=None, description='The random seed to use for sampling.')\n", (2081, 2147), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2202, 2290), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the MistralAI API."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the MistralAI API.')\n", (2207, 2290), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2320, 2333), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2331, 2333), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2354, 2367), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2365, 2367), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((5957, 5976), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (5974, 5976), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6632, 6657), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], 
{}), '()\n', (6655, 6657), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6884, 6903), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6901, 6903), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((7946, 7971), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7969, 7971), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8236, 8255), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (8253, 8255), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8969, 8994), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8992, 8994), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9238, 9257), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9255, 9257), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((10306, 10331), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (10329, 10331), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3684, 3748), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_key"""', 'api_key', '"""MISTRAL_API_KEY"""', '""""""'], {}), "('api_key', api_key, 'MISTRAL_API_KEY', '')\n", (3705, 3748), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator\n'), ((3995, 4109), 'mistralai.client.MistralClient', 'MistralClient', ([], {'api_key': 'api_key', 'endpoint': 'DEFAULT_MISTRALAI_ENDPOINT', 'timeout': 'timeout', 'max_retries': 'max_retries'}), '(api_key=api_key, endpoint=DEFAULT_MISTRALAI_ENDPOINT, timeout\n =timeout, max_retries=max_retries)\n', (4008, 4109), False, 'from mistralai.client import MistralClient\n'), ((4188, 4306), 'mistralai.async_client.MistralAsyncClient', 'MistralAsyncClient', ([], {'api_key': 'api_key', 'endpoint': 'DEFAULT_MISTRALAI_ENDPOINT', 'timeout': 'timeout', 'max_retries': 'max_retries'}), '(api_key=api_key, endpoint=DEFAULT_MISTRALAI_ENDPOINT,\n timeout=timeout, max_retries=max_retries)\n', (4206, 4306), False, 'from mistralai.async_client import MistralAsyncClient\n'), ((6793, 6832), 'llama_index.legacy.llms.generic_utils.chat_to_completion_decorator', 'chat_to_completion_decorator', (['self.chat'], {}), '(self.chat)\n', (6821, 6832), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator\n'), ((8124, 8177), 'llama_index.legacy.llms.generic_utils.stream_chat_to_completion_decorator', 'stream_chat_to_completion_decorator', (['self.stream_chat'], {}), '(self.stream_chat)\n', (8159, 8177), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator\n'), ((9138, 9179), 
'llama_index.legacy.llms.generic_utils.achat_to_completion_decorator', 'achat_to_completion_decorator', (['self.achat'], {}), '(self.achat)\n', (9167, 9179), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator\n'), ((10497, 10552), 'llama_index.legacy.llms.generic_utils.astream_chat_to_completion_decorator', 'astream_chat_to_completion_decorator', (['self.astream_chat'], {}), '(self.astream_chat)\n', (10533, 10552), False, 'from llama_index.legacy.llms.generic_utils import achat_to_completion_decorator, astream_chat_to_completion_decorator, chat_to_completion_decorator, get_from_param_or_env, stream_chat_to_completion_decorator\n'), ((3645, 3664), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (3660, 3664), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((6217, 6268), 'mistralai.client.ChatMessage', 'mistral_chatmessage', ([], {'role': 'x.role', 'content': 'x.content'}), '(role=x.role, content=x.content)\n', (6236, 6268), True, 'from mistralai.client import ChatMessage as mistral_chatmessage\n'), ((7168, 7231), 'mistralai.client.ChatMessage', 'mistral_chatmessage', ([], {'role': 'message.role', 'content': 'message.content'}), '(role=message.role, content=message.content)\n', (7187, 7231), True, 'from mistralai.client import ChatMessage as mistral_chatmessage\n'), ((8517, 8580), 'mistralai.client.ChatMessage', 'mistral_chatmessage', ([], {'role': 'message.role', 'content': 'message.content'}), '(role=message.role, content=message.content)\n', (8536, 8580), True, 'from mistralai.client import ChatMessage as mistral_chatmessage\n'), ((9534, 9585), 'mistralai.client.ChatMessage', 'mistral_chatmessage', ([], {'role': 'x.role', 'content': 'x.content'}), '(role=x.role, content=x.content)\n', (9553, 9585), True, 'from mistralai.client import ChatMessage as mistral_chatmessage\n'), ((5163, 5209), 'llama_index.legacy.llms.mistralai_utils.mistralai_modelname_to_contextsize', 'mistralai_modelname_to_contextsize', (['self.model'], {}), '(self.model)\n', (5197, 5209), False, 'from llama_index.legacy.llms.mistralai_utils import mistralai_modelname_to_contextsize\n'), ((6468, 6557), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response.choices[0].message.content'}), '(role=MessageRole.ASSISTANT, content=response.choices[0].message\n .content)\n', (6479, 6557), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((8805, 8894), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'MessageRole.ASSISTANT', 'content': 'response.choices[0].message.content'}), '(role=MessageRole.ASSISTANT, content=response.choices[0].message\n .content)\n', (8816, 8894), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((7787, 7826), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (7798, 7826), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, 
ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n'), ((10147, 10186), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'role': 'role', 'content': 'content'}), '(role=role, content=content)\n', (10158, 10186), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata, MessageRole\n')] |
"""Base index classes."""
import logging
from abc import ABC, abstractmethod
from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast
from llama_index.legacy.chat_engine.types import BaseChatEngine, ChatMode
from llama_index.legacy.core.base_query_engine import BaseQueryEngine
from llama_index.legacy.core.base_retriever import BaseRetriever
from llama_index.legacy.data_structs.data_structs import IndexStruct
from llama_index.legacy.ingestion import run_transformations
from llama_index.legacy.schema import BaseNode, Document, IndexNode
from llama_index.legacy.service_context import ServiceContext
from llama_index.legacy.storage.docstore.types import BaseDocumentStore, RefDocInfo
from llama_index.legacy.storage.storage_context import StorageContext
IS = TypeVar("IS", bound=IndexStruct)
IndexType = TypeVar("IndexType", bound="BaseIndex")
logger = logging.getLogger(__name__)
class BaseIndex(Generic[IS], ABC):
"""Base LlamaIndex.
Args:
nodes (List[Node]): List of nodes to index
show_progress (bool): Whether to show tqdm progress bars. Defaults to False.
service_context (ServiceContext): Service context container (contains
components like LLM, Embeddings, etc.).
"""
index_struct_cls: Type[IS]
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
objects: Optional[Sequence[IndexNode]] = None,
index_struct: Optional[IS] = None,
storage_context: Optional[StorageContext] = None,
service_context: Optional[ServiceContext] = None,
show_progress: bool = False,
**kwargs: Any,
) -> None:
"""Initialize with parameters."""
if index_struct is None and nodes is None and objects is None:
raise ValueError("One of nodes, objects, or index_struct must be provided.")
if index_struct is not None and nodes is not None:
raise ValueError("Only one of nodes or index_struct can be provided.")
# This is to explicitly make sure that the old UX is not used
if nodes is not None and len(nodes) >= 1 and not isinstance(nodes[0], BaseNode):
if isinstance(nodes[0], Document):
raise ValueError(
"The constructor now takes in a list of Node objects. "
"Since you are passing in a list of Document objects, "
"please use `from_documents` instead."
)
else:
raise ValueError("nodes must be a list of Node objects.")
self._service_context = service_context or ServiceContext.from_defaults()
self._storage_context = storage_context or StorageContext.from_defaults()
self._docstore = self._storage_context.docstore
self._show_progress = show_progress
self._vector_store = self._storage_context.vector_store
self._graph_store = self._storage_context.graph_store
objects = objects or []
self._object_map = {}
for obj in objects:
self._object_map[obj.index_id] = obj.obj
            obj.obj = None  # clear the object to avoid serialization issues
with self._service_context.callback_manager.as_trace("index_construction"):
if index_struct is None:
nodes = nodes or []
index_struct = self.build_index_from_nodes(
nodes + objects # type: ignore
)
self._index_struct = index_struct
self._storage_context.index_store.add_index_struct(self._index_struct)
@classmethod
def from_documents(
cls: Type[IndexType],
documents: Sequence[Document],
storage_context: Optional[StorageContext] = None,
service_context: Optional[ServiceContext] = None,
show_progress: bool = False,
**kwargs: Any,
) -> IndexType:
"""Create index from documents.
Args:
            documents (Sequence[Document]): List of documents to
build the index from.
"""
storage_context = storage_context or StorageContext.from_defaults()
service_context = service_context or ServiceContext.from_defaults()
docstore = storage_context.docstore
with service_context.callback_manager.as_trace("index_construction"):
for doc in documents:
docstore.set_document_hash(doc.get_doc_id(), doc.hash)
nodes = run_transformations(
documents, # type: ignore
service_context.transformations,
show_progress=show_progress,
**kwargs,
)
return cls(
nodes=nodes,
storage_context=storage_context,
service_context=service_context,
show_progress=show_progress,
**kwargs,
)
@property
def index_struct(self) -> IS:
"""Get the index struct."""
return self._index_struct
@property
def index_id(self) -> str:
"""Get the index struct."""
return self._index_struct.index_id
def set_index_id(self, index_id: str) -> None:
"""Set the index id.
NOTE: if you decide to set the index_id on the index_struct manually,
you will need to explicitly call `add_index_struct` on the `index_store`
to update the index store.
Args:
index_id (str): Index id to set.
"""
# delete the old index struct
old_id = self._index_struct.index_id
self._storage_context.index_store.delete_index_struct(old_id)
# add the new index struct
self._index_struct.index_id = index_id
self._storage_context.index_store.add_index_struct(self._index_struct)
@property
def docstore(self) -> BaseDocumentStore:
"""Get the docstore corresponding to the index."""
return self._docstore
@property
def service_context(self) -> ServiceContext:
return self._service_context
@property
def storage_context(self) -> StorageContext:
return self._storage_context
@property
def summary(self) -> str:
return str(self._index_struct.summary)
@summary.setter
def summary(self, new_summary: str) -> None:
self._index_struct.summary = new_summary
self._storage_context.index_store.add_index_struct(self._index_struct)
@abstractmethod
def _build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> IS:
"""Build the index from nodes."""
def build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> IS:
"""Build the index from nodes."""
self._docstore.add_documents(nodes, allow_update=True)
return self._build_index_from_nodes(nodes)
@abstractmethod
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Index-specific logic for inserting nodes to the index struct."""
def insert_nodes(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert nodes."""
with self._service_context.callback_manager.as_trace("insert_nodes"):
self.docstore.add_documents(nodes, allow_update=True)
self._insert(nodes, **insert_kwargs)
self._storage_context.index_store.add_index_struct(self._index_struct)
def insert(self, document: Document, **insert_kwargs: Any) -> None:
"""Insert a document."""
with self._service_context.callback_manager.as_trace("insert"):
nodes = run_transformations(
[document],
self._service_context.transformations,
show_progress=self._show_progress,
)
self.insert_nodes(nodes, **insert_kwargs)
self.docstore.set_document_hash(document.get_doc_id(), document.hash)
@abstractmethod
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
def delete_nodes(
self,
node_ids: List[str],
delete_from_docstore: bool = False,
**delete_kwargs: Any,
) -> None:
"""Delete a list of nodes from the index.
Args:
            node_ids (List[str]): A list of node_ids of the nodes to delete
"""
for node_id in node_ids:
self._delete_node(node_id, **delete_kwargs)
if delete_from_docstore:
self.docstore.delete_document(node_id, raise_error=False)
self._storage_context.index_store.add_index_struct(self._index_struct)
def delete(self, doc_id: str, **delete_kwargs: Any) -> None:
"""Delete a document from the index.
        All nodes in the index related to the document will be deleted.
Args:
doc_id (str): A doc_id of the ingested document
"""
logger.warning(
"delete() is now deprecated, please refer to delete_ref_doc() to delete "
"ingested documents+nodes or delete_nodes to delete a list of nodes."
)
self.delete_ref_doc(doc_id)
def delete_ref_doc(
self, ref_doc_id: str, delete_from_docstore: bool = False, **delete_kwargs: Any
) -> None:
"""Delete a document and it's nodes by using ref_doc_id."""
ref_doc_info = self.docstore.get_ref_doc_info(ref_doc_id)
if ref_doc_info is None:
logger.warning(f"ref_doc_id {ref_doc_id} not found, nothing deleted.")
return
self.delete_nodes(
ref_doc_info.node_ids,
delete_from_docstore=False,
**delete_kwargs,
)
if delete_from_docstore:
self.docstore.delete_ref_doc(ref_doc_id, raise_error=False)
def update(self, document: Document, **update_kwargs: Any) -> None:
"""Update a document and it's corresponding nodes.
This is equivalent to deleting the document and then inserting it again.
Args:
document (Union[BaseDocument, BaseIndex]): document to update
insert_kwargs (Dict): kwargs to pass to insert
delete_kwargs (Dict): kwargs to pass to delete
"""
logger.warning(
"update() is now deprecated, please refer to update_ref_doc() to update "
"ingested documents+nodes."
)
self.update_ref_doc(document, **update_kwargs)
def update_ref_doc(self, document: Document, **update_kwargs: Any) -> None:
"""Update a document and it's corresponding nodes.
This is equivalent to deleting the document and then inserting it again.
Args:
document (Union[BaseDocument, BaseIndex]): document to update
insert_kwargs (Dict): kwargs to pass to insert
delete_kwargs (Dict): kwargs to pass to delete
"""
with self._service_context.callback_manager.as_trace("update"):
self.delete_ref_doc(
document.get_doc_id(),
delete_from_docstore=True,
**update_kwargs.pop("delete_kwargs", {}),
)
self.insert(document, **update_kwargs.pop("insert_kwargs", {}))
def refresh(
self, documents: Sequence[Document], **update_kwargs: Any
) -> List[bool]:
"""Refresh an index with documents that have changed.
This allows users to save LLM and Embedding model calls, while only
updating documents that have any changes in text or metadata. It
will also insert any documents that previously were not stored.
"""
logger.warning(
"refresh() is now deprecated, please refer to refresh_ref_docs() to "
"refresh ingested documents+nodes with an updated list of documents."
)
return self.refresh_ref_docs(documents, **update_kwargs)
def refresh_ref_docs(
self, documents: Sequence[Document], **update_kwargs: Any
) -> List[bool]:
"""Refresh an index with documents that have changed.
This allows users to save LLM and Embedding model calls, while only
updating documents that have any changes in text or metadata. It
will also insert any documents that previously were not stored.
"""
with self._service_context.callback_manager.as_trace("refresh"):
refreshed_documents = [False] * len(documents)
for i, document in enumerate(documents):
existing_doc_hash = self._docstore.get_document_hash(
document.get_doc_id()
)
if existing_doc_hash is None:
self.insert(document, **update_kwargs.pop("insert_kwargs", {}))
refreshed_documents[i] = True
elif existing_doc_hash != document.hash:
self.update_ref_doc(
document, **update_kwargs.pop("update_kwargs", {})
)
refreshed_documents[i] = True
return refreshed_documents
@property
@abstractmethod
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
...
@abstractmethod
def as_retriever(self, **kwargs: Any) -> BaseRetriever:
...
def as_query_engine(self, **kwargs: Any) -> BaseQueryEngine:
# NOTE: lazy import
from llama_index.legacy.query_engine.retriever_query_engine import (
RetrieverQueryEngine,
)
retriever = self.as_retriever(**kwargs)
kwargs["retriever"] = retriever
if "service_context" not in kwargs:
kwargs["service_context"] = self._service_context
return RetrieverQueryEngine.from_args(**kwargs)
def as_chat_engine(
self, chat_mode: ChatMode = ChatMode.BEST, **kwargs: Any
) -> BaseChatEngine:
query_engine = self.as_query_engine(**kwargs)
if "service_context" not in kwargs:
kwargs["service_context"] = self._service_context
# resolve chat mode
if chat_mode in [ChatMode.REACT, ChatMode.OPENAI, ChatMode.BEST]:
# use an agent with query engine tool in these chat modes
# NOTE: lazy import
from llama_index.legacy.agent import AgentRunner
from llama_index.legacy.tools.query_engine import QueryEngineTool
# get LLM
service_context = cast(ServiceContext, kwargs["service_context"])
llm = service_context.llm
# convert query engine to tool
query_engine_tool = QueryEngineTool.from_defaults(query_engine=query_engine)
return AgentRunner.from_llm(tools=[query_engine_tool], llm=llm, **kwargs)
if chat_mode == ChatMode.CONDENSE_QUESTION:
# NOTE: lazy import
from llama_index.legacy.chat_engine import CondenseQuestionChatEngine
return CondenseQuestionChatEngine.from_defaults(
query_engine=query_engine,
**kwargs,
)
elif chat_mode == ChatMode.CONTEXT:
from llama_index.legacy.chat_engine import ContextChatEngine
return ContextChatEngine.from_defaults(
retriever=self.as_retriever(**kwargs),
**kwargs,
)
elif chat_mode == ChatMode.CONDENSE_PLUS_CONTEXT:
from llama_index.legacy.chat_engine import CondensePlusContextChatEngine
return CondensePlusContextChatEngine.from_defaults(
retriever=self.as_retriever(**kwargs),
**kwargs,
)
elif chat_mode == ChatMode.SIMPLE:
from llama_index.legacy.chat_engine import SimpleChatEngine
return SimpleChatEngine.from_defaults(
**kwargs,
)
else:
raise ValueError(f"Unknown chat mode: {chat_mode}")
# legacy
BaseGPTIndex = BaseIndex
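# Minimal usage sketch of the BaseIndex API defined above. It assumes a
# concrete subclass such as the legacy VectorStoreIndex is importable from
# llama_index.legacy and that an LLM/embedding backend is configured (e.g. an
# OPENAI_API_KEY in the environment); treat it as illustrative, not canonical.
if __name__ == "__main__":
    from llama_index.legacy import Document, VectorStoreIndex

    docs = [Document(text="LlamaIndex builds indices over your documents.")]

    # build an index via the from_documents classmethod defined above
    index = VectorStoreIndex.from_documents(docs, show_progress=True)

    # insert another document; this runs the configured transformations
    index.insert(Document(text="Indices expose retrievers and query engines."))

    # refresh_ref_docs only re-ingests documents whose hash has changed
    print(index.refresh_ref_docs(docs))

    # retrieval / querying entry points provided by BaseIndex
    retriever = index.as_retriever()
    print(retriever.retrieve("retrievers"))
    query_engine = index.as_query_engine()
    print(query_engine.query("What does LlamaIndex build?"))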
| [
"llama_index.legacy.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args",
"llama_index.legacy.chat_engine.CondenseQuestionChatEngine.from_defaults",
"llama_index.legacy.tools.query_engine.QueryEngineTool.from_defaults",
"llama_index.legacy.chat_engine.SimpleChatEngine.from_defaults",
"llama_index.legacy.service_context.ServiceContext.from_defaults",
"llama_index.legacy.storage.storage_context.StorageContext.from_defaults",
"llama_index.legacy.agent.AgentRunner.from_llm",
"llama_index.legacy.ingestion.run_transformations"
] | [((793, 825), 'typing.TypeVar', 'TypeVar', (['"""IS"""'], {'bound': 'IndexStruct'}), "('IS', bound=IndexStruct)\n", (800, 825), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast\n'), ((838, 877), 'typing.TypeVar', 'TypeVar', (['"""IndexType"""'], {'bound': '"""BaseIndex"""'}), "('IndexType', bound='BaseIndex')\n", (845, 877), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast\n'), ((888, 915), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (905, 915), False, 'import logging\n'), ((13757, 13797), 'llama_index.legacy.query_engine.retriever_query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', ([], {}), '(**kwargs)\n', (13787, 13797), False, 'from llama_index.legacy.query_engine.retriever_query_engine import RetrieverQueryEngine\n'), ((2626, 2656), 'llama_index.legacy.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {}), '()\n', (2654, 2656), False, 'from llama_index.legacy.service_context import ServiceContext\n'), ((2708, 2738), 'llama_index.legacy.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (2736, 2738), False, 'from llama_index.legacy.storage.storage_context import StorageContext\n'), ((4137, 4167), 'llama_index.legacy.storage.storage_context.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (4165, 4167), False, 'from llama_index.legacy.storage.storage_context import StorageContext\n'), ((4213, 4243), 'llama_index.legacy.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {}), '()\n', (4241, 4243), False, 'from llama_index.legacy.service_context import ServiceContext\n'), ((4493, 4599), 'llama_index.legacy.ingestion.run_transformations', 'run_transformations', (['documents', 'service_context.transformations'], {'show_progress': 'show_progress'}), '(documents, service_context.transformations,\n show_progress=show_progress, **kwargs)\n', (4512, 4599), False, 'from llama_index.legacy.ingestion import run_transformations\n'), ((7604, 7713), 'llama_index.legacy.ingestion.run_transformations', 'run_transformations', (['[document]', 'self._service_context.transformations'], {'show_progress': 'self._show_progress'}), '([document], self._service_context.transformations,\n show_progress=self._show_progress)\n', (7623, 7713), False, 'from llama_index.legacy.ingestion import run_transformations\n'), ((14470, 14517), 'typing.cast', 'cast', (['ServiceContext', "kwargs['service_context']"], {}), "(ServiceContext, kwargs['service_context'])\n", (14474, 14517), False, 'from typing import Any, Dict, Generic, List, Optional, Sequence, Type, TypeVar, cast\n'), ((14632, 14688), 'llama_index.legacy.tools.query_engine.QueryEngineTool.from_defaults', 'QueryEngineTool.from_defaults', ([], {'query_engine': 'query_engine'}), '(query_engine=query_engine)\n', (14661, 14688), False, 'from llama_index.legacy.tools.query_engine import QueryEngineTool\n'), ((14709, 14775), 'llama_index.legacy.agent.AgentRunner.from_llm', 'AgentRunner.from_llm', ([], {'tools': '[query_engine_tool]', 'llm': 'llm'}), '(tools=[query_engine_tool], llm=llm, **kwargs)\n', (14729, 14775), False, 'from llama_index.legacy.agent import AgentRunner\n'), ((14963, 15040), 'llama_index.legacy.chat_engine.CondenseQuestionChatEngine.from_defaults', 'CondenseQuestionChatEngine.from_defaults', ([], {'query_engine': 'query_engine'}), 
'(query_engine=query_engine, **kwargs)\n', (15003, 15040), False, 'from llama_index.legacy.chat_engine import CondenseQuestionChatEngine\n'), ((15793, 15833), 'llama_index.legacy.chat_engine.SimpleChatEngine.from_defaults', 'SimpleChatEngine.from_defaults', ([], {}), '(**kwargs)\n', (15823, 15833), False, 'from llama_index.legacy.chat_engine import SimpleChatEngine\n')] |
import json
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import (
DEFAULT_TEMPERATURE,
)
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.legacy.llms.bedrock_utils import (
BEDROCK_FOUNDATION_LLMS,
CHAT_ONLY_MODELS,
STREAMING_MODELS,
Provider,
completion_with_retry,
get_provider,
)
from llama_index.legacy.llms.generic_utils import (
completion_response_to_chat_response,
stream_completion_response_to_chat_response,
)
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
class Bedrock(LLM):
model: str = Field(description="The modelId of the Bedrock model to use.")
temperature: float = Field(description="The temperature to use for sampling.")
max_tokens: int = Field(description="The maximum number of tokens to generate.")
    context_size: int = Field(description="The maximum number of tokens available for input.")
profile_name: Optional[str] = Field(
description="The name of aws profile to use. If not given, then the default profile is used."
)
aws_access_key_id: Optional[str] = Field(
description="AWS Access Key ID to use", exclude=True
)
aws_secret_access_key: Optional[str] = Field(
description="AWS Secret Access Key to use", exclude=True
)
aws_session_token: Optional[str] = Field(
description="AWS Session Token to use", exclude=True
)
region_name: Optional[str] = Field(
description="AWS region name to use. Uses region configured in AWS CLI if not passed",
exclude=True,
)
botocore_session: Optional[Any] = Field(
description="Use this Botocore session instead of creating a new default one.",
exclude=True,
)
botocore_config: Optional[Any] = Field(
description="Custom configuration object to use instead of the default generated one.",
exclude=True,
)
max_retries: int = Field(
default=10, description="The maximum number of API retries.", gt=0
)
timeout: float = Field(
default=60.0,
description="The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts.",
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional kwargs for the bedrock invokeModel request.",
)
_client: Any = PrivateAttr()
_aclient: Any = PrivateAttr()
_provider: Provider = PrivateAttr()
def __init__(
self,
model: str,
temperature: Optional[float] = DEFAULT_TEMPERATURE,
max_tokens: Optional[int] = 512,
context_size: Optional[int] = None,
profile_name: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
region_name: Optional[str] = None,
botocore_session: Optional[Any] = None,
client: Optional[Any] = None,
timeout: Optional[float] = 60.0,
max_retries: Optional[int] = 10,
botocore_config: Optional[Any] = None,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> None:
if context_size is None and model not in BEDROCK_FOUNDATION_LLMS:
            raise ValueError(
                "`context_size` argument not provided and the provided model "
                "is not a known foundation model. Please specify the context_size."
            )
session_kwargs = {
"profile_name": profile_name,
"region_name": region_name,
"aws_access_key_id": aws_access_key_id,
"aws_secret_access_key": aws_secret_access_key,
"aws_session_token": aws_session_token,
"botocore_session": botocore_session,
}
config = None
try:
import boto3
from botocore.config import Config
config = (
Config(
retries={"max_attempts": max_retries, "mode": "standard"},
connect_timeout=timeout,
read_timeout=timeout,
)
if botocore_config is None
else botocore_config
)
session = boto3.Session(**session_kwargs)
except ImportError:
            raise ImportError(
                "boto3 package not found, install with 'pip install boto3'"
            )
# Prior to general availability, custom boto3 wheel files were
# distributed that used the bedrock service to invokeModel.
# This check prevents any services still using those wheel files
# from breaking
if client is not None:
self._client = client
elif "bedrock-runtime" in session.get_available_services():
self._client = session.client("bedrock-runtime", config=config)
else:
self._client = session.client("bedrock", config=config)
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
context_size = context_size or BEDROCK_FOUNDATION_LLMS[model]
self._provider = get_provider(model)
messages_to_prompt = messages_to_prompt or self._provider.messages_to_prompt
completion_to_prompt = (
completion_to_prompt or self._provider.completion_to_prompt
)
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
context_size=context_size,
profile_name=profile_name,
timeout=timeout,
max_retries=max_retries,
botocore_config=config,
additional_kwargs=additional_kwargs,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "Bedrock_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=self.context_size,
num_output=self.max_tokens,
is_chat_model=self.model in CHAT_ONLY_MODELS,
model_name=self.model,
)
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"temperature": self.temperature,
self._provider.max_tokens_key: self.max_tokens,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
return {
**self._model_kwargs,
**kwargs,
}
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
if not formatted:
prompt = self.completion_to_prompt(prompt)
all_kwargs = self._get_all_kwargs(**kwargs)
request_body = self._provider.get_request_body(prompt, all_kwargs)
request_body_str = json.dumps(request_body)
response = completion_with_retry(
client=self._client,
model=self.model,
request_body=request_body_str,
max_retries=self.max_retries,
**all_kwargs,
)["body"].read()
response = json.loads(response)
return CompletionResponse(
text=self._provider.get_text_from_response(response), raw=response
)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
if self.model in BEDROCK_FOUNDATION_LLMS and self.model not in STREAMING_MODELS:
raise ValueError(f"Model {self.model} does not support streaming")
if not formatted:
prompt = self.completion_to_prompt(prompt)
all_kwargs = self._get_all_kwargs(**kwargs)
request_body = self._provider.get_request_body(prompt, all_kwargs)
request_body_str = json.dumps(request_body)
response = completion_with_retry(
client=self._client,
model=self.model,
request_body=request_body_str,
max_retries=self.max_retries,
stream=True,
**all_kwargs,
)["body"]
def gen() -> CompletionResponseGen:
content = ""
for r in response:
r = json.loads(r["chunk"]["bytes"])
content_delta = self._provider.get_text_from_stream_response(r)
content += content_delta
yield CompletionResponse(text=content, delta=content_delta, raw=r)
return gen()
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
prompt = self.messages_to_prompt(messages)
completion_response = self.complete(prompt, formatted=True, **kwargs)
return completion_response_to_chat_response(completion_response)
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
prompt = self.messages_to_prompt(messages)
completion_response = self.stream_complete(prompt, formatted=True, **kwargs)
return stream_completion_response_to_chat_response(completion_response)
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
"""Chat asynchronously."""
# TODO: do synchronous chat for now
return self.chat(messages, **kwargs)
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
raise NotImplementedError
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
raise NotImplementedError
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise NotImplementedError
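# Minimal usage sketch for the Bedrock wrapper above. The model id, profile,
# and region are placeholder assumptions; substitute a Bedrock model you have
# access to and valid AWS credentials. Illustrative only, not part of the class.
if __name__ == "__main__":
    llm = Bedrock(
        model="amazon.titan-text-express-v1",  # assumed model id; any accessible Bedrock model works
        context_size=8192,  # passed explicitly so the example does not rely on the built-in table
        profile_name="default",
        region_name="us-east-1",
        temperature=0.2,
        max_tokens=256,
    )
    # complete() builds the provider-specific request body and invokes the model
    print(llm.complete("Summarize what Amazon Bedrock provides in one sentence."))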
| [
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.llms.bedrock_utils.completion_with_retry",
"llama_index.legacy.llms.generic_utils.completion_response_to_chat_response",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.callbacks.CallbackManager",
"llama_index.legacy.core.llms.types.CompletionResponse",
"llama_index.legacy.llms.bedrock_utils.get_provider"
] | [((1084, 1145), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The modelId of the Bedrock model to use."""'}), "(description='The modelId of the Bedrock model to use.')\n", (1089, 1145), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1171, 1228), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (1176, 1228), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1251, 1313), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (1256, 1313), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1338, 1396), 'llama_index.legacy.bridge.pydantic.Field', 'Field', (['"""The maximum number of tokens available for input."""'], {}), "('The maximum number of tokens available for input.')\n", (1343, 1396), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1431, 1541), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The name of aws profile to use. If not given, then the default profile is used."""'}), "(description=\n 'The name of aws profile to use. If not given, then the default profile is used.'\n )\n", (1436, 1541), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1585, 1644), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Access Key ID to use"""', 'exclude': '(True)'}), "(description='AWS Access Key ID to use', exclude=True)\n", (1590, 1644), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1702, 1765), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Secret Access Key to use"""', 'exclude': '(True)'}), "(description='AWS Secret Access Key to use', exclude=True)\n", (1707, 1765), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1819, 1878), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS Session Token to use"""', 'exclude': '(True)'}), "(description='AWS Session Token to use', exclude=True)\n", (1824, 1878), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1926, 2041), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""AWS region name to use. Uses region configured in AWS CLI if not passed"""', 'exclude': '(True)'}), "(description=\n 'AWS region name to use. 
Uses region configured in AWS CLI if not passed',\n exclude=True)\n", (1931, 2041), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2094, 2202), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Use this Botocore session instead of creating a new default one."""', 'exclude': '(True)'}), "(description=\n 'Use this Botocore session instead of creating a new default one.',\n exclude=True)\n", (2099, 2202), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2254, 2370), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Custom configuration object to use instead of the default generated one."""', 'exclude': '(True)'}), "(description=\n 'Custom configuration object to use instead of the default generated one.',\n exclude=True)\n", (2259, 2370), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2408, 2481), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(10)', 'description': '"""The maximum number of API retries."""', 'gt': '(0)'}), "(default=10, description='The maximum number of API retries.', gt=0)\n", (2413, 2481), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2517, 2665), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(60.0)', 'description': '"""The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts."""'}), "(default=60.0, description=\n 'The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts.'\n )\n", (2522, 2665), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2719, 2821), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the bedrock invokeModel request."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the bedrock invokeModel request.')\n", (2724, 2821), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2860, 2873), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2871, 2873), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2894, 2907), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2905, 2907), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((2934, 2947), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2945, 2947), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((7807, 7832), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7830, 7832), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((8617, 8642), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (8640, 8642), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((9840, 9859), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (9857, 9859), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6106, 6125), 'llama_index.legacy.llms.bedrock_utils.get_provider', 'get_provider', (['model'], {}), '(model)\n', (6118, 6125), False, 'from llama_index.legacy.llms.bedrock_utils import 
BEDROCK_FOUNDATION_LLMS, CHAT_ONLY_MODELS, STREAMING_MODELS, Provider, completion_with_retry, get_provider\n'), ((7158, 7304), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_size', 'num_output': 'self.max_tokens', 'is_chat_model': '(self.model in CHAT_ONLY_MODELS)', 'model_name': 'self.model'}), '(context_window=self.context_size, num_output=self.max_tokens,\n is_chat_model=self.model in CHAT_ONLY_MODELS, model_name=self.model)\n', (7169, 7304), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((8181, 8205), 'json.dumps', 'json.dumps', (['request_body'], {}), '(request_body)\n', (8191, 8205), False, 'import json\n'), ((8466, 8486), 'json.loads', 'json.loads', (['response'], {}), '(response)\n', (8476, 8486), False, 'import json\n'), ((9171, 9195), 'json.dumps', 'json.dumps', (['request_body'], {}), '(request_body)\n', (9181, 9195), False, 'import json\n'), ((10088, 10145), 'llama_index.legacy.llms.generic_utils.completion_response_to_chat_response', 'completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (10124, 10145), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((10406, 10470), 'llama_index.legacy.llms.generic_utils.stream_completion_response_to_chat_response', 'stream_completion_response_to_chat_response', (['completion_response'], {}), '(completion_response)\n', (10449, 10470), False, 'from llama_index.legacy.llms.generic_utils import completion_response_to_chat_response, stream_completion_response_to_chat_response\n'), ((5180, 5211), 'boto3.Session', 'boto3.Session', ([], {}), '(**session_kwargs)\n', (5193, 5211), False, 'import boto3\n'), ((5991, 6010), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (6006, 6010), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((9215, 9368), 'llama_index.legacy.llms.bedrock_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'model': 'self.model', 'request_body': 'request_body_str', 'max_retries': 'self.max_retries', 'stream': '(True)'}), '(client=self._client, model=self.model, request_body=\n request_body_str, max_retries=self.max_retries, stream=True, **all_kwargs)\n', (9236, 9368), False, 'from llama_index.legacy.llms.bedrock_utils import BEDROCK_FOUNDATION_LLMS, CHAT_ONLY_MODELS, STREAMING_MODELS, Provider, completion_with_retry, get_provider\n'), ((4872, 4988), 'botocore.config.Config', 'Config', ([], {'retries': "{'max_attempts': max_retries, 'mode': 'standard'}", 'connect_timeout': 'timeout', 'read_timeout': 'timeout'}), "(retries={'max_attempts': max_retries, 'mode': 'standard'},\n connect_timeout=timeout, read_timeout=timeout)\n", (4878, 4988), False, 'from botocore.config import Config\n'), ((9576, 9607), 'json.loads', 'json.loads', (["r['chunk']['bytes']"], {}), "(r['chunk']['bytes'])\n", (9586, 9607), False, 'import json\n'), ((8225, 8365), 'llama_index.legacy.llms.bedrock_utils.completion_with_retry', 'completion_with_retry', ([], {'client': 'self._client', 'model': 'self.model', 'request_body': 'request_body_str', 'max_retries': 'self.max_retries'}), '(client=self._client, model=self.model, request_body=\n request_body_str, max_retries=self.max_retries, **all_kwargs)\n', (8246, 8365), 
False, 'from llama_index.legacy.llms.bedrock_utils import BEDROCK_FOUNDATION_LLMS, CHAT_ONLY_MODELS, STREAMING_MODELS, Provider, completion_with_retry, get_provider\n'), ((9751, 9811), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'content', 'delta': 'content_delta', 'raw': 'r'}), '(text=content, delta=content_delta, raw=r)\n', (9769, 9811), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n')] |
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset(
"BraintrustCodaHelpDeskDataset", "./braintrust_codahdd"
)
# BUILD BASIC RAG PIPELINE
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
# EVALUATE WITH PACK
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack_stuff")
rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)
############################################################################
    # NOTE: If you have a lower tier subscription for OpenAI like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
benchmark_df = await rag_evaluator.arun(
batch_size=20, # batches the number of openai api calls to make
        sleep_time_in_seconds=1,  # number of seconds to sleep before making an api call
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
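# For lower OpenAI usage tiers, the arun() call above can be throttled as the
# NOTE describes; the numbers below simply mirror that comment and are only a
# suggested starting point.
#
#     benchmark_df = await rag_evaluator.arun(
#         batch_size=5,              # fewer concurrent OpenAI api calls per batch
#         sleep_time_in_seconds=15,  # longer pause between batches
#     )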
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack"
] | [((265, 344), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""BraintrustCodaHelpDeskDataset"""', '"""./braintrust_codahdd"""'], {}), "('BraintrustCodaHelpDeskDataset', './braintrust_codahdd')\n", (287, 344), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((403, 455), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (434, 455), False, 'from llama_index.core import VectorStoreIndex\n'), ((548, 603), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack_stuff"""'], {}), "('RagEvaluatorPack', './pack_stuff')\n", (567, 603), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1454, 1478), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1476, 1478), False, 'import asyncio\n')] |
from typing import Any, Dict, Optional
from llama_index.legacy.bridge.pydantic import Field
from llama_index.legacy.constants import (
DEFAULT_NUM_OUTPUTS,
DEFAULT_TEMPERATURE,
)
from llama_index.legacy.core.llms.types import LLMMetadata
from llama_index.legacy.llms.generic_utils import get_from_param_or_env
from llama_index.legacy.llms.openai_like import OpenAILike
DEFAULT_API_BASE = "https://router.neutrinoapp.com/api/llm-router"
DEFAULT_ROUTER = "default"
MAX_CONTEXT_WINDOW = 200000
class Neutrino(OpenAILike):
model: str = Field(
description="The Neutrino router to use. See https://docs.neutrinoapp.com/router for details."
)
context_window: int = Field(
default=MAX_CONTEXT_WINDOW,
description="The maximum number of context tokens for the model. Defaults to the largest supported model (Claude).",
gt=0,
)
is_chat_model: bool = Field(
default=True,
description=LLMMetadata.__fields__["is_chat_model"].field_info.description,
)
def __init__(
self,
model: Optional[str] = None,
router: str = DEFAULT_ROUTER,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 5,
api_base: Optional[str] = DEFAULT_API_BASE,
api_key: Optional[str] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_base = get_from_param_or_env("api_base", api_base, "NEUTRINO_API_BASE")
api_key = get_from_param_or_env("api_key", api_key, "NEUTRINO_API_KEY")
model = model or router
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=api_base,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
**kwargs,
)
@classmethod
def class_name(cls) -> str:
return "Neutrino_LLM"
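# Minimal usage sketch for the Neutrino router wrapper above. It assumes a
# NEUTRINO_API_KEY is set in the environment (or an api_key is passed in) and
# uses the default router; the prompt is purely illustrative.
if __name__ == "__main__":
    llm = Neutrino(router=DEFAULT_ROUTER, temperature=0.2, max_tokens=256)
    # Neutrino subclasses OpenAILike, so the standard complete()/chat() LLM API applies.
    print(llm.complete("Which kind of model would you route a short summary request to?"))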
| [
"llama_index.legacy.llms.generic_utils.get_from_param_or_env",
"llama_index.legacy.bridge.pydantic.Field"
] | [((548, 659), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Neutrino router to use. See https://docs.neutrinoapp.com/router for details."""'}), "(description=\n 'The Neutrino router to use. See https://docs.neutrinoapp.com/router for details.'\n )\n", (553, 659), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((690, 856), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'MAX_CONTEXT_WINDOW', 'description': '"""The maximum number of context tokens for the model. Defaults to the largest supported model (Claude)."""', 'gt': '(0)'}), "(default=MAX_CONTEXT_WINDOW, description=\n 'The maximum number of context tokens for the model. Defaults to the largest supported model (Claude).'\n , gt=0)\n", (695, 856), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((904, 1004), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(True)', 'description': "LLMMetadata.__fields__['is_chat_model'].field_info.description"}), "(default=True, description=LLMMetadata.__fields__['is_chat_model'].\n field_info.description)\n", (909, 1004), False, 'from llama_index.legacy.bridge.pydantic import Field\n'), ((1519, 1583), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_base"""', 'api_base', '"""NEUTRINO_API_BASE"""'], {}), "('api_base', api_base, 'NEUTRINO_API_BASE')\n", (1540, 1583), False, 'from llama_index.legacy.llms.generic_utils import get_from_param_or_env\n'), ((1602, 1663), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_key"""', 'api_key', '"""NEUTRINO_API_KEY"""'], {}), "('api_key', api_key, 'NEUTRINO_API_KEY')\n", (1623, 1663), False, 'from llama_index.legacy.llms.generic_utils import get_from_param_or_env\n')] |
"""Tree-based index."""
from enum import Enum
from typing import Any, Dict, Optional, Sequence, Union
from llama_index.core.base.base_retriever import BaseRetriever
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.data_structs.data_structs import IndexGraph
from llama_index.core.indices.base import BaseIndex
from llama_index.core.indices.common_tree.base import GPTTreeIndexBuilder
from llama_index.core.indices.tree.inserter import TreeIndexInserter
from llama_index.core.llms.llm import LLM
from llama_index.core.prompts import BasePromptTemplate
from llama_index.core.prompts.default_prompts import (
DEFAULT_INSERT_PROMPT,
DEFAULT_SUMMARY_PROMPT,
)
from llama_index.core.schema import BaseNode, IndexNode
from llama_index.core.service_context import ServiceContext
from llama_index.core.settings import (
Settings,
embed_model_from_settings_or_context,
llm_from_settings_or_context,
)
from llama_index.core.storage.docstore.types import RefDocInfo
class TreeRetrieverMode(str, Enum):
SELECT_LEAF = "select_leaf"
SELECT_LEAF_EMBEDDING = "select_leaf_embedding"
ALL_LEAF = "all_leaf"
ROOT = "root"
REQUIRE_TREE_MODES = {
TreeRetrieverMode.SELECT_LEAF,
TreeRetrieverMode.SELECT_LEAF_EMBEDDING,
TreeRetrieverMode.ROOT,
}
class TreeIndex(BaseIndex[IndexGraph]):
"""Tree Index.
The tree index is a tree-structured index, where each node is a summary of
the children nodes. During index construction, the tree is constructed
    in a bottom-up fashion until we end up with a set of root_nodes.
There are a few different options during query time (see :ref:`Ref-Query`).
The main option is to traverse down the tree from the root nodes.
    A secondary option is to directly synthesize the answer from the root nodes.
Args:
summary_template (Optional[BasePromptTemplate]): A Summarization Prompt
(see :ref:`Prompt-Templates`).
        insert_prompt (Optional[BasePromptTemplate]): A Tree Insertion Prompt
(see :ref:`Prompt-Templates`).
num_children (int): The number of children each node should have.
build_tree (bool): Whether to build the tree during index construction.
show_progress (bool): Whether to show progress bars. Defaults to False.
"""
index_struct_cls = IndexGraph
def __init__(
self,
nodes: Optional[Sequence[BaseNode]] = None,
objects: Optional[Sequence[IndexNode]] = None,
index_struct: Optional[IndexGraph] = None,
llm: Optional[LLM] = None,
summary_template: Optional[BasePromptTemplate] = None,
insert_prompt: Optional[BasePromptTemplate] = None,
num_children: int = 10,
build_tree: bool = True,
use_async: bool = False,
show_progress: bool = False,
# deprecated
service_context: Optional[ServiceContext] = None,
**kwargs: Any,
) -> None:
"""Initialize params."""
# need to set parameters before building index in base class.
self.num_children = num_children
self.summary_template = summary_template or DEFAULT_SUMMARY_PROMPT
self.insert_prompt: BasePromptTemplate = insert_prompt or DEFAULT_INSERT_PROMPT
self.build_tree = build_tree
self._use_async = use_async
self._llm = llm or llm_from_settings_or_context(Settings, service_context)
super().__init__(
nodes=nodes,
index_struct=index_struct,
service_context=service_context,
show_progress=show_progress,
objects=objects,
**kwargs,
)
def as_retriever(
self,
retriever_mode: Union[str, TreeRetrieverMode] = TreeRetrieverMode.SELECT_LEAF,
embed_model: Optional[BaseEmbedding] = None,
**kwargs: Any,
) -> BaseRetriever:
# NOTE: lazy import
from llama_index.core.indices.tree.all_leaf_retriever import (
TreeAllLeafRetriever,
)
from llama_index.core.indices.tree.select_leaf_embedding_retriever import (
TreeSelectLeafEmbeddingRetriever,
)
from llama_index.core.indices.tree.select_leaf_retriever import (
TreeSelectLeafRetriever,
)
from llama_index.core.indices.tree.tree_root_retriever import (
TreeRootRetriever,
)
self._validate_build_tree_required(TreeRetrieverMode(retriever_mode))
if retriever_mode == TreeRetrieverMode.SELECT_LEAF:
return TreeSelectLeafRetriever(self, object_map=self._object_map, **kwargs)
elif retriever_mode == TreeRetrieverMode.SELECT_LEAF_EMBEDDING:
embed_model = embed_model or embed_model_from_settings_or_context(
Settings, self._service_context
)
return TreeSelectLeafEmbeddingRetriever(
self, embed_model=embed_model, object_map=self._object_map, **kwargs
)
elif retriever_mode == TreeRetrieverMode.ROOT:
return TreeRootRetriever(self, object_map=self._object_map, **kwargs)
elif retriever_mode == TreeRetrieverMode.ALL_LEAF:
return TreeAllLeafRetriever(self, object_map=self._object_map, **kwargs)
else:
raise ValueError(f"Unknown retriever mode: {retriever_mode}")
def _validate_build_tree_required(self, retriever_mode: TreeRetrieverMode) -> None:
"""Check if index supports modes that require trees."""
if retriever_mode in REQUIRE_TREE_MODES and not self.build_tree:
raise ValueError(
"Index was constructed without building trees, "
f"but retriever mode {retriever_mode} requires trees."
)
def _build_index_from_nodes(self, nodes: Sequence[BaseNode]) -> IndexGraph:
"""Build the index from nodes."""
index_builder = GPTTreeIndexBuilder(
self.num_children,
self.summary_template,
service_context=self.service_context,
llm=self._llm,
use_async=self._use_async,
show_progress=self._show_progress,
docstore=self._docstore,
)
return index_builder.build_from_nodes(nodes, build_tree=self.build_tree)
def _insert(self, nodes: Sequence[BaseNode], **insert_kwargs: Any) -> None:
"""Insert a document."""
# TODO: allow to customize insert prompt
inserter = TreeIndexInserter(
self.index_struct,
service_context=self.service_context,
llm=self._llm,
num_children=self.num_children,
insert_prompt=self.insert_prompt,
summary_prompt=self.summary_template,
docstore=self._docstore,
)
inserter.insert(nodes)
def _delete_node(self, node_id: str, **delete_kwargs: Any) -> None:
"""Delete a node."""
raise NotImplementedError("Delete not implemented for tree index.")
@property
def ref_doc_info(self) -> Dict[str, RefDocInfo]:
"""Retrieve a dict mapping of ingested documents and their nodes+metadata."""
node_doc_ids = list(self.index_struct.all_nodes.values())
nodes = self.docstore.get_nodes(node_doc_ids)
all_ref_doc_info = {}
for node in nodes:
ref_node = node.source_node
if not ref_node:
continue
ref_doc_info = self.docstore.get_ref_doc_info(ref_node.node_id)
if not ref_doc_info:
continue
all_ref_doc_info[ref_node.node_id] = ref_doc_info
return all_ref_doc_info
# legacy
GPTTreeIndex = TreeIndex
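# Minimal usage sketch for TreeIndex. It assumes Document is importable from
# llama_index.core and that Settings provides a working LLM and embedding
# model (e.g. via OPENAI_API_KEY); treat it as illustrative only.
if __name__ == "__main__":
    from llama_index.core import Document

    docs = [Document(text="Tree indices summarize their children bottom-up.")]
    index = TreeIndex.from_documents(docs)

    # default mode traverses from the root summaries down to the best leaf
    leaf_retriever = index.as_retriever(retriever_mode=TreeRetrieverMode.SELECT_LEAF)
    print(leaf_retriever.retrieve("What do tree indices do?"))

    # ROOT mode reads the answer straight from the root summaries instead
    root_retriever = index.as_retriever(retriever_mode=TreeRetrieverMode.ROOT)
    print(root_retriever.retrieve("summary"))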
| [
"llama_index.core.indices.tree.select_leaf_embedding_retriever.TreeSelectLeafEmbeddingRetriever",
"llama_index.core.settings.embed_model_from_settings_or_context",
"llama_index.core.indices.tree.inserter.TreeIndexInserter",
"llama_index.core.settings.llm_from_settings_or_context",
"llama_index.core.indices.tree.select_leaf_retriever.TreeSelectLeafRetriever",
"llama_index.core.indices.tree.all_leaf_retriever.TreeAllLeafRetriever",
"llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder",
"llama_index.core.indices.tree.tree_root_retriever.TreeRootRetriever"
] | [((5992, 6202), 'llama_index.core.indices.common_tree.base.GPTTreeIndexBuilder', 'GPTTreeIndexBuilder', (['self.num_children', 'self.summary_template'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'use_async': 'self._use_async', 'show_progress': 'self._show_progress', 'docstore': 'self._docstore'}), '(self.num_children, self.summary_template,\n service_context=self.service_context, llm=self._llm, use_async=self.\n _use_async, show_progress=self._show_progress, docstore=self._docstore)\n', (6011, 6202), False, 'from llama_index.core.indices.common_tree.base import GPTTreeIndexBuilder\n'), ((6552, 6784), 'llama_index.core.indices.tree.inserter.TreeIndexInserter', 'TreeIndexInserter', (['self.index_struct'], {'service_context': 'self.service_context', 'llm': 'self._llm', 'num_children': 'self.num_children', 'insert_prompt': 'self.insert_prompt', 'summary_prompt': 'self.summary_template', 'docstore': 'self._docstore'}), '(self.index_struct, service_context=self.service_context,\n llm=self._llm, num_children=self.num_children, insert_prompt=self.\n insert_prompt, summary_prompt=self.summary_template, docstore=self.\n _docstore)\n', (6569, 6784), False, 'from llama_index.core.indices.tree.inserter import TreeIndexInserter\n'), ((3443, 3498), 'llama_index.core.settings.llm_from_settings_or_context', 'llm_from_settings_or_context', (['Settings', 'service_context'], {}), '(Settings, service_context)\n', (3471, 3498), False, 'from llama_index.core.settings import Settings, embed_model_from_settings_or_context, llm_from_settings_or_context\n'), ((4636, 4704), 'llama_index.core.indices.tree.select_leaf_retriever.TreeSelectLeafRetriever', 'TreeSelectLeafRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (4659, 4704), False, 'from llama_index.core.indices.tree.select_leaf_retriever import TreeSelectLeafRetriever\n'), ((4937, 5044), 'llama_index.core.indices.tree.select_leaf_embedding_retriever.TreeSelectLeafEmbeddingRetriever', 'TreeSelectLeafEmbeddingRetriever', (['self'], {'embed_model': 'embed_model', 'object_map': 'self._object_map'}), '(self, embed_model=embed_model, object_map=\n self._object_map, **kwargs)\n', (4969, 5044), False, 'from llama_index.core.indices.tree.select_leaf_embedding_retriever import TreeSelectLeafEmbeddingRetriever\n'), ((4818, 4887), 'llama_index.core.settings.embed_model_from_settings_or_context', 'embed_model_from_settings_or_context', (['Settings', 'self._service_context'], {}), '(Settings, self._service_context)\n', (4854, 4887), False, 'from llama_index.core.settings import Settings, embed_model_from_settings_or_context, llm_from_settings_or_context\n'), ((5144, 5206), 'llama_index.core.indices.tree.tree_root_retriever.TreeRootRetriever', 'TreeRootRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (5161, 5206), False, 'from llama_index.core.indices.tree.tree_root_retriever import TreeRootRetriever\n'), ((5285, 5350), 'llama_index.core.indices.tree.all_leaf_retriever.TreeAllLeafRetriever', 'TreeAllLeafRetriever', (['self'], {'object_map': 'self._object_map'}), '(self, object_map=self._object_map, **kwargs)\n', (5305, 5350), False, 'from llama_index.core.indices.tree.all_leaf_retriever import TreeAllLeafRetriever\n')] |
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseAsyncGen,
ChatResponseGen,
CompletionResponse,
CompletionResponseAsyncGen,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import (
llm_chat_callback,
llm_completion_callback,
)
from llama_index.legacy.llms.llm import LLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
EXAMPLE_URL = "https://clarifai.com/anthropic/completion/models/claude-v2"
class Clarifai(LLM):
model_url: Optional[str] = Field(
description=f"Full URL of the model. e.g. `{EXAMPLE_URL}`"
)
model_version_id: Optional[str] = Field(description="Model Version ID.")
app_id: Optional[str] = Field(description="Clarifai application ID of the model.")
user_id: Optional[str] = Field(description="Clarifai user ID of the model.")
pat: Optional[str] = Field(
description="Personal Access Tokens(PAT) to validate requests."
)
_model: Any = PrivateAttr()
_is_chat_model: bool = PrivateAttr()
def __init__(
self,
model_name: Optional[str] = None,
model_url: Optional[str] = None,
model_version_id: Optional[str] = "",
app_id: Optional[str] = None,
user_id: Optional[str] = None,
pat: Optional[str] = None,
temperature: float = 0.1,
max_tokens: int = 512,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
):
try:
import os
from clarifai.client.model import Model
except ImportError:
raise ImportError("ClarifaiLLM requires `pip install clarifai`.")
if pat is None and os.environ.get("CLARIFAI_PAT") is not None:
pat = os.environ.get("CLARIFAI_PAT")
if not pat and os.environ.get("CLARIFAI_PAT") is None:
raise ValueError(
"Set `CLARIFAI_PAT` as env variable or pass `pat` as constructor argument"
)
if model_url is not None and model_name is not None:
raise ValueError("You can only specify one of model_url or model_name.")
if model_url is None and model_name is None:
raise ValueError("You must specify one of model_url or model_name.")
if model_name is not None:
if app_id is None or user_id is None:
raise ValueError(
f"Missing one app ID or user ID of the model: {app_id=}, {user_id=}"
)
else:
self._model = Model(
user_id=user_id,
app_id=app_id,
model_id=model_name,
model_version={"id": model_version_id},
pat=pat,
)
if model_url is not None:
self._model = Model(model_url, pat=pat)
model_name = self._model.id
self._is_chat_model = False
if "chat" in self._model.app_id or "chat" in self._model.id:
self._is_chat_model = True
additional_kwargs = additional_kwargs or {}
super().__init__(
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs,
callback_manager=callback_manager,
model_name=model_name,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "ClarifaiLLM"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_tokens,
model_name=self._model,
is_chat_model=self._is_chat_model,
)
# TODO: When the Clarifai python SDK supports inference params, add here.
def chat(
self,
messages: Sequence[ChatMessage],
inference_params: Optional[Dict] = {},
**kwargs: Any,
) -> ChatResponse:
"""Chat endpoint for LLM."""
prompt = "".join([str(m) for m in messages])
try:
response = (
self._model.predict_by_bytes(
input_bytes=prompt.encode(encoding="UTF-8"),
input_type="text",
inference_params=inference_params,
)
.outputs[0]
.data.text.raw
)
except Exception as e:
raise Exception(f"Prediction failed: {e}")
return ChatResponse(message=ChatMessage(content=response))
def complete(
self,
prompt: str,
formatted: bool = False,
inference_params: Optional[Dict] = {},
**kwargs: Any,
) -> CompletionResponse:
"""Completion endpoint for LLM."""
try:
response = (
self._model.predict_by_bytes(
input_bytes=prompt.encode(encoding="utf-8"),
input_type="text",
inference_params=inference_params,
)
.outputs[0]
.data.text.raw
)
except Exception as e:
raise Exception(f"Prediction failed: {e}")
return CompletionResponse(text=response)
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
raise NotImplementedError(
"Clarifai does not currently support streaming completion."
)
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError(
"Clarifai does not currently support streaming completion."
)
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
raise NotImplementedError("Currently not supported.")
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return self.complete(prompt, **kwargs)
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
raise NotImplementedError("Currently not supported.")
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise NotImplementedError("Clarifai does not currently support this function.")
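# Hedged usage sketch (not part of the original module). Assumes a valid
# Clarifai Personal Access Token is exported as CLARIFAI_PAT and that the
# EXAMPLE_URL model defined above is reachable from your account.
if __name__ == "__main__":
    clarifai_llm = Clarifai(model_url=EXAMPLE_URL)
    # complete() wraps Model.predict_by_bytes() and returns a CompletionResponse.
    print(clarifai_llm.complete("Briefly explain what an LLM is.").text)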
| [
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.core.llms.types.ChatMessage",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.core.llms.types.CompletionResponse"
] | [((762, 827), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': 'f"""Full URL of the model. e.g. `{EXAMPLE_URL}`"""'}), "(description=f'Full URL of the model. e.g. `{EXAMPLE_URL}`')\n", (767, 827), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((880, 918), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Model Version ID."""'}), "(description='Model Version ID.')\n", (885, 918), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((947, 1005), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Clarifai application ID of the model."""'}), "(description='Clarifai application ID of the model.')\n", (952, 1005), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1035, 1086), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Clarifai user ID of the model."""'}), "(description='Clarifai user ID of the model.')\n", (1040, 1086), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1112, 1182), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""Personal Access Tokens(PAT) to validate requests."""'}), "(description='Personal Access Tokens(PAT) to validate requests.')\n", (1117, 1182), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1216, 1229), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1227, 1229), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1257, 1270), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1268, 1270), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((6542, 6561), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6559, 6561), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6735, 6760), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (6758, 6760), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((6934, 6953), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (6951, 6953), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((7142, 7167), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (7165, 7167), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4359, 4497), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': 'self.context_window', 'num_output': 'self.max_tokens', 'model_name': 'self._model', 'is_chat_model': 'self._is_chat_model'}), '(context_window=self.context_window, num_output=self.max_tokens,\n model_name=self._model, is_chat_model=self._is_chat_model)\n', (4370, 4497), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((6035, 6068), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response'}), '(text=response)\n', (6053, 6068), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, 
ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n'), ((2360, 2390), 'os.environ.get', 'os.environ.get', (['"""CLARIFAI_PAT"""'], {}), "('CLARIFAI_PAT')\n", (2374, 2390), False, 'import os\n'), ((3434, 3459), 'clarifai.client.model.Model', 'Model', (['model_url'], {'pat': 'pat'}), '(model_url, pat=pat)\n', (3439, 3459), False, 'from clarifai.client.model import Model\n'), ((2298, 2328), 'os.environ.get', 'os.environ.get', (['"""CLARIFAI_PAT"""'], {}), "('CLARIFAI_PAT')\n", (2312, 2328), False, 'import os\n'), ((2415, 2445), 'os.environ.get', 'os.environ.get', (['"""CLARIFAI_PAT"""'], {}), "('CLARIFAI_PAT')\n", (2429, 2445), False, 'import os\n'), ((3146, 3258), 'clarifai.client.model.Model', 'Model', ([], {'user_id': 'user_id', 'app_id': 'app_id', 'model_id': 'model_name', 'model_version': "{'id': model_version_id}", 'pat': 'pat'}), "(user_id=user_id, app_id=app_id, model_id=model_name, model_version={\n 'id': model_version_id}, pat=pat)\n", (3151, 3258), False, 'from clarifai.client.model import Model\n'), ((5340, 5369), 'llama_index.legacy.core.llms.types.ChatMessage', 'ChatMessage', ([], {'content': 'response'}), '(content=response)\n', (5351, 5369), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseAsyncGen, ChatResponseGen, CompletionResponse, CompletionResponseAsyncGen, CompletionResponseGen, LLMMetadata\n')] |
"""PII postprocessor."""
import json
from copy import deepcopy
from typing import Callable, Dict, List, Optional, Tuple
from llama_index.core.llms.llm import LLM
from llama_index.core.postprocessor.types import BaseNodePostprocessor
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle
DEFAULT_PII_TMPL = (
"The current context information is provided. \n"
"A task is also provided to mask the PII within the context. \n"
"Return the text, with all PII masked out, and a mapping of the original PII "
"to the masked PII. \n"
"Return the output of the task in JSON. \n"
"Context:\n"
"Hello Zhang Wei, I am John. "
"Your AnyCompany Financial Services, "
"LLC credit card account 1111-0000-1111-0008 "
"has a minimum payment of $24.53 that is due "
"by July 31st. Based on your autopay settings, we will withdraw your payment. "
"Task: Mask out the PII, replace each PII with a tag, and return the text. Return the mapping in JSON. \n"
"Output: \n"
"Hello [NAME1], I am [NAME2]. "
"Your AnyCompany Financial Services, "
"LLC credit card account [CREDIT_CARD_NUMBER] "
"has a minimum payment of $24.53 that is due "
"by [DATE_TIME]. Based on your autopay settings, we will withdraw your payment. "
"Output Mapping:\n"
'{{"NAME1": "Zhang Wei", "NAME2": "John", "CREDIT_CARD_NUMBER": "1111-0000-1111-0008", "DATE_TIME": "July 31st"}}\n'
"Context:\n{context_str}\n"
"Task: {query_str}\n"
"Output: \n"
""
)
class PIINodePostprocessor(BaseNodePostprocessor):
"""PII Node processor.
NOTE: this is a beta feature, the API might change.
Args:
llm (LLM): The local LLM to use for prediction.
"""
llm: LLM
pii_str_tmpl: str = DEFAULT_PII_TMPL
pii_node_info_key: str = "__pii_node_info__"
@classmethod
def class_name(cls) -> str:
return "PIINodePostprocessor"
def mask_pii(self, text: str) -> Tuple[str, Dict]:
"""Mask PII in text."""
pii_prompt = PromptTemplate(self.pii_str_tmpl)
# TODO: allow customization
task_str = (
"Mask out the PII, replace each PII with a tag, and return the text. "
"Return the mapping in JSON."
)
response = self.llm.predict(pii_prompt, context_str=text, query_str=task_str)
splits = response.split("Output Mapping:")
text_output = splits[0].strip()
json_str_output = splits[1].strip()
json_dict = json.loads(json_str_output)
return text_output, json_dict
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
# swap out text from nodes, with the original node mappings
new_nodes = []
for node_with_score in nodes:
node = node_with_score.node
new_text, mapping_info = self.mask_pii(
node.get_content(metadata_mode=MetadataMode.LLM)
)
new_node = deepcopy(node)
new_node.excluded_embed_metadata_keys.append(self.pii_node_info_key)
new_node.excluded_llm_metadata_keys.append(self.pii_node_info_key)
new_node.metadata[self.pii_node_info_key] = mapping_info
new_node.set_content(new_text)
new_nodes.append(NodeWithScore(node=new_node, score=node_with_score.score))
return new_nodes
class NERPIINodePostprocessor(BaseNodePostprocessor):
"""NER PII Node processor.
Uses a HF transformers model.
"""
pii_node_info_key: str = "__pii_node_info__"
@classmethod
def class_name(cls) -> str:
return "NERPIINodePostprocessor"
def mask_pii(self, ner: Callable, text: str) -> Tuple[str, Dict]:
"""Mask PII in text."""
new_text = text
response = ner(text)
mapping = {}
for entry in response:
entity_group_tag = f"[{entry['entity_group']}_{entry['start']}]"
new_text = new_text.replace(entry["word"], entity_group_tag).strip()
mapping[entity_group_tag] = entry["word"]
return new_text, mapping
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
from transformers import pipeline # pants: no-infer-dep
ner = pipeline("ner", grouped_entities=True)
# swap out text from nodes, with the original node mappings
new_nodes = []
for node_with_score in nodes:
node = node_with_score.node
new_text, mapping_info = self.mask_pii(
ner, node.get_content(metadata_mode=MetadataMode.LLM)
)
new_node = deepcopy(node)
new_node.excluded_embed_metadata_keys.append(self.pii_node_info_key)
new_node.excluded_llm_metadata_keys.append(self.pii_node_info_key)
new_node.metadata[self.pii_node_info_key] = mapping_info
new_node.set_content(new_text)
new_nodes.append(NodeWithScore(node=new_node, score=node_with_score.score))
return new_nodes
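# Hedged usage sketch (not part of the original module). Assumes `transformers`
# is installed (the default NER pipeline checkpoint is downloaded on first use);
# the node text is illustrative only.
if __name__ == "__main__":
    from llama_index.core.schema import TextNode

    sample = [NodeWithScore(node=TextNode(text="Hi, my name is John Smith."), score=1.0)]
    masked = NERPIINodePostprocessor().postprocess_nodes(sample)
    print(masked[0].node.get_content())
    print(masked[0].node.metadata["__pii_node_info__"])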
| [
"llama_index.core.prompts.base.PromptTemplate",
"llama_index.core.schema.NodeWithScore"
] | [((2092, 2125), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', (['self.pii_str_tmpl'], {}), '(self.pii_str_tmpl)\n', (2106, 2125), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((2560, 2587), 'json.loads', 'json.loads', (['json_str_output'], {}), '(json_str_output)\n', (2570, 2587), False, 'import json\n'), ((4543, 4581), 'transformers.pipeline', 'pipeline', (['"""ner"""'], {'grouped_entities': '(True)'}), "('ner', grouped_entities=True)\n", (4551, 4581), False, 'from transformers import pipeline\n'), ((3143, 3157), 'copy.deepcopy', 'deepcopy', (['node'], {}), '(node)\n', (3151, 3157), False, 'from copy import deepcopy\n'), ((4911, 4925), 'copy.deepcopy', 'deepcopy', (['node'], {}), '(node)\n', (4919, 4925), False, 'from copy import deepcopy\n'), ((3459, 3516), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_node', 'score': 'node_with_score.score'}), '(node=new_node, score=node_with_score.score)\n', (3472, 3516), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle\n'), ((5227, 5284), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_node', 'score': 'node_with_score.score'}), '(node=new_node, score=node_with_score.score)\n', (5240, 5284), False, 'from llama_index.core.schema import MetadataMode, NodeWithScore, QueryBundle\n')] |
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS, DEFAULT_TEMPERATURE
from llama_index.legacy.core.llms.types import ChatMessage, LLMMetadata
from llama_index.legacy.llms.everlyai_utils import everlyai_modelname_to_contextsize
from llama_index.legacy.llms.generic_utils import get_from_param_or_env
from llama_index.legacy.llms.openai import OpenAI
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
EVERLYAI_API_BASE = "https://everlyai.xyz/hosted"
DEFAULT_MODEL = "meta-llama/Llama-2-7b-chat-hf"
class EverlyAI(OpenAI):
def __init__(
self,
model: str = DEFAULT_MODEL,
temperature: float = DEFAULT_TEMPERATURE,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
max_retries: int = 10,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
additional_kwargs = additional_kwargs or {}
callback_manager = callback_manager or CallbackManager([])
api_key = get_from_param_or_env("api_key", api_key, "EverlyAI_API_KEY")
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
api_base=EVERLYAI_API_BASE,
api_key=api_key,
additional_kwargs=additional_kwargs,
max_retries=max_retries,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "EverlyAI_LLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=everlyai_modelname_to_contextsize(self.model),
num_output=self.max_tokens,
is_chat_model=True,
model_name=self.model,
)
@property
def _is_chat_model(self) -> bool:
return True
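# Hedged usage sketch (not part of the original module). Assumes an EverlyAI
# key is exported as EverlyAI_API_KEY (or passed explicitly via api_key=...).
if __name__ == "__main__":
    llm = EverlyAI(temperature=0.0, max_tokens=128)
    print(llm.complete("Name three uses of text embeddings.").text)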
| [
"llama_index.legacy.llms.generic_utils.get_from_param_or_env",
"llama_index.legacy.callbacks.CallbackManager",
"llama_index.legacy.llms.everlyai_utils.everlyai_modelname_to_contextsize"
] | [((1525, 1586), 'llama_index.legacy.llms.generic_utils.get_from_param_or_env', 'get_from_param_or_env', (['"""api_key"""', 'api_key', '"""EverlyAI_API_KEY"""'], {}), "('api_key', api_key, 'EverlyAI_API_KEY')\n", (1546, 1586), False, 'from llama_index.legacy.llms.generic_utils import get_from_param_or_env\n'), ((1486, 1505), 'llama_index.legacy.callbacks.CallbackManager', 'CallbackManager', (['[]'], {}), '([])\n', (1501, 1505), False, 'from llama_index.legacy.callbacks import CallbackManager\n'), ((2357, 2402), 'llama_index.legacy.llms.everlyai_utils.everlyai_modelname_to_contextsize', 'everlyai_modelname_to_contextsize', (['self.model'], {}), '(self.model)\n', (2390, 2402), False, 'from llama_index.legacy.llms.everlyai_utils import everlyai_modelname_to_contextsize\n')] |
"""txtai reader."""
from typing import Any, Dict, List
import numpy as np
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class TxtaiReader(BaseReader):
"""txtai reader.
Retrieves documents through an existing in-memory txtai index.
These documents can then be used in a downstream LlamaIndex data structure.
    If you wish to use txtai itself as an index to organize documents,
insert documents, and perform queries on them, please use VectorStoreIndex
with TxtaiVectorStore.
Args:
txtai_index (txtai.ann.ANN): A txtai Index object (required)
"""
def __init__(self, index: Any):
"""Initialize with parameters."""
import_err_msg = """
`txtai` package not found. For instructions on
how to install `txtai` please visit
https://neuml.github.io/txtai/install/
"""
try:
import txtai # noqa
except ImportError:
raise ImportError(import_err_msg)
self._index = index
def load_data(
self,
query: np.ndarray,
id_to_text_map: Dict[str, str],
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""Load data from txtai index.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
id_to_text_map (Dict[str, str]): A map from ID's to text.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
search_result = self._index.search(query, k)
documents = []
for query_result in search_result:
for doc_id, _ in query_result:
doc_id = str(doc_id)
if doc_id not in id_to_text_map:
raise ValueError(
f"Document ID {doc_id} not found in id_to_text_map."
)
text = id_to_text_map[doc_id]
documents.append(Document(text=text))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
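# Hedged usage sketch (not part of the original module). `prebuilt_index` stands
# in for a txtai ANN index created elsewhere (hypothetical here); the reader only
# relies on its search(query, k) method, as used in load_data above.
#
#   reader = TxtaiReader(prebuilt_index)
#   query = np.random.rand(1, 768).astype("float32")  # dimensionality is an assumption
#   docs = reader.load_data(query, id_to_text_map={"0": "first doc", "1": "second doc"}, k=2)
#   print([d.get_content() for d in docs])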
| [
"llama_index.legacy.schema.Document"
] | [((2425, 2444), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2433, 2444), False, 'from llama_index.legacy.schema import Document\n'), ((2194, 2213), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2202, 2213), False, 'from llama_index.legacy.schema import Document\n')] |
from llama_index.core.prompts.base import PromptTemplate
from llama_index.core.prompts.prompt_type import PromptType
"""Single select prompt.
PromptTemplate to select one out of `num_choices` options provided in `context_list`,
given a query `query_str`.
Required template variables: `num_choices`, `context_list`, `query_str`
"""
SingleSelectPrompt = PromptTemplate
"""Multiple select prompt.
PromptTemplate to select multiple candidates (up to `max_outputs`) out of `num_choices`
options provided in `context_list`, given a query `query_str`.
Required template variables: `num_choices`, `context_list`, `query_str`,
`max_outputs`
"""
MultiSelectPrompt = PromptTemplate
# single select
DEFAULT_SINGLE_SELECT_PROMPT_TMPL = (
"Some choices are given below. It is provided in a numbered list "
"(1 to {num_choices}), "
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return "
"the choice that is most relevant to the question: '{query_str}'\n"
)
DEFAULT_SINGLE_SELECT_PROMPT = PromptTemplate(
template=DEFAULT_SINGLE_SELECT_PROMPT_TMPL, prompt_type=PromptType.SINGLE_SELECT
)
# multiple select
DEFAULT_MULTI_SELECT_PROMPT_TMPL = (
"Some choices are given below. It is provided in a numbered "
"list (1 to {num_choices}), "
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return the top choices "
"(no more than {max_outputs}, but only select what is needed) that "
"are most relevant to the question: '{query_str}'\n"
)
DEFAULT_MULTIPLE_SELECT_PROMPT = PromptTemplate(
template=DEFAULT_MULTI_SELECT_PROMPT_TMPL, prompt_type=PromptType.MULTI_SELECT
)
# single pydantic select
DEFAULT_SINGLE_PYD_SELECT_PROMPT_TMPL = (
"Some choices are given below. It is provided in a numbered list "
"(1 to {num_choices}), "
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, generate "
"the selection object and reason that is most relevant to the "
"question: '{query_str}'\n"
)
# multiple pydantic select
DEFAULT_MULTI_PYD_SELECT_PROMPT_TMPL = (
"Some choices are given below. It is provided in a numbered "
"list (1 to {num_choices}), "
"where each item in the list corresponds to a summary.\n"
"---------------------\n"
"{context_list}"
"\n---------------------\n"
"Using only the choices above and not prior knowledge, return the top choice(s) "
"(no more than {max_outputs}, but only select what is needed) by generating "
"the selection object and reasons that are most relevant to the "
"question: '{query_str}'\n"
)
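# Hedged usage sketch (not part of the original module): renders the default
# single-select prompt with two illustrative choices, showing the template
# variables (num_choices, context_list, query_str) in use.
if __name__ == "__main__":
    context_list = (
        "(1) Summary of a cooking blog.\n"
        "(2) Summary of a Python tutorial.\n"
    )
    print(
        DEFAULT_SINGLE_SELECT_PROMPT.format(
            num_choices=2,
            context_list=context_list,
            query_str="How do I write a for loop?",
        )
    )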
| [
"llama_index.core.prompts.base.PromptTemplate"
] | [((1156, 1257), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', ([], {'template': 'DEFAULT_SINGLE_SELECT_PROMPT_TMPL', 'prompt_type': 'PromptType.SINGLE_SELECT'}), '(template=DEFAULT_SINGLE_SELECT_PROMPT_TMPL, prompt_type=\n PromptType.SINGLE_SELECT)\n', (1170, 1257), False, 'from llama_index.core.prompts.base import PromptTemplate\n'), ((1812, 1911), 'llama_index.core.prompts.base.PromptTemplate', 'PromptTemplate', ([], {'template': 'DEFAULT_MULTI_SELECT_PROMPT_TMPL', 'prompt_type': 'PromptType.MULTI_SELECT'}), '(template=DEFAULT_MULTI_SELECT_PROMPT_TMPL, prompt_type=\n PromptType.MULTI_SELECT)\n', (1826, 1911), False, 'from llama_index.core.prompts.base import PromptTemplate\n')] |
"""Awadb reader."""
from typing import Any, List
import numpy as np
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class AwadbReader(BaseReader):
"""Awadb reader.
Retrieves documents through an existing awadb client.
These documents can then be used in a downstream LlamaIndex data structure.
Args:
client (awadb.client): An awadb client.
"""
def __init__(self, client: Any):
"""Initialize with parameters."""
import_err_msg = """
`faiss` package not found. For instructions on
how to install `faiss` please visit
https://github.com/facebookresearch/faiss/wiki/Installing-Faiss
"""
try:
pass
except ImportError:
raise ImportError(import_err_msg)
self.awadb_client = client
def load_data(
self,
query: np.ndarray,
k: int = 4,
separate_documents: bool = True,
) -> List[Document]:
"""Load data from Faiss.
Args:
query (np.ndarray): A 2D numpy array of query vectors.
k (int): Number of nearest neighbors to retrieve. Defaults to 4.
separate_documents (Optional[bool]): Whether to return separate
documents. Defaults to True.
Returns:
List[Document]: A list of documents.
"""
results = self.awadb_client.Search(
query,
k,
text_in_page_content=None,
meta_filter=None,
not_include_fields=None,
)
documents = []
for item_detail in results[0]["ResultItems"]:
documents.append(Document(text=item_detail["embedding_text"]))
if not separate_documents:
# join all documents into one
text_list = [doc.get_content() for doc in documents]
text = "\n\n".join(text_list)
documents = [Document(text=text)]
return documents
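# Hedged usage sketch (not part of the original module). `awadb_client` is a
# hypothetical, already-initialized awadb client; only its Search() call, as
# used in load_data above, is relied on here.
#
#   reader = AwadbReader(awadb_client)
#   docs = reader.load_data(query=np.random.rand(1, 128).astype("float32"), k=4)
#   print(len(docs))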
| [
"llama_index.legacy.schema.Document"
] | [((1780, 1824), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': "item_detail['embedding_text']"}), "(text=item_detail['embedding_text'])\n", (1788, 1824), False, 'from llama_index.legacy.schema import Document\n'), ((2042, 2061), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (2050, 2061), False, 'from llama_index.legacy.schema import Document\n')] |
"""Mongo client."""
from typing import Dict, Iterable, List, Optional, Union
from llama_index.legacy.readers.base import BaseReader
from llama_index.legacy.schema import Document
class SimpleMongoReader(BaseReader):
"""Simple mongo reader.
    Concatenates each Mongo doc into a Document used by LlamaIndex.
Args:
host (str): Mongo host.
port (int): Mongo port.
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
uri: Optional[str] = None,
) -> None:
"""Initialize with parameters."""
try:
from pymongo import MongoClient
except ImportError as err:
raise ImportError(
"`pymongo` package not found, please run `pip install pymongo`"
) from err
client: MongoClient
if uri:
client = MongoClient(uri)
elif host and port:
client = MongoClient(host, port)
else:
raise ValueError("Either `host` and `port` or `uri` must be provided.")
self.client = client
def _flatten(self, texts: List[Union[str, List[str]]]) -> List[str]:
result = []
for text in texts:
result += text if isinstance(text, list) else [text]
return result
def lazy_load_data(
self,
db_name: str,
collection_name: str,
field_names: List[str] = ["text"],
separator: str = "",
query_dict: Optional[Dict] = None,
max_docs: int = 0,
metadata_names: Optional[List[str]] = None,
) -> Iterable[Document]:
"""Load data from the input directory.
Args:
db_name (str): name of the database.
collection_name (str): name of the collection.
field_names(List[str]): names of the fields to be concatenated.
Defaults to ["text"]
separator (str): separator to be used between fields.
Defaults to ""
query_dict (Optional[Dict]): query to filter documents. Read more
at [official docs](https://www.mongodb.com/docs/manual/reference/method/db.collection.find/#std-label-method-find-query)
Defaults to None
max_docs (int): maximum number of documents to load.
Defaults to 0 (no limit)
metadata_names (Optional[List[str]]): names of the fields to be added
to the metadata attribute of the Document. Defaults to None
Returns:
List[Document]: A list of documents.
"""
db = self.client[db_name]
cursor = db[collection_name].find(filter=query_dict or {}, limit=max_docs)
for item in cursor:
try:
texts = [item[name] for name in field_names]
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in Mongo document."
) from err
texts = self._flatten(texts)
text = separator.join(texts)
if metadata_names is None:
yield Document(text=text)
else:
try:
metadata = {name: item[name] for name in metadata_names}
except KeyError as err:
raise ValueError(
f"{err.args[0]} field not found in Mongo document."
) from err
yield Document(text=text, metadata=metadata)
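# Hedged usage sketch (not part of the original module). Assumes a MongoDB
# server is reachable at the URI below; the database, collection, and field
# names are illustrative only.
if __name__ == "__main__":
    reader = SimpleMongoReader(uri="mongodb://localhost:27017")
    docs = reader.lazy_load_data(
        db_name="my_db",
        collection_name="articles",
        field_names=["title", "body"],
        separator="\n",
        metadata_names=["author"],
    )
    for doc in docs:
        print(doc.metadata, doc.get_content()[:80])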
| [
"llama_index.legacy.schema.Document"
] | [((887, 903), 'pymongo.MongoClient', 'MongoClient', (['uri'], {}), '(uri)\n', (898, 903), False, 'from pymongo import MongoClient\n'), ((953, 976), 'pymongo.MongoClient', 'MongoClient', (['host', 'port'], {}), '(host, port)\n', (964, 976), False, 'from pymongo import MongoClient\n'), ((3133, 3152), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text'}), '(text=text)\n', (3141, 3152), False, 'from llama_index.legacy.schema import Document\n'), ((3476, 3514), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': 'text', 'metadata': 'metadata'}), '(text=text, metadata=metadata)\n', (3484, 3514), False, 'from llama_index.legacy.schema import Document\n')] |
from typing import Any, Callable, Optional, Sequence
from typing_extensions import override
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS
from llama_index.legacy.core.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import llm_completion_callback
from llama_index.legacy.llms.custom import CustomLLM
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
class _BaseGradientLLM(CustomLLM):
_gradient = PrivateAttr()
_model = PrivateAttr()
# Config
max_tokens: Optional[int] = Field(
default=DEFAULT_NUM_OUTPUTS,
description="The number of tokens to generate.",
gt=0,
lt=512,
)
# Gradient client config
access_token: Optional[str] = Field(
description="The Gradient access token to use.",
)
host: Optional[str] = Field(
description="The url of the Gradient service to access."
)
workspace_id: Optional[str] = Field(
description="The Gradient workspace id to use.",
)
is_chat_model: bool = Field(
default=False, description="Whether the model is a chat model."
)
def __init__(
self,
*,
access_token: Optional[str] = None,
host: Optional[str] = None,
max_tokens: Optional[int] = None,
workspace_id: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
is_chat_model: bool = False,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
**kwargs: Any,
) -> None:
super().__init__(
max_tokens=max_tokens,
access_token=access_token,
host=host,
workspace_id=workspace_id,
callback_manager=callback_manager,
is_chat_model=is_chat_model,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
**kwargs,
)
try:
from gradientai import Gradient
self._gradient = Gradient(
access_token=access_token, host=host, workspace_id=workspace_id
)
except ImportError as e:
raise ImportError(
"Could not import Gradient Python package. "
"Please install it with `pip install gradientai`."
) from e
def close(self) -> None:
self._gradient.close()
@llm_completion_callback()
@override
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return CompletionResponse(
text=self._model.complete(
query=prompt,
max_generated_token_count=self.max_tokens,
**kwargs,
).generated_output
)
@llm_completion_callback()
@override
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
grdt_reponse = await self._model.acomplete(
query=prompt,
max_generated_token_count=self.max_tokens,
**kwargs,
)
return CompletionResponse(text=grdt_reponse.generated_output)
@override
def stream_complete(
self,
prompt: str,
formatted: bool = False,
**kwargs: Any,
) -> CompletionResponseGen:
raise NotImplementedError
@property
@override
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=1024,
num_output=self.max_tokens or 20,
is_chat_model=self.is_chat_model,
is_function_calling_model=False,
model_name=self._model.id,
)
class GradientBaseModelLLM(_BaseGradientLLM):
base_model_slug: str = Field(
description="The slug of the base model to use.",
)
def __init__(
self,
*,
access_token: Optional[str] = None,
base_model_slug: str,
host: Optional[str] = None,
max_tokens: Optional[int] = None,
workspace_id: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
is_chat_model: bool = False,
) -> None:
super().__init__(
access_token=access_token,
base_model_slug=base_model_slug,
host=host,
max_tokens=max_tokens,
workspace_id=workspace_id,
callback_manager=callback_manager,
is_chat_model=is_chat_model,
)
self._model = self._gradient.get_base_model(
base_model_slug=base_model_slug,
)
class GradientModelAdapterLLM(_BaseGradientLLM):
model_adapter_id: str = Field(
description="The id of the model adapter to use.",
)
def __init__(
self,
*,
access_token: Optional[str] = None,
host: Optional[str] = None,
max_tokens: Optional[int] = None,
model_adapter_id: str,
workspace_id: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
is_chat_model: bool = False,
) -> None:
super().__init__(
access_token=access_token,
host=host,
max_tokens=max_tokens,
model_adapter_id=model_adapter_id,
workspace_id=workspace_id,
callback_manager=callback_manager,
is_chat_model=is_chat_model,
)
self._model = self._gradient.get_model_adapter(
model_adapter_id=model_adapter_id
)
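# Hedged usage sketch (not part of the original module). Assumes Gradient
# credentials are exported as environment variables; the base-model slug is an
# illustrative value and may differ from what Gradient currently offers.
if __name__ == "__main__":
    import os

    llm = GradientBaseModelLLM(
        access_token=os.environ["GRADIENT_ACCESS_TOKEN"],
        workspace_id=os.environ["GRADIENT_WORKSPACE_ID"],
        base_model_slug="llama2-7b-chat",
        max_tokens=100,
    )
    try:
        print(llm.complete("What is a vector database?").text)
    finally:
        llm.close()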
| [
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.core.llms.types.CompletionResponse"
] | [((660, 673), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (671, 673), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((687, 700), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (698, 700), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((747, 849), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_NUM_OUTPUTS', 'description': '"""The number of tokens to generate."""', 'gt': '(0)', 'lt': '(512)'}), "(default=DEFAULT_NUM_OUTPUTS, description=\n 'The number of tokens to generate.', gt=0, lt=512)\n", (752, 849), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((948, 1002), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Gradient access token to use."""'}), "(description='The Gradient access token to use.')\n", (953, 1002), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1044, 1107), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The url of the Gradient service to access."""'}), "(description='The url of the Gradient service to access.')\n", (1049, 1107), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1156, 1210), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The Gradient workspace id to use."""'}), "(description='The Gradient workspace id to use.')\n", (1161, 1210), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1252, 1322), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Whether the model is a chat model."""'}), "(default=False, description='Whether the model is a chat model.')\n", (1257, 1322), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3019, 3044), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3042, 3044), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((3408, 3433), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3431, 3433), False, 'from llama_index.legacy.llms.base import llm_completion_callback\n'), ((4391, 4446), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The slug of the base model to use."""'}), "(description='The slug of the base model to use.')\n", (4396, 4446), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((5307, 5363), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The id of the model adapter to use."""'}), "(description='The id of the model adapter to use.')\n", (5312, 5363), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3749, 3803), 'llama_index.legacy.core.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'grdt_reponse.generated_output'}), '(text=grdt_reponse.generated_output)\n', (3767, 3803), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((4084, 4252), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': '(1024)', 'num_output': '(self.max_tokens or 20)', 'is_chat_model': 'self.is_chat_model', 'is_function_calling_model': '(False)', 'model_name': 'self._model.id'}), '(context_window=1024, 
num_output=self.max_tokens or 20,\n is_chat_model=self.is_chat_model, is_function_calling_model=False,\n model_name=self._model.id)\n', (4095, 4252), False, 'from llama_index.legacy.core.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2635, 2708), 'gradientai.Gradient', 'Gradient', ([], {'access_token': 'access_token', 'host': 'host', 'workspace_id': 'workspace_id'}), '(access_token=access_token, host=host, workspace_id=workspace_id)\n', (2643, 2708), False, 'from gradientai import Gradient\n')] |
import asyncio
import logging
import queue
import time
from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
from threading import Event
from typing import AsyncGenerator, Generator, List, Optional, Union
from llama_index.core.base.llms.types import (
ChatMessage,
ChatResponseAsyncGen,
ChatResponseGen,
)
from llama_index.core.base.response.schema import Response, StreamingResponse
from llama_index.core.memory import BaseMemory
from llama_index.core.schema import NodeWithScore
from llama_index.core.tools import ToolOutput
from llama_index.core.instrumentation.events.chat_engine import (
StreamChatErrorEvent,
StreamChatEndEvent,
StreamChatStartEvent,
StreamChatDeltaReceivedEvent,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
logger = logging.getLogger(__name__)
logger.setLevel(logging.WARNING)
def is_function(message: ChatMessage) -> bool:
"""Utility for ChatMessage responses from OpenAI models."""
return "tool_calls" in message.additional_kwargs
class ChatResponseMode(str, Enum):
"""Flag toggling waiting/streaming in `Agent._chat`."""
WAIT = "wait"
STREAM = "stream"
@dataclass
class AgentChatResponse:
"""Agent chat response."""
response: str = ""
sources: List[ToolOutput] = field(default_factory=list)
source_nodes: List[NodeWithScore] = field(default_factory=list)
def __post_init__(self) -> None:
if self.sources and not self.source_nodes:
for tool_output in self.sources:
if isinstance(tool_output.raw_output, (Response, StreamingResponse)):
self.source_nodes.extend(tool_output.raw_output.source_nodes)
def __str__(self) -> str:
return self.response
@dataclass
class StreamingAgentChatResponse:
"""Streaming chat response to user and writing to chat history."""
response: str = ""
sources: List[ToolOutput] = field(default_factory=list)
chat_stream: Optional[ChatResponseGen] = None
achat_stream: Optional[ChatResponseAsyncGen] = None
source_nodes: List[NodeWithScore] = field(default_factory=list)
_unformatted_response: str = ""
_queue: queue.Queue = field(default_factory=queue.Queue)
_aqueue: asyncio.Queue = field(default_factory=asyncio.Queue)
# flag when chat message is a function call
_is_function: Optional[bool] = None
# flag when processing done
_is_done = False
# signal when a new item is added to the queue
_new_item_event: asyncio.Event = field(default_factory=asyncio.Event)
# NOTE: async code uses two events rather than one since it yields
# control when waiting for queue item
# signal when the OpenAI functions stop executing
_is_function_false_event: asyncio.Event = field(default_factory=asyncio.Event)
# signal when an OpenAI function is being executed
_is_function_not_none_thread_event: Event = field(default_factory=Event)
def __post_init__(self) -> None:
if self.sources and not self.source_nodes:
for tool_output in self.sources:
if isinstance(tool_output.raw_output, (Response, StreamingResponse)):
self.source_nodes.extend(tool_output.raw_output.source_nodes)
def __str__(self) -> str:
if self._is_done and not self._queue.empty() and not self._is_function:
while self._queue.queue:
delta = self._queue.queue.popleft()
self._unformatted_response += delta
self.response = self._unformatted_response.strip()
return self.response
def put_in_queue(self, delta: Optional[str]) -> None:
self._queue.put_nowait(delta)
self._is_function_not_none_thread_event.set()
def aput_in_queue(self, delta: Optional[str]) -> None:
self._aqueue.put_nowait(delta)
self._new_item_event.set()
@dispatcher.span
def write_response_to_history(
self,
memory: BaseMemory,
on_stream_end_fn: Optional[callable] = None,
raise_error: bool = False,
) -> None:
if self.chat_stream is None:
raise ValueError(
"chat_stream is None. Cannot write to history without chat_stream."
)
# try/except to prevent hanging on error
dispatcher.event(StreamChatStartEvent())
try:
final_text = ""
for chat in self.chat_stream:
self._is_function = is_function(chat.message)
if chat.delta:
dispatcher.event(StreamChatDeltaReceivedEvent(delta=chat.delta))
self.put_in_queue(chat.delta)
final_text += chat.delta or ""
if self._is_function is not None: # if loop has gone through iteration
# NOTE: this is to handle the special case where we consume some of the
# chat stream, but not all of it (e.g. in react agent)
chat.message.content = final_text.strip() # final message
memory.put(chat.message)
except Exception as e:
dispatcher.event(StreamChatErrorEvent())
if not raise_error:
logger.warning(
f"Encountered exception writing response to history: {e}"
)
else:
raise
dispatcher.event(StreamChatEndEvent())
self._is_done = True
# This act as is_done events for any consumers waiting
self._is_function_not_none_thread_event.set()
if on_stream_end_fn is not None and not self._is_function:
on_stream_end_fn()
@dispatcher.span
async def awrite_response_to_history(
self,
memory: BaseMemory,
on_stream_end_fn: Optional[callable] = None,
) -> None:
if self.achat_stream is None:
raise ValueError(
"achat_stream is None. Cannot asynchronously write to "
"history without achat_stream."
)
# try/except to prevent hanging on error
dispatcher.event(StreamChatStartEvent())
try:
final_text = ""
async for chat in self.achat_stream:
self._is_function = is_function(chat.message)
if chat.delta:
dispatcher.event(StreamChatDeltaReceivedEvent(delta=chat.delta))
self.aput_in_queue(chat.delta)
final_text += chat.delta or ""
self._new_item_event.set()
if self._is_function is False:
self._is_function_false_event.set()
if self._is_function is not None: # if loop has gone through iteration
# NOTE: this is to handle the special case where we consume some of the
# chat stream, but not all of it (e.g. in react agent)
chat.message.content = final_text.strip() # final message
memory.put(chat.message)
except Exception as e:
dispatcher.event(StreamChatErrorEvent())
logger.warning(f"Encountered exception writing response to history: {e}")
dispatcher.event(StreamChatEndEvent())
self._is_done = True
# These act as is_done events for any consumers waiting
self._is_function_false_event.set()
self._new_item_event.set()
if on_stream_end_fn is not None and not self._is_function:
on_stream_end_fn()
@property
def response_gen(self) -> Generator[str, None, None]:
while not self._is_done or not self._queue.empty():
try:
delta = self._queue.get(block=False)
self._unformatted_response += delta
yield delta
except queue.Empty:
# Queue is empty, but we're not done yet. Sleep for 0 secs to release the GIL and allow other threads to run.
time.sleep(0)
self.response = self._unformatted_response.strip()
async def async_response_gen(self) -> AsyncGenerator[str, None]:
while True:
if not self._aqueue.empty() or not self._is_done:
try:
delta = await asyncio.wait_for(self._aqueue.get(), timeout=0.1)
except asyncio.TimeoutError:
if self._is_done:
break
continue
if delta is not None:
self._unformatted_response += delta
yield delta
else:
break
self.response = self._unformatted_response.strip()
def print_response_stream(self) -> None:
for token in self.response_gen:
print(token, end="", flush=True)
async def aprint_response_stream(self) -> None:
async for token in self.async_response_gen():
print(token, end="", flush=True)
AGENT_CHAT_RESPONSE_TYPE = Union[AgentChatResponse, StreamingAgentChatResponse]
class BaseChatEngine(ABC):
"""Base Chat Engine."""
@abstractmethod
def reset(self) -> None:
"""Reset conversation state."""
@abstractmethod
def chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AGENT_CHAT_RESPONSE_TYPE:
"""Main chat interface."""
@abstractmethod
def stream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
"""Stream chat interface."""
@abstractmethod
async def achat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> AGENT_CHAT_RESPONSE_TYPE:
"""Async version of main chat interface."""
@abstractmethod
async def astream_chat(
self, message: str, chat_history: Optional[List[ChatMessage]] = None
) -> StreamingAgentChatResponse:
"""Async version of main chat interface."""
def chat_repl(self) -> None:
"""Enter interactive chat REPL."""
print("===== Entering Chat REPL =====")
print('Type "exit" to exit.\n')
self.reset()
message = input("Human: ")
while message != "exit":
response = self.chat(message)
print(f"Assistant: {response}\n")
message = input("Human: ")
def streaming_chat_repl(self) -> None:
"""Enter interactive chat REPL with streaming responses."""
print("===== Entering Chat REPL =====")
print('Type "exit" to exit.\n')
self.reset()
message = input("Human: ")
while message != "exit":
response = self.stream_chat(message)
print("Assistant: ", end="", flush=True)
response.print_response_stream()
print("\n")
message = input("Human: ")
@property
@abstractmethod
def chat_history(self) -> List[ChatMessage]:
pass
class ChatMode(str, Enum):
"""Chat Engine Modes."""
SIMPLE = "simple"
"""Corresponds to `SimpleChatEngine`.
Chat with LLM, without making use of a knowledge base.
"""
CONDENSE_QUESTION = "condense_question"
"""Corresponds to `CondenseQuestionChatEngine`.
First generate a standalone question from conversation context and last message,
then query the query engine for a response.
"""
CONTEXT = "context"
"""Corresponds to `ContextChatEngine`.
First retrieve text from the index using the user's message, then use the context
in the system prompt to generate a response.
"""
CONDENSE_PLUS_CONTEXT = "condense_plus_context"
"""Corresponds to `CondensePlusContextChatEngine`.
First condense a conversation and latest user message to a standalone question.
Then build a context for the standalone question from a retriever,
Then pass the context along with prompt and user message to LLM to generate a response.
"""
REACT = "react"
"""Corresponds to `ReActAgent`.
Use a ReAct agent loop with query engine tools.
"""
OPENAI = "openai"
"""Corresponds to `OpenAIAgent`.
Use an OpenAI function calling agent loop.
NOTE: only works with OpenAI models that support function calling API.
"""
BEST = "best"
"""Select the best chat engine based on the current LLM.
Corresponds to `OpenAIAgent` if using an OpenAI model that supports
function calling API, otherwise, corresponds to `ReActAgent`.
"""
| [
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.instrumentation.events.chat_engine.StreamChatStartEvent",
"llama_index.core.instrumentation.events.chat_engine.StreamChatDeltaReceivedEvent",
"llama_index.core.instrumentation.events.chat_engine.StreamChatErrorEvent",
"llama_index.core.instrumentation.events.chat_engine.StreamChatEndEvent"
] | [((831, 866), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (856, 866), True, 'import llama_index.core.instrumentation as instrument\n'), ((877, 904), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (894, 904), False, 'import logging\n'), ((1367, 1394), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (1372, 1394), False, 'from dataclasses import dataclass, field\n'), ((1435, 1462), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (1440, 1462), False, 'from dataclasses import dataclass, field\n'), ((1999, 2026), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2004, 2026), False, 'from dataclasses import dataclass, field\n'), ((2173, 2200), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (2178, 2200), False, 'from dataclasses import dataclass, field\n'), ((2263, 2297), 'dataclasses.field', 'field', ([], {'default_factory': 'queue.Queue'}), '(default_factory=queue.Queue)\n', (2268, 2297), False, 'from dataclasses import dataclass, field\n'), ((2327, 2363), 'dataclasses.field', 'field', ([], {'default_factory': 'asyncio.Queue'}), '(default_factory=asyncio.Queue)\n', (2332, 2363), False, 'from dataclasses import dataclass, field\n'), ((2593, 2629), 'dataclasses.field', 'field', ([], {'default_factory': 'asyncio.Event'}), '(default_factory=asyncio.Event)\n', (2598, 2629), False, 'from dataclasses import dataclass, field\n'), ((2843, 2879), 'dataclasses.field', 'field', ([], {'default_factory': 'asyncio.Event'}), '(default_factory=asyncio.Event)\n', (2848, 2879), False, 'from dataclasses import dataclass, field\n'), ((2983, 3011), 'dataclasses.field', 'field', ([], {'default_factory': 'Event'}), '(default_factory=Event)\n', (2988, 3011), False, 'from dataclasses import dataclass, field\n'), ((4385, 4407), 'llama_index.core.instrumentation.events.chat_engine.StreamChatStartEvent', 'StreamChatStartEvent', ([], {}), '()\n', (4405, 4407), False, 'from llama_index.core.instrumentation.events.chat_engine import StreamChatErrorEvent, StreamChatEndEvent, StreamChatStartEvent, StreamChatDeltaReceivedEvent\n'), ((5435, 5455), 'llama_index.core.instrumentation.events.chat_engine.StreamChatEndEvent', 'StreamChatEndEvent', ([], {}), '()\n', (5453, 5455), False, 'from llama_index.core.instrumentation.events.chat_engine import StreamChatErrorEvent, StreamChatEndEvent, StreamChatStartEvent, StreamChatDeltaReceivedEvent\n'), ((6154, 6176), 'llama_index.core.instrumentation.events.chat_engine.StreamChatStartEvent', 'StreamChatStartEvent', ([], {}), '()\n', (6174, 6176), False, 'from llama_index.core.instrumentation.events.chat_engine import StreamChatErrorEvent, StreamChatEndEvent, StreamChatStartEvent, StreamChatDeltaReceivedEvent\n'), ((7244, 7264), 'llama_index.core.instrumentation.events.chat_engine.StreamChatEndEvent', 'StreamChatEndEvent', ([], {}), '()\n', (7262, 7264), False, 'from llama_index.core.instrumentation.events.chat_engine import StreamChatErrorEvent, StreamChatEndEvent, StreamChatStartEvent, StreamChatDeltaReceivedEvent\n'), ((5186, 5208), 'llama_index.core.instrumentation.events.chat_engine.StreamChatErrorEvent', 'StreamChatErrorEvent', ([], {}), '()\n', (5206, 5208), False, 'from llama_index.core.instrumentation.events.chat_engine import StreamChatErrorEvent, StreamChatEndEvent, StreamChatStartEvent, 
StreamChatDeltaReceivedEvent\n'), ((7109, 7131), 'llama_index.core.instrumentation.events.chat_engine.StreamChatErrorEvent', 'StreamChatErrorEvent', ([], {}), '()\n', (7129, 7131), False, 'from llama_index.core.instrumentation.events.chat_engine import StreamChatErrorEvent, StreamChatEndEvent, StreamChatStartEvent, StreamChatDeltaReceivedEvent\n'), ((7994, 8007), 'time.sleep', 'time.sleep', (['(0)'], {}), '(0)\n', (8004, 8007), False, 'import time\n'), ((4622, 4668), 'llama_index.core.instrumentation.events.chat_engine.StreamChatDeltaReceivedEvent', 'StreamChatDeltaReceivedEvent', ([], {'delta': 'chat.delta'}), '(delta=chat.delta)\n', (4650, 4668), False, 'from llama_index.core.instrumentation.events.chat_engine import StreamChatErrorEvent, StreamChatEndEvent, StreamChatStartEvent, StreamChatDeltaReceivedEvent\n'), ((6398, 6444), 'llama_index.core.instrumentation.events.chat_engine.StreamChatDeltaReceivedEvent', 'StreamChatDeltaReceivedEvent', ([], {'delta': 'chat.delta'}), '(delta=chat.delta)\n', (6426, 6444), False, 'from llama_index.core.instrumentation.events.chat_engine import StreamChatErrorEvent, StreamChatEndEvent, StreamChatStartEvent, StreamChatDeltaReceivedEvent\n')] |
from typing import Dict, Type
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.embeddings.mock_embed_model import MockEmbedding
RECOGNIZED_EMBEDDINGS: Dict[str, Type[BaseEmbedding]] = {
MockEmbedding.class_name(): MockEmbedding,
}
# conditionals for llama-cloud support
try:
from llama_index.embeddings.openai import OpenAIEmbedding # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[OpenAIEmbedding.class_name()] = OpenAIEmbedding
except ImportError:
pass
try:
from llama_index.embeddings.azure_openai import (
AzureOpenAIEmbedding,
) # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[AzureOpenAIEmbedding.class_name()] = AzureOpenAIEmbedding
except ImportError:
pass
try:
from llama_index.embeddings.huggingface import (
HuggingFaceInferenceAPIEmbedding,
) # pants: no-infer-dep
RECOGNIZED_EMBEDDINGS[
HuggingFaceInferenceAPIEmbedding.class_name()
] = HuggingFaceInferenceAPIEmbedding
except ImportError:
pass
def load_embed_model(data: dict) -> BaseEmbedding:
"""Load Embedding by name."""
if isinstance(data, BaseEmbedding):
return data
name = data.get("class_name", None)
if name is None:
raise ValueError("Embedding loading requires a class_name")
if name not in RECOGNIZED_EMBEDDINGS:
raise ValueError(f"Invalid Embedding name: {name}")
return RECOGNIZED_EMBEDDINGS[name].from_dict(data)
| [
"llama_index.embeddings.huggingface.HuggingFaceInferenceAPIEmbedding.class_name",
"llama_index.embeddings.azure_openai.AzureOpenAIEmbedding.class_name",
"llama_index.core.embeddings.mock_embed_model.MockEmbedding.class_name",
"llama_index.embeddings.openai.OpenAIEmbedding.class_name"
] | [((229, 255), 'llama_index.core.embeddings.mock_embed_model.MockEmbedding.class_name', 'MockEmbedding.class_name', ([], {}), '()\n', (253, 255), False, 'from llama_index.core.embeddings.mock_embed_model import MockEmbedding\n'), ((431, 459), 'llama_index.embeddings.openai.OpenAIEmbedding.class_name', 'OpenAIEmbedding.class_name', ([], {}), '()\n', (457, 459), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((654, 687), 'llama_index.embeddings.azure_openai.AzureOpenAIEmbedding.class_name', 'AzureOpenAIEmbedding.class_name', ([], {}), '()\n', (685, 687), False, 'from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n'), ((907, 952), 'llama_index.embeddings.huggingface.HuggingFaceInferenceAPIEmbedding.class_name', 'HuggingFaceInferenceAPIEmbedding.class_name', ([], {}), '()\n', (950, 952), False, 'from llama_index.embeddings.huggingface import HuggingFaceInferenceAPIEmbedding\n')] |
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core.evaluation import CorrectnessEvaluator
from llama_index.llms import OpenAI, Gemini
from llama_index.core import ServiceContext
import pandas as pd
async def main():
# DOWNLOAD LLAMADATASET
evaluator_dataset, _ = download_llama_dataset(
"MiniMtBenchSingleGradingDataset", "./mini_mt_bench_data"
)
# DEFINE EVALUATORS
gpt_4_context = ServiceContext.from_defaults(
llm=OpenAI(temperature=0, model="gpt-4"),
)
gpt_3p5_context = ServiceContext.from_defaults(
llm=OpenAI(temperature=0, model="gpt-3.5-turbo"),
)
gemini_pro_context = ServiceContext.from_defaults(
llm=Gemini(model="models/gemini-pro", temperature=0)
)
evaluators = {
"gpt-4": CorrectnessEvaluator(service_context=gpt_4_context),
"gpt-3.5": CorrectnessEvaluator(service_context=gpt_3p5_context),
"gemini-pro": CorrectnessEvaluator(service_context=gemini_pro_context),
}
# EVALUATE WITH PACK
############################################################################
# NOTE: If have a lower tier subscription for OpenAI API like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
EvaluatorBenchmarkerPack = download_llama_pack("EvaluatorBenchmarkerPack", "./pack")
evaluator_benchmarker = EvaluatorBenchmarkerPack(
evaluator=evaluators["gpt-3.5"],
eval_dataset=evaluator_dataset,
show_progress=True,
)
gpt_3p5_benchmark_df = await evaluator_benchmarker.arun(
batch_size=100, sleep_time_in_seconds=0
)
evaluator_benchmarker = EvaluatorBenchmarkerPack(
evaluator=evaluators["gpt-4"],
eval_dataset=evaluator_dataset,
show_progress=True,
)
gpt_4_benchmark_df = await evaluator_benchmarker.arun(
batch_size=100, sleep_time_in_seconds=0
)
evaluator_benchmarker = EvaluatorBenchmarkerPack(
evaluator=evaluators["gemini-pro"],
eval_dataset=evaluator_dataset,
show_progress=True,
)
gemini_pro_benchmark_df = await evaluator_benchmarker.arun(
batch_size=5, sleep_time_in_seconds=0.5
)
benchmark_df = pd.concat(
[
gpt_3p5_benchmark_df,
gpt_4_benchmark_df,
gemini_pro_benchmark_df,
],
axis=0,
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"llama_index.core.llama_pack.download_llama_pack",
"llama_index.core.evaluation.CorrectnessEvaluator",
"llama_index.llms.Gemini",
"llama_index.llms.OpenAI",
"llama_index.core.llama_dataset.download_llama_dataset"
] | [((386, 471), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""MiniMtBenchSingleGradingDataset"""', '"""./mini_mt_bench_data"""'], {}), "('MiniMtBenchSingleGradingDataset',\n './mini_mt_bench_data')\n", (408, 471), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((1646, 1703), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""EvaluatorBenchmarkerPack"""', '"""./pack"""'], {}), "('EvaluatorBenchmarkerPack', './pack')\n", (1665, 1703), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((2580, 2670), 'pandas.concat', 'pd.concat', (['[gpt_3p5_benchmark_df, gpt_4_benchmark_df, gemini_pro_benchmark_df]'], {'axis': '(0)'}), '([gpt_3p5_benchmark_df, gpt_4_benchmark_df,\n gemini_pro_benchmark_df], axis=0)\n', (2589, 2670), True, 'import pandas as pd\n'), ((2801, 2825), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2823, 2825), False, 'import asyncio\n'), ((890, 941), 'llama_index.core.evaluation.CorrectnessEvaluator', 'CorrectnessEvaluator', ([], {'service_context': 'gpt_4_context'}), '(service_context=gpt_4_context)\n', (910, 941), False, 'from llama_index.core.evaluation import CorrectnessEvaluator\n'), ((962, 1015), 'llama_index.core.evaluation.CorrectnessEvaluator', 'CorrectnessEvaluator', ([], {'service_context': 'gpt_3p5_context'}), '(service_context=gpt_3p5_context)\n', (982, 1015), False, 'from llama_index.core.evaluation import CorrectnessEvaluator\n'), ((1039, 1095), 'llama_index.core.evaluation.CorrectnessEvaluator', 'CorrectnessEvaluator', ([], {'service_context': 'gemini_pro_context'}), '(service_context=gemini_pro_context)\n', (1059, 1095), False, 'from llama_index.core.evaluation import CorrectnessEvaluator\n'), ((569, 605), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (575, 605), False, 'from llama_index.llms import OpenAI, Gemini\n'), ((678, 722), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-3.5-turbo"""'}), "(temperature=0, model='gpt-3.5-turbo')\n", (684, 722), False, 'from llama_index.llms import OpenAI, Gemini\n'), ((798, 846), 'llama_index.llms.Gemini', 'Gemini', ([], {'model': '"""models/gemini-pro"""', 'temperature': '(0)'}), "(model='models/gemini-pro', temperature=0)\n", (804, 846), False, 'from llama_index.llms import OpenAI, Gemini\n')] |
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset(
"PaulGrahamEssayDataset", "./paul_graham"
)
# BUILD BASIC RAG PIPELINE
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
# EVALUATE WITH PACK
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack_stuff")
rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)
############################################################################
# NOTE: If have a lower tier subscription for OpenAI API like Usage Tier 1 #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
# and sleep_time_in_seconds=15 (as of December 2023.) #
############################################################################
benchmark_df = await rag_evaluator.arun(
batch_size=20, # batches the number of openai api calls to make
sleep_time_in_seconds=1, # number of seconds sleep before making an api call
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack"
] | [((265, 330), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""PaulGrahamEssayDataset"""', '"""./paul_graham"""'], {}), "('PaulGrahamEssayDataset', './paul_graham')\n", (287, 330), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((389, 441), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (420, 441), False, 'from llama_index.core import VectorStoreIndex\n'), ((534, 589), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack_stuff"""'], {}), "('RagEvaluatorPack', './pack_stuff')\n", (553, 589), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1440, 1464), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1462, 1464), False, 'import asyncio\n')] |
from typing import TYPE_CHECKING, Any, Optional
from llama_index.core.base.base_query_engine import BaseQueryEngine
if TYPE_CHECKING:
from llama_index.core.langchain_helpers.agents.tools import (
LlamaIndexTool,
)
from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput
DEFAULT_NAME = "query_engine_tool"
DEFAULT_DESCRIPTION = """Useful for running a natural language query
against a knowledge base and get back a natural language response.
"""
class QueryEngineTool(AsyncBaseTool):
"""Query engine tool.
A tool making use of a query engine.
Args:
query_engine (BaseQueryEngine): A query engine.
metadata (ToolMetadata): The associated metadata of the query engine.
"""
def __init__(
self,
query_engine: BaseQueryEngine,
metadata: ToolMetadata,
resolve_input_errors: bool = True,
) -> None:
self._query_engine = query_engine
self._metadata = metadata
self._resolve_input_errors = resolve_input_errors
@classmethod
def from_defaults(
cls,
query_engine: BaseQueryEngine,
name: Optional[str] = None,
description: Optional[str] = None,
resolve_input_errors: bool = True,
) -> "QueryEngineTool":
name = name or DEFAULT_NAME
description = description or DEFAULT_DESCRIPTION
metadata = ToolMetadata(name=name, description=description)
return cls(
query_engine=query_engine,
metadata=metadata,
resolve_input_errors=resolve_input_errors,
)
@property
def query_engine(self) -> BaseQueryEngine:
return self._query_engine
@property
def metadata(self) -> ToolMetadata:
return self._metadata
def call(self, *args: Any, **kwargs: Any) -> ToolOutput:
if args is not None and len(args) > 0:
query_str = str(args[0])
elif kwargs is not None and "input" in kwargs:
# NOTE: this assumes our default function schema of `input`
query_str = kwargs["input"]
elif kwargs is not None and self._resolve_input_errors:
query_str = str(kwargs)
else:
raise ValueError(
"Cannot call query engine without specifying `input` parameter."
)
response = self._query_engine.query(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=response,
)
async def acall(self, *args: Any, **kwargs: Any) -> ToolOutput:
if args is not None and len(args) > 0:
query_str = str(args[0])
elif kwargs is not None and "input" in kwargs:
# NOTE: this assumes our default function schema of `input`
query_str = kwargs["input"]
elif kwargs is not None and self._resolve_input_errors:
query_str = str(kwargs)
else:
raise ValueError("Cannot call query engine without inputs")
response = await self._query_engine.aquery(query_str)
return ToolOutput(
content=str(response),
tool_name=self.metadata.name,
raw_input={"input": query_str},
raw_output=response,
)
def as_langchain_tool(self) -> "LlamaIndexTool":
from llama_index.core.langchain_helpers.agents.tools import (
IndexToolConfig,
LlamaIndexTool,
)
tool_config = IndexToolConfig(
query_engine=self.query_engine,
name=self.metadata.name,
description=self.metadata.description,
)
return LlamaIndexTool.from_tool_config(tool_config=tool_config)
| [
"llama_index.core.langchain_helpers.agents.tools.IndexToolConfig",
"llama_index.core.langchain_helpers.agents.tools.LlamaIndexTool.from_tool_config",
"llama_index.core.tools.types.ToolMetadata"
] | [((1402, 1450), 'llama_index.core.tools.types.ToolMetadata', 'ToolMetadata', ([], {'name': 'name', 'description': 'description'}), '(name=name, description=description)\n', (1414, 1450), False, 'from llama_index.core.tools.types import AsyncBaseTool, ToolMetadata, ToolOutput\n'), ((3560, 3675), 'llama_index.core.langchain_helpers.agents.tools.IndexToolConfig', 'IndexToolConfig', ([], {'query_engine': 'self.query_engine', 'name': 'self.metadata.name', 'description': 'self.metadata.description'}), '(query_engine=self.query_engine, name=self.metadata.name,\n description=self.metadata.description)\n', (3575, 3675), False, 'from llama_index.core.langchain_helpers.agents.tools import IndexToolConfig, LlamaIndexTool\n'), ((3734, 3790), 'llama_index.core.langchain_helpers.agents.tools.LlamaIndexTool.from_tool_config', 'LlamaIndexTool.from_tool_config', ([], {'tool_config': 'tool_config'}), '(tool_config=tool_config)\n', (3765, 3790), False, 'from llama_index.core.langchain_helpers.agents.tools import IndexToolConfig, LlamaIndexTool\n')] |
from typing import Any, Callable, Dict, Optional, Sequence
from llama_index.legacy.bridge.pydantic import Field, PrivateAttr
from llama_index.legacy.callbacks import CallbackManager
from llama_index.legacy.constants import DEFAULT_NUM_OUTPUTS
from llama_index.legacy.core.llms.types import (
ChatMessage,
ChatResponse,
ChatResponseGen,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback
from llama_index.legacy.llms.custom import CustomLLM
from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator
from llama_index.legacy.llms.openai_utils import (
from_openai_message_dict,
to_openai_message_dicts,
)
from llama_index.legacy.types import BaseOutputParser, PydanticProgramMode
class LlamaAPI(CustomLLM):
model: str = Field(description="The llama-api model to use.")
temperature: float = Field(description="The temperature to use for sampling.")
max_tokens: int = Field(description="The maximum number of tokens to generate.")
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs for the llama-api API."
)
_client: Any = PrivateAttr()
def __init__(
self,
model: str = "llama-13b-chat",
temperature: float = 0.1,
max_tokens: int = DEFAULT_NUM_OUTPUTS,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
) -> None:
try:
from llamaapi import LlamaAPI as Client
except ImportError as e:
raise ImportError(
"llama_api not installed."
"Please install it with `pip install llamaapi`."
) from e
self._client = Client(api_key)
super().__init__(
model=model,
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs or {},
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
@classmethod
def class_name(cls) -> str:
return "llama_api_llm"
@property
def _model_kwargs(self) -> Dict[str, Any]:
base_kwargs = {
"model": self.model,
"temperature": self.temperature,
"max_length": self.max_tokens,
}
return {
**base_kwargs,
**self.additional_kwargs,
}
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(
context_window=4096,
num_output=DEFAULT_NUM_OUTPUTS,
is_chat_model=True,
is_function_calling_model=True,
model_name="llama-api",
)
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
message_dicts = to_openai_message_dicts(messages)
json_dict = {
"messages": message_dicts,
**self._model_kwargs,
**kwargs,
}
response = self._client.run(json_dict).json()
message_dict = response["choices"][0]["message"]
message = from_openai_message_dict(message_dict)
return ChatResponse(message=message, raw=response)
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
complete_fn = chat_to_completion_decorator(self.chat)
return complete_fn(prompt, **kwargs)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError("stream_complete is not supported for LlamaAPI")
@llm_chat_callback()
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
raise NotImplementedError("stream_chat is not supported for LlamaAPI")
| [
"llama_index.legacy.llms.openai_utils.from_openai_message_dict",
"llama_index.legacy.llms.base.llm_chat_callback",
"llama_index.legacy.core.llms.types.ChatResponse",
"llama_index.legacy.llms.base.llm_completion_callback",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.core.llms.types.LLMMetadata",
"llama_index.legacy.bridge.pydantic.Field",
"llama_index.legacy.llms.generic_utils.chat_to_completion_decorator",
"llama_index.legacy.llms.openai_utils.to_openai_message_dicts"
] | [((868, 916), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The llama-api model to use."""'}), "(description='The llama-api model to use.')\n", (873, 916), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((942, 999), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The temperature to use for sampling."""'}), "(description='The temperature to use for sampling.')\n", (947, 999), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1022, 1084), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'description': '"""The maximum number of tokens to generate."""'}), "(description='The maximum number of tokens to generate.')\n", (1027, 1084), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1125, 1213), 'llama_index.legacy.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict', 'description': '"""Additional kwargs for the llama-api API."""'}), "(default_factory=dict, description=\n 'Additional kwargs for the llama-api API.')\n", (1130, 1213), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((1243, 1256), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (1254, 1256), False, 'from llama_index.legacy.bridge.pydantic import Field, PrivateAttr\n'), ((3379, 3398), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (3396, 3398), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((3902, 3927), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (3925, 3927), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4154, 4179), 'llama_index.legacy.llms.base.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (4177, 4179), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((4392, 4411), 'llama_index.legacy.llms.base.llm_chat_callback', 'llm_chat_callback', ([], {}), '()\n', (4409, 4411), False, 'from llama_index.legacy.llms.base import llm_chat_callback, llm_completion_callback\n'), ((2205, 2220), 'llamaapi.LlamaAPI', 'Client', (['api_key'], {}), '(api_key)\n', (2211, 2220), True, 'from llamaapi import LlamaAPI as Client\n'), ((3161, 3305), 'llama_index.legacy.core.llms.types.LLMMetadata', 'LLMMetadata', ([], {'context_window': '(4096)', 'num_output': 'DEFAULT_NUM_OUTPUTS', 'is_chat_model': '(True)', 'is_function_calling_model': '(True)', 'model_name': '"""llama-api"""'}), "(context_window=4096, num_output=DEFAULT_NUM_OUTPUTS,\n is_chat_model=True, is_function_calling_model=True, model_name='llama-api')\n", (3172, 3305), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((3507, 3540), 'llama_index.legacy.llms.openai_utils.to_openai_message_dicts', 'to_openai_message_dicts', (['messages'], {}), '(messages)\n', (3530, 3540), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message_dict, to_openai_message_dicts\n'), ((3797, 3835), 'llama_index.legacy.llms.openai_utils.from_openai_message_dict', 'from_openai_message_dict', (['message_dict'], {}), '(message_dict)\n', (3821, 3835), False, 'from llama_index.legacy.llms.openai_utils import from_openai_message_dict, to_openai_message_dicts\n'), ((3852, 3895), 'llama_index.legacy.core.llms.types.ChatResponse', 'ChatResponse', ([], {'message': 'message', 'raw': 'response'}), '(message=message, raw=response)\n', (3864, 3895), False, 'from llama_index.legacy.core.llms.types import ChatMessage, ChatResponse, ChatResponseGen, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((4063, 4102), 'llama_index.legacy.llms.generic_utils.chat_to_completion_decorator', 'chat_to_completion_decorator', (['self.chat'], {}), '(self.chat)\n', (4091, 4102), False, 'from llama_index.legacy.llms.generic_utils import chat_to_completion_decorator\n')]
"""Download tool from Llama Hub."""
from typing import Optional, Type
from llama_index.legacy.download.module import (
LLAMA_HUB_URL,
MODULE_TYPE,
download_llama_module,
track_download,
)
from llama_index.legacy.tools.tool_spec.base import BaseToolSpec
def download_tool(
tool_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_path: Optional[str] = None,
) -> Type[BaseToolSpec]:
"""Download a single tool from Llama Hub.
Args:
tool_class: The name of the tool class you want to download,
such as `GmailToolSpec`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_path: Custom dirpath to download loader into.
Returns:
A Loader.
"""
tool_cls = download_llama_module(
tool_class,
llama_hub_url=llama_hub_url,
refresh_cache=refresh_cache,
custom_dir="tools",
custom_path=custom_path,
library_path="tools/library.json",
)
if not issubclass(tool_cls, BaseToolSpec):
raise ValueError(f"Tool class {tool_class} must be a subclass of BaseToolSpec.")
track_download(tool_class, MODULE_TYPE.TOOL)
return tool_cls
| [
"llama_index.legacy.download.module.track_download",
"llama_index.legacy.download.module.download_llama_module"
] | [((867, 1047), 'llama_index.legacy.download.module.download_llama_module', 'download_llama_module', (['tool_class'], {'llama_hub_url': 'llama_hub_url', 'refresh_cache': 'refresh_cache', 'custom_dir': '"""tools"""', 'custom_path': 'custom_path', 'library_path': '"""tools/library.json"""'}), "(tool_class, llama_hub_url=llama_hub_url,\n refresh_cache=refresh_cache, custom_dir='tools', custom_path=\n custom_path, library_path='tools/library.json')\n", (888, 1047), False, 'from llama_index.legacy.download.module import LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download\n'), ((1234, 1278), 'llama_index.legacy.download.module.track_download', 'track_download', (['tool_class', 'MODULE_TYPE.TOOL'], {}), '(tool_class, MODULE_TYPE.TOOL)\n', (1248, 1278), False, 'from llama_index.legacy.download.module import LLAMA_HUB_URL, MODULE_TYPE, download_llama_module, track_download\n')] |
import json
import os
from typing import Dict, List, Optional, Type
from loguru import logger
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentChunkWithScore,
DocumentMetadataFilter,
Query,
QueryResult,
QueryWithEmbedding,
)
from llama_index.indices.base import BaseGPTIndex
from llama_index.indices.vector_store.base import GPTVectorStoreIndex
from llama_index.indices.query.schema import QueryBundle
from llama_index.response.schema import Response
from llama_index.data_structs.node_v2 import Node, DocumentRelationship, NodeWithScore
from llama_index.indices.registry import INDEX_STRUCT_TYPE_TO_INDEX_CLASS
from llama_index.data_structs.struct_type import IndexStructType
from llama_index.indices.response.builder import ResponseMode
INDEX_STRUCT_TYPE_STR = os.environ.get(
"LLAMA_INDEX_TYPE", IndexStructType.SIMPLE_DICT.value
)
INDEX_JSON_PATH = os.environ.get("LLAMA_INDEX_JSON_PATH", None)
QUERY_KWARGS_JSON_PATH = os.environ.get("LLAMA_QUERY_KWARGS_JSON_PATH", None)
RESPONSE_MODE = os.environ.get("LLAMA_RESPONSE_MODE", ResponseMode.NO_TEXT.value)
EXTERNAL_VECTOR_STORE_INDEX_STRUCT_TYPES = [
IndexStructType.DICT,
IndexStructType.WEAVIATE,
IndexStructType.PINECONE,
IndexStructType.QDRANT,
IndexStructType.CHROMA,
IndexStructType.VECTOR_STORE,
]
def _create_or_load_index(
index_type_str: Optional[str] = None,
index_json_path: Optional[str] = None,
index_type_to_index_cls: Optional[dict[str, Type[BaseGPTIndex]]] = None,
) -> BaseGPTIndex:
"""Create or load index from json path."""
index_json_path = index_json_path or INDEX_JSON_PATH
index_type_to_index_cls = (
index_type_to_index_cls or INDEX_STRUCT_TYPE_TO_INDEX_CLASS
)
index_type_str = index_type_str or INDEX_STRUCT_TYPE_STR
index_type = IndexStructType(index_type_str)
if index_type not in index_type_to_index_cls:
raise ValueError(f"Unknown index type: {index_type}")
if index_type in EXTERNAL_VECTOR_STORE_INDEX_STRUCT_TYPES:
raise ValueError("Please use vector store directly.")
index_cls = index_type_to_index_cls[index_type]
if index_json_path is None:
return index_cls(nodes=[]) # Create empty index
else:
return index_cls.load_from_disk(index_json_path) # Load index from disk
def _create_or_load_query_kwargs(
query_kwargs_json_path: Optional[str] = None,
) -> Optional[dict]:
"""Create or load query kwargs from json path."""
query_kwargs_json_path = query_kwargs_json_path or QUERY_KWARGS_JSON_PATH
query_kargs: Optional[dict] = None
if query_kwargs_json_path is not None:
with open(INDEX_JSON_PATH, "r") as f:
query_kargs = json.load(f)
return query_kargs
def _doc_chunk_to_node(doc_chunk: DocumentChunk, source_doc_id: str) -> Node:
"""Convert document chunk to Node"""
return Node(
doc_id=doc_chunk.id,
text=doc_chunk.text,
embedding=doc_chunk.embedding,
extra_info=doc_chunk.metadata.dict(),
relationships={DocumentRelationship.SOURCE: source_doc_id},
)
def _query_with_embedding_to_query_bundle(query: QueryWithEmbedding) -> QueryBundle:
return QueryBundle(
query_str=query.query,
embedding=query.embedding,
)
def _source_node_to_doc_chunk_with_score(
node_with_score: NodeWithScore,
) -> DocumentChunkWithScore:
node = node_with_score.node
if node.extra_info is not None:
metadata = DocumentChunkMetadata(**node.extra_info)
else:
metadata = DocumentChunkMetadata()
return DocumentChunkWithScore(
id=node.doc_id,
text=node.text,
score=node_with_score.score if node_with_score.score is not None else 1.0,
metadata=metadata,
)
def _response_to_query_result(
response: Response, query: QueryWithEmbedding
) -> QueryResult:
results = [
_source_node_to_doc_chunk_with_score(node) for node in response.source_nodes
]
return QueryResult(
query=query.query,
results=results,
)
class LlamaDataStore(DataStore):
def __init__(
self, index: Optional[BaseGPTIndex] = None, query_kwargs: Optional[dict] = None
):
self._index = index or _create_or_load_index()
self._query_kwargs = query_kwargs or _create_or_load_query_kwargs()
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
Takes in a list of list of document chunks and inserts them into the database.
Return a list of document ids.
"""
doc_ids = []
for doc_id, doc_chunks in chunks.items():
logger.debug(f"Upserting {doc_id} with {len(doc_chunks)} chunks")
nodes = [
_doc_chunk_to_node(doc_chunk=doc_chunk, source_doc_id=doc_id)
for doc_chunk in doc_chunks
]
self._index.insert_nodes(nodes)
doc_ids.append(doc_id)
return doc_ids
async def _query(
self,
queries: List[QueryWithEmbedding],
) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and
returns a list of query results with matching document chunks and scores.
"""
query_result_all = []
for query in queries:
if query.filter is not None:
logger.warning("Filters are not supported yet, ignoring for now.")
query_bundle = _query_with_embedding_to_query_bundle(query)
# Setup query kwargs
if self._query_kwargs is not None:
query_kwargs = self._query_kwargs
else:
query_kwargs = {}
# TODO: support top_k for other indices
if isinstance(self._index, GPTVectorStoreIndex):
query_kwargs["similarity_top_k"] = query.top_k
response = await self._index.aquery(
query_bundle, response_mode=RESPONSE_MODE, **query_kwargs
)
query_result = _response_to_query_result(response, query)
query_result_all.append(query_result)
return query_result_all
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""
Removes vectors by ids, filter, or everything in the datastore.
Returns whether the operation was successful.
"""
if delete_all:
logger.warning("Delete all not supported yet.")
return False
if filter is not None:
logger.warning("Filters are not supported yet.")
return False
if ids is not None:
for id_ in ids:
try:
self._index.delete(id_)
except NotImplementedError:
# NOTE: some indices does not support delete yet.
logger.warning(f"{type(self._index)} does not support delete yet.")
return False
return True
| [
"llama_index.data_structs.struct_type.IndexStructType",
"llama_index.indices.query.schema.QueryBundle"
] | [((860, 929), 'os.environ.get', 'os.environ.get', (['"""LLAMA_INDEX_TYPE"""', 'IndexStructType.SIMPLE_DICT.value'], {}), "('LLAMA_INDEX_TYPE', IndexStructType.SIMPLE_DICT.value)\n", (874, 929), False, 'import os\n'), ((954, 999), 'os.environ.get', 'os.environ.get', (['"""LLAMA_INDEX_JSON_PATH"""', 'None'], {}), "('LLAMA_INDEX_JSON_PATH', None)\n", (968, 999), False, 'import os\n'), ((1025, 1077), 'os.environ.get', 'os.environ.get', (['"""LLAMA_QUERY_KWARGS_JSON_PATH"""', 'None'], {}), "('LLAMA_QUERY_KWARGS_JSON_PATH', None)\n", (1039, 1077), False, 'import os\n'), ((1094, 1159), 'os.environ.get', 'os.environ.get', (['"""LLAMA_RESPONSE_MODE"""', 'ResponseMode.NO_TEXT.value'], {}), "('LLAMA_RESPONSE_MODE', ResponseMode.NO_TEXT.value)\n", (1108, 1159), False, 'import os\n'), ((1882, 1913), 'llama_index.data_structs.struct_type.IndexStructType', 'IndexStructType', (['index_type_str'], {}), '(index_type_str)\n', (1897, 1913), False, 'from llama_index.data_structs.struct_type import IndexStructType\n'), ((3268, 3329), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query.query', 'embedding': 'query.embedding'}), '(query_str=query.query, embedding=query.embedding)\n', (3279, 3329), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((3655, 3812), 'models.models.DocumentChunkWithScore', 'DocumentChunkWithScore', ([], {'id': 'node.doc_id', 'text': 'node.text', 'score': '(node_with_score.score if node_with_score.score is not None else 1.0)', 'metadata': 'metadata'}), '(id=node.doc_id, text=node.text, score=\n node_with_score.score if node_with_score.score is not None else 1.0,\n metadata=metadata)\n', (3677, 3812), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((4062, 4109), 'models.models.QueryResult', 'QueryResult', ([], {'query': 'query.query', 'results': 'results'}), '(query=query.query, results=results)\n', (4073, 4109), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((3549, 3589), 'models.models.DocumentChunkMetadata', 'DocumentChunkMetadata', ([], {}), '(**node.extra_info)\n', (3570, 3589), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((3619, 3642), 'models.models.DocumentChunkMetadata', 'DocumentChunkMetadata', ([], {}), '()\n', (3640, 3642), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((2779, 2791), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2788, 2791), False, 'import json\n'), ((6623, 6670), 'loguru.logger.warning', 'logger.warning', (['"""Delete all not supported yet."""'], {}), "('Delete all not supported yet.')\n", (6637, 6670), False, 'from loguru import logger\n'), ((6740, 6788), 'loguru.logger.warning', 'logger.warning', (['"""Filters are not supported yet."""'], {}), "('Filters are not supported yet.')\n", (6754, 6788), False, 'from loguru import logger\n'), ((5454, 5520), 'loguru.logger.warning', 'logger.warning', (['"""Filters are not supported yet, ignoring for now."""'], {}), "('Filters are not supported yet, ignoring for now.')\n", (5468, 5520), False, 'from loguru import logger\n')] |
import logging
import os
from typing import Optional
from typing import Type
import openai
from langchain.chat_models import ChatOpenAI
from llama_index import VectorStoreIndex, LLMPredictor, ServiceContext
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters
from pydantic import BaseModel, Field
from superagi.config.config import get_config
from superagi.llms.base_llm import BaseLlm
from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory
from superagi.tools.base_tool import BaseTool
from superagi.types.vector_store_types import VectorStoreType
from superagi.vector_store.chromadb import ChromaDB
class QueryResource(BaseModel):
"""Input for QueryResource tool."""
query: str = Field(..., description="the search query to search resources")
class QueryResourceTool(BaseTool):
"""
Read File tool
Attributes:
name : The name.
description : The description.
args_schema : The args schema.
"""
name: str = "QueryResource"
args_schema: Type[BaseModel] = QueryResource
description: str = "Tool searches resources content and extracts relevant information to perform the given task." \
"Tool is given preference over other search/read file tools for relevant data." \
"Resources content is taken from the files: {summary}"
agent_id: int = None
llm: Optional[BaseLlm] = None
def _execute(self, query: str):
openai.api_key = self.llm.get_api_key()
os.environ["OPENAI_API_KEY"] = self.llm.get_api_key()
llm_predictor_chatgpt = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=self.llm.get_model(),
openai_api_key=get_config("OPENAI_API_KEY")))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt)
vector_store_name = VectorStoreType.get_vector_store_type(
self.get_tool_config(key="RESOURCE_VECTOR_STORE") or "Redis")
vector_store_index_name = self.get_tool_config(key="RESOURCE_VECTOR_STORE_INDEX_NAME") or "super-agent-index"
logging.info(f"vector_store_name {vector_store_name}")
logging.info(f"vector_store_index_name {vector_store_index_name}")
vector_store = LlamaVectorStoreFactory(vector_store_name, vector_store_index_name).get_vector_store()
logging.info(f"vector_store {vector_store}")
as_query_engine_args = dict(
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="agent_id",
value=str(self.agent_id)
)
]
)
)
if vector_store_name == VectorStoreType.CHROMA:
as_query_engine_args["chroma_collection"] = ChromaDB.create_collection(
collection_name=vector_store_index_name)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
query_engine = index.as_query_engine(
**as_query_engine_args
)
try:
response = query_engine.query(query)
except ValueError as e:
logging.error(f"ValueError {e}")
response = "Document not found"
return response
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((754, 816), 'pydantic.Field', 'Field', (['...'], {'description': '"""the search query to search resources"""'}), "(..., description='the search query to search resources')\n", (759, 816), False, 'from pydantic import BaseModel, Field\n'), ((1840, 1905), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor_chatgpt'}), '(llm_predictor=llm_predictor_chatgpt)\n', (1868, 1905), False, 'from llama_index import VectorStoreIndex, LLMPredictor, ServiceContext\n'), ((2173, 2227), 'logging.info', 'logging.info', (['f"""vector_store_name {vector_store_name}"""'], {}), "(f'vector_store_name {vector_store_name}')\n", (2185, 2227), False, 'import logging\n'), ((2236, 2302), 'logging.info', 'logging.info', (['f"""vector_store_index_name {vector_store_index_name}"""'], {}), "(f'vector_store_index_name {vector_store_index_name}')\n", (2248, 2302), False, 'import logging\n'), ((2421, 2465), 'logging.info', 'logging.info', (['f"""vector_store {vector_store}"""'], {}), "(f'vector_store {vector_store}')\n", (2433, 2465), False, 'import logging\n'), ((2970, 3068), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (3004, 3068), False, 'from llama_index import VectorStoreIndex, LLMPredictor, ServiceContext\n'), ((2869, 2936), 'superagi.vector_store.chromadb.ChromaDB.create_collection', 'ChromaDB.create_collection', ([], {'collection_name': 'vector_store_index_name'}), '(collection_name=vector_store_index_name)\n', (2895, 2936), False, 'from superagi.vector_store.chromadb import ChromaDB\n'), ((2326, 2393), 'superagi.resource_manager.llama_vector_store_factory.LlamaVectorStoreFactory', 'LlamaVectorStoreFactory', (['vector_store_name', 'vector_store_index_name'], {}), '(vector_store_name, vector_store_index_name)\n', (2349, 2393), False, 'from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory\n'), ((3262, 3294), 'logging.error', 'logging.error', (['f"""ValueError {e}"""'], {}), "(f'ValueError {e}')\n", (3275, 3294), False, 'import logging\n'), ((1782, 1810), 'superagi.config.config.get_config', 'get_config', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1792, 1810), False, 'from superagi.config.config import get_config\n')] |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from typing import Any, List, Optional
from sentence_transformers import CrossEncoder
from typing import Optional, Sequence
from langchain_core.documents import Document
from langchain.callbacks.manager import Callbacks
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from llama_index.bridge.pydantic import Field, PrivateAttr
class LangchainReranker(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
model_name_or_path: str = Field()
_model: Any = PrivateAttr()
top_n: int = Field()
device: str = Field()
max_length: int = Field()
batch_size: int = Field()
# show_progress_bar: bool = None
num_workers: int = Field()
# activation_fct = None
# apply_softmax = False
def __init__(self,
model_name_or_path: str,
top_n: int = 3,
device: str = "cuda",
max_length: int = 1024,
batch_size: int = 32,
# show_progress_bar: bool = None,
num_workers: int = 0,
# activation_fct = None,
# apply_softmax = False,
):
# self.top_n=top_n
# self.model_name_or_path=model_name_or_path
# self.device=device
# self.max_length=max_length
# self.batch_size=batch_size
# self.show_progress_bar=show_progress_bar
# self.num_workers=num_workers
# self.activation_fct=activation_fct
# self.apply_softmax=apply_softmax
self._model = CrossEncoder(model_name=model_name_or_path, max_length=1024, device=device)
super().__init__(
top_n=top_n,
model_name_or_path=model_name_or_path,
device=device,
max_length=max_length,
batch_size=batch_size,
# show_progress_bar=show_progress_bar,
num_workers=num_workers,
# activation_fct=activation_fct,
# apply_softmax=apply_softmax
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
Compress documents using Cohere's rerank API.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
if len(documents) == 0: # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
sentence_pairs = [[query, _doc] for _doc in _docs]
results = self._model.predict(sentences=sentence_pairs,
batch_size=self.batch_size,
# show_progress_bar=self.show_progress_bar,
num_workers=self.num_workers,
# activation_fct=self.activation_fct,
# apply_softmax=self.apply_softmax,
convert_to_tensor=True
)
top_k = self.top_n if self.top_n < len(results) else len(results)
values, indices = results.topk(top_k)
final_results = []
for value, index in zip(values, indices):
doc = doc_list[index]
doc.metadata["relevance_score"] = value
final_results.append(doc)
return final_results
if __name__ == "__main__":
from configs import (LLM_MODELS,
VECTOR_SEARCH_TOP_K,
SCORE_THRESHOLD,
TEMPERATURE,
USE_RERANKER,
RERANKER_MODEL,
RERANKER_MAX_LENGTH,
MODEL_PATH)
from server.utils import embedding_device
if USE_RERANKER:
reranker_model_path = MODEL_PATH["reranker"].get(RERANKER_MODEL, "BAAI/bge-reranker-large")
print("-----------------model path------------------")
print(reranker_model_path)
reranker_model = LangchainReranker(top_n=3,
device=embedding_device(),
max_length=RERANKER_MAX_LENGTH,
model_name_or_path=reranker_model_path
)
| [
"llama_index.bridge.pydantic.Field",
"llama_index.bridge.pydantic.PrivateAttr"
] | [((602, 609), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (607, 609), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((628, 641), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (639, 641), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((659, 666), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (664, 666), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((685, 692), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (690, 692), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((715, 722), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (720, 722), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((745, 752), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (750, 752), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((813, 820), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (818, 820), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((1674, 1749), 'sentence_transformers.CrossEncoder', 'CrossEncoder', ([], {'model_name': 'model_name_or_path', 'max_length': '(1024)', 'device': 'device'}), '(model_name=model_name_or_path, max_length=1024, device=device)\n', (1686, 1749), False, 'from sentence_transformers import CrossEncoder\n'), ((70, 95), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (85, 95), False, 'import os\n'), ((4520, 4538), 'server.utils.embedding_device', 'embedding_device', ([], {}), '()\n', (4536, 4538), False, 'from server.utils import embedding_device\n')] |
'''
Below helper functions are implemented in this script:
build_sentence_window_index - VectorStore Index for Sentence window RAG technique
get_sentence_window_query_engine - query enginer for the above index
build_automerging_index - VectorStore Index for Auto-merging RAG technique
get_automerging_query_engine - query enginer for the above index
Evaluation function:
get_prebuilt_trulens_recorder - evaluation function with all the feedback functions
'''
import os
import numpy as np
from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.retrievers import AutoMergingRetriever
from llama_index.query_engine import RetrieverQueryEngine
from trulens_eval import Feedback, TruLlama
from trulens_eval import OpenAI as fOpenAI
from trulens_eval.feedback import Groundedness
############################################################################## Function 1 ###########################################################
def build_sentence_window_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
sentence_window_size=3,
save_dir="sentence_index",
):
# create the sentence window node parser w/ default settings
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=sentence_window_size,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
sentence_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
node_parser=node_parser,
)
if not os.path.exists(save_dir):
sentence_index = VectorStoreIndex.from_documents(
documents, service_context=sentence_context
)
sentence_index.storage_context.persist(persist_dir=save_dir)
else:
sentence_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=sentence_context,
)
return sentence_index
############################################################################## Function 2 ###########################################################
def get_sentence_window_query_engine(
sentence_index, similarity_top_k=6, rerank_top_n=2
):
# define postprocessors
postproc = MetadataReplacementPostProcessor(target_metadata_key="window")
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model="BAAI/bge-reranker-base"
)
sentence_window_engine = sentence_index.as_query_engine(
similarity_top_k=similarity_top_k, node_postprocessors=[postproc, rerank]
)
return sentence_window_engine
############################################################################## Function 3 ###########################################################
def build_automerging_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
save_dir="merging_index",
chunk_sizes=None
):
# chunk sizes for all the layers (factor of 4)
chunk_sizes = chunk_sizes or [2048, 512, 128]
# Hierarchical node parser to parse the tree nodes (parent and children)
node_parser = HierarchicalNodeParser.from_defaults(chunk_sizes=chunk_sizes)
# getting all intermediate and parent nodes
nodes = node_parser.get_nodes_from_documents(documents)
# getting only the leaf nodes
leaf_nodes = get_leaf_nodes(nodes)
# required service context to initialize both llm and embed model
merging_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model
)
# storage context to store the intermediate and parent nodes in a docstore, because the index is built only on the leaf nodes
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
if not os.path.exists(save_dir):
automerging_index = VectorStoreIndex(
leaf_nodes, storage_context=storage_context, service_context=merging_context
)
automerging_index.storage_context.persist(persist_dir=save_dir)
else:
automerging_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=merging_context
)
return automerging_index
############################################################################## Function 4 ###########################################################
def get_automerging_query_engine(
automerging_index,
similarity_top_k=12,
rerank_top_n=6,
):
# retriever is used to merge the child nodes into the parent nodes
base_retriever = automerging_index.as_retriever(similarity_top_k=similarity_top_k)
retriever = AutoMergingRetriever(
base_retriever, automerging_index.storage_context, verbose=True
)
# Ranking is used to select top k relevant chunks from similarity_top_k
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model='BAAI/bge-reranker-base'
)
# getting query engine with the above mentioned retiriever and reranker
automerging_engine = RetrieverQueryEngine.from_args(
retriever, node_postprocessors=[rerank]
)
return automerging_engine
############################################################################## Function 5 ###########################################################
def get_prebuilt_trulens_recorder(query_engine, app_id):
# Feedback functions
# Answer Relevance
provider = fOpenAI()
f_qa_relevance = Feedback(
provider.relevance_with_cot_reasons,
name="Answer Relevance"
).on_input_output()
# Context Relevance
context_selection = TruLlama.select_source_nodes().node.text
f_qs_relevance = (
Feedback(provider.qs_relevance,
name="Context Relevance")
.on_input()
.on(context_selection)
.aggregate(np.mean)
)
# Groundedness
grounded = Groundedness(groundedness_provider=provider)
f_groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons,
name="Groundedness"
)
.on(context_selection)
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
tru_recorder = TruLlama(
query_engine,
app_id=app_id,
feedbacks = [
f_qa_relevance,
f_qs_relevance,
f_groundedness
]
)
return tru_recorder
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.AutoMergingRetriever",
"llama_index.node_parser.HierarchicalNodeParser.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.indices.postprocessor.SentenceTransformerRerank",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.get_leaf_nodes",
"llama_index.StorageContext.from_defaults",
"llama_index.query_engine.RetrieverQueryEngine.from_args",
"llama_index.indices.postprocessor.MetadataReplacementPostProcessor"
] | [((1446, 1596), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': 'sentence_window_size', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=sentence_window_size,\n window_metadata_key='window', original_text_metadata_key='original_text')\n", (1484, 1596), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((1647, 1739), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model, node_parser=\n node_parser)\n', (1675, 1739), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2493, 2555), 'llama_index.indices.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (2525, 2555), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((2569, 2646), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (2594, 2646), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((3368, 3429), 'llama_index.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': 'chunk_sizes'}), '(chunk_sizes=chunk_sizes)\n', (3404, 3429), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((3591, 3612), 'llama_index.node_parser.get_leaf_nodes', 'get_leaf_nodes', (['nodes'], {}), '(nodes)\n', (3605, 3612), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((3706, 3768), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (3734, 3768), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((3944, 3974), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (3972, 3974), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4938, 5027), 'llama_index.retrievers.AutoMergingRetriever', 'AutoMergingRetriever', (['base_retriever', 'automerging_index.storage_context'], {'verbose': '(True)'}), '(base_retriever, automerging_index.storage_context,\n verbose=True)\n', (4958, 5027), False, 'from llama_index.retrievers import AutoMergingRetriever\n'), ((5128, 5205), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (5153, 5205), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((5322, 5393), 'llama_index.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {'node_postprocessors': '[rerank]'}), '(retriever, \n    node_postprocessors=[rerank])\n', (5352, 5393), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((5711, 5720), 'trulens_eval.OpenAI', 'fOpenAI', ([], {}), '()\n', (5718, 5720), True, 'from trulens_eval import OpenAI as fOpenAI\n'), ((6162, 6206), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'provider'}), '(groundedness_provider=provider)\n', (6174, 6206), False, 'from trulens_eval.feedback import Groundedness\n'), ((6488, 6589), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': '[f_qa_relevance, f_qs_relevance, f_groundedness]'}), '(query_engine, app_id=app_id, feedbacks=[f_qa_relevance,\n f_qs_relevance, f_groundedness])\n', (6496, 6589), False, 'from trulens_eval import Feedback, TruLlama\n'), ((1777, 1801), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1791, 1801), False, 'import os\n'), ((1828, 1904), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'sentence_context'}), '(documents, service_context=sentence_context)\n', (1859, 1904), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4037, 4061), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4051, 4061), False, 'import os\n'), ((4091, 4189), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['leaf_nodes'], {'storage_context': 'storage_context', 'service_context': 'merging_context'}), '(leaf_nodes, storage_context=storage_context,\n service_context=merging_context)\n', (4107, 4189), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2068, 2118), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (2096, 2118), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4356, 4406), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (4384, 4406), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((5743, 5813), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasons'], {'name': '"""Answer Relevance"""'}), "(provider.relevance_with_cot_reasons, name='Answer Relevance')\n", (5751, 5813), False, 'from trulens_eval import Feedback, TruLlama\n'), ((5903, 5933), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (5931, 5933), False, 'from trulens_eval import Feedback, TruLlama\n'), ((5972, 6029), 'trulens_eval.Feedback', 'Feedback', (['provider.qs_relevance'], {'name': '"""Context Relevance"""'}), "(provider.qs_relevance, name='Context Relevance')\n", (5980, 6029), False, 'from trulens_eval import Feedback, TruLlama\n'), ((6239, 6316), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, name='Groundedness')\n", (6247, 6316), False, 'from trulens_eval import Feedback, TruLlama\n')]
'''
The following helper functions are implemented in this script:
build_sentence_window_index - VectorStore Index for the Sentence Window RAG technique
get_sentence_window_query_engine - query engine for the above index
build_automerging_index - VectorStore Index for the Auto-merging RAG technique
get_automerging_query_engine - query engine for the above index
Evaluation function:
get_prebuilt_trulens_recorder - evaluation function with all the feedback functions
'''
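# --- Hypothetical usage sketch (added for clarity, not part of the original script) ---
# It assumes an OpenAI LLM and a `./data` folder of documents; it is kept commented
# out so importing this module stays side-effect free.
#
#   from llama_index import SimpleDirectoryReader
#   from llama_index.llms import OpenAI
#
#   documents = SimpleDirectoryReader("./data").load_data()
#   llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
#   sentence_index = build_sentence_window_index(documents, llm, save_dir="sentence_index")
#   sentence_engine = get_sentence_window_query_engine(sentence_index)
#   print(sentence_engine.query("What is the document about?"))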
import os
import numpy as np
from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.retrievers import AutoMergingRetriever
from llama_index.query_engine import RetrieverQueryEngine
from trulens_eval import Feedback, TruLlama
from trulens_eval import OpenAI as fOpenAI
from trulens_eval.feedback import Groundedness
############################################################################## Function 1 ###########################################################
def build_sentence_window_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
sentence_window_size=3,
save_dir="sentence_index",
):
# create the sentence window node parser w/ default settings
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=sentence_window_size,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
sentence_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
node_parser=node_parser,
)
if not os.path.exists(save_dir):
sentence_index = VectorStoreIndex.from_documents(
documents, service_context=sentence_context
)
sentence_index.storage_context.persist(persist_dir=save_dir)
else:
sentence_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=sentence_context,
)
return sentence_index
############################################################################## Function 2 ###########################################################
def get_sentence_window_query_engine(
sentence_index, similarity_top_k=6, rerank_top_n=2
):
# define postprocessors
postproc = MetadataReplacementPostProcessor(target_metadata_key="window")
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model="BAAI/bge-reranker-base"
)
sentence_window_engine = sentence_index.as_query_engine(
similarity_top_k=similarity_top_k, node_postprocessors=[postproc, rerank]
)
return sentence_window_engine
############################################################################## Function 3 ###########################################################
def build_automerging_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
save_dir="merging_index",
chunk_sizes=None
):
# chunk sizes for all the layers (factor of 4)
chunk_sizes = chunk_sizes or [2048, 512, 128]
# Hierarchical node parser to parse the tree nodes (parent and children)
node_parser = HierarchicalNodeParser.from_defaults(chunk_sizes=chunk_sizes)
# getting all intermediate and parent nodes
nodes = node_parser.get_nodes_from_documents(documents)
# getting only the leaf nodes
leaf_nodes = get_leaf_nodes(nodes)
# required service context to initialize both llm and embed model
merging_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model
)
# storage context to store the intermediate and parent nodes in a docstore, because the index is built only on the leaf nodes
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
if not os.path.exists(save_dir):
automerging_index = VectorStoreIndex(
leaf_nodes, storage_context=storage_context, service_context=merging_context
)
automerging_index.storage_context.persist(persist_dir=save_dir)
else:
automerging_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=merging_context
)
return automerging_index
############################################################################## Function 4 ###########################################################
def get_automerging_query_engine(
automerging_index,
similarity_top_k=12,
rerank_top_n=6,
):
# retriever is used to merge the child nodes into the parent nodes
base_retriever = automerging_index.as_retriever(similarity_top_k=similarity_top_k)
retriever = AutoMergingRetriever(
base_retriever, automerging_index.storage_context, verbose=True
)
    # Reranking selects the top_n most relevant chunks out of the similarity_top_k retrieved
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model='BAAI/bge-reranker-base'
)
    # getting the query engine with the above-mentioned retriever and reranker
automerging_engine = RetrieverQueryEngine.from_args(
retriever, node_postprocessors=[rerank]
)
return automerging_engine
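# Hypothetical usage of the auto-merging pair above (added for clarity, not in the
# original script); `documents` and `llm` are assumed to be prepared as in the
# sentence-window sketch near the top of this file.
#
#   automerging_index = build_automerging_index(documents, llm, save_dir="merging_index")
#   automerging_engine = get_automerging_query_engine(automerging_index, similarity_top_k=12, rerank_top_n=6)
#   print(automerging_engine.query("What is the document about?"))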
############################################################################## Function 5 ###########################################################
def get_prebuilt_trulens_recorder(query_engine, app_id):
# Feedback functions
# Answer Relevance
provider = fOpenAI()
f_qa_relevance = Feedback(
provider.relevance_with_cot_reasons,
name="Answer Relevance"
).on_input_output()
# Context Relevance
context_selection = TruLlama.select_source_nodes().node.text
f_qs_relevance = (
Feedback(provider.qs_relevance,
name="Context Relevance")
.on_input()
.on(context_selection)
.aggregate(np.mean)
)
# Groundedness
grounded = Groundedness(groundedness_provider=provider)
f_groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons,
name="Groundedness"
)
.on(context_selection)
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
tru_recorder = TruLlama(
query_engine,
app_id=app_id,
feedbacks = [
f_qa_relevance,
f_qs_relevance,
f_groundedness
]
)
return tru_recorder
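# Hypothetical evaluation run (added for clarity, not part of the original script).
# It assumes `sentence_engine` from the sketch near the top of this file and uses the
# usual trulens_eval recording pattern; exact Tru() reporting calls may vary by version.
#
#   from trulens_eval import Tru
#
#   tru = Tru()
#   recorder = get_prebuilt_trulens_recorder(sentence_engine, app_id="sentence-window")
#   with recorder as recording:
#       sentence_engine.query("How was the model evaluated?")
#   print(tru.get_leaderboard(app_ids=["sentence-window"]))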
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.AutoMergingRetriever",
"llama_index.node_parser.HierarchicalNodeParser.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.indices.postprocessor.SentenceTransformerRerank",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.get_leaf_nodes",
"llama_index.StorageContext.from_defaults",
"llama_index.query_engine.RetrieverQueryEngine.from_args",
"llama_index.indices.postprocessor.MetadataReplacementPostProcessor"
] | [((1446, 1596), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': 'sentence_window_size', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=sentence_window_size,\n window_metadata_key='window', original_text_metadata_key='original_text')\n", (1484, 1596), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((1647, 1739), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model, node_parser=\n node_parser)\n', (1675, 1739), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2493, 2555), 'llama_index.indices.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (2525, 2555), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((2569, 2646), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (2594, 2646), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((3368, 3429), 'llama_index.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': 'chunk_sizes'}), '(chunk_sizes=chunk_sizes)\n', (3404, 3429), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((3591, 3612), 'llama_index.node_parser.get_leaf_nodes', 'get_leaf_nodes', (['nodes'], {}), '(nodes)\n', (3605, 3612), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((3706, 3768), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (3734, 3768), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((3944, 3974), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (3972, 3974), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4938, 5027), 'llama_index.retrievers.AutoMergingRetriever', 'AutoMergingRetriever', (['base_retriever', 'automerging_index.storage_context'], {'verbose': '(True)'}), '(base_retriever, automerging_index.storage_context,\n verbose=True)\n', (4958, 5027), False, 'from llama_index.retrievers import AutoMergingRetriever\n'), ((5128, 5205), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (5153, 5205), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((5322, 5393), 'llama_index.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {'node_postprocessors': '[rerank]'}), '(retriever, 
node_postprocessors=[rerank])\n', (5352, 5393), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((5711, 5720), 'trulens_eval.OpenAI', 'fOpenAI', ([], {}), '()\n', (5718, 5720), True, 'from trulens_eval import OpenAI as fOpenAI\n'), ((6162, 6206), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'provider'}), '(groundedness_provider=provider)\n', (6174, 6206), False, 'from trulens_eval.feedback import Groundedness\n'), ((6488, 6589), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': '[f_qa_relevance, f_qs_relevance, f_groundedness]'}), '(query_engine, app_id=app_id, feedbacks=[f_qa_relevance,\n f_qs_relevance, f_groundedness])\n', (6496, 6589), False, 'from trulens_eval import Feedback, TruLlama\n'), ((1777, 1801), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1791, 1801), False, 'import os\n'), ((1828, 1904), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'sentence_context'}), '(documents, service_context=sentence_context)\n', (1859, 1904), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4037, 4061), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4051, 4061), False, 'import os\n'), ((4091, 4189), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['leaf_nodes'], {'storage_context': 'storage_context', 'service_context': 'merging_context'}), '(leaf_nodes, storage_context=storage_context,\n service_context=merging_context)\n', (4107, 4189), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2068, 2118), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (2096, 2118), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4356, 4406), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (4384, 4406), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((5743, 5813), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasons'], {'name': '"""Answer Relevance"""'}), "(provider.relevance_with_cot_reasons, name='Answer Relevance')\n", (5751, 5813), False, 'from trulens_eval import Feedback, TruLlama\n'), ((5903, 5933), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (5931, 5933), False, 'from trulens_eval import Feedback, TruLlama\n'), ((5972, 6029), 'trulens_eval.Feedback', 'Feedback', (['provider.qs_relevance'], {'name': '"""Context Relevance"""'}), "(provider.qs_relevance, name='Context Relevance')\n", (5980, 6029), False, 'from trulens_eval import Feedback, TruLlama\n'), ((6239, 6316), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, name='Groundedness')\n", (6247, 6316), False, 'from trulens_eval import Feedback, TruLlama\n')] |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from sqlalchemy import make_url
from llama_index.vector_stores.postgres import PGVectorStore
# from llama_index.llms.llama_cpp import LlamaCPP
import psycopg2
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
from llama_index.core.schema import NodeWithScore
from typing import Optional
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import QueryBundle
from llama_index.core.retrievers import BaseRetriever
from typing import Any, List
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.vector_stores import VectorStoreQuery
import argparse
def load_vector_database(username, password):
db_name = "example_db"
host = "localhost"
password = password
port = "5432"
user = username
# conn = psycopg2.connect(connection_string)
conn = psycopg2.connect(
dbname="postgres",
host=host,
password=password,
port=port,
user=user,
)
conn.autocommit = True
with conn.cursor() as c:
c.execute(f"DROP DATABASE IF EXISTS {db_name}")
c.execute(f"CREATE DATABASE {db_name}")
vector_store = PGVectorStore.from_params(
database=db_name,
host=host,
password=password,
port=port,
user=user,
table_name="llama2_paper",
        embed_dim=384,  # embedding dimension of BAAI/bge-small-en
)
return vector_store
def load_data(data_path):
loader = PyMuPDFReader()
documents = loader.load(file_path=data_path)
text_parser = SentenceSplitter(
chunk_size=1024,
# separator=" ",
)
text_chunks = []
    # maintain relationship with source doc index, to help inject doc metadata later
doc_idxs = []
for doc_idx, doc in enumerate(documents):
cur_text_chunks = text_parser.split_text(doc.text)
text_chunks.extend(cur_text_chunks)
doc_idxs.extend([doc_idx] * len(cur_text_chunks))
from llama_index.core.schema import TextNode
nodes = []
for idx, text_chunk in enumerate(text_chunks):
node = TextNode(
text=text_chunk,
)
src_doc = documents[doc_idxs[idx]]
node.metadata = src_doc.metadata
nodes.append(node)
return nodes
class VectorDBRetriever(BaseRetriever):
"""Retriever over a postgres vector store."""
def __init__(
self,
vector_store: PGVectorStore,
embed_model: Any,
query_mode: str = "default",
similarity_top_k: int = 2,
) -> None:
"""Init params."""
self._vector_store = vector_store
self._embed_model = embed_model
self._query_mode = query_mode
self._similarity_top_k = similarity_top_k
super().__init__()
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve."""
query_embedding = self._embed_model.get_query_embedding(
query_bundle.query_str
)
vector_store_query = VectorStoreQuery(
query_embedding=query_embedding,
similarity_top_k=self._similarity_top_k,
mode=self._query_mode,
)
query_result = self._vector_store.query(vector_store_query)
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append(NodeWithScore(node=node, score=score))
return nodes_with_scores
def completion_to_prompt(completion):
return f"<|system|>\n</s>\n<|user|>\n{completion}</s>\n<|assistant|>\n"
# Transform a list of chat messages into zephyr-specific input
def messages_to_prompt(messages):
prompt = ""
for message in messages:
if message.role == "system":
prompt += f"<|system|>\n{message.content}</s>\n"
elif message.role == "user":
prompt += f"<|user|>\n{message.content}</s>\n"
elif message.role == "assistant":
prompt += f"<|assistant|>\n{message.content}</s>\n"
# ensure we start with a system prompt, insert blank if needed
if not prompt.startswith("<|system|>\n"):
prompt = "<|system|>\n</s>\n" + prompt
# add final assistant prompt
prompt = prompt + "<|assistant|>\n"
return prompt
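# Worked example (added for clarity, not in the original script): for the messages
# [system: "You are a helpful assistant.", user: "Summarise Llama 2."] the function
# above returns the zephyr-style prompt
#
#   <|system|>
#   You are a helpful assistant.</s>
#   <|user|>
#   Summarise Llama 2.</s>
#   <|assistant|>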
def main(args):
embed_model = HuggingFaceEmbedding(model_name=args.embedding_model_path)
# Use custom LLM in BigDL
from bigdl.llm.llamaindex.llms import BigdlLLM
llm = BigdlLLM(
model_name=args.model_path,
tokenizer_name=args.model_path,
context_window=512,
max_new_tokens=args.n_predict,
generate_kwargs={"temperature": 0.7, "do_sample": False},
model_kwargs={},
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
device_map="xpu",
)
vector_store = load_vector_database(username=args.user, password=args.password)
nodes = load_data(data_path=args.data)
for node in nodes:
node_embedding = embed_model.get_text_embedding(
node.get_content(metadata_mode="all")
)
node.embedding = node_embedding
vector_store.add(nodes)
# query_str = "Can you tell me about the key concepts for safety finetuning"
query_str = "Explain about the training data for Llama 2"
query_embedding = embed_model.get_query_embedding(query_str)
# construct vector store query
query_mode = "default"
# query_mode = "sparse"
# query_mode = "hybrid"
vector_store_query = VectorStoreQuery(
query_embedding=query_embedding, similarity_top_k=2, mode=query_mode
)
# returns a VectorStoreQueryResult
query_result = vector_store.query(vector_store_query)
# print("Retrieval Results: ")
# print(query_result.nodes[0].get_content())
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append(NodeWithScore(node=node, score=score))
retriever = VectorDBRetriever(
vector_store, embed_model, query_mode="default", similarity_top_k=1
)
query_engine = RetrieverQueryEngine.from_args(retriever, llm=llm)
# query_str = "How does Llama 2 perform compared to other open-source models?"
query_str = args.question
response = query_engine.query(query_str)
print("------------RESPONSE GENERATION---------------------")
print(str(response))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='LlamaIndex BigdlLLM Example')
parser.add_argument('-m','--model-path', type=str, required=True,
help='the path to transformers model')
parser.add_argument('-q', '--question', type=str, default='How does Llama 2 perform compared to other open-source models?',
                        help='question you want to ask.')
parser.add_argument('-d','--data',type=str, default='./data/llama2.pdf',
help="the data used during retrieval")
parser.add_argument('-u', '--user', type=str, required=True,
help="user name in the database postgres")
parser.add_argument('-p','--password', type=str, required=True,
help="the password of the user in the database")
parser.add_argument('-e','--embedding-model-path',default="BAAI/bge-small-en",
help="the path to embedding model path")
parser.add_argument('-n','--n-predict', type=int, default=32,
help='max number of predict tokens')
args = parser.parse_args()
main(args) | [
"llama_index.vector_stores.postgres.PGVectorStore.from_params",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.schema.TextNode",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.schema.NodeWithScore",
"llama_index.core.vector_stores.VectorStoreQuery",
"llama_index.core.query_engine.RetrieverQueryEngine.from_args",
"llama_index.readers.file.PyMuPDFReader"
] | [((1521, 1612), 'psycopg2.connect', 'psycopg2.connect', ([], {'dbname': '"""postgres"""', 'host': 'host', 'password': 'password', 'port': 'port', 'user': 'user'}), "(dbname='postgres', host=host, password=password, port=port,\n user=user)\n", (1537, 1612), False, 'import psycopg2\n'), ((1841, 1982), 'llama_index.vector_stores.postgres.PGVectorStore.from_params', 'PGVectorStore.from_params', ([], {'database': 'db_name', 'host': 'host', 'password': 'password', 'port': 'port', 'user': 'user', 'table_name': '"""llama2_paper"""', 'embed_dim': '(384)'}), "(database=db_name, host=host, password=password,\n port=port, user=user, table_name='llama2_paper', embed_dim=384)\n", (1866, 1982), False, 'from llama_index.vector_stores.postgres import PGVectorStore\n'), ((2137, 2152), 'llama_index.readers.file.PyMuPDFReader', 'PyMuPDFReader', ([], {}), '()\n', (2150, 2152), False, 'from llama_index.readers.file import PyMuPDFReader\n'), ((2222, 2255), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(1024)'}), '(chunk_size=1024)\n', (2238, 2255), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((5116, 5174), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'args.embedding_model_path'}), '(model_name=args.embedding_model_path)\n', (5136, 5174), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((5271, 5583), 'bigdl.llm.llamaindex.llms.BigdlLLM', 'BigdlLLM', ([], {'model_name': 'args.model_path', 'tokenizer_name': 'args.model_path', 'context_window': '(512)', 'max_new_tokens': 'args.n_predict', 'generate_kwargs': "{'temperature': 0.7, 'do_sample': False}", 'model_kwargs': '{}', 'messages_to_prompt': 'messages_to_prompt', 'completion_to_prompt': 'completion_to_prompt', 'device_map': '"""xpu"""'}), "(model_name=args.model_path, tokenizer_name=args.model_path,\n context_window=512, max_new_tokens=args.n_predict, generate_kwargs={\n 'temperature': 0.7, 'do_sample': False}, model_kwargs={},\n messages_to_prompt=messages_to_prompt, completion_to_prompt=\n completion_to_prompt, device_map='xpu')\n", (5279, 5583), False, 'from bigdl.llm.llamaindex.llms import BigdlLLM\n'), ((6353, 6444), 'llama_index.core.vector_stores.VectorStoreQuery', 'VectorStoreQuery', ([], {'query_embedding': 'query_embedding', 'similarity_top_k': '(2)', 'mode': 'query_mode'}), '(query_embedding=query_embedding, similarity_top_k=2, mode=\n query_mode)\n', (6369, 6444), False, 'from llama_index.core.vector_stores import VectorStoreQuery\n'), ((7083, 7133), 'llama_index.core.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {'llm': 'llm'}), '(retriever, llm=llm)\n', (7113, 7133), False, 'from llama_index.core.query_engine import RetrieverQueryEngine\n'), ((7428, 7494), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""LlamaIndex BigdlLLM Example"""'}), "(description='LlamaIndex BigdlLLM Example')\n", (7451, 7494), False, 'import argparse\n'), ((2759, 2784), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'text_chunk'}), '(text=text_chunk)\n', (2767, 2784), False, 'from llama_index.core.schema import TextNode\n'), ((3682, 3800), 'llama_index.core.vector_stores.VectorStoreQuery', 'VectorStoreQuery', ([], {'query_embedding': 'query_embedding', 'similarity_top_k': 'self._similarity_top_k', 'mode': 'self._query_mode'}), '(query_embedding=query_embedding, similarity_top_k=self.\n _similarity_top_k, 
mode=self._query_mode)\n', (3698, 3800), False, 'from llama_index.core.vector_stores import VectorStoreQuery\n'), ((6893, 6930), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'node', 'score': 'score'}), '(node=node, score=score)\n', (6906, 6930), False, 'from llama_index.core.schema import NodeWithScore\n'), ((4191, 4228), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'node', 'score': 'score'}), '(node=node, score=score)\n', (4204, 4228), False, 'from llama_index.core.schema import NodeWithScore\n')] |
"""Configuration."""
import streamlit as st
import os
### DEFINE BUILDER_LLM #####
## Uncomment the LLM you want to use to construct the meta agent
## OpenAI
from llama_index.llms import OpenAI
# set OpenAI Key - use Streamlit secrets
os.environ["OPENAI_API_KEY"] = st.secrets.openai_key
# load LLM
BUILDER_LLM = OpenAI(model="gpt-4-1106-preview")
# # Anthropic (make sure you `pip install anthropic`)
# from llama_index.llms import Anthropic
# # set Anthropic key
# os.environ["ANTHROPIC_API_KEY"] = st.secrets.anthropic_key
# BUILDER_LLM = Anthropic()
| [
"llama_index.llms.OpenAI"
] | [((316, 350), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (322, 350), False, 'from llama_index.llms import OpenAI\n')] |
"""Configuration."""
import streamlit as st
import os
### DEFINE BUILDER_LLM #####
## Uncomment the LLM you want to use to construct the meta agent
## OpenAI
from llama_index.llms import OpenAI
# set OpenAI Key - use Streamlit secrets
os.environ["OPENAI_API_KEY"] = st.secrets.openai_key
# load LLM
BUILDER_LLM = OpenAI(model="gpt-4-1106-preview")
# # Anthropic (make sure you `pip install anthropic`)
# from llama_index.llms import Anthropic
# # set Anthropic key
# os.environ["ANTHROPIC_API_KEY"] = st.secrets.anthropic_key
# BUILDER_LLM = Anthropic()
| [
"llama_index.llms.OpenAI"
] | [((316, 350), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (322, 350), False, 'from llama_index.llms import OpenAI\n')] |
import os
# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""
from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext
from langchain.llms.openai import OpenAI
from llama_index import StorageContext, load_index_from_storage
base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
# This example uses gpt-3.5-turbo by default; feel free to change if desired
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path))
# Configure prompt parameters and initialise helper
max_input_size = 512
num_output = 256
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
# Initialise the service context with the LLM predictor and prompt helper
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir='./storage')
# load index
index = load_index_from_storage(storage_context, service_context=service_context, )
documents = SimpleDirectoryReader('data').load_data()
index.refresh(documents)
index.storage_context.persist(persist_dir="./storage") | [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.PromptHelper",
"llama_index.load_index_from_storage"
] | [((403, 464), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', '"""http://localhost:8080/v1"""'], {}), "('OPENAI_API_BASE', 'http://localhost:8080/v1')\n", (417, 464), False, 'import os\n'), ((788, 847), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (800, 847), False, 'from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext\n'), ((910, 1001), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (938, 1001), False, 'from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext\n'), ((1042, 1095), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1070, 1095), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((1118, 1191), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1141, 1191), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((579, 655), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'openai_api_base': 'base_path'}), "(temperature=0, model_name='gpt-3.5-turbo', openai_api_base=base_path)\n", (585, 655), False, 'from langchain.llms.openai import OpenAI\n'), ((1213, 1242), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (1234, 1242), False, 'from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext\n')] |
import os
from llama_index import SimpleDirectoryReader
from sqlalchemy.orm import Session
from superagi.config.config import get_config
from superagi.helper.resource_helper import ResourceHelper
from superagi.lib.logger import logger
from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory
from superagi.types.model_source_types import ModelSourceType
from superagi.types.vector_store_types import VectorStoreType
from superagi.models.agent import Agent
class ResourceManager:
"""
Resource Manager handles creation of resources and saving them to the vector store.
:param agent_id: The agent id to use when saving resources to the vector store.
"""
def __init__(self, agent_id: str = None):
self.agent_id = agent_id
def create_llama_document(self, file_path: str):
"""
Creates a document index from a given file path.
:param file_path: The file path to create the document index from.
:return: A list of documents.
"""
if file_path is None:
raise Exception("file_path must be provided")
if os.path.exists(file_path):
documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
return documents
def create_llama_document_s3(self, file_path: str):
"""
Creates a document index from a given file path.
:param file_path: The file path to create the document index from.
:return: A list of documents.
"""
if file_path is None:
raise Exception("file_path must be provided")
temporary_file_path = ""
try:
import boto3
s3 = boto3.client(
's3',
aws_access_key_id=get_config("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=get_config("AWS_SECRET_ACCESS_KEY"),
)
bucket_name = get_config("BUCKET_NAME")
file = s3.get_object(Bucket=bucket_name, Key=file_path)
file_name = file_path.split("/")[-1]
save_directory = "/"
temporary_file_path = save_directory + file_name
with open(temporary_file_path, "wb") as f:
contents = file['Body'].read()
f.write(contents)
documents = SimpleDirectoryReader(input_files=[temporary_file_path]).load_data()
return documents
except Exception as e:
logger.error("superagi/resource_manager/resource_manager.py - create_llama_document_s3 threw : ", e)
finally:
if os.path.exists(temporary_file_path):
os.remove(temporary_file_path)
def save_document_to_vector_store(self, documents: list, resource_id: str, mode_api_key: str = None,
model_source: str = ""):
"""
Saves a document to the vector store.
:param documents: The documents to save to the vector store.
:param resource_id: The resource id to use when saving the documents to the vector store.
        :param mode_api_key: The model API key to use when creating embeddings for the vector store.
"""
from llama_index import VectorStoreIndex, StorageContext
if ModelSourceType.GooglePalm.value in model_source or ModelSourceType.Replicate.value in model_source:
logger.info("Resource embedding not supported for Google Palm..")
return
import openai
openai.api_key = get_config("OPENAI_API_KEY") or mode_api_key
os.environ["OPENAI_API_KEY"] = get_config("OPENAI_API_KEY", "") or mode_api_key
for docs in documents:
if docs.metadata is None:
docs.metadata = {}
docs.metadata["agent_id"] = str(self.agent_id)
docs.metadata["resource_id"] = resource_id
vector_store = None
storage_context = None
vector_store_name = VectorStoreType.get_vector_store_type(get_config("RESOURCE_VECTOR_STORE") or "Redis")
vector_store_index_name = get_config("RESOURCE_VECTOR_STORE_INDEX_NAME") or "super-agent-index"
try:
vector_store = LlamaVectorStoreFactory(vector_store_name, vector_store_index_name).get_vector_store()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
except ValueError as e:
logger.error(f"Vector store not found{e}")
try:
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
index.set_index_id(f'Agent {self.agent_id}')
except Exception as e:
logger.error("save_document_to_vector_store - unable to create documents from vector", e)
# persisting the data in case of redis
if vector_store_name == VectorStoreType.REDIS:
vector_store.persist(persist_path="")
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((1132, 1157), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1146, 1157), False, 'import os\n'), ((1917, 1942), 'superagi.config.config.get_config', 'get_config', (['"""BUCKET_NAME"""'], {}), "('BUCKET_NAME')\n", (1927, 1942), False, 'from superagi.config.config import get_config\n'), ((2589, 2624), 'os.path.exists', 'os.path.exists', (['temporary_file_path'], {}), '(temporary_file_path)\n', (2603, 2624), False, 'import os\n'), ((3367, 3432), 'superagi.lib.logger.logger.info', 'logger.info', (['"""Resource embedding not supported for Google Palm.."""'], {}), "('Resource embedding not supported for Google Palm..')\n", (3378, 3432), False, 'from superagi.lib.logger import logger\n'), ((3499, 3527), 'superagi.config.config.get_config', 'get_config', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (3509, 3527), False, 'from superagi.config.config import get_config\n'), ((3583, 3615), 'superagi.config.config.get_config', 'get_config', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (3593, 3615), False, 'from superagi.config.config import get_config\n'), ((4057, 4103), 'superagi.config.config.get_config', 'get_config', (['"""RESOURCE_VECTOR_STORE_INDEX_NAME"""'], {}), "('RESOURCE_VECTOR_STORE_INDEX_NAME')\n", (4067, 4103), False, 'from superagi.config.config import get_config\n'), ((4284, 4339), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (4312, 4339), False, 'from llama_index import VectorStoreIndex, StorageContext\n'), ((4460, 4535), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context'}), '(documents, storage_context=storage_context)\n', (4491, 4535), False, 'from llama_index import VectorStoreIndex, StorageContext\n'), ((2456, 2566), 'superagi.lib.logger.logger.error', 'logger.error', (['"""superagi/resource_manager/resource_manager.py - create_llama_document_s3 threw : """', 'e'], {}), "(\n 'superagi/resource_manager/resource_manager.py - create_llama_document_s3 threw : '\n , e)\n", (2468, 2566), False, 'from superagi.lib.logger import logger\n'), ((2642, 2672), 'os.remove', 'os.remove', (['temporary_file_path'], {}), '(temporary_file_path)\n', (2651, 2672), False, 'import os\n'), ((3975, 4010), 'superagi.config.config.get_config', 'get_config', (['"""RESOURCE_VECTOR_STORE"""'], {}), "('RESOURCE_VECTOR_STORE')\n", (3985, 4010), False, 'from superagi.config.config import get_config\n'), ((4384, 4426), 'superagi.lib.logger.logger.error', 'logger.error', (['f"""Vector store not found{e}"""'], {}), "(f'Vector store not found{e}')\n", (4396, 4426), False, 'from superagi.lib.logger import logger\n'), ((4636, 4735), 'superagi.lib.logger.logger.error', 'logger.error', (['"""save_document_to_vector_store - unable to create documents from vector"""', 'e'], {}), "(\n 'save_document_to_vector_store - unable to create documents from vector', e\n )\n", (4648, 4735), False, 'from superagi.lib.logger import logger\n'), ((1183, 1229), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]'}), '(input_files=[file_path])\n', (1204, 1229), False, 'from llama_index import SimpleDirectoryReader\n'), ((1769, 1800), 'superagi.config.config.get_config', 'get_config', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (1779, 1800), False, 'from superagi.config.config import get_config\n'), ((1840, 1875), 
'superagi.config.config.get_config', 'get_config', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (1850, 1875), False, 'from superagi.config.config import get_config\n'), ((2315, 2371), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[temporary_file_path]'}), '(input_files=[temporary_file_path])\n', (2336, 2371), False, 'from llama_index import SimpleDirectoryReader\n'), ((4167, 4234), 'superagi.resource_manager.llama_vector_store_factory.LlamaVectorStoreFactory', 'LlamaVectorStoreFactory', (['vector_store_name', 'vector_store_index_name'], {}), '(vector_store_name, vector_store_index_name)\n', (4190, 4234), False, 'from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory\n')] |