code (stringlengths 161-233k) | apis (sequencelengths 1-24) | extract_api (stringlengths 162-68.5k)
---|---|---|
"""FastAPI app creation, logger configuration and main API routes."""
import logging
from fastapi import Depends, FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from injector import Injector
from llama_index.core.callbacks import CallbackManager
from llama_index.core.callbacks.global_handlers import create_global_handler
from llama_index.core.settings import Settings as LlamaIndexSettings
from private_gpt.server.chat.chat_router import chat_router
from private_gpt.server.chunks.chunks_router import chunks_router
from private_gpt.server.completions.completions_router import completions_router
from private_gpt.server.embeddings.embeddings_router import embeddings_router
from private_gpt.server.health.health_router import health_router
from private_gpt.server.ingest.ingest_router import ingest_router
from private_gpt.settings.settings import Settings
logger = logging.getLogger(__name__)
def create_app(root_injector: Injector) -> FastAPI:
# Start the API
async def bind_injector_to_request(request: Request) -> None:
request.state.injector = root_injector
app = FastAPI(dependencies=[Depends(bind_injector_to_request)])
app.include_router(completions_router)
app.include_router(chat_router)
app.include_router(chunks_router)
app.include_router(ingest_router)
app.include_router(embeddings_router)
app.include_router(health_router)
# Add LlamaIndex simple observability
global_handler = create_global_handler("simple")
LlamaIndexSettings.callback_manager = CallbackManager([global_handler])
settings = root_injector.get(Settings)
if settings.server.cors.enabled:
logger.debug("Setting up CORS middleware")
app.add_middleware(
CORSMiddleware,
allow_credentials=settings.server.cors.allow_credentials,
allow_origins=settings.server.cors.allow_origins,
allow_origin_regex=settings.server.cors.allow_origin_regex,
allow_methods=settings.server.cors.allow_methods,
allow_headers=settings.server.cors.allow_headers,
)
if settings.ui.enabled:
logger.debug("Importing the UI module")
try:
from private_gpt.ui.ui import PrivateGptUi
except ImportError as e:
raise ImportError(
"UI dependencies not found, install with `poetry install --extras ui`"
) from e
ui = root_injector.get(PrivateGptUi)
ui.mount_in_app(app, settings.ui.path)
return app
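
# --- Usage sketch (illustrative; not part of the original module) ---
# Serving the app above requires an `Injector` configured with the project's
# bindings for `Settings` and the routers' dependencies (not shown here). Roughly:
#
#   from injector import Injector
#   import uvicorn
#
#   root_injector = Injector()  # placeholder: real bindings/modules omitted
#   app = create_app(root_injector)
#   uvicorn.run(app, host="0.0.0.0", port=8001)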
| [
"llama_index.core.callbacks.CallbackManager",
"llama_index.core.callbacks.global_handlers.create_global_handler"
] | [((894, 921), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (911, 921), False, 'import logging\n'), ((1479, 1510), 'llama_index.core.callbacks.global_handlers.create_global_handler', 'create_global_handler', (['"""simple"""'], {}), "('simple')\n", (1500, 1510), False, 'from llama_index.core.callbacks.global_handlers import create_global_handler\n'), ((1553, 1586), 'llama_index.core.callbacks.CallbackManager', 'CallbackManager', (['[global_handler]'], {}), '([global_handler])\n', (1568, 1586), False, 'from llama_index.core.callbacks import CallbackManager\n'), ((1143, 1176), 'fastapi.Depends', 'Depends', (['bind_injector_to_request'], {}), '(bind_injector_to_request)\n', (1150, 1176), False, 'from fastapi import Depends, FastAPI, Request\n')] |
import json
import os
from typing import Dict, List, Optional, Type
from loguru import logger
from datastore.datastore import DataStore
from models.models import (
DocumentChunk,
DocumentChunkMetadata,
DocumentChunkWithScore,
DocumentMetadataFilter,
Query,
QueryResult,
QueryWithEmbedding,
)
from llama_index.indices.base import BaseGPTIndex
from llama_index.indices.vector_store.base import GPTVectorStoreIndex
from llama_index.indices.query.schema import QueryBundle
from llama_index.response.schema import Response
from llama_index.data_structs.node_v2 import Node, DocumentRelationship, NodeWithScore
from llama_index.indices.registry import INDEX_STRUCT_TYPE_TO_INDEX_CLASS
from llama_index.data_structs.struct_type import IndexStructType
from llama_index.indices.response.builder import ResponseMode
INDEX_STRUCT_TYPE_STR = os.environ.get(
"LLAMA_INDEX_TYPE", IndexStructType.SIMPLE_DICT.value
)
INDEX_JSON_PATH = os.environ.get("LLAMA_INDEX_JSON_PATH", None)
QUERY_KWARGS_JSON_PATH = os.environ.get("LLAMA_QUERY_KWARGS_JSON_PATH", None)
RESPONSE_MODE = os.environ.get("LLAMA_RESPONSE_MODE", ResponseMode.NO_TEXT.value)
EXTERNAL_VECTOR_STORE_INDEX_STRUCT_TYPES = [
IndexStructType.DICT,
IndexStructType.WEAVIATE,
IndexStructType.PINECONE,
IndexStructType.QDRANT,
IndexStructType.CHROMA,
IndexStructType.VECTOR_STORE,
]
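
# Example environment configuration (illustrative values; the variable names
# match the os.environ lookups above):
#
#   export LLAMA_INDEX_TYPE=simple_dict
#   export LLAMA_INDEX_JSON_PATH=/path/to/index.json
#   export LLAMA_QUERY_KWARGS_JSON_PATH=/path/to/query_kwargs.json
#   export LLAMA_RESPONSE_MODE=no_text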
def _create_or_load_index(
index_type_str: Optional[str] = None,
index_json_path: Optional[str] = None,
index_type_to_index_cls: Optional[dict[str, Type[BaseGPTIndex]]] = None,
) -> BaseGPTIndex:
"""Create or load index from json path."""
index_json_path = index_json_path or INDEX_JSON_PATH
index_type_to_index_cls = (
index_type_to_index_cls or INDEX_STRUCT_TYPE_TO_INDEX_CLASS
)
index_type_str = index_type_str or INDEX_STRUCT_TYPE_STR
index_type = IndexStructType(index_type_str)
if index_type not in index_type_to_index_cls:
raise ValueError(f"Unknown index type: {index_type}")
if index_type in EXTERNAL_VECTOR_STORE_INDEX_STRUCT_TYPES:
raise ValueError("Please use vector store directly.")
index_cls = index_type_to_index_cls[index_type]
if index_json_path is None:
return index_cls(nodes=[]) # Create empty index
else:
return index_cls.load_from_disk(index_json_path) # Load index from disk
def _create_or_load_query_kwargs(
query_kwargs_json_path: Optional[str] = None,
) -> Optional[dict]:
"""Create or load query kwargs from json path."""
query_kwargs_json_path = query_kwargs_json_path or QUERY_KWARGS_JSON_PATH
    query_kwargs: Optional[dict] = None
    if query_kwargs_json_path is not None:
        with open(query_kwargs_json_path, "r") as f:
            query_kwargs = json.load(f)
    return query_kwargs
def _doc_chunk_to_node(doc_chunk: DocumentChunk, source_doc_id: str) -> Node:
"""Convert document chunk to Node"""
return Node(
doc_id=doc_chunk.id,
text=doc_chunk.text,
embedding=doc_chunk.embedding,
extra_info=doc_chunk.metadata.dict(),
relationships={DocumentRelationship.SOURCE: source_doc_id},
)
def _query_with_embedding_to_query_bundle(query: QueryWithEmbedding) -> QueryBundle:
return QueryBundle(
query_str=query.query,
embedding=query.embedding,
)
def _source_node_to_doc_chunk_with_score(
node_with_score: NodeWithScore,
) -> DocumentChunkWithScore:
node = node_with_score.node
if node.extra_info is not None:
metadata = DocumentChunkMetadata(**node.extra_info)
else:
metadata = DocumentChunkMetadata()
return DocumentChunkWithScore(
id=node.doc_id,
text=node.text,
score=node_with_score.score if node_with_score.score is not None else 1.0,
metadata=metadata,
)
def _response_to_query_result(
response: Response, query: QueryWithEmbedding
) -> QueryResult:
results = [
_source_node_to_doc_chunk_with_score(node) for node in response.source_nodes
]
return QueryResult(
query=query.query,
results=results,
)
class LlamaDataStore(DataStore):
def __init__(
self, index: Optional[BaseGPTIndex] = None, query_kwargs: Optional[dict] = None
):
self._index = index or _create_or_load_index()
self._query_kwargs = query_kwargs or _create_or_load_query_kwargs()
async def _upsert(self, chunks: Dict[str, List[DocumentChunk]]) -> List[str]:
"""
        Takes in a dict mapping document ids to lists of document chunks and inserts them into the index.
Return a list of document ids.
"""
doc_ids = []
for doc_id, doc_chunks in chunks.items():
logger.debug(f"Upserting {doc_id} with {len(doc_chunks)} chunks")
nodes = [
_doc_chunk_to_node(doc_chunk=doc_chunk, source_doc_id=doc_id)
for doc_chunk in doc_chunks
]
self._index.insert_nodes(nodes)
doc_ids.append(doc_id)
return doc_ids
async def _query(
self,
queries: List[QueryWithEmbedding],
) -> List[QueryResult]:
"""
Takes in a list of queries with embeddings and filters and
returns a list of query results with matching document chunks and scores.
"""
query_result_all = []
for query in queries:
if query.filter is not None:
logger.warning("Filters are not supported yet, ignoring for now.")
query_bundle = _query_with_embedding_to_query_bundle(query)
# Setup query kwargs
if self._query_kwargs is not None:
query_kwargs = self._query_kwargs
else:
query_kwargs = {}
# TODO: support top_k for other indices
if isinstance(self._index, GPTVectorStoreIndex):
query_kwargs["similarity_top_k"] = query.top_k
response = await self._index.aquery(
query_bundle, response_mode=RESPONSE_MODE, **query_kwargs
)
query_result = _response_to_query_result(response, query)
query_result_all.append(query_result)
return query_result_all
async def delete(
self,
ids: Optional[List[str]] = None,
filter: Optional[DocumentMetadataFilter] = None,
delete_all: Optional[bool] = None,
) -> bool:
"""
Removes vectors by ids, filter, or everything in the datastore.
Returns whether the operation was successful.
"""
if delete_all:
logger.warning("Delete all not supported yet.")
return False
if filter is not None:
logger.warning("Filters are not supported yet.")
return False
if ids is not None:
for id_ in ids:
try:
self._index.delete(id_)
except NotImplementedError:
                    # NOTE: some indices do not support delete yet.
logger.warning(f"{type(self._index)} does not support delete yet.")
return False
return True
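
# --- Usage sketch (illustrative; not part of the original module) ---
# The intended flow of the datastore above: upsert pre-embedded chunks, then
# query with an embedded query. Field names mirror the attributes accessed in
# this file; the concrete values (ids, 3-dimensional embeddings) are made up.
#
#   import asyncio
#
#   async def _demo() -> None:
#       store = LlamaDataStore()
#       chunk = DocumentChunk(
#           id="chunk-1",
#           text="LlamaIndex wraps vector stores behind a datastore interface.",
#           embedding=[0.1, 0.2, 0.3],
#           metadata=DocumentChunkMetadata(),
#       )
#       doc_ids = await store._upsert({"doc-1": [chunk]})
#       results = await store._query(
#           [QueryWithEmbedding(query="what is llamaindex?", embedding=[0.1, 0.2, 0.3], top_k=1)]
#       )
#       print(doc_ids, results)
#
#   asyncio.run(_demo())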
| [
"llama_index.data_structs.struct_type.IndexStructType",
"llama_index.indices.query.schema.QueryBundle"
] | [((860, 929), 'os.environ.get', 'os.environ.get', (['"""LLAMA_INDEX_TYPE"""', 'IndexStructType.SIMPLE_DICT.value'], {}), "('LLAMA_INDEX_TYPE', IndexStructType.SIMPLE_DICT.value)\n", (874, 929), False, 'import os\n'), ((954, 999), 'os.environ.get', 'os.environ.get', (['"""LLAMA_INDEX_JSON_PATH"""', 'None'], {}), "('LLAMA_INDEX_JSON_PATH', None)\n", (968, 999), False, 'import os\n'), ((1025, 1077), 'os.environ.get', 'os.environ.get', (['"""LLAMA_QUERY_KWARGS_JSON_PATH"""', 'None'], {}), "('LLAMA_QUERY_KWARGS_JSON_PATH', None)\n", (1039, 1077), False, 'import os\n'), ((1094, 1159), 'os.environ.get', 'os.environ.get', (['"""LLAMA_RESPONSE_MODE"""', 'ResponseMode.NO_TEXT.value'], {}), "('LLAMA_RESPONSE_MODE', ResponseMode.NO_TEXT.value)\n", (1108, 1159), False, 'import os\n'), ((1882, 1913), 'llama_index.data_structs.struct_type.IndexStructType', 'IndexStructType', (['index_type_str'], {}), '(index_type_str)\n', (1897, 1913), False, 'from llama_index.data_structs.struct_type import IndexStructType\n'), ((3268, 3329), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query.query', 'embedding': 'query.embedding'}), '(query_str=query.query, embedding=query.embedding)\n', (3279, 3329), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((3655, 3812), 'models.models.DocumentChunkWithScore', 'DocumentChunkWithScore', ([], {'id': 'node.doc_id', 'text': 'node.text', 'score': '(node_with_score.score if node_with_score.score is not None else 1.0)', 'metadata': 'metadata'}), '(id=node.doc_id, text=node.text, score=\n node_with_score.score if node_with_score.score is not None else 1.0,\n metadata=metadata)\n', (3677, 3812), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((4062, 4109), 'models.models.QueryResult', 'QueryResult', ([], {'query': 'query.query', 'results': 'results'}), '(query=query.query, results=results)\n', (4073, 4109), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((3549, 3589), 'models.models.DocumentChunkMetadata', 'DocumentChunkMetadata', ([], {}), '(**node.extra_info)\n', (3570, 3589), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((3619, 3642), 'models.models.DocumentChunkMetadata', 'DocumentChunkMetadata', ([], {}), '()\n', (3640, 3642), False, 'from models.models import DocumentChunk, DocumentChunkMetadata, DocumentChunkWithScore, DocumentMetadataFilter, Query, QueryResult, QueryWithEmbedding\n'), ((2779, 2791), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2788, 2791), False, 'import json\n'), ((6623, 6670), 'loguru.logger.warning', 'logger.warning', (['"""Delete all not supported yet."""'], {}), "('Delete all not supported yet.')\n", (6637, 6670), False, 'from loguru import logger\n'), ((6740, 6788), 'loguru.logger.warning', 'logger.warning', (['"""Filters are not supported yet."""'], {}), "('Filters are not supported yet.')\n", (6754, 6788), False, 'from loguru import logger\n'), ((5454, 5520), 'loguru.logger.warning', 'logger.warning', (['"""Filters are not supported yet, ignoring for now."""'], {}), "('Filters are not supported yet, ignoring for now.')\n", (5468, 5520), False, 'from loguru import logger\n')] |
import os
import weaviate
from llama_index.storage.storage_context import StorageContext
from llama_index.vector_stores import WeaviateVectorStore
from llama_index.query_engine.retriever_query_engine import RetrieverQueryEngine
from llama_index.callbacks.base import CallbackManager
from llama_index import (
LLMPredictor,
ServiceContext,
StorageContext,
VectorStoreIndex,
)
import chainlit as cl
from llama_index.llms import LocalAI
from llama_index.embeddings import HuggingFaceEmbedding
import yaml
# Load the configuration file
with open("config.yaml", "r") as ymlfile:
cfg = yaml.safe_load(ymlfile)
# Get the values from the configuration file or set the default values
temperature = cfg['localAI'].get('temperature', 0)
model_name = cfg['localAI'].get('modelName', "gpt-3.5-turbo")
api_base = cfg['localAI'].get('apiBase', "http://local-ai.default")
api_key = cfg['localAI'].get('apiKey', "stub")
streaming = cfg['localAI'].get('streaming', True)
weaviate_url = cfg['weviate'].get('url', "http://weviate.default")
index_name = cfg['weviate'].get('index', "AIChroma")
query_mode = cfg['query'].get('mode', "hybrid")
topK = cfg['query'].get('topK', 1)
alpha = cfg['query'].get('alpha', 0.0)
embed_model_name = cfg['embedding'].get('model', "BAAI/bge-small-en-v1.5")
chunk_size = cfg['query'].get('chunkSize', 1024)
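
# Illustrative config.yaml matching the keys read above (the values shown are
# the defaults used when a key is missing):
#
#   localAI:
#     temperature: 0
#     modelName: gpt-3.5-turbo
#     apiBase: http://local-ai.default
#     apiKey: stub
#     streaming: true
#   weviate:
#     url: http://weviate.default
#     index: AIChroma
#   query:
#     mode: hybrid
#     topK: 1
#     alpha: 0.0
#     chunkSize: 1024
#   embedding:
#     model: BAAI/bge-small-en-v1.5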
embed_model = HuggingFaceEmbedding(model_name=embed_model_name)
llm = LocalAI(temperature=temperature, model_name=model_name, api_base=api_base, api_key=api_key, streaming=streaming)
llm.globally_use_chat_completions = True
client = weaviate.Client(weaviate_url)
vector_store = WeaviateVectorStore(weaviate_client=client, index_name=index_name)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
@cl.on_chat_start
async def factory():
llm_predictor = LLMPredictor(
llm=llm
)
service_context = ServiceContext.from_defaults(embed_model=embed_model, callback_manager=CallbackManager([cl.LlamaIndexCallbackHandler()]), llm_predictor=llm_predictor, chunk_size=chunk_size)
index = VectorStoreIndex.from_vector_store(
vector_store,
storage_context=storage_context,
service_context=service_context
)
query_engine = index.as_query_engine(vector_store_query_mode=query_mode, similarity_top_k=topK, alpha=alpha, streaming=True)
cl.user_session.set("query_engine", query_engine)
@cl.on_message
async def main(message: cl.Message):
query_engine = cl.user_session.get("query_engine")
response = await cl.make_async(query_engine.query)(message.content)
response_message = cl.Message(content="")
for token in response.response_gen:
await response_message.stream_token(token=token)
if response.response_txt:
response_message.content = response.response_txt
await response_message.send()
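
# To launch the app (assuming this file is saved as app.py and config.yaml is
# present in the working directory): `chainlit run app.py -w`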
| [
"llama_index.LLMPredictor",
"llama_index.StorageContext.from_defaults",
"llama_index.vector_stores.WeaviateVectorStore",
"llama_index.llms.LocalAI",
"llama_index.VectorStoreIndex.from_vector_store",
"llama_index.embeddings.HuggingFaceEmbedding"
] | [((1360, 1409), 'llama_index.embeddings.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'embed_model_name'}), '(model_name=embed_model_name)\n', (1380, 1409), False, 'from llama_index.embeddings import HuggingFaceEmbedding\n'), ((1418, 1534), 'llama_index.llms.LocalAI', 'LocalAI', ([], {'temperature': 'temperature', 'model_name': 'model_name', 'api_base': 'api_base', 'api_key': 'api_key', 'streaming': 'streaming'}), '(temperature=temperature, model_name=model_name, api_base=api_base,\n api_key=api_key, streaming=streaming)\n', (1425, 1534), False, 'from llama_index.llms import LocalAI\n'), ((1582, 1611), 'weaviate.Client', 'weaviate.Client', (['weaviate_url'], {}), '(weaviate_url)\n', (1597, 1611), False, 'import weaviate\n'), ((1627, 1693), 'llama_index.vector_stores.WeaviateVectorStore', 'WeaviateVectorStore', ([], {'weaviate_client': 'client', 'index_name': 'index_name'}), '(weaviate_client=client, index_name=index_name)\n', (1646, 1693), False, 'from llama_index.vector_stores import WeaviateVectorStore\n'), ((1712, 1767), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (1740, 1767), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex\n'), ((604, 627), 'yaml.safe_load', 'yaml.safe_load', (['ymlfile'], {}), '(ymlfile)\n', (618, 627), False, 'import yaml\n'), ((1829, 1850), 'llama_index.LLMPredictor', 'LLMPredictor', ([], {'llm': 'llm'}), '(llm=llm)\n', (1841, 1850), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex\n'), ((2079, 2198), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['vector_store'], {'storage_context': 'storage_context', 'service_context': 'service_context'}), '(vector_store, storage_context=\n storage_context, service_context=service_context)\n', (2113, 2198), False, 'from llama_index import LLMPredictor, ServiceContext, StorageContext, VectorStoreIndex\n'), ((2359, 2408), 'chainlit.user_session.set', 'cl.user_session.set', (['"""query_engine"""', 'query_engine'], {}), "('query_engine', query_engine)\n", (2378, 2408), True, 'import chainlit as cl\n'), ((2482, 2517), 'chainlit.user_session.get', 'cl.user_session.get', (['"""query_engine"""'], {}), "('query_engine')\n", (2501, 2517), True, 'import chainlit as cl\n'), ((2614, 2636), 'chainlit.Message', 'cl.Message', ([], {'content': '""""""'}), "(content='')\n", (2624, 2636), True, 'import chainlit as cl\n'), ((2539, 2572), 'chainlit.make_async', 'cl.make_async', (['query_engine.query'], {}), '(query_engine.query)\n', (2552, 2572), True, 'import chainlit as cl\n'), ((1980, 2010), 'chainlit.LlamaIndexCallbackHandler', 'cl.LlamaIndexCallbackHandler', ([], {}), '()\n', (2008, 2010), True, 'import chainlit as cl\n')] |
import typer
import uuid
from typing import Optional, List, Any
import os
import numpy as np
from memgpt.utils import is_valid_url, printd
from memgpt.data_types import EmbeddingConfig
from memgpt.credentials import MemGPTCredentials
from memgpt.constants import MAX_EMBEDDING_DIM, EMBEDDING_TO_TOKENIZER_MAP, EMBEDDING_TO_TOKENIZER_DEFAULT
# from llama_index.core.base.embeddings import BaseEmbedding
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core import Document as LlamaIndexDocument
# from llama_index.core.base.embeddings import BaseEmbedding
# from llama_index.core.embeddings import BaseEmbedding
# from llama_index.core.base.embeddings.base import BaseEmbedding
# from llama_index.bridge.pydantic import PrivateAttr
# from llama_index.embeddings.base import BaseEmbedding
# from llama_index.embeddings.huggingface_utils import format_text
import tiktoken
def parse_and_chunk_text(text: str, chunk_size: int) -> List[str]:
parser = SentenceSplitter(chunk_size=chunk_size)
llama_index_docs = [LlamaIndexDocument(text=text)]
nodes = parser.get_nodes_from_documents(llama_index_docs)
return [n.text for n in nodes]
def truncate_text(text: str, max_length: int, encoding) -> str:
# truncate the text based on max_length and encoding
encoded_text = encoding.encode(text)[:max_length]
return encoding.decode(encoded_text)
def check_and_split_text(text: str, embedding_model: str) -> List[str]:
"""Split text into chunks of max_length tokens or less"""
if embedding_model in EMBEDDING_TO_TOKENIZER_MAP:
encoding = tiktoken.get_encoding(EMBEDDING_TO_TOKENIZER_MAP[embedding_model])
else:
print(f"Warning: couldn't find tokenizer for model {embedding_model}, using default tokenizer {EMBEDDING_TO_TOKENIZER_DEFAULT}")
encoding = tiktoken.get_encoding(EMBEDDING_TO_TOKENIZER_DEFAULT)
num_tokens = len(encoding.encode(text))
# determine max length
if hasattr(encoding, "max_length"):
# TODO(fix) this is broken
max_length = encoding.max_length
else:
# TODO: figure out the real number
printd(f"Warning: couldn't find max_length for tokenizer {embedding_model}, using default max_length 8191")
max_length = 8191
# truncate text if too long
if num_tokens > max_length:
print(f"Warning: text is too long ({num_tokens} tokens), truncating to {max_length} tokens.")
        # `format_text` (a llama_index HuggingFace helper) is not imported in
        # this module, so truncate the raw text directly
        text = truncate_text(text, max_length, encoding)
return [text]
class EmbeddingEndpoint:
"""Implementation for OpenAI compatible endpoint"""
# """ Based off llama index https://github.com/run-llama/llama_index/blob/a98bdb8ecee513dc2e880f56674e7fd157d1dc3a/llama_index/embeddings/text_embeddings_inference.py """
# _user: str = PrivateAttr()
# _timeout: float = PrivateAttr()
# _base_url: str = PrivateAttr()
def __init__(
self,
model: str,
base_url: str,
user: str,
timeout: float = 60.0,
**kwargs: Any,
):
if not is_valid_url(base_url):
raise ValueError(
f"Embeddings endpoint was provided an invalid URL (set to: '{base_url}'). Make sure embedding_endpoint is set correctly in your MemGPT config."
)
self.model_name = model
self._user = user
self._base_url = base_url
self._timeout = timeout
def _call_api(self, text: str) -> List[float]:
if not is_valid_url(self._base_url):
raise ValueError(
f"Embeddings endpoint does not have a valid URL (set to: '{self._base_url}'). Make sure embedding_endpoint is set correctly in your MemGPT config."
)
import httpx
headers = {"Content-Type": "application/json"}
json_data = {"input": text, "model": self.model_name, "user": self._user}
with httpx.Client() as client:
response = client.post(
f"{self._base_url}/embeddings",
headers=headers,
json=json_data,
timeout=self._timeout,
)
response_json = response.json()
if isinstance(response_json, list):
# embedding directly in response
embedding = response_json
elif isinstance(response_json, dict):
# TEI embedding packaged inside openai-style response
try:
embedding = response_json["data"][0]["embedding"]
except (KeyError, IndexError):
raise TypeError(f"Got back an unexpected payload from text embedding function, response=\n{response_json}")
else:
# unknown response, can't parse
raise TypeError(f"Got back an unexpected payload from text embedding function, response=\n{response_json}")
return embedding
def get_text_embedding(self, text: str) -> List[float]:
return self._call_api(text)
def default_embedding_model():
# default to hugging face model running local
# warning: this is a terrible model
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
os.environ["TOKENIZERS_PARALLELISM"] = "False"
model = "BAAI/bge-small-en-v1.5"
return HuggingFaceEmbedding(model_name=model)
def query_embedding(embedding_model, query_text: str):
"""Generate padded embedding for querying database"""
query_vec = embedding_model.get_text_embedding(query_text)
query_vec = np.array(query_vec)
query_vec = np.pad(query_vec, (0, MAX_EMBEDDING_DIM - query_vec.shape[0]), mode="constant").tolist()
return query_vec
def embedding_model(config: EmbeddingConfig, user_id: Optional[uuid.UUID] = None):
"""Return LlamaIndex embedding model to use for embeddings"""
endpoint_type = config.embedding_endpoint_type
# TODO refactor to pass credentials through args
credentials = MemGPTCredentials.load()
if endpoint_type == "openai":
assert credentials.openai_key is not None
from llama_index.embeddings.openai import OpenAIEmbedding
additional_kwargs = {"user_id": user_id} if user_id else {}
model = OpenAIEmbedding(
api_base=config.embedding_endpoint,
api_key=credentials.openai_key,
additional_kwargs=additional_kwargs,
)
return model
elif endpoint_type == "azure":
assert all(
[
credentials.azure_key is not None,
credentials.azure_embedding_endpoint is not None,
credentials.azure_version is not None,
]
)
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
# https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings
model = "text-embedding-ada-002"
deployment = credentials.azure_embedding_deployment if credentials.azure_embedding_deployment is not None else model
return AzureOpenAIEmbedding(
model=model,
deployment_name=deployment,
api_key=credentials.azure_key,
azure_endpoint=credentials.azure_endpoint,
api_version=credentials.azure_version,
)
elif endpoint_type == "hugging-face":
return EmbeddingEndpoint(
model=config.embedding_model,
base_url=config.embedding_endpoint,
user=user_id,
)
else:
return default_embedding_model()
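
# --- Usage sketch (illustrative; not part of the original module) ---
# Ties the helpers above together: build an embedding model from a config,
# chunk a document, and build a padded query vector. The EmbeddingConfig field
# names mirror the attributes accessed above; whether they can be passed as
# constructor kwargs like this is an assumption.
#
#   config = EmbeddingConfig(
#       embedding_endpoint_type="hugging-face",
#       embedding_endpoint="http://localhost:8080",
#       embedding_model="BAAI/bge-small-en-v1.5",
#   )
#   model = embedding_model(config)
#   chunks = parse_and_chunk_text("Some long document text ...", chunk_size=300)
#   vectors = [model.get_text_embedding(c) for c in chunks]
#   padded_query_vec = query_embedding(model, "what is this document about?")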
| [
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.embeddings.azure_openai.AzureOpenAIEmbedding",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.Document",
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((981, 1020), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': 'chunk_size'}), '(chunk_size=chunk_size)\n', (997, 1020), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((5381, 5419), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'model'}), '(model_name=model)\n', (5401, 5419), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((5614, 5633), 'numpy.array', 'np.array', (['query_vec'], {}), '(query_vec)\n', (5622, 5633), True, 'import numpy as np\n'), ((6035, 6059), 'memgpt.credentials.MemGPTCredentials.load', 'MemGPTCredentials.load', ([], {}), '()\n', (6057, 6059), False, 'from memgpt.credentials import MemGPTCredentials\n'), ((1045, 1074), 'llama_index.core.Document', 'LlamaIndexDocument', ([], {'text': 'text'}), '(text=text)\n', (1063, 1074), True, 'from llama_index.core import Document as LlamaIndexDocument\n'), ((1601, 1667), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['EMBEDDING_TO_TOKENIZER_MAP[embedding_model]'], {}), '(EMBEDDING_TO_TOKENIZER_MAP[embedding_model])\n', (1622, 1667), False, 'import tiktoken\n'), ((1834, 1887), 'tiktoken.get_encoding', 'tiktoken.get_encoding', (['EMBEDDING_TO_TOKENIZER_DEFAULT'], {}), '(EMBEDDING_TO_TOKENIZER_DEFAULT)\n', (1855, 1887), False, 'import tiktoken\n'), ((2138, 2255), 'memgpt.utils.printd', 'printd', (['f"""Warning: couldn\'t find max_length for tokenizer {embedding_model}, using default max_length 8191"""'], {}), '(\n f"Warning: couldn\'t find max_length for tokenizer {embedding_model}, using default max_length 8191"\n )\n', (2144, 2255), False, 'from memgpt.utils import is_valid_url, printd\n'), ((6296, 6421), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_base': 'config.embedding_endpoint', 'api_key': 'credentials.openai_key', 'additional_kwargs': 'additional_kwargs'}), '(api_base=config.embedding_endpoint, api_key=credentials.\n openai_key, additional_kwargs=additional_kwargs)\n', (6311, 6421), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n'), ((3196, 3218), 'memgpt.utils.is_valid_url', 'is_valid_url', (['base_url'], {}), '(base_url)\n', (3208, 3218), False, 'from memgpt.utils import is_valid_url, printd\n'), ((3615, 3643), 'memgpt.utils.is_valid_url', 'is_valid_url', (['self._base_url'], {}), '(self._base_url)\n', (3627, 3643), False, 'from memgpt.utils import is_valid_url, printd\n'), ((4026, 4040), 'httpx.Client', 'httpx.Client', ([], {}), '()\n', (4038, 4040), False, 'import httpx\n'), ((5650, 5729), 'numpy.pad', 'np.pad', (['query_vec', '(0, MAX_EMBEDDING_DIM - query_vec.shape[0])'], {'mode': '"""constant"""'}), "(query_vec, (0, MAX_EMBEDDING_DIM - query_vec.shape[0]), mode='constant')\n", (5656, 5729), True, 'import numpy as np\n'), ((7100, 7283), 'llama_index.embeddings.azure_openai.AzureOpenAIEmbedding', 'AzureOpenAIEmbedding', ([], {'model': 'model', 'deployment_name': 'deployment', 'api_key': 'credentials.azure_key', 'azure_endpoint': 'credentials.azure_endpoint', 'api_version': 'credentials.azure_version'}), '(model=model, deployment_name=deployment, api_key=\n credentials.azure_key, azure_endpoint=credentials.azure_endpoint,\n api_version=credentials.azure_version)\n', (7120, 7283), False, 'from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding\n')] |
import logging
import os
from typing import Optional
from typing import Type
import openai
from langchain.chat_models import ChatOpenAI
from llama_index import VectorStoreIndex, LLMPredictor, ServiceContext
from llama_index.vector_stores.types import ExactMatchFilter, MetadataFilters
from pydantic import BaseModel, Field
from superagi.config.config import get_config
from superagi.llms.base_llm import BaseLlm
from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory
from superagi.tools.base_tool import BaseTool
from superagi.types.vector_store_types import VectorStoreType
from superagi.vector_store.chromadb import ChromaDB
class QueryResource(BaseModel):
"""Input for QueryResource tool."""
query: str = Field(..., description="the search query to search resources")
class QueryResourceTool(BaseTool):
"""
    Query Resource tool
Attributes:
name : The name.
description : The description.
args_schema : The args schema.
"""
name: str = "QueryResource"
args_schema: Type[BaseModel] = QueryResource
description: str = "Tool searches resources content and extracts relevant information to perform the given task." \
"Tool is given preference over other search/read file tools for relevant data." \
"Resources content is taken from the files: {summary}"
agent_id: int = None
llm: Optional[BaseLlm] = None
def _execute(self, query: str):
openai.api_key = self.llm.get_api_key()
os.environ["OPENAI_API_KEY"] = self.llm.get_api_key()
llm_predictor_chatgpt = LLMPredictor(llm=ChatOpenAI(temperature=0, model_name=self.llm.get_model(),
openai_api_key=get_config("OPENAI_API_KEY")))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor_chatgpt)
vector_store_name = VectorStoreType.get_vector_store_type(
self.get_tool_config(key="RESOURCE_VECTOR_STORE") or "Redis")
vector_store_index_name = self.get_tool_config(key="RESOURCE_VECTOR_STORE_INDEX_NAME") or "super-agent-index"
logging.info(f"vector_store_name {vector_store_name}")
logging.info(f"vector_store_index_name {vector_store_index_name}")
vector_store = LlamaVectorStoreFactory(vector_store_name, vector_store_index_name).get_vector_store()
logging.info(f"vector_store {vector_store}")
as_query_engine_args = dict(
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="agent_id",
value=str(self.agent_id)
)
]
)
)
if vector_store_name == VectorStoreType.CHROMA:
as_query_engine_args["chroma_collection"] = ChromaDB.create_collection(
collection_name=vector_store_index_name)
index = VectorStoreIndex.from_vector_store(vector_store=vector_store, service_context=service_context)
query_engine = index.as_query_engine(
**as_query_engine_args
)
try:
response = query_engine.query(query)
except ValueError as e:
logging.error(f"ValueError {e}")
response = "Document not found"
return response
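
# --- How the per-agent filtering above works (illustrative sketch) ---
# Resource chunks are stored with an `agent_id` metadata field, so the query
# engine only retrieves chunks that belong to the calling agent. Conceptually:
#
#   filters = MetadataFilters(filters=[ExactMatchFilter(key="agent_id", value="42")])
#   query_engine = index.as_query_engine(filters=filters)
#   response = query_engine.query("Summarise the uploaded resources")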
| [
"llama_index.ServiceContext.from_defaults",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((754, 816), 'pydantic.Field', 'Field', (['...'], {'description': '"""the search query to search resources"""'}), "(..., description='the search query to search resources')\n", (759, 816), False, 'from pydantic import BaseModel, Field\n'), ((1840, 1905), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor_chatgpt'}), '(llm_predictor=llm_predictor_chatgpt)\n', (1868, 1905), False, 'from llama_index import VectorStoreIndex, LLMPredictor, ServiceContext\n'), ((2173, 2227), 'logging.info', 'logging.info', (['f"""vector_store_name {vector_store_name}"""'], {}), "(f'vector_store_name {vector_store_name}')\n", (2185, 2227), False, 'import logging\n'), ((2236, 2302), 'logging.info', 'logging.info', (['f"""vector_store_index_name {vector_store_index_name}"""'], {}), "(f'vector_store_index_name {vector_store_index_name}')\n", (2248, 2302), False, 'import logging\n'), ((2421, 2465), 'logging.info', 'logging.info', (['f"""vector_store {vector_store}"""'], {}), "(f'vector_store {vector_store}')\n", (2433, 2465), False, 'import logging\n'), ((2970, 3068), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (3004, 3068), False, 'from llama_index import VectorStoreIndex, LLMPredictor, ServiceContext\n'), ((2869, 2936), 'superagi.vector_store.chromadb.ChromaDB.create_collection', 'ChromaDB.create_collection', ([], {'collection_name': 'vector_store_index_name'}), '(collection_name=vector_store_index_name)\n', (2895, 2936), False, 'from superagi.vector_store.chromadb import ChromaDB\n'), ((2326, 2393), 'superagi.resource_manager.llama_vector_store_factory.LlamaVectorStoreFactory', 'LlamaVectorStoreFactory', (['vector_store_name', 'vector_store_index_name'], {}), '(vector_store_name, vector_store_index_name)\n', (2349, 2393), False, 'from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory\n'), ((3262, 3294), 'logging.error', 'logging.error', (['f"""ValueError {e}"""'], {}), "(f'ValueError {e}')\n", (3275, 3294), False, 'import logging\n'), ((1782, 1810), 'superagi.config.config.get_config', 'get_config', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1792, 1810), False, 'from superagi.config.config import get_config\n')] |
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from typing import Any, List, Optional
from sentence_transformers import CrossEncoder
from typing import Optional, Sequence
from langchain_core.documents import Document
from langchain.callbacks.manager import Callbacks
from langchain.retrievers.document_compressors.base import BaseDocumentCompressor
from llama_index.bridge.pydantic import Field, PrivateAttr
class LangchainReranker(BaseDocumentCompressor):
"""Document compressor that uses `Cohere Rerank API`."""
model_name_or_path: str = Field()
_model: Any = PrivateAttr()
top_n: int = Field()
device: str = Field()
max_length: int = Field()
batch_size: int = Field()
# show_progress_bar: bool = None
num_workers: int = Field()
# activation_fct = None
# apply_softmax = False
def __init__(self,
model_name_or_path: str,
top_n: int = 3,
device: str = "cuda",
max_length: int = 1024,
batch_size: int = 32,
# show_progress_bar: bool = None,
num_workers: int = 0,
# activation_fct = None,
# apply_softmax = False,
):
# self.top_n=top_n
# self.model_name_or_path=model_name_or_path
# self.device=device
# self.max_length=max_length
# self.batch_size=batch_size
# self.show_progress_bar=show_progress_bar
# self.num_workers=num_workers
# self.activation_fct=activation_fct
# self.apply_softmax=apply_softmax
        self._model = CrossEncoder(model_name=model_name_or_path, max_length=max_length, device=device)
super().__init__(
top_n=top_n,
model_name_or_path=model_name_or_path,
device=device,
max_length=max_length,
batch_size=batch_size,
# show_progress_bar=show_progress_bar,
num_workers=num_workers,
# activation_fct=activation_fct,
# apply_softmax=apply_softmax
)
def compress_documents(
self,
documents: Sequence[Document],
query: str,
callbacks: Optional[Callbacks] = None,
) -> Sequence[Document]:
"""
        Rerank the documents with the cross-encoder and keep the top_n most relevant ones.
Args:
documents: A sequence of documents to compress.
query: The query to use for compressing the documents.
callbacks: Callbacks to run during the compression process.
Returns:
A sequence of compressed documents.
"""
if len(documents) == 0: # to avoid empty api call
return []
doc_list = list(documents)
_docs = [d.page_content for d in doc_list]
sentence_pairs = [[query, _doc] for _doc in _docs]
results = self._model.predict(sentences=sentence_pairs,
batch_size=self.batch_size,
# show_progress_bar=self.show_progress_bar,
num_workers=self.num_workers,
# activation_fct=self.activation_fct,
# apply_softmax=self.apply_softmax,
convert_to_tensor=True
)
top_k = self.top_n if self.top_n < len(results) else len(results)
values, indices = results.topk(top_k)
final_results = []
for value, index in zip(values, indices):
doc = doc_list[index]
doc.metadata["relevance_score"] = value
final_results.append(doc)
return final_results
if __name__ == "__main__":
from configs import (LLM_MODELS,
VECTOR_SEARCH_TOP_K,
SCORE_THRESHOLD,
TEMPERATURE,
USE_RERANKER,
RERANKER_MODEL,
RERANKER_MAX_LENGTH,
MODEL_PATH)
from server.utils import embedding_device
if USE_RERANKER:
reranker_model_path = MODEL_PATH["reranker"].get(RERANKER_MODEL, "BAAI/bge-reranker-large")
print("-----------------model path------------------")
print(reranker_model_path)
reranker_model = LangchainReranker(top_n=3,
device=embedding_device(),
max_length=RERANKER_MAX_LENGTH,
model_name_or_path=reranker_model_path
)
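        # Illustrative smoke test (not part of the original script): score two
        # made-up documents against a query. compress_documents returns the
        # top_n documents with a "relevance_score" added to their metadata.
        sample_docs = [
            Document(page_content="Cross-encoders score a query and a document jointly."),
            Document(page_content="The weather in Paris is mild in spring."),
        ]
        reranked = reranker_model.compress_documents(
            documents=sample_docs, query="how does cross-encoder reranking work?"
        )
        for doc in reranked:
            print(doc.metadata["relevance_score"], doc.page_content)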
| [
"llama_index.bridge.pydantic.Field",
"llama_index.bridge.pydantic.PrivateAttr"
] | [((602, 609), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (607, 609), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((628, 641), 'llama_index.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (639, 641), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((659, 666), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (664, 666), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((685, 692), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (690, 692), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((715, 722), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (720, 722), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((745, 752), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (750, 752), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((813, 820), 'llama_index.bridge.pydantic.Field', 'Field', ([], {}), '()\n', (818, 820), False, 'from llama_index.bridge.pydantic import Field, PrivateAttr\n'), ((1674, 1749), 'sentence_transformers.CrossEncoder', 'CrossEncoder', ([], {'model_name': 'model_name_or_path', 'max_length': '(1024)', 'device': 'device'}), '(model_name=model_name_or_path, max_length=1024, device=device)\n', (1686, 1749), False, 'from sentence_transformers import CrossEncoder\n'), ((70, 95), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (85, 95), False, 'import os\n'), ((4520, 4538), 'server.utils.embedding_device', 'embedding_device', ([], {}), '()\n', (4536, 4538), False, 'from server.utils import embedding_device\n')] |
'''
Below helper functions are implemented in this script:
build_sentence_window_index - VectorStore Index for Sentence window RAG technique
get_sentence_window_query_engine - query engine for the above index
build_automerging_index - VectorStore Index for Auto-merging RAG technique
get_automerging_query_engine - query engine for the above index
Evaluation function:
get_prebuilt_trulens_recorder - evaluation function with all the feedback functions
'''
import os
import numpy as np
from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage
from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes
from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank
from llama_index.retrievers import AutoMergingRetriever
from llama_index.query_engine import RetrieverQueryEngine
from trulens_eval import Feedback, TruLlama
from trulens_eval import OpenAI as fOpenAI
from trulens_eval.feedback import Groundedness
############################################################################## Function 1 ###########################################################
def build_sentence_window_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
sentence_window_size=3,
save_dir="sentence_index",
):
# create the sentence window node parser w/ default settings
node_parser = SentenceWindowNodeParser.from_defaults(
window_size=sentence_window_size,
window_metadata_key="window",
original_text_metadata_key="original_text",
)
sentence_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
node_parser=node_parser,
)
if not os.path.exists(save_dir):
sentence_index = VectorStoreIndex.from_documents(
documents, service_context=sentence_context
)
sentence_index.storage_context.persist(persist_dir=save_dir)
else:
sentence_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=sentence_context,
)
return sentence_index
############################################################################## Function 2 ###########################################################
def get_sentence_window_query_engine(
sentence_index, similarity_top_k=6, rerank_top_n=2
):
# define postprocessors
postproc = MetadataReplacementPostProcessor(target_metadata_key="window")
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model="BAAI/bge-reranker-base"
)
sentence_window_engine = sentence_index.as_query_engine(
similarity_top_k=similarity_top_k, node_postprocessors=[postproc, rerank]
)
return sentence_window_engine
############################################################################## Function 3 ###########################################################
def build_automerging_index(
documents,
llm,
embed_model="local:BAAI/bge-small-en-v1.5",
save_dir="merging_index",
chunk_sizes=None
):
# chunk sizes for all the layers (factor of 4)
chunk_sizes = chunk_sizes or [2048, 512, 128]
# Hierarchical node parser to parse the tree nodes (parent and children)
node_parser = HierarchicalNodeParser.from_defaults(chunk_sizes=chunk_sizes)
# getting all intermediate and parent nodes
nodes = node_parser.get_nodes_from_documents(documents)
# getting only the leaf nodes
leaf_nodes = get_leaf_nodes(nodes)
# required service context to initialize both llm and embed model
merging_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model
)
# storage context to store the intermediate and parent nodes in a docstore, because the index is built only on the leaf nodes
storage_context = StorageContext.from_defaults()
storage_context.docstore.add_documents(nodes)
if not os.path.exists(save_dir):
automerging_index = VectorStoreIndex(
leaf_nodes, storage_context=storage_context, service_context=merging_context
)
automerging_index.storage_context.persist(persist_dir=save_dir)
else:
automerging_index = load_index_from_storage(
StorageContext.from_defaults(persist_dir=save_dir),
service_context=merging_context
)
return automerging_index
############################################################################## Function 4 ###########################################################
def get_automerging_query_engine(
automerging_index,
similarity_top_k=12,
rerank_top_n=6,
):
# retriever is used to merge the child nodes into the parent nodes
base_retriever = automerging_index.as_retriever(similarity_top_k=similarity_top_k)
retriever = AutoMergingRetriever(
base_retriever, automerging_index.storage_context, verbose=True
)
# Ranking is used to select top k relevant chunks from similarity_top_k
rerank = SentenceTransformerRerank(
top_n=rerank_top_n, model='BAAI/bge-reranker-base'
)
# getting query engine with the above mentioned retiriever and reranker
automerging_engine = RetrieverQueryEngine.from_args(
retriever, node_postprocessors=[rerank]
)
return automerging_engine
############################################################################## Function 5 ###########################################################
def get_prebuilt_trulens_recorder(query_engine, app_id):
# Feedback functions
# Answer Relevance
provider = fOpenAI()
f_qa_relevance = Feedback(
provider.relevance_with_cot_reasons,
name="Answer Relevance"
).on_input_output()
# Context Relevance
context_selection = TruLlama.select_source_nodes().node.text
f_qs_relevance = (
Feedback(provider.qs_relevance,
name="Context Relevance")
.on_input()
.on(context_selection)
.aggregate(np.mean)
)
# Groundedness
grounded = Groundedness(groundedness_provider=provider)
f_groundedness = (
Feedback(grounded.groundedness_measure_with_cot_reasons,
name="Groundedness"
)
.on(context_selection)
.on_output()
.aggregate(grounded.grounded_statements_aggregator)
)
tru_recorder = TruLlama(
query_engine,
app_id=app_id,
feedbacks = [
f_qa_relevance,
f_qs_relevance,
f_groundedness
]
)
return tru_recorder
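
############################################################################## Usage sketch ###########################################################
# Illustrative end-to-end wiring of the helpers above (not part of the original
# script). It assumes legacy (pre-0.10) llama_index imports, an OPENAI_API_KEY
# in the environment, and a ./data folder with documents:
#
#   from llama_index import SimpleDirectoryReader
#   from llama_index.llms import OpenAI
#
#   documents = SimpleDirectoryReader("./data").load_data()
#   llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)
#
#   sentence_index = build_sentence_window_index(documents, llm)
#   engine = get_sentence_window_query_engine(sentence_index)
#
#   recorder = get_prebuilt_trulens_recorder(engine, app_id="sentence-window-rag")
#   with recorder as recording:
#       engine.query("What is the main topic of the documents?")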
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.retrievers.AutoMergingRetriever",
"llama_index.node_parser.HierarchicalNodeParser.from_defaults",
"llama_index.VectorStoreIndex",
"llama_index.indices.postprocessor.SentenceTransformerRerank",
"llama_index.node_parser.SentenceWindowNodeParser.from_defaults",
"llama_index.ServiceContext.from_defaults",
"llama_index.node_parser.get_leaf_nodes",
"llama_index.StorageContext.from_defaults",
"llama_index.query_engine.RetrieverQueryEngine.from_args",
"llama_index.indices.postprocessor.MetadataReplacementPostProcessor"
] | [((1446, 1596), 'llama_index.node_parser.SentenceWindowNodeParser.from_defaults', 'SentenceWindowNodeParser.from_defaults', ([], {'window_size': 'sentence_window_size', 'window_metadata_key': '"""window"""', 'original_text_metadata_key': '"""original_text"""'}), "(window_size=sentence_window_size,\n window_metadata_key='window', original_text_metadata_key='original_text')\n", (1484, 1596), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((1647, 1739), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model', 'node_parser': 'node_parser'}), '(llm=llm, embed_model=embed_model, node_parser=\n node_parser)\n', (1675, 1739), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2493, 2555), 'llama_index.indices.postprocessor.MetadataReplacementPostProcessor', 'MetadataReplacementPostProcessor', ([], {'target_metadata_key': '"""window"""'}), "(target_metadata_key='window')\n", (2525, 2555), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((2569, 2646), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (2594, 2646), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((3368, 3429), 'llama_index.node_parser.HierarchicalNodeParser.from_defaults', 'HierarchicalNodeParser.from_defaults', ([], {'chunk_sizes': 'chunk_sizes'}), '(chunk_sizes=chunk_sizes)\n', (3404, 3429), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((3591, 3612), 'llama_index.node_parser.get_leaf_nodes', 'get_leaf_nodes', (['nodes'], {}), '(nodes)\n', (3605, 3612), False, 'from llama_index.node_parser import SentenceWindowNodeParser, HierarchicalNodeParser, get_leaf_nodes\n'), ((3706, 3768), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (3734, 3768), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((3944, 3974), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (3972, 3974), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4938, 5027), 'llama_index.retrievers.AutoMergingRetriever', 'AutoMergingRetriever', (['base_retriever', 'automerging_index.storage_context'], {'verbose': '(True)'}), '(base_retriever, automerging_index.storage_context,\n verbose=True)\n', (4958, 5027), False, 'from llama_index.retrievers import AutoMergingRetriever\n'), ((5128, 5205), 'llama_index.indices.postprocessor.SentenceTransformerRerank', 'SentenceTransformerRerank', ([], {'top_n': 'rerank_top_n', 'model': '"""BAAI/bge-reranker-base"""'}), "(top_n=rerank_top_n, model='BAAI/bge-reranker-base')\n", (5153, 5205), False, 'from llama_index.indices.postprocessor import MetadataReplacementPostProcessor, SentenceTransformerRerank\n'), ((5322, 5393), 'llama_index.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {'node_postprocessors': '[rerank]'}), '(retriever, 
node_postprocessors=[rerank])\n', (5352, 5393), False, 'from llama_index.query_engine import RetrieverQueryEngine\n'), ((5711, 5720), 'trulens_eval.OpenAI', 'fOpenAI', ([], {}), '()\n', (5718, 5720), True, 'from trulens_eval import OpenAI as fOpenAI\n'), ((6162, 6206), 'trulens_eval.feedback.Groundedness', 'Groundedness', ([], {'groundedness_provider': 'provider'}), '(groundedness_provider=provider)\n', (6174, 6206), False, 'from trulens_eval.feedback import Groundedness\n'), ((6488, 6589), 'trulens_eval.TruLlama', 'TruLlama', (['query_engine'], {'app_id': 'app_id', 'feedbacks': '[f_qa_relevance, f_qs_relevance, f_groundedness]'}), '(query_engine, app_id=app_id, feedbacks=[f_qa_relevance,\n f_qs_relevance, f_groundedness])\n', (6496, 6589), False, 'from trulens_eval import Feedback, TruLlama\n'), ((1777, 1801), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (1791, 1801), False, 'import os\n'), ((1828, 1904), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'sentence_context'}), '(documents, service_context=sentence_context)\n', (1859, 1904), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4037, 4061), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (4051, 4061), False, 'import os\n'), ((4091, 4189), 'llama_index.VectorStoreIndex', 'VectorStoreIndex', (['leaf_nodes'], {'storage_context': 'storage_context', 'service_context': 'merging_context'}), '(leaf_nodes, storage_context=storage_context,\n service_context=merging_context)\n', (4107, 4189), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((2068, 2118), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (2096, 2118), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((4356, 4406), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': 'save_dir'}), '(persist_dir=save_dir)\n', (4384, 4406), False, 'from llama_index import ServiceContext, VectorStoreIndex, StorageContext, load_index_from_storage\n'), ((5743, 5813), 'trulens_eval.Feedback', 'Feedback', (['provider.relevance_with_cot_reasons'], {'name': '"""Answer Relevance"""'}), "(provider.relevance_with_cot_reasons, name='Answer Relevance')\n", (5751, 5813), False, 'from trulens_eval import Feedback, TruLlama\n'), ((5903, 5933), 'trulens_eval.TruLlama.select_source_nodes', 'TruLlama.select_source_nodes', ([], {}), '()\n', (5931, 5933), False, 'from trulens_eval import Feedback, TruLlama\n'), ((5972, 6029), 'trulens_eval.Feedback', 'Feedback', (['provider.qs_relevance'], {'name': '"""Context Relevance"""'}), "(provider.qs_relevance, name='Context Relevance')\n", (5980, 6029), False, 'from trulens_eval import Feedback, TruLlama\n'), ((6239, 6316), 'trulens_eval.Feedback', 'Feedback', (['grounded.groundedness_measure_with_cot_reasons'], {'name': '"""Groundedness"""'}), "(grounded.groundedness_measure_with_cot_reasons, name='Groundedness')\n", (6247, 6316), False, 'from trulens_eval import Feedback, TruLlama\n')] |
import os
import streamlit as st
from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
st.set_page_config(
page_title="Chat with the PDM docs, powered by LlamaIndex",
page_icon="📝",
layout="centered",
initial_sidebar_state="auto",
menu_items=None,
)
st.title("Chat with the PDM docs, powered by LlamaIndex 💬🦙")
st.info(
"PDM - A modern Python package and dependency manager. "
"Check out the full documentation at [PDM docs](https://pdm-project.org).",
icon="📃",
)
Settings.llm = OpenAI(
api_key=st.secrets.get("openai_key"),
api_base=st.secrets.get("openai_base"),
model="gpt-3.5-turbo",
temperature=0.5,
system_prompt="You are an expert on PDM and your job is to answer technical questions. "
"Assume that all questions are related to PDM. Keep your answers technical and based on facts - do not hallucinate features.",
)
Settings.embed_model = OpenAIEmbedding(api_base=st.secrets.get("openai_base"), api_key=st.secrets.get("openai_key"))
DATA_PATH = os.path.join(os.path.dirname(os.path.dirname(__file__)), "docs/docs")
if "messages" not in st.session_state.keys(): # Initialize the chat messages history
st.session_state.messages = [
{
"role": "assistant",
"content": "Ask me a question about PDM!",
}
]
@st.cache_resource(show_spinner=False)
def load_data():
with st.spinner(text="Loading and indexing the PDM docs - hang tight! This should take 1-2 minutes."):
reader = SimpleDirectoryReader(input_dir=DATA_PATH, recursive=True, required_exts=[".md"])
docs = reader.load_data()
index = VectorStoreIndex.from_documents(docs)
return index
index = load_data()
if "chat_engine" not in st.session_state.keys(): # Initialize the chat engine
st.session_state.chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=True)
if prompt := st.chat_input("Your question"): # Prompt for user input and save to chat history
st.session_state.messages.append({"role": "user", "content": prompt})
for message in st.session_state.messages: # Display the prior chat messages
with st.chat_message(message["role"]):
st.write(message["content"])
# If last message is not from assistant, generate a new response
if st.session_state.messages[-1]["role"] != "assistant":
with st.chat_message("assistant"):
with st.spinner("Thinking..."):
response = st.session_state.chat_engine.chat(prompt)
st.write(response.response)
message = {"role": "assistant", "content": response.response}
st.session_state.messages.append(message) # Add response to message history
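
# To run locally (assuming this file is saved as app.py and the PDM docs exist
# at DATA_PATH): `streamlit run app.py`. The OpenAI credentials are read from
# Streamlit secrets, e.g. a .streamlit/secrets.toml containing `openai_key`
# and `openai_base`.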
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.SimpleDirectoryReader"
] | [((215, 384), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Chat with the PDM docs, powered by LlamaIndex"""', 'page_icon': '"""📝"""', 'layout': '"""centered"""', 'initial_sidebar_state': '"""auto"""', 'menu_items': 'None'}), "(page_title=\n 'Chat with the PDM docs, powered by LlamaIndex', page_icon='📝', layout=\n 'centered', initial_sidebar_state='auto', menu_items=None)\n", (233, 384), True, 'import streamlit as st\n'), ((398, 458), 'streamlit.title', 'st.title', (['"""Chat with the PDM docs, powered by LlamaIndex 💬🦙"""'], {}), "('Chat with the PDM docs, powered by LlamaIndex 💬🦙')\n", (406, 458), True, 'import streamlit as st\n'), ((459, 616), 'streamlit.info', 'st.info', (['"""PDM - A modern Python package and dependency manager. Check out the full documentation at [PDM docs](https://pdm-project.org)."""'], {'icon': '"""📃"""'}), "(\n 'PDM - A modern Python package and dependency manager. Check out the full documentation at [PDM docs](https://pdm-project.org).'\n , icon='📃')\n", (466, 616), True, 'import streamlit as st\n'), ((1446, 1483), 'streamlit.cache_resource', 'st.cache_resource', ([], {'show_spinner': '(False)'}), '(show_spinner=False)\n', (1463, 1483), True, 'import streamlit as st\n'), ((1230, 1253), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1251, 1253), True, 'import streamlit as st\n'), ((1863, 1886), 'streamlit.session_state.keys', 'st.session_state.keys', ([], {}), '()\n', (1884, 1886), True, 'import streamlit as st\n'), ((2033, 2063), 'streamlit.chat_input', 'st.chat_input', (['"""Your question"""'], {}), "('Your question')\n", (2046, 2063), True, 'import streamlit as st\n'), ((2119, 2188), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (2151, 2188), True, 'import streamlit as st\n'), ((660, 688), 'streamlit.secrets.get', 'st.secrets.get', (['"""openai_key"""'], {}), "('openai_key')\n", (674, 688), True, 'import streamlit as st\n'), ((703, 732), 'streamlit.secrets.get', 'st.secrets.get', (['"""openai_base"""'], {}), "('openai_base')\n", (717, 732), True, 'import streamlit as st\n'), ((1056, 1085), 'streamlit.secrets.get', 'st.secrets.get', (['"""openai_base"""'], {}), "('openai_base')\n", (1070, 1085), True, 'import streamlit as st\n'), ((1095, 1123), 'streamlit.secrets.get', 'st.secrets.get', (['"""openai_key"""'], {}), "('openai_key')\n", (1109, 1123), True, 'import streamlit as st\n'), ((1167, 1192), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1182, 1192), False, 'import os\n'), ((1510, 1616), 'streamlit.spinner', 'st.spinner', ([], {'text': '"""Loading and indexing the PDM docs - hang tight! This should take 1-2 minutes."""'}), "(text=\n 'Loading and indexing the PDM docs - hang tight! 
This should take 1-2 minutes.'\n )\n", (1520, 1616), True, 'import streamlit as st\n'), ((1625, 1711), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': 'DATA_PATH', 'recursive': '(True)', 'required_exts': "['.md']"}), "(input_dir=DATA_PATH, recursive=True, required_exts=[\n '.md'])\n", (1646, 1711), False, 'from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex\n'), ((1757, 1794), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['docs'], {}), '(docs)\n', (1788, 1794), False, 'from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex\n'), ((2276, 2308), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (2291, 2308), True, 'import streamlit as st\n'), ((2318, 2346), 'streamlit.write', 'st.write', (["message['content']"], {}), "(message['content'])\n", (2326, 2346), True, 'import streamlit as st\n'), ((2479, 2507), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (2494, 2507), True, 'import streamlit as st\n'), ((2522, 2547), 'streamlit.spinner', 'st.spinner', (['"""Thinking..."""'], {}), "('Thinking...')\n", (2532, 2547), True, 'import streamlit as st\n'), ((2572, 2613), 'streamlit.session_state.chat_engine.chat', 'st.session_state.chat_engine.chat', (['prompt'], {}), '(prompt)\n', (2605, 2613), True, 'import streamlit as st\n'), ((2626, 2653), 'streamlit.write', 'st.write', (['response.response'], {}), '(response.response)\n', (2634, 2653), True, 'import streamlit as st\n'), ((2740, 2781), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (['message'], {}), '(message)\n', (2772, 2781), True, 'import streamlit as st\n')] |
from typing import Callable, List
def split_text_keep_separator(text: str, separator: str) -> List[str]:
"""Split text with separator and keep the separator at the end of each split."""
parts = text.split(separator)
result = [separator + s if i > 0 else s for i, s in enumerate(parts)]
return [s for s in result if s]
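# e.g. split_text_keep_separator("a.b.c", ".") -> ["a", ".b", ".c"]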
def split_by_sep(sep: str, keep_sep: bool = True) -> Callable[[str], List[str]]:
"""Split text by separator."""
if keep_sep:
return lambda text: split_text_keep_separator(text, sep)
else:
return lambda text: text.split(sep)
def split_by_char() -> Callable[[str], List[str]]:
"""Split text by character."""
return lambda text: list(text)
def split_by_sentence_tokenizer() -> Callable[[str], List[str]]:
import os
import nltk
from llama_index.utils import get_cache_dir
cache_dir = get_cache_dir()
nltk_data_dir = os.environ.get("NLTK_DATA", cache_dir)
# update nltk path for nltk so that it finds the data
if nltk_data_dir not in nltk.data.path:
nltk.data.path.append(nltk_data_dir)
try:
nltk.data.find("tokenizers/punkt")
except LookupError:
nltk.download("punkt", download_dir=nltk_data_dir)
tokenizer = nltk.tokenize.PunktSentenceTokenizer()
# get the spans and then return the sentences
# using the start index of each span
# instead of using end, use the start of the next span if available
def split(text: str) -> List[str]:
spans = list(tokenizer.span_tokenize(text))
sentences = []
for i, span in enumerate(spans):
start = span[0]
if i < len(spans) - 1:
end = spans[i + 1][0]
else:
end = len(text)
sentences.append(text[start:end])
return sentences
return split
def split_by_regex(regex: str) -> Callable[[str], List[str]]:
"""Split text by regex."""
import re
return lambda text: re.findall(regex, text)
def split_by_phrase_regex() -> Callable[[str], List[str]]:
"""Split text by phrase regex.
This regular expression will split the sentences into phrases,
where each phrase is a sequence of one or more non-comma,
non-period, and non-semicolon characters, followed by an optional comma,
period, or semicolon. The regular expression will also capture the
delimiters themselves as separate items in the list of phrases.
"""
regex = "[^,.;。]+[,.;。]?"
return split_by_regex(regex)
| [
"llama_index.utils.get_cache_dir"
] | [((876, 891), 'llama_index.utils.get_cache_dir', 'get_cache_dir', ([], {}), '()\n', (889, 891), False, 'from llama_index.utils import get_cache_dir\n'), ((912, 950), 'os.environ.get', 'os.environ.get', (['"""NLTK_DATA"""', 'cache_dir'], {}), "('NLTK_DATA', cache_dir)\n", (926, 950), False, 'import os\n'), ((1252, 1290), 'nltk.tokenize.PunktSentenceTokenizer', 'nltk.tokenize.PunktSentenceTokenizer', ([], {}), '()\n', (1288, 1290), False, 'import nltk\n'), ((1062, 1098), 'nltk.data.path.append', 'nltk.data.path.append', (['nltk_data_dir'], {}), '(nltk_data_dir)\n', (1083, 1098), False, 'import nltk\n'), ((1117, 1151), 'nltk.data.find', 'nltk.data.find', (['"""tokenizers/punkt"""'], {}), "('tokenizers/punkt')\n", (1131, 1151), False, 'import nltk\n'), ((1985, 2008), 're.findall', 're.findall', (['regex', 'text'], {}), '(regex, text)\n', (1995, 2008), False, 'import re\n'), ((1184, 1234), 'nltk.download', 'nltk.download', (['"""punkt"""'], {'download_dir': 'nltk_data_dir'}), "('punkt', download_dir=nltk_data_dir)\n", (1197, 1234), False, 'import nltk\n')] |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import torch
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from sqlalchemy import make_url
from llama_index.vector_stores.postgres import PGVectorStore
# from llama_index.llms.llama_cpp import LlamaCPP
import psycopg2
from pathlib import Path
from llama_index.readers.file import PyMuPDFReader
from llama_index.core.schema import NodeWithScore
from typing import Optional
from llama_index.core.query_engine import RetrieverQueryEngine
from llama_index.core import QueryBundle
from llama_index.core.retrievers import BaseRetriever
from typing import Any, List
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.vector_stores import VectorStoreQuery
import argparse
def load_vector_database(username, password):
db_name = "example_db"
host = "localhost"
password = password
port = "5432"
user = username
# conn = psycopg2.connect(connection_string)
conn = psycopg2.connect(
dbname="postgres",
host=host,
password=password,
port=port,
user=user,
)
conn.autocommit = True
with conn.cursor() as c:
c.execute(f"DROP DATABASE IF EXISTS {db_name}")
c.execute(f"CREATE DATABASE {db_name}")
vector_store = PGVectorStore.from_params(
database=db_name,
host=host,
password=password,
port=port,
user=user,
table_name="llama2_paper",
        embed_dim=384,  # must match the embedding model's output dimension (384 for BAAI/bge-small-en)
)
return vector_store
def load_data(data_path):
loader = PyMuPDFReader()
documents = loader.load(file_path=data_path)
text_parser = SentenceSplitter(
chunk_size=1024,
# separator=" ",
)
text_chunks = []
# maintain relationship with source doc index, to help inject doc metadata in (3)
doc_idxs = []
for doc_idx, doc in enumerate(documents):
cur_text_chunks = text_parser.split_text(doc.text)
text_chunks.extend(cur_text_chunks)
doc_idxs.extend([doc_idx] * len(cur_text_chunks))
from llama_index.core.schema import TextNode
nodes = []
for idx, text_chunk in enumerate(text_chunks):
node = TextNode(
text=text_chunk,
)
src_doc = documents[doc_idxs[idx]]
node.metadata = src_doc.metadata
nodes.append(node)
return nodes
class VectorDBRetriever(BaseRetriever):
"""Retriever over a postgres vector store."""
def __init__(
self,
vector_store: PGVectorStore,
embed_model: Any,
query_mode: str = "default",
similarity_top_k: int = 2,
) -> None:
"""Init params."""
self._vector_store = vector_store
self._embed_model = embed_model
self._query_mode = query_mode
self._similarity_top_k = similarity_top_k
super().__init__()
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve."""
query_embedding = self._embed_model.get_query_embedding(
query_bundle.query_str
)
vector_store_query = VectorStoreQuery(
query_embedding=query_embedding,
similarity_top_k=self._similarity_top_k,
mode=self._query_mode,
)
query_result = self._vector_store.query(vector_store_query)
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append(NodeWithScore(node=node, score=score))
return nodes_with_scores
def completion_to_prompt(completion):
return f"<|system|>\n</s>\n<|user|>\n{completion}</s>\n<|assistant|>\n"
# Transform a list of chat messages into zephyr-specific input
def messages_to_prompt(messages):
prompt = ""
for message in messages:
if message.role == "system":
prompt += f"<|system|>\n{message.content}</s>\n"
elif message.role == "user":
prompt += f"<|user|>\n{message.content}</s>\n"
elif message.role == "assistant":
prompt += f"<|assistant|>\n{message.content}</s>\n"
# ensure we start with a system prompt, insert blank if needed
if not prompt.startswith("<|system|>\n"):
prompt = "<|system|>\n</s>\n" + prompt
# add final assistant prompt
prompt = prompt + "<|assistant|>\n"
return prompt
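# e.g. a single user message "Hi" is rendered as:
# "<|system|>\n</s>\n<|user|>\nHi</s>\n<|assistant|>\n"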
def main(args):
embed_model = HuggingFaceEmbedding(model_name=args.embedding_model_path)
# Use custom LLM in BigDL
from bigdl.llm.llamaindex.llms import BigdlLLM
llm = BigdlLLM(
model_name=args.model_path,
tokenizer_name=args.model_path,
context_window=512,
max_new_tokens=args.n_predict,
generate_kwargs={"temperature": 0.7, "do_sample": False},
model_kwargs={},
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
device_map="xpu",
)
vector_store = load_vector_database(username=args.user, password=args.password)
nodes = load_data(data_path=args.data)
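    # embed each node up front so the vector can be stored alongside its text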
for node in nodes:
node_embedding = embed_model.get_text_embedding(
node.get_content(metadata_mode="all")
)
node.embedding = node_embedding
vector_store.add(nodes)
# query_str = "Can you tell me about the key concepts for safety finetuning"
query_str = "Explain about the training data for Llama 2"
query_embedding = embed_model.get_query_embedding(query_str)
# construct vector store query
query_mode = "default"
# query_mode = "sparse"
# query_mode = "hybrid"
vector_store_query = VectorStoreQuery(
query_embedding=query_embedding, similarity_top_k=2, mode=query_mode
)
# returns a VectorStoreQueryResult
query_result = vector_store.query(vector_store_query)
# print("Retrieval Results: ")
# print(query_result.nodes[0].get_content())
nodes_with_scores = []
for index, node in enumerate(query_result.nodes):
score: Optional[float] = None
if query_result.similarities is not None:
score = query_result.similarities[index]
nodes_with_scores.append(NodeWithScore(node=node, score=score))
retriever = VectorDBRetriever(
vector_store, embed_model, query_mode="default", similarity_top_k=1
)
query_engine = RetrieverQueryEngine.from_args(retriever, llm=llm)
# query_str = "How does Llama 2 perform compared to other open-source models?"
query_str = args.question
response = query_engine.query(query_str)
print("------------RESPONSE GENERATION---------------------")
print(str(response))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='LlamaIndex BigdlLLM Example')
parser.add_argument('-m','--model-path', type=str, required=True,
help='the path to transformers model')
parser.add_argument('-q', '--question', type=str, default='How does Llama 2 perform compared to other open-source models?',
                        help='question you want to ask.')
parser.add_argument('-d','--data',type=str, default='./data/llama2.pdf',
help="the data used during retrieval")
parser.add_argument('-u', '--user', type=str, required=True,
help="user name in the database postgres")
parser.add_argument('-p','--password', type=str, required=True,
help="the password of the user in the database")
parser.add_argument('-e','--embedding-model-path',default="BAAI/bge-small-en",
help="the path to embedding model path")
parser.add_argument('-n','--n-predict', type=int, default=32,
help='max number of predict tokens')
args = parser.parse_args()
main(args) | [
"llama_index.vector_stores.postgres.PGVectorStore.from_params",
"llama_index.embeddings.huggingface.HuggingFaceEmbedding",
"llama_index.core.schema.TextNode",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.schema.NodeWithScore",
"llama_index.core.vector_stores.VectorStoreQuery",
"llama_index.core.query_engine.RetrieverQueryEngine.from_args",
"llama_index.readers.file.PyMuPDFReader"
] | [((1521, 1612), 'psycopg2.connect', 'psycopg2.connect', ([], {'dbname': '"""postgres"""', 'host': 'host', 'password': 'password', 'port': 'port', 'user': 'user'}), "(dbname='postgres', host=host, password=password, port=port,\n user=user)\n", (1537, 1612), False, 'import psycopg2\n'), ((1841, 1982), 'llama_index.vector_stores.postgres.PGVectorStore.from_params', 'PGVectorStore.from_params', ([], {'database': 'db_name', 'host': 'host', 'password': 'password', 'port': 'port', 'user': 'user', 'table_name': '"""llama2_paper"""', 'embed_dim': '(384)'}), "(database=db_name, host=host, password=password,\n port=port, user=user, table_name='llama2_paper', embed_dim=384)\n", (1866, 1982), False, 'from llama_index.vector_stores.postgres import PGVectorStore\n'), ((2137, 2152), 'llama_index.readers.file.PyMuPDFReader', 'PyMuPDFReader', ([], {}), '()\n', (2150, 2152), False, 'from llama_index.readers.file import PyMuPDFReader\n'), ((2222, 2255), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(1024)'}), '(chunk_size=1024)\n', (2238, 2255), False, 'from llama_index.core.node_parser import SentenceSplitter\n'), ((5116, 5174), 'llama_index.embeddings.huggingface.HuggingFaceEmbedding', 'HuggingFaceEmbedding', ([], {'model_name': 'args.embedding_model_path'}), '(model_name=args.embedding_model_path)\n', (5136, 5174), False, 'from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n'), ((5271, 5583), 'bigdl.llm.llamaindex.llms.BigdlLLM', 'BigdlLLM', ([], {'model_name': 'args.model_path', 'tokenizer_name': 'args.model_path', 'context_window': '(512)', 'max_new_tokens': 'args.n_predict', 'generate_kwargs': "{'temperature': 0.7, 'do_sample': False}", 'model_kwargs': '{}', 'messages_to_prompt': 'messages_to_prompt', 'completion_to_prompt': 'completion_to_prompt', 'device_map': '"""xpu"""'}), "(model_name=args.model_path, tokenizer_name=args.model_path,\n context_window=512, max_new_tokens=args.n_predict, generate_kwargs={\n 'temperature': 0.7, 'do_sample': False}, model_kwargs={},\n messages_to_prompt=messages_to_prompt, completion_to_prompt=\n completion_to_prompt, device_map='xpu')\n", (5279, 5583), False, 'from bigdl.llm.llamaindex.llms import BigdlLLM\n'), ((6353, 6444), 'llama_index.core.vector_stores.VectorStoreQuery', 'VectorStoreQuery', ([], {'query_embedding': 'query_embedding', 'similarity_top_k': '(2)', 'mode': 'query_mode'}), '(query_embedding=query_embedding, similarity_top_k=2, mode=\n query_mode)\n', (6369, 6444), False, 'from llama_index.core.vector_stores import VectorStoreQuery\n'), ((7083, 7133), 'llama_index.core.query_engine.RetrieverQueryEngine.from_args', 'RetrieverQueryEngine.from_args', (['retriever'], {'llm': 'llm'}), '(retriever, llm=llm)\n', (7113, 7133), False, 'from llama_index.core.query_engine import RetrieverQueryEngine\n'), ((7428, 7494), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""LlamaIndex BigdlLLM Example"""'}), "(description='LlamaIndex BigdlLLM Example')\n", (7451, 7494), False, 'import argparse\n'), ((2759, 2784), 'llama_index.core.schema.TextNode', 'TextNode', ([], {'text': 'text_chunk'}), '(text=text_chunk)\n', (2767, 2784), False, 'from llama_index.core.schema import TextNode\n'), ((3682, 3800), 'llama_index.core.vector_stores.VectorStoreQuery', 'VectorStoreQuery', ([], {'query_embedding': 'query_embedding', 'similarity_top_k': 'self._similarity_top_k', 'mode': 'self._query_mode'}), '(query_embedding=query_embedding, similarity_top_k=self.\n _similarity_top_k, 
mode=self._query_mode)\n', (3698, 3800), False, 'from llama_index.core.vector_stores import VectorStoreQuery\n'), ((6893, 6930), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'node', 'score': 'score'}), '(node=node, score=score)\n', (6906, 6930), False, 'from llama_index.core.schema import NodeWithScore\n'), ((4191, 4228), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'node', 'score': 'score'}), '(node=node, score=score)\n', (4204, 4228), False, 'from llama_index.core.schema import NodeWithScore\n')] |
import os
import logging
import hashlib
import random
import uuid
import openai
from pathlib import Path
from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage
from llama_index.readers.schema.base import Document
from langchain.chat_models import ChatOpenAI
from azure.cognitiveservices.speech import SpeechConfig, SpeechSynthesizer, ResultReason, CancellationReason, SpeechSynthesisOutputFormat
from azure.cognitiveservices.speech.audio import AudioOutputConfig
from app.fetch_web_post import get_urls, get_youtube_transcript, scrape_website, scrape_website_by_phantomjscloud
from app.prompt import get_prompt_template
from app.util import get_language_code, get_youtube_video_id
OPENAI_API_KEY = os.environ.get('OPENAI_API_KEY')
SPEECH_KEY = os.environ.get('SPEECH_KEY')
SPEECH_REGION = os.environ.get('SPEECH_REGION')
openai.api_key = OPENAI_API_KEY
index_cache_web_dir = Path('/tmp/myGPTReader/cache_web/')
index_cache_file_dir = Path('/data/myGPTReader/file/')
index_cache_voice_dir = Path('/tmp/myGPTReader/voice/')
if not index_cache_web_dir.is_dir():
index_cache_web_dir.mkdir(parents=True, exist_ok=True)
if not index_cache_voice_dir.is_dir():
index_cache_voice_dir.mkdir(parents=True, exist_ok=True)
if not index_cache_file_dir.is_dir():
index_cache_file_dir.mkdir(parents=True, exist_ok=True)
llm_predictor = LLMPredictor(llm=ChatOpenAI(
temperature=0, model_name="gpt-3.5-turbo"))
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor)
web_storage_context = StorageContext.from_defaults()
file_storage_context = StorageContext.from_defaults()
def get_unique_md5(urls):
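    """Return a deterministic MD5 digest for a set of URLs (order-insensitive), used as the index cache id."""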
urls_str = ''.join(sorted(urls))
hashed_str = hashlib.md5(urls_str.encode('utf-8')).hexdigest()
return hashed_str
def format_dialog_messages(messages):
return "\n".join(messages)
def get_document_from_youtube_id(video_id):
if video_id is None:
return None
transcript = get_youtube_transcript(video_id)
if transcript is None:
return None
return Document(transcript)
def remove_prompt_from_text(text):
return text.replace('chatGPT:', '').strip()
def get_documents_from_urls(urls):
documents = []
for url in urls['page_urls']:
document = Document(scrape_website(url))
documents.append(document)
if len(urls['rss_urls']) > 0:
rss_documents = RssReader().load_data(urls['rss_urls'])
documents = documents + rss_documents
if len(urls['phantomjscloud_urls']) > 0:
for url in urls['phantomjscloud_urls']:
document = Document(scrape_website_by_phantomjscloud(url))
documents.append(document)
if len(urls['youtube_urls']) > 0:
for url in urls['youtube_urls']:
video_id = get_youtube_video_id(url)
document = get_document_from_youtube_id(video_id)
            if document is not None:
documents.append(document)
else:
documents.append(Document(f"Can't get transcript from youtube video: {url}"))
return documents
def get_index_from_web_cache(name):
try:
index = load_index_from_storage(web_storage_context, index_id=name)
except Exception as e:
logging.error(e)
return None
return index
def get_index_from_file_cache(name):
try:
index = load_index_from_storage(file_storage_context, index_id=name)
except Exception as e:
logging.error(e)
return None
return index
def get_index_name_from_file(file: str):
file_md5_with_extension = str(Path(file).relative_to(index_cache_file_dir).name)
file_md5 = file_md5_with_extension.split('.')[0]
return file_md5
def get_answer_from_chatGPT(messages):
dialog_messages = format_dialog_messages(messages)
logging.info('=====> Use chatGPT to answer!')
logging.info(dialog_messages)
completion = openai.ChatCompletion.create(
model="gpt-3.5-turbo",
messages=[{"role": "user", "content": dialog_messages}]
)
logging.info(completion.usage)
total_tokens = completion.usage.total_tokens
return completion.choices[0].message.content, total_tokens, None
def get_answer_from_llama_web(messages, urls):
dialog_messages = format_dialog_messages(messages)
lang_code = get_language_code(remove_prompt_from_text(messages[-1]))
combained_urls = get_urls(urls)
logging.info(combained_urls)
index_file_name = get_unique_md5(urls)
index = get_index_from_web_cache(index_file_name)
if index is None:
logging.info(f"=====> Build index from web!")
documents = get_documents_from_urls(combained_urls)
logging.info(documents)
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
index.set_index_id(index_file_name)
index.storage_context.persist()
logging.info(
f"=====> Save index to disk path: {index_cache_web_dir / index_file_name}")
prompt = get_prompt_template(lang_code)
logging.info('=====> Use llama web with chatGPT to answer!')
logging.info('=====> dialog_messages')
logging.info(dialog_messages)
logging.info('=====> text_qa_template')
logging.info(prompt.prompt)
answer = index.as_query_engine(text_qa_template=prompt).query(dialog_messages)
total_llm_model_tokens = llm_predictor.last_token_usage
total_embedding_model_tokens = service_context.embed_model.last_token_usage
return answer, total_llm_model_tokens, total_embedding_model_tokens
def get_answer_from_llama_file(messages, file):
dialog_messages = format_dialog_messages(messages)
lang_code = get_language_code(remove_prompt_from_text(messages[-1]))
index_name = get_index_name_from_file(file)
index = get_index_from_file_cache(index_name)
if index is None:
logging.info(f"=====> Build index from file!")
documents = SimpleDirectoryReader(input_files=[file]).load_data()
index = GPTVectorStoreIndex.from_documents(documents, service_context=service_context)
index.set_index_id(index_name)
index.storage_context.persist()
logging.info(
f"=====> Save index to disk path: {index_cache_file_dir / index_name}")
prompt = get_prompt_template(lang_code)
logging.info('=====> Use llama file with chatGPT to answer!')
logging.info('=====> dialog_messages')
logging.info(dialog_messages)
logging.info('=====> text_qa_template')
logging.info(prompt)
    answer = index.as_query_engine(text_qa_template=prompt).query(dialog_messages)
total_llm_model_tokens = llm_predictor.last_token_usage
total_embedding_model_tokens = service_context.embed_model.last_token_usage
return answer, total_llm_model_tokens, total_embedding_model_tokens
def get_text_from_whisper(voice_file_path):
with open(voice_file_path, "rb") as f:
transcript = openai.Audio.transcribe("whisper-1", f)
return transcript.text
lang_code_voice_map = {
'zh': ['zh-CN-XiaoxiaoNeural', 'zh-CN-XiaohanNeural', 'zh-CN-YunxiNeural', 'zh-CN-YunyangNeural'],
'en': ['en-US-JennyNeural', 'en-US-RogerNeural', 'en-IN-NeerjaNeural', 'en-IN-PrabhatNeural', 'en-AU-AnnetteNeural', 'en-AU-CarlyNeural', 'en-GB-AbbiNeural', 'en-GB-AlfieNeural'],
'ja': ['ja-JP-AoiNeural', 'ja-JP-DaichiNeural'],
'de': ['de-DE-AmalaNeural', 'de-DE-BerndNeural'],
}
def convert_to_ssml(text, voice_name=None):
try:
logging.info("=====> Convert text to ssml!")
logging.info(text)
text = remove_prompt_from_text(text)
lang_code = get_language_code(text)
if voice_name is None:
voice_name = random.choice(lang_code_voice_map[lang_code])
except Exception as e:
logging.warning(f"Error: {e}. Using default voice.")
voice_name = random.choice(lang_code_voice_map['zh'])
ssml = '<speak version="1.0" xmlns="http://www.w3.org/2001/10/synthesis" xml:lang="zh-CN">'
ssml += f'<voice name="{voice_name}">{text}</voice>'
ssml += '</speak>'
return ssml
def get_voice_file_from_text(text, voice_name=None):
speech_config = SpeechConfig(subscription=SPEECH_KEY, region=SPEECH_REGION)
speech_config.set_speech_synthesis_output_format(
SpeechSynthesisOutputFormat.Audio16Khz32KBitRateMonoMp3)
speech_config.speech_synthesis_language = "zh-CN"
file_name = f"{index_cache_voice_dir}{uuid.uuid4()}.mp3"
file_config = AudioOutputConfig(filename=file_name)
synthesizer = SpeechSynthesizer(
speech_config=speech_config, audio_config=file_config)
ssml = convert_to_ssml(text, voice_name)
result = synthesizer.speak_ssml_async(ssml).get()
if result.reason == ResultReason.SynthesizingAudioCompleted:
logging.info("Speech synthesized for text [{}], and the audio was saved to [{}]".format(
text, file_name))
elif result.reason == ResultReason.Canceled:
cancellation_details = result.cancellation_details
logging.info("Speech synthesis canceled: {}".format(
cancellation_details.reason))
if cancellation_details.reason == CancellationReason.Error:
logging.error("Error details: {}".format(
cancellation_details.error_details))
return file_name
| [
"llama_index.RssReader",
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.load_index_from_storage",
"llama_index.GPTVectorStoreIndex.from_documents",
"llama_index.readers.schema.base.Document"
] | [((795, 827), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (809, 827), False, 'import os\n'), ((841, 869), 'os.environ.get', 'os.environ.get', (['"""SPEECH_KEY"""'], {}), "('SPEECH_KEY')\n", (855, 869), False, 'import os\n'), ((886, 917), 'os.environ.get', 'os.environ.get', (['"""SPEECH_REGION"""'], {}), "('SPEECH_REGION')\n", (900, 917), False, 'import os\n'), ((973, 1008), 'pathlib.Path', 'Path', (['"""/tmp/myGPTReader/cache_web/"""'], {}), "('/tmp/myGPTReader/cache_web/')\n", (977, 1008), False, 'from pathlib import Path\n'), ((1032, 1063), 'pathlib.Path', 'Path', (['"""/data/myGPTReader/file/"""'], {}), "('/data/myGPTReader/file/')\n", (1036, 1063), False, 'from pathlib import Path\n'), ((1088, 1119), 'pathlib.Path', 'Path', (['"""/tmp/myGPTReader/voice/"""'], {}), "('/tmp/myGPTReader/voice/')\n", (1092, 1119), False, 'from pathlib import Path\n'), ((1530, 1587), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor'}), '(llm_predictor=llm_predictor)\n', (1558, 1587), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((1611, 1641), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (1639, 1641), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((1665, 1695), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {}), '()\n', (1693, 1695), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((2026, 2058), 'app.fetch_web_post.get_youtube_transcript', 'get_youtube_transcript', (['video_id'], {}), '(video_id)\n', (2048, 2058), False, 'from app.fetch_web_post import get_urls, get_youtube_transcript, scrape_website, scrape_website_by_phantomjscloud\n'), ((2117, 2137), 'llama_index.readers.schema.base.Document', 'Document', (['transcript'], {}), '(transcript)\n', (2125, 2137), False, 'from llama_index.readers.schema.base import Document\n'), ((3870, 3915), 'logging.info', 'logging.info', (['"""=====> Use chatGPT to answer!"""'], {}), "('=====> Use chatGPT to answer!')\n", (3882, 3915), False, 'import logging\n'), ((3920, 3949), 'logging.info', 'logging.info', (['dialog_messages'], {}), '(dialog_messages)\n', (3932, 3949), False, 'import logging\n'), ((3967, 4079), 'openai.ChatCompletion.create', 'openai.ChatCompletion.create', ([], {'model': '"""gpt-3.5-turbo"""', 'messages': "[{'role': 'user', 'content': dialog_messages}]"}), "(model='gpt-3.5-turbo', messages=[{'role':\n 'user', 'content': dialog_messages}])\n", (3995, 4079), False, 'import openai\n'), ((4102, 4132), 'logging.info', 'logging.info', (['completion.usage'], {}), '(completion.usage)\n', (4114, 4132), False, 'import logging\n'), ((4448, 4462), 'app.fetch_web_post.get_urls', 'get_urls', (['urls'], {}), '(urls)\n', (4456, 4462), False, 'from app.fetch_web_post import get_urls, get_youtube_transcript, scrape_website, scrape_website_by_phantomjscloud\n'), ((4467, 4495), 'logging.info', 'logging.info', (['combained_urls'], {}), '(combained_urls)\n', (4479, 4495), False, 'import logging\n'), ((5063, 5093), 'app.prompt.get_prompt_template', 'get_prompt_template', (['lang_code'], {}), '(lang_code)\n', (5082, 5093), False, 'from 
app.prompt import get_prompt_template\n'), ((5098, 5158), 'logging.info', 'logging.info', (['"""=====> Use llama web with chatGPT to answer!"""'], {}), "('=====> Use llama web with chatGPT to answer!')\n", (5110, 5158), False, 'import logging\n'), ((5163, 5201), 'logging.info', 'logging.info', (['"""=====> dialog_messages"""'], {}), "('=====> dialog_messages')\n", (5175, 5201), False, 'import logging\n'), ((5206, 5235), 'logging.info', 'logging.info', (['dialog_messages'], {}), '(dialog_messages)\n', (5218, 5235), False, 'import logging\n'), ((5240, 5279), 'logging.info', 'logging.info', (['"""=====> text_qa_template"""'], {}), "('=====> text_qa_template')\n", (5252, 5279), False, 'import logging\n'), ((5284, 5311), 'logging.info', 'logging.info', (['prompt.prompt'], {}), '(prompt.prompt)\n', (5296, 5311), False, 'import logging\n'), ((6326, 6356), 'app.prompt.get_prompt_template', 'get_prompt_template', (['lang_code'], {}), '(lang_code)\n', (6345, 6356), False, 'from app.prompt import get_prompt_template\n'), ((6361, 6422), 'logging.info', 'logging.info', (['"""=====> Use llama file with chatGPT to answer!"""'], {}), "('=====> Use llama file with chatGPT to answer!')\n", (6373, 6422), False, 'import logging\n'), ((6427, 6465), 'logging.info', 'logging.info', (['"""=====> dialog_messages"""'], {}), "('=====> dialog_messages')\n", (6439, 6465), False, 'import logging\n'), ((6470, 6499), 'logging.info', 'logging.info', (['dialog_messages'], {}), '(dialog_messages)\n', (6482, 6499), False, 'import logging\n'), ((6504, 6543), 'logging.info', 'logging.info', (['"""=====> text_qa_template"""'], {}), "('=====> text_qa_template')\n", (6516, 6543), False, 'import logging\n'), ((6548, 6568), 'logging.info', 'logging.info', (['prompt'], {}), '(prompt)\n', (6560, 6568), False, 'import logging\n'), ((8212, 8271), 'azure.cognitiveservices.speech.SpeechConfig', 'SpeechConfig', ([], {'subscription': 'SPEECH_KEY', 'region': 'SPEECH_REGION'}), '(subscription=SPEECH_KEY, region=SPEECH_REGION)\n', (8224, 8271), False, 'from azure.cognitiveservices.speech import SpeechConfig, SpeechSynthesizer, ResultReason, CancellationReason, SpeechSynthesisOutputFormat\n'), ((8524, 8561), 'azure.cognitiveservices.speech.audio.AudioOutputConfig', 'AudioOutputConfig', ([], {'filename': 'file_name'}), '(filename=file_name)\n', (8541, 8561), False, 'from azure.cognitiveservices.speech.audio import AudioOutputConfig\n'), ((8580, 8652), 'azure.cognitiveservices.speech.SpeechSynthesizer', 'SpeechSynthesizer', ([], {'speech_config': 'speech_config', 'audio_config': 'file_config'}), '(speech_config=speech_config, audio_config=file_config)\n', (8597, 8652), False, 'from azure.cognitiveservices.speech import SpeechConfig, SpeechSynthesizer, ResultReason, CancellationReason, SpeechSynthesisOutputFormat\n'), ((1451, 1504), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (1461, 1504), False, 'from langchain.chat_models import ChatOpenAI\n'), ((3209, 3268), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['web_storage_context'], {'index_id': 'name'}), '(web_storage_context, index_id=name)\n', (3232, 3268), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((3421, 3481), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['file_storage_context'], {'index_id': 'name'}), 
'(file_storage_context, index_id=name)\n', (3444, 3481), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((4623, 4668), 'logging.info', 'logging.info', (['f"""=====> Build index from web!"""'], {}), "(f'=====> Build index from web!')\n", (4635, 4668), False, 'import logging\n'), ((4737, 4760), 'logging.info', 'logging.info', (['documents'], {}), '(documents)\n', (4749, 4760), False, 'import logging\n'), ((4777, 4855), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (4811, 4855), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((4948, 5041), 'logging.info', 'logging.info', (['f"""=====> Save index to disk path: {index_cache_web_dir / index_file_name}"""'], {}), "(\n f'=====> Save index to disk path: {index_cache_web_dir / index_file_name}')\n", (4960, 5041), False, 'import logging\n'), ((5912, 5958), 'logging.info', 'logging.info', (['f"""=====> Build index from file!"""'], {}), "(f'=====> Build index from file!')\n", (5924, 5958), False, 'import logging\n'), ((6049, 6127), 'llama_index.GPTVectorStoreIndex.from_documents', 'GPTVectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (6083, 6127), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((6215, 6304), 'logging.info', 'logging.info', (['f"""=====> Save index to disk path: {index_cache_file_dir / index_name}"""'], {}), "(\n f'=====> Save index to disk path: {index_cache_file_dir / index_name}')\n", (6227, 6304), False, 'import logging\n'), ((6982, 7021), 'openai.Audio.transcribe', 'openai.Audio.transcribe', (['"""whisper-1"""', 'f'], {}), "('whisper-1', f)\n", (7005, 7021), False, 'import openai\n'), ((7532, 7576), 'logging.info', 'logging.info', (['"""=====> Convert text to ssml!"""'], {}), "('=====> Convert text to ssml!')\n", (7544, 7576), False, 'import logging\n'), ((7585, 7603), 'logging.info', 'logging.info', (['text'], {}), '(text)\n', (7597, 7603), False, 'import logging\n'), ((7669, 7692), 'app.util.get_language_code', 'get_language_code', (['text'], {}), '(text)\n', (7686, 7692), False, 'from app.util import get_language_code, get_youtube_video_id\n'), ((2339, 2358), 'app.fetch_web_post.scrape_website', 'scrape_website', (['url'], {}), '(url)\n', (2353, 2358), False, 'from app.fetch_web_post import get_urls, get_youtube_transcript, scrape_website, scrape_website_by_phantomjscloud\n'), ((2844, 2869), 'app.util.get_youtube_video_id', 'get_youtube_video_id', (['url'], {}), '(url)\n', (2864, 2869), False, 'from app.util import get_language_code, get_youtube_video_id\n'), ((3304, 3320), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (3317, 3320), False, 'import logging\n'), ((3517, 3533), 'logging.error', 'logging.error', (['e'], {}), '(e)\n', (3530, 3533), False, 'import logging\n'), ((7749, 7794), 'random.choice', 'random.choice', (['lang_code_voice_map[lang_code]'], {}), '(lang_code_voice_map[lang_code])\n', (7762, 7794), False, 'import random\n'), ((7830, 7882), 'logging.warning', 'logging.warning', (['f"""Error: {e}. 
Using default voice."""'], {}), "(f'Error: {e}. Using default voice.')\n", (7845, 7882), False, 'import logging\n'), ((7904, 7944), 'random.choice', 'random.choice', (["lang_code_voice_map['zh']"], {}), "(lang_code_voice_map['zh'])\n", (7917, 7944), False, 'import random\n'), ((8487, 8499), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8497, 8499), False, 'import uuid\n'), ((2453, 2464), 'llama_index.RssReader', 'RssReader', ([], {}), '()\n', (2462, 2464), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((2664, 2701), 'app.fetch_web_post.scrape_website_by_phantomjscloud', 'scrape_website_by_phantomjscloud', (['url'], {}), '(url)\n', (2696, 2701), False, 'from app.fetch_web_post import get_urls, get_youtube_transcript, scrape_website, scrape_website_by_phantomjscloud\n'), ((5979, 6020), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file]'}), '(input_files=[file])\n', (6000, 6020), False, 'from llama_index import ServiceContext, GPTVectorStoreIndex, LLMPredictor, RssReader, SimpleDirectoryReader, StorageContext, load_index_from_storage\n'), ((3065, 3124), 'llama_index.readers.schema.base.Document', 'Document', (['f"""Can\'t get transcript from youtube video: {url}"""'], {}), '(f"Can\'t get transcript from youtube video: {url}")\n', (3073, 3124), False, 'from llama_index.readers.schema.base import Document\n'), ((3647, 3657), 'pathlib.Path', 'Path', (['file'], {}), '(file)\n', (3651, 3657), False, 'from pathlib import Path\n')] |
"""Configuration."""
import streamlit as st
import os
### DEFINE BUILDER_LLM #####
## Uncomment the LLM you want to use to construct the meta agent
## OpenAI
from llama_index.llms import OpenAI
# set OpenAI Key - use Streamlit secrets
os.environ["OPENAI_API_KEY"] = st.secrets.openai_key
# load LLM
BUILDER_LLM = OpenAI(model="gpt-4-1106-preview")
# # Anthropic (make sure you `pip install anthropic`)
# from llama_index.llms import Anthropic
# # set Anthropic key
# os.environ["ANTHROPIC_API_KEY"] = st.secrets.anthropic_key
# BUILDER_LLM = Anthropic()
| [
"llama_index.llms.OpenAI"
] | [((316, 350), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': '"""gpt-4-1106-preview"""'}), "(model='gpt-4-1106-preview')\n", (322, 350), False, 'from llama_index.llms import OpenAI\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2024/1/4 20:58
@Author : alexanderwu
@File : embedding.py
"""
from llama_index.embeddings.openai import OpenAIEmbedding
from metagpt.config2 import config
def get_embedding() -> OpenAIEmbedding:
llm = config.get_openai_llm()
if llm is None:
raise ValueError("To use OpenAIEmbedding, please ensure that config.llm.api_type is correctly set to 'openai'.")
embedding = OpenAIEmbedding(api_key=llm.api_key, api_base=llm.base_url)
return embedding
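# Example usage (assuming config.llm points at an OpenAI model):
#   embedding = get_embedding()
#   vector = embedding.get_text_embedding("hello world")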
| [
"llama_index.embeddings.openai.OpenAIEmbedding"
] | [((273, 296), 'metagpt.config2.config.get_openai_llm', 'config.get_openai_llm', ([], {}), '()\n', (294, 296), False, 'from metagpt.config2 import config\n'), ((455, 514), 'llama_index.embeddings.openai.OpenAIEmbedding', 'OpenAIEmbedding', ([], {'api_key': 'llm.api_key', 'api_base': 'llm.base_url'}), '(api_key=llm.api_key, api_base=llm.base_url)\n', (470, 514), False, 'from llama_index.embeddings.openai import OpenAIEmbedding\n')] |
import os
# Uncomment to specify your OpenAI API key here (local testing only, not in production!), or add corresponding environment variable (recommended)
# os.environ['OPENAI_API_KEY']= ""
from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext
from langchain.llms.openai import OpenAI
from llama_index import StorageContext, load_index_from_storage
base_path = os.environ.get('OPENAI_API_BASE', 'http://localhost:8080/v1')
# This example uses text-davinci-003 by default; feel free to change if desired
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="gpt-3.5-turbo", openai_api_base=base_path))
# Configure prompt parameters and initialise helper
max_input_size = 512
num_output = 256
max_chunk_overlap = 20
prompt_helper = PromptHelper(max_input_size, num_output, max_chunk_overlap)
# Configure the service context with the LLM predictor and prompt helper
service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
# rebuild storage context
storage_context = StorageContext.from_defaults(persist_dir='./storage')
# load index
index = load_index_from_storage(storage_context, service_context=service_context)
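# Load documents from the 'data' directory and refresh the index with them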
documents = SimpleDirectoryReader('data').load_data()
index.refresh(documents)
index.storage_context.persist(persist_dir="./storage") | [
"llama_index.SimpleDirectoryReader",
"llama_index.ServiceContext.from_defaults",
"llama_index.StorageContext.from_defaults",
"llama_index.PromptHelper",
"llama_index.load_index_from_storage"
] | [((403, 464), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_BASE"""', '"""http://localhost:8080/v1"""'], {}), "('OPENAI_API_BASE', 'http://localhost:8080/v1')\n", (417, 464), False, 'import os\n'), ((788, 847), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_input_size, num_output, max_chunk_overlap)\n', (800, 847), False, 'from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext\n'), ((910, 1001), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm_predictor': 'llm_predictor', 'prompt_helper': 'prompt_helper'}), '(llm_predictor=llm_predictor, prompt_helper=\n prompt_helper)\n', (938, 1001), False, 'from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext\n'), ((1042, 1095), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'persist_dir': '"""./storage"""'}), "(persist_dir='./storage')\n", (1070, 1095), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((1118, 1191), 'llama_index.load_index_from_storage', 'load_index_from_storage', (['storage_context'], {'service_context': 'service_context'}), '(storage_context, service_context=service_context)\n', (1141, 1191), False, 'from llama_index import StorageContext, load_index_from_storage\n'), ((579, 655), 'langchain.llms.openai.OpenAI', 'OpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""', 'openai_api_base': 'base_path'}), "(temperature=0, model_name='gpt-3.5-turbo', openai_api_base=base_path)\n", (585, 655), False, 'from langchain.llms.openai import OpenAI\n'), ((1213, 1242), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['"""data"""'], {}), "('data')\n", (1234, 1242), False, 'from llama_index import LLMPredictor, PromptHelper, SimpleDirectoryReader, ServiceContext\n')] |
from memgpt.data_types import Passage, Document, EmbeddingConfig, Source
from memgpt.utils import create_uuid_from_string
from memgpt.agent_store.storage import StorageConnector, TableType
from memgpt.embeddings import embedding_model
from typing import List, Iterator, Dict, Tuple, Optional
import typer
from llama_index.core import Document as LlamaIndexDocument
class DataConnector:
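    """Base interface for data connectors: implementations yield (text, metadata) tuples for documents and passages."""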
def generate_documents(self) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Document]:
pass
def generate_passages(self, documents: List[Document], chunk_size: int = 1024) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Passage]:
pass
def load_data(
connector: DataConnector,
source: Source,
embedding_config: EmbeddingConfig,
passage_store: StorageConnector,
document_store: Optional[StorageConnector] = None,
):
"""Load data from a connector (generates documents and passages) into a specified source_id, associatedw with a user_id."""
assert (
source.embedding_model == embedding_config.embedding_model
), f"Source and embedding config models must match, got: {source.embedding_model} and {embedding_config.embedding_model}"
assert (
source.embedding_dim == embedding_config.embedding_dim
), f"Source and embedding config dimensions must match, got: {source.embedding_dim} and {embedding_config.embedding_dim}."
# embedding model
embed_model = embedding_model(embedding_config)
# insert passages/documents
passages = []
passage_count = 0
document_count = 0
for document_text, document_metadata in connector.generate_documents():
# insert document into storage
document = Document(
id=create_uuid_from_string(f"{str(source.id)}_{document_text}"),
text=document_text,
metadata=document_metadata,
data_source=source.name,
user_id=source.user_id,
)
document_count += 1
if document_store:
document_store.insert(document)
# generate passages
for passage_text, passage_metadata in connector.generate_passages([document], chunk_size=embedding_config.embedding_chunk_size):
try:
embedding = embed_model.get_text_embedding(passage_text)
except Exception as e:
typer.secho(
f"Warning: Failed to get embedding for {passage_text} (error: {str(e)}), skipping insert into VectorDB.",
fg=typer.colors.YELLOW,
)
continue
passage = Passage(
id=create_uuid_from_string(f"{str(source.id)}_{passage_text}"),
text=passage_text,
doc_id=document.id,
metadata_=passage_metadata,
user_id=source.user_id,
data_source=source.name,
embedding_dim=source.embedding_dim,
embedding_model=source.embedding_model,
embedding=embedding,
)
passages.append(passage)
if len(passages) >= embedding_config.embedding_chunk_size:
# insert passages into passage store
passage_store.insert_many(passages)
passage_count += len(passages)
passages = []
if len(passages) > 0:
# insert passages into passage store
passage_store.insert_many(passages)
passage_count += len(passages)
return passage_count, document_count
class DirectoryConnector(DataConnector):
def __init__(self, input_files: List[str] = None, input_directory: str = None, recursive: bool = False, extensions: List[str] = None):
self.connector_type = "directory"
self.input_files = input_files
self.input_directory = input_directory
self.recursive = recursive
self.extensions = extensions
        if self.recursive:
assert self.input_directory is not None, "Must provide input directory if recursive is True."
def generate_documents(self) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Document]:
from llama_index.core import SimpleDirectoryReader
if self.input_directory is not None:
reader = SimpleDirectoryReader(
input_dir=self.input_directory,
recursive=self.recursive,
required_exts=[ext.strip() for ext in str(self.extensions).split(",")],
)
else:
assert self.input_files is not None, "Must provide input files if input_dir is None"
reader = SimpleDirectoryReader(input_files=[str(f) for f in self.input_files])
llama_index_docs = reader.load_data(show_progress=True)
for llama_index_doc in llama_index_docs:
# TODO: add additional metadata?
# doc = Document(text=llama_index_doc.text, metadata=llama_index_doc.metadata)
# docs.append(doc)
yield llama_index_doc.text, llama_index_doc.metadata
def generate_passages(self, documents: List[Document], chunk_size: int = 1024) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Passage]:
# use llama index to run embeddings code
# from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.node_parser import TokenTextSplitter
parser = TokenTextSplitter(chunk_size=chunk_size)
for document in documents:
llama_index_docs = [LlamaIndexDocument(text=document.text, metadata=document.metadata)]
nodes = parser.get_nodes_from_documents(llama_index_docs)
for node in nodes:
# passage = Passage(
# text=node.text,
# doc_id=document.id,
# )
yield node.text, None
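# Minimal usage sketch (hypothetical paths; the Source, EmbeddingConfig and
# StorageConnector objects are constructed elsewhere in MemGPT):
#   connector = DirectoryConnector(input_directory="./docs", recursive=True, extensions=".txt,.md")
#   passage_count, document_count = load_data(connector, source, embedding_config, passage_store)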
class WebConnector(DirectoryConnector):
def __init__(self, urls: List[str] = None, html_to_text: bool = True):
self.urls = urls
self.html_to_text = html_to_text
def generate_documents(self) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Document]:
from llama_index.readers.web import SimpleWebPageReader
documents = SimpleWebPageReader(html_to_text=self.html_to_text).load_data(self.urls)
for document in documents:
yield document.text, {"url": document.id_}
class VectorDBConnector(DataConnector):
# NOTE: this class has not been properly tested, so is unlikely to work
# TODO: allow loading multiple tables (1:1 mapping between Document and Table)
def __init__(
self,
name: str,
uri: str,
table_name: str,
text_column: str,
embedding_column: str,
embedding_dim: int,
):
self.name = name
self.uri = uri
self.table_name = table_name
self.text_column = text_column
self.embedding_column = embedding_column
self.embedding_dim = embedding_dim
# connect to db table
from sqlalchemy import create_engine
self.engine = create_engine(uri)
def generate_documents(self) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Document]:
yield self.table_name, None
def generate_passages(self, documents: List[Document], chunk_size: int = 1024) -> Iterator[Tuple[str, Dict]]: # -> Iterator[Passage]:
from sqlalchemy import select, MetaData, Table, Inspector
from pgvector.sqlalchemy import Vector
metadata = MetaData()
# Create an inspector to inspect the database
inspector = Inspector.from_engine(self.engine)
table_names = inspector.get_table_names()
assert self.table_name in table_names, f"Table {self.table_name} not found in database: tables that exist {table_names}."
table = Table(self.table_name, metadata, autoload_with=self.engine)
# Prepare a select statement
select_statement = select(table.c[self.text_column], table.c[self.embedding_column].cast(Vector(self.embedding_dim)))
# Execute the query and fetch the results
# TODO: paginate results
with self.engine.connect() as connection:
result = connection.execute(select_statement).fetchall()
for text, embedding in result:
# assume that embeddings are the same model as in config
# TODO: don't re-compute embedding
yield text, {"embedding": embedding}
| [
"llama_index.core.node_parser.TokenTextSplitter",
"llama_index.readers.web.SimpleWebPageReader",
"llama_index.core.Document"
] | [((1472, 1505), 'memgpt.embeddings.embedding_model', 'embedding_model', (['embedding_config'], {}), '(embedding_config)\n', (1487, 1505), False, 'from memgpt.embeddings import embedding_model\n'), ((5412, 5452), 'llama_index.core.node_parser.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': 'chunk_size'}), '(chunk_size=chunk_size)\n', (5429, 5452), False, 'from llama_index.core.node_parser import TokenTextSplitter\n'), ((7087, 7105), 'sqlalchemy.create_engine', 'create_engine', (['uri'], {}), '(uri)\n', (7100, 7105), False, 'from sqlalchemy import create_engine\n'), ((7506, 7516), 'sqlalchemy.MetaData', 'MetaData', ([], {}), '()\n', (7514, 7516), False, 'from sqlalchemy import select, MetaData, Table, Inspector\n'), ((7591, 7625), 'sqlalchemy.Inspector.from_engine', 'Inspector.from_engine', (['self.engine'], {}), '(self.engine)\n', (7612, 7625), False, 'from sqlalchemy import select, MetaData, Table, Inspector\n'), ((7823, 7882), 'sqlalchemy.Table', 'Table', (['self.table_name', 'metadata'], {'autoload_with': 'self.engine'}), '(self.table_name, metadata, autoload_with=self.engine)\n', (7828, 7882), False, 'from sqlalchemy import select, MetaData, Table, Inspector\n'), ((5520, 5586), 'llama_index.core.Document', 'LlamaIndexDocument', ([], {'text': 'document.text', 'metadata': 'document.metadata'}), '(text=document.text, metadata=document.metadata)\n', (5538, 5586), True, 'from llama_index.core import Document as LlamaIndexDocument\n'), ((6221, 6272), 'llama_index.readers.web.SimpleWebPageReader', 'SimpleWebPageReader', ([], {'html_to_text': 'self.html_to_text'}), '(html_to_text=self.html_to_text)\n', (6240, 6272), False, 'from llama_index.readers.web import SimpleWebPageReader\n'), ((8018, 8044), 'pgvector.sqlalchemy.Vector', 'Vector', (['self.embedding_dim'], {}), '(self.embedding_dim)\n', (8024, 8044), False, 'from pgvector.sqlalchemy import Vector\n')] |
import os
from llama_index import SimpleDirectoryReader
from sqlalchemy.orm import Session
from superagi.config.config import get_config
from superagi.helper.resource_helper import ResourceHelper
from superagi.lib.logger import logger
from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory
from superagi.types.model_source_types import ModelSourceType
from superagi.types.vector_store_types import VectorStoreType
from superagi.models.agent import Agent
class ResourceManager:
"""
Resource Manager handles creation of resources and saving them to the vector store.
:param agent_id: The agent id to use when saving resources to the vector store.
"""
def __init__(self, agent_id: str = None):
self.agent_id = agent_id
def create_llama_document(self, file_path: str):
"""
Creates a document index from a given file path.
:param file_path: The file path to create the document index from.
:return: A list of documents.
"""
if file_path is None:
raise Exception("file_path must be provided")
if os.path.exists(file_path):
documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
return documents
def create_llama_document_s3(self, file_path: str):
"""
Creates a document index from a given file path.
:param file_path: The file path to create the document index from.
:return: A list of documents.
"""
if file_path is None:
raise Exception("file_path must be provided")
temporary_file_path = ""
try:
import boto3
s3 = boto3.client(
's3',
aws_access_key_id=get_config("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=get_config("AWS_SECRET_ACCESS_KEY"),
)
bucket_name = get_config("BUCKET_NAME")
file = s3.get_object(Bucket=bucket_name, Key=file_path)
file_name = file_path.split("/")[-1]
save_directory = "/"
temporary_file_path = save_directory + file_name
with open(temporary_file_path, "wb") as f:
contents = file['Body'].read()
f.write(contents)
documents = SimpleDirectoryReader(input_files=[temporary_file_path]).load_data()
return documents
except Exception as e:
logger.error("superagi/resource_manager/resource_manager.py - create_llama_document_s3 threw : ", e)
finally:
if os.path.exists(temporary_file_path):
os.remove(temporary_file_path)
def save_document_to_vector_store(self, documents: list, resource_id: str, mode_api_key: str = None,
model_source: str = ""):
"""
Saves a document to the vector store.
:param documents: The documents to save to the vector store.
:param resource_id: The resource id to use when saving the documents to the vector store.
        :param mode_api_key: The api key to use when creating embeddings for the vector store.
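        :param model_source: The source type of the model (e.g. OpenAI, Google Palm, Replicate).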
"""
from llama_index import VectorStoreIndex, StorageContext
if ModelSourceType.GooglePalm.value in model_source or ModelSourceType.Replicate.value in model_source:
logger.info("Resource embedding not supported for Google Palm..")
return
import openai
openai.api_key = get_config("OPENAI_API_KEY") or mode_api_key
os.environ["OPENAI_API_KEY"] = get_config("OPENAI_API_KEY", "") or mode_api_key
for docs in documents:
if docs.metadata is None:
docs.metadata = {}
docs.metadata["agent_id"] = str(self.agent_id)
docs.metadata["resource_id"] = resource_id
vector_store = None
storage_context = None
vector_store_name = VectorStoreType.get_vector_store_type(get_config("RESOURCE_VECTOR_STORE") or "Redis")
vector_store_index_name = get_config("RESOURCE_VECTOR_STORE_INDEX_NAME") or "super-agent-index"
try:
vector_store = LlamaVectorStoreFactory(vector_store_name, vector_store_index_name).get_vector_store()
storage_context = StorageContext.from_defaults(vector_store=vector_store)
except ValueError as e:
logger.error(f"Vector store not found{e}")
try:
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
index.set_index_id(f'Agent {self.agent_id}')
except Exception as e:
logger.error("save_document_to_vector_store - unable to create documents from vector", e)
# persisting the data in case of redis
if vector_store_name == VectorStoreType.REDIS:
vector_store.persist(persist_path="")
| [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.SimpleDirectoryReader",
"llama_index.StorageContext.from_defaults"
] | [((1132, 1157), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (1146, 1157), False, 'import os\n'), ((1917, 1942), 'superagi.config.config.get_config', 'get_config', (['"""BUCKET_NAME"""'], {}), "('BUCKET_NAME')\n", (1927, 1942), False, 'from superagi.config.config import get_config\n'), ((2589, 2624), 'os.path.exists', 'os.path.exists', (['temporary_file_path'], {}), '(temporary_file_path)\n', (2603, 2624), False, 'import os\n'), ((3367, 3432), 'superagi.lib.logger.logger.info', 'logger.info', (['"""Resource embedding not supported for Google Palm.."""'], {}), "('Resource embedding not supported for Google Palm..')\n", (3378, 3432), False, 'from superagi.lib.logger import logger\n'), ((3499, 3527), 'superagi.config.config.get_config', 'get_config', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (3509, 3527), False, 'from superagi.config.config import get_config\n'), ((3583, 3615), 'superagi.config.config.get_config', 'get_config', (['"""OPENAI_API_KEY"""', '""""""'], {}), "('OPENAI_API_KEY', '')\n", (3593, 3615), False, 'from superagi.config.config import get_config\n'), ((4057, 4103), 'superagi.config.config.get_config', 'get_config', (['"""RESOURCE_VECTOR_STORE_INDEX_NAME"""'], {}), "('RESOURCE_VECTOR_STORE_INDEX_NAME')\n", (4067, 4103), False, 'from superagi.config.config import get_config\n'), ((4284, 4339), 'llama_index.StorageContext.from_defaults', 'StorageContext.from_defaults', ([], {'vector_store': 'vector_store'}), '(vector_store=vector_store)\n', (4312, 4339), False, 'from llama_index import VectorStoreIndex, StorageContext\n'), ((4460, 4535), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'storage_context': 'storage_context'}), '(documents, storage_context=storage_context)\n', (4491, 4535), False, 'from llama_index import VectorStoreIndex, StorageContext\n'), ((2456, 2566), 'superagi.lib.logger.logger.error', 'logger.error', (['"""superagi/resource_manager/resource_manager.py - create_llama_document_s3 threw : """', 'e'], {}), "(\n 'superagi/resource_manager/resource_manager.py - create_llama_document_s3 threw : '\n , e)\n", (2468, 2566), False, 'from superagi.lib.logger import logger\n'), ((2642, 2672), 'os.remove', 'os.remove', (['temporary_file_path'], {}), '(temporary_file_path)\n', (2651, 2672), False, 'import os\n'), ((3975, 4010), 'superagi.config.config.get_config', 'get_config', (['"""RESOURCE_VECTOR_STORE"""'], {}), "('RESOURCE_VECTOR_STORE')\n", (3985, 4010), False, 'from superagi.config.config import get_config\n'), ((4384, 4426), 'superagi.lib.logger.logger.error', 'logger.error', (['f"""Vector store not found{e}"""'], {}), "(f'Vector store not found{e}')\n", (4396, 4426), False, 'from superagi.lib.logger import logger\n'), ((4636, 4735), 'superagi.lib.logger.logger.error', 'logger.error', (['"""save_document_to_vector_store - unable to create documents from vector"""', 'e'], {}), "(\n 'save_document_to_vector_store - unable to create documents from vector', e\n )\n", (4648, 4735), False, 'from superagi.lib.logger import logger\n'), ((1183, 1229), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[file_path]'}), '(input_files=[file_path])\n', (1204, 1229), False, 'from llama_index import SimpleDirectoryReader\n'), ((1769, 1800), 'superagi.config.config.get_config', 'get_config', (['"""AWS_ACCESS_KEY_ID"""'], {}), "('AWS_ACCESS_KEY_ID')\n", (1779, 1800), False, 'from superagi.config.config import get_config\n'), ((1840, 1875), 
'superagi.config.config.get_config', 'get_config', (['"""AWS_SECRET_ACCESS_KEY"""'], {}), "('AWS_SECRET_ACCESS_KEY')\n", (1850, 1875), False, 'from superagi.config.config import get_config\n'), ((2315, 2371), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[temporary_file_path]'}), '(input_files=[temporary_file_path])\n', (2336, 2371), False, 'from llama_index import SimpleDirectoryReader\n'), ((4167, 4234), 'superagi.resource_manager.llama_vector_store_factory.LlamaVectorStoreFactory', 'LlamaVectorStoreFactory', (['vector_store_name', 'vector_store_index_name'], {}), '(vector_store_name, vector_store_index_name)\n', (4190, 4234), False, 'from superagi.resource_manager.llama_vector_store_factory import LlamaVectorStoreFactory\n')] |
import os
from argparse import Namespace, _SubParsersAction
from llama_index import SimpleDirectoryReader
from .configuration import load_index, save_index
def add_cli(args: Namespace) -> None:
"""Handle subcommand "add"."""
index = load_index()
for p in args.files:
if not os.path.exists(p):
raise FileNotFoundError(p)
if os.path.isdir(p):
documents = SimpleDirectoryReader(p).load_data()
for document in documents:
index.insert(document)
else:
documents = SimpleDirectoryReader(input_files=[p]).load_data()
for document in documents:
index.insert(document)
save_index(index)
def register_add_cli(subparsers: _SubParsersAction) -> None:
"""Register subcommand "add" to ArgumentParser."""
parser = subparsers.add_parser("add")
parser.add_argument(
"files",
default=".",
nargs="+",
help="Files to add",
)
parser.set_defaults(func=add_cli)
| [
"llama_index.SimpleDirectoryReader"
] | [((368, 384), 'os.path.isdir', 'os.path.isdir', (['p'], {}), '(p)\n', (381, 384), False, 'import os\n'), ((299, 316), 'os.path.exists', 'os.path.exists', (['p'], {}), '(p)\n', (313, 316), False, 'import os\n'), ((410, 434), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['p'], {}), '(p)\n', (431, 434), False, 'from llama_index import SimpleDirectoryReader\n'), ((563, 601), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[p]'}), '(input_files=[p])\n', (584, 601), False, 'from llama_index import SimpleDirectoryReader\n')] |
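A minimal usage sketch for the `add` subcommand above. Only `register_add_cli`/`add_cli` come from the snippet itself; the module name `add_command` and the sample file path are illustrative assumptions.
from argparse import ArgumentParser
from add_command import register_add_cli  # hypothetical module holding the snippet above
parser = ArgumentParser(prog="index-cli")
subparsers = parser.add_subparsers(dest="command", required=True)
register_add_cli(subparsers)
# Equivalent to running: index-cli add ./docs/guide.pdf
args = parser.parse_args(["add", "./docs/guide.pdf"])
args.func(args)  # dispatches to add_cli, which ingests the file and saves the index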
from typing import Dict, List, Type
from llama_index.agent import OpenAIAgent, ReActAgent
from llama_index.agent.types import BaseAgent
from llama_index.llms import Anthropic, OpenAI
from llama_index.llms.llama_utils import messages_to_prompt
from llama_index.llms.llm import LLM
from llama_index.llms.replicate import Replicate
OPENAI_MODELS = [
"text-davinci-003",
"gpt-3.5-turbo-0613",
"gpt-4-0613",
]
ANTHROPIC_MODELS = ["claude-instant-1", "claude-instant-1.2", "claude-2", "claude-2.0"]
LLAMA_MODELS = [
"llama13b-v2-chat",
"llama70b-v2-chat",
]
REPLICATE_MODELS: List[str] = []
ALL_MODELS = OPENAI_MODELS + ANTHROPIC_MODELS + LLAMA_MODELS
AGENTS: Dict[str, Type[BaseAgent]] = {
"react": ReActAgent,
"openai": OpenAIAgent,
}
LLAMA_13B_V2_CHAT = (
"a16z-infra/llama13b-v2-chat:"
"df7690f1994d94e96ad9d568eac121aecf50684a0b0963b25a41cc40061269e5"
)
LLAMA_70B_V2_CHAT = (
"replicate/llama70b-v2-chat:"
"e951f18578850b652510200860fc4ea62b3b16fac280f83ff32282f87bbd2e48"
)
def get_model(model: str) -> LLM:
llm: LLM
if model in OPENAI_MODELS:
llm = OpenAI(model=model)
elif model in ANTHROPIC_MODELS:
llm = Anthropic(model=model)
elif model in LLAMA_MODELS:
model_dict = {
"llama13b-v2-chat": LLAMA_13B_V2_CHAT,
"llama70b-v2-chat": LLAMA_70B_V2_CHAT,
}
replicate_model = model_dict[model]
llm = Replicate(
model=replicate_model,
temperature=0.01,
context_window=4096,
# override message representation for llama 2
messages_to_prompt=messages_to_prompt,
)
else:
raise ValueError(f"Unknown model {model}")
return llm
def is_valid_combination(agent: str, model: str) -> bool:
if agent == "openai" and model not in ["gpt-3.5-turbo-0613", "gpt-4-0613"]:
print(f"{agent} does not work with {model}")
return False
return True
| [
"llama_index.llms.Anthropic",
"llama_index.llms.OpenAI",
"llama_index.llms.replicate.Replicate"
] | [((1116, 1135), 'llama_index.llms.OpenAI', 'OpenAI', ([], {'model': 'model'}), '(model=model)\n', (1122, 1135), False, 'from llama_index.llms import Anthropic, OpenAI\n'), ((1186, 1208), 'llama_index.llms.Anthropic', 'Anthropic', ([], {'model': 'model'}), '(model=model)\n', (1195, 1208), False, 'from llama_index.llms import Anthropic, OpenAI\n'), ((1434, 1548), 'llama_index.llms.replicate.Replicate', 'Replicate', ([], {'model': 'replicate_model', 'temperature': '(0.01)', 'context_window': '(4096)', 'messages_to_prompt': 'messages_to_prompt'}), '(model=replicate_model, temperature=0.01, context_window=4096,\n messages_to_prompt=messages_to_prompt)\n', (1443, 1548), False, 'from llama_index.llms.replicate import Replicate\n')] |
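An illustrative sweep over the valid agent/model pairs defined above; a sketch only, assuming the definitions above are in scope and the relevant API keys (OpenAI/Anthropic/Replicate) are configured.
for agent_name, agent_cls in AGENTS.items():
    for model in ALL_MODELS:
        if not is_valid_combination(agent_name, model):
            continue
        llm = get_model(model)
        # Both OpenAIAgent and ReActAgent expose a from_tools constructor.
        agent = agent_cls.from_tools(tools=[], llm=llm, verbose=True)
        print(f"{agent_name} agent ready, backed by {model}")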
import asyncio
import os
import shutil
from argparse import ArgumentParser
from glob import iglob
from pathlib import Path
from typing import Any, Callable, Dict, Optional, Union, cast
from llama_index.core import (
SimpleDirectoryReader,
VectorStoreIndex,
)
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.base.response.schema import (
RESPONSE_TYPE,
StreamingResponse,
Response,
)
from llama_index.core.bridge.pydantic import BaseModel, Field, validator
from llama_index.core.chat_engine import CondenseQuestionChatEngine
from llama_index.core.indices.service_context import ServiceContext
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.llms import LLM
from llama_index.core.query_engine import CustomQueryEngine
from llama_index.core.query_pipeline.components.function import FnComponent
from llama_index.core.query_pipeline.query import QueryPipeline
from llama_index.core.readers.base import BaseReader
from llama_index.core.response_synthesizers import CompactAndRefine
from llama_index.core.utils import get_cache_dir
def _try_load_openai_llm():
try:
from llama_index.llms.openai import OpenAI # pants: no-infer-dep
return OpenAI(model="gpt-3.5-turbo", streaming=True)
except ImportError:
raise ImportError(
"`llama-index-llms-openai` package not found, "
"please run `pip install llama-index-llms-openai`"
)
RAG_HISTORY_FILE_NAME = "files_history.txt"
def default_ragcli_persist_dir() -> str:
return str(Path(get_cache_dir()) / "rag_cli")
def query_input(query_str: Optional[str] = None) -> str:
return query_str or ""
class QueryPipelineQueryEngine(CustomQueryEngine):
query_pipeline: QueryPipeline = Field(
description="Query Pipeline to use for Q&A.",
)
def custom_query(self, query_str: str) -> RESPONSE_TYPE:
return self.query_pipeline.run(query_str=query_str)
async def acustom_query(self, query_str: str) -> RESPONSE_TYPE:
return await self.query_pipeline.arun(query_str=query_str)
class RagCLI(BaseModel):
"""
    CLI tool for chatting with the output of an IngestionPipeline via a QueryPipeline.
"""
ingestion_pipeline: IngestionPipeline = Field(
description="Ingestion pipeline to run for RAG ingestion."
)
verbose: bool = Field(
description="Whether to print out verbose information during execution.",
default=False,
)
persist_dir: str = Field(
description="Directory to persist ingestion pipeline.",
default_factory=default_ragcli_persist_dir,
)
llm: LLM = Field(
description="Language model to use for response generation.",
default_factory=lambda: _try_load_openai_llm(),
)
query_pipeline: Optional[QueryPipeline] = Field(
description="Query Pipeline to use for Q&A.",
default=None,
)
chat_engine: Optional[CondenseQuestionChatEngine] = Field(
description="Chat engine to use for chatting.",
default_factory=None,
)
file_extractor: Optional[Dict[str, BaseReader]] = Field(
description="File extractor to use for extracting text from files.",
default=None,
)
class Config:
arbitrary_types_allowed = True
@validator("query_pipeline", always=True)
def query_pipeline_from_ingestion_pipeline(
cls, query_pipeline: Any, values: Dict[str, Any]
) -> Optional[QueryPipeline]:
"""
If query_pipeline is not provided, create one from ingestion_pipeline.
"""
if query_pipeline is not None:
return query_pipeline
ingestion_pipeline = cast(IngestionPipeline, values["ingestion_pipeline"])
if ingestion_pipeline.vector_store is None:
return None
verbose = cast(bool, values["verbose"])
query_component = FnComponent(
fn=query_input, output_key="output", req_params={"query_str"}
)
llm = cast(LLM, values["llm"])
# get embed_model from transformations if possible
embed_model = None
if ingestion_pipeline.transformations is not None:
for transformation in ingestion_pipeline.transformations:
if isinstance(transformation, BaseEmbedding):
embed_model = transformation
break
service_context = ServiceContext.from_defaults(
llm=llm, embed_model=embed_model or "default"
)
retriever = VectorStoreIndex.from_vector_store(
ingestion_pipeline.vector_store, service_context=service_context
).as_retriever(similarity_top_k=8)
response_synthesizer = CompactAndRefine(
service_context=service_context, streaming=True, verbose=verbose
)
# define query pipeline
query_pipeline = QueryPipeline(verbose=verbose)
query_pipeline.add_modules(
{
"query": query_component,
"retriever": retriever,
"summarizer": response_synthesizer,
}
)
query_pipeline.add_link("query", "retriever")
query_pipeline.add_link("retriever", "summarizer", dest_key="nodes")
query_pipeline.add_link("query", "summarizer", dest_key="query_str")
return query_pipeline
@validator("chat_engine", always=True)
def chat_engine_from_query_pipeline(
cls, chat_engine: Any, values: Dict[str, Any]
) -> Optional[CondenseQuestionChatEngine]:
"""
If chat_engine is not provided, create one from query_pipeline.
"""
if chat_engine is not None:
return chat_engine
if values.get("query_pipeline", None) is None:
values["query_pipeline"] = cls.query_pipeline_from_ingestion_pipeline(
query_pipeline=None, values=values
)
query_pipeline = cast(QueryPipeline, values["query_pipeline"])
if query_pipeline is None:
return None
query_engine = QueryPipelineQueryEngine(query_pipeline=query_pipeline) # type: ignore
verbose = cast(bool, values["verbose"])
llm = cast(LLM, values["llm"])
return CondenseQuestionChatEngine.from_defaults(
query_engine=query_engine, llm=llm, verbose=verbose
)
async def handle_cli(
self,
files: Optional[str] = None,
question: Optional[str] = None,
chat: bool = False,
verbose: bool = False,
clear: bool = False,
create_llama: bool = False,
**kwargs: Dict[str, Any],
) -> None:
"""
Entrypoint for local document RAG CLI tool.
"""
if clear:
# delete self.persist_dir directory including all subdirectories and files
if os.path.exists(self.persist_dir):
# Ask for confirmation
response = input(
f"Are you sure you want to delete data within {self.persist_dir}? [y/N] "
)
if response.strip().lower() != "y":
print("Aborted.")
return
os.system(f"rm -rf {self.persist_dir}")
print(f"Successfully cleared {self.persist_dir}")
self.verbose = verbose
ingestion_pipeline = cast(IngestionPipeline, self.ingestion_pipeline)
if self.verbose:
print("Saving/Loading from persist_dir: ", self.persist_dir)
if files is not None:
documents = []
for _file in iglob(files, recursive=True):
_file = os.path.abspath(_file)
if os.path.isdir(_file):
reader = SimpleDirectoryReader(
input_dir=_file,
filename_as_id=True,
file_extractor=self.file_extractor,
)
else:
reader = SimpleDirectoryReader(
input_files=[_file],
filename_as_id=True,
file_extractor=self.file_extractor,
)
documents.extend(reader.load_data(show_progress=verbose))
await ingestion_pipeline.arun(show_progress=verbose, documents=documents)
ingestion_pipeline.persist(persist_dir=self.persist_dir)
# Append the `--files` argument to the history file
with open(f"{self.persist_dir}/{RAG_HISTORY_FILE_NAME}", "a") as f:
f.write(files + "\n")
if create_llama:
if shutil.which("npx") is None:
print(
"`npx` is not installed. Please install it by calling `npm install -g npx`"
)
else:
history_file_path = Path(f"{self.persist_dir}/{RAG_HISTORY_FILE_NAME}")
if not history_file_path.exists():
print(
"No data has been ingested, "
"please specify `--files` to create llama dataset."
)
else:
with open(history_file_path) as f:
stored_paths = {line.strip() for line in f if line.strip()}
if len(stored_paths) == 0:
print(
"No data has been ingested, "
"please specify `--files` to create llama dataset."
)
elif len(stored_paths) > 1:
print(
"Multiple files or folders were ingested, which is not supported by create-llama. "
"Please call `llamaindex-cli rag --clear` to clear the cache first, "
"then call `llamaindex-cli rag --files` again with a single folder or file"
)
else:
path = stored_paths.pop()
if "*" in path:
print(
"Glob pattern is not supported by create-llama. "
"Please call `llamaindex-cli rag --clear` to clear the cache first, "
"then call `llamaindex-cli rag --files` again with a single folder or file."
)
elif not os.path.exists(path):
print(
f"The path {path} does not exist. "
"Please call `llamaindex-cli rag --clear` to clear the cache first, "
"then call `llamaindex-cli rag --files` again with a single folder or file."
)
else:
print(f"Calling create-llama using data from {path} ...")
command_args = [
"npx",
"create-llama@latest",
"--frontend",
"--template",
"streaming",
"--framework",
"fastapi",
"--ui",
"shadcn",
"--vector-db",
"none",
"--engine",
"context",
f"--files {path}",
]
os.system(" ".join(command_args))
if question is not None:
await self.handle_question(question)
if chat:
await self.start_chat_repl()
async def handle_question(self, question: str) -> None:
if self.query_pipeline is None:
raise ValueError("query_pipeline is not defined.")
query_pipeline = cast(QueryPipeline, self.query_pipeline)
query_pipeline.verbose = self.verbose
chat_engine = cast(CondenseQuestionChatEngine, self.chat_engine)
response = chat_engine.chat(question)
if isinstance(response, StreamingResponse):
response.print_response_stream()
else:
response = cast(Response, response)
print(response)
async def start_chat_repl(self) -> None:
"""
Start a REPL for chatting with the agent.
"""
if self.query_pipeline is None:
raise ValueError("query_pipeline is not defined.")
chat_engine = cast(CondenseQuestionChatEngine, self.chat_engine)
chat_engine.streaming_chat_repl()
@classmethod
def add_parser_args(
cls,
parser: Union[ArgumentParser, Any],
instance_generator: Optional[Callable[[], "RagCLI"]],
) -> None:
if instance_generator:
parser.add_argument(
"-q",
"--question",
type=str,
help="The question you want to ask.",
required=False,
)
parser.add_argument(
"-f",
"--files",
type=str,
help=(
"The name of the file or directory you want to ask a question about,"
'such as "file.pdf".'
),
)
parser.add_argument(
"-c",
"--chat",
help="If flag is present, opens a chat REPL.",
action="store_true",
)
parser.add_argument(
"-v",
"--verbose",
help="Whether to print out verbose information during execution.",
action="store_true",
)
parser.add_argument(
"--clear",
help="Clears out all currently embedded data.",
action="store_true",
)
parser.add_argument(
"--create-llama",
help="Create a LlamaIndex application with your embedded data.",
required=False,
action="store_true",
)
parser.set_defaults(
func=lambda args: asyncio.run(
instance_generator().handle_cli(**vars(args))
)
)
def cli(self) -> None:
"""
Entrypoint for CLI tool.
"""
parser = ArgumentParser(description="LlamaIndex RAG Q&A tool.")
subparsers = parser.add_subparsers(
title="commands", dest="command", required=True
)
llamarag_parser = subparsers.add_parser(
"rag", help="Ask a question to a document / a directory of documents."
)
self.add_parser_args(llamarag_parser, lambda: self)
# Parse the command-line arguments
args = parser.parse_args()
# Call the appropriate function based on the command
args.func(args)
| [
"llama_index.llms.openai.OpenAI",
"llama_index.core.bridge.pydantic.validator",
"llama_index.core.VectorStoreIndex.from_vector_store",
"llama_index.core.indices.service_context.ServiceContext.from_defaults",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.query_pipeline.components.function.FnComponent",
"llama_index.core.utils.get_cache_dir",
"llama_index.core.SimpleDirectoryReader",
"llama_index.core.query_pipeline.query.QueryPipeline",
"llama_index.core.response_synthesizers.CompactAndRefine",
"llama_index.core.chat_engine.CondenseQuestionChatEngine.from_defaults"
] | [((1789, 1840), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Query Pipeline to use for Q&A."""'}), "(description='Query Pipeline to use for Q&A.')\n", (1794, 1840), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2284, 2349), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Ingestion pipeline to run for RAG ingestion."""'}), "(description='Ingestion pipeline to run for RAG ingestion.')\n", (2289, 2349), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2384, 2488), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Whether to print out verbose information during execution."""', 'default': '(False)'}), "(description=\n 'Whether to print out verbose information during execution.', default=False\n )\n", (2389, 2488), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2525, 2634), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Directory to persist ingestion pipeline."""', 'default_factory': 'default_ragcli_persist_dir'}), "(description='Directory to persist ingestion pipeline.',\n default_factory=default_ragcli_persist_dir)\n", (2530, 2634), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2854, 2919), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Query Pipeline to use for Q&A."""', 'default': 'None'}), "(description='Query Pipeline to use for Q&A.', default=None)\n", (2859, 2919), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((2999, 3074), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""Chat engine to use for chatting."""', 'default_factory': 'None'}), "(description='Chat engine to use for chatting.', default_factory=None)\n", (3004, 3074), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((3152, 3244), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'description': '"""File extractor to use for extracting text from files."""', 'default': 'None'}), "(description='File extractor to use for extracting text from files.',\n default=None)\n", (3157, 3244), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((3328, 3368), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""query_pipeline"""'], {'always': '(True)'}), "('query_pipeline', always=True)\n", (3337, 3368), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((5385, 5422), 'llama_index.core.bridge.pydantic.validator', 'validator', (['"""chat_engine"""'], {'always': '(True)'}), "('chat_engine', always=True)\n", (5394, 5422), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, validator\n'), ((1245, 1290), 'llama_index.llms.openai.OpenAI', 'OpenAI', ([], {'model': '"""gpt-3.5-turbo"""', 'streaming': '(True)'}), "(model='gpt-3.5-turbo', streaming=True)\n", (1251, 1290), False, 'from llama_index.llms.openai import OpenAI\n'), ((3714, 3767), 'typing.cast', 'cast', (['IngestionPipeline', "values['ingestion_pipeline']"], {}), "(IngestionPipeline, values['ingestion_pipeline'])\n", (3718, 3767), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((3862, 3891), 'typing.cast', 'cast', (['bool', "values['verbose']"], {}), "(bool, values['verbose'])\n", (3866, 3891), False, 'from typing import Any, Callable, Dict, Optional, 
Union, cast\n'), ((3918, 3992), 'llama_index.core.query_pipeline.components.function.FnComponent', 'FnComponent', ([], {'fn': 'query_input', 'output_key': '"""output"""', 'req_params': "{'query_str'}"}), "(fn=query_input, output_key='output', req_params={'query_str'})\n", (3929, 3992), False, 'from llama_index.core.query_pipeline.components.function import FnComponent\n'), ((4029, 4053), 'typing.cast', 'cast', (['LLM', "values['llm']"], {}), "(LLM, values['llm'])\n", (4033, 4053), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((4434, 4509), 'llama_index.core.indices.service_context.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': "(embed_model or 'default')"}), "(llm=llm, embed_model=embed_model or 'default')\n", (4462, 4509), False, 'from llama_index.core.indices.service_context import ServiceContext\n'), ((4739, 4826), 'llama_index.core.response_synthesizers.CompactAndRefine', 'CompactAndRefine', ([], {'service_context': 'service_context', 'streaming': '(True)', 'verbose': 'verbose'}), '(service_context=service_context, streaming=True, verbose=\n verbose)\n', (4755, 4826), False, 'from llama_index.core.response_synthesizers import CompactAndRefine\n'), ((4902, 4932), 'llama_index.core.query_pipeline.query.QueryPipeline', 'QueryPipeline', ([], {'verbose': 'verbose'}), '(verbose=verbose)\n', (4915, 4932), False, 'from llama_index.core.query_pipeline.query import QueryPipeline\n'), ((5958, 6003), 'typing.cast', 'cast', (['QueryPipeline', "values['query_pipeline']"], {}), "(QueryPipeline, values['query_pipeline'])\n", (5962, 6003), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((6176, 6205), 'typing.cast', 'cast', (['bool', "values['verbose']"], {}), "(bool, values['verbose'])\n", (6180, 6205), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((6220, 6244), 'typing.cast', 'cast', (['LLM', "values['llm']"], {}), "(LLM, values['llm'])\n", (6224, 6244), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((6260, 6357), 'llama_index.core.chat_engine.CondenseQuestionChatEngine.from_defaults', 'CondenseQuestionChatEngine.from_defaults', ([], {'query_engine': 'query_engine', 'llm': 'llm', 'verbose': 'verbose'}), '(query_engine=query_engine, llm=llm,\n verbose=verbose)\n', (6300, 6357), False, 'from llama_index.core.chat_engine import CondenseQuestionChatEngine\n'), ((7378, 7426), 'typing.cast', 'cast', (['IngestionPipeline', 'self.ingestion_pipeline'], {}), '(IngestionPipeline, self.ingestion_pipeline)\n', (7382, 7426), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((12054, 12094), 'typing.cast', 'cast', (['QueryPipeline', 'self.query_pipeline'], {}), '(QueryPipeline, self.query_pipeline)\n', (12058, 12094), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((12163, 12213), 'typing.cast', 'cast', (['CondenseQuestionChatEngine', 'self.chat_engine'], {}), '(CondenseQuestionChatEngine, self.chat_engine)\n', (12167, 12213), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((12693, 12743), 'typing.cast', 'cast', (['CondenseQuestionChatEngine', 'self.chat_engine'], {}), '(CondenseQuestionChatEngine, self.chat_engine)\n', (12697, 12743), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((14601, 14655), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': '"""LlamaIndex RAG Q&A tool."""'}), "(description='LlamaIndex RAG Q&A tool.')\n", 
(14615, 14655), False, 'from argparse import ArgumentParser\n'), ((6863, 6895), 'os.path.exists', 'os.path.exists', (['self.persist_dir'], {}), '(self.persist_dir)\n', (6877, 6895), False, 'import os\n'), ((7607, 7635), 'glob.iglob', 'iglob', (['files'], {'recursive': '(True)'}), '(files, recursive=True)\n', (7612, 7635), False, 'from glob import iglob\n'), ((12395, 12419), 'typing.cast', 'cast', (['Response', 'response'], {}), '(Response, response)\n', (12399, 12419), False, 'from typing import Any, Callable, Dict, Optional, Union, cast\n'), ((1584, 1599), 'llama_index.core.utils.get_cache_dir', 'get_cache_dir', ([], {}), '()\n', (1597, 1599), False, 'from llama_index.core.utils import get_cache_dir\n'), ((4552, 4656), 'llama_index.core.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', (['ingestion_pipeline.vector_store'], {'service_context': 'service_context'}), '(ingestion_pipeline.vector_store,\n service_context=service_context)\n', (4586, 4656), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((7215, 7254), 'os.system', 'os.system', (['f"""rm -rf {self.persist_dir}"""'], {}), "(f'rm -rf {self.persist_dir}')\n", (7224, 7254), False, 'import os\n'), ((7661, 7683), 'os.path.abspath', 'os.path.abspath', (['_file'], {}), '(_file)\n', (7676, 7683), False, 'import os\n'), ((7703, 7723), 'os.path.isdir', 'os.path.isdir', (['_file'], {}), '(_file)\n', (7716, 7723), False, 'import os\n'), ((8646, 8665), 'shutil.which', 'shutil.which', (['"""npx"""'], {}), "('npx')\n", (8658, 8665), False, 'import shutil\n'), ((8866, 8917), 'pathlib.Path', 'Path', (['f"""{self.persist_dir}/{RAG_HISTORY_FILE_NAME}"""'], {}), "(f'{self.persist_dir}/{RAG_HISTORY_FILE_NAME}')\n", (8870, 8917), False, 'from pathlib import Path\n'), ((7754, 7854), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_dir': '_file', 'filename_as_id': '(True)', 'file_extractor': 'self.file_extractor'}), '(input_dir=_file, filename_as_id=True, file_extractor=\n self.file_extractor)\n', (7775, 7854), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((7996, 8099), 'llama_index.core.SimpleDirectoryReader', 'SimpleDirectoryReader', ([], {'input_files': '[_file]', 'filename_as_id': '(True)', 'file_extractor': 'self.file_extractor'}), '(input_files=[_file], filename_as_id=True,\n file_extractor=self.file_extractor)\n', (8017, 8099), False, 'from llama_index.core import SimpleDirectoryReader, VectorStoreIndex\n'), ((10477, 10497), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (10491, 10497), False, 'import os\n')] |
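A hedged sketch of wiring up and driving the RagCLI defined above. It assumes the class above is in scope, that `chromadb`, `llama-index-vector-stores-chroma`, and `llama-index-embeddings-openai` are installed, and that an OpenAI key is configured; any other vector store/embedding pair accepted by IngestionPipeline should work the same way.
import chromadb
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore
chroma_client = chromadb.PersistentClient(path="./chroma_db")
collection = chroma_client.get_or_create_collection("rag_cli_demo")
vector_store = ChromaVectorStore(chroma_collection=collection)
pipeline = IngestionPipeline(
    transformations=[SentenceSplitter(), OpenAIEmbedding()],
    vector_store=vector_store,
)
rag_cli = RagCLI(ingestion_pipeline=pipeline, verbose=False)
# Typical invocations of the argparse entrypoint defined in cli()/add_parser_args:
#   python rag_tool.py rag --files "./docs/**/*.md"
#   python rag_tool.py rag -q "What does the guide say about chunking?"
#   python rag_tool.py rag --chat
rag_cli.cli()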
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, List, Optional
if TYPE_CHECKING:
from llama_index.core.service_context import ServiceContext
from llama_index.core.base.embeddings.base import BaseEmbedding
from llama_index.core.callbacks.base import BaseCallbackHandler, CallbackManager
from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model
from llama_index.core.indices.prompt_helper import PromptHelper
from llama_index.core.llms import LLM
from llama_index.core.llms.utils import LLMType, resolve_llm
from llama_index.core.node_parser import NodeParser, SentenceSplitter
from llama_index.core.schema import TransformComponent
from llama_index.core.types import PydanticProgramMode
from llama_index.core.utils import get_tokenizer, set_global_tokenizer
@dataclass
class _Settings:
"""Settings for the Llama Index, lazily initialized."""
# lazy initialization
_llm: Optional[LLM] = None
_embed_model: Optional[BaseEmbedding] = None
_callback_manager: Optional[CallbackManager] = None
_tokenizer: Optional[Callable[[str], List[Any]]] = None
_node_parser: Optional[NodeParser] = None
_prompt_helper: Optional[PromptHelper] = None
_transformations: Optional[List[TransformComponent]] = None
# ---- LLM ----
@property
def llm(self) -> LLM:
"""Get the LLM."""
if self._llm is None:
self._llm = resolve_llm("default")
if self._callback_manager is not None:
self._llm.callback_manager = self._callback_manager
return self._llm
@llm.setter
def llm(self, llm: LLMType) -> None:
"""Set the LLM."""
self._llm = resolve_llm(llm)
@property
def pydantic_program_mode(self) -> PydanticProgramMode:
"""Get the pydantic program mode."""
return self.llm.pydantic_program_mode
@pydantic_program_mode.setter
def pydantic_program_mode(self, pydantic_program_mode: PydanticProgramMode) -> None:
"""Set the pydantic program mode."""
self.llm.pydantic_program_mode = pydantic_program_mode
# ---- Embedding ----
@property
def embed_model(self) -> BaseEmbedding:
"""Get the embedding model."""
if self._embed_model is None:
self._embed_model = resolve_embed_model("default")
if self._callback_manager is not None:
self._embed_model.callback_manager = self._callback_manager
return self._embed_model
@embed_model.setter
def embed_model(self, embed_model: EmbedType) -> None:
"""Set the embedding model."""
self._embed_model = resolve_embed_model(embed_model)
# ---- Callbacks ----
@property
def global_handler(self) -> Optional[BaseCallbackHandler]:
"""Get the global handler."""
import llama_index.core
# TODO: deprecated?
return llama_index.core.global_handler
@global_handler.setter
def global_handler(self, eval_mode: str, **eval_params: Any) -> None:
"""Set the global handler."""
from llama_index.core import set_global_handler
# TODO: deprecated?
set_global_handler(eval_mode, **eval_params)
@property
def callback_manager(self) -> CallbackManager:
"""Get the callback manager."""
if self._callback_manager is None:
self._callback_manager = CallbackManager()
return self._callback_manager
@callback_manager.setter
def callback_manager(self, callback_manager: CallbackManager) -> None:
"""Set the callback manager."""
self._callback_manager = callback_manager
# ---- Tokenizer ----
@property
def tokenizer(self) -> Callable[[str], List[Any]]:
"""Get the tokenizer."""
import llama_index.core
if llama_index.core.global_tokenizer is None:
return get_tokenizer()
# TODO: deprecated?
return llama_index.core.global_tokenizer
@tokenizer.setter
def tokenizer(self, tokenizer: Callable[[str], List[Any]]) -> None:
"""Set the tokenizer."""
try:
from transformers import PreTrainedTokenizerBase # pants: no-infer-dep
if isinstance(tokenizer, PreTrainedTokenizerBase):
from functools import partial
tokenizer = partial(tokenizer.encode, add_special_tokens=False)
except ImportError:
pass
# TODO: deprecated?
set_global_tokenizer(tokenizer)
# ---- Node parser ----
@property
def node_parser(self) -> NodeParser:
"""Get the node parser."""
if self._node_parser is None:
self._node_parser = SentenceSplitter()
if self._callback_manager is not None:
self._node_parser.callback_manager = self._callback_manager
return self._node_parser
@node_parser.setter
def node_parser(self, node_parser: NodeParser) -> None:
"""Set the node parser."""
self._node_parser = node_parser
@property
def chunk_size(self) -> int:
"""Get the chunk size."""
if hasattr(self.node_parser, "chunk_size"):
return self.node_parser.chunk_size
else:
raise ValueError("Configured node parser does not have chunk size.")
@chunk_size.setter
def chunk_size(self, chunk_size: int) -> None:
"""Set the chunk size."""
if hasattr(self.node_parser, "chunk_size"):
self.node_parser.chunk_size = chunk_size
else:
raise ValueError("Configured node parser does not have chunk size.")
@property
def chunk_overlap(self) -> int:
"""Get the chunk overlap."""
if hasattr(self.node_parser, "chunk_overlap"):
return self.node_parser.chunk_overlap
else:
raise ValueError("Configured node parser does not have chunk overlap.")
@chunk_overlap.setter
def chunk_overlap(self, chunk_overlap: int) -> None:
"""Set the chunk overlap."""
if hasattr(self.node_parser, "chunk_overlap"):
self.node_parser.chunk_overlap = chunk_overlap
else:
raise ValueError("Configured node parser does not have chunk overlap.")
# ---- Node parser alias ----
@property
def text_splitter(self) -> NodeParser:
"""Get the text splitter."""
return self.node_parser
@text_splitter.setter
def text_splitter(self, text_splitter: NodeParser) -> None:
"""Set the text splitter."""
self.node_parser = text_splitter
@property
def prompt_helper(self) -> PromptHelper:
"""Get the prompt helper."""
if self._llm is not None and self._prompt_helper is None:
self._prompt_helper = PromptHelper.from_llm_metadata(self._llm.metadata)
elif self._prompt_helper is None:
self._prompt_helper = PromptHelper()
return self._prompt_helper
@prompt_helper.setter
def prompt_helper(self, prompt_helper: PromptHelper) -> None:
"""Set the prompt helper."""
self._prompt_helper = prompt_helper
@property
def num_output(self) -> int:
"""Get the number of outputs."""
return self.prompt_helper.num_output
@num_output.setter
def num_output(self, num_output: int) -> None:
"""Set the number of outputs."""
self.prompt_helper.num_output = num_output
@property
def context_window(self) -> int:
"""Get the context window."""
return self.prompt_helper.context_window
@context_window.setter
def context_window(self, context_window: int) -> None:
"""Set the context window."""
self.prompt_helper.context_window = context_window
# ---- Transformations ----
@property
def transformations(self) -> List[TransformComponent]:
"""Get the transformations."""
if self._transformations is None:
self._transformations = [self.node_parser]
return self._transformations
@transformations.setter
def transformations(self, transformations: List[TransformComponent]) -> None:
"""Set the transformations."""
self._transformations = transformations
# Singleton
Settings = _Settings()
# -- Helper functions for deprecation/migration --
def llm_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> LLM:
"""Get settings from either settings or context."""
if context is not None:
return context.llm
return settings.llm
def embed_model_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> BaseEmbedding:
"""Get settings from either settings or context."""
if context is not None:
return context.embed_model
return settings.embed_model
def callback_manager_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> CallbackManager:
"""Get settings from either settings or context."""
if context is not None:
return context.callback_manager
return settings.callback_manager
def node_parser_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> NodeParser:
"""Get settings from either settings or context."""
if context is not None:
return context.node_parser
return settings.node_parser
def transformations_from_settings_or_context(
settings: _Settings, context: Optional["ServiceContext"]
) -> List[TransformComponent]:
"""Get settings from either settings or context."""
if context is not None:
return context.transformations
return settings.transformations
| [
"llama_index.core.llms.utils.resolve_llm",
"llama_index.core.utils.get_tokenizer",
"llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata",
"llama_index.core.embeddings.utils.resolve_embed_model",
"llama_index.core.node_parser.SentenceSplitter",
"llama_index.core.callbacks.base.CallbackManager",
"llama_index.core.set_global_handler",
"llama_index.core.indices.prompt_helper.PromptHelper",
"llama_index.core.utils.set_global_tokenizer"
] | [((1701, 1717), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['llm'], {}), '(llm)\n', (1712, 1717), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((2647, 2679), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['embed_model'], {}), '(embed_model)\n', (2666, 2679), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((3164, 3208), 'llama_index.core.set_global_handler', 'set_global_handler', (['eval_mode'], {}), '(eval_mode, **eval_params)\n', (3182, 3208), False, 'from llama_index.core import set_global_handler\n'), ((4474, 4505), 'llama_index.core.utils.set_global_tokenizer', 'set_global_tokenizer', (['tokenizer'], {}), '(tokenizer)\n', (4494, 4505), False, 'from llama_index.core.utils import get_tokenizer, set_global_tokenizer\n'), ((1435, 1457), 'llama_index.core.llms.utils.resolve_llm', 'resolve_llm', (['"""default"""'], {}), "('default')\n", (1446, 1457), False, 'from llama_index.core.llms.utils import LLMType, resolve_llm\n'), ((2311, 2341), 'llama_index.core.embeddings.utils.resolve_embed_model', 'resolve_embed_model', (['"""default"""'], {}), "('default')\n", (2330, 2341), False, 'from llama_index.core.embeddings.utils import EmbedType, resolve_embed_model\n'), ((3395, 3412), 'llama_index.core.callbacks.base.CallbackManager', 'CallbackManager', ([], {}), '()\n', (3410, 3412), False, 'from llama_index.core.callbacks.base import BaseCallbackHandler, CallbackManager\n'), ((3882, 3897), 'llama_index.core.utils.get_tokenizer', 'get_tokenizer', ([], {}), '()\n', (3895, 3897), False, 'from llama_index.core.utils import get_tokenizer, set_global_tokenizer\n'), ((4696, 4714), 'llama_index.core.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {}), '()\n', (4712, 4714), False, 'from llama_index.core.node_parser import NodeParser, SentenceSplitter\n'), ((6766, 6816), 'llama_index.core.indices.prompt_helper.PromptHelper.from_llm_metadata', 'PromptHelper.from_llm_metadata', (['self._llm.metadata'], {}), '(self._llm.metadata)\n', (6796, 6816), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n'), ((4340, 4391), 'functools.partial', 'partial', (['tokenizer.encode'], {'add_special_tokens': '(False)'}), '(tokenizer.encode, add_special_tokens=False)\n', (4347, 4391), False, 'from functools import partial\n'), ((6893, 6907), 'llama_index.core.indices.prompt_helper.PromptHelper', 'PromptHelper', ([], {}), '()\n', (6905, 6907), False, 'from llama_index.core.indices.prompt_helper import PromptHelper\n')] |
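A short sketch of configuring the lazily-initialized Settings singleton above (assumes `llama-index-llms-openai` is installed and an OpenAI key is set; any other LLM would be resolved the same way via resolve_llm).
from llama_index.core import Settings
from llama_index.llms.openai import OpenAI
Settings.llm = OpenAI(model="gpt-3.5-turbo")  # stored via the llm setter above
Settings.chunk_size = 512                     # proxied onto the default SentenceSplitter
Settings.chunk_overlap = 64
# context_window / num_output are derived from the active LLM's metadata
# through PromptHelper the first time they are read.
print(Settings.context_window, Settings.num_output)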
import asyncio
from llama_index.core.llama_dataset import download_llama_dataset
from llama_index.core.llama_pack import download_llama_pack
from llama_index.core import VectorStoreIndex
async def main():
# DOWNLOAD LLAMADATASET
rag_dataset, documents = download_llama_dataset("CovidQaDataset", "./data")
# BUILD BASIC RAG PIPELINE
index = VectorStoreIndex.from_documents(documents=documents)
query_engine = index.as_query_engine()
# EVALUATE WITH PACK
RagEvaluatorPack = download_llama_pack("RagEvaluatorPack", "./pack")
rag_evaluator = RagEvaluatorPack(query_engine=query_engine, rag_dataset=rag_dataset)
############################################################################
    # NOTE: If you have a lower tier subscription for OpenAI API, like Usage Tier 1, #
# then you'll need to use different batch_size and sleep_time_in_seconds. #
# For Usage Tier 1, settings that seemed to work well were batch_size=5, #
    # and sleep_time_in_seconds=15 (as of December 2023). #
############################################################################
benchmark_df = await rag_evaluator.arun(
        batch_size=40,  # batch size for the openai api calls being made
        sleep_time_in_seconds=1,  # number of seconds to sleep before making an api call
)
print(benchmark_df)
if __name__ == "__main__":
loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
| [
"llama_index.core.VectorStoreIndex.from_documents",
"llama_index.core.llama_dataset.download_llama_dataset",
"llama_index.core.llama_pack.download_llama_pack"
] | [((265, 315), 'llama_index.core.llama_dataset.download_llama_dataset', 'download_llama_dataset', (['"""CovidQaDataset"""', '"""./data"""'], {}), "('CovidQaDataset', './data')\n", (287, 315), False, 'from llama_index.core.llama_dataset import download_llama_dataset\n'), ((360, 412), 'llama_index.core.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', ([], {'documents': 'documents'}), '(documents=documents)\n', (391, 412), False, 'from llama_index.core import VectorStoreIndex\n'), ((505, 554), 'llama_index.core.llama_pack.download_llama_pack', 'download_llama_pack', (['"""RagEvaluatorPack"""', '"""./pack"""'], {}), "('RagEvaluatorPack', './pack')\n", (524, 554), False, 'from llama_index.core.llama_pack import download_llama_pack\n'), ((1405, 1429), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1427, 1429), False, 'import asyncio\n')] |
from typing import Any, Callable, Optional, Sequence
from llama_index.core.base.llms.types import (
ChatMessage,
CompletionResponse,
CompletionResponseGen,
LLMMetadata,
)
from llama_index.core.callbacks import CallbackManager
from llama_index.core.llms.callbacks import llm_completion_callback
from llama_index.core.llms.custom import CustomLLM
from llama_index.core.types import PydanticProgramMode
class MockLLM(CustomLLM):
max_tokens: Optional[int]
def __init__(
self,
max_tokens: Optional[int] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
) -> None:
super().__init__(
max_tokens=max_tokens,
callback_manager=callback_manager,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
)
@classmethod
def class_name(cls) -> str:
return "MockLLM"
@property
def metadata(self) -> LLMMetadata:
return LLMMetadata(num_output=self.max_tokens or -1)
def _generate_text(self, length: int) -> str:
return " ".join(["text" for _ in range(length)])
@llm_completion_callback()
def complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
response_text = (
self._generate_text(self.max_tokens) if self.max_tokens else prompt
)
return CompletionResponse(
text=response_text,
)
@llm_completion_callback()
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
def gen_prompt() -> CompletionResponseGen:
for ch in prompt:
yield CompletionResponse(
text=prompt,
delta=ch,
)
def gen_response(max_tokens: int) -> CompletionResponseGen:
for i in range(max_tokens):
response_text = self._generate_text(i)
yield CompletionResponse(
text=response_text,
delta="text ",
)
return gen_response(self.max_tokens) if self.max_tokens else gen_prompt()
| [
"llama_index.core.llms.callbacks.llm_completion_callback",
"llama_index.core.base.llms.types.LLMMetadata",
"llama_index.core.base.llms.types.CompletionResponse"
] | [((1532, 1557), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1555, 1557), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((1871, 1896), 'llama_index.core.llms.callbacks.llm_completion_callback', 'llm_completion_callback', ([], {}), '()\n', (1894, 1896), False, 'from llama_index.core.llms.callbacks import llm_completion_callback\n'), ((1372, 1417), 'llama_index.core.base.llms.types.LLMMetadata', 'LLMMetadata', ([], {'num_output': '(self.max_tokens or -1)'}), '(num_output=self.max_tokens or -1)\n', (1383, 1417), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((1803, 1841), 'llama_index.core.base.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response_text'}), '(text=response_text)\n', (1821, 1841), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2123, 2164), 'llama_index.core.base.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'prompt', 'delta': 'ch'}), '(text=prompt, delta=ch)\n', (2141, 2164), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n'), ((2410, 2463), 'llama_index.core.base.llms.types.CompletionResponse', 'CompletionResponse', ([], {'text': 'response_text', 'delta': '"""text """'}), "(text=response_text, delta='text ')\n", (2428, 2463), False, 'from llama_index.core.base.llms.types import ChatMessage, CompletionResponse, CompletionResponseGen, LLMMetadata\n')] |
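Illustrative usage of the MockLLM defined above; it involves no external services, so the expected outputs can be stated exactly.
llm = MockLLM(max_tokens=5)
print(llm.complete("ignored prompt").text)  # -> "text text text text text"
streamed = ""
for chunk in llm.stream_complete("hi"):
    streamed += chunk.delta or ""
print(streamed)  # -> "text " repeated 5 times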
from enum import Enum
from typing import Any, AsyncGenerator, Generator, Optional, Union, List
from llama_index.core.bridge.pydantic import BaseModel, Field
from llama_index.core.constants import DEFAULT_CONTEXT_WINDOW, DEFAULT_NUM_OUTPUTS
class MessageRole(str, Enum):
"""Message role."""
SYSTEM = "system"
USER = "user"
ASSISTANT = "assistant"
FUNCTION = "function"
TOOL = "tool"
CHATBOT = "chatbot"
MODEL = "model"
# ===== Generic Model Input - Chat =====
class ChatMessage(BaseModel):
"""Chat message."""
role: MessageRole = MessageRole.USER
content: Optional[Any] = ""
additional_kwargs: dict = Field(default_factory=dict)
def __str__(self) -> str:
return f"{self.role.value}: {self.content}"
@classmethod
def from_str(
cls,
content: str,
role: Union[MessageRole, str] = MessageRole.USER,
**kwargs: Any,
) -> "ChatMessage":
if isinstance(role, str):
role = MessageRole(role)
return cls(role=role, content=content, **kwargs)
class LogProb(BaseModel):
"""LogProb of a token."""
token: str = Field(default_factory=str)
logprob: float = Field(default_factory=float)
bytes: List[int] = Field(default_factory=list)
# ===== Generic Model Output - Chat =====
class ChatResponse(BaseModel):
"""Chat response."""
message: ChatMessage
raw: Optional[dict] = None
delta: Optional[str] = None
logprobs: Optional[List[List[LogProb]]] = None
additional_kwargs: dict = Field(default_factory=dict)
def __str__(self) -> str:
return str(self.message)
ChatResponseGen = Generator[ChatResponse, None, None]
ChatResponseAsyncGen = AsyncGenerator[ChatResponse, None]
# ===== Generic Model Output - Completion =====
class CompletionResponse(BaseModel):
"""
Completion response.
Fields:
text: Text content of the response if not streaming, or if streaming,
the current extent of streamed text.
        additional_kwargs: Additional information on the response (i.e. token
counts, function calling information).
raw: Optional raw JSON that was parsed to populate text, if relevant.
delta: New text that just streamed in (only relevant when streaming).
"""
text: str
additional_kwargs: dict = Field(default_factory=dict)
raw: Optional[dict] = None
delta: Optional[str] = None
def __str__(self) -> str:
return self.text
CompletionResponseGen = Generator[CompletionResponse, None, None]
CompletionResponseAsyncGen = AsyncGenerator[CompletionResponse, None]
class LLMMetadata(BaseModel):
context_window: int = Field(
default=DEFAULT_CONTEXT_WINDOW,
description=(
"Total number of tokens the model can be input and output for one response."
),
)
num_output: int = Field(
default=DEFAULT_NUM_OUTPUTS,
description="Number of tokens the model can output when generating a response.",
)
is_chat_model: bool = Field(
default=False,
description=(
"Set True if the model exposes a chat interface (i.e. can be passed a"
" sequence of messages, rather than text), like OpenAI's"
" /v1/chat/completions endpoint."
),
)
is_function_calling_model: bool = Field(
default=False,
# SEE: https://openai.com/blog/function-calling-and-other-api-updates
description=(
"Set True if the model supports function calling messages, similar to"
" OpenAI's function calling API. For example, converting 'Email Anya to"
" see if she wants to get coffee next Friday' to a function call like"
" `send_email(to: string, body: string)`."
),
)
model_name: str = Field(
default="unknown",
description=(
"The model's name used for logging, testing, and sanity checking. For some"
" models this can be automatically discerned. For other models, like"
" locally loaded models, this must be manually specified."
),
)
system_role: MessageRole = Field(
default=MessageRole.SYSTEM,
description="The role this specific LLM provider"
"expects for system prompt. E.g. 'SYSTEM' for OpenAI, 'CHATBOT' for Cohere",
)
| [
"llama_index.core.bridge.pydantic.Field"
] | [((655, 682), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (660, 682), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1146, 1172), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'str'}), '(default_factory=str)\n', (1151, 1172), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1194, 1222), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'float'}), '(default_factory=float)\n', (1199, 1222), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1246, 1273), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (1251, 1273), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((1544, 1571), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1549, 1571), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2347, 2374), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (2352, 2374), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2690, 2827), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_CONTEXT_WINDOW', 'description': '"""Total number of tokens the model can be input and output for one response."""'}), "(default=DEFAULT_CONTEXT_WINDOW, description=\n 'Total number of tokens the model can be input and output for one response.'\n )\n", (2695, 2827), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((2887, 3007), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'DEFAULT_NUM_OUTPUTS', 'description': '"""Number of tokens the model can output when generating a response."""'}), "(default=DEFAULT_NUM_OUTPUTS, description=\n 'Number of tokens the model can output when generating a response.')\n", (2892, 3007), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3052, 3252), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Set True if the model exposes a chat interface (i.e. can be passed a sequence of messages, rather than text), like OpenAI\'s /v1/chat/completions endpoint."""'}), '(default=False, description=\n "Set True if the model exposes a chat interface (i.e. can be passed a sequence of messages, rather than text), like OpenAI\'s /v1/chat/completions endpoint."\n )\n', (3057, 3252), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3358, 3650), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '(False)', 'description': '"""Set True if the model supports function calling messages, similar to OpenAI\'s function calling API. For example, converting \'Email Anya to see if she wants to get coffee next Friday\' to a function call like `send_email(to: string, body: string)`."""'}), '(default=False, description=\n "Set True if the model supports function calling messages, similar to OpenAI\'s function calling API. 
For example, converting \'Email Anya to see if she wants to get coffee next Friday\' to a function call like `send_email(to: string, body: string)`."\n )\n', (3363, 3650), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((3833, 4079), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': '"""unknown"""', 'description': '"""The model\'s name used for logging, testing, and sanity checking. For some models this can be automatically discerned. For other models, like locally loaded models, this must be manually specified."""'}), '(default=\'unknown\', description=\n "The model\'s name used for logging, testing, and sanity checking. For some models this can be automatically discerned. For other models, like locally loaded models, this must be manually specified."\n )\n', (3838, 4079), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n'), ((4178, 4345), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'default': 'MessageRole.SYSTEM', 'description': '"""The role this specific LLM providerexpects for system prompt. E.g. \'SYSTEM\' for OpenAI, \'CHATBOT\' for Cohere"""'}), '(default=MessageRole.SYSTEM, description=\n "The role this specific LLM providerexpects for system prompt. E.g. \'SYSTEM\' for OpenAI, \'CHATBOT\' for Cohere"\n )\n', (4183, 4345), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field\n')] |
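A brief illustration of constructing the message and metadata types defined above.
msg = ChatMessage.from_str("Hello!", role="user")
print(msg)    # -> user: Hello!
reply = ChatResponse(message=ChatMessage(role=MessageRole.ASSISTANT, content="Hi there"))
print(reply)  # -> assistant: Hi there
meta = LLMMetadata(model_name="my-local-model", context_window=8192, num_output=512)
print(meta.is_chat_model, meta.system_role)  # -> False MessageRole.SYSTEM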