# Ultralytics YOLO 🚀, AGPL-3.0 license
import getpass
from typing import List
import cv2
import numpy as np
import pandas as pd
from ultralytics.data.augment import LetterBox
from ultralytics.utils import LOGGER as logger
from ultralytics.utils import SETTINGS
from ultralytics.utils.checks import check_requirements
from ultralytics.utils.ops import xyxy2xywh
from ultralytics.utils.plotting import plot_images
def get_table_schema(vector_size):
"""Extracts and returns the schema of a database table."""
from lancedb.pydantic import LanceModel, Vector
class Schema(LanceModel):
im_file: str
labels: List[str]
cls: List[int]
bboxes: List[List[float]]
masks: List[List[List[int]]]
keypoints: List[List[List[float]]]
vector: Vector(vector_size)
return Schema
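# A minimal sketch of how the generated schema plugs into table creation
# (illustrative, not part of the original module; the database path and table
# name below are hypothetical):
#
#   import lancedb
#   db = lancedb.connect("~/lancedb")
#   table = db.create_table("explorer", schema=get_table_schema(256), mode="overwrite")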
def get_sim_index_schema():
"""Returns a LanceModel schema for a database table with specified vector size."""
from lancedb.pydantic import LanceModel
class Schema(LanceModel):
idx: int
im_file: str
count: int
sim_im_files: List[str]
return Schema
def sanitize_batch(batch, dataset_info):
"""Sanitizes input batch for inference, ensuring correct format and dimensions."""
batch["cls"] = batch["cls"].flatten().int().tolist()
box_cls_pair = sorted(zip(batch["bboxes"].tolist(), batch["cls"]), key=lambda x: x[1])
batch["bboxes"] = [box for box, _ in box_cls_pair]
batch["cls"] = [cls for _, cls in box_cls_pair]
batch["labels"] = [dataset_info["names"][i] for i in batch["cls"]]
batch["masks"] = batch["masks"].tolist() if "masks" in batch else [[[]]]
batch["keypoints"] = batch["keypoints"].tolist() if "keypoints" in batch else [[[]]]
return batch
def plot_query_result(similar_set, plot_labels=True):
"""
Plot images from the similar set.
Args:
        similar_set (pyarrow.Table | pandas.DataFrame): Object containing the similar data points.
        plot_labels (bool): Whether to plot labels or not.
"""
similar_set = (
similar_set.to_dict(orient="list") if isinstance(similar_set, pd.DataFrame) else similar_set.to_pydict()
)
empty_masks = [[[]]]
empty_boxes = [[]]
images = similar_set.get("im_file", [])
    bboxes = similar_set.get("bboxes", []) if similar_set.get("bboxes") != empty_boxes else []
    masks = similar_set.get("masks") if similar_set.get("masks", [empty_masks])[0] != empty_masks else []
    kpts = similar_set.get("keypoints") if similar_set.get("keypoints", [empty_masks])[0] != empty_masks else []
cls = similar_set.get("cls", [])
plot_size = 640
imgs, batch_idx, plot_boxes, plot_masks, plot_kpts = [], [], [], [], []
for i, imf in enumerate(images):
im = cv2.imread(imf)
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
h, w = im.shape[:2]
r = min(plot_size / h, plot_size / w)
imgs.append(LetterBox(plot_size, center=False)(image=im).transpose(2, 0, 1))
if plot_labels:
if len(bboxes) > i and len(bboxes[i]) > 0:
box = np.array(bboxes[i], dtype=np.float32)
box[:, [0, 2]] *= r
box[:, [1, 3]] *= r
plot_boxes.append(box)
if len(masks) > i and len(masks[i]) > 0:
mask = np.array(masks[i], dtype=np.uint8)[0]
plot_masks.append(LetterBox(plot_size, center=False)(image=mask))
if len(kpts) > i and kpts[i] is not None:
kpt = np.array(kpts[i], dtype=np.float32)
kpt[:, :, :2] *= r
plot_kpts.append(kpt)
batch_idx.append(np.ones(len(np.array(bboxes[i], dtype=np.float32))) * i)
imgs = np.stack(imgs, axis=0)
masks = np.stack(plot_masks, axis=0) if plot_masks else np.zeros(0, dtype=np.uint8)
kpts = np.concatenate(plot_kpts, axis=0) if plot_kpts else np.zeros((0, 51), dtype=np.float32)
boxes = xyxy2xywh(np.concatenate(plot_boxes, axis=0)) if plot_boxes else np.zeros(0, dtype=np.float32)
batch_idx = np.concatenate(batch_idx, axis=0)
cls = np.concatenate([np.array(c, dtype=np.int32) for c in cls], axis=0)
return plot_images(
imgs, batch_idx, cls, bboxes=boxes, masks=masks, kpts=kpts, max_subplots=len(images), save=False, threaded=False
)
def prompt_sql_query(query):
"""Plots images with optional labels from a similar data set."""
check_requirements("openai>=1.6.1")
from openai import OpenAI
if not SETTINGS["openai_api_key"]:
logger.warning("OpenAI API key not found in settings. Please enter your API key below.")
openai_api_key = getpass.getpass("OpenAI API key: ")
SETTINGS.update({"openai_api_key": openai_api_key})
openai = OpenAI(api_key=SETTINGS["openai_api_key"])
messages = [
{
"role": "system",
"content": """
                You are a helpful data scientist proficient in SQL. You need to output exactly one SQL query based on
                the following schema and a user request. Always use a fixed selection statement that selects
                everything from 'table', like `SELECT * FROM 'table'`.
Schema:
im_file: string not null
labels: list<item: string> not null
child 0, item: string
cls: list<item: int64> not null
child 0, item: int64
bboxes: list<item: list<item: double>> not null
child 0, item: list<item: double>
child 0, item: double
masks: list<item: list<item: list<item: int64>>> not null
child 0, item: list<item: list<item: int64>>
child 0, item: list<item: int64>
child 0, item: int64
keypoints: list<item: list<item: list<item: double>>> not null
child 0, item: list<item: list<item: double>>
child 0, item: list<item: double>
child 0, item: double
vector: fixed_size_list<item: float>[256] not null
child 0, item: float
Some details about the schema:
- the "labels" column contains the string values like 'person' and 'dog' for the respective objects
in each image
- the "cls" column contains the integer values on these classes that map them the labels
Example of a correct query:
request - Get all data points that contain 2 or more people and at least one dog
correct query-
SELECT * FROM 'table' WHERE ARRAY_LENGTH(cls) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 2 AND ARRAY_LENGTH(FILTER(labels, x -> x = 'dog')) >= 1;
""",
},
{"role": "user", "content": f"{query}"},
]
response = openai.chat.completions.create(model="gpt-3.5-turbo", messages=messages)
return response.choices[0].message.content
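# A minimal usage sketch (illustrative, not part of the original module): the
# returned string is a single SQL statement that the caller still has to run
# against the LanceDB table.
#
#   sql = prompt_sql_query("Get all images that contain at least one person")
#   print(sql)  # e.g. SELECT * FROM 'table' WHERE ARRAY_LENGTH(FILTER(labels, x -> x = 'person')) >= 1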
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import click
from lancedb.utils import CONFIG
@click.group()
@click.version_option(help="LanceDB command line interface entry point")
def cli():
"LanceDB command line interface"
diagnostics_help = """
Enable or disable LanceDB diagnostics. When enabled, LanceDB will send anonymous events
to help us improve LanceDB. These diagnostics are used only for error reporting and no
data is collected. You can find more about diagnostics in our docs:
https://lancedb.github.io/lancedb/cli_config/
"""
@cli.command(help=diagnostics_help)
@click.option("--enabled/--disabled", default=True)
def diagnostics(enabled):
    CONFIG.update({"diagnostics": enabled})
click.echo("LanceDB diagnostics is %s" % ("enabled" if enabled else "disabled"))
@cli.command(help="Show current LanceDB configuration")
def config():
# TODO: pretty print as table with colors and formatting
click.echo("Current LanceDB configuration:")
cfg = CONFIG.copy()
cfg.pop("uuid") # Don't show uuid as it is not configurable
    for key, value in cfg.items():
        click.echo("{} ({})".format(key, value))
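# Example invocations (illustrative), assuming the `cli` group above is wired
# up as a `lancedb` console entry point:
#
#   $ lancedb diagnostics --disabled
#   $ lancedb config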
# Copyright (c) Hegel AI, Inc.
# All rights reserved.
#
# This source code's license can be found in the
# LICENSE file in the root directory of this source tree.
import itertools
import warnings
import pandas as pd
from typing import Callable, Optional
try:
import lancedb
from lancedb.embeddings import with_embeddings
except ImportError:
lancedb = None
import logging
from time import perf_counter
from .experiment import Experiment
from ._utils import _get_dynamic_columns
VALID_TASKS = [""]
def query_builder(
table: "lancedb.Table",
embed_fn: Callable,
text: str,
metric: str = "cosine",
limit: int = 3,
    filter: Optional[str] = None,
    nprobes: Optional[int] = None,
    refine_factor: Optional[int] = None,
):
if nprobes is not None or refine_factor is not None:
warnings.warn(
"`nprobes` and `refine_factor` are not used by the default `query_builder`. "
"Feel free to open an issue to request adding support for them."
)
query = table.search(embed_fn(text)[0]).metric(metric)
if filter:
query = query.where(filter)
return query.limit(limit).to_df()
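# A minimal call sketch (illustrative; the table, embedding function, and
# filter expression below are hypothetical):
#
#   df = query_builder(table, embed_fn, "query text", metric="cosine",
#                      limit=5, filter="label = 'positive'")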
class LanceDBExperiment(Experiment):
r"""
Perform an experiment with ``LanceDB`` to test different embedding functions or retrieval arguments.
You can query from an existing table, or create a new one (and insert documents into it) during
the experiment.
Args:
uri (str): LanceDB uri to interact with your database. Default is "lancedb"
table_name (str): the table that you will get or create. Default is "table"
use_existing_table (bool): determines whether to create a new collection or use
an existing one
embedding_fns (list[Callable]): embedding functions to test in the experiment
by default only uses the default one in LanceDB
query_args (dict[str, list]): parameters used to query the table
Each value is expected to be a list to create all possible combinations
data (Optional[list[dict]]): documents or embeddings that will be added to
the newly created table
text_col_name (str): name of the text column in the table. Default is "text"
clean_up (bool): determines whether to drop the table after the experiment ends
"""
def __init__(
self,
embedding_fns: dict[str, Callable],
query_args: dict[str, list],
uri: str = "lancedb",
table_name: str = "table",
use_existing_table: bool = False,
data: Optional[list[dict]] = None,
text_col_name: str = "text",
clean_up: bool = False,
):
if lancedb is None:
raise ModuleNotFoundError(
"Package `lancedb` is required to be installed to use this experiment."
"Please use `pip install lancedb` to install the package"
)
self.table_name = table_name
self.use_existing_table = use_existing_table
self.embedding_fns = embedding_fns
if use_existing_table and data:
raise RuntimeError("You can either use an existing collection or create a new one during the experiment.")
if not use_existing_table and data is None:
raise RuntimeError("If you choose to create a new collection, you must also add to it.")
self.data = data if data is not None else []
self.argument_combos: list[dict] = []
self.text_col_name = text_col_name
self.db = lancedb.connect(uri)
self.completion_fn = self.lancedb_completion_fn
self.query_args = query_args
self.clean_up = clean_up
super().__init__()
def prepare(self):
for combo in itertools.product(*self.query_args.values()):
self.argument_combos.append(dict(zip(self.query_args.keys(), combo)))
def run(self, runs: int = 1):
input_args = [] # This will be used to construct DataFrame table
results = []
latencies = []
if not self.argument_combos:
logging.info("Preparing first...")
self.prepare()
for emb_fn_name, emb_fn in self.embedding_fns.items():
if self.use_existing_table: # Use existing table
table = self.db.open_table(self.table_name)
if not table:
raise RuntimeError(f"Table {self.table_name} does not exist.")
else: # Create table and insert data
data = with_embeddings(emb_fn, self.data, self.text_col_name)
table = self.db.create_table(self.table_name, data, mode="overwrite")
# Query from table
for query_arg_dict in self.argument_combos:
query_args = query_arg_dict.copy()
for _ in range(runs):
start = perf_counter()
results.append(self.lancedb_completion_fn(table=table, embedding_fn=emb_fn, **query_args))
latencies.append(perf_counter() - start)
query_args["emb_fn"] = emb_fn_name # Saving for visualization
input_args.append(query_args)
# Clean up
if self.clean_up:
self.db.drop_table(self.table_name)
self._construct_result_dfs(input_args, results, latencies)
def lancedb_completion_fn(self, table, embedding_fn, **kwargs):
return query_builder(table, embedding_fn, **kwargs)
def _construct_result_dfs(
self,
input_args: list[dict[str, object]],
results: list[dict[str, object]],
latencies: list[float],
):
r"""
Construct a few DataFrames that contain all relevant data (i.e. input arguments, results, evaluation metrics).
        This version only extracts the most relevant objects returned by LanceDB.
Args:
input_args (list[dict[str, object]]): list of dictionaries, where each of them is a set of
input argument that was passed into the model
results (list[dict[str, object]]): list of responses from the model
latencies (list[float]): list of latency measurements
"""
        # `input_arg_df` contains all input args
input_arg_df = pd.DataFrame(input_args)
        # `dynamic_input_arg_df` contains input args that have more than one unique value
dynamic_input_arg_df = _get_dynamic_columns(input_arg_df)
# `response_df` contains the extracted response (often being the text response)
response_dict = dict()
response_dict["top doc ids"] = [self._extract_top_doc_ids(result) for result in results]
response_dict["distances"] = [self._extract_lancedb_dists(result) for result in results]
response_dict["documents"] = [self._extract_lancedb_docs(result) for result in results]
response_df = pd.DataFrame(response_dict)
# `result_df` contains everything returned by the completion function
result_df = response_df # pd.concat([self.response_df, pd.DataFrame(results)], axis=1)
# `score_df` contains computed metrics (e.g. latency, evaluation metrics)
self.score_df = pd.DataFrame({"latency": latencies})
# `partial_df` contains some input arguments, extracted responses, and score
self.partial_df = pd.concat([dynamic_input_arg_df, response_df, self.score_df], axis=1)
# `full_df` contains all input arguments, responses, and score
self.full_df = pd.concat([input_arg_df, result_df, self.score_df], axis=1)
@staticmethod
    def _extract_top_doc_ids(output: pd.DataFrame) -> list[str]:
        r"""Helper function to get the top document ids from LanceDB results."""
return output.to_dict(orient="list")["ids"]
@staticmethod
    def _extract_lancedb_dists(output: pd.DataFrame) -> list[float]:
        r"""Helper function to get distances between documents from LanceDB results."""
return output.to_dict(orient="list")["_distance"]
@staticmethod
    def _extract_lancedb_docs(output: pd.DataFrame) -> list[str]:
        r"""Helper function to get the retrieved document texts from LanceDB results."""
return output.to_dict(orient="list")["text"]
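# A minimal experiment sketch (illustrative; the embedding function and the
# documents are hypothetical, and the `query_args` keys mirror the
# `query_builder` parameters above):
#
#   def embed(texts):
#       ...  # return one embedding per input text
#
#   experiment = LanceDBExperiment(
#       embedding_fns={"my_embedder": embed},
#       query_args={"text": ["What is LanceDB?"], "limit": [1, 3]},
#       data=[{"text": "LanceDB is an embedded vector database."}],
#   )
#   experiment.run(runs=2)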
import os
from pathlib import Path
from tqdm import tqdm
from lancedb import connect
from pydantic import BaseModel
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import get_registry
from typing import Iterable
DB_PATH = Path(os.getcwd(), "db")
DATA_PATH = Path(os.getcwd(), "data")
DB_TABLE = "paul_graham"
class Document(BaseModel):
id: int
text: str
filename: str
openai = get_registry().get("openai").create(name="text-embedding-3-large", dim=256)
class TextChunk(LanceModel):
id: int
doc_id: int
chunk_num: int
start_pos: int
end_pos: int
text: str = openai.SourceField()
# For some reason if we call openai.ndim(), it returns 1536 instead of 256 like we want
vector: Vector(openai.ndims()) = openai.VectorField(default=None)
def chunk_text(
documents: Iterable[Document], window_size: int = 1024, overlap: int = 0
):
id = 0
for doc in documents:
for chunk_num, start_pos in enumerate(
range(0, len(doc.text), window_size - overlap)
):
# TODO: Fix up this and use a Lance Model instead - have reached out to the team to ask for some help
yield {
"id": id,
"doc_id": doc.id,
"chunk_num": chunk_num,
"start_pos": start_pos,
"end_pos": start_pos + window_size,
"text": doc.text[start_pos : start_pos + window_size],
}
id += 1
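# Worked example (illustrative): with window_size=4 and overlap=1 the start
# positions advance by 3 characters, so the text "abcdefgh" produces the
# chunks "abcd" (0:4), "defg" (3:7), and "gh" (6:10, clipped by slicing).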
def read_file_content(path: Path, file_suffix: str) -> Iterable[Document]:
for i, file in enumerate(path.iterdir()):
if file.suffix != file_suffix:
continue
yield Document(id=i, text=file.read_text(), filename=file.name)
def batch_chunks(chunks, batch_size=10):
batch = []
for item in chunks:
batch.append(item)
if len(batch) == batch_size:
yield batch
batch = []
if batch:
yield batch
def main():
assert "OPENAI_API_KEY" in os.environ, "OPENAI_API_KEY is not set"
db = connect(DB_PATH)
table = db.create_table(DB_TABLE, schema=TextChunk, mode="overwrite")
documents = read_file_content(DATA_PATH, file_suffix=".md")
chunks = chunk_text(documents)
batched_chunks = batch_chunks(chunks, 20)
for chunk_batch in tqdm(batched_chunks):
table.add(chunk_batch)
if __name__ == "__main__":
main()
"""LanceDB vector store with cloud storage support."""
import os
from typing import Any, Optional
from dotenv import load_dotenv
from llama_index.schema import NodeRelationship, RelatedNodeInfo, TextNode
from llama_index.vector_stores import LanceDBVectorStore as LanceDBVectorStoreBase
from llama_index.vector_stores.lancedb import _to_lance_filter, _to_llama_similarities
from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryResult
from pandas import DataFrame
load_dotenv()
class LanceDBVectorStore(LanceDBVectorStoreBase):
"""Advanced LanceDB Vector Store supporting cloud storage and prefiltering."""
from lancedb.query import LanceQueryBuilder
from lancedb.table import Table
def __init__(
self,
uri: str,
table_name: str = "vectors",
nprobes: int = 20,
refine_factor: Optional[int] = None,
api_key: Optional[str] = None,
region: Optional[str] = None,
**kwargs: Any,
) -> None:
"""Init params."""
self._setup_connection(uri, api_key, region)
self.uri = uri
self.table_name = table_name
self.nprobes = nprobes
self.refine_factor = refine_factor
self.api_key = api_key
self.region = region
def _setup_connection(self, uri: str, api_key: Optional[str] = None, region: Optional[str] = None):
"""Establishes a robust connection to LanceDB."""
api_key = api_key or os.getenv('LANCEDB_API_KEY')
region = region or os.getenv('LANCEDB_REGION')
import_err_msg = "`lancedb` package not found, please run `pip install lancedb`"
try:
import lancedb
except ImportError:
raise ImportError(import_err_msg)
if api_key and region:
self.connection = lancedb.connect(uri, api_key=api_key, region=region)
else:
self.connection = lancedb.connect(uri)
def query(
self,
query: VectorStoreQuery,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Enhanced query method to support prefiltering in LanceDB queries."""
table = self.connection.open_table(self.table_name)
lance_query = self._prepare_lance_query(query, table, **kwargs)
results = lance_query.to_df()
return self._construct_query_result(results)
def _prepare_lance_query(self, query: VectorStoreQuery, table: Table, **kwargs) -> LanceQueryBuilder:
"""Prepares the LanceDB query considering prefiltering and additional parameters."""
if query.filters is not None:
if "where" in kwargs:
raise ValueError(
"Cannot specify filter via both query and kwargs. "
"Use kwargs only for lancedb specific items that are "
"not supported via the generic query interface.")
where = _to_lance_filter(query.filters)
else:
where = kwargs.pop("where", None)
        prefilter = kwargs.pop("prefilter", False)
lance_query = (
table.search(query.query_embedding).limit(query.similarity_top_k).where(
where, prefilter=prefilter).nprobes(self.nprobes))
if self.refine_factor is not None:
lance_query.refine_factor(self.refine_factor)
return lance_query
def _construct_query_result(self, results: DataFrame) -> VectorStoreQueryResult:
"""Constructs a VectorStoreQueryResult from a LanceDB query result."""
nodes = []
for _, row in results.iterrows():
node = TextNode(
text=row.get('text', ''), # ensure text is a string
id_=row['id'],
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=row['doc_id']),
})
nodes.append(node)
return VectorStoreQueryResult(
nodes=nodes,
similarities=_to_llama_similarities(results),
ids=results["id"].tolist(),
)
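# A minimal usage sketch (illustrative; the uri, table name, and query object
# below are hypothetical):
#
#   store = LanceDBVectorStore(uri="./lancedb", table_name="vectors")
#   result = store.query(my_query, prefilter=True)  # my_query: a VectorStoreQuery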
from pathlib import Path
from typing import Any, Callable
from lancedb import DBConnection as LanceDBConnection
from lancedb import connect as lancedb_connect
from lancedb.table import Table as LanceDBTable
from openai import Client as OpenAIClient
from pydantic import Field, PrivateAttr
from crewai_tools.tools.rag.rag_tool import Adapter
def _default_embedding_function():
client = OpenAIClient()
def _embedding_function(input):
rs = client.embeddings.create(input=input, model="text-embedding-ada-002")
return [record.embedding for record in rs.data]
return _embedding_function
class LanceDBAdapter(Adapter):
uri: str | Path
table_name: str
embedding_function: Callable = Field(default_factory=_default_embedding_function)
top_k: int = 3
vector_column_name: str = "vector"
text_column_name: str = "text"
_db: LanceDBConnection = PrivateAttr()
_table: LanceDBTable = PrivateAttr()
def model_post_init(self, __context: Any) -> None:
self._db = lancedb_connect(self.uri)
self._table = self._db.open_table(self.table_name)
return super().model_post_init(__context)
def query(self, question: str) -> str:
query = self.embedding_function([question])[0]
results = (
self._table.search(query, vector_column_name=self.vector_column_name)
.limit(self.top_k)
.select([self.text_column_name])
.to_list()
)
values = [result[self.text_column_name] for result in results]
return "\n".join(values)
import logging
from typing import Any, Dict, Generator, List, Optional, Sequence, Tuple, Type
import lancedb
import pandas as pd
from dotenv import load_dotenv
from lancedb.pydantic import LanceModel, Vector
from lancedb.query import LanceVectorQueryBuilder
from pydantic import BaseModel, ValidationError, create_model
from langroid.embedding_models.base import (
EmbeddingModel,
EmbeddingModelsConfig,
)
from langroid.embedding_models.models import OpenAIEmbeddingsConfig
from langroid.mytypes import Document, EmbeddingFunction
from langroid.utils.configuration import settings
from langroid.utils.pydantic_utils import (
dataframe_to_document_model,
dataframe_to_documents,
extend_document_class,
extra_metadata,
flatten_pydantic_instance,
flatten_pydantic_model,
nested_dict_from_flat,
)
from langroid.vector_store.base import VectorStore, VectorStoreConfig
logger = logging.getLogger(__name__)
class LanceDBConfig(VectorStoreConfig):
cloud: bool = False
collection_name: str | None = "temp"
storage_path: str = ".lancedb/data"
embedding: EmbeddingModelsConfig = OpenAIEmbeddingsConfig()
distance: str = "cosine"
# document_class is used to store in lancedb with right schema,
# and also to retrieve the right type of Documents when searching.
document_class: Type[Document] = Document
flatten: bool = False # flatten Document class into LanceSchema ?
class LanceDB(VectorStore):
def __init__(self, config: LanceDBConfig = LanceDBConfig()):
super().__init__(config)
self.config: LanceDBConfig = config
emb_model = EmbeddingModel.create(config.embedding)
self.embedding_fn: EmbeddingFunction = emb_model.embedding_fn()
self.embedding_dim = emb_model.embedding_dims
self.host = config.host
self.port = config.port
self.is_from_dataframe = False # were docs ingested from a dataframe?
self.df_metadata_columns: List[str] = [] # metadata columns from dataframe
self._setup_schemas(config.document_class)
load_dotenv()
if self.config.cloud:
logger.warning(
"LanceDB Cloud is not available yet. Switching to local storage."
)
config.cloud = False
else:
try:
self.client = lancedb.connect(
uri=config.storage_path,
)
except Exception as e:
new_storage_path = config.storage_path + ".new"
logger.warning(
f"""
Error connecting to local LanceDB at {config.storage_path}:
{e}
Switching to {new_storage_path}
"""
)
self.client = lancedb.connect(
uri=new_storage_path,
)
# Note: Only create collection if a non-null collection name is provided.
# This is useful to delay creation of vecdb until we have a suitable
# collection name (e.g. we could get it from the url or folder path).
if config.collection_name is not None:
self.create_collection(
config.collection_name, replace=config.replace_collection
)
def _setup_schemas(self, doc_cls: Type[Document] | None) -> None:
doc_cls = doc_cls or self.config.document_class
self.unflattened_schema = self._create_lance_schema(doc_cls)
self.schema = (
self._create_flat_lance_schema(doc_cls)
if self.config.flatten
else self.unflattened_schema
)
def clear_empty_collections(self) -> int:
coll_names = self.list_collections()
n_deletes = 0
for name in coll_names:
nr = self.client.open_table(name).head(1).shape[0]
if nr == 0:
n_deletes += 1
self.client.drop_table(name)
return n_deletes
def clear_all_collections(self, really: bool = False, prefix: str = "") -> int:
"""Clear all collections with the given prefix."""
if not really:
logger.warning("Not deleting all collections, set really=True to confirm")
return 0
coll_names = [
c for c in self.list_collections(empty=True) if c.startswith(prefix)
]
if len(coll_names) == 0:
logger.warning(f"No collections found with prefix {prefix}")
return 0
n_empty_deletes = 0
n_non_empty_deletes = 0
for name in coll_names:
nr = self.client.open_table(name).head(1).shape[0]
n_empty_deletes += nr == 0
n_non_empty_deletes += nr > 0
self.client.drop_table(name)
logger.warning(
f"""
Deleted {n_empty_deletes} empty collections and
{n_non_empty_deletes} non-empty collections.
"""
)
return n_empty_deletes + n_non_empty_deletes
def list_collections(self, empty: bool = False) -> List[str]:
"""
Returns:
List of collection names that have at least one vector.
Args:
empty (bool, optional): Whether to include empty collections.
"""
colls = self.client.table_names(limit=None)
if len(colls) == 0:
return []
if empty: # include empty tbls
return colls # type: ignore
counts = [self.client.open_table(coll).head(1).shape[0] for coll in colls]
return [coll for coll, count in zip(colls, counts) if count > 0]
def _create_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]:
"""
Create a subclass of LanceModel with fields:
- id (str)
- Vector field that has dims equal to
the embedding dimension of the embedding model, and a data field of type
DocClass.
- other fields from doc_cls
Args:
doc_cls (Type[Document]): A Pydantic model which should be a subclass of
Document, to be used as the type for the data field.
Returns:
Type[BaseModel]: A new Pydantic model subclassing from LanceModel.
Raises:
            ValueError: If `doc_cls` is not a subclass of Document.
"""
if not issubclass(doc_cls, Document):
raise ValueError("DocClass must be a subclass of Document")
n = self.embedding_dim
# Prepare fields for the new model
fields = {"id": (str, ...), "vector": (Vector(n), ...)}
sorted_fields = dict(
sorted(doc_cls.__fields__.items(), key=lambda item: item[0])
)
# Add both statically and dynamically defined fields from doc_cls
for field_name, field in sorted_fields.items():
fields[field_name] = (field.outer_type_, field.default)
# Create the new model with dynamic fields
NewModel = create_model(
"NewModel", __base__=LanceModel, **fields
) # type: ignore
return NewModel # type: ignore
def _create_flat_lance_schema(self, doc_cls: Type[Document]) -> Type[BaseModel]:
"""
Flat version of the lance_schema, as nested Pydantic schemas are not yet
supported by LanceDB.
"""
lance_model = self._create_lance_schema(doc_cls)
FlatModel = flatten_pydantic_model(lance_model, base_model=LanceModel)
return FlatModel
def create_collection(self, collection_name: str, replace: bool = False) -> None:
"""
Create a collection with the given name, optionally replacing an existing
collection if `replace` is True.
Args:
collection_name (str): Name of the collection to create.
replace (bool): Whether to replace an existing collection
with the same name. Defaults to False.
"""
self.config.collection_name = collection_name
collections = self.list_collections()
if collection_name in collections:
coll = self.client.open_table(collection_name)
if coll.head().shape[0] > 0:
logger.warning(f"Non-empty Collection {collection_name} already exists")
if not replace:
logger.warning("Not replacing collection")
return
else:
logger.warning("Recreating fresh collection")
self.client.create_table(collection_name, schema=self.schema, mode="overwrite")
if settings.debug:
level = logger.getEffectiveLevel()
logger.setLevel(logging.INFO)
logger.setLevel(level)
def _maybe_set_doc_class_schema(self, doc: Document) -> None:
"""
Set the config.document_class and self.schema based on doc if needed
Args:
doc: an instance of Document, to be added to a collection
"""
extra_metadata_fields = extra_metadata(doc, self.config.document_class)
if len(extra_metadata_fields) > 0:
logger.warning(
f"""
Added documents contain extra metadata fields:
{extra_metadata_fields}
which were not present in the original config.document_class.
Trying to change document_class and corresponding schemas.
Overriding LanceDBConfig.document_class with an auto-generated
Pydantic class that includes these extra fields.
If this fails, or you see odd results, it is recommended that you
define a subclass of Document, with metadata of class derived from
DocMetaData, with extra fields defined via
`Field(..., description="...")` declarations,
and set this document class as the value of the
LanceDBConfig.document_class attribute.
"""
)
doc_cls = extend_document_class(doc)
self.config.document_class = doc_cls
self._setup_schemas(doc_cls)
def add_documents(self, documents: Sequence[Document]) -> None:
super().maybe_add_ids(documents)
colls = self.list_collections(empty=True)
if len(documents) == 0:
return
embedding_vecs = self.embedding_fn([doc.content for doc in documents])
coll_name = self.config.collection_name
if coll_name is None:
raise ValueError("No collection name set, cannot ingest docs")
self._maybe_set_doc_class_schema(documents[0])
if (
coll_name not in colls
or self.client.open_table(coll_name).head(1).shape[0] == 0
):
# collection either doesn't exist or is empty, so replace it,
self.create_collection(coll_name, replace=True)
ids = [str(d.id()) for d in documents]
# don't insert all at once, batch in chunks of b,
# else we get an API error
b = self.config.batch_size
def make_batches() -> Generator[List[BaseModel], None, None]:
for i in range(0, len(ids), b):
batch = [
self.unflattened_schema(
id=ids[i + j],
vector=embedding_vecs[i + j],
**doc.dict(),
)
for j, doc in enumerate(documents[i : i + b])
]
if self.config.flatten:
batch = [
flatten_pydantic_instance(instance) # type: ignore
for instance in batch
]
yield batch
tbl = self.client.open_table(self.config.collection_name)
try:
tbl.add(make_batches())
except Exception as e:
logger.error(
f"""
Error adding documents to LanceDB: {e}
                POSSIBLE REMEDY: Delete the LanceDB storage directory
{self.config.storage_path} and try again.
"""
)
def add_dataframe(
self,
df: pd.DataFrame,
content: str = "content",
metadata: List[str] = [],
) -> None:
"""
Add a dataframe to the collection.
Args:
df (pd.DataFrame): A dataframe
content (str): The name of the column in the dataframe that contains the
text content to be embedded using the embedding model.
metadata (List[str]): A list of column names in the dataframe that contain
metadata to be stored in the database. Defaults to [].
"""
self.is_from_dataframe = True
actual_metadata = metadata.copy()
self.df_metadata_columns = actual_metadata # could be updated below
# get content column
content_values = df[content].values.tolist()
embedding_vecs = self.embedding_fn(content_values)
# add vector column
df["vector"] = embedding_vecs
if content != "content":
# rename content column to "content", leave existing column intact
df = df.rename(columns={content: "content"}, inplace=False)
if "id" not in df.columns:
docs = dataframe_to_documents(df, content="content", metadata=metadata)
ids = [str(d.id()) for d in docs]
df["id"] = ids
if "id" not in actual_metadata:
actual_metadata += ["id"]
colls = self.list_collections(empty=True)
coll_name = self.config.collection_name
if (
coll_name not in colls
or self.client.open_table(coll_name).head(1).shape[0] == 0
):
# collection either doesn't exist or is empty, so replace it
# and set new schema from df
self.client.create_table(
self.config.collection_name,
data=df,
mode="overwrite",
)
doc_cls = dataframe_to_document_model(
df,
content=content,
metadata=actual_metadata,
exclude=["vector"],
)
self.config.document_class = doc_cls # type: ignore
self._setup_schemas(doc_cls) # type: ignore
else:
# collection exists and is not empty, so append to it
tbl = self.client.open_table(self.config.collection_name)
tbl.add(df)
def delete_collection(self, collection_name: str) -> None:
self.client.drop_table(collection_name)
def _lance_result_to_docs(self, result: LanceVectorQueryBuilder) -> List[Document]:
if self.is_from_dataframe:
df = result.to_pandas()
return dataframe_to_documents(
df,
content="content",
metadata=self.df_metadata_columns,
doc_cls=self.config.document_class,
)
else:
records = result.to_arrow().to_pylist()
return self._records_to_docs(records)
def _records_to_docs(self, records: List[Dict[str, Any]]) -> List[Document]:
if self.config.flatten:
docs = [
self.unflattened_schema(**nested_dict_from_flat(rec)) for rec in records
]
else:
try:
docs = [self.schema(**rec) for rec in records]
except ValidationError as e:
raise ValueError(
f"""
Error validating LanceDB result: {e}
HINT: This could happen when you're re-using an
existing LanceDB store with a different schema.
Try deleting your local lancedb storage at `{self.config.storage_path}`
re-ingesting your documents and/or replacing the collections.
"""
)
doc_cls = self.config.document_class
doc_cls_field_names = doc_cls.__fields__.keys()
return [
doc_cls(
**{
field_name: getattr(doc, field_name)
for field_name in doc_cls_field_names
}
)
for doc in docs
]
def get_all_documents(self, where: str = "") -> List[Document]:
if self.config.collection_name is None:
raise ValueError("No collection name set, cannot retrieve docs")
tbl = self.client.open_table(self.config.collection_name)
pre_result = tbl.search(None).where(where or None).limit(None)
return self._lance_result_to_docs(pre_result)
def get_documents_by_ids(self, ids: List[str]) -> List[Document]:
if self.config.collection_name is None:
raise ValueError("No collection name set, cannot retrieve docs")
_ids = [str(id) for id in ids]
tbl = self.client.open_table(self.config.collection_name)
docs = []
for _id in _ids:
results = self._lance_result_to_docs(tbl.search().where(f"id == '{_id}'"))
if len(results) > 0:
docs.append(results[0])
return docs
def similar_texts_with_scores(
self,
text: str,
k: int = 1,
where: Optional[str] = None,
) -> List[Tuple[Document, float]]:
embedding = self.embedding_fn([text])[0]
tbl = self.client.open_table(self.config.collection_name)
result = (
tbl.search(embedding).metric(self.config.distance).where(where).limit(k)
)
docs = self._lance_result_to_docs(result)
# note _distance is 1 - cosine
if self.is_from_dataframe:
scores = [
1 - rec["_distance"] for rec in result.to_pandas().to_dict("records")
]
else:
scores = [1 - rec["_distance"] for rec in result.to_arrow().to_pylist()]
if len(docs) == 0:
logger.warning(f"No matches found for {text}")
return []
if settings.debug:
logger.info(f"Found {len(docs)} matches, max score: {max(scores)}")
doc_score_pairs = list(zip(docs, scores))
self.show_if_debug(doc_score_pairs)
return doc_score_pairs
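# A minimal usage sketch (illustrative; the collection name and the documents
# are hypothetical, and langroid Documents also carry a `metadata` field):
#
#   config = LanceDBConfig(collection_name="my_docs", replace_collection=True)
#   vecdb = LanceDB(config)
#   vecdb.add_documents(docs)  # docs: a list of langroid Document objects
#   matches = vecdb.similar_texts_with_scores("greeting", k=2)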
import json
import lancedb
from lancedb.pydantic import Vector, LanceModel
from datetime import datetime
# import pyarrow as pa
TABLE_NAME = "documents"
uri = "data/sample-lancedb"
db = lancedb.connect(uri)
# vector: list of vectors
# file_name: name of file
# file_path: path of file
# id
# updated_at
# created_at
class Document(LanceModel):
id: str
file_name: str
file_path: str
created_at: datetime
updated_at: datetime
    vector: Vector(768)  # PaLM embedding size
try:
table = db.create_table(TABLE_NAME, schema=Document)
except OSError:
print("table exists")
table = db.open_table(TABLE_NAME)
except Exception as inst:
# Print out the type of exceptions.
print(type(inst))
print(inst.args)
print(inst)
if True:
now = datetime.now()
# Idempotent upsert. Alternatively we can delete first, then insert.
table.add(
[
Document(
id="1",
file_name="test_name",
file_path="test_path",
created_at=now,
updated_at=now,
vector=[i for i in range(768)],
)
]
)
table.delete(f'id="1" AND created_at != timestamp "{now}"')
if False:
table.update(
where='id="1"',
values=Document(
id="1",
file_name="test_name",
file_path="test_path",
created_at=datetime.now(),
updated_at=datetime.now(),
vector=[i for i in range(768)],
),
)
vector = [i for i in range(768)]
result = table.search(vector).limit(2).to_list()
for item in result:
print(item)
# print(json.dumps(item, indent=2))
print(db[TABLE_NAME].head())
import json
from sentence_transformers import SentenceTransformer
from pydantic.main import ModelMetaclass
from pathlib import Path
import pandas as pd
import sqlite3
from uuid import uuid4
import lancedb
encoder = SentenceTransformer('all-MiniLM-L6-v2')
data_folder = Path('data/collections')
config_file = Path('data/config/indexes.yaml')
index_folder = Path('indexes')
lance_folder = Path('indexes')
lance_folder.mkdir(parents=True, exist_ok=True)
sqlite_folder = Path('data/indexes/')
class LanceDBDocument():
def __init__(self, document:dict, title:str, text:str, fields, tags=None, date=None, file_path=None):
self.document = self.fill_missing_fields(document, text, title, tags, date)
# self.text = document[text]
# self.tags = document[tags] if tags is not None else list()
# self.date = document[date] if date is not None else None
self.file_path = file_path
self.metadata = {k:document[k] for k in fields if k not in [title, text, tags, date]}
self.uuid = str(uuid4()) if 'uuid' not in document else document['uuid']
self.save_uuids = list()
self.sqlite_fields = list()
self.lance_exclude = list()
def fill_missing_fields(self, document, text, title, tags, date):
if title not in document:
self.title = ''
else:
self.title = document[title]
if text not in document:
self.text = ''
else:
self.text = document[text]
if date not in document:
self.date = ''
else:
self.date = document[date]
if tags not in document:
self.tags = list()
else:
self.tags = document[tags]
def create_json_document(self, text, uuids=None):
"""Creates a custom dictionary object that can be used for both sqlite and lancedb
The full document is always stored in sqlite where fixed fields are:
title
text
date
filepath
document_uuid - used for retrieval from lancedb results
Json field contains the whole document for retrieval and display
Lancedb only gets searching text, vectorization of that, and filter fields
"""
_document = {'title':self.title,
'text':text,
'tags':self.tags,
'date':self.date,
'file_path':str(self.file_path),
'uuid':self.uuid,
'metadata': self.metadata}
self._enforce_tags_schema()
for field in ['title','date','file_path']:
self.enforce_string_schema(field, _document)
return _document
def enforce_string_schema(self, field, test_document):
if not isinstance(test_document[field], str):
self.lance_exclude.append(field)
def _enforce_tags_schema(self):
# This enforces a simple List[str] format for the tags to match what lancedb can use for filtering
# If they are of type List[Dict] as a nested field, they are stored in sqlite for retrieval
if isinstance(self.tags, list):
tags_are_list = True
for _tag in self.tags:
if not isinstance(_tag, str):
tags_are_list = False
break
if not tags_are_list:
self.lance_exclude.append('tags')
def return_document(self):
document = self.create_json_document(self.text)
return document
class SqlLiteIngest():
def __init__(self, documents, source_file, db_location, index_name, overwrite):
self.documents = documents
self.source_file = source_file
self.db_location = db_location
self.index_name = index_name
self.overwrite = overwrite
def initialize(self):
self.connection = sqlite3.connect(self.db_location)
if self.overwrite:
self.connection.execute(f"""DROP TABLE IF EXISTS {self.index_name};""")
table_exists = self.connection.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{self.index_name}';").fetchall()
if len(table_exists) == 0:
self.connection.execute(f"""
CREATE TABLE {self.index_name}(
id INTEGER PRIMARY KEY NOT NULL,
uuid STRING NOT NULL,
text STRING NOT NULL,
title STRING,
date STRING,
source_file STRING,
metadata JSONB);""")
def insert(self, document):
self.connection.execute(f"""INSERT INTO
{self.index_name} (uuid, text, title, date, source_file, metadata)
VALUES ('{document.uuid.replace("'","''")}', '{document.text.replace("'","''")}',
'{document.title.replace("'","''")}', '{document.date.replace("'","''")}',
'{self.index_name.replace("'","''")}', '{json.dumps(document.metadata).replace("'","''")}');""")
def bulk_insert(self):
for document in self.documents:
self.insert(document)
self.connection.commit()
self.connection.close()
from typing import List

from lancedb.pydantic import LanceModel, Vector
class LanceDBSchema384(LanceModel):
uuid: str
text: str
title: str
tags: List[str]
vector: Vector(384)
class LanceDBSchema512(LanceModel):
uuid: str
text: str
title: str
tags: List[str]
vector: Vector(512)
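def schema_for_encoder(encoder):
    # Hedged helper (added for illustration): pick the schema whose vector
    # width matches the sentence-transformer in use, e.g. all-MiniLM-L6-v2
    # emits 384-dim vectors, while CLIP-style text towers often emit 512.
    dims = encoder.get_sentence_embedding_dimension()
    return {384: LanceDBSchema384, 512: LanceDBSchema512}[dims]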
class LanceDBIngest():
def __init__(self, documents, lance_location, index_name, overwrite, encoder, schema):
self.documents = documents
self.lance_location = lance_location
self.index_name = index_name
self.overwrite = overwrite
self.encoder = encoder
self.schema = schema
def initialize(self):
self.db = lancedb.connect(self.lance_location)
existing_tables = self.db.table_names()
self.documents = [self.prep_documents(document) for document in self.documents]
if self.overwrite:
self.table = self.db.create_table(self.index_name, data=self.documents, mode='overwrite', schema=self.schema.to_arrow_schema())
else:
if self.index_name in existing_tables:
self.table = self.db.open_table(self.index_name)
self.table.add(self.documents)
else:
self.table = self.db.create_table(self.index_name, data=self.documents, schema=self.schema.to_arrow_schema())
def prep_documents(self, document):
lance_document = dict()
lance_document['text'] = document.text
lance_document['vector'] = self.encoder.encode(document.text)
lance_document['uuid'] = document.uuid
lance_document['title'] = document.title
lance_document['tags'] = document.tags
return lance_document
    def insert(self, document):
        document['vector'] = self.encoder.encode(document['text'])
        self.table.add([document])
def bulk_insert(self, create_vectors=False):
if create_vectors:
self.table.create_index(vector_column_name='vector', metric='cosine')
self.table.create_fts_index(field_names=['title','text'], replace=True)
return self.table
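    def search(self, query, limit=5):
        # Illustrative sketch (added; not part of the original class): vector
        # search against the table built above, encoding the query with the
        # same encoder so the dimensions match the schema.
        return self.table.search(self.encoder.encode(query)).limit(limit).to_pandas()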
class IndexDocuments():
def __init__(self,field_mapping, source_file, index_name, overwrite):
self.field_mapping = field_mapping
self.source_file = source_file
self.index_name = index_name
self.overwrite = overwrite
def open_json(self):
with open(self.source_file, 'r') as f:
self.data = json.load(f)
print(self.data)
def open_csv(self):
self.data = pd.read_csv(self.source_file)
def create_document(self, document):
document = LanceDBDocument(document,
text=self.field_mapping['text'],
title=self.field_mapping['title'],
tags=self.field_mapping['tags'],
date=self.field_mapping['date'],
fields=list(document.keys()),
file_path=self.source_file
)
return document
def create_documents(self):
self.documents = [self.create_document(document) for document in self.data]
def ingest(self, overwrite=False):
        # NOTE: lance_folder, encoder and sqlite_folder are module-level globals
        # defined in the (elided) top of this file: see the Path(...) and
        # SentenceTransformer(...) calls recorded in the extraction metadata below.
        lance_folder.mkdir(parents=True, exist_ok=True)
lance_ingest = LanceDBIngest(documents=self.documents,
lance_location=lance_folder,
# field_mapping=self.field_mapping,
index_name=self.index_name,
overwrite=self.overwrite,
encoder=encoder,
schema=LanceDBSchema384)
lance_ingest.initialize()
if len(self.documents) <= 256:
_table = lance_ingest.bulk_insert(create_vectors=False)
else:
_table = lance_ingest.bulk_insert(create_vectors=True)
sql_path = sqlite_folder.joinpath('documents.sqlite')
sqlite_ingest = SqlLiteIngest(documents=self.documents,
source_file=self.source_file,
db_location=sql_path,
index_name=self.index_name,
overwrite=self.overwrite)
sqlite_ingest.initialize()
sqlite_ingest.bulk_insert()
| [
"lancedb.pydantic.Vector",
"lancedb.connect"
] | [((216, 255), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['"""all-MiniLM-L6-v2"""'], {}), "('all-MiniLM-L6-v2')\n", (235, 255), False, 'from sentence_transformers import SentenceTransformer\n'), ((271, 295), 'pathlib.Path', 'Path', (['"""data/collections"""'], {}), "('data/collections')\n", (275, 295), False, 'from pathlib import Path\n'), ((310, 342), 'pathlib.Path', 'Path', (['"""data/config/indexes.yaml"""'], {}), "('data/config/indexes.yaml')\n", (314, 342), False, 'from pathlib import Path\n'), ((358, 373), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (362, 373), False, 'from pathlib import Path\n'), ((390, 405), 'pathlib.Path', 'Path', (['"""indexes"""'], {}), "('indexes')\n", (394, 405), False, 'from pathlib import Path\n'), ((471, 492), 'pathlib.Path', 'Path', (['"""data/indexes/"""'], {}), "('data/indexes/')\n", (475, 492), False, 'from pathlib import Path\n'), ((5306, 5317), 'lancedb.pydantic.Vector', 'Vector', (['(384)'], {}), '(384)\n', (5312, 5317), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((5430, 5441), 'lancedb.pydantic.Vector', 'Vector', (['(512)'], {}), '(512)\n', (5436, 5441), False, 'from lancedb.pydantic import LanceModel, Vector, List\n'), ((3896, 3929), 'sqlite3.connect', 'sqlite3.connect', (['self.db_location'], {}), '(self.db_location)\n', (3911, 3929), False, 'import sqlite3\n'), ((5814, 5850), 'lancedb.connect', 'lancedb.connect', (['self.lance_location'], {}), '(self.lance_location)\n', (5829, 5850), False, 'import lancedb\n'), ((7670, 7699), 'pandas.read_csv', 'pd.read_csv', (['self.source_file'], {}), '(self.source_file)\n', (7681, 7699), True, 'import pandas as pd\n'), ((7583, 7595), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7592, 7595), False, 'import json\n'), ((1035, 1042), 'uuid.uuid4', 'uuid4', ([], {}), '()\n', (1040, 1042), False, 'from uuid import uuid4\n'), ((4948, 4977), 'json.dumps', 'json.dumps', (['document.metadata'], {}), '(document.metadata)\n', (4958, 4977), False, 'import json\n')] |
import os
import urllib.request
import html2text
import predictionguard as pg
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from sentence_transformers import SentenceTransformer
import numpy as np
import lancedb
from lancedb.embeddings import with_embeddings
import pandas as pd
os.environ['PREDICTIONGUARD_TOKEN'] = "<your-predictionguard-token>"  # placeholder; do not commit real keys
# Let's get the html off of a website.
fp = urllib.request.urlopen("file:////home/shaunak_joshi/gt/insuranceagent.html")
mybytes = fp.read()
html = mybytes.decode("utf8")
fp.close()
# And convert it to text.
h = html2text.HTML2Text()
h.ignore_links = True
text = h.handle(html)
# Clean things up just a bit.
text = text.split("Introduction")[1]
#print(text)
#text = text.split("Location, Location, Location")[0]
#print(text)
#print(type(text))
# Chunk the text into smaller pieces for injection into LLM prompts.
text_splitter = CharacterTextSplitter(chunk_size=700, chunk_overlap=50)
docs = text_splitter.split_text(text)
# Let's checkout some of the chunks!
#for i in range(0, 10):
# print("Chunk", str(i+1))
# print("----------------------------")
# print(docs[i])
# print("")
# Let's take care of some of the formatting so it doesn't conflict with our
# typical prompt template structure
docs = [x.replace('#', '-') for x in docs]
# Now we need to embed these documents and put them into a "vector store" or
# "vector db" that we will use for semantic search and retrieval.
# Embeddings setup
name="all-MiniLM-L12-v2"
model = SentenceTransformer(name)
def embed_batch(batch):
return [model.encode(sentence) for sentence in batch]
def embed(sentence):
return model.encode(sentence)
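# Sanity check (illustrative, not in the original script): all-MiniLM-L12-v2
# produces 384-dimensional embeddings, which is the width LanceDB will infer
# for the table's vector column.
# assert embed("hello").shape == (384,)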
# LanceDB setup
os.makedirs(".lancedb", exist_ok=True)
uri = ".lancedb"
db = lancedb.connect(uri)
# Create a dataframe with the chunk ids and chunks
metadata = []
for i in range(len(docs)):
metadata.append([i,docs[i]])
doc_df = pd.DataFrame(metadata, columns=["chunk", "text"])
# Embed the documents
data = with_embeddings(embed_batch, doc_df)
# Create the DB table and add the records (create_table inserts `data` itself,
# so re-adding the same rows through open_table would duplicate them).
table = db.create_table("linux", data=data)
# Let's try to match a query to one of our documents.
#message = "What plays a crucial role in deciding insurance policies?"
#results = table.search(embed(message)).limit(5).to_pandas()
#print(results.head())
# Now let's augment our Q&A prompt with this external knowledge on-the-fly!!!
template = """### Instruction:
Read the below input context and respond with a short answer to the given question. Use only the information in the below input context.
### Input:
Context: {context}
Question: {question}
### Response:
"""
qa_prompt = PromptTemplate(
input_variables=["context", "question"],
template=template,
)
def rag_answer(message):
    # Search the table for relevant context
results = table.search(embed(message)).limit(5).to_pandas()
results.sort_values(by=['_distance'], inplace=True, ascending=True)
doc_use = results['text'].values[0]
# Augment the prompt with the context
prompt = qa_prompt.format(context=doc_use, question=message)
# Get a response
result = pg.Completion.create(
model="Nous-Hermes-Llama2-13B",
prompt=prompt
)
return result['choices'][0]['text']
response = rag_answer("A house has been destroyed by a tornado and also has been set on fire. The water doesn't work but the gas lines are fine. The area the house is in is notorious for crime. It is built in an earthquake prone zone. There are cracks in the walls and it is quite old. Based on this information, generate three insights about the type of insurance policy the house will require and any other thing you find important. Keep the insights under 20 words each.")
print('')
print("RESPONSE:", response)
| [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((670, 691), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (689, 691), False, 'import html2text\n'), ((1001, 1056), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(700)', 'chunk_overlap': '(50)'}), '(chunk_size=700, chunk_overlap=50)\n', (1022, 1056), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((1627, 1652), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (1646, 1652), False, 'from sentence_transformers import SentenceTransformer\n'), ((1818, 1838), 'os.mkdir', 'os.mkdir', (['""".lancedb"""'], {}), "('.lancedb')\n", (1826, 1838), False, 'import os\n'), ((1863, 1883), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (1878, 1883), False, 'import lancedb\n'), ((2025, 2074), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {'columns': "['chunk', 'text']"}), "(metadata, columns=['chunk', 'text'])\n", (2037, 2074), True, 'import pandas as pd\n'), ((2108, 2144), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'doc_df'], {}), '(embed_batch, doc_df)\n', (2123, 2144), False, 'from lancedb.embeddings import with_embeddings\n'), ((2827, 2901), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'question']", 'template': 'template'}), "(input_variables=['context', 'question'], template=template)\n", (2841, 2901), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((3294, 3361), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Nous-Hermes-Llama2-13B"""', 'prompt': 'prompt'}), "(model='Nous-Hermes-Llama2-13B', prompt=prompt)\n", (3314, 3361), True, 'import predictionguard as pg\n')] |
from lancedb.pydantic import LanceModel, Vector
from lancedb.embeddings import EmbeddingFunctionRegistry
registry = EmbeddingFunctionRegistry.get_instance()
func = registry.get("openai").create()
class Questions(LanceModel):
question: str = func.SourceField()
vector: Vector(func.ndims()) = func.VectorField()
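# Minimal usage sketch (assumed, beyond what the snippet shows): because
# `question` is the SourceField of a registered embedding function, LanceDB
# computes `vector` automatically when rows are added or searched.
# import lancedb
# db = lancedb.connect("/tmp/lancedb")
# table = db.create_table("questions", schema=Questions, mode="overwrite")
# table.add([{"question": "What is a vector database?"}])
# hits = table.search("vector databases").limit(1).to_pandas()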
| [
"lancedb.embeddings.EmbeddingFunctionRegistry.get_instance"
] | [((117, 157), 'lancedb.embeddings.EmbeddingFunctionRegistry.get_instance', 'EmbeddingFunctionRegistry.get_instance', ([], {}), '()\n', (155, 157), False, 'from lancedb.embeddings import EmbeddingFunctionRegistry\n')] |
import logging
import os
import time
from functools import wraps
from pathlib import Path
from random import random, seed
import lancedb
import pyarrow as pa
import pyarrow.parquet as pq
import typer
from lancedb.table import LanceTable
log_level = os.environ.get("LOG_LEVEL", "info")
logging.basicConfig(
level=getattr(logging, log_level.upper()),
format="%(asctime)s %(levelname)s | %(processName)s %(name)s | %(message)s",
)
logger = logging.getLogger(__name__)
app = typer.Typer()
V_SIZE = 256
DB_PATH = "benchmark"
DB_TABLE = "vectors"
DB_TABLE_SIZE = int(os.environ.get("DB_TABLE_SIZE", 100000))
Q_PATH = "query"
Q_SIZE = int(os.environ.get("Q_SIZE", 100))
Q_V = "v.parquet"
Q_KNN = "knn.parquet"
Q_ANN = "ann.parquet"
def timeit(func):
@wraps(func)
def f(*args, **kwargs):
start_time = time.perf_counter()
result = func(*args, **kwargs)
end_time = time.perf_counter()
total_time = end_time - start_time
logger.info(f"{func.__name__} {args} done in {total_time:.2f} secs")
return result
return f
def get_db():
if int(os.environ["AZURE"]) == 0:
f = Path(os.environ["DATA"])
f.mkdir(parents=True, exist_ok=True)
return lancedb.connect(f / DB_PATH)
else:
return lancedb.connect(
f"az://{os.environ['AZURE_STORAGE_CONTAINER']}/{DB_PATH}"
)
def open_table(table: str):
return LanceTable(get_db(), table)
def get_q(what="v"):
tables = {
"v": Q_V,
"knn": Q_KNN,
"ann": Q_ANN,
}
f = Path(os.environ["DATA"]) / Q_PATH
f.mkdir(parents=True, exist_ok=True)
return f / tables[what]
def gen_data(n: int, start=1):
seed()
for i in range(start, start + n):
yield ({"id": i, "vector": list(random() for _ in range(V_SIZE))})
@app.command()
def db_init(n: int = DB_TABLE_SIZE):
get_db().create_table(DB_TABLE, data=list(gen_data(n)))
@app.command()
def db_info():
table = open_table(DB_TABLE)
logger.debug(table.head(10))
@app.command()
def db_add(n: int, start: int):
table = open_table(DB_TABLE)
table.add(list(gen_data(n, start=start)))
@app.command()
def q_init(n: int = Q_SIZE):
pq.write_table(pa.Table.from_pylist(list(gen_data(n))), get_q())
@app.command()
def q_info():
logger.debug(pq.read_table(get_q()))
@timeit
def q_process(what: str):
table = open_table(DB_TABLE)
r = pa.Table.from_pylist(
[
{
"id": v["id"],
"neighbours": table.search(v["vector"])
.limit(10)
.select(["id"])
.to_arrow()["id"]
.to_pylist(),
}
for v in pq.read_table(get_q()).to_pylist()
]
)
pq.write_table(r, get_q(what))
@app.command()
@timeit
def create_index():
open_table(DB_TABLE).create_index(
num_sub_vectors=8
    )  # TODO: avoid hard-coded params
@app.command()
def q_knn():
q_process("knn")
@app.command()
def q_ann():
create_index()
q_process("ann")
if __name__ == "__main__":
app()
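# Typical invocation sketch (assumed workflow, not part of the file; typer
# exposes snake_case commands as kebab-case):
#   DATA=./bench AZURE=0 python benchmark.py db-init
#   DATA=./bench AZURE=0 python benchmark.py q-init
#   DATA=./bench AZURE=0 python benchmark.py q-knn   # flat-scan baseline
#   DATA=./bench AZURE=0 python benchmark.py q-ann   # builds the index first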
| [
"lancedb.connect"
] | [((248, 283), 'os.environ.get', 'os.environ.get', (['"""LOG_LEVEL"""', '"""info"""'], {}), "('LOG_LEVEL', 'info')\n", (262, 283), False, 'import os\n'), ((446, 473), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (463, 473), False, 'import logging\n'), ((480, 493), 'typer.Typer', 'typer.Typer', ([], {}), '()\n', (491, 493), False, 'import typer\n'), ((569, 608), 'os.environ.get', 'os.environ.get', (['"""DB_TABLE_SIZE"""', '(100000)'], {}), "('DB_TABLE_SIZE', 100000)\n", (583, 608), False, 'import os\n'), ((636, 665), 'os.environ.get', 'os.environ.get', (['"""Q_SIZE"""', '(100)'], {}), "('Q_SIZE', 100)\n", (650, 665), False, 'import os\n'), ((753, 764), 'functools.wraps', 'wraps', (['func'], {}), '(func)\n', (758, 764), False, 'from functools import wraps\n'), ((1693, 1699), 'random.seed', 'seed', ([], {}), '()\n', (1697, 1699), False, 'from random import random, seed\n'), ((814, 833), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (831, 833), False, 'import time\n'), ((892, 911), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (909, 911), False, 'import time\n'), ((1134, 1158), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (1138, 1158), False, 'from pathlib import Path\n'), ((1219, 1247), 'lancedb.connect', 'lancedb.connect', (['(f / DB_PATH)'], {}), '(f / DB_PATH)\n', (1234, 1247), False, 'import lancedb\n'), ((1273, 1347), 'lancedb.connect', 'lancedb.connect', (['f"""az://{os.environ[\'AZURE_STORAGE_CONTAINER\']}/{DB_PATH}"""'], {}), '(f"az://{os.environ[\'AZURE_STORAGE_CONTAINER\']}/{DB_PATH}")\n', (1288, 1347), False, 'import lancedb\n'), ((1553, 1577), 'pathlib.Path', 'Path', (["os.environ['DATA']"], {}), "(os.environ['DATA'])\n", (1557, 1577), False, 'from pathlib import Path\n'), ((1778, 1786), 'random.random', 'random', ([], {}), '()\n', (1784, 1786), False, 'from random import random, seed\n')] |
import argparse
import os
import shutil
from functools import lru_cache
from pathlib import Path
from typing import Any, Iterator
import srsly
from codetiming import Timer
from config import Settings
from dotenv import load_dotenv
from rich import progress
from schemas.wine import LanceModelWine, Wine
from sentence_transformers import SentenceTransformer
import lancedb
from lancedb.pydantic import pydantic_to_schema
from lancedb.table import Table
load_dotenv()
# Custom types
JsonBlob = dict[str, Any]
class FileNotFoundError(Exception):
    # Deliberately shadows the builtin of the same name within this module so
    # the custom message below is raised.
    pass
@lru_cache()
def get_settings():
# Use lru_cache to avoid loading .env file for every request
return Settings()
def chunk_iterable(item_list: list[JsonBlob], chunksize: int) -> Iterator[list[JsonBlob]]:
"""
Break a large iterable into an iterable of smaller iterables of size `chunksize`
"""
for i in range(0, len(item_list), chunksize):
yield item_list[i : i + chunksize]
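
# Worked example (added for illustration):
#   list(chunk_iterable([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]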
def get_json_data(data_dir: Path, filename: str) -> list[JsonBlob]:
"""Get all line-delimited json files (.jsonl) from a directory with a given prefix"""
file_path = data_dir / filename
if not file_path.is_file():
# File may not have been uncompressed yet so try to do that first
data = srsly.read_gzip_jsonl(file_path)
# This time if it isn't there it really doesn't exist
if not file_path.is_file():
raise FileNotFoundError(f"No valid .jsonl file found in `{data_dir}`")
else:
data = srsly.read_gzip_jsonl(file_path)
return data
def validate(
data: list[JsonBlob],
exclude_none: bool = False,
) -> list[JsonBlob]:
validated_data = [Wine(**item).model_dump(exclude_none=exclude_none) for item in data]
return validated_data
def embed_func(batch: list[str], model) -> list[list[float]]:
return [model.encode(sentence.lower()) for sentence in batch]
def vectorize_text(data: list[JsonBlob]) -> list[LanceModelWine] | None:
# Load a sentence transformer model for semantic similarity from a specified checkpoint
model_id = get_settings().embedding_model_checkpoint
assert model_id, "Invalid embedding model checkpoint specified in .env file"
MODEL = SentenceTransformer(model_id)
ids = [item["id"] for item in data]
to_vectorize = [text.get("to_vectorize") for text in data]
vectors = embed_func(to_vectorize, MODEL)
try:
data_batch = [{**d, "vector": vector} for d, vector in zip(data, vectors)]
except Exception as e:
print(f"{e}: Failed to add ID range {min(ids)}-{max(ids)}")
return None
return data_batch
def embed_batches(tbl: str, validated_data: list[JsonBlob]) -> Table:
"""Ingest vector embeddings in batches for ANN index"""
chunked_data = chunk_iterable(validated_data, CHUNKSIZE)
print(f"Adding vectors to table for ANN index...")
# Add rich progress bar
with progress.Progress(
"[progress.description]{task.description}",
progress.BarColumn(),
"[progress.percentage]{task.percentage:>3.0f}%",
progress.TimeElapsedColumn(),
) as prog:
overall_progress_task = prog.add_task(
"Starting vectorization...", total=len(validated_data) // CHUNKSIZE
)
for chunk in chunked_data:
batch = vectorize_text(chunk)
prog.update(overall_progress_task, advance=1)
tbl.add(batch, mode="append")
def main(tbl: Table, data: list[JsonBlob]) -> None:
"""Generate sentence embeddings and create ANN and FTS indexes"""
with Timer(
name="Data validation in pydantic",
text="Validated data using Pydantic in {:.4f} sec",
):
validated_data = validate(data, exclude_none=False)
with Timer(
name="Insert vectors in batches",
text="Created sentence embeddings in {:.4f} sec",
):
embed_batches(tbl, validated_data)
print(f"Finished inserting {len(tbl)} vectors into LanceDB table")
with Timer(name="Create ANN index", text="Created ANN index in {:.4f} sec"):
print("Creating ANN index...")
        # Creating IVF-PQ index for now, as we eagerly await DiskANN
        # Rule of thumb: choose num_partitions as a power of 2 close to
        # len(dataset) // 5000. With ~130k datapoints that is 130_000 // 5000 = 26,
        # i.e. 32; the call below uses num_partitions=4, which also works at this scale.
        tbl.create_index(metric="cosine", num_partitions=4, num_sub_vectors=32)
with Timer(name="Create FTS index", text="Created FTS index in {:.4f} sec"):
# Create a full-text search index via Tantivy (which implements Lucene + BM25 in Rust)
tbl.create_fts_index(["to_vectorize"])
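
def demo_queries(tbl: Table) -> None:
    # Illustrative follow-up (added; not in the original script): exercise both
    # indexes built in main(). The query strings are assumptions.
    model = SentenceTransformer(get_settings().embedding_model_checkpoint)
    vec = model.encode("full-bodied red wine from tuscany")
    print(tbl.search(vec).limit(3).to_pandas())
    print(tbl.search("tuscany", query_type="fts").limit(3).to_pandas())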
if __name__ == "__main__":
# fmt: off
parser = argparse.ArgumentParser("Bulk index database from the wine reviews JSONL data")
parser.add_argument("--limit", "-l", type=int, default=0, help="Limit the size of the dataset to load for testing purposes")
parser.add_argument("--chunksize", type=int, default=1000, help="Size of each chunk to break the dataset into before processing")
parser.add_argument("--filename", type=str, default="winemag-data-130k-v2.jsonl.gz", help="Name of the JSONL zip file to use")
args = vars(parser.parse_args())
# fmt: on
LIMIT = args["limit"]
DATA_DIR = Path(__file__).parents[1] / "data"
FILENAME = args["filename"]
CHUNKSIZE = args["chunksize"]
data = list(get_json_data(DATA_DIR, FILENAME))
assert data, "No data found in the specified file"
data = data[:LIMIT] if LIMIT > 0 else data
DB_NAME = "./winemag"
TABLE = "wines"
if os.path.exists(DB_NAME):
shutil.rmtree(DB_NAME)
db = lancedb.connect(DB_NAME)
try:
tbl = db.create_table(TABLE, schema=pydantic_to_schema(LanceModelWine), mode="create")
except OSError:
tbl = db.open_table(TABLE)
main(tbl, data)
print("Finished execution!")
| [
"lancedb.connect",
"lancedb.pydantic.pydantic_to_schema"
] | [((455, 468), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (466, 468), False, 'from dotenv import load_dotenv\n'), ((560, 571), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (569, 571), False, 'from functools import lru_cache\n'), ((668, 678), 'config.Settings', 'Settings', ([], {}), '()\n', (676, 678), False, 'from config import Settings\n'), ((2230, 2259), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_id'], {}), '(model_id)\n', (2249, 2259), False, 'from sentence_transformers import SentenceTransformer\n'), ((4737, 4816), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Bulk index database from the wine reviews JSONL data"""'], {}), "('Bulk index database from the wine reviews JSONL data')\n", (4760, 4816), False, 'import argparse\n'), ((5613, 5636), 'os.path.exists', 'os.path.exists', (['DB_NAME'], {}), '(DB_NAME)\n', (5627, 5636), False, 'import os\n'), ((5679, 5703), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (5694, 5703), False, 'import lancedb\n'), ((1283, 1315), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1304, 1315), False, 'import srsly\n'), ((1522, 1554), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1543, 1554), False, 'import srsly\n'), ((3582, 3680), 'codetiming.Timer', 'Timer', ([], {'name': '"""Data validation in pydantic"""', 'text': '"""Validated data using Pydantic in {:.4f} sec"""'}), "(name='Data validation in pydantic', text=\n 'Validated data using Pydantic in {:.4f} sec')\n", (3587, 3680), False, 'from codetiming import Timer\n'), ((3770, 3864), 'codetiming.Timer', 'Timer', ([], {'name': '"""Insert vectors in batches"""', 'text': '"""Created sentence embeddings in {:.4f} sec"""'}), "(name='Insert vectors in batches', text=\n 'Created sentence embeddings in {:.4f} sec')\n", (3775, 3864), False, 'from codetiming import Timer\n'), ((4012, 4082), 'codetiming.Timer', 'Timer', ([], {'name': '"""Create ANN index"""', 'text': '"""Created ANN index in {:.4f} sec"""'}), "(name='Create ANN index', text='Created ANN index in {:.4f} sec')\n", (4017, 4082), False, 'from codetiming import Timer\n'), ((4466, 4536), 'codetiming.Timer', 'Timer', ([], {'name': '"""Create FTS index"""', 'text': '"""Created FTS index in {:.4f} sec"""'}), "(name='Create FTS index', text='Created FTS index in {:.4f} sec')\n", (4471, 4536), False, 'from codetiming import Timer\n'), ((5646, 5668), 'shutil.rmtree', 'shutil.rmtree', (['DB_NAME'], {}), '(DB_NAME)\n', (5659, 5668), False, 'import shutil\n'), ((3003, 3023), 'rich.progress.BarColumn', 'progress.BarColumn', ([], {}), '()\n', (3021, 3023), False, 'from rich import progress\n'), ((3090, 3118), 'rich.progress.TimeElapsedColumn', 'progress.TimeElapsedColumn', ([], {}), '()\n', (3116, 3118), False, 'from rich import progress\n'), ((1688, 1700), 'schemas.wine.Wine', 'Wine', ([], {}), '(**item)\n', (1692, 1700), False, 'from schemas.wine import LanceModelWine, Wine\n'), ((5304, 5318), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5308, 5318), False, 'from pathlib import Path\n'), ((5757, 5791), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['LanceModelWine'], {}), '(LanceModelWine)\n', (5775, 5791), False, 'from lancedb.pydantic import pydantic_to_schema\n')] |
from datasets import load_dataset
data = load_dataset('jamescalam/youtube-transcriptions', split='train')
from lancedb.context import contextualize
df = (contextualize(data.to_pandas())
.groupby("title").text_col("text")
.window(20).stride(4)
.to_df())
df.head(1)
import openai
import os
# Configuring the environment variable OPENAI_API_KEY
if "OPENAI_API_KEY" not in os.environ:
# OR set the key here as a variable
openai.api_key = ""
assert len(openai.Model.list()["data"]) > 0
def embed_func(c):
rs = openai.Embedding.create(input=c, engine="text-embedding-ada-002")
return [record["embedding"] for record in rs["data"]]
import lancedb
from lancedb.embeddings import with_embeddings
# data = with_embeddings(embed_func, df, show_progress=True)
# data.to_pandas().head(1)
db = lancedb.connect("/tmp/lancedb")
# tbl = db.create_table("youtube-chatbot", data)
# get table
tbl = db.open_table("youtube-chatbot")
#print the length of the table
print(len(tbl))
tbl.to_pandas().head(1)
def create_prompt(query, context):
limit = 3750
prompt_start = (
"Answer the question based on the context below.\n\n"+
"Context:\n"
)
prompt_end = (
f"\n\nQuestion: {query}\nAnswer:"
)
    # Append contexts until hitting the limit. Start from the full context so
    # `prompt` is always defined, even when only one context row comes back.
    prompt = prompt_start + "\n\n---\n\n".join(context.text) + prompt_end
    for i in range(1, len(context)):
        if len("\n\n---\n\n".join(context.text[:i])) >= limit:
            prompt = (
                prompt_start +
                "\n\n---\n\n".join(context.text[:i-1]) +
                prompt_end
            )
            break
    print("prompt:", prompt)
    return prompt
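
# Sizing note (added; approximate arithmetic, not from the original script):
# `limit` above is a character budget, not a token count. At roughly 4
# characters per token, 3750 characters is ~940 tokens, which leaves headroom
# for the question plus the 400-token completion requested below.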
def complete(prompt):
# query text-davinci-003
res = openai.Completion.create(
engine='text-davinci-003',
prompt=prompt,
temperature=0,
max_tokens=400,
top_p=1,
frequency_penalty=0,
presence_penalty=0,
stop=None
)
return res['choices'][0]['text'].strip()
query = ("How do I use the Pandas library to create embeddings?")
# Embed the question
emb = embed_func(query)[0]
# Use LanceDB to get top 3 most relevant context
context = tbl.search(emb).limit(3).to_df()
# Get the answer from completion API
prompt = create_prompt(query, context)
print( "context:", context )
print ( complete( prompt )) | [
"lancedb.connect"
] | [((42, 106), 'datasets.load_dataset', 'load_dataset', (['"""jamescalam/youtube-transcriptions"""'], {'split': '"""train"""'}), "('jamescalam/youtube-transcriptions', split='train')\n", (54, 106), False, 'from datasets import load_dataset\n'), ((831, 862), 'lancedb.connect', 'lancedb.connect', (['"""/tmp/lancedb"""'], {}), "('/tmp/lancedb')\n", (846, 862), False, 'import lancedb\n'), ((549, 614), 'openai.Embedding.create', 'openai.Embedding.create', ([], {'input': 'c', 'engine': '"""text-embedding-ada-002"""'}), "(input=c, engine='text-embedding-ada-002')\n", (572, 614), False, 'import openai\n'), ((1876, 2042), 'openai.Completion.create', 'openai.Completion.create', ([], {'engine': '"""text-davinci-003"""', 'prompt': 'prompt', 'temperature': '(0)', 'max_tokens': '(400)', 'top_p': '(1)', 'frequency_penalty': '(0)', 'presence_penalty': '(0)', 'stop': 'None'}), "(engine='text-davinci-003', prompt=prompt,\n temperature=0, max_tokens=400, top_p=1, frequency_penalty=0,\n presence_penalty=0, stop=None)\n", (1900, 2042), False, 'import openai\n'), ((483, 502), 'openai.Model.list', 'openai.Model.list', ([], {}), '()\n', (500, 502), False, 'import openai\n')] |
import hashlib
import io
import logging
from typing import List
import numpy as np
from lancedb.pydantic import LanceModel, vector
from PIL import Image
from pydantic import BaseModel, Field, computed_field
from homematch.config import IMAGES_DIR
logger = logging.getLogger(__name__)
class PropertyListingBase(BaseModel):
page_source: str
resource_title: str
resource_country: str
operation_type: str
active: bool
url: str
title: str
normalized_title: str
zone: str
current_price: float | None = None
ad_text: str
basic_info: List[str]
last_update: str
main_image_url: str
scraped_ts: str
@computed_field # type: ignore
@property
def identificator(self) -> str:
return hashlib.sha256(self.url.encode()).hexdigest()[:16]
@computed_field # type: ignore
@property
def text_description(self) -> str:
basic_info_text = ",".join(self.basic_info)
basic_info_text = basic_info_text.replace("habs", "bedrooms")
basic_info_text = basic_info_text.replace("baรฑos", "bathrooms")
basic_info_text = basic_info_text.replace("baรฑo", "bathroom")
basic_info_text = basic_info_text.replace("mยฒ", "square meters")
basic_info_text = basic_info_text.replace("planta", "floor")
basic_info_text = basic_info_text.replace("Bajo", "0 floor")
description = ""
description += f"Zone: {self.zone}."
description += f"\nPrice: {self.current_price} euros."
description += f"\nFeatures: {basic_info_text}"
return description
class PropertyListing(PropertyListingBase):
images_dir: str = Field(str(IMAGES_DIR), description="Directory to store images")
@property
def image_path(self) -> str:
return str(self.images_dir) + f"/{self.identificator}.jpg"
def load_image(self) -> Image.Image:
try:
return Image.open(self.image_path)
except FileNotFoundError:
logger.error(f"Image file not found: {self.image_path}")
raise
@classmethod
def pil_to_bytes(cls, img: Image.Image) -> bytes:
buf = io.BytesIO()
img.save(buf, format="PNG")
return buf.getvalue()
@classmethod
def pil_to_numpy(cls, img: Image.Image) -> np.ndarray:
return np.array(img)
class PropertyData(PropertyListing):
class Config:
arbitrary_types_allowed = True
image: Image.Image
class ImageData(PropertyListing, LanceModel):
vector: vector(768) # type: ignore
image_bytes: bytes
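
def to_image_data(listing: PropertyData, embedding: list[float]) -> ImageData:
    """Hedged sketch (added for illustration): convert a scraped PropertyData
    row into the LanceDB-ready ImageData record. `embedding` is assumed to be
    a 768-dim image embedding computed elsewhere (e.g. a CLIP image tower);
    this module does not define that model."""
    fields = listing.model_dump(exclude={"image", "identificator", "text_description"})
    return ImageData(
        **fields,
        vector=embedding,
        image_bytes=PropertyListing.pil_to_bytes(listing.image),
    )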
| [
"lancedb.pydantic.vector"
] | [((259, 286), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (276, 286), False, 'import logging\n'), ((2511, 2522), 'lancedb.pydantic.vector', 'vector', (['(768)'], {}), '(768)\n', (2517, 2522), False, 'from lancedb.pydantic import LanceModel, vector\n'), ((2146, 2158), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (2156, 2158), False, 'import io\n'), ((2317, 2330), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2325, 2330), True, 'import numpy as np\n'), ((1911, 1938), 'PIL.Image.open', 'Image.open', (['self.image_path'], {}), '(self.image_path)\n', (1921, 1938), False, 'from PIL import Image\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logging
import os
from typing import Any, Callable, Dict, List, Optional, Union
from urllib.parse import urljoin
import attrs
import pyarrow as pa
import requests
from pydantic import BaseModel
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from lancedb.common import Credential
from lancedb.remote import VectorQuery, VectorQueryResult
from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory
from lancedb.remote.errors import LanceDBClientError
ARROW_STREAM_CONTENT_TYPE = "application/vnd.apache.arrow.stream"
def _check_not_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self.closed:
raise ValueError("Connection is closed")
return f(self, *args, **kwargs)
return wrapped
def _read_ipc(resp: requests.Response) -> pa.Table:
resp_body = resp.content
with pa.ipc.open_file(pa.BufferReader(resp_body)) as reader:
return reader.read_all()
@attrs.define(slots=False)
class RestfulLanceDBClient:
db_name: str
region: str
api_key: Credential
host_override: Optional[str] = attrs.field(default=None)
closed: bool = attrs.field(default=False, init=False)
connection_timeout: float = attrs.field(default=120.0, kw_only=True)
read_timeout: float = attrs.field(default=300.0, kw_only=True)
@functools.cached_property
def session(self) -> requests.Session:
sess = requests.Session()
retry_adapter_instance = retry_adapter(retry_adapter_options())
sess.mount(urljoin(self.url, "/v1/table/"), retry_adapter_instance)
adapter_class = LanceDBClientHTTPAdapterFactory()
sess.mount("https://", adapter_class())
return sess
@property
def url(self) -> str:
return (
self.host_override
or f"https://{self.db_name}.{self.region}.api.lancedb.com"
)
def close(self):
self.session.close()
self.closed = True
@functools.cached_property
def headers(self) -> Dict[str, str]:
headers = {
"x-api-key": self.api_key,
}
if self.region == "local": # Local test mode
headers["Host"] = f"{self.db_name}.{self.region}.api.lancedb.com"
if self.host_override:
headers["x-lancedb-database"] = self.db_name
return headers
@staticmethod
def _check_status(resp: requests.Response):
if resp.status_code == 404:
raise LanceDBClientError(f"Not found: {resp.text}")
elif 400 <= resp.status_code < 500:
raise LanceDBClientError(
f"Bad Request: {resp.status_code}, error: {resp.text}"
)
elif 500 <= resp.status_code < 600:
raise LanceDBClientError(
f"Internal Server Error: {resp.status_code}, error: {resp.text}"
)
elif resp.status_code != 200:
raise LanceDBClientError(
f"Unknown Error: {resp.status_code}, error: {resp.text}"
)
@_check_not_closed
def get(self, uri: str, params: Union[Dict[str, Any], BaseModel] = None):
"""Send a GET request and returns the deserialized response payload."""
if isinstance(params, BaseModel):
params: Dict[str, Any] = params.dict(exclude_none=True)
with self.session.get(
urljoin(self.url, uri),
params=params,
headers=self.headers,
timeout=(self.connection_timeout, self.read_timeout),
) as resp:
self._check_status(resp)
return resp.json()
@_check_not_closed
def post(
self,
uri: str,
data: Optional[Union[Dict[str, Any], BaseModel, bytes]] = None,
params: Optional[Dict[str, Any]] = None,
content_type: Optional[str] = None,
deserialize: Callable = lambda resp: resp.json(),
request_id: Optional[str] = None,
) -> Dict[str, Any]:
"""Send a POST request and returns the deserialized response payload.
Parameters
----------
uri : str
The uri to send the POST request to.
data: Union[Dict[str, Any], BaseModel]
request_id: Optional[str]
Optional client side request id to be sent in the request headers.
"""
if isinstance(data, BaseModel):
data: Dict[str, Any] = data.dict(exclude_none=True)
if isinstance(data, bytes):
req_kwargs = {"data": data}
else:
req_kwargs = {"json": data}
headers = self.headers.copy()
if content_type is not None:
headers["content-type"] = content_type
if request_id is not None:
headers["x-request-id"] = request_id
with self.session.post(
urljoin(self.url, uri),
headers=headers,
params=params,
timeout=(self.connection_timeout, self.read_timeout),
**req_kwargs,
) as resp:
self._check_status(resp)
return deserialize(resp)
@_check_not_closed
def list_tables(self, limit: int, page_token: Optional[str] = None) -> List[str]:
"""List all tables in the database."""
if page_token is None:
page_token = ""
json = self.get("/v1/table/", {"limit": limit, "page_token": page_token})
return json["tables"]
@_check_not_closed
def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
"""Query a table."""
tbl = self.post(f"/v1/table/{table_name}/query/", query, deserialize=_read_ipc)
return VectorQueryResult(tbl)
def mount_retry_adapter_for_table(self, table_name: str) -> None:
"""
Adds an http adapter to session that will retry retryable requests to the table.
"""
retry_options = retry_adapter_options(methods=["GET", "POST"])
retry_adapter_instance = retry_adapter(retry_options)
session = self.session
session.mount(
urljoin(self.url, f"/v1/table/{table_name}/query/"), retry_adapter_instance
)
session.mount(
urljoin(self.url, f"/v1/table/{table_name}/describe/"),
retry_adapter_instance,
)
session.mount(
urljoin(self.url, f"/v1/table/{table_name}/index/list/"),
retry_adapter_instance,
)
def retry_adapter_options(methods=["GET"]) -> Dict[str, Any]:
return {
"retries": int(os.environ.get("LANCE_CLIENT_MAX_RETRIES", "3")),
"connect_retries": int(os.environ.get("LANCE_CLIENT_CONNECT_RETRIES", "3")),
"read_retries": int(os.environ.get("LANCE_CLIENT_READ_RETRIES", "3")),
"backoff_factor": float(
os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_FACTOR", "0.25")
),
"backoff_jitter": float(
os.environ.get("LANCE_CLIENT_RETRY_BACKOFF_JITTER", "0.25")
),
"statuses": [
int(i.strip())
for i in os.environ.get(
"LANCE_CLIENT_RETRY_STATUSES", "429, 500, 502, 503"
).split(",")
],
"methods": methods,
}
def retry_adapter(options: Dict[str, Any]) -> HTTPAdapter:
total_retries = options["retries"]
connect_retries = options["connect_retries"]
read_retries = options["read_retries"]
backoff_factor = options["backoff_factor"]
backoff_jitter = options["backoff_jitter"]
statuses = options["statuses"]
methods = frozenset(options["methods"])
logging.debug(
f"Setting up retry adapter with {total_retries} retries," # noqa G003
+ f"connect retries {connect_retries}, read retries {read_retries},"
+ f"backoff factor {backoff_factor}, statuses {statuses}, "
+ f"methods {methods}"
)
return HTTPAdapter(
max_retries=Retry(
total=total_retries,
connect=connect_retries,
read=read_retries,
backoff_factor=backoff_factor,
backoff_jitter=backoff_jitter,
status_forcelist=statuses,
allowed_methods=methods,
)
)
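
# Construction sketch (illustration only; the names and key are placeholders):
# client = RestfulLanceDBClient(db_name="my-db", region="us-east-1",
#                               api_key="sk-...")
# tables = client.list_tables(limit=10)
# client.close()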
| [
"lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory",
"lancedb.remote.VectorQueryResult",
"lancedb.remote.errors.LanceDBClientError"
] | [((1587, 1612), 'attrs.define', 'attrs.define', ([], {'slots': '(False)'}), '(slots=False)\n', (1599, 1612), False, 'import attrs\n'), ((1207, 1225), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (1222, 1225), False, 'import functools\n'), ((1733, 1758), 'attrs.field', 'attrs.field', ([], {'default': 'None'}), '(default=None)\n', (1744, 1758), False, 'import attrs\n'), ((1779, 1817), 'attrs.field', 'attrs.field', ([], {'default': '(False)', 'init': '(False)'}), '(default=False, init=False)\n', (1790, 1817), False, 'import attrs\n'), ((1851, 1891), 'attrs.field', 'attrs.field', ([], {'default': '(120.0)', 'kw_only': '(True)'}), '(default=120.0, kw_only=True)\n', (1862, 1891), False, 'import attrs\n'), ((1918, 1958), 'attrs.field', 'attrs.field', ([], {'default': '(300.0)', 'kw_only': '(True)'}), '(default=300.0, kw_only=True)\n', (1929, 1958), False, 'import attrs\n'), ((8166, 8402), 'logging.debug', 'logging.debug', (["(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')"], {}), "(f'Setting up retry adapter with {total_retries} retries,' +\n f'connect retries {connect_retries}, read retries {read_retries},' +\n f'backoff factor {backoff_factor}, statuses {statuses}, ' +\n f'methods {methods}')\n", (8179, 8402), False, 'import logging\n'), ((2049, 2067), 'requests.Session', 'requests.Session', ([], {}), '()\n', (2065, 2067), False, 'import requests\n'), ((2242, 2275), 'lancedb.remote.connection_timeout.LanceDBClientHTTPAdapterFactory', 'LanceDBClientHTTPAdapterFactory', ([], {}), '()\n', (2273, 2275), False, 'from lancedb.remote.connection_timeout import LanceDBClientHTTPAdapterFactory\n'), ((6258, 6280), 'lancedb.remote.VectorQueryResult', 'VectorQueryResult', (['tbl'], {}), '(tbl)\n', (6275, 6280), False, 'from lancedb.remote import VectorQuery, VectorQueryResult\n'), ((1512, 1538), 'pyarrow.BufferReader', 'pa.BufferReader', (['resp_body'], {}), '(resp_body)\n', (1527, 1538), True, 'import pyarrow as pa\n'), ((2160, 2191), 'urllib.parse.urljoin', 'urljoin', (['self.url', '"""/v1/table/"""'], {}), "(self.url, '/v1/table/')\n", (2167, 2191), False, 'from urllib.parse import urljoin\n'), ((3098, 3143), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Not found: {resp.text}"""'], {}), "(f'Not found: {resp.text}')\n", (3116, 3143), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((6665, 6716), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/query/"""'], {}), "(self.url, f'/v1/table/{table_name}/query/')\n", (6672, 6716), False, 'from urllib.parse import urljoin\n'), ((6786, 6840), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/describe/"""'], {}), "(self.url, f'/v1/table/{table_name}/describe/')\n", (6793, 6840), False, 'from urllib.parse import urljoin\n'), ((6923, 6979), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'f"""/v1/table/{table_name}/index/list/"""'], {}), "(self.url, f'/v1/table/{table_name}/index/list/')\n", (6930, 6979), False, 'from urllib.parse import urljoin\n'), ((7127, 7174), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_MAX_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_MAX_RETRIES', '3')\n", (7141, 7174), False, 'import os\n'), ((7208, 7259), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_CONNECT_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_CONNECT_RETRIES', '3')\n", (7222, 
7259), False, 'import os\n'), ((7290, 7338), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_READ_RETRIES"""', '"""3"""'], {}), "('LANCE_CLIENT_READ_RETRIES', '3')\n", (7304, 7338), False, 'import os\n'), ((7386, 7445), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_FACTOR"""', '"""0.25"""'], {}), "('LANCE_CLIENT_RETRY_BACKOFF_FACTOR', '0.25')\n", (7400, 7445), False, 'import os\n'), ((7502, 7561), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_BACKOFF_JITTER"""', '"""0.25"""'], {}), "('LANCE_CLIENT_RETRY_BACKOFF_JITTER', '0.25')\n", (7516, 7561), False, 'import os\n'), ((8487, 8679), 'urllib3.Retry', 'Retry', ([], {'total': 'total_retries', 'connect': 'connect_retries', 'read': 'read_retries', 'backoff_factor': 'backoff_factor', 'backoff_jitter': 'backoff_jitter', 'status_forcelist': 'statuses', 'allowed_methods': 'methods'}), '(total=total_retries, connect=connect_retries, read=read_retries,\n backoff_factor=backoff_factor, backoff_jitter=backoff_jitter,\n status_forcelist=statuses, allowed_methods=methods)\n', (8492, 8679), False, 'from urllib3 import Retry\n'), ((3206, 3280), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Bad Request: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Bad Request: {resp.status_code}, error: {resp.text}')\n", (3224, 3280), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3986, 4008), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (3993, 4008), False, 'from urllib.parse import urljoin\n'), ((5430, 5452), 'urllib.parse.urljoin', 'urljoin', (['self.url', 'uri'], {}), '(self.url, uri)\n', (5437, 5452), False, 'from urllib.parse import urljoin\n'), ((3373, 3462), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Internal Server Error: {resp.status_code}, error: {resp.text}"""'], {}), "(\n f'Internal Server Error: {resp.status_code}, error: {resp.text}')\n", (3391, 3462), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((3544, 3620), 'lancedb.remote.errors.LanceDBClientError', 'LanceDBClientError', (['f"""Unknown Error: {resp.status_code}, error: {resp.text}"""'], {}), "(f'Unknown Error: {resp.status_code}, error: {resp.text}')\n", (3562, 3620), False, 'from lancedb.remote.errors import LanceDBClientError\n'), ((7643, 7710), 'os.environ.get', 'os.environ.get', (['"""LANCE_CLIENT_RETRY_STATUSES"""', '"""429, 500, 502, 503"""'], {}), "('LANCE_CLIENT_RETRY_STATUSES', '429, 500, 502, 503')\n", (7657, 7710), False, 'import os\n')] |
from langchain.text_splitter import (
RecursiveCharacterTextSplitter,
Language,
LatexTextSplitter,
)
from langchain.document_loaders import TextLoader
from langchain.embeddings import OpenAIEmbeddings
import argparse, os, arxiv
os.environ["OPENAI_API_KEY"] = "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"
embeddings = OpenAIEmbeddings()
from langchain.vectorstores import LanceDB
from lancedb.pydantic import Vector, LanceModel
from typing import List
from datetime import datetime
import lancedb
embedding_out_length = 1536
class Content(LanceModel):
id: str
arxiv_id: str
vector: Vector(embedding_out_length)
text: str
uploaded_date: datetime
title: str
authors: List[str]
abstract: str
categories: List[str]
url: str
def PyPDF_to_Vector(table: LanceDB, embeddings: OpenAIEmbeddings, src_dir: str, n_threads: int = 1):
pass
if __name__ == "__main__":
argparser = argparse.ArgumentParser(description="Create Vector DB and perform ingestion from source files")
argparser.add_argument('-s', '--src_dir', type=str, required=True, help = "Source directory where arxiv sources are stored")
argparser.add_argument('-db', '--db_name', type=str, required=True, help = "Name of the LanceDB database to be created")
argparser.add_argument('-t', '--table_name', type=str, required=False, help = "Name of the LanceDB table to be created", default = "EIC_archive")
argparser.add_argument('-openai_key', '--openai_api_key', type=str, required=True, help = "OpenAI API key")
argparser.add_argument('-c', '--chunking', type = str, required=False, help = "Type of Chunking PDF or LATEX", default = "PDF")
argparser.add_argument('-n', '--nthreads', type=int, default=-1)
args = argparser.parse_args()
SRC_DIR = args.src_dir
DB_NAME = args.db_name
TABLE_NAME = args.table_name
OPENAI_API_KEY = args.openai_api_key
NTHREADS = args.nthreads
db = lancedb.connect(DB_NAME)
table = db.create_table(TABLE_NAME, schema=Content, mode="overwrite")
    db = lancedb.connect(DB_NAME)
table = db.create_table(
"EIC_archive",
data=[
{
"vector": embeddings.embed_query("EIC LLM"),
"text": "EIC LLM",
"id": "1",
"arxiv_id" : "N/A",
"title" : "N/A",
"category" : "N/A",
"published" : "N/A"
}
],
mode="overwrite",
)
vectorstore = LanceDB(connection = table, embedding = embeddings)
sourcedir = "PDFs"
count = 0
for source in os.listdir(sourcedir):
if not os.path.isdir(os.path.join("PDFs", source)):
continue
print (f"Adding the source document {source} to the Vector DB")
    client = arxiv.Client()
    search = arxiv.Search(id_list=[source])
    paper = next(client.results(search))
meta_data = {"arxiv_id": paper.entry_id,
"title": paper.title,
"category" : categories[paper.primary_category],
"published": paper.published
}
for file in os.listdir(os.path.join(sourcedir, source)):
if file.endswith(".tex"):
latex_file = os.path.join(sourcedir, source, file)
print (source, latex_file)
documents = TextLoader(latex_file, encoding = 'latin-1').load()
latex_splitter = LatexTextSplitter(
chunk_size=120, chunk_overlap=10
)
documents = latex_splitter.split_documents(documents)
for doc in documents:
for k, v in meta_data.items():
doc.metadata[k] = v
vectorstore.add_documents(documents = documents)
            count += len(documents)
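
# Summary line (added for convenience; not part of the original script):
print(f"Ingested {count} LaTeX chunks into the vector store")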
"lancedb.pydantic.Vector",
"lancedb.connect"
] | [((342, 360), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (358, 360), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2116, 2133), 'lancedb.connect', 'lancedb.connect', ([], {}), '()\n', (2131, 2133), False, 'import lancedb\n'), ((2820, 2867), 'langchain.vectorstores.LanceDB', 'LanceDB', ([], {'connection': 'table', 'embedding': 'embeddings'}), '(connection=table, embedding=embeddings)\n', (2827, 2867), False, 'from langchain.vectorstores import LanceDB\n'), ((2916, 2937), 'os.listdir', 'os.listdir', (['sourcedir'], {}), '(sourcedir)\n', (2926, 2937), False, 'import argparse, os, arxiv\n'), ((648, 676), 'lancedb.pydantic.Vector', 'Vector', (['embedding_out_length'], {}), '(embedding_out_length)\n', (654, 676), False, 'from lancedb.pydantic import Vector, LanceModel\n'), ((978, 1078), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Create Vector DB and perform ingestion from source files"""'}), "(description=\n 'Create Vector DB and perform ingestion from source files')\n", (1001, 1078), False, 'import argparse, os, arxiv\n'), ((2006, 2030), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (2021, 2030), False, 'import lancedb\n'), ((3110, 3124), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3122, 3124), False, 'import arxiv\n'), ((3138, 3168), 'arxiv.Search', 'arxiv.Search', ([], {'id_list': '[source]'}), '(id_list=[source])\n', (3150, 3168), False, 'import arxiv\n'), ((3458, 3489), 'os.path.join', 'os.path.join', (['sourcedir', 'source'], {}), '(sourcedir, source)\n', (3470, 3489), False, 'import argparse, os, arxiv\n'), ((2964, 2992), 'os.path.join', 'os.path.join', (['"""PDFs"""', 'source'], {}), "('PDFs', source)\n", (2976, 2992), False, 'import argparse, os, arxiv\n'), ((3551, 3588), 'os.path.join', 'os.path.join', (['sourcedir', 'source', 'file'], {}), '(sourcedir, source, file)\n', (3563, 3588), False, 'import argparse, os, arxiv\n'), ((3733, 3784), 'langchain.text_splitter.LatexTextSplitter', 'LatexTextSplitter', ([], {'chunk_size': '(120)', 'chunk_overlap': '(10)'}), '(chunk_size=120, chunk_overlap=10)\n', (3750, 3784), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter, Language, LatexTextSplitter\n'), ((3186, 3200), 'arxiv.Client', 'arxiv.Client', ([], {}), '()\n', (3198, 3200), False, 'import arxiv\n'), ((3652, 3694), 'langchain.document_loaders.TextLoader', 'TextLoader', (['latex_file'], {'encoding': '"""latin-1"""'}), "(latex_file, encoding='latin-1')\n", (3662, 3694), False, 'from langchain.document_loaders import TextLoader\n')] |
import time
import os
import pandas as pd
import streamlit as st
import lancedb
from lancedb.embeddings import with_embeddings
from langchain import PromptTemplate
import predictionguard as pg
import duckdb
import re
import numpy as np
from sentence_transformers import SentenceTransformer
#---------------------#
# Lance DB Setup #
#---------------------#
uri = "schema.lancedb"
db = lancedb.connect(uri)
def embed(query, embModel):
return embModel.encode(query)
def batch_embed_func(batch):
return [st.session_state['en_emb'].encode(sentence) for sentence in batch]
#---------------------#
# Streamlit config #
#---------------------#
if "login" not in st.session_state:
st.session_state["login"] = False
# Hide the hamburger menu
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
#--------------------------#
# Define datasets #
#--------------------------#
#JOBS
df1=pd.read_csv('datasets/jobs.csv')
#SOCIAL
df2=pd.read_csv('datasets/social.csv')
#movies
df3=pd.read_csv('datasets/movies.csv')
conn = duckdb.connect(database=':memory:')
conn.register('jobs', df1)
conn.register('social', df2)
conn.register('movies', df3)
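# The "schema" LanceDB table searched below is assumed to be pre-built (this
# script only opens it). A minimal bootstrap sketch, one row per dataset:
# table = db.create_table("schema", data=[
#     {"text": "jobs", "schema": "CREATE TABLE jobs(...)", "vector": embed("jobs", model)},
#     {"text": "social", "schema": "CREATE TABLE social(...)", "vector": embed("social", model)},
#     {"text": "movies", "schema": "CREATE TABLE movies(...)", "vector": embed("movies", model)},
# ], mode="overwrite")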
#--------------------------#
# Prompt Templates #
#--------------------------#
### PROMPT TEMPLATES
### PROMPT TEMPLATES
qa_template = """### System:
You are a data chatbot who answers the user question. To answer these questions we need to run SQL queries on our data and its output is given below in context. You just have to frame your answer using that context. Give a short and crisp response.Don't add any notes or any extra information after your response.
### User:
Question: {question}
context: {context}
### Assistant:
"""
qa_prompt = PromptTemplate(template=qa_template,input_variables=["question", "context"])
sql_template = """<|begin_of_sentence|>You are a SQL expert and you only generate SQL queries which are executable. You provide no extra explanations.
You respond with a SQL query that answers the user question in the below instruction by querying a database with the schema provided in the below instruction.
Always start your query with SELECT statement and end with a semicolon.
### Instruction:
User question: \"{question}\"
Database schema:
{schema}
### Response:
"""
sql_prompt=PromptTemplate(template=sql_template, input_variables=["question","schema"])
#--------------------------#
# Generate SQL Query #
#--------------------------#
# Embeddings setup
name="all-MiniLM-L12-v2"
def load_model():
return SentenceTransformer(name)
model = load_model()
def generate_sql_query(question, schema):
prompt_filled = sql_prompt.format(question=question,schema=schema)
try:
result = pg.Completion.create(
model="deepseek-coder-6.7b-instruct",
prompt=prompt_filled,
max_tokens=300,
temperature=0.1
)
sql_query = result["choices"][0]["text"]
return sql_query
except Exception as e:
return None
def extract_and_refine_sql_query(sql_query):
# Extract SQL query using a regular expression
match = re.search(r"(SELECT.*?);", sql_query, re.DOTALL)
if match:
refined_query = match.group(1)
# Check for and remove any text after a colon
colon_index = refined_query.find(':')
if colon_index != -1:
refined_query = refined_query[:colon_index]
# Ensure the query ends with a semicolon
if not refined_query.endswith(';'):
refined_query += ';'
return refined_query
else:
return ""
def get_answer_from_sql(question):
# Search Relavent Tables
table = db.open_table("schema")
results = table.search(embed(question, model)).limit(2).to_df()
print(results)
results = results[results['_distance'] < 1.5]
print("Results:", results)
if len(results) == 0:
completion = "We did not find any relevant tables."
return completion
else:
results.sort_values(by=['_distance'], inplace=True, ascending=True)
doc_use = ""
for _, row in results.iterrows():
if len(row['text'].split(' ')) < 10:
continue
else:
schema=row['schema']
table_name=row['text']
st.sidebar.info(table_name)
st.sidebar.code(schema)
break
sql_query = generate_sql_query(question, schema)
sql_query = extract_and_refine_sql_query(sql_query)
try:
# print("Executing SQL Query:", sql_query)
result = conn.execute(sql_query).fetchall()
# print("Result:", result)
return result, sql_query
except Exception as e:
print(f"Error executing SQL query: {e}")
return "There was an error executing the SQL query."
#--------------------------#
# Get Answer #
#--------------------------#
def get_answer(question,context):
try:
prompt_filled = qa_prompt.format(question=question, context=context)
# Respond to the user
output = pg.Completion.create(
model="Neural-Chat-7B",
prompt=prompt_filled,
max_tokens=200,
temperature=0.1
)
completion = output['choices'][0]['text']
return completion
except Exception as e:
completion = "There was an error executing the SQL query."
return completion
#--------------------------#
# Streamlit app #
#--------------------------#
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("Ask a question"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
# contruct prompt thread
examples = []
turn = "user"
example = {}
for m in st.session_state.messages:
latest_message = m["content"]
example[turn] = m["content"]
if turn == "user":
turn = "assistant"
else:
turn = "user"
examples.append(example)
example = {}
        if len(examples) > 2:
            examples = examples[-2:]
        else:
            thread = ""
# # Check for PII
# with st.spinner("Checking for PII..."):
# pii_result = pg.PII.check(
# prompt=latest_message,
# replace=False,
# replace_method="fake"
# )
# # Check for injection
# with st.spinner("Checking for security vulnerabilities..."):
# injection_result = pg.Injection.check(
# prompt=latest_message,
# detect=True
# )
# # Handle insecure states
# elif "[" in pii_result['checks'][0]['pii_types_and_positions']:
# st.warning('Warning! PII detected. Please avoid using personal information.')
# full_response = "Warning! PII detected. Please avoid using personal information."
# elif injection_result['checks'][0]['probability'] > 0.5:
# st.warning('Warning! Injection detected. Your input might result in a security breach.')
# full_response = "Warning! Injection detected. Your input might result in a security breach."
# generate response
with st.spinner("Generating an answer..."):
            context = get_answer_from_sql(latest_message)
            print("context", context)
            completion = get_answer(latest_message, context)
# display response
for token in completion.split(" "):
full_response += " " + token
            message_placeholder.markdown(full_response + "▌")
time.sleep(0.075)
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response}) | [
"lancedb.connect"
] | [((413, 433), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (428, 433), False, 'import lancedb\n'), ((890, 947), 'streamlit.markdown', 'st.markdown', (['hide_streamlit_style'], {'unsafe_allow_html': '(True)'}), '(hide_streamlit_style, unsafe_allow_html=True)\n', (901, 947), True, 'import streamlit as st\n'), ((1043, 1075), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/jobs.csv"""'], {}), "('datasets/jobs.csv')\n", (1054, 1075), True, 'import pandas as pd\n'), ((1089, 1123), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/social.csv"""'], {}), "('datasets/social.csv')\n", (1100, 1123), True, 'import pandas as pd\n'), ((1137, 1171), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/movies.csv"""'], {}), "('datasets/movies.csv')\n", (1148, 1171), True, 'import pandas as pd\n'), ((1180, 1215), 'duckdb.connect', 'duckdb.connect', ([], {'database': '""":memory:"""'}), "(database=':memory:')\n", (1194, 1215), False, 'import duckdb\n'), ((1861, 1938), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'qa_template', 'input_variables': "['question', 'context']"}), "(template=qa_template, input_variables=['question', 'context'])\n", (1875, 1938), False, 'from langchain import PromptTemplate\n'), ((2426, 2503), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'sql_template', 'input_variables': "['question', 'schema']"}), "(template=sql_template, input_variables=['question', 'schema'])\n", (2440, 2503), False, 'from langchain import PromptTemplate\n'), ((2672, 2697), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (2691, 2697), False, 'from sentence_transformers import SentenceTransformer\n'), ((3239, 3286), 're.search', 're.search', (['"""(SELECT.*?);"""', 'sql_query', 're.DOTALL'], {}), "('(SELECT.*?);', sql_query, re.DOTALL)\n", (3248, 3286), False, 'import re\n'), ((5846, 5877), 'streamlit.chat_input', 'st.chat_input', (['"""Ask a question"""'], {}), "('Ask a question')\n", (5859, 5877), True, 'import streamlit as st\n'), ((5883, 5952), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (5915, 5952), True, 'import streamlit as st\n'), ((8226, 8311), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (8258, 8311), True, 'import streamlit as st\n'), ((2856, 2974), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""deepseek-coder-6.7b-instruct"""', 'prompt': 'prompt_filled', 'max_tokens': '(300)', 'temperature': '(0.1)'}), "(model='deepseek-coder-6.7b-instruct', prompt=\n prompt_filled, max_tokens=300, temperature=0.1)\n", (2876, 2974), True, 'import predictionguard as pg\n'), ((5195, 5298), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Neural-Chat-7B"""', 'prompt': 'prompt_filled', 'max_tokens': '(200)', 'temperature': '(0.1)'}), "(model='Neural-Chat-7B', prompt=prompt_filled,\n max_tokens=200, temperature=0.1)\n", (5215, 5298), True, 'import predictionguard as pg\n'), ((5758, 5790), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (5773, 5790), True, 'import streamlit as st\n'), ((5800, 5831), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (5811, 5831), True, 'import streamlit as st\n'), 
((5962, 5985), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (5977, 5985), True, 'import streamlit as st\n'), ((5995, 6014), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (6006, 6014), True, 'import streamlit as st\n'), ((6025, 6053), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (6040, 6053), True, 'import streamlit as st\n'), ((6085, 6095), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (6093, 6095), True, 'import streamlit as st\n'), ((7748, 7785), 'streamlit.spinner', 'st.spinner', (['"""Generating an answer..."""'], {}), "('Generating an answer...')\n", (7758, 7785), True, 'import streamlit as st\n'), ((4413, 4440), 'streamlit.sidebar.info', 'st.sidebar.info', (['table_name'], {}), '(table_name)\n', (4428, 4440), True, 'import streamlit as st\n'), ((4457, 4480), 'streamlit.sidebar.code', 'st.sidebar.code', (['schema'], {}), '(schema)\n', (4472, 4480), True, 'import streamlit as st\n'), ((8147, 8164), 'time.sleep', 'time.sleep', (['(0.075)'], {}), '(0.075)\n', (8157, 8164), False, 'import time\n')] |
from FlagEmbedding import LLMEmbedder, FlagReranker
import os
import lancedb
import re
import pandas as pd
import random
from datasets import load_dataset
import torch
import gc
import lance
from lancedb.embeddings import with_embeddings
task = "qa" # Encode for a specific task (qa, icl, chat, lrlm, tool, convsearch)
embed_model = LLMEmbedder('BAAI/llm-embedder', use_fp16=False) # Load model (automatically use GPUs)
reranker_model = FlagReranker('BAAI/bge-reranker-base', use_fp16=True) # use_fp16 speeds up computation with a slight performance degradation
"""# Load `Chunks` of data from [BeIR Dataset](https://huggingface.co/datasets/BeIR/scidocs)
Note: This is a dataset built specially for retrieval tasks to see how good your search is working
"""
data=pd.read_csv("Kcc_subset.csv")
# just random samples for faster embed demo
data['documents'] = 'query:' + data['QueryText'] + ', answer:' + data['KccAns']
data = data.dropna()
def embed_documents(batch):
"""
Function to embed the whole text data
"""
return embed_model.encode_keys(batch, task=task) # Encode data or 'keys'
db = lancedb.connect("./db") # Connect Local DB
if "doc_embed" in db.table_names():
table = db.open_table("doc_embed") # Open Table
else:
# Use the train text chunk data to save embed in the DB
    data1 = with_embeddings(embed_documents, data, column="documents", show_progress=True, batch_size=512)
table = db.create_table("doc_embed", data=data1) # create Table
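    # Optional: on larger tables an ANN index speeds up search; a hedged sketch with
    # illustrative parameters (tune for your data size):
    # table.create_index(metric="cosine", num_partitions=256, num_sub_vectors=96)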
"""# Search from a random Text"""
def search(query, top_k = 10):
"""
Search a query from the table
"""
query_vector = embed_model.encode_queries(query, task=task) # Encode the QUERY (it is done differently than the 'key')
search_results = table.search(query_vector).limit(top_k)
return ",".join(search_results.to_pandas().dropna(subset = "QueryText").reset_index(drop = True)["documents"].to_list())
# query = "how to control flower drop in bottelgourd?"
# print("QUERY:-> ", query)
# # get top_k search results
# search_results = search(query, top_k = 10).to_pandas().dropna(subset = "Query").reset_index(drop = True)["documents"]
# print(",".join(search_results.to_list))
# def rerank(query, search_results):
# search_results["old_similarity_rank"] = search_results.index+1 # Old ranks
# torch.cuda.empty_cache()
# gc.collect()
# search_results["new_scores"] = reranker_model.compute_score([[query,chunk] for chunk in search_results["text"]]) # Re compute ranks
# return search_results.sort_values(by = "new_scores", ascending = False).reset_index(drop = True)
# print("QUERY:-> ", query) | [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((356, 404), 'FlagEmbedding.LLMEmbedder', 'LLMEmbedder', (['"""BAAI/llm-embedder"""'], {'use_fp16': '(False)'}), "('BAAI/llm-embedder', use_fp16=False)\n", (367, 404), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((463, 516), 'FlagEmbedding.FlagReranker', 'FlagReranker', (['"""BAAI/bge-reranker-base"""'], {'use_fp16': '(True)'}), "('BAAI/bge-reranker-base', use_fp16=True)\n", (475, 516), False, 'from FlagEmbedding import LLMEmbedder, FlagReranker\n'), ((803, 832), 'pandas.read_csv', 'pd.read_csv', (['"""Kcc_subset.csv"""'], {}), "('Kcc_subset.csv')\n", (814, 832), True, 'import pandas as pd\n'), ((1162, 1185), 'lancedb.connect', 'lancedb.connect', (['"""./db"""'], {}), "('./db')\n", (1177, 1185), False, 'import lancedb\n'), ((1370, 1469), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_documents', 'data'], {'column': '"""documents"""', 'show_progress': '(True)', 'batch_size': '(512)'}), "(embed_documents, data, column='documents', show_progress=\n True, batch_size=512)\n", (1385, 1469), False, 'from lancedb.embeddings import with_embeddings\n')] |
import time
import re
import shutil
import os
import urllib.request
import html2text
import predictionguard as pg
from langchain import PromptTemplate, FewShotPromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import PredictionGuard
import streamlit as st
from sentence_transformers import SentenceTransformer
import lancedb
from lancedb.embeddings import with_embeddings
import pandas as pd
#--------------------------#
# Prompt templates #
#--------------------------#
demo_formatter_template = """\nUser: {user}
Assistant: {assistant}\n"""
demo_prompt = PromptTemplate(
input_variables=["user", "assistant"],
template=demo_formatter_template,
)
category_template = """### Instruction:
Read the below input and determine if it is a request to generate computer code? Respond "yes" or "no".
### Input:
{query}
### Response:
"""
category_prompt = PromptTemplate(
input_variables=["query"],
template=category_template
)
qa_template = """### Instruction:
Read the context below and respond with an answer to the question. If the question cannot be answered based on the context alone or the context does not explicitly say the answer to the question, write "Sorry I had trouble answering this question, based on the information I found."
### Input:
Context: {context}
Question: {query}
### Response:
"""
qa_prompt = PromptTemplate(
input_variables=["context", "query"],
template=qa_template
)
chat_template = """### Instruction:
You are a friendly and clever AI assistant. Respond to the latest human message in the input conversation below.
### Input:
{context}
Human: {query}
AI:
### Response:
"""
chat_prompt = PromptTemplate(
input_variables=["context", "query"],
template=chat_template
)
code_template = """### Instruction:
You are a code generation assistant. Respond with a code snippet and any explanation requested in the below input.
### Input:
{query}
### Response:
"""
code_prompt = PromptTemplate(
input_variables=["query"],
template=code_template
)
#-------------------------#
# Vector search #
#-------------------------#
# Embeddings setup
name="all-MiniLM-L12-v2"
model = SentenceTransformer(name)
def embed_batch(batch):
return [model.encode(sentence) for sentence in batch]
def embed(sentence):
return model.encode(sentence)
# LanceDB setup
if os.path.exists(".lancedb"):
shutil.rmtree(".lancedb")
os.mkdir(".lancedb")
uri = ".lancedb"
db = lancedb.connect(uri)
def vector_search_urls(urls, query, sessionid):
for url in urls:
# Let's get the html off of a website.
fp = urllib.request.urlopen(url)
mybytes = fp.read()
html = mybytes.decode("utf8")
fp.close()
# And convert it to text.
h = html2text.HTML2Text()
h.ignore_links = True
text = h.handle(html)
# Chunk the text into smaller pieces for injection into LLM prompts.
text_splitter = CharacterTextSplitter(chunk_size=700, chunk_overlap=50)
docs = text_splitter.split_text(text)
docs = [x.replace('#', '-') for x in docs]
# Create a dataframe with the chunk ids and chunks
metadata = []
for i in range(len(docs)):
metadata.append([
i,
docs[i],
url
])
doc_df = pd.DataFrame(metadata, columns=["chunk", "text", "url"])
# Embed the documents
data = with_embeddings(embed_batch, doc_df)
# Create the table if there isn't one.
if sessionid not in db.table_names():
db.create_table(sessionid, data=data)
else:
table = db.open_table(sessionid)
table.add(data=data)
# Perform the query
table = db.open_table(sessionid)
results = table.search(embed(query)).limit(1).to_df()
results = results[results['_distance'] < 1.0]
if len(results) == 0:
doc_use = ""
else:
doc_use = results['text'].values[0]
# Clean up
db.drop_table(sessionid)
return doc_use
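# Hedged usage sketch (URL and session id are hypothetical):
# context = vector_search_urls(["https://example.com/docs"], "What is LanceDB?", "session-123")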
#-------------------------#
# Info Agent #
#-------------------------#
tools = load_tools(["serpapi"], llm=PredictionGuard(model="Nous-Hermes-Llama2-13B"))
agent = initialize_agent(
tools,
PredictionGuard(model="Nous-Hermes-Llama2-13B"),
agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
verbose=True,
max_execution_time=30)
#-------------------------#
# Helper functions #
#-------------------------#
def find_urls(text):
return re.findall(r'(https?://[^\s]+)', text)
# QuestionID provides some help in determining if a sentence is a question.
class QuestionID:
"""
QuestionID has the actual logic used to determine if sentence is a question
"""
    def padCharacter(self, character: str, sentence: str):
        if character in sentence:
            position = sentence.index(character)
            if position > 0 and position < len(sentence):
                # Check for existing white space before the special character.
                if (sentence[position - 1]) != " ":
                    sentence = sentence.replace(character, (" " + character))
        # Return outside the if, so sentences without the character come back unchanged
        return sentence
def predict(self, sentence: str):
questionStarters = [
"which", "wont", "cant", "isnt", "arent", "is", "do", "does",
"will", "can"
]
questionElements = [
"who", "what", "when", "where", "why", "how", "sup", "?"
]
sentence = sentence.lower()
sentence = sentence.replace("\'", "")
sentence = self.padCharacter('?', sentence)
splitWords = sentence.split()
if any(word == splitWords[0] for word in questionStarters) or any(
word in splitWords for word in questionElements):
return True
else:
return False
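# Hedged usage sketch of the heuristic above (example sentences are illustrative):
# QuestionID().predict("what is vector search?")    # -> True
# QuestionID().predict("tell me about databases.")  # -> False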
#---------------------#
# Streamlit config #
#---------------------#
#st.set_page_config(layout="wide")
# Hide the hamburger menu
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
#--------------------------#
# Streamlit sidebar #
#--------------------------#
st.sidebar.title("Super Chat")
st.sidebar.markdown(
"This app provides a chat interface driven by various generative AI models and "
"augmented (via information retrieval and agentic processing)."
)
url_text = st.sidebar.text_area(
"Enter one or more urls for reference information (separated by a comma):",
"", height=100)
if len(url_text) > 0:
urls = url_text.split(",")
else:
urls = []
#--------------------------#
# Streamlit app #
#--------------------------#
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message["role"]):
st.markdown(message["content"])
if prompt := st.chat_input("Hello?"):
st.session_state.messages.append({"role": "user", "content": prompt})
with st.chat_message("user"):
st.markdown(prompt)
with st.chat_message("assistant"):
message_placeholder = st.empty()
full_response = ""
# process the context
examples = []
turn = "user"
example = {}
for m in st.session_state.messages:
latest_message = m["content"]
example[turn] = m["content"]
if turn == "user":
turn = "assistant"
else:
turn = "user"
examples.append(example)
example = {}
    # Keep only the four most recent exchanges in the prompt context
    if len(examples) > 4:
        examples = examples[-4:]
# Determine what kind of message this is.
with st.spinner("Trying to figure out what you are wanting..."):
result = pg.Completion.create(
model="WizardCoder",
prompt=category_prompt.format(query=latest_message),
output={
"type": "categorical",
"categories": ["yes", "no"]
}
)
# configure out chain
code = result['choices'][0]['output']
qIDModel = QuestionID()
question = qIDModel.predict(latest_message)
if code == "no" and question:
# if there are urls, let's embed them as a primary data source.
if len(urls) > 0:
with st.spinner("Performing vector search..."):
info_context = vector_search_urls(urls, latest_message, "assistant")
else:
info_context = ""
# Handle the informational request.
if info_context != "":
with st.spinner("Generating a RAG result..."):
result = pg.Completion.create(
model="Nous-Hermes-Llama2-13B",
prompt=qa_prompt.format(context=info_context, query=latest_message)
)
completion = result['choices'][0]['text'].split('#')[0].strip()
# Otherwise try an agentic approach.
else:
with st.spinner("Trying to find an answer with an agent..."):
try:
completion = agent.run(latest_message)
                except Exception:
completion = "Sorry, I didn't find an answer. Could you rephrase the question?"
if "Agent stopped" in completion:
completion = "Sorry, I didn't find an answer. Could you rephrase the question?"
elif code == "yes":
# Handle the code generation request.
with st.spinner("Generating code..."):
result = pg.Completion.create(
model="WizardCoder",
prompt=code_prompt.format(query=latest_message),
max_tokens=500
)
completion = result['choices'][0]['text']
else:
        # construct prompt
few_shot_prompt = FewShotPromptTemplate(
examples=examples,
example_prompt=demo_prompt,
example_separator="",
prefix="The following is a conversation between an AI assistant and a human user. The assistant is helpful, creative, clever, and very friendly.\n",
suffix="\nHuman: {human}\nAssistant: ",
input_variables=["human"],
)
prompt = few_shot_prompt.format(human=latest_message)
# generate response
with st.spinner("Generating chat response..."):
result = pg.Completion.create(
model="Nous-Hermes-Llama2-13B",
prompt=prompt,
)
completion = result['choices'][0]['text']
# Print out the response.
completion = completion.split("Human:")[0].strip()
completion = completion.split("H:")[0].strip()
completion = completion.split('#')[0].strip()
for token in completion.split(" "):
full_response += " " + token
        message_placeholder.markdown(full_response + "▌")
time.sleep(0.075)
message_placeholder.markdown(full_response)
st.session_state.messages.append({"role": "assistant", "content": full_response}) | [
"lancedb.embeddings.with_embeddings",
"lancedb.connect"
] | [((728, 820), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['user', 'assistant']", 'template': 'demo_formatter_template'}), "(input_variables=['user', 'assistant'], template=\n demo_formatter_template)\n", (742, 820), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1030, 1099), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query']", 'template': 'category_template'}), "(input_variables=['query'], template=category_template)\n", (1044, 1099), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1510, 1584), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'query']", 'template': 'qa_template'}), "(input_variables=['context', 'query'], template=qa_template)\n", (1524, 1584), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((1820, 1896), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['context', 'query']", 'template': 'chat_template'}), "(input_variables=['context', 'query'], template=chat_template)\n", (1834, 1896), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((2113, 2178), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['query']", 'template': 'code_template'}), "(input_variables=['query'], template=code_template)\n", (2127, 2178), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((2328, 2353), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['name'], {}), '(name)\n', (2347, 2353), False, 'from sentence_transformers import SentenceTransformer\n'), ((2513, 2539), 'os.path.exists', 'os.path.exists', (['""".lancedb"""'], {}), "('.lancedb')\n", (2527, 2539), False, 'import os\n'), ((2571, 2591), 'os.mkdir', 'os.mkdir', (['""".lancedb"""'], {}), "('.lancedb')\n", (2579, 2591), False, 'import os\n'), ((2614, 2634), 'lancedb.connect', 'lancedb.connect', (['uri'], {}), '(uri)\n', (2629, 2634), False, 'import lancedb\n'), ((6281, 6338), 'streamlit.markdown', 'st.markdown', (['hide_streamlit_style'], {'unsafe_allow_html': '(True)'}), '(hide_streamlit_style, unsafe_allow_html=True)\n', (6292, 6338), True, 'import streamlit as st\n'), ((6429, 6461), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""Super Chat ๐"""'], {}), "('Super Chat ๐')\n", (6445, 6461), True, 'import streamlit as st\n'), ((6462, 6634), 'streamlit.sidebar.markdown', 'st.sidebar.markdown', (['"""This app provides a chat interface driven by various generative AI models and augmented (via information retrieval and agentic processing)."""'], {}), "(\n 'This app provides a chat interface driven by various generative AI models and augmented (via information retrieval and agentic processing).'\n )\n", (6481, 6634), True, 'import streamlit as st\n'), ((6649, 6770), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter one or more urls for reference information (separated by a comma):"""', '""""""'], {'height': '(100)'}), "(\n 'Enter one or more urls for reference information (separated by a comma):',\n '', height=100)\n", (6669, 6770), True, 'import streamlit as st\n'), ((2545, 2570), 'shutil.rmtree', 'shutil.rmtree', (['""".lancedb"""'], {}), "('.lancedb')\n", (2558, 2570), False, 'import shutil\n'), ((4440, 4487), 'langchain.llms.PredictionGuard', 'PredictionGuard', ([], {'model': '"""Nous-Hermes-Llama2-13B"""'}), "(model='Nous-Hermes-Llama2-13B')\n", (4455, 4487), False, 'from langchain.llms import PredictionGuard\n'), ((4702, 4740), 
're.findall', 're.findall', (['"""(https?://[^\\\\s]+)"""', 'text'], {}), "('(https?://[^\\\\s]+)', text)\n", (4712, 4740), False, 'import re\n'), ((7149, 7172), 'streamlit.chat_input', 'st.chat_input', (['"""Hello?"""'], {}), "('Hello?')\n", (7162, 7172), True, 'import streamlit as st\n'), ((7178, 7247), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'user', 'content': prompt}"], {}), "({'role': 'user', 'content': prompt})\n", (7210, 7247), True, 'import streamlit as st\n'), ((11513, 11598), 'streamlit.session_state.messages.append', 'st.session_state.messages.append', (["{'role': 'assistant', 'content': full_response}"], {}), "({'role': 'assistant', 'content':\n full_response})\n", (11545, 11598), True, 'import streamlit as st\n'), ((2767, 2794), 'urllib.request.urlopen', 'urllib.request.urlopen', (['url'], {}), '(url)\n', (2789, 2794), False, 'import urllib\n'), ((2927, 2948), 'html2text.HTML2Text', 'html2text.HTML2Text', ([], {}), '()\n', (2946, 2948), False, 'import html2text\n'), ((3111, 3166), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'chunk_size': '(700)', 'chunk_overlap': '(50)'}), '(chunk_size=700, chunk_overlap=50)\n', (3132, 3166), False, 'from langchain.text_splitter import CharacterTextSplitter\n'), ((3507, 3563), 'pandas.DataFrame', 'pd.DataFrame', (['metadata'], {'columns': "['chunk', 'text', 'url']"}), "(metadata, columns=['chunk', 'text', 'url'])\n", (3519, 3563), True, 'import pandas as pd\n'), ((3618, 3654), 'lancedb.embeddings.with_embeddings', 'with_embeddings', (['embed_batch', 'doc_df'], {}), '(embed_batch, doc_df)\n', (3633, 3654), False, 'from lancedb.embeddings import with_embeddings\n'), ((4349, 4396), 'langchain.llms.PredictionGuard', 'PredictionGuard', ([], {'model': '"""Nous-Hermes-Llama2-13B"""'}), "(model='Nous-Hermes-Llama2-13B')\n", (4364, 4396), False, 'from langchain.llms import PredictionGuard\n'), ((7061, 7093), 'streamlit.chat_message', 'st.chat_message', (["message['role']"], {}), "(message['role'])\n", (7076, 7093), True, 'import streamlit as st\n'), ((7103, 7134), 'streamlit.markdown', 'st.markdown', (["message['content']"], {}), "(message['content'])\n", (7114, 7134), True, 'import streamlit as st\n'), ((7257, 7280), 'streamlit.chat_message', 'st.chat_message', (['"""user"""'], {}), "('user')\n", (7272, 7280), True, 'import streamlit as st\n'), ((7290, 7309), 'streamlit.markdown', 'st.markdown', (['prompt'], {}), '(prompt)\n', (7301, 7309), True, 'import streamlit as st\n'), ((7320, 7348), 'streamlit.chat_message', 'st.chat_message', (['"""assistant"""'], {}), "('assistant')\n", (7335, 7348), True, 'import streamlit as st\n'), ((7380, 7390), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (7388, 7390), True, 'import streamlit as st\n'), ((7955, 8013), 'streamlit.spinner', 'st.spinner', (['"""Trying to figure out what you are wanting..."""'], {}), "('Trying to figure out what you are wanting...')\n", (7965, 8013), True, 'import streamlit as st\n'), ((11438, 11455), 'time.sleep', 'time.sleep', (['(0.075)'], {}), '(0.075)\n', (11448, 11455), False, 'import time\n'), ((10288, 10613), 'langchain.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {'examples': 'examples', 'example_prompt': 'demo_prompt', 'example_separator': '""""""', 'prefix': '"""The following is a conversation between an AI assistant and a human user. 
The assistant is helpful, creative, clever, and very friendly.\n"""', 'suffix': '"""\nHuman: {human}\nAssistant: """', 'input_variables': "['human']"}), '(examples=examples, example_prompt=demo_prompt,\n example_separator=\'\', prefix=\n """The following is a conversation between an AI assistant and a human user. The assistant is helpful, creative, clever, and very friendly.\n"""\n , suffix="""\nHuman: {human}\nAssistant: """, input_variables=[\'human\'])\n', (10309, 10613), False, 'from langchain import PromptTemplate, FewShotPromptTemplate\n'), ((8640, 8681), 'streamlit.spinner', 'st.spinner', (['"""Performing vector search..."""'], {}), "('Performing vector search...')\n", (8650, 8681), True, 'import streamlit as st\n'), ((8929, 8969), 'streamlit.spinner', 'st.spinner', (['"""Generating a RAG result..."""'], {}), "('Generating a RAG result...')\n", (8939, 8969), True, 'import streamlit as st\n'), ((9377, 9432), 'streamlit.spinner', 'st.spinner', (['"""Trying to find an answer with an agent..."""'], {}), "('Trying to find an answer with an agent...')\n", (9387, 9432), True, 'import streamlit as st\n'), ((9910, 9942), 'streamlit.spinner', 'st.spinner', (['"""Generating code..."""'], {}), "('Generating code...')\n", (9920, 9942), True, 'import streamlit as st\n'), ((10823, 10864), 'streamlit.spinner', 'st.spinner', (['"""Generating chat response..."""'], {}), "('Generating chat response...')\n", (10833, 10864), True, 'import streamlit as st\n'), ((10891, 10958), 'predictionguard.Completion.create', 'pg.Completion.create', ([], {'model': '"""Nous-Hermes-Llama2-13B"""', 'prompt': 'prompt'}), "(model='Nous-Hermes-Llama2-13B', prompt=prompt)\n", (10911, 10958), True, 'import predictionguard as pg\n')] |
import logging
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Union
logger = logging.getLogger(__name__)
from hamilton import contrib
with contrib.catch_import_errors(__name__, __file__, logger):
import pyarrow as pa
import lancedb
import numpy as np
import pandas as pd
from lancedb.pydantic import LanceModel
from hamilton.function_modifiers import tag
VectorType = Union[list, np.ndarray, pa.Array, pa.ChunkedArray]
DataType = Union[Dict, List[Dict], pd.DataFrame, pa.Table, Iterable[pa.RecordBatch]]
TableSchema = Union[pa.Schema, LanceModel]
def client(uri: Union[str, Path] = "./.lancedb") -> lancedb.DBConnection:
"""Create a LanceDB connection.
:param uri: path to local LanceDB
:return: connection to LanceDB instance.
"""
return lancedb.connect(uri=uri)
def _create_table(
client: lancedb.DBConnection,
table_name: str,
schema: Optional[TableSchema] = None,
overwrite_table: bool = False,
) -> lancedb.db.LanceTable:
"""Create a new table based on schema."""
mode = "overwrite" if overwrite_table else "create"
table = client.create_table(name=table_name, schema=schema, mode=mode)
return table
@tag(side_effect="True")
def table_ref(
client: lancedb.DBConnection,
table_name: str,
schema: Optional[TableSchema] = None,
overwrite_table: bool = False,
) -> lancedb.db.LanceTable:
"""Create or reference a LanceDB table
:param vdb_client: LanceDB connection.
:param table_name: Name of the table.
:param schema: Pyarrow schema defining the table schema.
:param overwrite_table: If True, overwrite existing table
:return: Reference to existing or newly created table.
"""
try:
table = client.open_table(table_name)
except FileNotFoundError:
if schema is None:
raise ValueError("`schema` must be provided to create table.")
table = _create_table(
client=client,
table_name=table_name,
schema=schema,
overwrite_table=overwrite_table,
)
return table
@tag(side_effect="True")
def reset(client: lancedb.DBConnection) -> Dict[str, List[str]]:
"""Drop all existing tables.
    :param client: LanceDB connection.
:return: dictionary containing all the dropped tables.
"""
tables_dropped = []
for table_name in client.table_names():
client.drop_table(table_name)
tables_dropped.append(table_name)
return dict(tables_dropped=tables_dropped)
@tag(side_effect="True")
def insert(table_ref: lancedb.db.LanceTable, data: DataType) -> Dict:
"""Push new data to the specified table.
:param table_ref: Reference to the LanceDB table.
:param data: Data to add to the table. Ref: https://lancedb.github.io/lancedb/guides/tables/#adding-to-a-table
:return: Reference to the table and number of rows added
"""
n_rows_before = table_ref.to_arrow().shape[0]
table_ref.add(data)
n_rows_after = table_ref.to_arrow().shape[0]
n_rows_added = n_rows_after - n_rows_before
return dict(table=table_ref, n_rows_added=n_rows_added)
@tag(side_effect="True")
def delete(table_ref: lancedb.db.LanceTable, delete_expression: str) -> Dict:
"""Delete existing data using an SQL expression.
:param table_ref: Reference to the LanceDB table.
    :param delete_expression: SQL expression selecting the rows to delete. Ref: https://lancedb.github.io/lancedb/sql/
:return: Reference to the table and number of rows deleted
"""
n_rows_before = table_ref.to_arrow().shape[0]
table_ref.delete(delete_expression)
n_rows_after = table_ref.to_arrow().shape[0]
n_rows_deleted = n_rows_before - n_rows_after
return dict(table=table_ref, n_rows_deleted=n_rows_deleted)
def vector_search(
table_ref: lancedb.db.LanceTable,
vector_query: VectorType,
columns: Optional[List[str]] = None,
where: Optional[str] = None,
prefilter_where: bool = False,
limit: int = 10,
) -> pd.DataFrame:
"""Search database using an embedding vector.
:param table_ref: table to search
:param vector_query: embedding of the query
:param columns: columns to include in the results
:param where: SQL where clause to pre- or post-filter results
:param prefilter_where: If True filter rows before search else filter after search
:param limit: number of rows to return
:return: A dataframe of results
"""
query_ = (
table_ref.search(
query=vector_query,
query_type="vector",
vector_column_name="vector",
)
.select(columns=columns)
.where(where, prefilter=prefilter_where)
.limit(limit=limit)
)
return query_.to_pandas()
def full_text_search(
table_ref: lancedb.db.LanceTable,
full_text_query: str,
full_text_index: Union[str, List[str]],
where: Optional[str] = None,
limit: int = 10,
rebuild_index: bool = True,
) -> pd.DataFrame:
"""Search database using an embedding vector.
:param table_ref: table to search
:param full_text_query: text query
:param full_text_index: one or more text columns to search
:param where: SQL where clause to pre- or post-filter results
:param limit: number of rows to return
:param rebuild_index: If True rebuild the index
:return: A dataframe of results
"""
# NOTE. Currently, the index needs to be recreated whenever data is added
# ref: https://lancedb.github.io/lancedb/fts/#installation
if rebuild_index:
table_ref.create_fts_index(full_text_index)
query_ = (
table_ref.search(query=full_text_query, query_type="fts")
.select(full_text_index)
.where(where)
.limit(limit)
)
return query_.to_pandas()
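# A hedged end-to-end usage sketch with the Hamilton driver; the module alias and
# inputs below are hypothetical, not part of this dataflow definition:
# from hamilton import driver
# import lancedb_module  # i.e. this file imported as a module
# dr = driver.Driver({}, lancedb_module)
# results = dr.execute(
#     ["insert"],
#     inputs={"table_name": "my_table", "schema": my_schema, "data": my_data},
# )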
| [
"lancedb.connect"
] | [((107, 134), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (124, 134), False, 'import logging\n'), ((1219, 1242), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (1222, 1242), False, 'from hamilton.function_modifiers import tag\n'), ((2122, 2145), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (2125, 2145), False, 'from hamilton.function_modifiers import tag\n'), ((2554, 2577), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (2557, 2577), False, 'from hamilton.function_modifiers import tag\n'), ((3166, 3189), 'hamilton.function_modifiers.tag', 'tag', ([], {'side_effect': '"""True"""'}), "(side_effect='True')\n", (3169, 3189), False, 'from hamilton.function_modifiers import tag\n'), ((171, 226), 'hamilton.contrib.catch_import_errors', 'contrib.catch_import_errors', (['__name__', '__file__', 'logger'], {}), '(__name__, __file__, logger)\n', (198, 226), False, 'from hamilton import contrib\n'), ((816, 840), 'lancedb.connect', 'lancedb.connect', ([], {'uri': 'uri'}), '(uri=uri)\n', (831, 840), False, 'import lancedb\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import importlib.metadata
import platform
import random
import sys
import time
from lancedb.utils import CONFIG
from lancedb.utils.general import TryExcept
from .general import (
PLATFORMS,
get_git_origin_url,
is_git_dir,
is_github_actions_ci,
is_online,
is_pip_package,
is_pytest_running,
threaded_request,
)
class _Events:
"""
A class for collecting anonymous event analytics. Event analytics are enabled when ``diagnostics=True`` in config and
disabled when ``diagnostics=False``.
You can enable or disable diagnostics by running ``lancedb diagnostics --enabled`` or ``lancedb diagnostics --disabled``.
Attributes
----------
url : str
The URL to send anonymous events.
rate_limit : float
The rate limit in seconds for sending events.
metadata : dict
A dictionary containing metadata about the environment.
enabled : bool
A flag to enable or disable Events based on certain conditions.
"""
_instance = None
url = "https://app.posthog.com/capture/"
headers = {"Content-Type": "application/json"}
api_key = "phc_oENDjGgHtmIDrV6puUiFem2RB4JA8gGWulfdulmMdZP"
# This api-key is write only and is safe to expose in the codebase.
def __init__(self):
"""
Initializes the Events object with default values for events, rate_limit, and metadata.
"""
self.events = [] # events list
self.throttled_event_names = ["search_table"]
self.throttled_events = set()
self.max_events = 5 # max events to store in memory
self.rate_limit = 60.0 * 5 # rate limit (seconds)
self.time = 0.0
if is_git_dir():
install = "git"
elif is_pip_package():
install = "pip"
else:
install = "other"
self.metadata = {
"cli": sys.argv[0],
"install": install,
"python": ".".join(platform.python_version_tuple()[:2]),
"version": importlib.metadata.version("lancedb"),
"platforms": PLATFORMS,
"session_id": round(random.random() * 1e15),
# 'engagement_time_msec': 1000 # TODO: In future we might be interested in this metric
}
TESTS_RUNNING = is_pytest_running() or is_github_actions_ci()
ONLINE = is_online()
self.enabled = (
CONFIG["diagnostics"]
and not TESTS_RUNNING
and ONLINE
and (
is_pip_package()
or get_git_origin_url() == "https://github.com/lancedb/lancedb.git"
)
)
def __call__(self, event_name, params={}):
"""
Attempts to add a new event to the events list and send events if the rate limit is reached.
Args
----
event_name : str
The name of the event to be logged.
params : dict, optional
A dictionary of additional parameters to be logged with the event.
"""
### NOTE: We might need a way to tag a session with a label to check usage from a source. Setting label should be exposed to the user.
if not self.enabled:
return
if (
len(self.events) < self.max_events
): # Events list limited to self.max_events (drop any events past this)
params.update(self.metadata)
event = {
"event": event_name,
"properties": params,
"timestamp": datetime.datetime.now(
tz=datetime.timezone.utc
).isoformat(),
"distinct_id": CONFIG["uuid"],
}
if event_name not in self.throttled_event_names:
self.events.append(event)
elif event_name not in self.throttled_events:
self.throttled_events.add(event_name)
self.events.append(event)
# Check rate limit
t = time.time()
if (t - self.time) < self.rate_limit:
return
# Time is over rate limiter, send now
data = {
"api_key": self.api_key,
"distinct_id": CONFIG["uuid"], # posthog needs this to accepts the event
"batch": self.events,
}
# POST equivalent to requests.post(self.url, json=data).
# threaded request is used to avoid blocking, retries are disabled, and verbose is disabled
# to avoid any possible disruption in the console.
threaded_request(
method="post",
url=self.url,
headers=self.headers,
json=data,
retry=0,
verbose=False,
)
# Flush & Reset
self.events = []
self.throttled_events = set()
self.time = t
@TryExcept(verbose=False)
def register_event(name: str, **kwargs):
if _Events._instance is None:
_Events._instance = _Events()
_Events._instance(name, **kwargs)
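# Hedged usage sketch (event name and parameters are illustrative):
# register_event("search_table", query_type="vector")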
| [
"lancedb.utils.general.TryExcept"
] | [((5422, 5446), 'lancedb.utils.general.TryExcept', 'TryExcept', ([], {'verbose': '(False)'}), '(verbose=False)\n', (5431, 5446), False, 'from lancedb.utils.general import TryExcept\n'), ((4584, 4595), 'time.time', 'time.time', ([], {}), '()\n', (4593, 4595), False, 'import time\n'), ((2567, 2598), 'platform.python_version_tuple', 'platform.python_version_tuple', ([], {}), '()\n', (2596, 2598), False, 'import platform\n'), ((2735, 2750), 'random.random', 'random.random', ([], {}), '()\n', (2748, 2750), False, 'import random\n'), ((4127, 4174), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (4148, 4174), False, 'import datetime\n')] |
import argparse
import os
import sys
from concurrent.futures import ProcessPoolExecutor, as_completed
from functools import lru_cache
from pathlib import Path
from typing import Any, Iterator
import lancedb
import pandas as pd
import srsly
from codetiming import Timer
from dotenv import load_dotenv
from lancedb.pydantic import pydantic_to_schema
from sentence_transformers import SentenceTransformer
from tqdm import tqdm
sys.path.insert(1, os.path.realpath(Path(__file__).resolve().parents[1]))
from api.config import Settings
from schemas.wine import LanceModelWine, Wine
load_dotenv()
# Custom types
JsonBlob = dict[str, Any]
class FileNotFoundError(Exception):
    # Note: this shadows the builtin exception of the same name within this module
    pass
@lru_cache()
def get_settings():
# Use lru_cache to avoid loading .env file for every request
return Settings()
def chunk_iterable(item_list: list[JsonBlob], chunksize: int) -> Iterator[list[JsonBlob]]:
"""
Break a large iterable into an iterable of smaller iterables of size `chunksize`
"""
for i in range(0, len(item_list), chunksize):
yield item_list[i : i + chunksize]
def get_json_data(data_dir: Path, filename: str) -> list[JsonBlob]:
"""Get all line-delimited json files (.jsonl) from a directory with a given prefix"""
file_path = data_dir / filename
if not file_path.is_file():
# File may not have been uncompressed yet so try to do that first
data = srsly.read_gzip_jsonl(file_path)
# This time if it isn't there it really doesn't exist
if not file_path.is_file():
raise FileNotFoundError(f"No valid .jsonl file found in `{data_dir}`")
else:
data = srsly.read_gzip_jsonl(file_path)
return data
def validate(
data: list[JsonBlob],
exclude_none: bool = False,
) -> list[JsonBlob]:
validated_data = [Wine(**item).model_dump(exclude_none=exclude_none) for item in data]
return validated_data
def embed_func(batch: list[str], model) -> list[list[float]]:
return [model.encode(sentence.lower()) for sentence in batch]
def vectorize_text(data: list[JsonBlob]) -> list[LanceModelWine] | None:
# Load a sentence transformer model for semantic similarity from a specified checkpoint
model_id = get_settings().embedding_model_checkpoint
assert model_id, "Invalid embedding model checkpoint specified in .env file"
MODEL = SentenceTransformer(model_id)
ids = [item["id"] for item in data]
to_vectorize = [text.get("to_vectorize") for text in data]
vectors = embed_func(to_vectorize, MODEL)
try:
data_batch = [{**d, "vector": vector} for d, vector in zip(data, vectors)]
except Exception as e:
print(f"{e}: Failed to add ID range {min(ids)}-{max(ids)}")
return None
return data_batch
def embed_batches(tbl: lancedb.db.LanceTable, validated_data: list[JsonBlob]) -> None:
    with ProcessPoolExecutor(max_workers=WORKERS) as executor:
        chunked_data = chunk_iterable(validated_data, CHUNKSIZE)
        for chunk in tqdm(chunked_data, total=len(validated_data) // CHUNKSIZE):
            futures = [executor.submit(vectorize_text, chunk)]
            batches = [f.result() for f in as_completed(futures) if f.result() is not None]
            if not batches:
                # Skip chunks that failed to vectorize
                continue
            df = pd.DataFrame.from_dict(batches[0])
            # Append so every chunk is persisted instead of overwriting the previous one
            tbl.add(df)
def main(data: list[JsonBlob]) -> None:
DB_NAME = f"../{get_settings().lancedb_dir}"
TABLE = "wines"
db = lancedb.connect(DB_NAME)
tbl = db.create_table(TABLE, schema=pydantic_to_schema(LanceModelWine), mode="overwrite")
print(f"Created table `{TABLE}`, with length {len(tbl)}")
with Timer(name="Bulk Index", text="Validated data using Pydantic in {:.4f} sec"):
validated_data = validate(data, exclude_none=False)
with Timer(name="Embed batches", text="Created sentence embeddings in {:.4f} sec"):
embed_batches(tbl, validated_data)
print(f"Finished inserting {len(tbl)} items into LanceDB table")
with Timer(name="Create index", text="Created IVF-PQ index in {:.4f} sec"):
        # Creating index (choose num_partitions as the power of 2 closest to len(dataset) // 5000).
        # In this case we have 130k datapoints, so 130000 // 5000 = 26, whose nearest power of 2 is 32;
        # a smaller num_partitions is used below for this demo-sized setup.
        tbl.create_index(metric="cosine", num_partitions=4, num_sub_vectors=32)
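        # A hedged sketch of that heuristic as code (illustrative, not part of the original script):
        # import math
        # num_partitions = 2 ** round(math.log2(max(len(tbl) // 5000, 1)))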
if __name__ == "__main__":
# fmt: off
parser = argparse.ArgumentParser("Bulk index database from the wine reviews JSONL data")
parser.add_argument("--limit", type=int, default=0, help="Limit the size of the dataset to load for testing purposes")
parser.add_argument("--chunksize", type=int, default=1000, help="Size of each chunk to break the dataset into before processing")
parser.add_argument("--filename", type=str, default="winemag-data-130k-v2.jsonl.gz", help="Name of the JSONL zip file to use")
parser.add_argument("--workers", type=int, default=4, help="Number of workers to use for vectorization")
args = vars(parser.parse_args())
# fmt: on
LIMIT = args["limit"]
DATA_DIR = Path(__file__).parents[3] / "data"
FILENAME = args["filename"]
CHUNKSIZE = args["chunksize"]
WORKERS = args["workers"]
data = list(get_json_data(DATA_DIR, FILENAME))
assert data, "No data found in the specified file"
data = data[:LIMIT] if LIMIT > 0 else data
main(data)
print("Finished execution!")
| [
"lancedb.connect",
"lancedb.pydantic.pydantic_to_schema"
] | [((580, 593), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (591, 593), False, 'from dotenv import load_dotenv\n'), ((685, 696), 'functools.lru_cache', 'lru_cache', ([], {}), '()\n', (694, 696), False, 'from functools import lru_cache\n'), ((793, 803), 'api.config.Settings', 'Settings', ([], {}), '()\n', (801, 803), False, 'from api.config import Settings\n'), ((2355, 2384), 'sentence_transformers.SentenceTransformer', 'SentenceTransformer', (['model_id'], {}), '(model_id)\n', (2374, 2384), False, 'from sentence_transformers import SentenceTransformer\n'), ((3439, 3463), 'lancedb.connect', 'lancedb.connect', (['DB_NAME'], {}), '(DB_NAME)\n', (3454, 3463), False, 'import lancedb\n'), ((4390, 4469), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""Bulk index database from the wine reviews JSONL data"""'], {}), "('Bulk index database from the wine reviews JSONL data')\n", (4413, 4469), False, 'import argparse\n'), ((1408, 1440), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1429, 1440), False, 'import srsly\n'), ((1647, 1679), 'srsly.read_gzip_jsonl', 'srsly.read_gzip_jsonl', (['file_path'], {}), '(file_path)\n', (1668, 1679), False, 'import srsly\n'), ((2852, 2892), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'WORKERS'}), '(max_workers=WORKERS)\n', (2871, 2892), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((3631, 3707), 'codetiming.Timer', 'Timer', ([], {'name': '"""Bulk Index"""', 'text': '"""Validated data using Pydantic in {:.4f} sec"""'}), "(name='Bulk Index', text='Validated data using Pydantic in {:.4f} sec')\n", (3636, 3707), False, 'from codetiming import Timer\n'), ((3779, 3856), 'codetiming.Timer', 'Timer', ([], {'name': '"""Embed batches"""', 'text': '"""Created sentence embeddings in {:.4f} sec"""'}), "(name='Embed batches', text='Created sentence embeddings in {:.4f} sec')\n", (3784, 3856), False, 'from codetiming import Timer\n'), ((3981, 4050), 'codetiming.Timer', 'Timer', ([], {'name': '"""Create index"""', 'text': '"""Created IVF-PQ index in {:.4f} sec"""'}), "(name='Create index', text='Created IVF-PQ index in {:.4f} sec')\n", (3986, 4050), False, 'from codetiming import Timer\n'), ((3242, 3276), 'pandas.DataFrame.from_dict', 'pd.DataFrame.from_dict', (['embed_data'], {}), '(embed_data)\n', (3264, 3276), True, 'import pandas as pd\n'), ((3505, 3539), 'lancedb.pydantic.pydantic_to_schema', 'pydantic_to_schema', (['LanceModelWine'], {}), '(LanceModelWine)\n', (3523, 3539), False, 'from lancedb.pydantic import pydantic_to_schema\n'), ((1813, 1825), 'schemas.wine.Wine', 'Wine', ([], {}), '(**item)\n', (1817, 1825), False, 'from schemas.wine import LanceModelWine, Wine\n'), ((5060, 5074), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (5064, 5074), False, 'from pathlib import Path\n'), ((462, 476), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (466, 476), False, 'from pathlib import Path\n'), ((3185, 3206), 'concurrent.futures.as_completed', 'as_completed', (['futures'], {}), '(futures)\n', (3197, 3206), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n')] |
# Copyright 2023 LanceDB Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
from typing import Any, Callable, Dict, Iterable, Optional, Union
import aiohttp
import attrs
import pyarrow as pa
from pydantic import BaseModel
from lancedb.common import Credential
from lancedb.remote import VectorQuery, VectorQueryResult
from lancedb.remote.errors import LanceDBClientError
ARROW_STREAM_CONTENT_TYPE = "application/vnd.apache.arrow.stream"
def _check_not_closed(f):
@functools.wraps(f)
def wrapped(self, *args, **kwargs):
if self.closed:
raise ValueError("Connection is closed")
return f(self, *args, **kwargs)
return wrapped
async def _read_ipc(resp: aiohttp.ClientResponse) -> pa.Table:
resp_body = await resp.read()
with pa.ipc.open_file(pa.BufferReader(resp_body)) as reader:
return reader.read_all()
@attrs.define(slots=False)
class RestfulLanceDBClient:
db_name: str
region: str
api_key: Credential
host_override: Optional[str] = attrs.field(default=None)
closed: bool = attrs.field(default=False, init=False)
@functools.cached_property
def session(self) -> aiohttp.ClientSession:
url = (
self.host_override
or f"https://{self.db_name}.{self.region}.api.lancedb.com"
)
return aiohttp.ClientSession(url)
async def close(self):
await self.session.close()
self.closed = True
@functools.cached_property
def headers(self) -> Dict[str, str]:
headers = {
"x-api-key": self.api_key,
}
if self.region == "local": # Local test mode
headers["Host"] = f"{self.db_name}.{self.region}.api.lancedb.com"
if self.host_override:
headers["x-lancedb-database"] = self.db_name
return headers
@staticmethod
async def _check_status(resp: aiohttp.ClientResponse):
if resp.status == 404:
raise LanceDBClientError(f"Not found: {await resp.text()}")
elif 400 <= resp.status < 500:
raise LanceDBClientError(
f"Bad Request: {resp.status}, error: {await resp.text()}"
)
elif 500 <= resp.status < 600:
raise LanceDBClientError(
f"Internal Server Error: {resp.status}, error: {await resp.text()}"
)
elif resp.status != 200:
raise LanceDBClientError(
f"Unknown Error: {resp.status}, error: {await resp.text()}"
)
@_check_not_closed
async def get(self, uri: str, params: Union[Dict[str, Any], BaseModel] = None):
"""Send a GET request and returns the deserialized response payload."""
if isinstance(params, BaseModel):
params: Dict[str, Any] = params.dict(exclude_none=True)
async with self.session.get(
uri,
params=params,
headers=self.headers,
timeout=aiohttp.ClientTimeout(total=30),
) as resp:
await self._check_status(resp)
return await resp.json()
@_check_not_closed
async def post(
self,
uri: str,
data: Optional[Union[Dict[str, Any], BaseModel, bytes]] = None,
params: Optional[Dict[str, Any]] = None,
content_type: Optional[str] = None,
deserialize: Callable = lambda resp: resp.json(),
request_id: Optional[str] = None,
) -> Dict[str, Any]:
"""Send a POST request and returns the deserialized response payload.
Parameters
----------
uri : str
The uri to send the POST request to.
data: Union[Dict[str, Any], BaseModel]
request_id: Optional[str]
Optional client side request id to be sent in the request headers.
"""
if isinstance(data, BaseModel):
data: Dict[str, Any] = data.dict(exclude_none=True)
if isinstance(data, bytes):
req_kwargs = {"data": data}
else:
req_kwargs = {"json": data}
headers = self.headers.copy()
if content_type is not None:
headers["content-type"] = content_type
if request_id is not None:
headers["x-request-id"] = request_id
async with self.session.post(
uri,
headers=headers,
params=params,
timeout=aiohttp.ClientTimeout(total=30),
**req_kwargs,
) as resp:
resp: aiohttp.ClientResponse = resp
await self._check_status(resp)
return await deserialize(resp)
@_check_not_closed
async def list_tables(
self, limit: int, page_token: Optional[str] = None
) -> Iterable[str]:
"""List all tables in the database."""
if page_token is None:
page_token = ""
json = await self.get("/v1/table/", {"limit": limit, "page_token": page_token})
return json["tables"]
@_check_not_closed
async def query(self, table_name: str, query: VectorQuery) -> VectorQueryResult:
"""Query a table."""
tbl = await self.post(
f"/v1/table/{table_name}/query/", query, deserialize=_read_ipc
)
return VectorQueryResult(tbl)
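# A hedged usage sketch (database name, region, and key are illustrative):
# import asyncio
# async def demo():
#     client = RestfulLanceDBClient(
#         db_name="my-db", region="us-east-1", api_key="my-api-key"
#     )
#     tables = await client.list_tables(limit=10)
#     await client.close()
# asyncio.run(demo())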
| [
"lancedb.remote.VectorQueryResult"
] | [((1402, 1427), 'attrs.define', 'attrs.define', ([], {'slots': '(False)'}), '(slots=False)\n', (1414, 1427), False, 'import attrs\n'), ((1006, 1024), 'functools.wraps', 'functools.wraps', (['f'], {}), '(f)\n', (1021, 1024), False, 'import functools\n'), ((1548, 1573), 'attrs.field', 'attrs.field', ([], {'default': 'None'}), '(default=None)\n', (1559, 1573), False, 'import attrs\n'), ((1594, 1632), 'attrs.field', 'attrs.field', ([], {'default': '(False)', 'init': '(False)'}), '(default=False, init=False)\n', (1605, 1632), False, 'import attrs\n'), ((1856, 1882), 'aiohttp.ClientSession', 'aiohttp.ClientSession', (['url'], {}), '(url)\n', (1877, 1882), False, 'import aiohttp\n'), ((5743, 5765), 'lancedb.remote.VectorQueryResult', 'VectorQueryResult', (['tbl'], {}), '(tbl)\n', (5760, 5765), False, 'from lancedb.remote import VectorQuery, VectorQueryResult\n'), ((1327, 1353), 'pyarrow.BufferReader', 'pa.BufferReader', (['resp_body'], {}), '(resp_body)\n', (1342, 1353), True, 'import pyarrow as pa\n'), ((3473, 3504), 'aiohttp.ClientTimeout', 'aiohttp.ClientTimeout', ([], {'total': '(30)'}), '(total=30)\n', (3494, 3504), False, 'import aiohttp\n'), ((4904, 4935), 'aiohttp.ClientTimeout', 'aiohttp.ClientTimeout', ([], {'total': '(30)'}), '(total=30)\n', (4925, 4935), False, 'import aiohttp\n')] |