id (int64, 0–190k) | prompt (string, lengths 21–13.4M) | docstring (string, lengths 1–12k, nullable ⌀) |
---|---|---|
189,315 | import importlib, importlib.metadata, importlib.util, os, inspect, typing as t
from .codegen import _make_method
from ._constants import ENV_VARS_TRUE_VALUES as ENV_VARS_TRUE_VALUES
def _has_package(package: str) -> bool:
_package_available = importlib.util.find_spec(package) is not None
if _package_available:
try:
importlib.metadata.version(package)
except importlib.metadata.PackageNotFoundError:
_package_available = False
return _package_available | null |
189,316 | import importlib, importlib.metadata, importlib.util, os, inspect, typing as t
from .codegen import _make_method
from ._constants import ENV_VARS_TRUE_VALUES as ENV_VARS_TRUE_VALUES
_autoawq_available = importlib.util.find_spec('awq') is not None
def is_autoawq_available() -> bool:
global _autoawq_available
try:
importlib.metadata.version('autoawq')
except importlib.metadata.PackageNotFoundError:
_autoawq_available = False
return _autoawq_available | null |
189,317 | import importlib, importlib.metadata, importlib.util, os, inspect, typing as t
from .codegen import _make_method
from ._constants import ENV_VARS_TRUE_VALUES as ENV_VARS_TRUE_VALUES
ENV_VARS_TRUE_AND_AUTO_VALUES = ENV_VARS_TRUE_VALUES.union({'AUTO'})
USE_VLLM = os.getenv('USE_VLLM', 'AUTO').upper()
_vllm_available = importlib.util.find_spec('vllm') is not None
def is_vllm_available() -> bool:
global _vllm_available
if USE_VLLM in ENV_VARS_TRUE_AND_AUTO_VALUES or _vllm_available:
try:
importlib.metadata.version('vllm')
except importlib.metadata.PackageNotFoundError:
_vllm_available = False
return _vllm_available | null |
189,318 | import importlib, importlib.metadata, importlib.util, os, inspect, typing as t
from .codegen import _make_method
from ._constants import ENV_VARS_TRUE_VALUES as ENV_VARS_TRUE_VALUES
caller = {
f'is_{k}': _make_method(
f'is_{k}', f'def is_{k}() -> bool:\n global _{k}\n return _{k}\n', f'generated_file_{k}', {f'_{k}': v}
)
for k, v in _availables.items()
}
def __dir__() -> list[str]:
return [*list(caller.keys()), 'is_autoawq_available', 'is_vllm_available', 'USE_VLLM', 'ENV_VARS_TRUE_VALUES'] | null |
189,319 | import importlib, importlib.metadata, importlib.util, os, inspect, typing as t
from .codegen import _make_method
from ._constants import ENV_VARS_TRUE_VALUES as ENV_VARS_TRUE_VALUES
caller = {
f'is_{k}': _make_method(
f'is_{k}', f'def is_{k}() -> bool:\n global _{k}\n return _{k}\n', f'generated_file_{k}', {f'_{k}': v}
)
for k, v in _availables.items()
}
def __getattr__(it: t.Any) -> t.Any:
if it in caller:
return caller[it]
raise AttributeError(f'module {__name__!r} has no attribute {it!r}') | null |
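The two rows above rely on PEP 562 module-level `__getattr__`/`__dir__` to expose the generated `is_*` helpers as ordinary module members. A self-contained sketch of the same pattern (illustrative names only, independent of the generated `caller` mapping):
# lazy_flags.py — illustrative stand-in for the generated module
import typing as t
caller = {'is_demo_available': lambda: True}  # stands in for the functions produced by _make_method
def __dir__() -> list:
    return sorted(caller)
def __getattr__(it: t.Any) -> t.Any:
    if it in caller:
        return caller[it]
    raise AttributeError(f'module {__name__!r} has no attribute {it!r}')
# elsewhere: `import lazy_flags; lazy_flags.is_demo_available()` returns True,
# `dir(lazy_flags)` lists 'is_demo_available', and unknown names raise AttributeError.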
189,320 | import sys, os
ENV_VARS_TRUE_VALUES = {'1', 'ON', 'YES', 'TRUE'}
def check_bool_env(env: str, default: bool = True):
v = os.getenv(env, default=str(default)).upper()
if v.isdigit():
return bool(int(v)) # special check for digits
return v in ENV_VARS_TRUE_VALUES | null |
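A brief illustration (hypothetical variable names, not part of the snippet) of the truthiness rules implemented by `check_bool_env`:
import os
os.environ['OPENLLM_EXAMPLE_FLAG'] = '0'
assert check_bool_env('OPENLLM_EXAMPLE_FLAG') is False  # digit strings go through int(), so '0' is falsy and '2' would be truthy
os.environ['OPENLLM_EXAMPLE_FLAG'] = 'yes'
assert check_bool_env('OPENLLM_EXAMPLE_FLAG') is True   # case-insensitive membership in ENV_VARS_TRUE_VALUES
assert check_bool_env('OPENLLM_UNSET_FLAG', default=False) is False  # unset variables fall back to str(default)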
189,321 | from __future__ import annotations
import functools
import inspect
import linecache
import logging
import types
import typing as t
from operator import itemgetter
from ._constants import SHOW_CODEGEN
import orjson
if t.TYPE_CHECKING:
import openllm_core
from openllm_core._typing_compat import AnyCallable, DictStrAny, LiteralString
def has_own_attribute(cls: type[t.Any], attrib_name: t.Any) -> bool:
DictStrAny = Dict[str, Any]
def get_annotations(cls: type[t.Any]) -> DictStrAny:
if has_own_attribute(cls, '__annotations__'):
return cls.__annotations__
return {} | null |
189,322 | from __future__ import annotations
import functools
import inspect
import linecache
import logging
import types
import typing as t
from operator import itemgetter
from ._constants import SHOW_CODEGEN
import orjson
if t.TYPE_CHECKING:
import openllm_core
from openllm_core._typing_compat import AnyCallable, DictStrAny, LiteralString
def is_class_var(annot: str | t.Any) -> bool:
annot = str(annot)
# Annotation can be quoted.
if annot.startswith(("'", '"')) and annot.endswith(("'", '"')):
annot = annot[1:-1]
return annot.startswith(('typing.ClassVar', 't.ClassVar', 'ClassVar', 'typing_extensions.ClassVar')) | null |
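Two illustrative checks (a sketch) showing that both quoted and unquoted annotations are recognized by `is_class_var`:
import typing as t
assert is_class_var(t.ClassVar[int]) is True      # str() of the annotation starts with 'typing.ClassVar'
assert is_class_var("'t.ClassVar[str]'") is True  # quoted annotations are stripped of their quotes first
assert is_class_var('int') is False               # ordinary annotations are not class vars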
189,323 | from __future__ import annotations
import functools
import inspect
import linecache
import logging
import types
import typing as t
from operator import itemgetter
from ._constants import SHOW_CODEGEN
import orjson
if t.TYPE_CHECKING:
import openllm_core
from openllm_core._typing_compat import AnyCallable, DictStrAny, LiteralString
_T = t.TypeVar('_T', bound=t.Callable[..., t.Any])
def add_method_dunders(cls: type[t.Any], method_or_cls: _T, _overwrite_doc: str | None = None) -> _T:
try:
method_or_cls.__module__ = cls.__module__
except AttributeError:
pass
try:
method_or_cls.__qualname__ = f'{cls.__qualname__}.{method_or_cls.__name__}'
except AttributeError:
pass
try:
method_or_cls.__doc__ = _overwrite_doc or 'Generated by `openllm` for class ' f'{cls.__qualname__}.'
except AttributeError:
pass
return method_or_cls | null |
189,324 | from __future__ import annotations
import functools
import inspect
import linecache
import logging
import types
import typing as t
from operator import itemgetter
from ._constants import SHOW_CODEGEN
import orjson
if t.TYPE_CHECKING:
import openllm_core
from openllm_core._typing_compat import AnyCallable, DictStrAny, LiteralString
def _compile_and_eval(script, globs, locs=None, filename=''):
eval(compile(script, filename, 'exec'), globs, locs)
SHOW_CODEGEN = (
DEBUG and os.environ.get(DEV_DEBUG_VAR, str(0)).isdigit() and int(os.environ.get(DEV_DEBUG_VAR, str(0))) > 3
)
The provided code snippet includes necessary dependencies for implementing the `make_attr_tuple_class` function. Write a Python function `def make_attr_tuple_class(cls_name: str, attr_names: t.Sequence[str]) -> type[t.Any]` to solve the following problem:
Create a tuple subclass to hold class attributes. The subclass is a bare tuple with properties for names, for example:
class MyClassAttributes(tuple):
    __slots__ = ()
    x = property(itemgetter(0))
Here is the function:
def make_attr_tuple_class(cls_name: str, attr_names: t.Sequence[str]) -> type[t.Any]:
"""Create a tuple subclass to hold class attributes.
The subclass is a bare tuple with properties for names.
class MyClassAttributes(tuple):
__slots__ = ()
x = property(itemgetter(0))
"""
attr_class_name = f'{cls_name}Attributes'
attr_class_template = [f'class {attr_class_name}(tuple):', ' __slots__ = ()']
if attr_names:
for i, attr_name in enumerate(attr_names):
attr_class_template.append(f' {attr_name} = _attrs_property(_attrs_itemgetter({i}))')
attr_class_template.append(f'setattr({attr_class_name}, "__dir__", lambda self: {attr_names!r})')
attr_class_template.append(f'setattr({attr_class_name}, "keys", lambda self: {attr_names!r})')
else:
attr_class_template.append(' pass')
globs = {'_attrs_itemgetter': itemgetter, '_attrs_property': property}
if SHOW_CODEGEN:
print(f'Generated class for {attr_class_name}:\n\n', '\n'.join(attr_class_template))
_compile_and_eval('\n'.join(attr_class_template), globs)
return globs[attr_class_name] | Create a tuple subclass to hold class attributes. The subclass is a bare tuple with properties for names. class MyClassAttributes(tuple): __slots__ = () x = property(itemgetter(0)) |
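A short usage sketch (illustrative class and attribute names) of the tuple subclass produced by `make_attr_tuple_class`, matching the docstring above:
Coordinates = make_attr_tuple_class('Coordinates', ['x', 'y'])
point = Coordinates((1, 2))           # still a plain tuple underneath
assert point.x == 1 and point.y == 2  # properties map names to tuple positions via itemgetter
assert point.keys() == ['x', 'y']     # the generated keys() returns the attribute names
assert dir(point) == ['x', 'y']       # __dir__ is patched to advertise the attributes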
189,325 | from __future__ import annotations
import functools
import inspect
import linecache
import logging
import types
import typing as t
from operator import itemgetter
from ._constants import SHOW_CODEGEN
import orjson
if t.TYPE_CHECKING:
import openllm_core
from openllm_core._typing_compat import AnyCallable, DictStrAny, LiteralString
def generate_function(
typ: type[t.Any],
func_name: str, #
lines: list[str] | None,
args: tuple[str, ...] | None,
globs: dict[str, t.Any],
annotations: dict[str, t.Any] | None = None,
) -> AnyCallable:
script = 'def %s(%s):\n %s\n' % (
func_name,
', '.join(args) if args is not None else '',
'\n '.join(lines) if lines else 'pass',
)
meth = _make_method(func_name, script, generate_unique_filename(typ, func_name), globs)
if annotations:
meth.__annotations__ = annotations
if SHOW_CODEGEN:
print(f'Generated script for {typ}:\n\n', script)
return meth
AnyCallable = Callable[..., Any]
DictStrAny = Dict[str, Any]
def field_env_key(key, suffix=None):
return '_'.join(filter(None, map(str.upper, ['OPENLLM', suffix.strip('_') if suffix else '', key])))
def make_env_transformer(
cls: type[openllm_core.LLMConfig],
model_name: str, #
suffix: LiteralString | None = None,
default_callback: t.Callable[[str, t.Any], t.Any] | None = None,
globs: DictStrAny | None = None,
) -> AnyCallable:
from openllm_core.utils import dantic, field_env_key
def identity(_: str, x_value: t.Any) -> t.Any:
return x_value
globs = {} if globs is None else globs
globs.update({
'__populate_env': dantic.env_converter,
'__field_env': field_env_key, #
'__suffix': suffix or '',
'__model_name': model_name, #
'__default_callback': identity if default_callback is None else default_callback,
})
fields_ann = 'list[attr.Attribute[t.Any]]'
return generate_function(
cls,
'__auto_env', #
[
'__env=lambda field_name:__field_env(field_name,__suffix)',
"return [f.evolve(default=__populate_env(__default_callback(f.name,f.default),__env(f.name)),metadata={'env':f.metadata.get('env',__env(f.name)),'description':f.metadata.get('description', '(not provided)')}) for f in fields]",
],
('_', 'fields'),
globs,
{'_': 'type[LLMConfig]', 'fields': fields_ann, 'return': fields_ann}, #
) | null |
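A quick illustration (a sketch) of `field_env_key`, which builds the canonical environment-variable name that `make_env_transformer` attaches to each field:
assert field_env_key('temperature') == 'OPENLLM_TEMPERATURE'  # empty parts are filtered out
assert field_env_key('temperature', suffix='generation') == 'OPENLLM_GENERATION_TEMPERATURE'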
189,326 | from __future__ import annotations
import functools
import inspect
import linecache
import logging
import types
import typing as t
from operator import itemgetter
from ._constants import SHOW_CODEGEN
import orjson
if t.TYPE_CHECKING:
import openllm_core
from openllm_core._typing_compat import AnyCallable, DictStrAny, LiteralString
_T = t.TypeVar('_T', bound=t.Callable[..., t.Any])
class ReprMixin:
@property
def __repr_keys__(self) -> set[str]:
raise NotImplementedError
def __repr__(self) -> str:
return f'{self.__class__.__name__} {orjson.dumps({k: utils.converter.unstructure(v) if attr.has(v) else v for k, v in self.__repr_args__()}, option=orjson.OPT_INDENT_2).decode()}'
def __str__(self) -> str:
return self.__repr_str__(' ')
def __repr_name__(self) -> str:
return self.__class__.__name__
def __repr_str__(self, join_str: str) -> str:
return join_str.join(repr(v) if a is None else f'{a}={v!r}' for a, v in self.__repr_args__())
def __repr_args__(self) -> ReprArgs:
return ((k, getattr(self, k)) for k in self.__repr_keys__)
def gen_sdk(func: _T, name: str | None = None, **attrs: t.Any) -> _T:
from .representation import ReprMixin
if name is None:
name = func.__name__.strip('_')
_signatures = inspect.signature(func).parameters
def _repr(self: ReprMixin) -> str:
return f'<generated function {name} {orjson.dumps(dict(self.__repr_args__()), option=orjson.OPT_NON_STR_KEYS | orjson.OPT_INDENT_2).decode()}>'
def _repr_args(self: ReprMixin) -> t.Iterator[t.Tuple[str, t.Any]]:
return ((k, _signatures[k].annotation) for k in self.__repr_keys__)
return functools.update_wrapper(
types.new_class(
name,
(functools.partial, ReprMixin),
exec_body=lambda ns: ns.update({
'__repr_keys__': property(lambda _: [i for i in _signatures.keys() if not i.startswith('_')]),
'__repr_args__': _repr_args,
'__repr__': _repr, #
'__doc__': inspect.cleandoc(f'Generated SDK for {func.__name__}' if func.__doc__ is None else func.__doc__),
'__module__': 'openllm',
}),
)(func, **attrs),
func,
) | null |
189,332 | from __future__ import annotations
import typing as t
from datetime import datetime
import attr
from cattr import Converter
from cattr.gen import make_dict_structure_fn, make_dict_unstructure_fn
def datetime_structure_hook(dt_like: str | datetime | t.Any, _: t.Any) -> datetime:
if isinstance(dt_like, str):
return datetime.fromisoformat(dt_like)
elif isinstance(dt_like, datetime):
return dt_like
else:
raise Exception(f"Unable to parse datetime from '{dt_like}'") | null |
189,333 | from __future__ import annotations
import functools, importlib, os, sys, typing as t
from enum import Enum
import attr, click, inflection, orjson, click_option_group as cog
from click import ParamType, shell_completion as sc, types as click_types
from .._typing_compat import overload, Unpack
__all__ = [
'CUDA',
'FC',
'BytesType',
'EnumChoice',
'Field',
'JsonType',
'LiteralChoice',
'ModuleType',
'allows_multiple',
'attach_pydantic_model',
'attrs_to_options',
'is_container',
'is_literal',
'is_mapping',
'is_typing',
'parse_container_args',
'parse_single_arg',
'parse_type',
]
def __dir__() -> list[str]:
return sorted(__all__) | null |
189,334 | from __future__ import annotations
import functools, importlib, os, sys, typing as t
from enum import Enum
import attr, click, inflection, orjson, click_option_group as cog
from click import ParamType, shell_completion as sc, types as click_types
from .._typing_compat import overload, Unpack
if t.TYPE_CHECKING:
from attr import _ValidatorType
from pydantic import ConfigDict
T = t.TypeVar('T')
def attach_pydantic_model(klass: t.Type[T], /, **config: Unpack[ConfigDict]) -> t.Type[T]: ... | null |
189,335 | from __future__ import annotations
import functools, importlib, os, sys, typing as t
from enum import Enum
import attr, click, inflection, orjson, click_option_group as cog
from click import ParamType, shell_completion as sc, types as click_types
from .._typing_compat import overload, Unpack
if t.TYPE_CHECKING:
from attr import _ValidatorType
from pydantic import ConfigDict
T = t.TypeVar('T')
def attach_pydantic_model(**config: Unpack[ConfigDict]) -> t.Callable[[t.Type[T]], t.Type[T]]: ... | null |
189,336 | from __future__ import annotations
import functools, importlib, os, sys, typing as t
from enum import Enum
import attr, click, inflection, orjson, click_option_group as cog
from click import ParamType, shell_completion as sc, types as click_types
from .._typing_compat import overload, Unpack
if t.TYPE_CHECKING:
from attr import _ValidatorType
from pydantic import ConfigDict
T = t.TypeVar('T')
def _error_callable(field: str):
def resolve_attrib_types(typ_: t.Any) -> t.Any:
def Field(
default: t.Any = None,
*,
ge: int | float | None = None,
le: int | float | None = None,
validator: _ValidatorType[t.Any] | None = None,
description: str | None = None,
env: str | None = None,
auto_default: bool = False,
use_default_converter: bool = True,
**attrs: t.Any,
) -> t.Any:
def pkg_version_info(pkg_name: str | ModuleType) -> tuple[int, int, int]:
def attach_pydantic_model(
klass: t.Optional[t.Type[T]] = None, /, **config: Unpack[ConfigDict]
) -> t.Type[T] | t.Callable[[t.Type[T]], t.Type[T]]:
# attach a cls.pydantic_model() -> pydantic.BaseModel compatible components.
def _decorator(_cls: t.Type[T]) -> t.Type[T]:
if not attr.has((_cls := attr.resolve_types(_cls))):
raise TypeError('this decorator should only be used with attrs-compatible classes')
from .pkg import pkg_version_info
if (ver := pkg_version_info('pydantic')) < (2,):
raise ImportError(f'Requires pydantic>=2.0, but found {".".join(map(str, ver))} instead.')
try:
from _bentoml_sdk.io_models import IODescriptor
except ImportError:
raise ImportError('Requires bentoml>=1.2 to be installed. Do "pip install -U "bentoml>=1.2""') from None
import pydantic
field_dict = {}
for key, attrib in attr.fields_dict(_cls).items():
attrib_type = resolve_attrib_types(attrib.type)
if attrib.default is attr.NOTHING:
field_dict[key] = (attrib_type, pydantic.Field(default_factory=_error_callable(key)))
elif isinstance(attrib.default, attr.Factory):
field_dict[key] = (attrib_type, pydantic.Field(default_factory=attrib.default.factory))
else:
field_dict[key] = (attrib_type, pydantic.Field(default=attrib.default))
def create_dantic_class(cls):
_klass = pydantic.create_model(
cls.__name__ + 'Pydantic',
__base__=(
IODescriptor,
pydantic.create_model(cls.__name__ + 'BaseModel', metadata_config=config, __module__=cls.__module__),
),
__module__=cls.__module__,
**field_dict,
)
_klass.media_type = 'application/json'
return _klass
setattr(_cls, 'pydantic_model', classmethod(create_dantic_class))
setattr(_cls, '__openllm_attach_pydantic_model__', True)
return _cls
return _decorator if klass is None else _decorator(klass) | null |
189,337 | from __future__ import annotations
import functools, importlib, os, sys, typing as t
from enum import Enum
import attr, click, inflection, orjson, click_option_group as cog
from click import ParamType, shell_completion as sc, types as click_types
from .._typing_compat import overload, Unpack
if t.TYPE_CHECKING:
from attr import _ValidatorType
from pydantic import ConfigDict
FC = t.TypeVar('FC', bound=t.Union[AnyCallable, click.Command])
def parse_type(field_type: t.Any) -> ParamType | tuple[ParamType, ...]:
"""Transforms the pydantic field's type into a click-compatible type.
Args:
field_type: pydantic field type
Returns:
ParamType: click type equivalent
"""
from . import lenient_issubclass
if t.get_origin(field_type) is t.Union:
raise NotImplementedError('Unions are not supported')
# enumeration strings or other Enum derivatives
if lenient_issubclass(field_type, Enum):
return EnumChoice(enum=field_type, case_sensitive=True)
# literals are enum-like with way less functionality
if is_literal(field_type):
return LiteralChoice(value=field_type, case_sensitive=True)
# modules, classes, functions
if is_typing(field_type):
return ModuleType()
# entire dictionaries:
# using a Dict, convert in advance
if is_mapping(field_type):
return JsonType()
# list, List[p], Tuple[p], Set[p] and so on
if is_container(field_type):
return parse_container_args(field_type)
# bytes are not natively supported by click
if lenient_issubclass(field_type, bytes):
return BytesType()
# return the current type: it should be a primitive
return field_type
def allows_multiple(field_type: type[t.Any]) -> bool:
"""Checks whether the current type allows for multiple arguments to be provided as input or not.
For containers, it exploits click's support for lists and such to use the same option multiple times
to create a complex object: `python run.py --subsets train --subsets test`
# becomes `subsets: ["train", "test"]`.
Args:
field_type: pydantic type.
Returns:
bool: true if it's a composite field (lists, containers and so on), false otherwise
"""
# Early out for mappings, since it's better to deal with them using strings.
if is_mapping(field_type):
return False
# Activate multiple option for (simple) container types
if is_container(field_type):
args = parse_container_args(field_type)
# A non-composite type has a single argument, such as 'List[int]'
# A composite type has a tuple of arguments, like 'Tuple[str, int, int]'.
# For the moment, only non-composite types are allowed.
return not isinstance(args, tuple)
return False
def attrs_to_options(
name: str,
field: attr.Attribute[t.Any],
model_name: str,
typ: t.Any = None,
suffix_generation: bool = False,
suffix_sampling: bool = False,
) -> t.Callable[[FC], FC]:
# TODO: support parsing nested attrs class and Union
envvar = field.metadata['env']
dasherized = inflection.dasherize(name)
underscored = inflection.underscore(name)
if typ in (None, attr.NOTHING):
typ = field.type
if typ is None:
raise RuntimeError(f'Failed to parse type for {name}')
full_option_name = f'--{dasherized}'
if field.type is bool:
full_option_name += f'/--no-{dasherized}'
if suffix_generation:
identifier = f'{model_name}_generation_{underscored}'
elif suffix_sampling:
identifier = f'{model_name}_sampling_{underscored}'
else:
identifier = f'{model_name}_{underscored}'
return cog.optgroup.option(
identifier,
full_option_name,
type=parse_type(typ),
required=field.default is attr.NOTHING,
default=field.default if field.default not in (attr.NOTHING, None) else None,
show_default=True,
multiple=allows_multiple(typ) if typ else False,
help=field.metadata.get('description', '(No description provided)'),
show_envvar=True,
envvar=envvar,
) | null |
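A hedged sketch of the behaviour documented in the docstrings above, assuming the helper predicates from the same module (`is_mapping`, `is_container`, `parse_container_args`) behave as described there:
import typing as t
assert allows_multiple(t.List[int]) is True         # simple containers become repeatable click options
assert allows_multiple(t.Tuple[str, int]) is False  # composite tuples map to a tuple of ParamTypes, so no multiple
assert allows_multiple(t.Dict[str, int]) is False   # mappings are passed as JSON strings instead
# e.g. a List[str] field named 'subsets' becomes: --subsets train --subsets test -> ['train', 'test']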
189,338 | from __future__ import annotations
import functools, importlib, os, sys, typing as t
from enum import Enum
import attr, click, inflection, orjson, click_option_group as cog
from click import ParamType, shell_completion as sc, types as click_types
from .._typing_compat import overload, Unpack
def _get_argv_encoding() -> str:
import locale
return locale.getpreferredencoding() | null |
189,339 | from __future__ import annotations
import functools, importlib, os, sys, typing as t
from enum import Enum
import attr, click, inflection, orjson, click_option_group as cog
from click import ParamType, shell_completion as sc, types as click_types
from .._typing_compat import overload, Unpack
if sys.platform.startswith('win') and WIN:
    def _get_argv_encoding() -> str:
        # Windows branch was elided in this snippet; UTF-8 is assumed here as a conservative default.
        return 'utf-8'
else:
    def _get_argv_encoding() -> str:
        return getattr(sys.stdin, 'encoding', None) or sys.getfilesystemencoding() | null |
189,340 | from __future__ import annotations
import contextlib
import functools
import importlib.metadata
import logging
import os
import re
import typing as t
import attr
import openllm_core
from openllm_core._typing_compat import ParamSpec
P = ParamSpec('P')
T = t.TypeVar('T')
logger = logging.getLogger(__name__)
def _usage_event_debugging() -> bool:
return os.environ.get('__BENTOML_DEBUG_USAGE', str(False)).lower() == 'true'
def silent(func: t.Callable[P, T]) -> t.Callable[P, T]:
@functools.wraps(func)
def wrapper(*args: P.args, **kwargs: P.kwargs) -> t.Any:
try:
return func(*args, **kwargs)
except Exception as err:
if _usage_event_debugging():
if openllm_core.utils.get_debug_mode():
logger.error('Tracking Error: %s', err, stack_info=True, stacklevel=3)
else:
logger.info('Tracking Error: %s', err)
else:
logger.debug('Tracking Error: %s', err)
return wrapper | null |
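A tiny usage sketch (hypothetical function) of the `silent` decorator above; exceptions are swallowed and routed to the logger instead of propagating:
@silent
def _flaky_tracking_call() -> None:
    raise RuntimeError('telemetry backend unreachable')
assert _flaky_tracking_call() is None  # the RuntimeError is logged (at debug level by default), not raised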
189,341 | from __future__ import annotations
import contextlib
import functools
import importlib.metadata
import logging
import os
import re
import typing as t
import attr
import openllm_core
from openllm_core._typing_compat import ParamSpec
def do_not_track() -> bool:
return openllm_core.utils.check_bool_env(OPENLLM_DO_NOT_TRACK)
@contextlib.contextmanager
def set_bentoml_tracking() -> t.Generator[None, None, None]:
from bentoml._internal.utils import analytics
original_value = os.environ.pop(analytics.BENTOML_DO_NOT_TRACK, str(False))
try:
os.environ[analytics.BENTOML_DO_NOT_TRACK] = str(do_not_track())
yield
finally:
os.environ[analytics.BENTOML_DO_NOT_TRACK] = original_value | null |
189,342 | from __future__ import annotations
import contextlib
import functools
import importlib.metadata
import logging
import os
import re
import typing as t
import attr
import openllm_core
from openllm_core._typing_compat import ParamSpec
def do_not_track() -> bool:
return openllm_core.utils.check_bool_env(OPENLLM_DO_NOT_TRACK)
def track(event_properties: attr.AttrsInstance) -> None:
from bentoml._internal.utils import analytics
if do_not_track():
return
analytics.track(t.cast('analytics.schemas.EventMeta', event_properties))
class StartInitEvent(EventMeta):
model_name: str
llm_config: t.Dict[str, t.Any] = attr.field(default=None)
def handler(llm_config: openllm_core.LLMConfig) -> StartInitEvent:
return StartInitEvent(model_name=llm_config['model_name'], llm_config=llm_config.model_dump())
def track_start_init(llm_config: openllm_core.LLMConfig) -> None:
if do_not_track():
return
track(StartInitEvent.handler(llm_config)) | null |
189,343 | from __future__ import annotations
import enum
import typing as t
import attr
import inflection
from deepmerge import Merger
from . import dantic
from ..exceptions import ForbiddenAttributeError
class PeftType(str, enum.Enum, metaclass=_PeftEnumMeta):
PROMPT_TUNING = 'PROMPT_TUNING'
MULTITASK_PROMPT_TUNING = 'MULTITASK_PROMPT_TUNING'
P_TUNING = 'P_TUNING'
PREFIX_TUNING = 'PREFIX_TUNING'
LORA = 'LORA'
ADALORA = 'ADALORA'
ADAPTION_PROMPT = 'ADAPTION_PROMPT'
IA3 = 'IA3'
LOHA = 'LOHA'
LOKR = 'LOKR'
def _missing_(cls, value: object) -> enum.Enum | None:
if isinstance(value, str):
normalized = inflection.underscore(value).upper()
if normalized in cls._member_map_:
return cls._member_map_[normalized]
return None
def supported(cls) -> set[str]:
return {inflection.underscore(v.value) for v in cls}
def get(__key: str | t.Any, /) -> PeftType:
return PeftType[__key] # type-safe getitem.
AdapterType = Literal[
'lora', 'adalora', 'adaption_prompt', 'prefix_tuning', 'p_tuning', 'prompt_tuning', 'ia3', 'loha', 'lokr'
]
def _adapter_converter(value: AdapterType | str | PeftType | None) -> PeftType:
if value is None:
raise ValueError("'AdapterType' cannot be None.")
if isinstance(value, PeftType):
return value
if value not in PeftType.supported():
raise ValueError(f"Given '{value}' is not a supported adapter type.")
return PeftType.get(value) | null |
189,344 | from __future__ import annotations
import importlib.metadata, logging, os, pathlib
import bentoml, orjson, openllm_core
from simple_di import Provide, inject
from bentoml._internal.bento.build_config import BentoBuildConfig, DockerOptions, ModelSpec, PythonOptions
from bentoml._internal.configuration.containers import BentoMLContainer
from openllm_core.utils import SHOW_CODEGEN, check_bool_env, pkg
logger = logging.getLogger(__name__)
_service_file = pathlib.Path(os.path.abspath(__file__)).parent.parent / '_service.py'
_SERVICE_VARS = '''import orjson;model_id,model_tag,adapter_map,serialization,trust_remote_code,max_model_len,gpu_memory_utilization,services_config='{__model_id__}','{__model_tag__}',orjson.loads("""{__model_adapter_map__}"""),'{__model_serialization__}',{__model_trust_remote_code__},{__max_model_len__},{__gpu_memory_utilization__},orjson.loads("""{__services_config__}""")'''
def construct_python_options(llm, llm_fs, extra_dependencies=None, adapter_map=None):
from . import RefResolver
packages = [
'scipy',
'bentoml[tracing]>=1.2',
f'openllm[vllm]>={RefResolver.from_strategy("release").version}',
] # apparently bnb misses this one
if adapter_map is not None:
packages += ['openllm[fine-tune]']
if extra_dependencies is not None:
packages += [f'openllm[{k}]' for k in extra_dependencies]
if llm.config['requirements'] is not None:
packages.extend(llm.config['requirements'])
built_wheels = [build_editable(llm_fs.getsyspath('/'), p) for p in ('openllm_core', 'openllm_client', 'openllm')]
return PythonOptions(
packages=packages,
wheels=[llm_fs.getsyspath(f"/{i.split('/')[-1]}") for i in built_wheels] if all(i for i in built_wheels) else None,
lock_packages=False,
)
def construct_docker_options(llm, _, quantize, adapter_map, dockerfile_template, serialisation):
from openllm_cli.entrypoint import process_environ
environ = process_environ(
llm.config,
llm.config['timeout'],
1.0,
None,
True,
llm.model_id,
None,
llm._serialisation,
llm,
use_current_env=False,
)
# XXX: We need to quote this so that the envvar in container recognize as valid json
environ['OPENLLM_CONFIG'] = f"'{environ['OPENLLM_CONFIG']}'"
environ.pop('BENTOML_HOME', None) # NOTE: irrelevant in container
environ['NVIDIA_DRIVER_CAPABILITIES'] = 'compute,utility'
return DockerOptions(python_version='3.11', env=environ, dockerfile_template=dockerfile_template)
def create_bento(
bento_tag,
llm_fs,
llm, #
quantize,
dockerfile_template, #
adapter_map=None,
extra_dependencies=None,
serialisation=None, #
_bento_store=Provide[BentoMLContainer.bento_store],
_model_store=Provide[BentoMLContainer.model_store],
):
_serialisation = openllm_core.utils.first_not_none(serialisation, default=llm.config['serialisation'])
labels = dict(llm.identifying_params)
labels.update({
'_type': llm.llm_type,
'_framework': llm.__llm_backend__,
'start_name': llm.config['start_name'],
'base_name_or_path': llm.model_id,
'bundler': 'openllm.bundle',
**{
f'{package.replace("-", "_")}_version': importlib.metadata.version(package)
for package in {'openllm', 'openllm-core', 'openllm-client'}
},
})
if adapter_map:
labels.update(adapter_map)
logger.debug("Building Bento '%s' with model backend '%s'", bento_tag, llm.__llm_backend__)
logger.debug('Generating service vars %s (dir=%s)', llm.model_id, llm_fs.getsyspath('/'))
script = f"# fmt: off\n# GENERATED BY 'openllm build {llm.model_id}'. DO NOT EDIT\n" + _SERVICE_VARS.format(
__model_id__=llm.model_id,
__model_tag__=str(llm.tag), #
__model_adapter_map__=orjson.dumps(adapter_map).decode(),
__model_serialization__=llm.config['serialisation'], #
__model_trust_remote_code__=str(llm.trust_remote_code),
__max_model_len__=llm._max_model_len,
__gpu_memory_utilization__=llm._gpu_memory_utilization, #
)
if SHOW_CODEGEN:
logger.info('Generated _service_vars.py:\n%s', script)
llm_fs.writetext('_service_vars.py', script)
with open(_service_file.__fspath__(), 'r') as f:
service_src = f.read()
llm_fs.writetext(llm.config['service_name'], service_src)
return bentoml.Bento.create(
version=bento_tag.version,
build_ctx=llm_fs.getsyspath('/'),
build_config=BentoBuildConfig(
service=f"{llm.config['service_name']}:svc",
name=bento_tag.name,
labels=labels,
models=[ModelSpec.from_item({'tag': str(llm.tag), 'alias': llm.tag.name})],
description=f"OpenLLM service for {llm.config['start_name']}",
include=list(llm_fs.walk.files()),
exclude=['/venv', '/.venv', '__pycache__/', '*.py[cod]', '*$py.class'],
python=construct_python_options(llm, llm_fs, extra_dependencies, adapter_map),
docker=construct_docker_options(llm, llm_fs, quantize, adapter_map, dockerfile_template, _serialisation),
),
).save(bento_store=_bento_store, model_store=_model_store) | null |
189,346 | import functools, importlib.metadata, openllm_core
def generate_labels(serialisation):
return {
'framework': 'openllm',
'serialisation': serialisation,
**{package: importlib.metadata.version(package) for package in {'openllm', 'openllm-core', 'openllm-client'}},
} | null |
189,347 | import functools, importlib.metadata, openllm_core
def available_devices():
from ._strategies import NvidiaGpuResource
return tuple(NvidiaGpuResource.from_system())
def device_count() -> int:
return len(available_devices()) | null |
189,348 | import functools, importlib.metadata, openllm_core
__all__ = ['available_devices', 'device_count', 'generate_labels']
def __dir__():
coreutils = set(dir(openllm_core.utils)) | set([it for it in openllm_core.utils._extras if not it.startswith('_')])
return sorted(__all__) + sorted(list(coreutils)) | null |
189,349 | import functools, importlib.metadata, openllm_core
def __getattr__(it):
if hasattr(openllm_core.utils, it):
return getattr(openllm_core.utils, it)
raise AttributeError(f'module {__name__} has no attribute {it}') | null |
189,350 |
def __dir__():
import openllm_client as _client
return sorted(dir(_client)) | null |
189,351 |
def __getattr__(it):
import openllm_client as _client
return getattr(_client, it) | null |
189,352 | from __future__ import annotations
import gc, types, typing as t
import torch, bentoml, openllm
from openllm_core._schemas import CompletionChunk, GenerationOutput, SampleLogprobs
from openllm_core.utils import ReprMixin, is_vllm_available
_registry = {}
def registry(cls=None, *, alias=None):
def decorator(_cls):
_registry[_cls.__name__[:-8].lower() if alias is None else alias] = _cls
return _cls
if cls is None:
return decorator
return decorator(cls) | null |
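An illustrative sketch (hypothetical classes) of how `registry` keys the `_registry` mapping: without an alias it strips the trailing 'Runnable' suffix and lower-cases the remainder:
@registry
class VLLMRunnable:  # registered under 'vllm' ('VLLMRunnable'[:-8].lower())
    pass
@registry(alias='pt')
class PyTorchRunnable:  # registered under the explicit alias 'pt'
    pass
assert _registry['vllm'] is VLLMRunnable and _registry['pt'] is PyTorchRunnable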
189,353 | from __future__ import annotations
import gc, types, typing as t
import torch, bentoml, openllm
from openllm_core._schemas import CompletionChunk, GenerationOutput, SampleLogprobs
from openllm_core.utils import ReprMixin, is_vllm_available
_registry = {}
M = TypeVar('M')
T = TypeVar('T')
def runner(llm: openllm.LLM[M, T]) -> Runner[M, T]:
try:
assert llm.bentomodel
except (bentoml.exceptions.NotFound, AssertionError) as err:
raise RuntimeError(f'Failed to locate {llm.bentomodel}: {err}') from err
return types.new_class(
llm.config.__class__.__name__[:-6] + 'Runner',
(bentoml.Runner,), #
exec_body=lambda ns: ns.update({
'llm_type': llm.llm_type,
'identifying_params': llm.identifying_params, #
'llm_tag': llm.tag,
'llm': llm,
'config': llm.config,
'backend': llm.__llm_backend__, #
'__module__': llm.__module__,
'__repr__': ReprMixin.__repr__, #
'__doc__': llm.config.__class__.__doc__ or f'Generated Runner class for {llm.config["model_name"]}',
'__repr_keys__': property(lambda _: {'config', 'llm_type', 'runner_methods', 'backend', 'llm_tag'}),
'__repr_args__': lambda _: (
(
'runner_methods',
{
method.name: {
'batchable': method.config.batchable,
'batch_dim': method.config.batch_dim if method.config.batchable else None,
}
for method in _.runner_methods
},
),
('config', llm.config.model_dump()),
('llm_type', llm.llm_type),
('backend', llm.__llm_backend__),
('llm_tag', llm.tag),
),
'has_adapters': llm.has_adapters,
'template': llm.config.template,
'system_message': llm.config.system_message,
}),
)(
_registry[llm.__llm_backend__],
name=f"llm-{llm.config['start_name']}-runner",
models=[llm.bentomodel],
scheduling_strategy=openllm.CascadingResourceStrategy,
runnable_init_params={'llm': llm},
) | null |
189,354 | from __future__ import annotations
from openllm_core.exceptions import MissingDependencyError
from openllm_core.utils import is_autoawq_available, is_autogptq_available, is_bitsandbytes_available
class MissingDependencyError(BaseException):
"""Raised when a dependency is missing."""
def infer_quantisation_config(llm, quantise, **attrs):
import torch, transformers
# 8 bit configuration
int8_threshold = attrs.pop('llm_int8_threshold', 6.0)
int8_enable_fp32_cpu_offload = attrs.pop('llm_int8_enable_fp32_cpu_offload', False)
int8_skip_modules: list[str] | None = attrs.pop('llm_int8_skip_modules', None)
int8_has_fp16_weight = attrs.pop('llm_int8_has_fp16_weight', False)
# shared arguments for gptq and awq
bits = attrs.pop('bits', 4)
group_size = attrs.pop('group_size', 128)
# 4 bit configuration
int4_compute_dtype = attrs.pop('bnb_4bit_compute_dtype', torch.bfloat16)
int4_quant_type = attrs.pop('bnb_4bit_quant_type', 'nf4')
int4_use_double_quant = attrs.pop('bnb_4bit_use_double_quant', True)
def create_awq_config():
zero_point = attrs.pop('zero_point', True)
return transformers.AwqConfig(bits=bits, group_size=group_size, zero_point=zero_point)
def create_gptq_config():
gptq_tokenizer = attrs.pop('tokenizer', llm.model_id)
gptq_dataset = attrs.pop('dataset', 'c4')
gptq_damp_percent = attrs.pop('damp_percent', 0.1)
gptq_desc_act = attrs.pop('desc_act', False)
gptq_sym = attrs.pop('sym', True)
gptq_true_sequential = attrs.pop('true_sequential', True)
gptq_use_cuda_fp16 = attrs.pop('use_cuda_fp16', True if torch.cuda.is_available() else False)
gptq_model_seqlen = attrs.pop('model_seqlen', None)
gptq_block_name_to_quantize = attrs.pop('block_name_to_quantize', None)
gptq_module_name_preceding_first_block = attrs.pop('module_name_preceding_first_block', None)
gptq_batch_size = attrs.pop('batch_size', 1)
gptq_pad_token_id = attrs.pop('pad_token_id', None)
disable_exllama = attrs.pop('disable_exllama', False) # backward compatibility
gptq_use_exllama = attrs.pop('use_exllama', True)
if disable_exllama:
gptq_use_exllama = False
return transformers.GPTQConfig(
bits=bits,
tokenizer=gptq_tokenizer,
dataset=gptq_dataset,
group_size=group_size,
damp_percent=gptq_damp_percent,
desc_act=gptq_desc_act,
sym=gptq_sym,
true_sequential=gptq_true_sequential,
use_cuda_fp16=gptq_use_cuda_fp16,
model_seqlen=gptq_model_seqlen,
block_name_to_quantize=gptq_block_name_to_quantize,
module_name_preceding_first_block=gptq_module_name_preceding_first_block,
batch_size=gptq_batch_size,
pad_token_id=gptq_pad_token_id,
use_exllama=gptq_use_exllama,
exllama_config={'version': 1},
) # XXX: See how to migrate to v2
def create_int8_config(int8_skip_modules):
# if int8_skip_modules is None: int8_skip_modules = []
# if 'lm_head' not in int8_skip_modules and self.config_class.__openllm_model_type__ == 'causal_lm':
# int8_skip_modules.append('lm_head')
return transformers.BitsAndBytesConfig(
load_in_8bit=True,
llm_int8_enable_fp32_cpu_offload=int8_enable_fp32_cpu_offload,
llm_int8_threshold=int8_threshold,
llm_int8_skip_modules=int8_skip_modules,
llm_int8_has_fp16_weight=int8_has_fp16_weight,
)
def create_int4_config():
return transformers.BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_compute_dtype=int4_compute_dtype,
bnb_4bit_quant_type=int4_quant_type,
bnb_4bit_use_double_quant=int4_use_double_quant,
)
# NOTE: `quantize` is an openllm.LLM feature; the model can be quantized with bitsandbytes or via quantization-aware training.
if not is_bitsandbytes_available():
raise RuntimeError(
'Quantization requires bitsandbytes to be installed. Make sure to install OpenLLM with \'pip install "openllm[fine-tune]"\''
)
if quantise == 'int8':
quantisation_config = create_int8_config(int8_skip_modules)
elif quantise == 'int4':
quantisation_config = create_int4_config()
elif quantise == 'gptq':
if not is_autogptq_available():
raise MissingDependencyError(
"GPTQ requires 'auto-gptq' and 'optimum>=0.12' to be installed. Do it with 'pip install \"openllm[gptq]\"'"
)
else:
quantisation_config = create_gptq_config()
elif quantise == 'awq':
if not is_autoawq_available():
raise MissingDependencyError(
"AWQ requires 'auto-awq' to be installed. Do it with 'pip install \"openllm[awq]\"'."
)
else:
quantisation_config = create_awq_config()
else:
raise ValueError(f"'quantize' must be one of ['int8', 'int4', 'gptq', 'awq'], got {quantise} instead.")
return quantisation_config, attrs | null |
189,355 | from __future__ import annotations
import inspect, logging, math, os, sys, types, warnings, typing as t
import psutil, bentoml, openllm_core.utils as coreutils
from bentoml._internal.resource import get_resource, system_resources
from bentoml._internal.runner.strategy import THREAD_ENVS
def _strtoul(s: str) -> int:
def _parse_list_with_prefix(lst: str, prefix: str) -> list[str]:
def _parse_cuda_visible_devices(default_var: str | None = None, respect_env: bool = True) -> list[str] | None:
if respect_env:
spec = os.environ.get('CUDA_VISIBLE_DEVICES', default_var)
if not spec:
return None
else:
if default_var is None:
raise ValueError('spec is required to be not None when parsing spec.')
spec = default_var
if spec.startswith('GPU-'):
return _parse_list_with_prefix(spec, 'GPU-')
if spec.startswith('MIG-'):
return _parse_list_with_prefix(spec, 'MIG-')
# XXX: We need to somehow handle cases such as '100m'
# CUDA_VISIBLE_DEVICES uses something like strtoul
# which makes `1gpu2,2ampere` equivalent to `1,2`
rc: list[int] = []
for el in spec.split(','):
x = _strtoul(el.strip())
# Repeated ordinal results in empty set
if x in rc:
return []
# Negative value aborts the sequence
if x < 0:
break
rc.append(x)
return [str(i) for i in rc] | null |
189,356 | from __future__ import annotations
import inspect, logging, math, os, sys, types, warnings, typing as t
import psutil, bentoml, openllm_core.utils as coreutils
from bentoml._internal.resource import get_resource, system_resources
from bentoml._internal.runner.strategy import THREAD_ENVS
def _raw_device_uuid_nvml() -> list[str] | None:
from ctypes import CDLL, byref, c_int, c_void_p, create_string_buffer
try:
nvml_h = CDLL('libnvidia-ml.so.1')
except Exception:
warnings.warn('Failed to find nvidia binding', stacklevel=3)
return None
rc = nvml_h.nvmlInit()
if rc != 0:
warnings.warn("Can't initialize NVML", stacklevel=3)
return None
dev_count = c_int(-1)
rc = nvml_h.nvmlDeviceGetCount_v2(byref(dev_count))
if rc != 0:
warnings.warn('Failed to get available device from system.', stacklevel=3)
return None
uuids = []
for idx in range(dev_count.value):
dev_id = c_void_p()
rc = nvml_h.nvmlDeviceGetHandleByIndex_v2(idx, byref(dev_id))
if rc != 0:
warnings.warn(f'Failed to get device handle for {idx}', stacklevel=3)
return None
buf_len = 96
buf = create_string_buffer(buf_len)
rc = nvml_h.nvmlDeviceGetUUID(dev_id, buf, buf_len)
if rc != 0:
warnings.warn(f'Failed to get device UUID for {idx}', stacklevel=3)
return None
uuids.append(buf.raw.decode('ascii').strip('\0'))
del nvml_h
return uuids | null |
189,357 | from __future__ import annotations
import inspect, logging, math, os, sys, types, warnings, typing as t
import psutil, bentoml, openllm_core.utils as coreutils
from bentoml._internal.resource import get_resource, system_resources
from bentoml._internal.runner.strategy import THREAD_ENVS
class _ResourceMixin:
def from_system(cls) -> list[str]:
def from_spec(cls, spec) -> list[str]:
def validate(cls, val: list[t.Any]) -> None:
def _make_resource_class(name: str, resource_kind: str, docstring: str) -> type[bentoml.Resource[t.List[str]]]:
return types.new_class(
name,
(bentoml.Resource[t.List[str]], coreutils.ReprMixin),
{'resource_id': resource_kind},
lambda ns: ns.update({
'resource_id': resource_kind,
'from_spec': classmethod(_ResourceMixin.from_spec),
'from_system': classmethod(_ResourceMixin.from_system), #
'validate': classmethod(_ResourceMixin.validate),
'__repr_keys__': property(lambda _: {'resource_id'}), #
'__doc__': inspect.cleandoc(docstring),
'__module__': 'openllm._strategies', #
}),
) | null |
189,358 | from __future__ import annotations
import contextlib, attr, bentoml, openllm, types, logging, typing as t
from simple_di import Provide, inject
from bentoml._internal.configuration.containers import BentoMLContainer
from openllm_core._typing_compat import LiteralSerialisation, LiteralQuantise, LiteralBackend
_object_setattr = object.__setattr__
def get_hash(config: transformers.PretrainedConfig) -> str:
_commit_hash = getattr(config, '_commit_hash', None)
if _commit_hash is None:
logger.warning('Cannot find commit hash in %r', config)
return _commit_hash
def patch_correct_tag(llm, config, _revision=None) -> None:
# NOTE: The following won't be hit locally, since we generate a correct version based on the local path hash. It only applies when the model comes from the HF Hub.
if llm.revision is not None:
return
if not llm.local:
try:
if _revision is None:
_revision = get_hash(config)
except ValueError:
pass
if _revision is None and llm.tag.version is not None:
_revision = llm.tag.version
if llm.tag.version is None:
_object_setattr(
llm, '_tag', attr.evolve(llm.tag, version=_revision)
) # HACK: This copies the correct revision into llm.tag
if llm._revision is None:
_object_setattr(llm, '_revision', _revision) # HACK: This copies the correct revision into llm._model_version | null |
189,359 | from __future__ import annotations
import contextlib, attr, bentoml, openllm, types, logging, typing as t
from simple_di import Provide, inject
from bentoml._internal.configuration.containers import BentoMLContainer
from openllm_core._typing_compat import LiteralSerialisation, LiteralQuantise, LiteralBackend
def get_hash(config: transformers.PretrainedConfig) -> str:
_commit_hash = getattr(config, '_commit_hash', None)
if _commit_hash is None:
logger.warning('Cannot find commit hash in %r', config)
return _commit_hash
def _create_metadata(llm, config, safe_serialisation, trust_remote_code, metadata=None):
if metadata is None:
metadata = {}
metadata.update({'_framework': llm.__llm_backend__})
if llm.quantise:
metadata['_quantize'] = llm.quantise
architectures = getattr(config, 'architectures', [])
if not architectures:
if trust_remote_code:
auto_map = getattr(config, 'auto_map', {})
if not auto_map:
raise RuntimeError(
f'Failed to determine the architecture from both `auto_map` and `architectures` from {llm.model_id}'
)
autoclass = 'AutoModelForSeq2SeqLM' if llm.config['model_type'] == 'seq2seq_lm' else 'AutoModelForCausalLM'
if autoclass not in auto_map:
raise RuntimeError(
f"Given model '{llm.model_id}' is yet to be supported with 'auto_map'. OpenLLM currently only support encoder-decoders or decoders only models."
)
architectures = [auto_map[autoclass]]
else:
raise RuntimeError(
'Failed to determine the architecture for this model. Make sure the `config.json` is valid and can be loaded with `transformers.AutoConfig`'
)
metadata.update({
'_pretrained_class': architectures[0],
'_revision': get_hash(config) if not llm.local else llm.revision,
'_local': llm.local,
'serialisation': llm._serialisation,
'model_name': llm.config['model_name'],
'architecture': llm.config['architecture'],
'model_id': llm.model_id,
})
return metadata | null |
189,360 | from __future__ import annotations
import contextlib, attr, bentoml, openllm, types, logging, typing as t
from simple_di import Provide, inject
from bentoml._internal.configuration.containers import BentoMLContainer
from openllm_core._typing_compat import LiteralSerialisation, LiteralQuantise, LiteralBackend
def get_hash(config: transformers.PretrainedConfig) -> str:
_commit_hash = getattr(config, '_commit_hash', None)
if _commit_hash is None:
logger.warning('Cannot find commit hash in %r', config)
return _commit_hash
class _Model(bentoml.Model):
_imported_modules: t.List[types.ModuleType] = None
@property
def imported_modules(self):
if self._imported_modules is None:
self._imported_modules = []
return self._imported_modules
@imported_modules.setter
def imported_modules(self, value):
self._imported_modules = value
@classmethod
def create(cls, tag, *, module, api_version, labels=None, metadata=None):
return super().create(
tag,
module=module,
api_version=api_version,
signatures={},
labels=labels,
metadata=metadata,
context=openllm.utils.generate_context('openllm'),
)
LiteralSerialisation = Literal['safetensors', 'legacy']
LiteralQuantise = Literal['int8', 'int4', 'gptq', 'awq', 'squeezellm']
LiteralBackend = Literal['pt', 'vllm', 'triton', 'ggml']
@contextlib.contextmanager
@inject
def save_model(
tag: bentoml.Tag,
config: transformers.PretrainedConfig,
serialisation: LiteralSerialisation, #
trust_remote_code: bool,
module: str,
external_modules: list[types.ModuleType], #
model_id: str,
quantise: LiteralQuantise,
backend: LiteralBackend,
_local: bool,
_dtype: str,
_model_store: ModelStore = Provide[BentoMLContainer.model_store],
_api_version: str = 'v3.0.0', #
) -> bentoml.Model:
imported_modules = []
architectures = getattr(config, 'architectures', [])
_metadata = {
'model_id': model_id,
'backend': backend,
'dtype': _dtype,
'architectures': architectures,
'_revision': get_hash(config) or tag.version,
'_local': _local,
'serialisation': serialisation,
}
if quantise:
_metadata['_quantize'] = quantise
bentomodel = _Model.create(
tag,
module=f'openllm.serialisation.{module}',
api_version=_api_version,
labels=openllm.utils.generate_labels(serialisation),
metadata=_metadata,
)
with openllm.utils.analytics.set_bentoml_tracking():
try:
bentomodel.enter_cloudpickle_context(external_modules, imported_modules)
bentomodel.imported_modules = imported_modules
yield bentomodel
except Exception:
raise
else:
bentomodel.flush()
bentomodel.save(_model_store)
openllm.utils.analytics.track(
openllm.utils.analytics.ModelSaveEvent(
module=bentomodel.info.module, model_size_in_kb=openllm.utils.calc_dir_size(bentomodel.path) / 1024
)
)
finally:
bentomodel.exit_cloudpickle_context(bentomodel.imported_modules)
return bentomodel | null |
189,361 | from __future__ import annotations
import attr, traceback, functools, pathlib, typing as t
from huggingface_hub import HfApi
from openllm_core.exceptions import Error
from openllm_core.utils import resolve_filepath, validate_is_path
def ModelInfo(model_id: str, revision: str | None = None) -> HfModelInfo:
if model_id in __cached_id__:
return __cached_id__[model_id]
try:
__cached_id__[model_id] = Client().model_info(model_id, revision=revision)
return __cached_id__[model_id]
except Exception as err:
traceback.print_exc()
raise Error(f'Failed to fetch {model_id} from huggingface.co') from err
def resolve_filepath(path, ctx=None):
try:
return resolve_user_filepath(path, ctx)
except FileNotFoundError:
return path
def validate_is_path(maybe_path):
return os.path.exists(os.path.dirname(resolve_filepath(maybe_path)))
def has_weights(model_id: str, revision: str | None = None, *, extensions: str) -> bool:
if validate_is_path(model_id):
return next((True for _ in pathlib.Path(resolve_filepath(model_id)).glob(f'*.{extensions}')), False)
return any(s.rfilename.endswith(f'.{extensions}') for s in ModelInfo(model_id, revision=revision).siblings) | null |
189,362 | import copy, logging
import transformers
from openllm.serialisation.constants import HUB_ATTRS
def get_tokenizer(model_id_or_path, trust_remote_code, **attrs):
tokenizer = transformers.AutoTokenizer.from_pretrained(
model_id_or_path, trust_remote_code=trust_remote_code, **attrs
)
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
return tokenizer | null |
189,363 | import copy, logging
import transformers
from openllm.serialisation.constants import HUB_ATTRS
HUB_ATTRS = [
'cache_dir',
'code_revision',
'force_download', #
'local_files_only',
'proxies',
'resume_download', #
'revision',
'subfolder',
'use_auth_token', #
]
def process_config(model_id, trust_remote_code, **attrs):
config = attrs.pop('config', None)
# this logic below is synonymous to handling `from_pretrained` attrs.
hub_attrs = {k: attrs.pop(k) for k in HUB_ATTRS if k in attrs}
if not isinstance(config, transformers.PretrainedConfig):
copied_attrs = copy.deepcopy(attrs)
if copied_attrs.get('torch_dtype', None) == 'auto':
copied_attrs.pop('torch_dtype')
config, attrs = transformers.AutoConfig.from_pretrained(
model_id, return_unused_kwargs=True, trust_remote_code=trust_remote_code, **hub_attrs, **copied_attrs
)
return config, hub_attrs, attrs | null |
189,364 | import copy, logging
import transformers
from openllm.serialisation.constants import HUB_ATTRS
logger = logging.getLogger(__name__)
def infer_autoclass_from_llm(llm, config, /):
autoclass = 'AutoModelForSeq2SeqLM' if llm.config['model_type'] == 'seq2seq_lm' else 'AutoModelForCausalLM'
if llm.trust_remote_code:
if not hasattr(config, 'auto_map'):
raise ValueError(
f'Invalid configuration for {llm.model_id}. ``trust_remote_code=True`` requires `transformers.PretrainedConfig` to contain a `auto_map` mapping'
)
# in case this model doesn't use the correct auto class for model type, for example like chatglm
# where it uses AutoModel instead of AutoModelForCausalLM. Then we fallback to AutoModel
if autoclass not in config.auto_map:
logger.warning(
"OpenLLM failed to determine compatible Auto classes to load %s. Falling back to 'AutoModel'.\nTip: Make sure to specify 'AutoModelForCausalLM' or 'AutoModelForSeq2SeqLM' in your 'config.auto_map'. If your model type is yet to be supported, please file an issues on our GitHub tracker.",
llm._model_id,
)
autoclass = 'AutoModel'
return getattr(transformers, autoclass) | null |
189,368 | from __future__ import annotations
import inspect, logging, math, os, sys, types, warnings, typing as t
import psutil, bentoml, openllm_core.utils as coreutils
from bentoml._internal.resource import get_resource, system_resources
from bentoml._internal.runner.strategy import THREAD_ENVS
def _strtoul(s: str) -> int:
# Return -1 or positive integer sequence string starts with.
if not s:
return -1
idx = 0
for idx, c in enumerate(s):
if not (c.isdigit() or (idx == 0 and c in '+-')):
break
if idx + 1 == len(s):
idx += 1
# NOTE: idx will be set via enumerate
return int(s[:idx]) if idx > 0 else -1
def _parse_list_with_prefix(lst: str, prefix: str) -> list[str]:
rcs = []
for elem in lst.split(','):
# Repeated id results in empty set
if elem in rcs:
return []
# Anything other but prefix is ignored
if not elem.startswith(prefix):
break
rcs.append(elem)
return rcs
def _parse_cuda_visible_devices(default_var: str | None = None, respect_env: bool = True) -> list[str] | None:
if respect_env:
spec = os.environ.get('CUDA_VISIBLE_DEVICES', default_var)
if not spec:
return None
else:
if default_var is None:
raise ValueError('spec is required to be not None when parsing spec.')
spec = default_var
if spec.startswith('GPU-'):
return _parse_list_with_prefix(spec, 'GPU-')
if spec.startswith('MIG-'):
return _parse_list_with_prefix(spec, 'MIG-')
# XXX: We need to somehow handle cases such as '100m'
# CUDA_VISIBLE_DEVICES uses something like strtoul
# which makes `1gpu2,2ampere` equivalent to `1,2`
rc: list[int] = []
for el in spec.split(','):
x = _strtoul(el.strip())
# Repeated ordinal results in empty set
if x in rc:
return []
# Negative value aborts the sequence
if x < 0:
break
rc.append(x)
return [str(i) for i in rc] | null |
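A few illustrative parses (made-up device strings, bypassing the environment with `respect_env=False`) of the strtoul-like semantics described in the comments above:
assert _parse_cuda_visible_devices('1gpu2,2ampere', respect_env=False) == ['1', '2']  # trailing junk after the digits is ignored
assert _parse_cuda_visible_devices('0,0,1', respect_env=False) == []                  # a repeated ordinal yields an empty set
assert _parse_cuda_visible_devices('0,-1,3', respect_env=False) == ['0']              # a negative value aborts the sequence
assert _parse_cuda_visible_devices('GPU-aaa,GPU-bbb', respect_env=False) == ['GPU-aaa', 'GPU-bbb']  # UUID-prefixed specs pass straight through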
189,373 | import functools, logging
from http import HTTPStatus
import orjson
from starlette.applications import Starlette
from starlette.responses import JSONResponse
from starlette.routing import Route
from openllm_core.utils import converter
from ._openapi import add_schema_definitions, append_schemas, get_generator
from ..protocol.hf import AgentRequest, AgentResponse, HFErrorResponse
schemas = get_generator(
'hf',
components=[AgentRequest, AgentResponse, HFErrorResponse],
tags=[
{
'name': 'HF',
'description': 'HF integration, including Agent and others schema endpoints.',
'externalDocs': 'https://huggingface.co/docs/transformers/main_classes/agent',
}
],
)
async def hf_agent(req, llm):
json_str = await req.body()
try:
request = converter.structure(orjson.loads(json_str), AgentRequest)
except orjson.JSONDecodeError as err:
logger.debug('Sent body: %s', json_str)
logger.error('Invalid JSON input received: %s', err)
return error_response(HTTPStatus.BAD_REQUEST, 'Invalid JSON input received (Check server log).')
stop = request.parameters.pop('stop', [])
try:
result = await llm.generate(request.inputs, stop=stop, **request.parameters)
return JSONResponse(
converter.unstructure([AgentResponse(generated_text=result.outputs[0].text)]), status_code=HTTPStatus.OK.value
)
except Exception as err:
logger.error('Error while generating: %s', err)
return error_response(HTTPStatus.INTERNAL_SERVER_ERROR, 'Error while generating (Check server log).')
def append_schemas(svc, generated_schema, tags_order='prepend', inject=True):
# HACK: Dirty hack to append schemas to an existing service. We definitely need to support mounting a Starlette app's OpenAPI spec.
from bentoml._internal.service.openapi.specification import OpenAPISpecification
if not inject:
return svc
svc_schema = svc.openapi_spec
if isinstance(svc_schema, (OpenAPISpecification, _SimpleSchema)):
svc_schema = svc_schema.asdict()
if 'tags' in generated_schema:
if tags_order == 'prepend':
svc_schema['tags'] = generated_schema['tags'] + svc_schema['tags']
elif tags_order == 'append':
svc_schema['tags'].extend(generated_schema['tags'])
else:
raise ValueError(f'Invalid tags_order: {tags_order}')
if 'components' in generated_schema:
svc_schema['components']['schemas'].update(generated_schema['components']['schemas'])
svc_schema['paths'].update(generated_schema['paths'])
# HACK: monkey-patch these attributes until we have a better way to add Starlette schemas.
from bentoml._internal.service import openapi
def _generate_spec(svc, openapi_version=OPENAPI_VERSION):
return _SimpleSchema(svc_schema)
def asdict(self):
return svc_schema
openapi.generate_spec = _generate_spec
OpenAPISpecification.asdict = asdict
return svc
def mount_to_svc(svc, llm):
app = Starlette(
debug=True,
routes=[
Route('/agent', endpoint=functools.partial(hf_agent, llm=llm), name='hf_agent', methods=['POST']),
Route('/schema', endpoint=lambda req: schemas.OpenAPIResponse(req), include_in_schema=False),
],
)
mount_path = '/hf'
svc.mount_asgi_app(app, path=mount_path)
return append_schemas(svc, schemas.get_schema(routes=app.routes, mount_path=mount_path), tags_order='append') | null |
189,374 | from __future__ import annotations
import functools
import inspect
import types
import typing as t
import attr
from starlette.routing import Host, Mount, Route
from starlette.schemas import EndpointInfo, SchemaGenerator
from openllm_core.utils import first_not_none
_SCHEMAS = {k[:-7].lower(): v for k, v in locals().items() if k.endswith('_SCHEMA')}
def add_schema_definitions(func):
append_str = _SCHEMAS.get(func.__name__.lower(), '')
if not append_str:
return func
if func.__doc__ is None:
func.__doc__ = ''
func.__doc__ = func.__doc__.strip() + '\n\n' + append_str.strip()
return func | null |
189,375 | from __future__ import annotations
import functools
import inspect
import types
import typing as t
import attr
from starlette.routing import Host, Mount, Route
from starlette.schemas import EndpointInfo, SchemaGenerator
from openllm_core.utils import first_not_none
OPENAPI_VERSION, API_VERSION = '3.0.2', '1.0'
class OpenLLMSchemaGenerator(SchemaGenerator):
def get_endpoints(self, routes):
endpoints_info = []
for route in routes:
if isinstance(route, (Mount, Host)):
routes = route.routes or []
path = self._remove_converter(route.path) if isinstance(route, Mount) else ''
sub_endpoints = [
EndpointInfo(path=f'{path}{sub_endpoint.path}', http_method=sub_endpoint.http_method, func=sub_endpoint.func)
for sub_endpoint in self.get_endpoints(routes)
]
endpoints_info.extend(sub_endpoints)
elif not isinstance(route, Route) or not route.include_in_schema:
continue
elif (
inspect.isfunction(route.endpoint)
or inspect.ismethod(route.endpoint)
or isinstance(route.endpoint, functools.partial)
):
endpoint = route.endpoint.func if isinstance(route.endpoint, functools.partial) else route.endpoint
path = self._remove_converter(route.path)
for method in route.methods or ['GET']:
if method == 'HEAD':
continue
endpoints_info.append(EndpointInfo(path, method.lower(), endpoint))
else:
path = self._remove_converter(route.path)
for method in ['get', 'post', 'put', 'patch', 'delete', 'options']:
if not hasattr(route.endpoint, method):
continue
func = getattr(route.endpoint, method)
endpoints_info.append(EndpointInfo(path, method.lower(), func))
return endpoints_info
def get_schema(self, routes, mount_path=None):
schema = dict(self.base_schema)
schema.setdefault('paths', {})
endpoints_info = self.get_endpoints(routes)
if mount_path:
mount_path = f'/{mount_path}' if not mount_path.startswith('/') else mount_path
for endpoint in endpoints_info:
parsed = self.parse_docstring(endpoint.func)
if not parsed:
continue
path = endpoint.path if mount_path is None else mount_path + endpoint.path
if path not in schema['paths']:
schema['paths'][path] = {}
schema['paths'][path][endpoint.http_method] = parsed
return schema
def component_schema_generator(attr_cls: pydantic.BaseModel, description=None):
schema = {'type': 'object', 'required': [], 'properties': {}, 'title': attr_cls.__name__}
schema['description'] = first_not_none(
getattr(attr_cls, '__doc__', None), description, default=f'Generated components for {attr_cls.__name__}'
)
  for name, field in attr_cls.model_fields.items():
    attr_type = field.annotation
    origin_type = t.get_origin(attr_type)
    args_type = t.get_args(attr_type)
    prop_schema = None
    # Map Python types to OpenAPI schema types
    if attr_type is str:
      schema_type = 'string'
    elif attr_type is int:
      schema_type = 'integer'
    elif attr_type is float:
      schema_type = 'number'
    elif attr_type is bool:
      schema_type = 'boolean'
    elif origin_type is list or origin_type is tuple:
      schema_type = 'array'
    elif origin_type is dict:
      schema_type = 'object'
      # Assuming string keys for simplicity, and handling Any type for values
      prop_schema = {
        'type': 'object',
        'additionalProperties': True if len(args_type) > 1 and args_type[1] is t.Any else {'type': 'string'},
      }
    elif attr_type == t.Optional[str]:
      schema_type = 'string'
    elif origin_type is t.Union and t.Any in args_type:
      schema_type = 'object'
      prop_schema = {'type': 'object', 'additionalProperties': True}
    else:
      schema_type = 'string'
    if prop_schema is None:
      prop_schema = {'type': schema_type}
    if field.is_required():
      schema['required'].append(name)
    elif field.default_factory is None:
      prop_schema['default'] = field.default
    schema['properties'][name] = prop_schema
return schema
def get_generator(title, components=None, tags=None, inject=True):
base_schema = {'info': {'title': title, 'version': API_VERSION}, 'version': OPENAPI_VERSION}
if components and inject:
base_schema['components'] = {'schemas': {c.__name__: component_schema_generator(c) for c in components}}
if tags is not None and tags and inject:
base_schema['tags'] = tags
return OpenLLMSchemaGenerator(base_schema) | null |
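A hedged end-to-end sketch of the generator. The `PingRequest` model and `/ping` route below are hypothetical, and `pyyaml` is assumed to be installed since Starlette's `SchemaGenerator` parses the YAML after `---` in endpoint docstrings:

```python
import pydantic
from starlette.routing import Route

class PingRequest(pydantic.BaseModel):
  message: str
  repeat: int = 1

async def ping(request):
  """
  ---
  summary: Echo a message back to the caller.
  responses:
    200:
      description: OK
  """

generator = get_generator('toy-api', components=[PingRequest], tags=[{'name': 'Toy'}])
spec = generator.get_schema(routes=[Route('/ping', ping, methods=['POST'])], mount_path='/v1')
print(spec['paths']['/v1/ping']['post']['summary'])              # parsed from the docstring
print(spec['components']['schemas']['PingRequest']['required'])  # ['message'], via component_schema_generator
```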
189,376 | import functools
import logging
import time
import traceback
from http import HTTPStatus
import orjson
from starlette.applications import Starlette
from starlette.responses import JSONResponse, StreamingResponse
from starlette.routing import Route
from openllm_core.utils import converter, gen_random_uuid
from ._openapi import add_schema_definitions, append_schemas, apply_schema, get_generator
from openllm_core.protocol.openai import (
ChatCompletionRequest,
ChatCompletionResponse,
ChatCompletionResponseChoice,
ChatCompletionResponseStreamChoice,
ChatCompletionStreamResponse,
ChatMessage,
CompletionRequest,
CompletionResponse,
CompletionResponseChoice,
CompletionResponseStreamChoice,
CompletionStreamResponse,
Delta,
ErrorResponse,
LogProbs,
ModelCard,
ModelList,
UsageInfo,
)
schemas = get_generator(
'openai',
components=[
ErrorResponse,
ModelList,
ChatCompletionResponse,
ChatCompletionRequest,
ChatCompletionStreamResponse,
CompletionRequest,
CompletionResponse,
CompletionStreamResponse,
],
tags=[
{
'name': 'OpenAI',
'description': 'OpenAI Compatible API support',
'externalDocs': 'https://platform.openai.com/docs/api-reference/completions/object',
}
],
)
def list_models(_, llm):
return JSONResponse(
converter.unstructure(ModelList(data=[ModelCard(id=llm.llm_type)])), status_code=HTTPStatus.OK.value
)
async def chat_completions(req, llm):
# TODO: Check for length based on model context_length
json_str = await req.body()
try:
request = converter.structure(orjson.loads(json_str), ChatCompletionRequest)
except orjson.JSONDecodeError as err:
logger.debug('Sent body: %s', json_str)
logger.error('Invalid JSON input received: %s', err)
return error_response(HTTPStatus.BAD_REQUEST, 'Invalid JSON input received (Check server log).')
logger.debug('Received chat completion request: %s', request)
err_check = await check_model(request, llm.llm_type)
if err_check is not None:
return err_check
if request.logit_bias is not None and len(request.logit_bias) > 0:
return error_response(HTTPStatus.BAD_REQUEST, "'logit_bias' is not yet supported.")
model_name, request_id = request.model, gen_random_uuid('chatcmpl')
created_time = int(time.monotonic())
prompt = llm.tokenizer.apply_chat_template(
request.messages,
tokenize=False,
chat_template=request.chat_template if request.chat_template != 'None' else None,
add_generation_prompt=request.add_generation_prompt,
)
logger.debug('Prompt: %r', prompt)
config = llm.config.compatible_options(request)
def get_role() -> str:
return (
request.messages[-1]['role'] if not request.add_generation_prompt else 'assistant'
) # TODO: Support custom role here.
try:
result_generator = llm.generate_iterator(prompt, request_id=request_id, **config)
except Exception as err:
traceback.print_exc()
logger.error('Error generating completion: %s', err)
return error_response(HTTPStatus.INTERNAL_SERVER_ERROR, f'Exception: {err!s} (check server log)')
def create_stream_response_json(index, text, finish_reason=None, usage=None):
response = ChatCompletionStreamResponse(
id=request_id,
created=created_time,
model=model_name,
choices=[
ChatCompletionResponseStreamChoice(index=index, delta=Delta(content=text), finish_reason=finish_reason)
],
)
if usage is not None:
response.usage = usage
return jsonify_attr(response)
async def completion_stream_generator():
# first chunk with role
role = get_role()
for i in range(config['n']):
yield f'data: {jsonify_attr(ChatCompletionStreamResponse(id=request_id, created=created_time, choices=[ChatCompletionResponseStreamChoice(index=i, delta=Delta(role=role), finish_reason=None)], model=model_name))}\n\n'
if request.echo:
last_message, last_content = request.messages[-1], ''
if last_message.get('content') and last_message.get('role') == role:
last_content = last_message['content']
if last_content:
for i in range(config['n']):
yield f'data: {jsonify_attr(ChatCompletionStreamResponse(id=request_id, created=created_time, choices=[ChatCompletionResponseStreamChoice(index=i, delta=Delta(content=last_content), finish_reason=None)], model=model_name))}\n\n'
previous_num_tokens = [0] * config['n']
finish_reason_sent = [False] * config['n']
async for res in result_generator:
for output in res.outputs:
if finish_reason_sent[output.index]:
continue
yield f'data: {create_stream_response_json(output.index, output.text)}\n\n'
previous_num_tokens[output.index] += len(output.token_ids)
if output.finish_reason is not None:
prompt_tokens = len(res.prompt_token_ids)
          usage = UsageInfo(prompt_tokens, previous_num_tokens[output.index], prompt_tokens + previous_num_tokens[output.index])
yield f'data: {create_stream_response_json(output.index, "", output.finish_reason, usage)}\n\n'
finish_reason_sent[output.index] = True
yield 'data: [DONE]\n\n'
try:
# Streaming case
if request.stream:
return StreamingResponse(completion_stream_generator(), media_type='text/event-stream')
# Non-streaming case
    final_result, texts, token_ids = None, [[] for _ in range(config['n'])], [[] for _ in range(config['n'])]
async for res in result_generator:
if await req.is_disconnected():
return error_response(HTTPStatus.BAD_REQUEST, 'Client disconnected.')
for output in res.outputs:
texts[output.index].append(output.text)
token_ids[output.index].extend(output.token_ids)
final_result = res
if final_result is None:
return error_response(HTTPStatus.BAD_REQUEST, 'No response from model.')
final_result = final_result.model_copy(
update=dict(
outputs=[
output.model_copy(update=dict(text=''.join(texts[output.index]), token_ids=token_ids[output.index]))
for output in final_result.outputs
]
)
)
role = get_role()
choices = [
ChatCompletionResponseChoice(
index=output.index, message=ChatMessage(role=role, content=output.text), finish_reason=output.finish_reason
)
for output in final_result.outputs
]
if request.echo:
last_message, last_content = request.messages[-1], ''
if last_message.get('content') and last_message.get('role') == role:
last_content = last_message['content']
for choice in choices:
full_message = last_content + choice.message.content
choice.message.content = full_message
num_prompt_tokens = len(final_result.prompt_token_ids)
num_generated_tokens = sum(len(output.token_ids) for output in final_result.outputs)
usage = UsageInfo(num_prompt_tokens, num_generated_tokens, num_prompt_tokens + num_generated_tokens)
response = ChatCompletionResponse(
id=request_id, created=created_time, model=model_name, usage=usage, choices=choices
)
return JSONResponse(converter.unstructure(response), status_code=HTTPStatus.OK.value)
except Exception as err:
traceback.print_exc()
logger.error('Error generating completion: %s', err)
return error_response(HTTPStatus.INTERNAL_SERVER_ERROR, f'Exception: {err!s} (check server log)')
async def completions(req, llm):
# TODO: Check for length based on model context_length
json_str = await req.body()
try:
request = converter.structure(orjson.loads(json_str), CompletionRequest)
except orjson.JSONDecodeError as err:
logger.debug('Sent body: %s', json_str)
logger.error('Invalid JSON input received: %s', err)
return error_response(HTTPStatus.BAD_REQUEST, 'Invalid JSON input received (Check server log).')
logger.debug('Received legacy completion request: %s', request)
err_check = await check_model(request, llm.llm_type)
if err_check is not None:
return err_check
# OpenAI API supports echoing the prompt when max_tokens is 0.
echo_without_generation = request.echo and request.max_tokens == 0
if echo_without_generation:
request.max_tokens = 1 # XXX: Hack to make sure we get the prompt back.
if request.suffix is not None:
return error_response(HTTPStatus.BAD_REQUEST, "'suffix' is not yet supported.")
if request.logit_bias is not None and len(request.logit_bias) > 0:
return error_response(HTTPStatus.BAD_REQUEST, "'logit_bias' is not yet supported.")
if not request.prompt:
return error_response(HTTPStatus.BAD_REQUEST, 'Please provide a prompt.')
prompt = request.prompt
# TODO: Support multiple prompts
model_name, request_id = request.model, gen_random_uuid('cmpl')
created_time = int(time.monotonic())
config = llm.config.compatible_options(request)
try:
result_generator = llm.generate_iterator(prompt, request_id=request_id, **config)
except Exception as err:
traceback.print_exc()
logger.error('Error generating completion: %s', err)
return error_response(HTTPStatus.INTERNAL_SERVER_ERROR, f'Exception: {err!s} (check server log)')
  # if best_of != n, then we don't stream
# TODO: support use_beam_search
stream = request.stream and (config['best_of'] is None or config['n'] == config['best_of'])
def create_stream_response_json(index, text, logprobs=None, finish_reason=None, usage=None):
response = CompletionStreamResponse(
id=request_id,
created=created_time,
model=model_name,
choices=[CompletionResponseStreamChoice(index=index, text=text, logprobs=logprobs, finish_reason=finish_reason)],
)
if usage:
response.usage = usage
return jsonify_attr(response)
async def completion_stream_generator():
previous_num_tokens = [0] * config['n']
previous_texts = [''] * config['n']
previous_echo = [False] * config['n']
async for res in result_generator:
for output in res.outputs:
i = output.index
delta_text = output.text
token_ids = output.token_ids
logprobs = None
top_logprobs = None
if request.logprobs is not None:
top_logprobs = output.logprobs[previous_num_tokens[i] :]
if request.echo and not previous_echo[i]:
if not echo_without_generation:
delta_text = res.prompt + delta_text
token_ids = res.prompt_token_ids + token_ids
if top_logprobs:
top_logprobs = res.prompt_logprobs + top_logprobs
else:
delta_text = res.prompt
token_ids = res.prompt_token_ids
if top_logprobs:
top_logprobs = res.prompt_logprobs
previous_echo[i] = True
if request.logprobs is not None:
logprobs = create_logprobs(
output.token_ids,
output.logprobs[previous_num_tokens[i] :],
request.logprobs,
len(previous_texts[i]),
llm=llm,
)
previous_num_tokens[i] += len(output.token_ids)
previous_texts[i] += output.text
yield f'data: {create_stream_response_json(index=i, text=output.text, logprobs=logprobs, finish_reason=output.finish_reason)}\n\n'
if output.finish_reason is not None:
logprobs = LogProbs() if request.logprobs is not None else None
prompt_tokens = len(res.prompt_token_ids)
usage = UsageInfo(prompt_tokens, previous_num_tokens[i], prompt_tokens + previous_num_tokens[i])
yield f'data: {create_stream_response_json(i, "", logprobs, output.finish_reason, usage)}\n\n'
yield 'data: [DONE]\n\n'
try:
# Streaming case
if stream:
return StreamingResponse(completion_stream_generator(), media_type='text/event-stream')
# Non-streaming case
    final_result, texts, token_ids = None, [[] for _ in range(config['n'])], [[] for _ in range(config['n'])]
async for res in result_generator:
if await req.is_disconnected():
return error_response(HTTPStatus.BAD_REQUEST, 'Client disconnected.')
for output in res.outputs:
texts[output.index].append(output.text)
token_ids[output.index].extend(output.token_ids)
final_result = res
if final_result is None:
return error_response(HTTPStatus.BAD_REQUEST, 'No response from model.')
final_result = final_result.model_copy(
update=dict(
outputs=[
output.model_copy(update=dict(text=''.join(texts[output.index]), token_ids=token_ids[output.index]))
for output in final_result.outputs
]
)
)
choices = []
prompt_token_ids = final_result.prompt_token_ids
prompt_logprobs = final_result.prompt_logprobs
prompt_text = final_result.prompt
for output in final_result.outputs:
logprobs = None
if request.logprobs is not None:
if not echo_without_generation:
token_ids, top_logprobs = output.token_ids, output.logprobs
if request.echo:
token_ids, top_logprobs = prompt_token_ids + token_ids, prompt_logprobs + top_logprobs
else:
token_ids, top_logprobs = prompt_token_ids, prompt_logprobs
logprobs = create_logprobs(token_ids, top_logprobs, request.logprobs, llm=llm)
if not echo_without_generation:
output_text = output.text
if request.echo:
output_text = prompt_text + output_text
else:
output_text = prompt_text
choice_data = CompletionResponseChoice(
index=output.index, text=output_text, logprobs=logprobs, finish_reason=output.finish_reason
)
choices.append(choice_data)
num_prompt_tokens = len(final_result.prompt_token_ids)
num_generated_tokens = sum(len(output.token_ids) for output in final_result.outputs)
usage = UsageInfo(num_prompt_tokens, num_generated_tokens, num_prompt_tokens + num_generated_tokens)
response = CompletionResponse(id=request_id, created=created_time, model=model_name, usage=usage, choices=choices)
return JSONResponse(converter.unstructure(response), status_code=HTTPStatus.OK.value)
except Exception as err:
traceback.print_exc()
logger.error('Error generating completion: %s', err)
return error_response(HTTPStatus.INTERNAL_SERVER_ERROR, f'Exception: {err!s} (check server log)')
def apply_schema(func, **attrs):
for k, v in attrs.items():
func.__doc__ = func.__doc__.replace(k, v)
return func
def append_schemas(svc, generated_schema, tags_order='prepend', inject=True):
  # HACK: Dirty hack to append schemas to an existing service. We definitely need proper support for mounting a Starlette app's OpenAPI spec.
from bentoml._internal.service.openapi.specification import OpenAPISpecification
if not inject:
return svc
svc_schema = svc.openapi_spec
if isinstance(svc_schema, (OpenAPISpecification, _SimpleSchema)):
svc_schema = svc_schema.asdict()
if 'tags' in generated_schema:
if tags_order == 'prepend':
svc_schema['tags'] = generated_schema['tags'] + svc_schema['tags']
elif tags_order == 'append':
svc_schema['tags'].extend(generated_schema['tags'])
else:
raise ValueError(f'Invalid tags_order: {tags_order}')
if 'components' in generated_schema:
svc_schema['components']['schemas'].update(generated_schema['components']['schemas'])
svc_schema['paths'].update(generated_schema['paths'])
  # HACK: monkey-patch this attribute until we have a better way to add Starlette schemas.
from bentoml._internal.service import openapi
def _generate_spec(svc, openapi_version=OPENAPI_VERSION):
return _SimpleSchema(svc_schema)
def asdict(self):
return svc_schema
openapi.generate_spec = _generate_spec
OpenAPISpecification.asdict = asdict
return svc
def mount_to_svc(svc, llm):
list_models.__doc__ = list_models.__doc__.replace('__model_id__', llm.llm_type)
completions.__doc__ = completions.__doc__.replace('__model_id__', llm.llm_type)
chat_completions.__doc__ = chat_completions.__doc__.replace('__model_id__', llm.llm_type)
app = Starlette(
debug=True,
routes=[
Route(
'/models', functools.partial(apply_schema(list_models, __model_id__=llm.llm_type), llm=llm), methods=['GET']
),
Route(
'/completions',
functools.partial(apply_schema(completions, __model_id__=llm.llm_type), llm=llm),
methods=['POST'],
),
Route(
'/chat/completions',
functools.partial(
apply_schema(
chat_completions,
__model_id__=llm.llm_type,
__chat_template__=orjson.dumps(llm.config.chat_template).decode(),
__chat_messages__=orjson.dumps(llm.config.chat_messages).decode(),
__add_generation_prompt__=str(True) if llm.config.chat_messages is not None else str(False),
),
llm=llm,
),
methods=['POST'],
),
Route('/schema', endpoint=lambda req: schemas.OpenAPIResponse(req), include_in_schema=False),
],
)
svc.mount_asgi_app(app, path='/v1')
return append_schemas(svc, schemas.get_schema(routes=app.routes, mount_path='/v1')) | null |
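For reference, a hedged client-side sketch of calling the routes mounted above with the official `openai` Python client. The base URL/port and the dummy API key are assumptions about a local deployment (BentoML's default port is 3000):

```python
import openai

client = openai.OpenAI(base_url='http://localhost:3000/v1', api_key='na')  # key is not checked server-side
model_id = client.models.list().data[0].id
stream = client.chat.completions.create(
  model=model_id,
  messages=[{'role': 'user', 'content': 'What is a BentoML Service?'}],
  stream=True,
)
for chunk in stream:
  print(chunk.choices[0].delta.content or '', end='', flush=True)
```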
189,377 | import functools
import logging
import time
import traceback
from http import HTTPStatus
import orjson
from starlette.applications import Starlette
from starlette.responses import JSONResponse, StreamingResponse
from starlette.routing import Route
from openllm_core.utils import converter, gen_random_uuid
from ._openapi import add_schema_definitions, append_schemas, apply_schema, get_generator
from openllm_core.protocol.openai import (
ChatCompletionRequest,
ChatCompletionResponse,
ChatCompletionResponseChoice,
ChatCompletionResponseStreamChoice,
ChatCompletionStreamResponse,
ChatMessage,
CompletionRequest,
CompletionResponse,
CompletionResponseChoice,
CompletionResponseStreamChoice,
CompletionStreamResponse,
Delta,
ErrorResponse,
LogProbs,
ModelCard,
ModelList,
UsageInfo,
)
schemas = get_generator(
'openai',
components=[
ErrorResponse,
ModelList,
ChatCompletionResponse,
ChatCompletionRequest,
ChatCompletionStreamResponse,
CompletionRequest,
CompletionResponse,
CompletionStreamResponse,
],
tags=[
{
'name': 'OpenAI',
'description': 'OpenAI Compatible API support',
'externalDocs': 'https://platform.openai.com/docs/api-reference/completions/object',
}
],
)
def list_models(_, llm):
async def chat_completions(req, llm):
async def completions(req, llm):
def apply_schema(func, **attrs):
def append_schemas(svc, generated_schema, tags_order='prepend', inject=True):
def mount_to_svc(svc, llm):
list_models.__doc__ = list_models.__doc__.replace('__model_id__', llm.llm_type)
completions.__doc__ = completions.__doc__.replace('__model_id__', llm.llm_type)
chat_completions.__doc__ = chat_completions.__doc__.replace('__model_id__', llm.llm_type)
app = Starlette(
debug=True,
routes=[
Route(
'/models', functools.partial(apply_schema(list_models, __model_id__=llm.llm_type), llm=llm), methods=['GET']
),
Route(
'/completions',
functools.partial(apply_schema(completions, __model_id__=llm.llm_type), llm=llm),
methods=['POST'],
),
Route(
'/chat/completions',
functools.partial(
apply_schema(
chat_completions,
__model_id__=llm.llm_type,
__chat_template__=orjson.dumps(llm.config.chat_template).decode(),
__chat_messages__=orjson.dumps(llm.config.chat_messages).decode(),
__add_generation_prompt__=str(True) if llm.config.chat_messages is not None else str(False),
),
llm=llm,
),
methods=['POST'],
),
Route('/schema', endpoint=lambda req: schemas.OpenAPIResponse(req), include_in_schema=False),
],
)
svc.mount_asgi_app(app, path='/v1')
return append_schemas(svc, schemas.get_schema(routes=app.routes, mount_path='/v1')) | null |
189,381 | from __future__ import annotations
import functools, logging, os, warnings, typing as t
import attr, orjson, bentoml, openllm, openllm_core
from openllm_core._schemas import GenerationOutput
from openllm_core._typing_compat import (
AdapterMap,
AdapterTuple,
AdapterType,
LiteralBackend,
LiteralDtype,
LiteralQuantise,
LiteralSerialisation,
)
from openllm.serialisation import _make_tag_components
from openllm_core.exceptions import MissingDependencyError
from openllm_core.utils import (
DEBUG,
check_bool_env,
codegen,
first_not_none,
normalise_model_name,
flatten_attrs,
gen_random_uuid,
getenv,
is_peft_available,
is_transformers_available,
is_vllm_available,
resolve_filepath,
validate_is_path,
)
from .exceptions import ForbiddenAttributeError, OpenLLMException
from .serialisation.constants import PEFT_CONFIG_NAME
def _torch_dtype_mapping() -> dict[str, torch.dtype]:
import torch
return {
'half': torch.float16,
'float16': torch.float16, #
'float': torch.float32,
'float32': torch.float32, #
'bfloat16': torch.bfloat16,
} | null |
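A small, hypothetical helper showing how such a mapping is typically consumed when resolving a user-supplied `--dtype` string:

```python
import torch

def resolve_dtype(name: str) -> torch.dtype:
  # Illustrative only: look the string up in the mapping above, failing loudly otherwise.
  mapping = _torch_dtype_mapping()
  if name not in mapping:
    raise ValueError(f"Unsupported dtype '{name}', expected one of {list(mapping)}")
  return mapping[name]

assert resolve_dtype('half') is torch.float16
```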
189,382 | from __future__ import annotations
import functools, logging, os, warnings, typing as t
import attr, orjson, bentoml, openllm, openllm_core
from openllm_core._schemas import GenerationOutput
from openllm_core._typing_compat import (
AdapterMap,
AdapterTuple,
AdapterType,
LiteralBackend,
LiteralDtype,
LiteralQuantise,
LiteralSerialisation,
)
from openllm.serialisation import _make_tag_components
from openllm_core.exceptions import MissingDependencyError
from openllm_core.utils import (
DEBUG,
check_bool_env,
codegen,
first_not_none,
normalise_model_name,
flatten_attrs,
gen_random_uuid,
getenv,
is_peft_available,
is_transformers_available,
is_vllm_available,
resolve_filepath,
validate_is_path,
)
from .exceptions import ForbiddenAttributeError, OpenLLMException
from .serialisation.constants import PEFT_CONFIG_NAME
_AdapterTuple: type[AdapterTuple] = codegen.make_attr_tuple_class('AdapterTuple', ['adapter_id', 'name', 'config'])
AdapterMap = Dict[AdapterType, Tuple[AdapterTuple, ...]]
PEFT_CONFIG_NAME = 'adapter_config.json'
def convert_peft_config_type(adapter_map: dict[str, str]) -> AdapterMap:
if not is_peft_available():
raise RuntimeError(
"LoRA adapter requires 'peft' to be installed. Make sure to do 'pip install \"openllm[fine-tune]\"'"
)
from huggingface_hub import hf_hub_download
resolved: AdapterMap = {}
for path_or_adapter_id, name in adapter_map.items():
if name is None:
raise ValueError('Adapter name must be specified.')
if os.path.isfile(os.path.join(path_or_adapter_id, PEFT_CONFIG_NAME)):
config_file = os.path.join(path_or_adapter_id, PEFT_CONFIG_NAME)
else:
try:
config_file = hf_hub_download(path_or_adapter_id, PEFT_CONFIG_NAME)
except Exception as err:
raise ValueError(f"Can't find '{PEFT_CONFIG_NAME}' at '{path_or_adapter_id}'") from err
with open(config_file, 'r') as file:
resolved_config = orjson.loads(file.read())
_peft_type = resolved_config['peft_type'].lower()
if _peft_type not in resolved:
resolved[_peft_type] = ()
resolved[_peft_type] += (_AdapterTuple((path_or_adapter_id, name, resolved_config)),)
return resolved | null |
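A hedged usage sketch: the adapter id below is an arbitrary public LoRA repository used only for illustration, and `peft`/`huggingface_hub` are assumed to be installed:

```python
# Map an adapter repo (or a local folder containing adapter_config.json) to an adapter name,
# then group the result by PEFT type.
adapter_map = {'tloen/alpaca-lora-7b': 'alpaca'}
resolved = convert_peft_config_type(adapter_map)
for peft_type, adapters in resolved.items():
  for adapter in adapters:
    print(peft_type, adapter.adapter_id, adapter.name)  # e.g. "lora tloen/alpaca-lora-7b alpaca"
```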
189,383 |
def prepare_logits_processor(config):
import transformers
generation_config = config.generation_config
logits_processor = transformers.LogitsProcessorList()
if generation_config['temperature'] >= 1e-5 and generation_config['temperature'] != 1.0:
logits_processor.append(transformers.TemperatureLogitsWarper(generation_config['temperature']))
if generation_config['repetition_penalty'] > 1.0:
logits_processor.append(transformers.RepetitionPenaltyLogitsProcessor(generation_config['repetition_penalty']))
  if 1e-8 <= generation_config['top_p'] < 1.0:
logits_processor.append(transformers.TopPLogitsWarper(generation_config['top_p']))
if generation_config['top_k'] > 0:
logits_processor.append(transformers.TopKLogitsWarper(generation_config['top_k']))
return logits_processor | null |
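A minimal sketch of applying the resulting processor list, assuming a lightweight stand-in object that exposes `generation_config` as a dict (which is all this helper reads):

```python
import torch
from types import SimpleNamespace

cfg = SimpleNamespace(
  generation_config={'temperature': 0.7, 'repetition_penalty': 1.1, 'top_p': 0.95, 'top_k': 40}
)
processors = prepare_logits_processor(cfg)

input_ids = torch.tensor([[1, 2, 3]])   # (batch, seq_len) of already-generated token ids
logits = torch.randn(1, 32000)          # (batch, vocab_size) raw scores for the next token
warped = processors(input_ids, logits)  # same shape, with temperature/top-p/top-k applied
print(warped.shape)
```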
189,384 | SEQLEN_KEYS = ['max_sequence_length', 'seq_length', 'max_position_embeddings', 'max_seq_len', 'model_max_length']
def get_context_length(config):
rope_scaling = getattr(config, 'rope_scaling', None)
rope_scaling_factor = config.rope_scaling['factor'] if rope_scaling else 1.0
for key in SEQLEN_KEYS:
if getattr(config, key, None) is not None:
return int(rope_scaling_factor * getattr(config, key))
return 2048 | null |
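A quick illustration with a bare namespace standing in for a `transformers.PretrainedConfig`:

```python
from types import SimpleNamespace

llama_like = SimpleNamespace(rope_scaling={'factor': 2.0}, max_position_embeddings=4096)
print(get_context_length(llama_like))         # 8192 = rope scaling factor * max_position_embeddings
print(get_context_length(SimpleNamespace()))  # 2048 fallback when no sequence-length key is present
```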
189,385 |
def is_sentence_complete(output):
return output.endswith(('.', '?', '!', '...', '。', '?', '!', '…', '"', "'", '”')) | null |
189,386 |
def is_partial_stop(output, stop_str):
for i in range(min(len(output), len(stop_str))):
if stop_str.startswith(output[-i:]):
return True
return False | null |
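In streaming decoders this check is typically used to hold back text that might be the beginning of a stop string; a tiny illustration:

```python
stop = '###'
print(is_partial_stop('Hello #', stop))      # True  -> buffer, the next token may complete '###'
print(is_partial_stop('Hello ##', stop))     # True
print(is_partial_stop('Hello world', stop))  # False -> safe to flush this chunk
```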
189,397 | from __future__ import annotations
import logging, os, warnings, typing as t
import openllm
from openllm_core._typing_compat import LiteralBackend
from openllm_core.utils import first_not_none, getenv, is_vllm_available
logger = logging.getLogger(__name__)
LiteralBackend = Literal['pt', 'vllm', 'triton', 'ggml']
def getenv(env, default=None, var=None, return_type=t.Any):
env_key = {env.upper(), f'OPENLLM_{env.upper()}'}
if var is not None:
env_key = set(var) | env_key
def callback(k: str) -> t.Any:
_var = os.getenv(k)
if _var and k.startswith('OPENLLM_'):
logger.warning("Using '%s' environment is deprecated, use '%s' instead.", k.upper(), k[8:].upper())
return _var
return t.cast(return_type, first_not_none(*(callback(k) for k in env_key), default=default))
def first_not_none(*args, default=None):
return next((arg for arg in args if arg is not None), default)
def Runner(
model_name: str,
ensure_available: bool = True, #
init_local: bool = False,
backend: LiteralBackend | None = None, #
llm_config: openllm.LLMConfig | None = None,
**attrs: t.Any,
):
if llm_config is None:
llm_config = openllm.AutoConfig.for_model(model_name)
if not ensure_available:
logger.warning(
"'ensure_available=False' won't have any effect as LLM will always check to download the model on initialisation."
)
model_id = attrs.get('model_id', os.getenv('OPENLLM_MODEL_ID', llm_config['default_id']))
warnings.warn(
f"""\
Using 'openllm.Runner' is now deprecated. Make sure to switch to the following syntax:
```python
llm = openllm.LLM('{model_id}')
svc = bentoml.Service('...', runners=[llm.runner])
@svc.api(...)
async def chat(input: str) -> str:
async for it in llm.generate_iterator(input): print(it)
```""",
DeprecationWarning,
stacklevel=2,
)
attrs.update({
'model_id': model_id,
'quantize': getenv('QUANTIZE', var=['QUANTISE'], default=attrs.get('quantize', None)), #
'serialisation': getenv(
'serialization', default=attrs.get('serialisation', llm_config['serialisation']), var=['SERIALISATION']
),
})
  # XXX: Move this back to a Runnable implementation
return openllm.LLM(
backend=first_not_none(backend, default='vllm' if is_vllm_available() else 'pt'),
llm_config=llm_config,
embedded=init_local,
**attrs,
).runner | null |
189,398 | from __future__ import annotations
import openllm, traceback, logging, time, pathlib, pydantic, typing as t
from http import HTTPStatus
from openllm_core.exceptions import ModelNotFound, OpenLLMException, ValidationError
from openllm_core.utils import gen_random_uuid, resolve_filepath
from openllm_core.protocol.openai import (
ChatCompletionRequest,
ChatCompletionResponseChoice,
ChatCompletionResponseStreamChoice,
ChatCompletionStreamResponse,
ChatMessage,
CompletionRequest,
CompletionResponse,
ChatCompletionResponse,
Delta,
ErrorResponse,
NotSupportedError,
LogProbs,
UsageInfo,
)
from starlette.requests import Request
from huggingface_hub import scan_cache_dir
class Error(pydantic.BaseModel):
error: ErrorResponse
class OpenLLMException(Exception):
"""Base class for all OpenLLM exceptions. This shares similar interface with BentoMLException."""
error_code = HTTPStatus.INTERNAL_SERVER_ERROR
def __init__(self, message: str):
self.message = message
super().__init__(message)
class ErrorResponse(pydantic.BaseModel):
message: str
type: str
object: str = 'error'
param: t.Optional[str] = None
code: t.Optional[str] = None
def error_response(exception: type[OpenLLMException], message: str) -> ErrorResponse:
return Error(
error=ErrorResponse(message=message, type=str(exception.__qualname__), code=str(exception.error_code.value))
) | null |
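A hedged sketch of how the envelope is typically turned into an HTTP response; the `_TeapotError` subclass is purely illustrative, real handlers use the exceptions imported above (e.g. `ModelNotFound`, `ValidationError`):

```python
from http import HTTPStatus
from starlette.responses import JSONResponse

class _TeapotError(OpenLLMException):
  error_code = HTTPStatus.IM_A_TEAPOT

payload = error_response(_TeapotError, 'Example error message.')
response = JSONResponse(payload.model_dump(), status_code=_TeapotError.error_code.value)
```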
189,399 | from __future__ import annotations
import os, logging, traceback, pathlib, sys, fs, click, enum, inflection, bentoml, orjson, openllm, openllm_core, platform, typing as t
from ._helpers import recommended_instance_type
from openllm_core.utils import (
DEBUG_ENV_VAR,
QUIET_ENV_VAR,
SHOW_CODEGEN,
check_bool_env,
compose,
first_not_none,
dantic,
gen_random_uuid,
get_debug_mode,
get_quiet_mode,
normalise_model_name,
)
from openllm_core._typing_compat import (
LiteralQuantise,
LiteralSerialisation,
LiteralDtype,
get_literal_args,
TypedDict,
)
from openllm_cli import termui
The provided code snippet includes necessary dependencies for implementing the `cli` function. Write a Python function `def cli() -> None` to solve the following problem:
\b ██████╗ ██████╗ ███████╗███╗ ██╗██╗ ██╗ ███╗ ███╗ ██╔═══██╗██╔══██╗██╔════╝████╗ ██║██║ ██║ ████╗ ████║ ██║ ██║██████╔╝█████╗ ██╔██╗ ██║██║ ██║ ██╔████╔██║ ██║ ██║██╔═══╝ ██╔══╝ ██║╚██╗██║██║ ██║ ██║╚██╔╝██║ ╚██████╔╝██║ ███████╗██║ ╚████║███████╗███████╗██║ ╚═╝ ██║ ╚═════╝ ╚═╝ ╚══════╝╚═╝ ╚═══╝╚══════╝╚══════╝╚═╝ ╚═╝. \b An open platform for operating large language models in production. Fine-tune, serve, deploy, and monitor any LLMs with ease.
Here is the function:
def cli() -> None:
"""\b
██████╗ ██████╗ ███████╗███╗ ██╗██╗ ██╗ ███╗ ███╗
██╔═══██╗██╔══██╗██╔════╝████╗ ██║██║ ██║ ████╗ ████║
██║ ██║██████╔╝█████╗ ██╔██╗ ██║██║ ██║ ██╔████╔██║
██║ ██║██╔═══╝ ██╔══╝ ██║╚██╗██║██║ ██║ ██║╚██╔╝██║
╚██████╔╝██║ ███████╗██║ ╚████║███████╗███████╗██║ ╚═╝ ██║
╚═════╝ ╚═╝ ╚══════╝╚═╝ ╚═══╝╚══════╝╚══════╝╚═╝ ╚═╝.
\b
An open platform for operating large language models in production.
Fine-tune, serve, deploy, and monitor any LLMs with ease.
""" | \b ██████╗ ██████╗ ███████╗███╗ ██╗██╗ ██╗ ███╗ ███╗ ██╔═══██╗██╔══██╗██╔════╝████╗ ██║██║ ██║ ████╗ ████║ ██║ ██║██████╔╝█████╗ ██╔██╗ ██║██║ ██║ ██╔████╔██║ ██║ ██║██╔═══╝ ██╔══╝ ██║╚██╗██║██║ ██║ ██║╚██╔╝██║ ╚██████╔╝██║ ███████╗██║ ╚████║███████╗███████╗██║ ╚═╝ ██║ ╚═════╝ ╚═╝ ╚══════╝╚═╝ ╚═══╝╚══════╝╚══════╝╚═╝ ╚═╝. \b An open platform for operating large language models in production. Fine-tune, serve, deploy, and monitor any LLMs with ease. |
189,400 | from __future__ import annotations
import os, logging, traceback, pathlib, sys, fs, click, enum, inflection, bentoml, orjson, openllm, openllm_core, platform, typing as t
from ._helpers import recommended_instance_type
from openllm_core.utils import (
DEBUG_ENV_VAR,
QUIET_ENV_VAR,
SHOW_CODEGEN,
check_bool_env,
compose,
first_not_none,
dantic,
gen_random_uuid,
get_debug_mode,
get_quiet_mode,
normalise_model_name,
)
from openllm_core._typing_compat import (
LiteralQuantise,
LiteralSerialisation,
LiteralDtype,
get_literal_args,
TypedDict,
)
from openllm_cli import termui
def parse_device_callback(
_: click.Context, param: click.Parameter, value: tuple[tuple[str], ...] | None
) -> t.Tuple[str, ...] | None:
if value is None:
return value
el: t.Tuple[str, ...] = tuple(i for k in value for i in k)
# NOTE: --device all is a special case
if len(el) == 1 and el[0] == 'all':
return tuple(map(str, openllm.utils.available_devices()))
return el
None,
'--version',
'-v',
package_name=_PACKAGE_NAME,
message=f'{_PACKAGE_NAME}, %(version)s (compiled: {openllm.COMPILED})\nPython ({platform.python_implementation()}) {platform.python_version()}',
type=str,
default=None,
help='Optional bento version for this BentoLLM. Default is the model revision.'
type=click.Choice(['tag', 'default']),
default='default',
show_default=True,
help="Output log format. '-o tag' to display only bento tag."
def compose(*funcs):
return functools.reduce(lambda f1, f2: lambda *args, **kwargs: f1(f2(*args, **kwargs)), funcs)
def get_literal_args(typ: Any) -> Tuple[str, ...]:
return getattr(typ, '__args__', tuple())
LiteralSerialisation = Literal['safetensors', 'legacy']
def optimization_decorator(fn):
# NOTE: return device, quantize, serialisation, dtype, max_model_len, gpu_memory_utilization
optimization = [
click.option(
'--device',
type=dantic.CUDA,
multiple=True,
envvar='CUDA_VISIBLE_DEVICES',
callback=parse_device_callback,
help='Assign GPU devices (if available)',
show_envvar=True,
),
click.option(
'--dtype',
type=str,
envvar='DTYPE',
default='auto',
help="Optional dtype for casting tensors for running inference ['float16', 'float32', 'bfloat16', 'int8', 'int16']",
),
click.option(
'--quantise',
'--quantize',
'quantize',
type=str,
default=None,
envvar='QUANTIZE',
show_envvar=True,
help="""Dynamic quantization for running this LLM.
The following quantization strategies are supported:
- ``int8``: ``LLM.int8`` for [8-bit](https://arxiv.org/abs/2208.07339) quantization.
- ``int4``: ``SpQR`` for [4-bit](https://arxiv.org/abs/2306.03078) quantization.
- ``gptq``: ``GPTQ`` [quantization](https://arxiv.org/abs/2210.17323)
- ``awq``: ``AWQ`` [AWQ: Activation-aware Weight Quantization](https://arxiv.org/abs/2306.00978)
- ``squeezellm``: ``SqueezeLLM`` [SqueezeLLM: Dense-and-Sparse Quantization](https://arxiv.org/abs/2306.07629)
    > [!NOTE] The model can also be served with quantized weights.
""",
),
click.option(
'--serialisation',
'--serialization',
'serialisation',
type=click.Choice(get_literal_args(LiteralSerialisation)),
default=None,
show_default=True,
show_envvar=True,
envvar='OPENLLM_SERIALIZATION',
help="""Serialisation format for save/load LLM.
Currently the following strategies are supported:
- ``safetensors``: This will use safetensors format, which is synonymous to ``safe_serialization=True``.
> [!NOTE] Safetensors might not work for every cases, and you can always fallback to ``legacy`` if needed.
- ``legacy``: This will use PyTorch serialisation format, often as ``.bin`` files. This should be used if the model doesn't yet support safetensors.
""",
),
click.option(
'--max-model-len',
'--max_model_len',
'max_model_len',
type=int,
default=None,
help='Maximum sequence length for the model. If not specified, we will use the default value from the model config.',
),
click.option(
'--gpu-memory-utilization',
'--gpu_memory_utilization',
'gpu_memory_utilization',
default=0.9,
help='The percentage of GPU memory to be used for the model executor',
),
]
return compose(*optimization)(fn) | null |
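A hypothetical command showing how the composed option group attaches to a `click` command (the command name and body are illustrative only; the parameter names come from the options defined above):

```python
@click.command('inspect')
@click.argument('model_id', type=click.STRING)
@optimization_decorator
def inspect_command(model_id, device, dtype, quantize, serialisation, max_model_len, gpu_memory_utilization):
  click.echo(f'{model_id}: dtype={dtype}, quantize={quantize}, gpu_memory_utilization={gpu_memory_utilization}')

# e.g.  $ python my_cli.py inspect facebook/opt-1.3b --dtype float16 --quantise awq
```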
189,401 | from __future__ import annotations
import os, logging, traceback, pathlib, sys, fs, click, enum, inflection, bentoml, orjson, openllm, openllm_core, platform, typing as t
from ._helpers import recommended_instance_type
from openllm_core.utils import (
DEBUG_ENV_VAR,
QUIET_ENV_VAR,
SHOW_CODEGEN,
check_bool_env,
compose,
first_not_none,
dantic,
gen_random_uuid,
get_debug_mode,
get_quiet_mode,
normalise_model_name,
)
from openllm_core._typing_compat import (
LiteralQuantise,
LiteralSerialisation,
LiteralDtype,
get_literal_args,
TypedDict,
)
from openllm_cli import termui
type=str,
default=None,
help='Optional bento version for this BentoLLM. Default is the model revision.'
type=click.Choice(['tag', 'default']),
default='default',
help="Output log format. '-o tag' to display only bento tag."
def compose(*funcs):
def shared_decorator(fn):
shared = [
click.argument(
'model_id', type=click.STRING, metavar='[REMOTE_REPO/MODEL_ID | /path/to/local/model]', required=True
),
click.option(
'--revision',
'--bentomodel-version',
'--model-version',
'model_version',
type=click.STRING,
default=None,
help='Optional model revision to save for this model. It will be inferred automatically from model-id.',
),
click.option(
'--model-tag',
'--bentomodel-tag',
'model_tag',
type=click.STRING,
default=None,
help='Optional bentomodel tag to save for this model. It will be generated automatically based on model_id and model_version if not specified.',
),
]
return compose(*shared)(fn) | null |
189,402 | from __future__ import annotations
import os, logging, traceback, pathlib, sys, fs, click, enum, inflection, bentoml, orjson, openllm, openllm_core, platform, typing as t
from ._helpers import recommended_instance_type
from openllm_core.utils import (
DEBUG_ENV_VAR,
QUIET_ENV_VAR,
SHOW_CODEGEN,
check_bool_env,
compose,
first_not_none,
dantic,
gen_random_uuid,
get_debug_mode,
get_quiet_mode,
normalise_model_name,
)
from openllm_core._typing_compat import (
LiteralQuantise,
LiteralSerialisation,
LiteralDtype,
get_literal_args,
TypedDict,
)
from openllm_cli import termui
def get_debug_mode():
return check_bool_env(DEBUG_ENV_VAR, False) if (not DEBUG and DEBUG_ENV_VAR in os.environ) else DEBUG
def get_quiet_mode():
if QUIET_ENV_VAR in os.environ:
return check_bool_env(QUIET_ENV_VAR, False)
if DEBUG:
return False
return False
LiteralDtype = Literal['float16', 'float32', 'bfloat16', 'int8', 'int16']
LiteralSerialisation = Literal['safetensors', 'legacy']
LiteralQuantise = Literal['int8', 'int4', 'gptq', 'awq', 'squeezellm']
The provided code snippet includes necessary dependencies for implementing the `start_command` function. Write a Python function `def start_command( model_id: str, model_version: str | None, model_tag: str | None, timeout: int, device: t.Tuple[str, ...], quantize: LiteralQuantise | None, serialisation: LiteralSerialisation | None, dtype: LiteralDtype | t.Literal['auto', 'float'], max_model_len: int | None, gpu_memory_utilization: float, )` to solve the following problem:
Start any LLM as a REST server. \b ```bash $ openllm <start|start-http> <model_id> --<options> ... ```
Here is the function:
def start_command(
model_id: str,
model_version: str | None,
model_tag: str | None,
timeout: int,
device: t.Tuple[str, ...],
quantize: LiteralQuantise | None,
serialisation: LiteralSerialisation | None,
dtype: LiteralDtype | t.Literal['auto', 'float'],
max_model_len: int | None,
gpu_memory_utilization: float,
):
"""Start any LLM as a REST server.
\b
```bash
$ openllm <start|start-http> <model_id> --<options> ...
```
"""
import transformers
from _bentoml_impl.server import serve_http
from bentoml._internal.service.loader import load
from bentoml._internal.log import configure_server_logging
configure_server_logging()
trust_remote_code = check_bool_env('TRUST_REMOTE_CODE', False)
try:
# if given model_id is a private model, then we can use it directly
bentomodel = bentoml.models.get(model_id.lower())
model_id = bentomodel.path
except (ValueError, bentoml.exceptions.NotFound):
bentomodel = None
config = transformers.AutoConfig.from_pretrained(model_id, trust_remote_code=trust_remote_code)
for arch in config.architectures:
if arch in openllm_core.AutoConfig._architecture_mappings:
model_name = openllm_core.AutoConfig._architecture_mappings[arch]
break
else:
raise RuntimeError(f'Failed to determine config class for {model_id}')
llm_config = openllm_core.AutoConfig.for_model(model_name).model_construct_env()
if serialisation is None:
serialisation = llm_config['serialisation']
# TODO: support LoRA adapters
os.environ.update({
QUIET_ENV_VAR: str(openllm.utils.get_quiet_mode()),
DEBUG_ENV_VAR: str(openllm.utils.get_debug_mode()),
'MODEL_ID': model_id,
'MODEL_NAME': model_name,
'SERIALIZATION': serialisation,
'OPENLLM_CONFIG': llm_config.model_dump_json(),
'DTYPE': dtype,
'TRUST_REMOTE_CODE': str(trust_remote_code),
'MAX_MODEL_LEN': orjson.dumps(max_model_len).decode(),
'GPU_MEMORY_UTILIZATION': orjson.dumps(gpu_memory_utilization).decode(),
'SERVICES_CONFIG': orjson.dumps(
dict(
resources={'gpu' if device else 'cpu': len(device) if device else 'cpu_count'}, traffic=dict(timeout=timeout)
)
).decode(),
})
if quantize:
os.environ['QUANTIZE'] = str(quantize)
working_dir = os.path.abspath(os.path.dirname(__file__))
if sys.path[0] != working_dir:
sys.path.insert(0, working_dir)
load('.', working_dir=working_dir).inject_config()
serve_http('.', working_dir=working_dir) | Start any LLM as a REST server. \b ```bash $ openllm <start|start-http> <model_id> --<options> ... ``` |
189,403 | from __future__ import annotations
import os, logging, traceback, pathlib, sys, fs, click, enum, inflection, bentoml, orjson, openllm, openllm_core, platform, typing as t
from ._helpers import recommended_instance_type
from openllm_core.utils import (
DEBUG_ENV_VAR,
QUIET_ENV_VAR,
SHOW_CODEGEN,
check_bool_env,
compose,
first_not_none,
dantic,
gen_random_uuid,
get_debug_mode,
get_quiet_mode,
normalise_model_name,
)
from openllm_core._typing_compat import (
LiteralQuantise,
LiteralSerialisation,
LiteralDtype,
get_literal_args,
TypedDict,
)
from openllm_cli import termui
logger = logging.getLogger(__name__)
OPENLLM_FIGLET = """
██████╗ ██████╗ ███████╗███╗ ██╗██╗ ██╗ ███╗ ███╗
██╔═══██╗██╔══██╗██╔════╝████╗ ██║██║ ██║ ████╗ ████║
██║ ██║██████╔╝█████╗ ██╔██╗ ██║██║ ██║ ██╔████╔██║
██║ ██║██╔═══╝ ██╔══╝ ██║╚██╗██║██║ ██║ ██║╚██╔╝██║
╚██████╔╝██║ ███████╗██║ ╚████║███████╗███████╗██║ ╚═╝ ██║
╚═════╝ ╚═╝ ╚══════╝╚═╝ ╚═══╝╚══════╝╚══════╝╚═╝ ╚═╝.
"""
_SERVICE_FILE = pathlib.Path(os.path.abspath(__file__)).parent / '_service.py'
_SERVICE_VARS = '''\
# fmt: off
# GENERATED BY 'openllm build {__model_id__}'. DO NOT EDIT
import orjson,openllm_core.utils as coreutils
model_id='{__model_id__}'
model_name='{__model_name__}'
quantise=coreutils.getenv('quantize',default='{__model_quantise__}',var=['QUANTISE'])
serialisation=coreutils.getenv('serialization',default='{__model_serialization__}',var=['SERIALISATION'])
dtype=coreutils.getenv('dtype', default='{__model_dtype__}', var=['TORCH_DTYPE'])
trust_remote_code=coreutils.check_bool_env("TRUST_REMOTE_CODE",{__model_trust_remote_code__})
max_model_len={__max_model_len__}
gpu_memory_utilization={__gpu_memory_utilization__}
services_config=orjson.loads(coreutils.getenv('services_config',"""{__services_config__}"""))
'''
class ItemState(enum.Enum):
NOT_FOUND = 'NOT_FOUND'
ADDED = 'ADDED'
EXISTS = 'EXISTS'
OVERWRITE = 'OVERWRITE'
def construct_python_options(llm_config, llm_fs):
from bentoml._internal.bento.build_config import PythonOptions
from openllm.bundle._package import build_editable
packages = ['scipy', 'bentoml[tracing]>=1.2', 'openllm[vllm]>0.4']
if llm_config['requirements'] is not None:
packages.extend(llm_config['requirements'])
built_wheels = [build_editable(llm_fs.getsyspath('/'), p) for p in ('openllm_core', 'openllm_client', 'openllm')]
return PythonOptions(
packages=packages,
wheels=[llm_fs.getsyspath(f"/{i.split('/')[-1]}") for i in built_wheels] if all(i for i in built_wheels) else None,
lock_packages=False,
)
class EnvironmentEntry(TypedDict):
name: str
value: str
'--bento-version',
type=str,
default=None,
help='Optional bento version for this BentoLLM. Default is the model revision.',
default=None,
default='default',
def recommended_instance_type(model_id, bentomodel=None):
if bentomodel is not None:
size = sum(f.stat().st_size for f in pathlib.Path(resolve_filepath(model_id)).glob('**/*') if f.is_file())
else:
info = next(filter(lambda repo: repo.repo_id == model_id, scan_cache_dir().repos))
size = info.size_on_disk
  # find the first occurrence of the gpu_type in the recommended mapping such that "size" is less than or equal to 70% of the recommended size
for gpu, max_size in RECOMMENDED_MAPPING.items():
if size <= max_size * 0.7:
return gpu
def normalise_model_name(name):
return (
os.path.basename(resolve_filepath(name))
if validate_is_path(name)
else inflection.dasherize(name.replace('/', '--'))
)
def get_debug_mode():
return check_bool_env(DEBUG_ENV_VAR, False) if (not DEBUG and DEBUG_ENV_VAR in os.environ) else DEBUG
def get_quiet_mode():
if QUIET_ENV_VAR in os.environ:
return check_bool_env(QUIET_ENV_VAR, False)
if DEBUG:
return False
return False
def gen_random_uuid(prefix: str | None = None) -> str:
return '-'.join([prefix or 'openllm', str(uuid.uuid4().hex)])
def first_not_none(*args, default=None):
return next((arg for arg in args if arg is not None), default)
LiteralDtype = Literal['float16', 'float32', 'bfloat16', 'int8', 'int16']
LiteralSerialisation = Literal['safetensors', 'legacy']
LiteralQuantise = Literal['int8', 'int4', 'gptq', 'awq', 'squeezellm']
The provided code snippet includes necessary dependencies for implementing the `build_command` function. Write a Python function `def build_command( ctx: click.Context, /, model_id: str, model_version: str | None, model_tag: str | None, bento_version: str | None, bento_tag: str | None, overwrite: bool, device: t.Tuple[str, ...], timeout: int, quantize: LiteralQuantise | None, serialisation: LiteralSerialisation | None, dtype: LiteralDtype | t.Literal['auto', 'float'], max_model_len: int | None, gpu_memory_utilization: float, output: t.Literal['default', 'tag'], )` to solve the following problem:
Package a given models into a BentoLLM. \b ```bash $ openllm build google/flan-t5-large ``` \b > [!NOTE] > To run a container built from this Bento with GPU support, make sure > to have https://github.com/NVIDIA/nvidia-container-toolkit install locally. \b > [!IMPORTANT] > To build the bento with compiled OpenLLM, make sure to prepend HATCH_BUILD_HOOKS_ENABLE=1. Make sure that the deployment > target also use the same Python version and architecture as build machine.
Here is the function:
def build_command(
ctx: click.Context,
/,
model_id: str,
model_version: str | None,
model_tag: str | None,
bento_version: str | None,
bento_tag: str | None,
overwrite: bool,
device: t.Tuple[str, ...],
timeout: int,
quantize: LiteralQuantise | None,
serialisation: LiteralSerialisation | None,
dtype: LiteralDtype | t.Literal['auto', 'float'],
max_model_len: int | None,
gpu_memory_utilization: float,
output: t.Literal['default', 'tag'],
):
"""Package a given models into a BentoLLM.
\b
```bash
$ openllm build google/flan-t5-large
```
\b
> [!NOTE]
> To run a container built from this Bento with GPU support, make sure
> to have https://github.com/NVIDIA/nvidia-container-toolkit install locally.
\b
> [!IMPORTANT]
> To build the bento with compiled OpenLLM, make sure to prepend HATCH_BUILD_HOOKS_ENABLE=1. Make sure that the deployment
> target also use the same Python version and architecture as build machine.
"""
import transformers
from bentoml._internal.configuration.containers import BentoMLContainer
from bentoml._internal.configuration import set_quiet_mode
from bentoml._internal.log import configure_logging
from bentoml._internal.bento.build_config import BentoBuildConfig
from bentoml._internal.bento.build_config import DockerOptions
from bentoml._internal.bento.build_config import ModelSpec
if output == 'tag':
set_quiet_mode(True)
configure_logging()
trust_remote_code = check_bool_env('TRUST_REMOTE_CODE', False)
try:
# if given model_id is a private model, then we can use it directly
bentomodel = bentoml.models.get(model_id.lower())
model_id = bentomodel.path
_revision = bentomodel.tag.version
except (ValueError, bentoml.exceptions.NotFound):
bentomodel = None
_revision = None
config = transformers.AutoConfig.from_pretrained(model_id, trust_remote_code=trust_remote_code)
for arch in config.architectures:
if arch in openllm_core.AutoConfig._architecture_mappings:
model_name = openllm_core.AutoConfig._architecture_mappings[arch]
break
else:
raise RuntimeError(f'Failed to determine config class for {model_id}')
llm_config: openllm_core.LLMConfig = openllm_core.AutoConfig.for_model(model_name).model_construct_env()
_revision = first_not_none(_revision, getattr(config, '_commit_hash', None), default=gen_random_uuid())
if serialisation is None:
termui.warning(
f"Serialisation format is not specified. Defaulting to '{llm_config['serialisation']}'. Your model might not work with this format. Make sure to explicitly specify the serialisation format."
)
serialisation = llm_config['serialisation']
if bento_tag is None:
_bento_version = first_not_none(bento_version, default=_revision)
bento_tag = bentoml.Tag.from_taglike(f'{normalise_model_name(model_id)}-service:{_bento_version}'.lower().strip())
else:
bento_tag = bentoml.Tag.from_taglike(bento_tag)
state = ItemState.NOT_FOUND
try:
bento = bentoml.get(bento_tag)
if overwrite:
bentoml.delete(bento_tag)
state = ItemState.OVERWRITE
raise bentoml.exceptions.NotFound(f'Rebuilding existing Bento {bento_tag}') from None
state = ItemState.EXISTS
except bentoml.exceptions.NotFound:
if state != ItemState.OVERWRITE:
state = ItemState.ADDED
labels = {'library': 'vllm'}
service_config = dict(
resources={
'gpu' if device else 'cpu': len(device) if device else 'cpu_count',
'gpu_type': recommended_instance_type(model_id, bentomodel),
},
traffic=dict(timeout=timeout),
)
with fs.open_fs(f'temp://llm_{gen_random_uuid()}') as llm_fs:
logger.debug('Generating service vars %s (dir=%s)', model_id, llm_fs.getsyspath('/'))
script = _SERVICE_VARS.format(
__model_id__=model_id,
__model_name__=model_name,
__model_quantise__=quantize,
__model_dtype__=dtype,
__model_serialization__=serialisation,
__model_trust_remote_code__=trust_remote_code,
__max_model_len__=max_model_len,
__gpu_memory_utilization__=gpu_memory_utilization,
__services_config__=orjson.dumps(service_config).decode(),
)
models = []
if bentomodel is not None:
models.append(ModelSpec.from_item({'tag': str(bentomodel.tag), 'alias': bentomodel.tag.name}))
if SHOW_CODEGEN:
logger.info('Generated _service_vars.py:\n%s', script)
llm_fs.writetext('_service_vars.py', script)
with _SERVICE_FILE.open('r') as f:
service_src = f.read()
llm_fs.writetext(llm_config['service_name'], service_src)
bento = bentoml.Bento.create(
version=bento_tag.version,
build_ctx=llm_fs.getsyspath('/'),
build_config=BentoBuildConfig(
service=f"{llm_config['service_name']}:LLMService",
name=bento_tag.name,
labels=labels,
models=models,
envs=[
EnvironmentEntry(name=QUIET_ENV_VAR, value=str(openllm.utils.get_quiet_mode())),
EnvironmentEntry(name=DEBUG_ENV_VAR, value=str(openllm.utils.get_debug_mode())),
EnvironmentEntry(name='OPENLLM_CONFIG', value=llm_config.model_dump_json()),
EnvironmentEntry(name='NVIDIA_DRIVER_CAPABILITIES', value='compute,utility'),
],
description=f"OpenLLM service for {llm_config['start_name']}",
include=list(llm_fs.walk.files()),
exclude=['/venv', '/.venv', '__pycache__/', '*.py[cod]', '*$py.class'],
python=construct_python_options(llm_config, llm_fs),
docker=DockerOptions(python_version='3.11'),
),
).save(bento_store=BentoMLContainer.bento_store.get(), model_store=BentoMLContainer.model_store.get())
except Exception as err:
traceback.print_exc()
raise click.ClickException('Exception caught while building BentoLLM:\n' + str(err)) from err
if output == 'tag':
termui.echo(f'__tag__:{bento.tag}')
return
if not get_quiet_mode():
if state != ItemState.EXISTS:
termui.info(f"Successfully built Bento '{bento.tag}'.\n")
elif not overwrite:
termui.warning(f"Bento for '{model_id}' already exists [{bento}]. To overwrite it pass '--overwrite'.\n")
if not get_debug_mode():
termui.echo(OPENLLM_FIGLET)
termui.echo('📖 Next steps:\n', nl=False)
termui.echo(f'☁️ Deploy to BentoCloud:\n $ bentoml deploy {bento.tag} -n ${{DEPLOYMENT_NAME}}\n', nl=False)
termui.echo(
f'☁️ Update existing deployment on BentoCloud:\n $ bentoml deployment update --bento {bento.tag} ${{DEPLOYMENT_NAME}}\n',
nl=False,
)
termui.echo(f'🐳 Containerize BentoLLM:\n $ bentoml containerize {bento.tag} --opt progress=plain\n', nl=False)
return bento | Package a given models into a BentoLLM. \b ```bash $ openllm build google/flan-t5-large ``` \b > [!NOTE] > To run a container built from this Bento with GPU support, make sure > to have https://github.com/NVIDIA/nvidia-container-toolkit install locally. \b > [!IMPORTANT] > To build the bento with compiled OpenLLM, make sure to prepend HATCH_BUILD_HOOKS_ENABLE=1. Make sure that the deployment > target also use the same Python version and architecture as build machine. |
189,404 | from __future__ import annotations
import inspect, orjson, dataclasses, bentoml, functools, attr, openllm_core, traceback, openllm, typing as t
from openllm_core.utils import (
get_debug_mode,
is_vllm_available,
normalise_model_name,
gen_random_uuid,
dict_filter_none,
)
from openllm_core._typing_compat import LiteralQuantise, LiteralSerialisation, LiteralDtype
from openllm_core._schemas import GenerationOutput, GenerationInput
if t.TYPE_CHECKING:
from vllm import RequestOutput
def check_engine_args(_, attr: attr.Attribute[dict[str, t.Any]], v: dict[str, t.Any]) -> dict[str, t.Any]:
from vllm import AsyncEngineArgs
fields = dataclasses.fields(AsyncEngineArgs)
invalid_args = {k: v for k, v in v.items() if k not in {f.name for f in fields}}
if len(invalid_args) > 0:
raise ValueError(f'Invalid engine args: {list(invalid_args)}')
return v | null |
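A short sketch of where this attrs validator plugs in; the `EngineConfig` holder below is hypothetical and vllm must be installed for the field introspection to work:

```python
@attr.define
class EngineConfig:
  engine_args: dict[str, t.Any] = attr.field(factory=dict, validator=check_engine_args)

EngineConfig(engine_args={'gpu_memory_utilization': 0.9})  # accepted: matches an AsyncEngineArgs field
try:
  EngineConfig(engine_args={'not_a_real_flag': 1})
except ValueError as err:
  print(err)  # Invalid engine args: ['not_a_real_flag']
```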
189,405 | from __future__ import annotations
import dataclasses
import logging
import os
import sys
import typing as t
import torch
import transformers
import openllm
from functools import partial
from itertools import chain
from random import randint, randrange
import bitsandbytes as bnb
from datasets import load_dataset
def prepare_datasets(tokenizer, dataset_name=DATASET_NAME):
# Load dataset from the hub
dataset = load_dataset(dataset_name, split='train')
print(f'dataset size: {len(dataset)}')
print(dataset[randrange(len(dataset))])
# apply prompt template per sample
dataset = dataset.map(partial(template_dataset, tokenizer=tokenizer), remove_columns=list(dataset.features))
# print random sample
  print('Sample from dolly-v2 ds:', dataset[randrange(len(dataset))]['text'])
# tokenize and chunk dataset
lm_dataset = dataset.map(
lambda sample: tokenizer(sample['text']), batched=True, remove_columns=list(dataset.features)
).map(partial(chunk, chunk_length=2048), batched=True)
# Print total number of samples
print(f'Total number of samples: {len(lm_dataset)}')
return lm_dataset
def prepare_for_int4_training(
model_id: str, model_version: str | None = None, gradient_checkpointing: bool = True, bf16: bool = True
) -> tuple[peft.PeftModel, transformers.LlamaTokenizerFast]:
from peft.tuners.lora import LoraLayer
llm = openllm.LLM(
model_id,
revision=model_version,
quantize='int4',
bnb_4bit_compute_dtype=torch.bfloat16,
use_cache=not gradient_checkpointing,
device_map='auto',
)
print('Model summary:', llm.model)
# get lora target modules
modules = find_all_linear_names(llm.model)
print(f'Found {len(modules)} modules to quantize: {modules}')
model, tokenizer = llm.prepare('lora', use_gradient_checkpointing=gradient_checkpointing, target_modules=modules)
  # pre-process the model by upcasting the layer norms to float32 for more stable training
for name, module in model.named_modules():
if isinstance(module, LoraLayer):
if bf16:
module = module.to(torch.bfloat16)
if 'norm' in name:
module = module.to(torch.float32)
if 'lm_head' in name or 'embed_tokens' in name:
if hasattr(module, 'weight'):
if bf16 and module.weight.dtype == torch.float32:
module = module.to(torch.bfloat16)
return model, tokenizer
@dataclasses.dataclass
class TrainingArguments:
per_device_train_batch_size: int = dataclasses.field(default=1)
gradient_checkpointing: bool = dataclasses.field(default=True)
bf16: bool = dataclasses.field(default=torch.cuda.get_device_capability()[0] == 8)
learning_rate: float = dataclasses.field(default=5e-5)
num_train_epochs: int = dataclasses.field(default=3)
logging_steps: int = dataclasses.field(default=1)
report_to: str = dataclasses.field(default='none')
output_dir: str = dataclasses.field(default=os.path.join(os.getcwd(), 'outputs', 'llama'))
save_strategy: str = dataclasses.field(default='no')
@dataclasses.dataclass
class ModelArguments:
model_id: str = dataclasses.field(default=DEFAULT_MODEL_ID)
model_version: str = dataclasses.field(default=DEFAULT_MODEL_VERSION)
seed: int = dataclasses.field(default=42)
merge_weights: bool = dataclasses.field(default=False)
def train_loop(model_args: ModelArguments, training_args: TrainingArguments):
import peft
transformers.set_seed(model_args.seed)
model, tokenizer = prepare_for_int4_training(
model_args.model_id, gradient_checkpointing=training_args.gradient_checkpointing, bf16=training_args.bf16
)
datasets = prepare_datasets(tokenizer)
trainer = transformers.Trainer(
model=model,
args=dataclasses.replace(
transformers.TrainingArguments(training_args.output_dir), **dataclasses.asdict(training_args)
),
train_dataset=datasets,
data_collator=transformers.default_data_collator,
)
trainer.train()
if model_args.merge_weights:
# note that this requires a larger GPU as we will load the whole model into memory
# merge adapter weights with base model and save
# save int4 model
trainer.model.save_pretrained(training_args.output_dir, safe_serialization=False)
# gc mem
del model, trainer
torch.cuda.empty_cache()
model = peft.AutoPeftModelForCausalLM.from_pretrained(
training_args.output_dir, low_cpu_mem_usage=True, torch_dtype=torch.float16
)
# merge lora with base weights and save
model = model.merge_and_unload()
model.save_pretrained(
os.path.join(os.getcwd(), 'outputs', 'merged_llama_lora'), safe_serialization=True, max_shard_size='2GB'
)
else:
trainer.model.save_pretrained(os.path.join(training_args.output_dir, 'lora')) | null |
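The fine-tuning record above elides its command-line entry point. Below is a minimal driver sketch, assuming the two dataclasses are decorated with `@dataclasses.dataclass` as reconstructed above and that the file is run as a script; the use of `transformers.HfArgumentParser` here is an illustrative choice, not taken from the original record.
```python
# Hypothetical entry point for the QLoRA fine-tuning script above (not part of the original record).
import transformers

if __name__ == '__main__':
  parser = transformers.HfArgumentParser((ModelArguments, TrainingArguments))
  model_args, training_args = parser.parse_args_into_dataclasses()
  train_loop(model_args=model_args, training_args=training_args)
```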
189,406 | from __future__ import annotations
import dataclasses
import logging
import os
import sys
import typing as t
import transformers
import openllm
from datasets import load_dataset
if t.TYPE_CHECKING:
from peft import PeftModel
@dataclasses.dataclass
class TrainingArguments:
per_device_train_batch_size: int = dataclasses.field(default=4)
gradient_accumulation_steps: int = dataclasses.field(default=4)
warmup_steps: int = dataclasses.field(default=10)
max_steps: int = dataclasses.field(default=50)
learning_rate: float = dataclasses.field(default=3e-4)
fp16: bool = dataclasses.field(default=True)
logging_steps: int = dataclasses.field(default=1)
output_dir: str = dataclasses.field(default=os.path.join(os.getcwd(), 'outputs', 'opt'))
def load_trainer(
model: PeftModel, tokenizer: transformers.GPT2TokenizerFast, dataset_dict: t.Any, training_args: TrainingArguments
):
return transformers.Trainer(
model=model,
train_dataset=dataset_dict['train'],
args=dataclasses.replace(
transformers.TrainingArguments(training_args.output_dir), **dataclasses.asdict(training_args)
),
data_collator=transformers.DataCollatorForLanguageModeling(tokenizer, mlm=False),
) | null |
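A hedged usage sketch for `load_trainer`: the model id, dataset, and LoRA target modules below are illustrative placeholders, and `llm.prepare('lora', ...)` follows the pattern shown in the previous fine-tuning record rather than a documented guarantee.
```python
import openllm
from datasets import load_dataset

llm = openllm.LLM('facebook/opt-2.7b', quantize='int8')  # placeholder model id
model, tokenizer = llm.prepare('lora', target_modules=['q_proj', 'v_proj'])  # assumed target modules
raw = load_dataset('Abirate/english_quotes')  # placeholder dataset with a 'quote' column
dataset_dict = raw.map(lambda sample: tokenizer(sample['quote']), batched=True)
trainer = load_trainer(model, tokenizer, dataset_dict, TrainingArguments())
trainer.train()
```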
189,407 | from __future__ import annotations
import argparse
import asyncio
import logging
import typing as t
import openllm
async def main() -> int:
parser = argparse.ArgumentParser()
parser.add_argument('question', default=question)
if openllm.utils.in_notebook():
args = parser.parse_args(args=[question])
else:
args = parser.parse_args()
llm = openllm.LLM[t.Any, t.Any]('facebook/opt-2.7b')
prompt = Q.format(q=args.question)
logger.info('%s %s %s', '-' * 50, "Running with 'generate()'", '-' * 50)
res = await llm.generate(prompt)
logger.info('%s Response: %s', '=' * 10, res)
logger.info('%s %s %s', '-' * 50, "Running with 'generate()' with per-request arguments", '-' * 50)
res = await llm.generate(prompt, max_new_tokens=MAX_NEW_TOKENS)
logger.info('%s Response: %s', '=' * 10, res)
return 0
def _mp_fn(index: t.Any): # type: ignore
# For xla_spawn (TPUs)
asyncio.run(main()) | null |
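The record has no `__main__` guard; a conventional way to run the async example outside of TPU spawning would be:
```python
if __name__ == '__main__':
  raise SystemExit(asyncio.run(main()))
```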
189,408 | from __future__ import annotations
import logging
import string
import typing as t
import attr
import click
import inflection
import orjson
from bentoml_cli.utils import opt_callback
import openllm
from openllm_cli import termui
from openllm_cli._factory import model_complete_envvar
logger = logging.getLogger(__name__)
@attr.define
class PromptTemplate:
template: str
_input_variables: t.Sequence[str] = attr.field(init=False)
def __attrs_post_init__(self) -> None:
self._input_variables = default_formatter.extract_template_variables(self.template)
def with_options(self, **attrs: t.Any) -> PromptTemplate:
prompt_variables = {key: '{' + key + '}' if key not in attrs else attrs[key] for key in self._input_variables}
o = attr.evolve(self, template=self.template.format(**prompt_variables))
_object_setattr(o, '_input_variables', default_formatter.extract_template_variables(o.template))
return o
def format(self, **attrs: t.Any) -> str:
prompt_variables = {k: v for k, v in attrs.items() if k in self._input_variables}
try:
return self.template.format(**prompt_variables)
except KeyError as e:
raise RuntimeError(
f"Missing variable '{e.args[0]}' (required: {self._input_variables}) in the prompt template."
) from None
@click.option(
  '--add-generation-prompt/--no-add-generation-prompt',
  default=False,
  help='See https://huggingface.co/docs/transformers/main/chat_templating#what-template-should-i-use. This is only applicable if model-id is a HF model_id',
)
The provided code snippet includes necessary dependencies for implementing the `cli` function. Write a Python function `def cli( ctx: click.Context, /, model_id: str, prompt: str, prompt_template_file: t.IO[t.Any] | None, chat_template_file: t.IO[t.Any] | None, system_message: str | None, add_generation_prompt: bool, _memoized: dict[str, t.Any], **_: t.Any, ) -> str | None` to solve the following problem:
Helpers for generating prompts. \b It accepts remote HF model_ids as well as model name passed to `openllm start`. If you pass in a HF model_id, then it will use the tokenizer to generate the prompt. ```bash openllm get-prompt WizardLM/WizardCoder-15B-V1.0 "Hello there" ``` If you need change the prompt template, you can create the template file that contains the jina2 template through `--chat-template-file` See https://huggingface.co/docs/transformers/main/chat_templating#templates-for-chat-models for more details. \b ```bash openllm get-prompt WizardLM/WizardCoder-15B-V1.0 "Hello there" --chat-template-file template.jinja2 ``` \b If you pass a model name, then it will use OpenLLM configuration to generate the prompt. Note that this is mainly for utilities, as OpenLLM won't use these prompts to format for you. \b ```bash openllm get-prompt mistral "Hello there"
Here is the function:
def cli(
ctx: click.Context,
/,
model_id: str,
prompt: str,
prompt_template_file: t.IO[t.Any] | None,
chat_template_file: t.IO[t.Any] | None,
system_message: str | None,
add_generation_prompt: bool,
_memoized: dict[str, t.Any],
**_: t.Any,
) -> str | None:
"""Helpers for generating prompts.
\b
It accepts remote HF model_ids as well as model names passed to `openllm start`.
If you pass in a HF model_id, then it will use the tokenizer to generate the prompt.
```bash
openllm get-prompt WizardLM/WizardCoder-15B-V1.0 "Hello there"
```
If you need to change the prompt template, you can create a template file containing the jinja2 template and pass it through `--chat-template-file`
See https://huggingface.co/docs/transformers/main/chat_templating#templates-for-chat-models for more details.
\b
```bash
openllm get-prompt WizardLM/WizardCoder-15B-V1.0 "Hello there" --chat-template-file template.jinja2
```
\b
If you pass a model name, then it will use OpenLLM configuration to generate the prompt.
Note that this is mainly for utilities, as OpenLLM won't use these prompts to format for you.
\b
```bash
openllm get-prompt mistral "Hello there"
"""
_memoized = {k: v[0] for k, v in _memoized.items() if v}
if prompt_template_file and chat_template_file:
ctx.fail('prompt-template-file and chat-template-file are mutually exclusive.')
acceptable = set(openllm.CONFIG_MAPPING_NAMES.keys()) | set(
inflection.dasherize(name) for name in openllm.CONFIG_MAPPING_NAMES.keys()
)
if model_id in acceptable:
logger.warning(
'Using a default prompt from OpenLLM. Note that this prompt might not work for your intended usage.\n'
)
config = openllm.AutoConfig.for_model(model_id)
template = prompt_template_file.read() if prompt_template_file is not None else config.template
system_message = system_message or config.system_message
try:
formatted = (
PromptTemplate(template).with_options(system_message=system_message).format(instruction=prompt, **_memoized)
)
except RuntimeError as err:
logger.debug('Exception caught while formatting prompt: %s', err)
ctx.fail(str(err))
else:
import transformers
trust_remote_code = openllm.utils.check_bool_env('TRUST_REMOTE_CODE', False)
config = transformers.AutoConfig.from_pretrained(model_id, trust_remote_code=trust_remote_code)
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id, trust_remote_code=trust_remote_code)
if chat_template_file is not None:
chat_template_file = chat_template_file.read()
if system_message is None:
logger.warning('system-message is not provided, using a default inferred from the model architecture.\n')
for architecture in config.architectures:
if architecture in openllm.AutoConfig._CONFIG_MAPPING_NAMES_TO_ARCHITECTURE():
system_message = (
openllm.AutoConfig.infer_class_from_name(
openllm.AutoConfig._CONFIG_MAPPING_NAMES_TO_ARCHITECTURE()[architecture]
)
.model_construct_env()
.system_message
)
break
else:
ctx.fail(
f'Failed to infer system message from model architecture: {config.architectures}. Please pass in --system-message'
)
messages = [{'role': 'system', 'content': system_message}, {'role': 'user', 'content': prompt}]
formatted = tokenizer.apply_chat_template(
messages, chat_template=chat_template_file, add_generation_prompt=add_generation_prompt, tokenize=False
)
termui.echo(orjson.dumps({'prompt': formatted}, option=orjson.OPT_INDENT_2).decode(), fg='white')
ctx.exit(0) | Helpers for generating prompts. \b It accepts remote HF model_ids as well as model name passed to `openllm start`. If you pass in a HF model_id, then it will use the tokenizer to generate the prompt. ```bash openllm get-prompt WizardLM/WizardCoder-15B-V1.0 "Hello there" ``` If you need change the prompt template, you can create the template file that contains the jina2 template through `--chat-template-file` See https://huggingface.co/docs/transformers/main/chat_templating#templates-for-chat-models for more details. \b ```bash openllm get-prompt WizardLM/WizardCoder-15B-V1.0 "Hello there" --chat-template-file template.jinja2 ``` \b If you pass a model name, then it will use OpenLLM configuration to generate the prompt. Note that this is mainly for utilities, as OpenLLM won't use these prompts to format for you. \b ```bash openllm get-prompt mistral "Hello there" |
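For illustration, the `PromptTemplate` helper defined in this record can also be used directly. The template string below is made up, and the snippet assumes the module's elided `default_formatter` helper (and the `@attr.define` decorator reconstructed above) are available:
```python
template = PromptTemplate(template='{system_message}\n\nUser: {instruction}\nAssistant:')
partial = template.with_options(system_message='You are a helpful assistant.')
print(partial.format(instruction='Summarise what OpenLLM does.'))
```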
189,409 | from __future__ import annotations
import typing as t
import click
import inflection
import orjson
import bentoml
import openllm
from bentoml._internal.utils import human_readable_size
from openllm_cli import termui
from openllm_cli._factory import model_complete_envvar, model_name_argument
DictStrAny = t.Dict[str, t.Any]
The provided code snippet includes necessary dependencies for implementing the `cli` function. Write a Python function `def cli(model_name: str | None) -> DictStrAny` to solve the following problem:
List available models in local store to be used with OpenLLM.
Here is the function:
def cli(model_name: str | None) -> DictStrAny:
"""List available models in local store to be used with OpenLLM."""
models = tuple(inflection.dasherize(key) for key in openllm.CONFIG_MAPPING.keys())
ids_in_local_store = {
k: [
i
for i in bentoml.models.list()
if 'framework' in i.info.labels
and i.info.labels['framework'] == 'openllm'
and 'model_name' in i.info.labels
and i.info.labels['model_name'] == k
]
for k in models
}
if model_name is not None:
ids_in_local_store = {
k: [
i
for i in v
if 'model_name' in i.info.labels and i.info.labels['model_name'] == inflection.dasherize(model_name)
]
for k, v in ids_in_local_store.items()
}
ids_in_local_store = {k: v for k, v in ids_in_local_store.items() if v}
local_models = {
k: [{'tag': str(i.tag), 'size': human_readable_size(openllm.utils.calc_dir_size(i.path))} for i in val]
for k, val in ids_in_local_store.items()
}
termui.echo(orjson.dumps(local_models, option=orjson.OPT_INDENT_2).decode(), fg='white')
return local_models | List available models in local store to be used with OpenLLM. |
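A standalone sketch of the same local-store filtering without the click wiring, purely to illustrate how the labels are used:
```python
import bentoml

by_model: dict[str, list[str]] = {}
for model in bentoml.models.list():
  labels = model.info.labels
  if labels.get('framework') == 'openllm' and 'model_name' in labels:
    by_model.setdefault(labels['model_name'], []).append(str(model.tag))
print(by_model)
```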
189,410 | from __future__ import annotations
import importlib.machinery
import logging
import os
import pkgutil
import subprocess
import sys
import tempfile
import typing as t
import click
import jupytext
import nbformat
import yaml
from openllm_cli import playground, termui
from openllm_core.utils import is_jupyter_available, is_jupytext_available, is_notebook_available
logger = logging.getLogger(__name__)
def load_notebook_metadata() -> DictStrAny:
with open(os.path.join(os.path.dirname(playground.__file__), '_meta.yml'), 'r') as f:
content = yaml.safe_load(f)
if not all('description' in k for k in content.values()):
raise ValueError("Invalid metadata file. All entries must have a 'description' key.")
return content
@click.option(
  '--port',
  envvar='JUPYTER_PORT',
  show_envvar=True,
  show_default=True,
  default=8888,
  help='Default port for Jupyter server',
)
The provided code snippet includes necessary dependencies for implementing the `cli` function. Write a Python function `def cli(ctx: click.Context, output_dir: str | None, port: int) -> None` to solve the following problem:
OpenLLM Playground. A collections of notebooks to explore the capabilities of OpenLLM. This includes notebooks for fine-tuning, inference, and more. All of the script available in the playground can also be run directly as a Python script: For example: \b ```bash python -m openllm.playground.falcon_tuned --help ``` \b > [!NOTE] > This command requires Jupyter to be installed. Install it with 'pip install "openllm[playground]"'
Here is the function:
def cli(ctx: click.Context, output_dir: str | None, port: int) -> None:
"""OpenLLM Playground.
A collection of notebooks to explore the capabilities of OpenLLM.
This includes notebooks for fine-tuning, inference, and more.
All of the scripts available in the playground can also be run directly as Python scripts:
For example:
\b
```bash
python -m openllm.playground.falcon_tuned --help
```
\b
> [!NOTE]
> This command requires Jupyter to be installed. Install it with 'pip install "openllm[playground]"'
"""
if not is_jupyter_available() or not is_jupytext_available() or not is_notebook_available():
raise RuntimeError(
"Playground requires 'jupyter', 'jupytext', and 'notebook'. Install it with 'pip install \"openllm[playground]\"'"
)
metadata = load_notebook_metadata()
_temp_dir = False
if output_dir is None:
_temp_dir = True
output_dir = tempfile.mkdtemp(prefix='openllm-playground-')
else:
os.makedirs(os.path.abspath(os.path.expandvars(os.path.expanduser(output_dir))), exist_ok=True)
termui.echo('The playground notebooks will be saved to: ' + os.path.abspath(output_dir), fg='blue')
for module in pkgutil.iter_modules(playground.__path__):
if module.ispkg or os.path.exists(os.path.join(output_dir, module.name + '.ipynb')):
logger.debug(
'Skipping: %s (%s)', module.name, 'File already exists' if not module.ispkg else f'{module.name} is a module'
)
continue
if not isinstance(module.module_finder, importlib.machinery.FileFinder):
continue
termui.echo('Generating notebook for: ' + module.name, fg='magenta')
markdown_cell = nbformat.v4.new_markdown_cell(metadata[module.name]['description'])
f = jupytext.read(os.path.join(module.module_finder.path, module.name + '.py'))
f.cells.insert(0, markdown_cell)
jupytext.write(f, os.path.join(output_dir, module.name + '.ipynb'), fmt='notebook')
try:
subprocess.check_output([
sys.executable,
'-m',
'jupyter',
'notebook',
'--notebook-dir',
output_dir,
'--port',
str(port),
'--no-browser',
'--debug',
])
except subprocess.CalledProcessError as e:
termui.echo(e.output, fg='red')
raise click.ClickException(f'Failed to start a jupyter server:\n{e}') from None
except KeyboardInterrupt:
termui.echo('\nShutting down Jupyter server...', fg='yellow')
if _temp_dir:
termui.echo('Note: You can access the generated notebooks in: ' + output_dir, fg='blue')
ctx.exit(0) | OpenLLM Playground. A collections of notebooks to explore the capabilities of OpenLLM. This includes notebooks for fine-tuning, inference, and more. All of the script available in the playground can also be run directly as a Python script: For example: \b ```bash python -m openllm.playground.falcon_tuned --help ``` \b > [!NOTE] > This command requires Jupyter to be installed. Install it with 'pip install "openllm[playground]"' |
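The core of the notebook generation above is a jupytext round-trip. A minimal sketch of that step in isolation (file names are illustrative):
```python
import jupytext
import nbformat

notebook = jupytext.read('falcon_tuned.py')  # parse a playground script as a notebook
notebook.cells.insert(0, nbformat.v4.new_markdown_cell('Description loaded from _meta.yml'))
jupytext.write(notebook, 'falcon_tuned.ipynb', fmt='notebook')
```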
189,411 | from __future__ import annotations
import shutil
import subprocess
import typing as t
import click
import psutil
from simple_di import Provide, inject
import bentoml
from bentoml._internal.configuration.containers import BentoMLContainer
from openllm_cli import termui
from openllm_cli._factory import bento_complete_envvar, machine_option
The provided code snippet includes necessary dependencies for implementing the `cli` function. Write a Python function `def cli( ctx: click.Context, bento: str, machine: bool, _bento_store: BentoStore = Provide[BentoMLContainer.bento_store] ) -> str | None` to solve the following problem:
Dive into a BentoLLM. This is synonymous to cd $(b get <bento>:<tag> -o path).
Here is the function:
def cli(
ctx: click.Context, bento: str, machine: bool, _bento_store: BentoStore = Provide[BentoMLContainer.bento_store]
) -> str | None:
"""Dive into a BentoLLM. This is synonymous to cd $(b get <bento>:<tag> -o path)."""
try:
bentomodel = _bento_store.get(bento)
except bentoml.exceptions.NotFound:
ctx.fail(f'Bento {bento} not found. Make sure to call `openllm build` first.')
if 'bundler' not in bentomodel.info.labels or bentomodel.info.labels['bundler'] != 'openllm.bundle':
ctx.fail(
f"Bento is either too old or not built with OpenLLM. Make sure to use ``openllm build {bentomodel.info.labels['start_name']}`` for correctness."
)
if machine:
return bentomodel.path
# copy and paste this into a new shell
if psutil.WINDOWS:
subprocess.check_call([shutil.which('dir') or 'dir'], cwd=bentomodel.path)
else:
subprocess.check_call([shutil.which('ls') or 'ls', '-Rrthla'], cwd=bentomodel.path)
ctx.exit(0) | Dive into a BentoLLM. This is synonymous to cd $(b get <bento>:<tag> -o path). |
189,412 | from __future__ import annotations
import typing as t
import click
from simple_di import Provide, inject
import bentoml
from bentoml._internal.bento.bento import BentoInfo
from bentoml._internal.bento.build_config import DockerOptions
from bentoml._internal.configuration.containers import BentoMLContainer
from bentoml._internal.container.generate import generate_containerfile
from openllm_cli import termui
from openllm_cli._factory import bento_complete_envvar
from openllm_core.utils import converter
def cli(ctx: click.Context, bento: str, _bento_store: BentoStore = Provide[BentoMLContainer.bento_store]) -> str:
try:
bentomodel = _bento_store.get(bento)
except bentoml.exceptions.NotFound:
ctx.fail(f'Bento {bento} not found. Make sure to call `openllm build` first.')
# The logic below is similar to bentoml._internal.container.construct_containerfile
with open(bentomodel.path_of('bento.yaml'), 'r') as f:
options = BentoInfo.from_yaml_file(f)
# NOTE: dockerfile_template is already included in the
# Dockerfile inside bento, and it is not relevant to
# construct_containerfile. Hence it is safe to set it to None here.
# See https://github.com/bentoml/BentoML/issues/3399.
docker_attrs = converter.unstructure(options.docker)
# NOTE: if users specify a dockerfile_template, we will
# save it to /env/docker/Dockerfile.template. This is necessary
# for the reconstruction of the Dockerfile.
if 'dockerfile_template' in docker_attrs and docker_attrs['dockerfile_template'] is not None:
docker_attrs['dockerfile_template'] = 'env/docker/Dockerfile.template'
doc = generate_containerfile(
docker=DockerOptions(**docker_attrs),
build_ctx=bentomodel.path,
conda=options.conda,
bento_fs=bentomodel._fs,
enable_buildkit=True,
add_header=True,
)
termui.echo(doc, fg='white')
return bentomodel.path | null |
189,413 | from __future__ import annotations
import click
import inflection
import orjson
import bentoml
import openllm
from bentoml._internal.utils import human_readable_size
from openllm_cli import termui
The provided code snippet includes necessary dependencies for implementing the `cli` function. Write a Python function `def cli(ctx: click.Context) -> None` to solve the following problem:
List available bentos built by OpenLLM.
Here is the function:
def cli(ctx: click.Context) -> None:
"""List available bentos built by OpenLLM."""
mapping = {
k: [
{
'tag': str(b.tag),
'size': human_readable_size(openllm.utils.calc_dir_size(b.path)),
'models': [
{'tag': str(m.tag), 'size': human_readable_size(openllm.utils.calc_dir_size(m.path))}
for m in (bentoml.models.get(_.tag) for _ in b.info.models)
],
}
for b in tuple(i for i in bentoml.list() if all(k in i.info.labels for k in {'start_name', 'bundler'}))
if b.info.labels['start_name'] == k
]
for k in tuple(inflection.dasherize(key) for key in openllm.CONFIG_MAPPING.keys())
}
mapping = {k: v for k, v in mapping.items() if v}
termui.echo(orjson.dumps(mapping, option=orjson.OPT_INDENT_2).decode(), fg='white')
ctx.exit(0) | List available bentos built by OpenLLM. |
189,414 | from __future__ import annotations
import itertools, logging, os, re, subprocess, sys, typing as t, bentoml, openllm_core, orjson
from simple_di import Provide, inject
from bentoml._internal.configuration.containers import BentoMLContainer
from openllm_core._typing_compat import LiteralSerialisation
from openllm_core.exceptions import OpenLLMException
from openllm_core.utils import WARNING_ENV_VAR, codegen, first_not_none, get_disable_warnings, is_vllm_available
if t.TYPE_CHECKING:
from bentoml._internal.bento import BentoStore
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import LiteralBackend, LiteralQuantise, LiteralString
def first_not_none(*args, default=None):
return next((arg for arg in args if arg is not None), default)
class LLMConfig(pydantic.BaseModel, abc.ABC):
model_config = pydantic.ConfigDict(extra='forbid', protected_namespaces=())
if t.TYPE_CHECKING:
metadata_config: ModelSettings = pydantic.Field(default_factory=dict)
generation_config: GenerationConfig = pydantic.Field(default_factory=lambda: GenerationConfig.model_construct())
_done_initialisation = False
def __setattr__(self, attr: str, value: t.Any) -> None:
if attr in _reserved_namespace and self._done_initialisation:
raise ForbiddenAttributeError(
f'{attr} should not be set during runtime as these values will be reflected during runtime. Instead, create a custom subclass of {self.__class__.__name__}.'
)
super().__setattr__(attr, value)
def __pydantic_init_subclass__(cls, **_: t.Any):
if any(i not in cls.model_fields for i in ('metadata_config', 'generation_config')):
raise TypeError(f'{cls.__name__} must have `metadata_config` and `generation_config` attributes.')
def model_post_init(self, *_: t.Any):
_DEFAULT.update(self.metadata_config)
self.metadata_config = _DEFAULT
self._done_initialisation = True
# fmt: off
# update-config-stubs.py: start
# NOTE: ModelSettings arguments
def __getitem__(self, item: t.Literal['default_id']) -> str: ...
def __getitem__(self, item: t.Literal['model_ids']) -> ListStr: ...
def __getitem__(self, item: t.Literal['architecture']) -> str: ...
def __getitem__(self, item: t.Literal['url']) -> str: ...
def __getitem__(self, item: t.Literal['serialisation']) -> LiteralSerialisation: ...
def __getitem__(self, item: t.Literal['trust_remote_code']) -> bool: ...
def __getitem__(self, item: t.Literal['service_name']) -> str: ...
def __getitem__(self, item: t.Literal['requirements']) -> t.Optional[ListStr]: ...
def __getitem__(self, item: t.Literal['model_type']) -> t.Literal['causal_lm', 'seq2seq_lm']: ...
def __getitem__(self, item: t.Literal['timeout']) -> int: ...
def __getitem__(self, item: t.Literal['fine_tune_strategies']) -> t.Tuple[t.Dict[str, t.Any], ...]: ...
# NOTE: GenerationConfig arguments
def __getitem__(self, item: t.Literal['min_length']) -> int: ...
def __getitem__(self, item: t.Literal['min_new_tokens']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['max_time']) -> t.Optional[float]: ...
def __getitem__(self, item: t.Literal['num_beams']) -> int: ...
def __getitem__(self, item: t.Literal['num_beam_groups']) -> int: ...
def __getitem__(self, item: t.Literal['penalty_alpha']) -> t.Optional[float]: ...
def __getitem__(self, item: t.Literal['use_cache']) -> bool: ...
def __getitem__(self, item: t.Literal['typical_p']) -> float: ...
def __getitem__(self, item: t.Literal['epsilon_cutoff']) -> float: ...
def __getitem__(self, item: t.Literal['eta_cutoff']) -> float: ...
def __getitem__(self, item: t.Literal['diversity_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['repetition_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['encoder_repetition_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['no_repeat_ngram_size']) -> int: ...
def __getitem__(self, item: t.Literal['bad_words_ids']) -> t.Optional[t.List[t.List[int]]]: ...
def __getitem__(self, item: t.Literal['force_words_ids']) -> t.Optional[t.Union[t.List[t.List[int]], t.List[t.List[t.List[int]]]]]: ...
def __getitem__(self, item: t.Literal['renormalize_logits']) -> bool: ...
def __getitem__(self, item: t.Literal['forced_bos_token_id']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['forced_eos_token_id']) -> t.Optional[t.Union[int, t.List[int]]]: ...
def __getitem__(self, item: t.Literal['remove_invalid_values']) -> bool: ...
def __getitem__(self, item: t.Literal['exponential_decay_length_penalty']) -> t.Optional[t.Tuple[int, float]]: ...
def __getitem__(self, item: t.Literal['suppress_tokens']) -> t.Optional[t.List[int]]: ...
def __getitem__(self, item: t.Literal['begin_suppress_tokens']) -> t.Optional[t.List[int]]: ...
def __getitem__(self, item: t.Literal['forced_decoder_ids']) -> t.Optional[t.List[t.List[int]]]: ...
def __getitem__(self, item: t.Literal['num_return_sequences']) -> int: ...
def __getitem__(self, item: t.Literal['output_attentions']) -> bool: ...
def __getitem__(self, item: t.Literal['output_hidden_states']) -> bool: ...
def __getitem__(self, item: t.Literal['output_scores']) -> bool: ...
def __getitem__(self, item: t.Literal['pad_token_id']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['bos_token_id']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['eos_token_id']) -> t.Optional[t.Union[int, t.List[int]]]: ...
def __getitem__(self, item: t.Literal['encoder_no_repeat_ngram_size']) -> int: ...
def __getitem__(self, item: t.Literal['decoder_start_token_id']) -> int: ...
def __getitem__(self, item: t.Literal['n']) -> int: ...
def __getitem__(self, item: t.Literal['best_of']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['presence_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['frequency_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['temperature']) -> float: ...
def __getitem__(self, item: t.Literal['top_k']) -> int: ...
def __getitem__(self, item: t.Literal['top_p']) -> float: ...
def __getitem__(self, item: t.Literal['min_p']) -> float: ...
def __getitem__(self, item: t.Literal['use_beam_search']) -> bool: ...
def __getitem__(self, item: t.Literal['length_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['early_stopping']) -> bool: ...
def __getitem__(self, item: t.Literal['stop']) -> t.Optional[t.Union[str, t.List[str]]]: ...
def __getitem__(self, item: t.Literal['stop_token_ids']) -> t.Optional[t.List[int]]: ...
def __getitem__(self, item: t.Literal['include_stop_str_in_output']) -> bool: ...
def __getitem__(self, item: t.Literal['ignore_eos']) -> bool: ...
def __getitem__(self, item: t.Literal['max_tokens']) -> int: ...
def __getitem__(self, item: t.Literal['logprobs']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['prompt_logprobs']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['skip_special_tokens']) -> bool: ...
def __getitem__(self, item: t.Literal['spaces_between_special_tokens']) -> bool: ...
def __getitem__(self, item: t.Literal['logits_processors']) -> t.Optional[t.List[LogitsProcessor]]: ...
def __getitem__(self, item: t.Literal['max_new_tokens']) -> int: ...
def __getitem__(self, item: t.Literal['start_name']) -> str: ...
def __getitem__(self, item: t.Literal['model_name']) -> str: ...
# update-config-stubs.py: stop
# fmt: on
def __getitem__(self, item: t.Any) -> t.Any:
if item is None:
raise TypeError(f"{self} doesn't understand how to index None.")
item = inflection.underscore(item)
if item in _reserved_namespace:
raise ForbiddenAttributeError(
f"'{item}' is a reserved namespace for {self.__class__} and should not be access nor modified."
)
# backward compatible
if item == 'max_new_tokens':
item = 'max_tokens'
if self.model_extra and item in self.model_extra:
return self.model_extra[item]
elif hasattr(self.generation_config, item):
return getattr(self.generation_config, item)
elif item in self.metadata_config:
return self.metadata_config[item]
elif hasattr(self, item):
return getattr(self, item)
elif item in {'start_name', 'model_name'}: # backward compatible
from .config.configuration_auto import CONFIG_TO_ALIAS_NAMES
if (cls_name := self.__class__.__name__) in CONFIG_TO_ALIAS_NAMES:
return CONFIG_TO_ALIAS_NAMES[cls_name]
raise KeyError(item)
def __contains__(self, item: t.Any) -> bool:
try:
self[item]
return True
except KeyError:
return False
def ser_model(self) -> dict[str, t.Any]:
return self.generation_config.model_dump()
def model_construct_env(cls, **attrs: t.Any) -> Self: # All LLMConfig init should start from here.
env_json_string = os.environ.get('OPENLLM_CONFIG', None)
config_from_env: DictStrAny = {}
if env_json_string is not None:
try:
config_from_env = orjson.loads(env_json_string)
except orjson.JSONDecodeError as e:
raise RuntimeError("Failed to parse 'OPENLLM_CONFIG' as valid JSON string.") from e
generation_config = {}
if 'generation_config' in attrs and 'sampling_config' in attrs: # backward compatibility
generation_config = attrs.pop('generation_config')
sampling_config = attrs.pop('sampling_config')
generation_config.update(sampling_config)
elif 'llm_config' in attrs: # NOTE: this is the new key
generation_config = attrs.pop('llm_config')
config_from_env.update({**generation_config, **cls().generation_config.model_dump(), **attrs})
config_from_env = {k: v for k, v in config_from_env.items() if v is not None}
return cls.model_construct(generation_config=GenerationConfig.model_construct(**config_from_env))
def inference_options(self, llm: openllm.LLM, backend: str | None = None) -> tuple[Self, t.Any]:
backend = backend if backend is not None else llm.__llm_backend__
framework = getattr(self, backend, None)
if framework is None:
raise ValueError(f'Unknown backend {backend}')
try:
return self, framework.build(self)
except AttributeError:
raise RuntimeError(f'Unknown backend {backend}') from None
class vllm:
def build(config: LLMConfig) -> vllm.SamplingParams:
top_p = 1.0 if config['temperature'] <= 1e-5 else config['top_p']
generation_config = config.generation_config.model_copy(update={'top_p': top_p})
return generation_config.build('vllm')
class pt:
def build(config: LLMConfig) -> LLMConfig:
return config.generation_config.build('pt')
class hf:
def build(config: LLMConfig) -> transformers.GenerationConfig:
return config.generation_config.build('pt')
def compatible_options(self, request: ChatCompletionRequest | CompletionRequest) -> dict[str, t.Any]:
from .protocol.openai import ChatCompletionRequest, CompletionRequest
if isinstance(request, (ChatCompletionRequest, CompletionRequest)):
return self.openai.build(self, request)
raise TypeError(f'Unknown request type {type(request)}')
class openai:
def build(config: LLMConfig, request: ChatCompletionRequest | CompletionRequest) -> dict[str, t.Any]:
d = dict(
temperature=first_not_none(request.temperature, config['temperature']),
top_p=first_not_none(request.top_p, config['top_p']),
top_k=first_not_none(request.top_k, config['top_k']),
best_of=first_not_none(request.best_of, config['best_of']),
n=first_not_none(request.n, default=config['n']),
stop=first_not_none(request.stop, default=None),
max_new_tokens=first_not_none(request.max_tokens, default=config['max_tokens']),
presence_penalty=first_not_none(request.presence_penalty, default=config['presence_penalty']),
frequency_penalty=first_not_none(request.frequency_penalty, default=config['frequency_penalty']),
)
if hasattr(request, 'logprobs'):
d['logprobs'] = first_not_none(request.logprobs, default=config['logprobs'])
return d
def template(self) -> str:
return '{system_message}{instruction}'
def system_message(self) -> str:
return ''
def chat_template(self) -> str | None:
return
def chat_messages(self) -> list[MessageParam]:
from ._schemas import MessageParam
return [
MessageParam(role='system', content='You are a helpful assistant'),
MessageParam(role='user', content="Hello, I'm looking for a chatbot that can help me with my work."),
MessageParam(role='assistant', content='Yes? What can I help you with?'),
]
LiteralQuantise = Literal['int8', 'int4', 'gptq', 'awq', 'squeezellm']
LiteralBackend = Literal['pt', 'vllm', 'triton', 'ggml']
The provided code snippet includes necessary dependencies for implementing the `_start` function. Write a Python function `def _start( model_id: str, timeout: int = 30, workers_per_resource: t.Literal['conserved', 'round_robin'] | float | None = None, device: tuple[str, ...] | t.Literal['all'] | None = None, quantize: LiteralQuantise | None = None, adapter_map: dict[LiteralString, str | None] | None = None, backend: LiteralBackend | None = None, additional_args: list[str] | None = None, cors: bool = False, __test__: bool = False, **_: t.Any, ) -> LLMConfig | subprocess.Popen[bytes]` to solve the following problem:
Python API to start a LLM server. These provides one-to-one mapping to CLI arguments. For all additional arguments, pass it as string to ``additional_args``. For example, if you want to pass ``--port 5001``, you can pass ``additional_args=["--port", "5001"]`` > [!NOTE] This will create a blocking process, so if you use this API, you can create a running sub thread > to start the server instead of blocking the main thread. ``openllm.start`` will invoke ``click.Command`` under the hood, so it behaves exactly the same as the CLI interaction. Args: model_id: The model id to start this LLMServer timeout: The server timeout workers_per_resource: Number of workers per resource assigned. See [resource scheduling](https://docs.bentoml.org/en/latest/guides/scheduling.html#resource-scheduling-strategy) for more information. By default, this is set to 1. > [!NOTE] ``--workers-per-resource`` will also accept the following strategies: > - ``round_robin``: Similar behaviour when setting ``--workers-per-resource 1``. This is useful for smaller models. > - ``conserved``: This will determine the number of available GPU resources, and only assign > one worker for the LLMRunner. For example, if ther are 4 GPUs available, then ``conserved`` is > equivalent to ``--workers-per-resource 0.25``. device: Assign GPU devices (if available) to this LLM. By default, this is set to ``None``. It also accepts 'all' argument to assign all available GPUs to this LLM. quantize: Quantize the model weights. This is only applicable for PyTorch models. Possible quantisation strategies: - int8: Quantize the model with 8bit (bitsandbytes required) - int4: Quantize the model with 4bit (bitsandbytes required) - gptq: Quantize the model with GPTQ (auto-gptq required) cors: Whether to enable CORS for this LLM. By default, this is set to ``False``. adapter_map: The adapter mapping of LoRA to use for this LLM. It accepts a dictionary of ``{adapter_id: adapter_name}``. backend: The backend to use for this LLM. By default, this is set to ``pt``. additional_args: Additional arguments to pass to ``openllm start``.
Here is the function:
def _start(
model_id: str,
timeout: int = 30,
workers_per_resource: t.Literal['conserved', 'round_robin'] | float | None = None,
device: tuple[str, ...] | t.Literal['all'] | None = None,
quantize: LiteralQuantise | None = None,
adapter_map: dict[LiteralString, str | None] | None = None,
backend: LiteralBackend | None = None,
additional_args: list[str] | None = None,
cors: bool = False,
__test__: bool = False,
**_: t.Any,
) -> LLMConfig | subprocess.Popen[bytes]:
"""Python API to start a LLM server. These provides one-to-one mapping to CLI arguments.
For all additional arguments, pass it as string to ``additional_args``. For example, if you want to
pass ``--port 5001``, you can pass ``additional_args=["--port", "5001"]``
> [!NOTE] This will create a blocking process, so if you use this API, consider starting the server in a
> separate thread to avoid blocking the main thread.
``openllm.start`` will invoke ``click.Command`` under the hood, so it behaves exactly the same as the CLI interaction.
Args:
model_id: The model id to start this LLMServer
timeout: The server timeout
workers_per_resource: Number of workers per resource assigned.
See [resource scheduling](https://docs.bentoml.org/en/latest/guides/scheduling.html#resource-scheduling-strategy)
for more information. By default, this is set to 1.
> [!NOTE] ``--workers-per-resource`` will also accept the following strategies:
> - ``round_robin``: Similar behaviour when setting ``--workers-per-resource 1``. This is useful for smaller models.
> - ``conserved``: This will determine the number of available GPU resources, and only assign
> one worker for the LLMRunner. For example, if there are 4 GPUs available, then ``conserved`` is
> equivalent to ``--workers-per-resource 0.25``.
device: Assign GPU devices (if available) to this LLM. By default, this is set to ``None``. It also accepts 'all'
argument to assign all available GPUs to this LLM.
quantize: Quantize the model weights. This is only applicable for PyTorch models.
Possible quantisation strategies:
- int8: Quantize the model with 8bit (bitsandbytes required)
- int4: Quantize the model with 4bit (bitsandbytes required)
- gptq: Quantize the model with GPTQ (auto-gptq required)
cors: Whether to enable CORS for this LLM. By default, this is set to ``False``.
adapter_map: The adapter mapping of LoRA to use for this LLM. It accepts a dictionary of ``{adapter_id: adapter_name}``.
backend: The backend to use for this LLM. By default, this is set to ``pt``.
additional_args: Additional arguments to pass to ``openllm start``.
"""
from .entrypoint import start_command
os.environ['BACKEND'] = openllm_core.utils.first_not_none(backend, default='vllm' if is_vllm_available() else 'pt')
args: list[str] = [model_id]
if timeout:
args.extend(['--server-timeout', str(timeout)])
if workers_per_resource:
args.extend([
'--workers-per-resource',
str(workers_per_resource) if not isinstance(workers_per_resource, str) else workers_per_resource,
])
if device and not os.environ.get('CUDA_VISIBLE_DEVICES'):
args.extend(['--device', ','.join(device)])
if quantize:
args.extend(['--quantize', str(quantize)])
if cors:
args.append('--cors')
if adapter_map:
args.extend(
list(
itertools.chain.from_iterable([['--adapter-id', f"{k}{':' + v if v else ''}"] for k, v in adapter_map.items()])
)
)
if additional_args:
args.extend(additional_args)
if __test__:
args.append('--return-process')
cmd = start_command
return cmd.main(args=args, standalone_mode=False) | Python API to start a LLM server. These provides one-to-one mapping to CLI arguments. For all additional arguments, pass it as string to ``additional_args``. For example, if you want to pass ``--port 5001``, you can pass ``additional_args=["--port", "5001"]`` > [!NOTE] This will create a blocking process, so if you use this API, you can create a running sub thread > to start the server instead of blocking the main thread. ``openllm.start`` will invoke ``click.Command`` under the hood, so it behaves exactly the same as the CLI interaction. Args: model_id: The model id to start this LLMServer timeout: The server timeout workers_per_resource: Number of workers per resource assigned. See [resource scheduling](https://docs.bentoml.org/en/latest/guides/scheduling.html#resource-scheduling-strategy) for more information. By default, this is set to 1. > [!NOTE] ``--workers-per-resource`` will also accept the following strategies: > - ``round_robin``: Similar behaviour when setting ``--workers-per-resource 1``. This is useful for smaller models. > - ``conserved``: This will determine the number of available GPU resources, and only assign > one worker for the LLMRunner. For example, if ther are 4 GPUs available, then ``conserved`` is > equivalent to ``--workers-per-resource 0.25``. device: Assign GPU devices (if available) to this LLM. By default, this is set to ``None``. It also accepts 'all' argument to assign all available GPUs to this LLM. quantize: Quantize the model weights. This is only applicable for PyTorch models. Possible quantisation strategies: - int8: Quantize the model with 8bit (bitsandbytes required) - int4: Quantize the model with 4bit (bitsandbytes required) - gptq: Quantize the model with GPTQ (auto-gptq required) cors: Whether to enable CORS for this LLM. By default, this is set to ``False``. adapter_map: The adapter mapping of LoRA to use for this LLM. It accepts a dictionary of ``{adapter_id: adapter_name}``. backend: The backend to use for this LLM. By default, this is set to ``pt``. additional_args: Additional arguments to pass to ``openllm start``. |
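A hedged usage sketch of the public wrapper (`openllm.start`, per the docstring above); the model id is illustrative and the `--port 5001` example mirrors the docstring:
```python
import openllm

openllm.start(
  'facebook/opt-2.7b',                      # illustrative model id
  backend='vllm',
  workers_per_resource='conserved',
  additional_args=['--port', '5001'],
)
```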
189,415 | from __future__ import annotations
import itertools, logging, os, re, subprocess, sys, typing as t, bentoml, openllm_core, orjson
from simple_di import Provide, inject
from bentoml._internal.configuration.containers import BentoMLContainer
from openllm_core._typing_compat import LiteralSerialisation
from openllm_core.exceptions import OpenLLMException
from openllm_core.utils import WARNING_ENV_VAR, codegen, first_not_none, get_disable_warnings, is_vllm_available
logger = logging.getLogger(__name__)
LiteralSerialisation = Literal['safetensors', 'legacy']
LiteralQuantise = Literal['int8', 'int4', 'gptq', 'awq', 'squeezellm']
class OpenLLMException(Exception):
"""Base class for all OpenLLM exceptions. This shares similar interface with BentoMLException."""
error_code = HTTPStatus.INTERNAL_SERVER_ERROR
def __init__(self, message: str):
self.message = message
super().__init__(message)
def get_disable_warnings():
return check_bool_env(WARNING_ENV_VAR, False)
def first_not_none(*args, default=None):
return next((arg for arg in args if arg is not None), default)
has_safetensors_weights = functools.partial(has_weights, extensions='safetensors')
The provided code snippet includes necessary dependencies for implementing the `_build` function. Write a Python function `def _build( model_id: str, model_version: str | None = None, bento_version: str | None = None, quantize: LiteralQuantise | None = None, adapter_map: dict[str, str | None] | None = None, build_ctx: str | None = None, enable_features: tuple[str, ...] | None = None, dockerfile_template: str | None = None, overwrite: bool = False, push: bool = False, force_push: bool = False, containerize: bool = False, serialisation: LiteralSerialisation | None = None, additional_args: list[str] | None = None, bento_store: BentoStore = Provide[BentoMLContainer.bento_store], ) -> bentoml.Bento` to solve the following problem:
Package a LLM into a BentoLLM. The LLM will be built into a BentoService with the following structure: if ``quantize`` is passed, it will instruct the model to be quantized dynamically during serving time. ``openllm.build`` will invoke ``click.Command`` under the hood, so it behaves exactly the same as ``openllm build`` CLI. Args: model_id: The model id to build this BentoLLM model_version: Optional model version for this given LLM bento_version: Optional bento veresion for this given BentoLLM quantize: Quantize the model weights. This is only applicable for PyTorch models. Possible quantisation strategies: - int8: Quantize the model with 8bit (bitsandbytes required) - int4: Quantize the model with 4bit (bitsandbytes required) - gptq: Quantize the model with GPTQ (auto-gptq required) adapter_map: The adapter mapping of LoRA to use for this LLM. It accepts a dictionary of ``{adapter_id: adapter_name}``. build_ctx: The build context to use for building BentoLLM. By default, it sets to current directory. enable_features: Additional OpenLLM features to be included with this BentoLLM. dockerfile_template: The dockerfile template to use for building BentoLLM. See https://docs.bentoml.com/en/latest/guides/containerization.html#dockerfile-template. overwrite: Whether to overwrite the existing BentoLLM. By default, this is set to ``False``. push: Whether to push the result bento to BentoCloud. Make sure to login with 'bentoml cloud login' first. containerize: Whether to containerize the Bento after building. '--containerize' is the shortcut of 'openllm build && bentoml containerize'. Note that 'containerize' and 'push' are mutually exclusive container_registry: Container registry to choose the base OpenLLM container image to build from. Default to ECR. serialisation: Serialisation for saving models. Default to 'safetensors', which is equivalent to `safe_serialization=True` additional_args: Additional arguments to pass to ``openllm build``. bento_store: Optional BentoStore for saving this BentoLLM. Default to the default BentoML local store. Returns: ``bentoml.Bento | str``: BentoLLM instance. This can be used to serve the LLM or can be pushed to BentoCloud.
Here is the function:
def _build(
model_id: str,
model_version: str | None = None,
bento_version: str | None = None,
quantize: LiteralQuantise | None = None,
adapter_map: dict[str, str | None] | None = None,
build_ctx: str | None = None,
enable_features: tuple[str, ...] | None = None,
dockerfile_template: str | None = None,
overwrite: bool = False,
push: bool = False,
force_push: bool = False,
containerize: bool = False,
serialisation: LiteralSerialisation | None = None,
additional_args: list[str] | None = None,
bento_store: BentoStore = Provide[BentoMLContainer.bento_store],
) -> bentoml.Bento:
"""Package a LLM into a BentoLLM.
The LLM will be built into a BentoService with the following structure:
if ``quantize`` is passed, it will instruct the model to be quantized dynamically during serving time.
``openllm.build`` will invoke ``click.Command`` under the hood, so it behaves exactly the same as ``openllm build`` CLI.
Args:
model_id: The model id to build this BentoLLM
model_version: Optional model version for this given LLM
bento_version: Optional bento version for this given BentoLLM
quantize: Quantize the model weights. This is only applicable for PyTorch models.
Possible quantisation strategies:
- int8: Quantize the model with 8bit (bitsandbytes required)
- int4: Quantize the model with 4bit (bitsandbytes required)
- gptq: Quantize the model with GPTQ (auto-gptq required)
adapter_map: The adapter mapping of LoRA to use for this LLM. It accepts a dictionary of ``{adapter_id: adapter_name}``.
build_ctx: The build context to use for building BentoLLM. By default, it sets to current directory.
enable_features: Additional OpenLLM features to be included with this BentoLLM.
dockerfile_template: The dockerfile template to use for building BentoLLM. See https://docs.bentoml.com/en/latest/guides/containerization.html#dockerfile-template.
overwrite: Whether to overwrite the existing BentoLLM. By default, this is set to ``False``.
push: Whether to push the result bento to BentoCloud. Make sure to login with 'bentoml cloud login' first.
containerize: Whether to containerize the Bento after building. '--containerize' is the shortcut of 'openllm build && bentoml containerize'.
Note that 'containerize' and 'push' are mutually exclusive
container_registry: Container registry to choose the base OpenLLM container image to build from. Default to ECR.
serialisation: Serialisation for saving models. Default to 'safetensors', which is equivalent to `safe_serialization=True`
additional_args: Additional arguments to pass to ``openllm build``.
bento_store: Optional BentoStore for saving this BentoLLM. Default to the default BentoML local store.
Returns:
``bentoml.Bento | str``: BentoLLM instance. This can be used to serve the LLM or can be pushed to BentoCloud.
"""
from openllm.serialisation.transformers.weights import has_safetensors_weights
args: list[str] = [
sys.executable,
'-m',
'openllm',
'build',
model_id,
'--machine',
'--quiet',
'--serialisation',
first_not_none(
serialisation, default='safetensors' if has_safetensors_weights(model_id, model_version) else 'legacy'
),
]
if quantize:
args.extend(['--quantize', quantize])
if containerize and push:
raise OpenLLMException("'containerize' and 'push' are currently mutually exclusive.")
if push:
args.extend(['--push'])
if containerize:
args.extend(['--containerize'])
if build_ctx:
args.extend(['--build-ctx', build_ctx])
if enable_features:
args.extend([f'--enable-features={f}' for f in enable_features])
if overwrite:
args.append('--overwrite')
if adapter_map:
args.extend([f"--adapter-id={k}{':' + v if v is not None else ''}" for k, v in adapter_map.items()])
if model_version:
args.extend(['--model-version', model_version])
if bento_version:
args.extend(['--bento-version', bento_version])
if dockerfile_template:
args.extend(['--dockerfile-template', dockerfile_template])
if additional_args:
args.extend(additional_args)
if force_push:
args.append('--force-push')
current_disable_warning = get_disable_warnings()
os.environ[WARNING_ENV_VAR] = str(True)
try:
output = subprocess.check_output(args, env=os.environ.copy(), cwd=build_ctx or os.getcwd())
except subprocess.CalledProcessError as e:
logger.error("Exception caught while building Bento for '%s'", model_id, exc_info=e)
if e.stderr:
raise OpenLLMException(e.stderr.decode('utf-8')) from None
raise OpenLLMException(str(e)) from None
matched = re.match(r'__object__:(\{.*\})$', output.decode('utf-8').strip())
if matched is None:
raise ValueError(
f"Failed to find tag from output: {output.decode('utf-8').strip()}\nNote: Output from 'openllm build' might not be correct. Please open an issue on GitHub."
)
os.environ[WARNING_ENV_VAR] = str(current_disable_warning)
try:
result = orjson.loads(matched.group(1))
except orjson.JSONDecodeError as e:
raise ValueError(
f"Failed to decode JSON from output: {output.decode('utf-8').strip()}\nNote: Output from 'openllm build' might not be correct. Please open an issue on GitHub."
) from e
return bentoml.get(result['tag'], _bento_store=bento_store) | Package a LLM into a BentoLLM. The LLM will be built into a BentoService with the following structure: if ``quantize`` is passed, it will instruct the model to be quantized dynamically during serving time. ``openllm.build`` will invoke ``click.Command`` under the hood, so it behaves exactly the same as ``openllm build`` CLI. Args: model_id: The model id to build this BentoLLM model_version: Optional model version for this given LLM bento_version: Optional bento veresion for this given BentoLLM quantize: Quantize the model weights. This is only applicable for PyTorch models. Possible quantisation strategies: - int8: Quantize the model with 8bit (bitsandbytes required) - int4: Quantize the model with 4bit (bitsandbytes required) - gptq: Quantize the model with GPTQ (auto-gptq required) adapter_map: The adapter mapping of LoRA to use for this LLM. It accepts a dictionary of ``{adapter_id: adapter_name}``. build_ctx: The build context to use for building BentoLLM. By default, it sets to current directory. enable_features: Additional OpenLLM features to be included with this BentoLLM. dockerfile_template: The dockerfile template to use for building BentoLLM. See https://docs.bentoml.com/en/latest/guides/containerization.html#dockerfile-template. overwrite: Whether to overwrite the existing BentoLLM. By default, this is set to ``False``. push: Whether to push the result bento to BentoCloud. Make sure to login with 'bentoml cloud login' first. containerize: Whether to containerize the Bento after building. '--containerize' is the shortcut of 'openllm build && bentoml containerize'. Note that 'containerize' and 'push' are mutually exclusive container_registry: Container registry to choose the base OpenLLM container image to build from. Default to ECR. serialisation: Serialisation for saving models. Default to 'safetensors', which is equivalent to `safe_serialization=True` additional_args: Additional arguments to pass to ``openllm build``. bento_store: Optional BentoStore for saving this BentoLLM. Default to the default BentoML local store. Returns: ``bentoml.Bento | str``: BentoLLM instance. This can be used to serve the LLM or can be pushed to BentoCloud. |
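A hedged usage sketch of the public wrapper (`openllm.build`, per the docstring above); the model id and options are illustrative:
```python
import openllm

bento = openllm.build(
  'facebook/opt-2.7b',
  quantize='int4',
  serialisation='safetensors',
  overwrite=True,
)
print(bento.tag)
```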
189,416 | from __future__ import annotations
import itertools, logging, os, re, subprocess, sys, typing as t, bentoml, openllm_core, orjson
from simple_di import Provide, inject
from bentoml._internal.configuration.containers import BentoMLContainer
from openllm_core._typing_compat import LiteralSerialisation
from openllm_core.exceptions import OpenLLMException
from openllm_core.utils import WARNING_ENV_VAR, codegen, first_not_none, get_disable_warnings, is_vllm_available
if t.TYPE_CHECKING:
from bentoml._internal.bento import BentoStore
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import LiteralBackend, LiteralQuantise, LiteralString
LiteralSerialisation = Literal['safetensors', 'legacy']
LiteralQuantise = Literal['int8', 'int4', 'gptq', 'awq', 'squeezellm']
LiteralBackend = Literal['pt', 'vllm', 'triton', 'ggml']
The provided code snippet includes necessary dependencies for implementing the `_import_model` function. Write a Python function `def _import_model( model_id: str, model_version: str | None = None, backend: LiteralBackend | None = None, quantize: LiteralQuantise | None = None, serialisation: LiteralSerialisation | None = None, additional_args: t.Sequence[str] | None = None, ) -> dict[str, t.Any]` to solve the following problem:
Import a LLM into local store. > [!NOTE] > If ``quantize`` is passed, the model weights will be saved as quantized weights. You should > only use this option if you want the weight to be quantized by default. Note that OpenLLM also > support on-demand quantisation during initial startup. ``openllm.import_model`` will invoke ``click.Command`` under the hood, so it behaves exactly the same as the CLI ``openllm import``. > [!NOTE] > ``openllm.start`` will automatically invoke ``openllm.import_model`` under the hood. Args: model_id: required model id for this given LLM model_version: Optional model version for this given LLM backend: The backend to use for this LLM. By default, this is set to ``pt``. quantize: Quantize the model weights. This is only applicable for PyTorch models. Possible quantisation strategies: - int8: Quantize the model with 8bit (bitsandbytes required) - int4: Quantize the model with 4bit (bitsandbytes required) - gptq: Quantize the model with GPTQ (auto-gptq required) serialisation: Type of model format to save to local store. If set to 'safetensors', then OpenLLM will save model using safetensors. Default behaviour is similar to ``safe_serialization=False``. additional_args: Additional arguments to pass to ``openllm import``. Returns: ``bentoml.Model``:BentoModel of the given LLM. This can be used to serve the LLM or can be pushed to BentoCloud.
Here is the function:
def _import_model(
model_id: str,
model_version: str | None = None,
backend: LiteralBackend | None = None,
quantize: LiteralQuantise | None = None,
serialisation: LiteralSerialisation | None = None,
additional_args: t.Sequence[str] | None = None,
) -> dict[str, t.Any]:
"""Import a LLM into local store.
> [!NOTE]
> If ``quantize`` is passed, the model weights will be saved as quantized weights. You should
> only use this option if you want the weight to be quantized by default. Note that OpenLLM also
> support on-demand quantisation during initial startup.
``openllm.import_model`` will invoke ``click.Command`` under the hood, so it behaves exactly the same as the CLI ``openllm import``.
> [!NOTE]
> ``openllm.start`` will automatically invoke ``openllm.import_model`` under the hood.
Args:
model_id: required model id for this given LLM
model_version: Optional model version for this given LLM
backend: The backend to use for this LLM. By default, this is set to ``pt``.
quantize: Quantize the model weights. This is only applicable for PyTorch models.
Possible quantisation strategies:
- int8: Quantize the model with 8bit (bitsandbytes required)
- int4: Quantize the model with 4bit (bitsandbytes required)
- gptq: Quantize the model with GPTQ (auto-gptq required)
serialisation: Type of model format to save to local store. If set to 'safetensors', then OpenLLM will save model using safetensors. Default behaviour is similar to ``safe_serialization=False``.
additional_args: Additional arguments to pass to ``openllm import``.
Returns:
``bentoml.Model``:BentoModel of the given LLM. This can be used to serve the LLM or can be pushed to BentoCloud.
"""
from .entrypoint import import_command
args = [model_id, '--quiet']
if backend is not None:
args.extend(['--backend', backend])
if model_version is not None:
args.extend(['--model-version', str(model_version)])
if quantize is not None:
args.extend(['--quantize', quantize])
if serialisation is not None:
args.extend(['--serialisation', serialisation])
if additional_args is not None:
args.extend(additional_args)
return import_command.main(args=args, standalone_mode=False) | Import an LLM into the local store. > [!NOTE] > If ``quantize`` is passed, the model weights will be saved as quantized weights. You should > only use this option if you want the weights to be quantized by default. Note that OpenLLM also > supports on-demand quantisation during initial startup. ``openllm.import_model`` will invoke ``click.Command`` under the hood, so it behaves exactly the same as the CLI ``openllm import``. > [!NOTE] > ``openllm.start`` will automatically invoke ``openllm.import_model`` under the hood. Args: model_id: required model id for this given LLM model_version: Optional model version for this given LLM backend: The backend to use for this LLM. By default, this is set to ``pt``. quantize: Quantize the model weights. This is only applicable for PyTorch models. Possible quantisation strategies: - int8: Quantize the model with 8bit (bitsandbytes required) - int4: Quantize the model with 4bit (bitsandbytes required) - gptq: Quantize the model with GPTQ (auto-gptq required) serialisation: Type of model format to save to local store. If set to 'safetensors', then OpenLLM will save model using safetensors. Default behaviour is similar to ``safe_serialization=False``. additional_args: Additional arguments to pass to ``openllm import``. Returns: ``bentoml.Model``: BentoModel of the given LLM. This can be used to serve the LLM or can be pushed to BentoCloud. |
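For illustration, a hedged usage sketch of the function above; the model id and options are placeholders, not a recommendation:
# Roughly equivalent to: openllm import facebook/opt-125m --quiet --backend pt --quantize int8 --serialisation safetensors
result = _import_model(
    'facebook/opt-125m',
    backend='pt',
    quantize='int8',              # requires bitsandbytes
    serialisation='safetensors',
)
print(result)                     # whatever `openllm import --quiet` returns in standalone mode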
189,417 | from __future__ import annotations
import itertools, logging, os, re, subprocess, sys, typing as t, bentoml, openllm_core, orjson
from simple_di import Provide, inject
from bentoml._internal.configuration.containers import BentoMLContainer
from openllm_core._typing_compat import LiteralSerialisation
from openllm_core.exceptions import OpenLLMException
from openllm_core.utils import WARNING_ENV_VAR, codegen, first_not_none, get_disable_warnings, is_vllm_available
if t.TYPE_CHECKING:
from bentoml._internal.bento import BentoStore
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import LiteralBackend, LiteralQuantise, LiteralString
The provided code snippet includes necessary dependencies for implementing the `_list_models` function. Write a Python function `def _list_models() -> dict[str, t.Any]` to solve the following problem:
List all available models within the local store.
Here is the function:
def _list_models() -> dict[str, t.Any]:
"""List all available models within the local store."""
from .entrypoint import models_command
return models_command.main(args=['--quiet'], standalone_mode=False) | List all available models within the local store. |
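A hedged usage sketch; the shape of the returned mapping is whatever ``openllm models --quiet`` emits:
models = _list_models()
for name, meta in models.items():   # assumed mapping: model name -> metadata
    print(name, meta)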
189,418 | from __future__ import annotations
import functools, logging, os, typing as t
import bentoml, openllm, click, inflection, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup
from click import shell_completion as sc
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import (
Concatenate,
DictStrAny,
LiteralBackend,
LiteralSerialisation,
ParamSpec,
AnyCallable,
get_literal_args,
)
from openllm_core.utils import DEBUG, compose, dantic, resolve_user_filepath
def bento_complete_envvar(ctx: click.Context, param: click.Parameter, incomplete: str) -> list[sc.CompletionItem]:
return [
sc.CompletionItem(str(it.tag), help='Bento')
for it in bentoml.list()
if str(it.tag).startswith(incomplete) and all(k in it.info.labels for k in {'start_name', 'bundler'})
] | null |
189,419 | from __future__ import annotations
import functools, logging, os, typing as t
import bentoml, openllm, click, inflection, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup
from click import shell_completion as sc
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import (
Concatenate,
DictStrAny,
LiteralBackend,
LiteralSerialisation,
ParamSpec,
AnyCallable,
get_literal_args,
)
from openllm_core.utils import DEBUG, compose, dantic, resolve_user_filepath
def model_complete_envvar(ctx: click.Context, param: click.Parameter, incomplete: str) -> list[sc.CompletionItem]:
return [
sc.CompletionItem(inflection.dasherize(it), help='Model')
for it in openllm.CONFIG_MAPPING
if it.startswith(incomplete)
] | null |
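For context, a hedged sketch of how a completion callback like the one above is typically attached via click's ``shell_complete`` hook (the command itself is illustrative):
import click

@click.command()
@click.argument('model_name', shell_complete=model_complete_envvar)  # click >= 8.0
def start(model_name):
    click.echo(model_name)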
189,420 | from __future__ import annotations
import functools, logging, os, typing as t
import bentoml, openllm, click, inflection, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup
from click import shell_completion as sc
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import (
Concatenate,
DictStrAny,
LiteralBackend,
LiteralSerialisation,
ParamSpec,
AnyCallable,
get_literal_args,
)
from openllm_core.utils import DEBUG, compose, dantic, resolve_user_filepath
logger = logging.getLogger(__name__)
class LLMConfig(pydantic.BaseModel, abc.ABC):
model_config = pydantic.ConfigDict(extra='forbid', protected_namespaces=())
if t.TYPE_CHECKING:
metadata_config: ModelSettings = pydantic.Field(default_factory=dict)
generation_config: GenerationConfig = pydantic.Field(default_factory=lambda: GenerationConfig.model_construct())
_done_initialisation = False
def __setattr__(self, attr: str, value: t.Any) -> None:
if attr in _reserved_namespace and self._done_initialisation:
raise ForbiddenAttributeError(
f'{attr} should not be set during runtime as these values will be reflected during runtime. Instead, you can create a custom LLM subclass {self.__class__.__name__}.'
)
super().__setattr__(attr, value)
def __pydantic_init_subclass__(cls, **_: t.Any):
if any(i not in cls.model_fields for i in ('metadata_config', 'generation_config')):
raise TypeError(f'{cls.__name__} must have a `metadata_config` and `generation_config` attribute.')
def model_post_init(self, *_: t.Any):
_DEFAULT.update(self.metadata_config)
self.metadata_config = _DEFAULT
self._done_initialisation = True
# fmt: off
# update-config-stubs.py: start
# NOTE: ModelSettings arguments
def __getitem__(self, item: t.Literal['default_id']) -> str: ...
def __getitem__(self, item: t.Literal['model_ids']) -> ListStr: ...
def __getitem__(self, item: t.Literal['architecture']) -> str: ...
def __getitem__(self, item: t.Literal['url']) -> str: ...
def __getitem__(self, item: t.Literal['serialisation']) -> LiteralSerialisation: ...
def __getitem__(self, item: t.Literal['trust_remote_code']) -> bool: ...
def __getitem__(self, item: t.Literal['service_name']) -> str: ...
def __getitem__(self, item: t.Literal['requirements']) -> t.Optional[ListStr]: ...
def __getitem__(self, item: t.Literal['model_type']) -> t.Literal['causal_lm', 'seq2seq_lm']: ...
def __getitem__(self, item: t.Literal['timeout']) -> int: ...
def __getitem__(self, item: t.Literal['fine_tune_strategies']) -> t.Tuple[t.Dict[str, t.Any], ...]: ...
# NOTE: GenerationConfig arguments
def __getitem__(self, item: t.Literal['min_length']) -> int: ...
def __getitem__(self, item: t.Literal['min_new_tokens']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['max_time']) -> t.Optional[float]: ...
def __getitem__(self, item: t.Literal['num_beams']) -> int: ...
def __getitem__(self, item: t.Literal['num_beam_groups']) -> int: ...
def __getitem__(self, item: t.Literal['penalty_alpha']) -> t.Optional[float]: ...
def __getitem__(self, item: t.Literal['use_cache']) -> bool: ...
def __getitem__(self, item: t.Literal['typical_p']) -> float: ...
def __getitem__(self, item: t.Literal['epsilon_cutoff']) -> float: ...
def __getitem__(self, item: t.Literal['eta_cutoff']) -> float: ...
def __getitem__(self, item: t.Literal['diversity_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['repetition_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['encoder_repetition_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['no_repeat_ngram_size']) -> int: ...
def __getitem__(self, item: t.Literal['bad_words_ids']) -> t.Optional[t.List[t.List[int]]]: ...
def __getitem__(self, item: t.Literal['force_words_ids']) -> t.Optional[t.Union[t.List[t.List[int]], t.List[t.List[t.List[int]]]]]: ...
def __getitem__(self, item: t.Literal['renormalize_logits']) -> bool: ...
def __getitem__(self, item: t.Literal['forced_bos_token_id']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['forced_eos_token_id']) -> t.Optional[t.Union[int, t.List[int]]]: ...
def __getitem__(self, item: t.Literal['remove_invalid_values']) -> bool: ...
def __getitem__(self, item: t.Literal['exponential_decay_length_penalty']) -> t.Optional[t.Tuple[int, float]]: ...
def __getitem__(self, item: t.Literal['suppress_tokens']) -> t.Optional[t.List[int]]: ...
def __getitem__(self, item: t.Literal['begin_suppress_tokens']) -> t.Optional[t.List[int]]: ...
def __getitem__(self, item: t.Literal['forced_decoder_ids']) -> t.Optional[t.List[t.List[int]]]: ...
def __getitem__(self, item: t.Literal['num_return_sequences']) -> int: ...
def __getitem__(self, item: t.Literal['output_attentions']) -> bool: ...
def __getitem__(self, item: t.Literal['output_hidden_states']) -> bool: ...
def __getitem__(self, item: t.Literal['output_scores']) -> bool: ...
def __getitem__(self, item: t.Literal['pad_token_id']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['bos_token_id']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['eos_token_id']) -> t.Optional[t.Union[int, t.List[int]]]: ...
def __getitem__(self, item: t.Literal['encoder_no_repeat_ngram_size']) -> int: ...
def __getitem__(self, item: t.Literal['decoder_start_token_id']) -> int: ...
def __getitem__(self, item: t.Literal['n']) -> int: ...
def __getitem__(self, item: t.Literal['best_of']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['presence_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['frequency_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['temperature']) -> float: ...
def __getitem__(self, item: t.Literal['top_k']) -> int: ...
def __getitem__(self, item: t.Literal['top_p']) -> float: ...
def __getitem__(self, item: t.Literal['min_p']) -> float: ...
def __getitem__(self, item: t.Literal['use_beam_search']) -> bool: ...
def __getitem__(self, item: t.Literal['length_penalty']) -> float: ...
def __getitem__(self, item: t.Literal['early_stopping']) -> bool: ...
def __getitem__(self, item: t.Literal['stop']) -> t.Optional[t.Union[str, t.List[str]]]: ...
def __getitem__(self, item: t.Literal['stop_token_ids']) -> t.Optional[t.List[int]]: ...
def __getitem__(self, item: t.Literal['include_stop_str_in_output']) -> bool: ...
def __getitem__(self, item: t.Literal['ignore_eos']) -> bool: ...
def __getitem__(self, item: t.Literal['max_tokens']) -> int: ...
def __getitem__(self, item: t.Literal['logprobs']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['prompt_logprobs']) -> t.Optional[int]: ...
def __getitem__(self, item: t.Literal['skip_special_tokens']) -> bool: ...
def __getitem__(self, item: t.Literal['spaces_between_special_tokens']) -> bool: ...
def __getitem__(self, item: t.Literal['logits_processors']) -> t.Optional[t.List[LogitsProcessor]]: ...
def __getitem__(self, item: t.Literal['max_new_tokens']) -> int: ...
def __getitem__(self, item: t.Literal['start_name']) -> str: ...
def __getitem__(self, item: t.Literal['model_name']) -> str: ...
# update-config-stubs.py: stop
# fmt: on
def __getitem__(self, item: t.Any) -> t.Any:
if item is None:
raise TypeError(f"{self} doesn't understand how to index None.")
item = inflection.underscore(item)
if item in _reserved_namespace:
raise ForbiddenAttributeError(
f"'{item}' is a reserved namespace for {self.__class__} and should not be access nor modified."
)
# backward compatible
if item == 'max_new_tokens':
item = 'max_tokens'
if self.model_extra and item in self.model_extra:
return self.model_extra[item]
elif hasattr(self.generation_config, item):
return getattr(self.generation_config, item)
elif item in self.metadata_config:
return self.metadata_config[item]
elif hasattr(self, item):
return getattr(self, item)
elif item in {'start_name', 'model_name'}: # backward compatible
from .config.configuration_auto import CONFIG_TO_ALIAS_NAMES
if (cls_name := self.__class__.__name__) in CONFIG_TO_ALIAS_NAMES:
return CONFIG_TO_ALIAS_NAMES[cls_name]
raise KeyError(item)
def __contains__(self, item: t.Any) -> bool:
try:
self[item]
return True
except KeyError:
return False
def ser_model(self) -> dict[str, t.Any]:
return self.generation_config.model_dump()
def model_construct_env(cls, **attrs: t.Any) -> Self: # All LLMConfig init should start from here.
env_json_string = os.environ.get('OPENLLM_CONFIG', None)
config_from_env: DictStrAny = {}
if env_json_string is not None:
try:
config_from_env = orjson.loads(env_json_string)
except orjson.JSONDecodeError as e:
raise RuntimeError("Failed to parse 'OPENLLM_CONFIG' as valid JSON string.") from e
generation_config = {}
if 'generation_config' in attrs and 'sampling_config' in attrs: # backward compatibility
generation_config = attrs.pop('generation_config')
sampling_config = attrs.pop('sampling_config')
generation_config.update(sampling_config)
elif 'llm_config' in attrs: # NOTE: this is the new key
generation_config = attrs.pop('llm_config')
config_from_env.update({**generation_config, **cls().generation_config.model_dump(), **attrs})
config_from_env = {k: v for k, v in config_from_env.items() if v is not None}
return cls.model_construct(generation_config=GenerationConfig.model_construct(**config_from_env))
def inference_options(self, llm: openllm.LLM, backend: str | None = None) -> tuple[Self, t.Any]:
backend = backend if backend is not None else llm.__llm_backend__
framework = getattr(self, backend, None)
if framework is None:
raise ValueError(f'Unknown backend {backend}')
try:
return self, framework.build(self)
except AttributeError:
raise RuntimeError(f'Unknown backend {backend}') from None
class vllm:
def build(config: LLMConfig) -> vllm.SamplingParams:
top_p = 1.0 if config['temperature'] <= 1e-5 else config['top_p']
generation_config = config.generation_config.model_copy(update={'top_p': top_p})
return generation_config.build('vllm')
class pt:
def build(config: LLMConfig) -> LLMConfig:
return config.generation_config.build('pt')
class hf:
def build(config: LLMConfig) -> transformers.GenerationConfig:
return config.generation_config.build('pt')
def compatible_options(self, request: ChatCompletionRequest | CompletionRequest) -> dict[str, t.Any]:
from .protocol.openai import ChatCompletionRequest, CompletionRequest
if isinstance(request, (ChatCompletionRequest, CompletionRequest)):
return self.openai.build(self, request)
raise TypeError(f'Unknown request type {type(request)}')
class openai:
def build(config: LLMConfig, request: ChatCompletionRequest | CompletionRequest) -> dict[str, t.Any]:
d = dict(
temperature=first_not_none(request.temperature, config['temperature']),
top_p=first_not_none(request.top_p, config['top_p']),
top_k=first_not_none(request.top_k, config['top_k']),
best_of=first_not_none(request.best_of, config['best_of']),
n=first_not_none(request.n, default=config['n']),
stop=first_not_none(request.stop, default=None),
max_new_tokens=first_not_none(request.max_tokens, default=config['max_tokens']),
presence_penalty=first_not_none(request.presence_penalty, default=config['presence_penalty']),
frequency_penalty=first_not_none(request.frequency_penalty, default=config['frequency_penalty']),
)
if hasattr(request, 'logprobs'):
d['logprobs'] = first_not_none(request.logprobs, default=config['logprobs'])
return d
def template(self) -> str:
return '{system_message}{instruction}'
def system_message(self) -> str:
return ''
def chat_template(self) -> str | None:
return
def chat_messages(self) -> list[MessageParam]:
from ._schemas import MessageParam
return [
MessageParam(role='system', content='You are a helpful assistant'),
MessageParam(role='user', content="Hello, I'm looking for a chatbot that can help me with my work."),
MessageParam(role='assistant', content='Yes? What can I help you with?'),
]
DictStrAny = Dict[str, Any]
def parse_config_options(
config: LLMConfig,
server_timeout: int,
workers_per_resource: float,
device: t.Tuple[str, ...] | None,
cors: bool,
environ: DictStrAny,
) -> DictStrAny:
# TODO: Support amd.com/gpu on k8s
_bentoml_config_options_env = environ.pop('BENTOML_CONFIG_OPTIONS', '')
_bentoml_config_options_opts = [
'tracing.sample_rate=1.0',
'api_server.max_runner_connections=25',
f'runners."llm-{config["start_name"]}-runner".batching.max_batch_size=128',
f'api_server.traffic.timeout={server_timeout}',
f'runners."llm-{config["start_name"]}-runner".traffic.timeout={config["timeout"]}',
f'runners."llm-{config["start_name"]}-runner".workers_per_resource={workers_per_resource}',
]
if device:
if len(device) > 1:
_bentoml_config_options_opts.extend([
f'runners."llm-{config["start_name"]}-runner".resources."nvidia.com/gpu"[{idx}]={dev}'
for idx, dev in enumerate(device)
])
else:
_bentoml_config_options_opts.append(
f'runners."llm-{config["start_name"]}-runner".resources."nvidia.com/gpu"=[{device[0]}]'
)
if cors:
_bentoml_config_options_opts.extend([
'api_server.http.cors.enabled=true',
'api_server.http.cors.access_control_allow_origins="*"',
])
_bentoml_config_options_opts.extend([
f'api_server.http.cors.access_control_allow_methods[{idx}]="{it}"'
for idx, it in enumerate(['GET', 'OPTIONS', 'POST', 'HEAD', 'PUT'])
])
_bentoml_config_options_env += (' ' if _bentoml_config_options_env else '') + ' '.join(_bentoml_config_options_opts)  # parenthesised so the joined options are always appended
environ['BENTOML_CONFIG_OPTIONS'] = _bentoml_config_options_env
if DEBUG:
logger.debug('Setting BENTOML_CONFIG_OPTIONS=%s', _bentoml_config_options_env)
return environ | null |
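A hedged sketch of how `parse_config_options` is typically driven; `config` stands in for any concrete `LLMConfig` instance (e.g. one built from the auto-config registry), and the resulting value is only approximated in the comment:
import os

environ = parse_config_options(
    config,                      # assumed: a concrete LLMConfig whose start_name is e.g. 'opt'
    server_timeout=3600,
    workers_per_resource=1.0,
    device=('0', '1'),           # two visible GPUs
    cors=True,
    environ=os.environ.copy(),
)
# environ['BENTOML_CONFIG_OPTIONS'] now holds space-separated overrides such as
#   tracing.sample_rate=1.0 api_server.traffic.timeout=3600
#   runners."llm-opt-runner".resources."nvidia.com/gpu"[0]=0 ...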
189,421 | from __future__ import annotations
import functools, logging, os, typing as t
import bentoml, openllm, click, inflection, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup
from click import shell_completion as sc
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import (
Concatenate,
DictStrAny,
LiteralBackend,
LiteralSerialisation,
ParamSpec,
AnyCallable,
get_literal_args,
)
from openllm_core.utils import DEBUG, compose, dantic, resolve_user_filepath
P = ParamSpec('P')
FC = t.TypeVar('FC', bound=t.Union[_AnyCallable, click.Command])
_IGNORED_OPTIONS = {'working_dir', 'production', 'protocol_version'}
class LLMConfig(pydantic.BaseModel, abc.ABC):
def __setattr__(self, attr: str, value: t.Any) -> None:
def __pydantic_init_subclass__(cls, **_: t.Any):
def model_post_init(self, *_: t.Any):
def __getitem__(self, item: t.Literal['default_id']) -> str:
def __getitem__(self, item: t.Literal['model_ids']) -> ListStr:
def __getitem__(self, item: t.Literal['architecture']) -> str:
def __getitem__(self, item: t.Literal['url']) -> str:
def __getitem__(self, item: t.Literal['serialisation']) -> LiteralSerialisation:
def __getitem__(self, item: t.Literal['trust_remote_code']) -> bool:
def __getitem__(self, item: t.Literal['service_name']) -> str:
def __getitem__(self, item: t.Literal['requirements']) -> t.Optional[ListStr]:
def __getitem__(self, item: t.Literal['model_type']) -> t.Literal['causal_lm', 'seq2seq_lm']:
def __getitem__(self, item: t.Literal['timeout']) -> int:
def __getitem__(self, item: t.Literal['fine_tune_strategies']) -> t.Tuple[t.Dict[str, t.Any], ...]:
def __getitem__(self, item: t.Literal['min_length']) -> int:
def __getitem__(self, item: t.Literal['min_new_tokens']) -> t.Optional[int]:
def __getitem__(self, item: t.Literal['max_time']) -> t.Optional[float]:
def __getitem__(self, item: t.Literal['num_beams']) -> int:
def __getitem__(self, item: t.Literal['num_beam_groups']) -> int:
def __getitem__(self, item: t.Literal['penalty_alpha']) -> t.Optional[float]:
def __getitem__(self, item: t.Literal['use_cache']) -> bool:
def __getitem__(self, item: t.Literal['typical_p']) -> float:
def __getitem__(self, item: t.Literal['epsilon_cutoff']) -> float:
def __getitem__(self, item: t.Literal['eta_cutoff']) -> float:
def __getitem__(self, item: t.Literal['diversity_penalty']) -> float:
def __getitem__(self, item: t.Literal['repetition_penalty']) -> float:
def __getitem__(self, item: t.Literal['encoder_repetition_penalty']) -> float:
def __getitem__(self, item: t.Literal['no_repeat_ngram_size']) -> int:
def __getitem__(self, item: t.Literal['bad_words_ids']) -> t.Optional[t.List[t.List[int]]]:
def __getitem__(self, item: t.Literal['force_words_ids']) -> t.Optional[t.Union[t.List[t.List[int]], t.List[t.List[t.List[int]]]]]:
def __getitem__(self, item: t.Literal['renormalize_logits']) -> bool:
def __getitem__(self, item: t.Literal['forced_bos_token_id']) -> t.Optional[int]:
def __getitem__(self, item: t.Literal['forced_eos_token_id']) -> t.Optional[t.Union[int, t.List[int]]]:
def __getitem__(self, item: t.Literal['remove_invalid_values']) -> bool:
def __getitem__(self, item: t.Literal['exponential_decay_length_penalty']) -> t.Optional[t.Tuple[int, float]]:
def __getitem__(self, item: t.Literal['suppress_tokens']) -> t.Optional[t.List[int]]:
def __getitem__(self, item: t.Literal['begin_suppress_tokens']) -> t.Optional[t.List[int]]:
def __getitem__(self, item: t.Literal['forced_decoder_ids']) -> t.Optional[t.List[t.List[int]]]:
def __getitem__(self, item: t.Literal['num_return_sequences']) -> int:
def __getitem__(self, item: t.Literal['output_attentions']) -> bool:
def __getitem__(self, item: t.Literal['output_hidden_states']) -> bool:
def __getitem__(self, item: t.Literal['output_scores']) -> bool:
def __getitem__(self, item: t.Literal['pad_token_id']) -> t.Optional[int]:
def __getitem__(self, item: t.Literal['bos_token_id']) -> t.Optional[int]:
def __getitem__(self, item: t.Literal['eos_token_id']) -> t.Optional[t.Union[int, t.List[int]]]:
def __getitem__(self, item: t.Literal['encoder_no_repeat_ngram_size']) -> int:
def __getitem__(self, item: t.Literal['decoder_start_token_id']) -> int:
def __getitem__(self, item: t.Literal['n']) -> int:
def __getitem__(self, item: t.Literal['best_of']) -> t.Optional[int]:
def __getitem__(self, item: t.Literal['presence_penalty']) -> float:
def __getitem__(self, item: t.Literal['frequency_penalty']) -> float:
def __getitem__(self, item: t.Literal['temperature']) -> float:
def __getitem__(self, item: t.Literal['top_k']) -> int:
def __getitem__(self, item: t.Literal['top_p']) -> float:
def __getitem__(self, item: t.Literal['min_p']) -> float:
def __getitem__(self, item: t.Literal['use_beam_search']) -> bool:
def __getitem__(self, item: t.Literal['length_penalty']) -> float:
def __getitem__(self, item: t.Literal['early_stopping']) -> bool:
def __getitem__(self, item: t.Literal['stop']) -> t.Optional[t.Union[str, t.List[str]]]:
def __getitem__(self, item: t.Literal['stop_token_ids']) -> t.Optional[t.List[int]]:
def __getitem__(self, item: t.Literal['include_stop_str_in_output']) -> bool:
def __getitem__(self, item: t.Literal['ignore_eos']) -> bool:
def __getitem__(self, item: t.Literal['max_tokens']) -> int:
def __getitem__(self, item: t.Literal['logprobs']) -> t.Optional[int]:
def __getitem__(self, item: t.Literal['prompt_logprobs']) -> t.Optional[int]:
def __getitem__(self, item: t.Literal['skip_special_tokens']) -> bool:
def __getitem__(self, item: t.Literal['spaces_between_special_tokens']) -> bool:
def __getitem__(self, item: t.Literal['logits_processors']) -> t.Optional[t.List[LogitsProcessor]]:
def __getitem__(self, item: t.Literal['max_new_tokens']) -> int:
def __getitem__(self, item: t.Literal['start_name']) -> str:
def __getitem__(self, item: t.Literal['model_name']) -> str:
def __getitem__(self, item: t.Any) -> t.Any:
def __contains__(self, item: t.Any) -> bool:
def ser_model(self) -> dict[str, t.Any]:
def model_construct_env(cls, **attrs: t.Any) -> Self:
def inference_options(self, llm: openllm.LLM, backend: str | None = None) -> tuple[Self, t.Any]:
def build(config: LLMConfig) -> vllm.SamplingParams:
def build(config: LLMConfig) -> LLMConfig:
def build(config: LLMConfig) -> transformers.GenerationConfig:
def compatible_options(self, request: ChatCompletionRequest | CompletionRequest) -> dict[str, t.Any]:
def build(config: LLMConfig, request: ChatCompletionRequest | CompletionRequest) -> dict[str, t.Any]:
def template(self) -> str:
def system_message(self) -> str:
def chat_template(self) -> str | None:
def chat_messages(self) -> list[MessageParam]:
def parse_serve_args() -> t.Callable[[t.Callable[..., LLMConfig]], t.Callable[[FC], FC]]:
from bentoml_cli.cli import cli
group = cog.optgroup.group(
'Start a HTTP server options', help='Related to serving the model [synonymous to `bentoml serve-http`]'
)
def decorator(f: t.Callable[Concatenate[int, t.Optional[str], P], LLMConfig]) -> t.Callable[[FC], FC]:
serve_command = cli.commands['serve']
# The first parameter is the `bento` argument
# The last five are from BentoMLCommandGroup.NUMBER_OF_COMMON_PARAMS
serve_options = [
p
for p in serve_command.params[1 : -BentoMLCommandGroup.NUMBER_OF_COMMON_PARAMS]
if p.name not in _IGNORED_OPTIONS
]
for options in reversed(serve_options):
attrs = options.to_info_dict()
# we don't need param_type_name, since it should all be options
attrs.pop('param_type_name')
# name is not a valid argument
attrs.pop('name')
# type can be determined from the default value
attrs.pop('type')
param_decls = (*attrs.pop('opts'), *attrs.pop('secondary_opts'))
f = cog.optgroup.option(*param_decls, **attrs)(f)
return group(f)
return decorator | null |
189,422 | from __future__ import annotations
import functools, logging, os, typing as t
import bentoml, openllm, click, inflection, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup
from click import shell_completion as sc
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import (
Concatenate,
DictStrAny,
LiteralBackend,
LiteralSerialisation,
ParamSpec,
AnyCallable,
get_literal_args,
)
from openllm_core.utils import DEBUG, compose, dantic, resolve_user_filepath
FC = t.TypeVar('FC', bound=t.Union[_AnyCallable, click.Command])
def optimization_decorator(fn: FC, *, factory=click, _eager=True) -> FC | list[AnyCallable]:
def parse_device_callback(
_: click.Context, param: click.Parameter, value: tuple[tuple[str], ...] | None
) -> t.Tuple[str, ...] | None:
def adapter_id_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
def compose(*funcs):
def start_decorator(fn: FC) -> FC:
composed = compose(
cog.optgroup.group(
'LLM Options',
help="""The following options are related to running LLM Server as well as optimization options.
OpenLLM supports running model k-bit quantization (8-bit, 4-bit), GPTQ quantization, PagedAttention via vLLM.
The following are either in our roadmap or currently being worked on:
- DeepSpeed Inference: [link](https://www.deepspeed.ai/inference/)
- GGML: Fast inference on [bare metal](https://github.com/ggerganov/ggml)
""",
),
*optimization_decorator(fn, factory=cog.optgroup, _eager=False),
cog.optgroup.option(
'--device',
type=dantic.CUDA,
multiple=True,
envvar='CUDA_VISIBLE_DEVICES',
callback=parse_device_callback,
help='Assign GPU devices (if available)',
show_envvar=True,
),
adapter_id_option(factory=cog.optgroup),
)
return composed(fn) | null |
189,423 | from __future__ import annotations
import functools, logging, os, typing as t
import bentoml, openllm, click, inflection, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup
from click import shell_completion as sc
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import (
Concatenate,
DictStrAny,
LiteralBackend,
LiteralSerialisation,
ParamSpec,
AnyCallable,
get_literal_args,
)
from openllm_core.utils import DEBUG, compose, dantic, resolve_user_filepath
FC = t.TypeVar('FC', bound=t.Union[_AnyCallable, click.Command])
The provided code snippet includes necessary dependencies for implementing the `_click_factory_type` function. Write a Python function `def _click_factory_type(*param_decls: t.Any, **attrs: t.Any) -> t.Callable[[FC | None], FC]` to solve the following problem:
General ``@click`` decorator with some sauce. This decorator extends the default ``@click.option`` plus a factory option and factory attr to provide type-safe click.option or click.argument wrappers for all compatible factories.
Here is the function:
def _click_factory_type(*param_decls: t.Any, **attrs: t.Any) -> t.Callable[[FC | None], FC]:
"""General ``@click`` decorator with some sauce.
This decorator extends the default ``@click.option`` plus a factory option and factory attr to
provide type-safe click.option or click.argument wrappers for all compatible factories.
"""
factory = attrs.pop('factory', click)
factory_attr = attrs.pop('attr', 'option')
if factory_attr != 'argument':
attrs.setdefault('help', 'General option for OpenLLM CLI.')
def decorator(f: FC | None) -> FC:
callback = getattr(factory, factory_attr, None)
if callback is None:
raise ValueError(f'Factory {factory} has no attribute {factory_attr}.')
return t.cast(FC, callback(*param_decls, **attrs)(f) if f is not None else callback(*param_decls, **attrs))
return decorator | General ``@click`` decorator with some sauce. This decorator extends the default ``@click.option`` plus a factory option and factory attr to provide type-safe click.option or click.argument wrappers for all compatible factories. |
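A hedged sketch of how a helper is derived from this factory and then used; `cli_option` mirrors the partial the surrounding code builds, while the `--dtype` option is purely illustrative:
import functools
import click

cli_option = functools.partial(_click_factory_type, attr='option')

@click.command()
@cli_option('--dtype', default='auto', show_default=True)  # help falls back to the generic default text
def demo(dtype):
    click.echo(dtype)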
189,424 | from __future__ import annotations
import functools, logging, os, typing as t
import bentoml, openllm, click, inflection, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup
from click import shell_completion as sc
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import (
Concatenate,
DictStrAny,
LiteralBackend,
LiteralSerialisation,
ParamSpec,
AnyCallable,
get_literal_args,
)
from openllm_core.utils import DEBUG, compose, dantic, resolve_user_filepath
_AnyCallable = t.Callable[..., t.Any]
FC = t.TypeVar('FC', bound=t.Union[_AnyCallable, click.Command])
cli_option = functools.partial(_click_factory_type, attr='option')
def cors_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
return cli_option(
'--cors/--no-cors',
show_default=True,
default=False,
envvar='OPENLLM_CORS',
show_envvar=True,
help='Enable CORS for the server.',
**attrs,
)(f) | null |
189,425 | from __future__ import annotations
import functools, logging, os, typing as t
import bentoml, openllm, click, inflection, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup
from click import shell_completion as sc
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import (
Concatenate,
DictStrAny,
LiteralBackend,
LiteralSerialisation,
ParamSpec,
AnyCallable,
get_literal_args,
)
from openllm_core.utils import DEBUG, compose, dantic, resolve_user_filepath
_AnyCallable = t.Callable[..., t.Any]
FC = t.TypeVar('FC', bound=t.Union[_AnyCallable, click.Command])
cli_option = functools.partial(_click_factory_type, attr='option')
def machine_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
return cli_option('--machine', is_flag=True, default=False, hidden=True, **attrs)(f) | null |
189,426 | from __future__ import annotations
import functools, logging, os, typing as t
import bentoml, openllm, click, inflection, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup
from click import shell_completion as sc
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import (
Concatenate,
DictStrAny,
LiteralBackend,
LiteralSerialisation,
ParamSpec,
AnyCallable,
get_literal_args,
)
from openllm_core.utils import DEBUG, compose, dantic, resolve_user_filepath
_AnyCallable = t.Callable[..., t.Any]
FC = t.TypeVar('FC', bound=t.Union[_AnyCallable, click.Command])
cli_option = functools.partial(_click_factory_type, attr='option')
def model_id_option(f: _AnyCallable | None = None, **attrs: t.Any) -> t.Callable[[FC], FC]:
return cli_option(
'--model-id',
type=click.STRING,
default=None,
envvar='OPENLLM_MODEL_ID',
show_envvar=True,
help='Optional model_id name or path for (fine-tune) weight.',
**attrs,
)(f) | null |
189,427 | from __future__ import annotations
import functools, logging, os, typing as t
import bentoml, openllm, click, inflection, click_option_group as cog
from bentoml_cli.utils import BentoMLCommandGroup
from click import shell_completion as sc
from openllm_core._configuration import LLMConfig
from openllm_core._typing_compat import (
Concatenate,
DictStrAny,
LiteralBackend,
LiteralSerialisation,
ParamSpec,
AnyCallable,
get_literal_args,
)
from openllm_core.utils import DEBUG, compose, dantic, resolve_user_filepath
_AnyCallable = t.Callable[..., t.Any]
FC = t.TypeVar('FC', bound=t.Union[_AnyCallable, click.Command])
cli_argument = functools.partial(_click_factory_type, attr='argument')
def model_name_argument(f: _AnyCallable | None = None, required: bool = True, **attrs: t.Any) -> t.Callable[[FC], FC]:
return cli_argument(
'model_name',
type=click.Choice([inflection.dasherize(name) for name in openllm.CONFIG_MAPPING]),
required=required,
**attrs,
)(f) | null |
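A hedged sketch composing the helpers above into one command; the command name and echo output are illustrative only:
import click

@click.command()
@model_name_argument            # positional MODEL_NAME validated against openllm.CONFIG_MAPPING
@model_id_option()              # --model-id / $OPENLLM_MODEL_ID
@cors_option()                  # --cors/--no-cors / $OPENLLM_CORS
def describe(model_name, model_id, cors):
    click.echo(f'{model_name} {model_id} {cors}')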
189,428 | from __future__ import annotations
import os, typing as t, fs
from pathlib import Path
from ghapi.all import GhApi
from jinja2 import Environment
from jinja2.loaders import FileSystemLoader
from plumbum.cmd import curl, cut, shasum
if t.TYPE_CHECKING:
from plumbum.commands.base import Pipeline
_gz_strategies: dict[t.Literal['macos_arm', 'macos_intel', 'linux_intel'], str] = {
'macos_arm': 'aarch64-apple-darwin',
'macos_intel': 'x86_64-apple-darwin',
'linux_intel': 'x86_64-unknown-linux-musl',
}
def determine_release_url(
svn_url: str, tag: str, target: t.Literal['macos_arm', 'macos_intel', 'linux_intel', 'archive']
) -> str:
if target == 'archive':
return f'{svn_url}/archive/{tag}.tar.gz'
return f"{svn_url}/releases/download/{tag}/openllm-{tag.replace('v', '')}-{_gz_strategies[target]}.tar.gz" | null |
189,429 | from __future__ import annotations
import os, typing as t, fs
from pathlib import Path
from ghapi.all import GhApi
from jinja2 import Environment
from jinja2.loaders import FileSystemLoader
from plumbum.cmd import curl, cut, shasum
def get_release_hash_command(svn_url: str, tag: str) -> Pipeline:
return curl['-sSL', svn_url] | shasum['-a256'] | cut['-d', ' ', '-f1'] | null |
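A hedged sketch of running the pipeline above; a plumbum pipeline executes when called and returns its stdout:
url = determine_release_url('https://github.com/bentoml/OpenLLM', 'v0.4.44', 'archive')
digest = get_release_hash_command(url, 'v0.4.44')().strip()  # sha256 of the downloaded tarball
print(digest)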
189,430 | from __future__ import annotations
import dataclasses
import os
import sys
import typing as t
import inflection
import tomlkit
from ghapi.all import GhApi
import openllm
class Classifier:
identifier: t.Dict[str, str] = dataclasses.field(
default_factory=lambda: {
'status': 'Development Status',
'environment': 'Environment',
'license': 'License',
'topic': 'Topic',
'os': 'Operating System',
'audience': 'Intended Audience',
'typing': 'Typing',
'language': 'Programming Language',
}
)
joiner: str = ' :: '
def status() -> dict[int, str]:
return {
v: status
for v, status in zip(
range(1, 8),
[
'1 - Planning',
'2 - Pre-Alpha',
'3 - Alpha',
'4 - Beta',
'5 - Production/Stable',
'6 - Mature',
'7 - Inactive',
],
)
}
def apache() -> str:
return Classifier.create_classifier('license', 'OSI Approved', 'Apache Software License')
def create_classifier(identifier: str, *decls: t.Any) -> str:
cls_ = Classifier()
if identifier not in cls_.identifier:
raise ValueError(f'{identifier} is not yet supported (supported alias: {Classifier.identifier})')
return cls_.joiner.join([cls_.identifier[identifier], *decls])
def create_python_classifier(
implementation: list[str] | None = None, supported_version: list[str] | None = None
) -> list[str]:
if supported_version is None:
supported_version = ['3.8', '3.9', '3.10', '3.11', '3.12']
if implementation is None:
implementation = ['CPython', 'PyPy']
base = [
Classifier.create_classifier('language', 'Python'),
Classifier.create_classifier('language', 'Python', '3'),
]
base.append(Classifier.create_classifier('language', 'Python', '3', 'Only'))
base.extend([Classifier.create_classifier('language', 'Python', version) for version in supported_version])
base.extend(
[Classifier.create_classifier('language', 'Python', 'Implementation', impl) for impl in implementation]
)
return base
def create_status_classifier(level: int) -> str:
return Classifier.create_classifier('status', Classifier.status()[level])
def correct_style(it: t.Any) -> t.Any:
return it
def create_classifiers() -> Array:
arr = correct_style(tomlkit.array())
arr.extend(
[
Classifier.create_status_classifier(5),
Classifier.create_classifier('environment', 'GPU', 'NVIDIA CUDA'),
Classifier.create_classifier('environment', 'GPU', 'NVIDIA CUDA', '12'),
Classifier.create_classifier('environment', 'GPU', 'NVIDIA CUDA', '11.8'),
Classifier.create_classifier('environment', 'GPU', 'NVIDIA CUDA', '11.7'),
Classifier.apache(),
Classifier.create_classifier('topic', 'Scientific/Engineering', 'Artificial Intelligence'),
Classifier.create_classifier('topic', 'Software Development', 'Libraries'),
Classifier.create_classifier('os', 'OS Independent'),
Classifier.create_classifier('audience', 'Developers'),
Classifier.create_classifier('audience', 'Science/Research'),
Classifier.create_classifier('audience', 'System Administrators'),
Classifier.create_classifier('typing', 'Typed'),
*Classifier.create_python_classifier(),
]
)
return arr.multiline(True) | null |
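Hedged examples of the classifier strings these helpers build, assuming `Classifier` is decorated as a dataclass in the original module so its `identifier` field resolves normally:
print(Classifier.create_classifier('license', 'OSI Approved', 'Apache Software License'))
# License :: OSI Approved :: Apache Software License
print(Classifier.create_status_classifier(5))
# Development Status :: 5 - Production/Stable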
189,431 | from __future__ import annotations
import dataclasses
import os
import sys
import typing as t
import inflection
import tomlkit
from ghapi.all import GhApi
import openllm
_base_requirements: dict[str, t.Any] = {
inflection.dasherize(name): config_cls()['requirements']
for name, config_cls in openllm.CONFIG_MAPPING.items()
if 'requirements' in config_cls()
}
_base_requirements.update(
{v: _locals.get(f'{inflection.underscore(v).upper()}_DEPS') for v in openllm.utils.OPTIONAL_DEPENDENCIES}
)
_base_requirements = {k: v for k, v in sorted(_base_requirements.items())}
def correct_style(it: t.Any) -> t.Any:
return it
def create_optional_table() -> Table:
all_array = tomlkit.array()
all_array.append(f"openllm[{','.join([k for k,v in _base_requirements.items() if v])}]")
table = tomlkit.table(is_super_table=True)
_base_requirements.update(
{'full': correct_style(all_array.multiline(True)), 'all': tomlkit.array('["openllm[full]"]')}
)
table.update({k: v for k, v in sorted(_base_requirements.items()) if v})
table.add(tomlkit.nl())
return table | null |
189,432 | from __future__ import annotations
import dataclasses
import os
import sys
import typing as t
import inflection
import tomlkit
from ghapi.all import GhApi
if t.TYPE_CHECKING:
from tomlkit.items import Array, Table
import openllm
def create_url_table(_info: t.Any) -> Table:
table = tomlkit.table()
_urls = {
'Blog': 'https://modelserving.com',
'Chat': 'https://discord.gg/openllm',
'Documentation': 'https://github.com/bentoml/openllm#readme',
'GitHub': _info.html_url,
'History': f'{_info.html_url}/blob/main/CHANGELOG.md',
'Homepage': _info.homepage,
'Tracker': f'{_info.html_url}/issues',
'Twitter': 'https://twitter.com/bentomlai',
}
table.update({k: v for k, v in sorted(_urls.items())})
return table | null |
189,433 | from __future__ import annotations
import dataclasses
import os
import sys
import typing as t
import inflection
import tomlkit
from ghapi.all import GhApi
import openllm
def correct_style(it: t.Any) -> t.Any:
return it
def build_system() -> Table:
table = tomlkit.table()
table.add('build-backend', 'hatchling.build')
requires_array = correct_style(tomlkit.array())
requires_array.extend(['hatchling==1.18.0', 'hatch-vcs==0.3.0', 'hatch-fancy-pypi-readme==23.1.0'])
table.add('requires', requires_array.multiline(True))
return table | null |
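A hedged sketch of the TOML `build_system()` emits (rendered output is approximate; tomlkit controls the exact layout):
import tomlkit

doc = tomlkit.document()
doc['build-system'] = build_system()
print(tomlkit.dumps(doc))
# [build-system]
# build-backend = "hatchling.build"
# requires = [
#     "hatchling==1.18.0",
#     "hatch-vcs==0.3.0",
#     "hatch-fancy-pypi-readme==23.1.0",
# ]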
189,434 | from __future__ import annotations
import dataclasses
import os
import sys
import typing as t
import inflection
import tomlkit
from ghapi.all import GhApi
import openllm
def correct_style(it: t.Any) -> t.Any:
return it
def keywords() -> Array:
arr = correct_style(tomlkit.array())
arr.extend(
[
'MLOps',
'AI',
'BentoML',
'Model Serving',
'Model Deployment',
'LLMOps',
'Falcon',
'Vicuna',
'Llama 2',
'Fine tuning',
'Serverless',
'Large Language Model',
'Generative AI',
'StableLM',
'Alpaca',
'PyTorch',
'Mistral',
'vLLM',
'Transformers',
]
)
return arr.multiline(True) | null |
189,435 | from __future__ import annotations
import dataclasses
import os
import sys
import typing as t
import inflection
import tomlkit
from ghapi.all import GhApi
import openllm
def build_cli_extensions() -> Table:
table = tomlkit.table()
table.update({'openllm': '_openllm_tiny._entrypoint:cli'})
return table | null |
189,436 | import os, shutil, sys, tomlkit
from openllm_core.config import CONFIG_MAPPING
from openllm_core.config.configuration_auto import CONFIG_TO_ALIAS_NAMES
def markdown_noteblock(text: str):
return ['\n', f'> **Note:** {text}\n'] | null |
189,437 | import os, shutil, sys, tomlkit
from openllm_core.config import CONFIG_MAPPING
from openllm_core.config.configuration_auto import CONFIG_TO_ALIAS_NAMES
def markdown_importantblock(text: str):
return ['\n', f'> **Important:** {text}\n'] | null |
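A hedged example of both helpers; they return lists of lines meant to be extended onto a README buffer, and the messages below are illustrative:
lines = []
lines.extend(markdown_noteblock('Quantisation requires bitsandbytes.'))
lines.extend(markdown_importantblock('vLLM is the recommended backend for serving.'))
print(''.join(lines))
# > **Note:** Quantisation requires bitsandbytes.
# > **Important:** vLLM is the recommended backend for serving.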
189,438 | from __future__ import annotations
import os, sys
from pathlib import Path
from openllm_core._configuration import GenerationConfig, ModelSettings
from openllm_core.config.configuration_auto import CONFIG_MAPPING_NAMES
from openllm_core.utils import codegen, import_utils as iutils
def process_annotations(annotations: str) -> str:
if 'NotRequired' in annotations:
return annotations[len('NotRequired[') : -1]
elif 'Required' in annotations:
return annotations[len('Required[') : -1]
else:
return annotations | null |
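Hedged examples of the annotation strings this helper normalises:
assert process_annotations('NotRequired[t.List[str]]') == 't.List[str]'
assert process_annotations('Required[int]') == 'int'
assert process_annotations('str') == 'str'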
189,439 | import concurrent.futures
import configparser
import os
from typing import List
def pyi_in_subdir(directory: str, git_root: str) -> List[str]:
pyi_files = []
for root, _, files in os.walk(directory):
for file in files:
if file.endswith('.pyi') or file == '_typing_compat.py' or '_openllm_tiny' in file:
full_path = os.path.join(root, file)
# Convert to relative path with respect to the git root
relative_path = os.path.relpath(full_path, git_root)
pyi_files.append(relative_path)
return pyi_files
def find_pyi_files(git_root: str) -> List[str]:
# List all subdirectories
subdirectories = [
os.path.join(git_root, name)
for name in os.listdir(git_root)
if os.path.isdir(os.path.join(git_root, name)) and name not in ['venv', '.git', '.venv']
]
# Use a thread pool to execute searches concurrently
with concurrent.futures.ThreadPoolExecutor() as executor:
# Map of future to subdirectory
future_to_subdir = {executor.submit(pyi_in_subdir, subdir, git_root): subdir for subdir in subdirectories}
all_pyi_files = set()
for future in concurrent.futures.as_completed(future_to_subdir):
pyi_files = future.result()
all_pyi_files.update(pyi_files)
return list(all_pyi_files) | null |
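A hedged usage sketch: resolve the repository root, then collect the stub-related files relative to it:
import subprocess

git_root = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'], text=True).strip()
for path in sorted(find_pyi_files(git_root)):
    print(path)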
189,440 | import concurrent.futures
import configparser
import os
from typing import List
_MYPY_CONFIG = {
'pretty': 'true',
'python_version': '3.9',
'show_error_codes': 'true',
'strict': 'true',
'plugins': 'pydantic.mypy',
'ignore_missing_imports': 'true',
'warn_unreachable': 'true',
'explicit_package_bases': 'true',
}
def update_mypy_ini(pyi_files: List[str], mypy_ini_path: str) -> int:
config = configparser.ConfigParser()
config.read(mypy_ini_path)
# Existing files from mypy.ini
existing_files = config.get('mypy', 'files', fallback='').split(', ')
# Add new .pyi files if they are not already in the list
updated_files = existing_files + [f for f in pyi_files if f not in existing_files]
# Update the 'files' entry
config['mypy']['files'] = ', '.join(updated_files)
for key, value in _MYPY_CONFIG.items():
config.set('mypy', key, value)
# Write changes back to mypy.ini
with open(mypy_ini_path, 'w') as configfile:
configfile.write(
f'# The following is autogenerated by {os.path.join(os.path.basename(os.path.dirname(__file__)), os.path.basename(__file__))}\n'
)
config.write(configfile)
# Remove last newline if exists
with open(mypy_ini_path, 'rb+') as file:
file.seek(-1, os.SEEK_END)
if file.read(1) == b'\n':
file.seek(-1, os.SEEK_END)
file.truncate()
return 0 | null |
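A hedged sketch tying the two helpers together, mirroring how a local tooling hook might call them; the mypy.ini location is an assumption:
import os
import subprocess

git_root = subprocess.check_output(['git', 'rev-parse', '--show-toplevel'], text=True).strip()
exit_code = update_mypy_ini(find_pyi_files(git_root), os.path.join(git_root, 'mypy.ini'))
# The resulting [mypy] section carries the merged `files` list plus the fixed options from _MYPY_CONFIG.
raise SystemExit(exit_code)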