def windows_bash_command():
"""Determines the command for Bash on windows."""
# Check that bash is on path otherwise try the default directory
# used by Git for windows
from xonsh.built_ins import XSH
wbc = "bash"
cmd_cache = XSH.commands_cache
bash_on_path = cmd_cache.lazy_locate_binary("bash", ignore_alias=True)
if bash_on_path:
try:
out = subprocess.check_output(
[bash_on_path, "--version"],
stderr=subprocess.PIPE,
text=True,
)
except subprocess.CalledProcessError:
bash_works = False
else:
# Check if Bash is from the "Windows Subsystem for Linux" (WSL)
# which can't be used by xonsh foreign-shell/completer
bash_works = out and "pc-linux-gnu" not in out.splitlines()[0]
if bash_works:
wbc = bash_on_path
else:
gfwp = git_for_windows_path()
if gfwp:
bashcmd = os.path.join(gfwp, "bin\\bash.exe")
if os.path.isfile(bashcmd):
wbc = bashcmd
    return wbc


def os_environ():
"""This dispatches to the correct, case-sensitive version of os.environ.
This is mainly a problem for Windows. See #2024 for more details.
This can probably go away once support for Python v3.5 or v3.6 is
dropped.
"""
if ON_WINDOWS:
return OSEnvironCasePreserving()
else:
        return os.environ


def bash_command():
"""Determines the command for Bash on the current platform."""
if (bc := os.getenv("XONSH_BASH_PATH_OVERRIDE", None)) is not None:
bc = str(bc) # for pathlib Paths
elif ON_WINDOWS:
bc = windows_bash_command()
else:
bc = "bash"
    return bc
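

# Usage sketch: $XONSH_BASH_PATH_OVERRIDE is consulted before any platform
# detection, so pinning it redirects bash_command() entirely. The path below
# is a hypothetical example, not a real installation.
def _example_bash_command_override():
    os.environ["XONSH_BASH_PATH_OVERRIDE"] = "/opt/bash/bin/bash"
    assert bash_command() == "/opt/bash/bin/bash"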


def BASH_COMPLETIONS_DEFAULT():
"""A possibly empty tuple with default paths to Bash completions known for
the current platform.
"""
if ON_LINUX or ON_CYGWIN or ON_MSYS:
bcd = ("/usr/share/bash-completion/bash_completion",)
elif ON_DARWIN:
bcd = (
"/usr/local/share/bash-completion/bash_completion", # v2.x
"/usr/local/etc/bash_completion", # v1.x
"/opt/homebrew/share/bash-completion/bash_completion", # v2.x on M1
"/opt/homebrew/etc/bash_completion", # v1.x on M1
)
elif ON_WINDOWS and git_for_windows_path():
bcd = (
os.path.join(
git_for_windows_path(), "usr\\share\\bash-completion\\bash_completion"
),
os.path.join(
git_for_windows_path(),
"mingw64\\share\\git\\completion\\" "git-completion.bash",
),
)
else:
bcd = ()
    return bcd


def LIBC():
"""The platform dependent libc implementation."""
global ctypes
if ON_DARWIN:
import ctypes.util
libc = ctypes.CDLL(ctypes.util.find_library("c"))
elif ON_CYGWIN:
libc = ctypes.CDLL("cygwin1.dll")
elif ON_MSYS:
libc = ctypes.CDLL("msys-2.0.dll")
elif ON_FREEBSD:
try:
libc = ctypes.CDLL("libc.so.7")
except OSError:
libc = None
elif ON_BSD:
try:
libc = ctypes.CDLL("libc.so")
except AttributeError:
libc = None
except OSError:
# OS X; can't use ctypes.util.find_library because that creates
# a new process on Linux, which is undesirable.
try:
libc = ctypes.CDLL("libc.dylib")
except OSError:
libc = None
elif ON_POSIX:
try:
libc = ctypes.CDLL("libc.so")
except AttributeError:
libc = None
except OSError:
# Debian and derivatives do the wrong thing because /usr/lib/libc.so
# is a GNU ld script rather than an ELF object. To get around this, we
# have to be more specific.
# We don't want to use ctypes.util.find_library because that creates a
# new process on Linux. We also don't want to try too hard because at
# this point we're already pretty sure this isn't Linux.
try:
libc = ctypes.CDLL("libc.so.6")
except OSError:
libc = None
if not hasattr(libc, "sysinfo"):
# Not Linux.
libc = None
elif ON_WINDOWS:
if hasattr(ctypes, "windll") and hasattr(ctypes.windll, "kernel32"):
libc = ctypes.windll.kernel32
else:
try:
# Windows CE uses the cdecl calling convention.
libc = ctypes.CDLL("coredll.lib")
except (AttributeError, OSError):
libc = None
elif ON_BEOS:
libc = ctypes.CDLL("libroot.so")
else:
libc = None
    return libc


def _safe_getattr(obj, attr, default=None):
"""Safe version of getattr.
Same as getattr, but will return ``default`` on any Exception,
rather than raising.
"""
try:
return getattr(obj, attr, default)
except Exception:
        return default
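

# Illustration: unlike plain getattr with a default, _safe_getattr also
# swallows exceptions raised by misbehaving properties.
def _example_safe_getattr():
    class Exploding:
        @property
        def value(self):
            raise RuntimeError("boom")

    assert _safe_getattr(Exploding(), "value", "fallback") == "fallback"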


def pretty(
obj, verbose=False, max_width=79, newline="\n", max_seq_length=MAX_SEQ_LENGTH
):
"""
Pretty print the object's representation.
"""
if _safe_getattr(obj, "xonsh_display"):
return obj.xonsh_display()
stream = io.StringIO()
printer = RepresentationPrinter(
stream, verbose, max_width, newline, max_seq_length=max_seq_length
)
printer.pretty(obj)
printer.flush()
    return stream.getvalue()
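

# Usage sketch: pretty() wraps nested containers at max_width and honors an
# object's own ``xonsh_display`` hook when present.
def _example_pretty():
    data = {"colors": ["red", "green", "blue"], "counts": list(range(8))}
    print(pretty(data, max_width=30))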


def pretty_print(
obj, verbose=False, max_width=79, newline="\n", max_seq_length=MAX_SEQ_LENGTH
):
"""
Like pretty() but print to stdout.
"""
printer = RepresentationPrinter(
sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length
)
printer.pretty(obj)
printer.flush()
sys.stdout.write(newline)
    sys.stdout.flush()


def _get_mro(obj_class):
"""Get a reasonable method resolution order of a class and its superclasses
for both old-style and new-style classes.
"""
if not hasattr(obj_class, "__mro__"):
# Old-style class. Mix in object to make a fake new-style class.
try:
obj_class = type(obj_class.__name__, (obj_class, object), {})
except TypeError:
# Old-style extension type that does not descend from object.
# FIXME: try to construct a more thorough MRO.
mro = [obj_class]
else:
mro = obj_class.__mro__[1:-1]
else:
mro = obj_class.__mro__
    return mro


def _default_pprint(obj, p, cycle):
"""
The default print function. Used if an object does not provide one and
it's none of the builtin objects.
"""
klass = _safe_getattr(obj, "__class__", None) or type(obj)
if _safe_getattr(klass, "__repr__", None) not in _baseclass_reprs:
# A user-provided repr. Find newlines and replace them with p.break_()
_repr_pprint(obj, p, cycle)
return
p.begin_group(1, "<")
p.pretty(klass)
p.text(f" at 0x{id(obj):x}")
if cycle:
p.text(" ...")
elif p.verbose:
first = True
for key in dir(obj):
if not key.startswith("_"):
try:
value = getattr(obj, key)
except AttributeError:
continue
if isinstance(value, types.MethodType):
continue
if not first:
p.text(",")
p.breakable()
p.text(key)
p.text("=")
step = len(key) + 1
p.indentation += step
p.pretty(value)
p.indentation -= step
first = False
p.end_group(1, ">") |
Factory that returns a pprint function useful for sequences. Used by
the default pprint for tuples, dicts, and lists. | def _seq_pprinter_factory(start, end, basetype):
"""
Factory that returns a pprint function useful for sequences. Used by
the default pprint for tuples, dicts, and lists.
"""
def inner(obj, p, cycle):
typ = type(obj)
if (
basetype is not None
and typ is not basetype
and typ.__repr__ != basetype.__repr__
):
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text(start + "..." + end)
step = len(start)
p.begin_group(step, start)
for idx, x in p._enumerate(obj):
if idx:
p.text(",")
p.breakable()
p.pretty(x)
if len(obj) == 1 and type(obj) is tuple:
# Special case for 1-item tuples.
p.text(",")
p.end_group(step, end)
    return inner


def _set_pprinter_factory(start, end, basetype):
"""
Factory that returns a pprint function useful for sets and frozensets.
"""
def inner(obj, p, cycle):
typ = type(obj)
if (
basetype is not None
and typ is not basetype
and typ.__repr__ != basetype.__repr__
):
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text(start + "..." + end)
if len(obj) == 0:
# Special case.
p.text(basetype.__name__ + "()")
else:
step = len(start)
p.begin_group(step, start)
# Like dictionary keys, we will try to sort the items if there aren't too many
items = obj
if not (p.max_seq_length and len(obj) >= p.max_seq_length):
try:
items = sorted(obj)
except Exception:
# Sometimes the items don't sort.
pass
for idx, x in p._enumerate(items):
if idx:
p.text(",")
p.breakable()
p.pretty(x)
p.end_group(step, end)
    return inner


def _dict_pprinter_factory(start, end, basetype=None):
"""
Factory that returns a pprint function used by the default pprint of
dicts and dict proxies.
"""
def inner(obj, p, cycle):
typ = type(obj)
if (
basetype is not None
and typ is not basetype
and typ.__repr__ != basetype.__repr__
):
# If the subclass provides its own repr, use it instead.
return p.text(typ.__repr__(obj))
if cycle:
return p.text("{...}")
p.begin_group(1, start)
keys = obj.keys()
for idx, key in p._enumerate(keys):
if idx:
p.text(",")
p.breakable()
p.pretty(key)
p.text(": ")
p.pretty(obj[key])
p.end_group(1, end)
    return inner


def _super_pprint(obj, p, cycle):
"""The pprint for the super type."""
p.begin_group(8, "<super: ")
p.pretty(obj.__thisclass__)
p.text(",")
p.breakable()
p.pretty(obj.__self__)
p.end_group(8, ">") |
The pprint function for regular expression patterns. | def _re_pattern_pprint(obj, p, cycle):
"""The pprint function for regular expression patterns."""
p.text("re.compile(")
pattern = repr(obj.pattern)
if pattern[:1] in "uU":
pattern = pattern[1:]
prefix = "ur"
else:
prefix = "r"
pattern = prefix + pattern.replace("\\\\", "\\")
p.text(pattern)
if obj.flags:
p.text(",")
p.breakable()
done_one = False
for flag in (
"TEMPLATE",
"IGNORECASE",
"LOCALE",
"MULTILINE",
"DOTALL",
"UNICODE",
"VERBOSE",
"DEBUG",
):
if obj.flags & getattr(re, flag):
if done_one:
p.text("|")
p.text("re." + flag)
done_one = True
p.text(")") |
The pprint for classes and types. | def _type_pprint(obj, p, cycle):
"""The pprint for classes and types."""
# Heap allocated types might not have the module attribute,
# and others may set it to None.
# Checks for a __repr__ override in the metaclass
if type(obj).__repr__ is not type.__repr__:
_repr_pprint(obj, p, cycle)
return
mod = _safe_getattr(obj, "__module__", None)
try:
name = obj.__qualname__
if not isinstance(name, str):
# This can happen if the type implements __qualname__ as a property
# or other descriptor in Python 2.
raise Exception("Try __name__")
except Exception:
name = obj.__name__
if not isinstance(name, str):
name = "<unknown type>"
if mod in (None, "__builtin__", "builtins", "exceptions"):
p.text(name)
else:
p.text(mod + "." + name) |
A pprint that just redirects to the normal repr function. | def _repr_pprint(obj, p, cycle):
"""A pprint that just redirects to the normal repr function."""
# Find newlines and replace them with p.break_()
output = repr(obj)
for idx, output_line in enumerate(output.splitlines()):
if idx:
p.break_()
        p.text(output_line)


def _function_pprint(obj, p, cycle):
"""Base pprint for all functions and builtin functions."""
name = _safe_getattr(obj, "__qualname__", obj.__name__)
mod = obj.__module__
if mod and mod not in ("__builtin__", "builtins", "exceptions"):
name = mod + "." + name
p.text(f"<function {name}>") |
Base pprint for all exceptions. | def _exception_pprint(obj, p, cycle):
"""Base pprint for all exceptions."""
name = getattr(obj.__class__, "__qualname__", obj.__class__.__name__)
if obj.__class__.__module__ not in ("exceptions", "builtins"):
name = f"{obj.__class__.__module__}.{name}"
step = len(name) + 1
p.begin_group(step, name + "(")
for idx, arg in enumerate(getattr(obj, "args", ())):
if idx:
p.text(",")
p.breakable()
p.pretty(arg)
p.end_group(step, ")") |
Add a pretty printer for a given type. | def for_type(typ, func):
"""
Add a pretty printer for a given type.
"""
oldfunc = _type_pprinters.get(typ, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
_type_pprinters[typ] = func
    return oldfunc
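

# Usage sketch: registering a printer for a hypothetical user type. Passing
# None later does not unregister (Nones are ignored), so the previous printer
# can be restored by passing it back in.
def _example_for_type():
    class Point:
        def __init__(self, x, y):
            self.x, self.y = x, y

    def _point_pprint(obj, p, cycle):
        p.text(f"Point({obj.x}, {obj.y})")

    old = for_type(Point, _point_pprint)
    print(pretty(Point(1, 2)))  # -> Point(1, 2)
    for_type(Point, old)  # restores the previous printer, if any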


def for_type_by_name(type_module, type_name, func, dtp=None):
"""
Add a pretty printer for a type specified by the module and name of a type
rather than the type object itself.
"""
if dtp is None:
dtp = _deferred_type_pprinters
key = (type_module, type_name)
oldfunc = dtp.get(key, None)
if func is not None:
# To support easy restoration of old pprinters, we need to ignore Nones.
dtp[key] = func
    return oldfunc


def color_by_name(name, fg=None, bg=None):
"""Converts a color name to a color token, foreground name,
and background name. Will take into consideration current foreground
and background colors, if provided.
Parameters
----------
name : str
Color name.
fg : str, optional
Foreground color name.
bg : str, optional
Background color name.
Returns
-------
tok : Token
Pygments Token.Color subclass
fg : str or None
New computed foreground color name.
bg : str or None
New computed background color name.
"""
name = name.upper()
if name in ("RESET", "NO_COLOR"):
return Color.DEFAULT, None, None
m = RE_BACKGROUND.search(name)
if m is None: # must be foreground color
fg = norm_name(name)
else:
bg = norm_name(name)
# assemble token
if fg is None and bg is None:
tokname = "RESET"
elif fg is None:
tokname = bg
elif bg is None:
tokname = fg
else:
tokname = fg + "__" + bg
tok = getattr(Color, tokname)
    return tok, fg, bg
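

# Usage sketch: foreground and background accumulate across calls, so a
# background spec combines with a previously seen foreground into a compound
# token such as Color.RED__BACKGROUND_BLUE.
def _example_color_by_name():
    tok, fg, bg = color_by_name("RED")
    tok, fg, bg = color_by_name("BACKGROUND_BLUE", fg, bg)
    return tok  # Color.RED__BACKGROUND_BLUE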


def color_name_to_pygments_code(name, styles):
"""Converts a xonsh color name to a pygments color code."""
token = getattr(Color, norm_name(name))
if token in styles:
return styles[token]
m = RE_XONSH_COLOR.match(name)
if m is None:
raise ValueError(f"{name!r} is not a color!")
parts = m.groupdict()
# convert regex match into actual pygments colors
if parts["reset"] is not None:
if parts["reset"] == "NO_COLOR":
warn_deprecated_no_color()
res = "noinherit"
elif parts["bghex"] is not None:
res = "bg:#" + parts["bghex"][3:]
elif parts["background"] is not None:
color = parts["color"]
if "#" in color:
fgcolor = color
else:
fgcolor = styles[getattr(Color, color)]
if fgcolor == "noinherit":
res = "noinherit"
else:
res = f"bg:{fgcolor}"
else:
# have regular, non-background color
mods = parts["modifiers"]
if mods is None:
mods = []
else:
mods = mods.strip("_").split("_")
mods = [PYGMENTS_MODIFIERS[mod] for mod in mods]
mods = list(filter(None, mods)) # remove unsupported entries
color = parts["color"]
if "#" in color:
mods.append(color)
else:
mods.append(styles[getattr(Color, color)])
res = " ".join(mods)
styles[token] = res
    return res


def code_by_name(name, styles):
"""Converts a token name into a pygments-style color code.
Parameters
----------
name : str
Color token name.
styles : Mapping
Mapping for looking up non-hex colors
Returns
-------
code : str
Pygments style color code.
"""
fg, _, bg = name.upper().replace("HEX", "#").partition("__")
if fg.startswith("BACKGROUND_") or fg.startswith("BG#"):
# swap fore & back if needed.
fg, bg = bg, fg
# convert names to codes
if len(fg) == 0 and len(bg) == 0:
code = "noinherit"
elif len(fg) == 0:
code = color_name_to_pygments_code(bg, styles)
elif len(bg) == 0:
code = color_name_to_pygments_code(fg, styles)
else:
# have both colors
code = color_name_to_pygments_code(bg, styles)
code += " "
code += color_name_to_pygments_code(fg, styles)
    return code


def color_token_by_name(xc: tuple, styles=None) -> _TokenType:
"""Returns (color) token corresponding to Xonsh color tuple, side effect: defines token is defined in styles"""
if not styles:
try:
styles = XSH.shell.shell.styler.styles # type:ignore
except AttributeError:
pass
tokName = xc[0]
if styles:
pc = color_name_to_pygments_code(xc[0], styles)
if len(xc) > 1:
pc += " " + color_name_to_pygments_code(xc[1], styles)
tokName += "__" + xc[1]
token = getattr(Color, norm_name(tokName))
if styles and (token not in styles or not styles[token]):
styles[token] = pc
    return token


def partial_color_tokenize(template):
"""Tokenizes a template string containing colors. Will return a list
of tuples mapping the token to the string which has that color.
    These sub-strings may be templates themselves.
"""
if XSH.shell is not None:
styles = XSH.shell.shell.styler.styles
else:
styles = None
color = Color.DEFAULT
try:
toks, color = _partial_color_tokenize_main(template, styles)
except Exception:
toks = [(Color.DEFAULT, template)]
if styles is not None:
styles[color] # ensure color is available
    return toks
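

# Usage sketch: a color-templated string decomposes into (token, text) pairs;
# on any parse failure the whole template is returned under the default token.
def _example_partial_color_tokenize():
    for token, text in partial_color_tokenize("{GREEN}ok{RESET} done"):
        print(token, repr(text))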


def xonsh_style_proxy(styler):
"""Factory for a proxy class to a xonsh style."""
    # Monkey patch pygments' list of known ansi colors
    # with the new ansi color names used by PTK2.
    # Can be removed once the pygments names get fixed.
if pygments_version_info() and pygments_version_info() < (2, 4, 0):
pygments.style.ansicolors.update(ANSICOLOR_NAMES_MAP)
class XonshStyleProxy(Style):
"""Simple proxy class to fool prompt toolkit."""
target = styler
styles = styler.styles
highlight_color = styler.highlight_color
background_color = styler.background_color
def __new__(cls, *args, **kwargs):
return cls.target
    return XonshStyleProxy


def _ptk_specific_style_value(style_value):
"""Checks if the given value is PTK style specific"""
for ptk_spec in PTK_SPECIFIC_VALUES:
if ptk_spec in style_value:
return True
    return False


def _format_ptk_style_name(name):
"""Format PTK style name to be able to include it in a pygments style"""
parts = name.split("-")
return "".join(part.capitalize() for part in parts) |
Get pygments token object by its string representation. | def _get_token_by_name(name):
"""Get pygments token object by its string representation."""
if not isinstance(name, str):
return name
token = Token
parts = name.split(".")
# PTK - all lowercase
if parts[0] == parts[0].lower():
parts = ["PTK"] + [_format_ptk_style_name(part) for part in parts]
# color name
if len(parts) == 1:
return color_token_by_name((name,))
if parts[0] == "Token":
parts = parts[1:]
while len(parts) > 0:
token = getattr(token, parts[0])
parts = parts[1:]
    return token


def _tokenize_style_dict(styles):
"""Converts possible string keys in style dicts to Tokens"""
return {
_get_token_by_name(token): value
for token, value in styles.items()
if not _ptk_specific_style_value(value)
    }


def register_custom_pygments_style(
name, styles, highlight_color=None, background_color=None, base="default"
):
"""Register custom style.
Parameters
----------
name : str
Style name.
styles : dict
Token -> style mapping.
highlight_color : str
        Highlight color.
background_color : str
Background color.
base : str, optional
Base style to use as default.
Returns
-------
style : The ``pygments.Style`` subclass created
"""
base_style = get_style_by_name(base)
custom_styles = base_style.styles.copy()
for token, value in _tokenize_style_dict(styles).items():
custom_styles[token] = value
non_pygments_rules = {
token: value
for token, value in styles.items()
if _ptk_specific_style_value(value)
}
style = type(
f"Custom{name}Style",
(Style,),
{
"styles": custom_styles,
"highlight_color": (
highlight_color
if highlight_color is not None
else base_style.highlight_color
),
"background_color": (
background_color
if background_color is not None
else base_style.background_color
),
},
)
add_custom_style(name, style)
cmap = pygments_style_by_name(base).copy()
# replace colors in color map if found in styles
for token in cmap.keys():
if token in custom_styles:
cmap[token] = custom_styles[token]
STYLES[name] = cmap
if len(non_pygments_rules) > 0:
NON_PYGMENTS_RULES[name] = non_pygments_rules
    return style
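

# Usage sketch with a minimal, hypothetical theme: keys may be dotted pygments
# token names (strings are resolved via _get_token_by_name), and PTK-specific
# values are diverted into NON_PYGMENTS_RULES rather than the pygments style.
def _example_register_style():
    my_styles = {
        "Token.Literal.String": "#00aa00",
        "Token.Comment": "italic #888888",
    }
    register_custom_pygments_style("mytheme", my_styles, base="monokai")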


def make_pygments_style(palette):
"""Makes a pygments style based on a color palette."""
global Color
style = {Color.DEFAULT: "noinherit"}
for name, t in BASE_XONSH_COLORS.items():
color = find_closest_color(t, palette)
style[getattr(Color, name)] = "#" + color
    return style


def pygments_style_by_name(name):
"""Gets or makes a pygments color style by its name."""
if name in STYLES:
return STYLES[name]
pstyle = get_style_by_name(name)
palette = make_palette(pstyle.styles.values())
astyle = make_pygments_style(palette)
STYLES[name] = astyle
    return astyle


def _monkey_patch_pygments_codes():
"""Monky patch pygments' dict of console codes,
with new color names
"""
if pygments_version_info() and pygments_version_info() >= (2, 4, 0):
return
import pygments.console
if "brightblack" in pygments.console.codes:
# Assume that colors are already fixed in pygments
# for example when using pygments from source
return
if not getattr(pygments.console, "_xonsh_patched", False):
patched_codes = {}
for new, old in PTK_NEW_OLD_COLOR_MAP.items():
if old in pygments.console.codes:
patched_codes[new[1:]] = pygments.console.codes[old]
pygments.console.codes.update(patched_codes)
        pygments.console._xonsh_patched = True


def on_lscolors_change(key, oldvalue, newvalue, **kwargs):
"""if LS_COLORS updated, update file_color_tokens and corresponding color token in style"""
if newvalue is None:
del file_color_tokens[key]
else:
        file_color_tokens[key] = color_token_by_name(newvalue)


def color_file(file_path: str, path_stat: os.stat_result) -> tuple[_TokenType, str]:
"""Determine color to use for file *approximately* as ls --color would,
given lstat() results and its path.
Parameters
----------
file_path
relative path of file (as user typed it).
path_stat
lstat() results for file_path.
Returns
-------
color token, color_key
Notes
-----
* implementation follows one authority:
https://github.com/coreutils/coreutils/blob/master/src/ls.c#L4879
* except:
1. does not return 'mi'. That's the color ls uses to show the (missing) *target* of a symlink
(in ls -l, not ls).
    2. in dircolors, setting the type code to '0' or '00' bypasses that test and proceeds to others.
In our implementation, setting code to '00' paints the file with no color.
This is arguably a bug.
"""
lsc = XSH.env["LS_COLORS"] # type:ignore
color_key = "fi"
# if symlink, get info on (final) target
if stat.S_ISLNK(path_stat.st_mode):
try:
tar_path_stat = os.stat(file_path) # and work with its properties
if lsc.is_target("ln"): # if ln=target
path_stat = tar_path_stat
        except FileNotFoundError:  # bug: always color a broken link 'or'
color_key = "or" # early exit
ret_color_token = file_color_tokens.get(color_key, Text)
return ret_color_token, color_key
mode = path_stat.st_mode
if stat.S_ISREG(mode):
if mode & stat.S_ISUID:
color_key = "su"
elif mode & stat.S_ISGID:
color_key = "sg"
else:
cap = os_listxattr(file_path, follow_symlinks=False)
if cap and "security.capability" in cap: # protect None return on some OS?
color_key = "ca"
elif stat.S_IMODE(mode) & (stat.S_IXUSR + stat.S_IXGRP + stat.S_IXOTH):
color_key = "ex"
elif path_stat.st_nlink > 1:
color_key = "mh"
else:
color_key = "fi"
elif stat.S_ISDIR(mode): # ls --color doesn't colorize sticky or ow if not dirs...
color_key = "di"
if not (ON_WINDOWS): # on Windows, these do not mean what you think they mean.
if (mode & stat.S_ISVTX) and (mode & stat.S_IWOTH):
color_key = "tw"
elif mode & stat.S_IWOTH:
color_key = "ow"
elif mode & stat.S_ISVTX:
color_key = "st"
elif stat.S_ISLNK(mode):
color_key = "ln"
elif stat.S_ISFIFO(mode):
color_key = "pi"
elif stat.S_ISSOCK(mode):
color_key = "so"
elif stat.S_ISBLK(mode):
color_key = "bd"
elif stat.S_ISCHR(mode):
color_key = "cd"
elif stat.S_ISDOOR(mode): # type:ignore
color_key = "do"
else:
color_key = "or" # any other type --> orphan
# if still normal file -- try color by file extension.
# note: symlink to *.<ext> will be colored 'fi' unless the symlink itself
# ends with .<ext>. `ls` does the same. Bug-for-bug compatibility!
if color_key == "fi":
match = color_file_extension_RE.match(file_path)
if match:
ext = "*" + match.group(1) # look for *.<fileExtension> coloring
if ext in lsc:
color_key = ext
ret_color_token = file_color_tokens.get(color_key, Text)
    return ret_color_token, color_key


def subproc_cmd_callback(_, match):
"""Yield Builtin token if match contains valid command,
    otherwise fall back to the fallback lexer.
"""
cmd = match.group()
    yield match.start(), Name.Builtin if _command_is_valid(cmd) else Error, cmd


def subproc_arg_callback(_, match):
"""Check if match contains valid path"""
text = match.group()
yieldVal = Text
try:
path = os.path.expanduser(text)
path_stat = os.lstat(path) # lstat() will raise FNF if not a real file
yieldVal, _ = color_file(path, path_stat)
except OSError:
pass
    yield (match.start(), yieldVal, text)


def build_cache():
"""Does the hard work of building a cache from nothing."""
cache = {}
cache["lexers"] = _discover_lexers()
cache["formatters"] = _discover_formatters()
cache["styles"] = _discover_styles()
cache["filters"] = _discover_filters()
    return cache


def cache_filename():
"""Gets the name of the cache file to use."""
# Configuration variables read from the environment
if "PYGMENTS_CACHE_FILE" in os.environ:
return os.environ["PYGMENTS_CACHE_FILE"]
else:
return os.path.join(
os.environ.get(
"XDG_DATA_HOME",
os.path.join(os.path.expanduser("~"), ".local", "share"),
),
"pygments-cache",
"cache.py",
        )


def add_custom_style(name: str, style: "Style"):
"""Register custom style to be able to retrieve it by ``get_style_by_name``.
Parameters
----------
name
Style name.
style
Custom style to add.
"""
    CUSTOM_STYLES[name] = style


def load(filename):
"""Loads the cache from a filename."""
global CACHE
with open(filename) as f:
s = f.read()
ctx = globals()
CACHE = eval(s, ctx, ctx)
    return CACHE


def write_cache(filename):
"""Writes the current cache to the file"""
from pprint import pformat
d = os.path.dirname(filename)
os.makedirs(d, exist_ok=True)
s = pformat(CACHE)
with open(filename, "w") as f:
        f.write(s)


def load_or_build():
"""Loads the cache from disk. If the cache does not exist,
this will build and write it out.
"""
global CACHE
fname = cache_filename()
if os.path.exists(fname):
load(fname)
else:
import sys
if DEBUG:
print("pygments cache not found, building...", file=sys.stderr)
CACHE = build_cache()
if DEBUG:
print("...writing cache to " + fname, file=sys.stderr)
        write_cache(fname)


def get_lexer_for_filename(filename, text="", **options):
"""Gets a lexer from a filename (usually via the filename extension).
This mimics the behavior of ``pygments.lexers.get_lexer_for_filename()``
and ``pygments.lexers.guess_lexer_for_filename()``.
"""
if CACHE is None:
load_or_build()
exts = CACHE["lexers"]["exts"]
fname = os.path.basename(filename)
key = fname if fname in exts else os.path.splitext(fname)[1]
if key in exts:
modname, clsname = exts[key]
mod = importlib.import_module(modname)
cls = getattr(mod, clsname)
lexer = cls(**options)
else:
# couldn't find lexer in cache, fallback to the hard way
import inspect
from pygments.lexers import guess_lexer_for_filename
lexer = guess_lexer_for_filename(filename, text, **options)
# add this filename to the cache for future use
cls = type(lexer)
mod = inspect.getmodule(cls)
exts[fname] = (mod.__name__, cls.__name__)
write_cache(cache_filename())
    return lexer
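

# Usage sketch: a cache hit instantiates the lexer directly from the stored
# (module, class) pair; a miss falls back to pygments and persists the result,
# so the second lookup for the same extension skips pygments' registry. Note
# this may build and write the on-disk cache on first use.
def _example_get_lexer():
    lexer = get_lexer_for_filename("example.py")
    print(type(lexer).__name__)  # e.g. PythonLexer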


def get_formatter_for_filename(fn, **options):
"""Gets a formatter instance from a filename (usually via the filename
extension). This mimics the behavior of
``pygments.formatters.get_formatter_for_filename()``.
"""
if CACHE is None:
load_or_build()
exts = CACHE["formatters"]["exts"]
fname = os.path.basename(fn)
key = fname if fname in exts else os.path.splitext(fname)[1]
if key in exts:
modname, clsname = exts[key]
mod = importlib.import_module(modname)
cls = getattr(mod, clsname)
formatter = cls(**options)
else:
# couldn't find formatter in cache, fallback to the hard way
import inspect
from pygments.formatters import get_formatter_for_filename
formatter = get_formatter_for_filename(fn, **options)
# add this filename to the cache for future use
cls = type(formatter)
mod = inspect.getmodule(cls)
exts[fname] = (mod.__name__, cls.__name__)
write_cache(cache_filename())
    return formatter


def get_formatter_by_name(alias, **options):
"""Gets a formatter instance from its name or alias.
This mimics the behavior of ``pygments.formatters.get_formatter_by_name()``.
"""
if CACHE is None:
load_or_build()
names = CACHE["formatters"]["names"]
if alias in names:
modname, clsname = names[alias]
mod = importlib.import_module(modname)
cls = getattr(mod, clsname)
formatter = cls(**options)
else:
# couldn't find formatter in cache, fallback to the hard way
import inspect
from pygments.formatters import get_formatter_by_name
formatter = get_formatter_by_name(alias, **options)
        # add this alias to the cache for future use
cls = type(formatter)
mod = inspect.getmodule(cls)
names[alias] = (mod.__name__, cls.__name__)
write_cache(cache_filename())
    return formatter


def get_style_by_name(name):
"""Gets a style class from its name or alias.
This mimics the behavior of ``pygments.styles.get_style_by_name()``.
"""
if CACHE is None:
load_or_build()
names = CACHE["styles"]["names"]
if name in names:
modname, clsname = names[name]
mod = importlib.import_module(modname)
style = getattr(mod, clsname)
elif name in CUSTOM_STYLES:
style = CUSTOM_STYLES[name]
else:
# couldn't find style in cache, fallback to the hard way
import inspect
from pygments.styles import get_style_by_name
style = get_style_by_name(name)
# add this style to the cache for future use
mod = inspect.getmodule(style)
names[name] = (mod.__name__, style.__name__)
write_cache(cache_filename())
    return style


def get_all_styles():
"""Iterable through all known style names.
This mimics the behavior of ``pygments.styles.get_all_styles``.
"""
if CACHE is None:
load_or_build()
yield from CACHE["styles"]["names"]
    yield from CUSTOM_STYLES


def get_filter_by_name(filtername, **options):
"""Gets a filter instance from its name. This mimics the behavior of
``pygments.filters.get_filtere_by_name()``.
"""
if CACHE is None:
load_or_build()
names = CACHE["filters"]["names"]
if filtername in names:
modname, clsname = names[filtername]
mod = importlib.import_module(modname)
cls = getattr(mod, clsname)
filter = cls(**options)
else:
        # couldn't find the filter in the cache, fall back to the hard way
import inspect
from pygments.filters import get_filter_by_name
filter = get_filter_by_name(filtername, **options)
# add this filter to the cache for future use
cls = type(filter)
mod = inspect.getmodule(cls)
names[filtername] = (mod.__name__, cls.__name__)
write_cache(cache_filename())
    return filter


def setup_readline():
"""Sets up the readline module and completion suppression, if available."""
global \
RL_COMPLETION_SUPPRESS_APPEND, \
RL_LIB, \
RL_CAN_RESIZE, \
RL_STATE, \
readline, \
RL_COMPLETION_QUERY_ITEMS
if RL_COMPLETION_SUPPRESS_APPEND is not None:
return
for _rlmod_name in ("gnureadline", "readline"):
try:
readline = importlib.import_module(_rlmod_name)
sys.modules["readline"] = readline
except ImportError:
pass
else:
break
if readline is None:
print(
"""Skipping setup. Because no `readline` implementation available.
Please install a backend (`readline`, `prompt-toolkit`, etc) to use
`xonsh` interactively.
See https://github.com/xonsh/xonsh/issues/1170"""
)
return
import ctypes
import ctypes.util
uses_libedit = readline.__doc__ and "libedit" in readline.__doc__
readline.set_completer_delims(" \t\n")
# Cygwin seems to hang indefinitely when querying the readline lib
if (not ON_CYGWIN) and (not ON_MSYS) and (not readline.__file__.endswith(".py")):
RL_LIB = lib = ctypes.cdll.LoadLibrary(readline.__file__)
try:
RL_COMPLETION_SUPPRESS_APPEND = ctypes.c_int.in_dll(
lib, "rl_completion_suppress_append"
)
except ValueError:
# not all versions of readline have this symbol, ie Macs sometimes
RL_COMPLETION_SUPPRESS_APPEND = None
try:
RL_COMPLETION_QUERY_ITEMS = ctypes.c_int.in_dll(
lib, "rl_completion_query_items"
)
except ValueError:
# not all versions of readline have this symbol, ie Macs sometimes
RL_COMPLETION_QUERY_ITEMS = None
try:
RL_STATE = ctypes.c_int.in_dll(lib, "rl_readline_state")
except Exception:
pass
RL_CAN_RESIZE = hasattr(lib, "rl_reset_screen_size")
env = XSH.env
# reads in history
readline.set_history_length(-1)
ReadlineHistoryAdder()
# sets up IPython-like history matching with up and down
readline.parse_and_bind('"\\e[B": history-search-forward')
readline.parse_and_bind('"\\e[A": history-search-backward')
# Setup Shift-Tab to indent
readline.parse_and_bind('"\\e[Z": "{}"'.format(env.get("INDENT")))
# handle tab completion differences found in libedit readline compatibility
# as discussed at http://stackoverflow.com/a/7116997
if uses_libedit and ON_DARWIN:
readline.parse_and_bind("bind ^I rl_complete")
print(
"\n".join(
[
"",
"*" * 78,
"libedit detected - readline will not be well behaved, including but not limited to:",
" * crashes on tab completion",
" * incorrect history navigation",
" * corrupting long-lines",
" * failure to wrap or indent lines properly",
"",
"It is highly recommended that you install gnureadline, which is installable with:",
" xpip install gnureadline",
"*" * 78,
]
),
file=sys.stderr,
)
else:
readline.parse_and_bind("tab: complete")
# try to load custom user settings
inputrc_name = os_environ.get("INPUTRC")
if inputrc_name is None:
if uses_libedit:
inputrc_name = ".editrc"
else:
inputrc_name = ".inputrc"
inputrc_name = os.path.join(os.path.expanduser("~"), inputrc_name)
if (not ON_WINDOWS) and (not os.path.isfile(inputrc_name)):
inputrc_name = "/etc/inputrc"
if ON_WINDOWS:
winutils.enable_virtual_terminal_processing()
if os.path.isfile(inputrc_name):
try:
readline.read_init_file(inputrc_name)
except Exception:
# this seems to fail with libedit
print_exception("xonsh: could not load readline default init file.")
# Protection against paste jacking (issue #1154)
# This must be set after the init file is loaded since read_init_file()
# automatically disables bracketed paste
# (https://github.com/python/cpython/pull/24108)
readline.parse_and_bind("set enable-bracketed-paste on")
# properly reset input typed before the first prompt
    readline.set_startup_hook(carriage_return)


def teardown_readline():
"""Tears down up the readline module, if available."""
try:
import readline
except (ImportError, TypeError):
        return


def fix_readline_state_after_ctrl_c():
"""
Fix to allow Ctrl-C to exit reverse-i-search.
Based on code from:
http://bugs.python.org/file39467/raw_input__workaround_demo.py
"""
if ON_WINDOWS:
# hack to make pyreadline mimic the desired behavior
try:
_q = readline.rl.mode.process_keyevent_queue
if len(_q) > 1:
_q.pop()
except Exception:
pass
if RL_STATE is None:
return
if RL_STATE.value & _RL_STATE_ISEARCH:
RL_STATE.value &= ~_RL_STATE_ISEARCH
if not RL_STATE.value & _RL_STATE_DONE:
        RL_STATE.value |= _RL_STATE_DONE


def rl_completion_suppress_append(val=1):
"""Sets the rl_completion_suppress_append variable, if possible.
A value of 1 (default) means to suppress, a value of 0 means to enable.
"""
if RL_COMPLETION_SUPPRESS_APPEND is None:
return
    RL_COMPLETION_SUPPRESS_APPEND.value = val


def rl_completion_query_items(val=None):
"""Sets the rl_completion_query_items variable, if possible.
A None value will set this to $COMPLETION_QUERY_LIMIT, otherwise any integer
is accepted.
"""
if RL_COMPLETION_QUERY_ITEMS is None:
return
if val is None:
val = XSH.env.get("COMPLETION_QUERY_LIMIT")
    RL_COMPLETION_QUERY_ITEMS.value = val


def rl_variable_dumper(readable=True):
"""Dumps the currently set readline variables. If readable is True, then this
output may be used in an inputrc file.
"""
    RL_LIB.rl_variable_dumper(int(readable))


def rl_variable_value(variable):
"""Returns the currently set value for a readline configuration variable."""
global RL_VARIABLE_VALUE
if RL_VARIABLE_VALUE is None:
import ctypes
RL_VARIABLE_VALUE = RL_LIB.rl_variable_value
RL_VARIABLE_VALUE.restype = ctypes.c_char_p
env = XSH.env
enc, errors = env.get("XONSH_ENCODING"), env.get("XONSH_ENCODING_ERRORS")
if isinstance(variable, str):
variable = variable.encode(encoding=enc, errors=errors)
rtn = RL_VARIABLE_VALUE(variable)
    return rtn.decode(encoding=enc, errors=errors)


def rl_on_new_line():
"""Grabs one of a few possible redisplay functions in readline."""
names = ["rl_on_new_line", "rl_forced_update_display", "rl_redisplay"]
for name in names:
func = getattr(RL_LIB, name, None)
if func is not None:
break
else:
def print_for_newline():
print()
func = print_for_newline
    return func


def _insert_text_func(s, readline):
"""Creates a function to insert text via readline."""
def inserter():
readline.insert_text(s)
readline.redisplay()
    return inserter


def _render_completions(completions, prefix, prefix_len):
"""Render the completions according to the required prefix_len.
Readline will replace the current prefix with the chosen rendered completion.
"""
chopped = prefix[:-prefix_len] if prefix_len else prefix
rendered_completions = []
for comp in completions:
if isinstance(comp, xct.RichCompletion) and comp.prefix_len is not None:
if comp.prefix_len:
comp = prefix[: -comp.prefix_len] + comp
else:
comp = prefix + comp
elif chopped:
comp = chopped + comp
rendered_completions.append(comp)
    return rendered_completions
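

# Illustration with plain string completions: prefix_len states how many
# trailing characters of the typed prefix the completions already cover, and
# the untouched head of the prefix is glued back on.
def _example_render_completions():
    assert _render_completions(["foo", "fob"], prefix="fo", prefix_len=2) == ["foo", "fob"]
    assert _render_completions(["o1", "o2"], prefix="fo", prefix_len=1) == ["fo1", "fo2"]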


def transform_command(src, show_diff=True):
"""Returns the results of firing the precommand handles."""
i = 0
limit = sys.getrecursionlimit()
lst = ""
raw = src
while src != lst:
lst = src
srcs = events.on_transform_command.fire(cmd=src)
for s in srcs:
if s != lst:
src = s
break
i += 1
if i == limit:
print_exception(
"Modifications to source input took more than "
"the recursion limit number of iterations to "
"converge."
)
debug_level = XSH.env.get("XONSH_DEBUG")
if show_diff and debug_level >= 1 and src != raw:
sys.stderr.writelines(
difflib.unified_diff(
raw.splitlines(keepends=True),
src.splitlines(keepends=True),
fromfile="before precommand event",
tofile="after precommand event",
)
)
    return src


def partial_color_tokenize(template):
"""Tokenizes a template string containing colors. Will return a list
of tuples mapping the token to the string which has that color.
    These sub-strings may be templates themselves.
"""
from xonsh.built_ins import XSH
if HAS_PYGMENTS and XSH.shell is not None:
styles = XSH.shell.shell.styler.styles
elif XSH.shell is not None:
styles = DEFAULT_STYLE_DICT
else:
styles = None
color = Color.RESET
try:
toks, color = _partial_color_tokenize_main(template, styles)
except Exception:
toks = [(Color.RESET, template)]
if styles is not None:
styles[color] # ensure color is available
    return toks


def color_by_name(name, fg=None, bg=None):
"""Converts a color name to a color token, foreground name,
and background name. Will take into consideration current foreground
and background colors, if provided.
Parameters
----------
name : str
Color name.
fg : str, optional
Foreground color name.
bg : str, optional
Background color name.
Returns
-------
tok : Token
Pygments Token.Color subclass
fg : str or None
New computed foreground color name.
bg : str or None
New computed background color name.
"""
name = name.upper()
if name in ("RESET", "NO_COLOR"):
if name == "NO_COLOR":
warn_deprecated_no_color()
return Color.RESET, None, None
m = RE_BACKGROUND.search(name)
if m is None: # must be foreground color
fg = norm_name(name)
else:
bg = norm_name(name)
# assemble token
if fg is None and bg is None:
tokname = "RESET"
elif fg is None:
tokname = bg
elif bg is None:
tokname = fg
else:
tokname = fg + "__" + bg
tok = getattr(Color, tokname)
    return tok, fg, bg


def norm_name(name):
"""Normalizes a color name."""
return name.upper().replace("#", "HEX") |
Remove the colors from the template string and style as faded. | def style_as_faded(template: str) -> str:
"""Remove the colors from the template string and style as faded."""
tokens = partial_color_tokenize(template)
without_color = "".join([str(sect) for _, sect in tokens])
return "{RESET}{#d3d3d3}" + without_color + "{RESET}" |
Formats the timespan in a human readable form | def format_time(timespan, precision=3):
"""Formats the timespan in a human readable form"""
if timespan >= 60.0:
# we have more than a minute, format that in a human readable form
parts = [("d", 60 * 60 * 24), ("h", 60 * 60), ("min", 60), ("s", 1)]
time = []
leftover = timespan
for suffix, length in parts:
value = int(leftover / length)
if value > 0:
leftover = leftover % length
time.append(f"{str(value)}{suffix}")
if leftover < 1:
break
return " ".join(time)
# Unfortunately the unicode 'micro' symbol can cause problems in
# certain terminals.
# See bug: https://bugs.launchpad.net/ipython/+bug/348466
# Try to prevent crashes by being more secure than it needs to
# E.g. eclipse is able to print a mu, but has no sys.stdout.encoding set.
units = ["s", "ms", "us", "ns"] # the save value
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding:
try:
"\xb5".encode(sys.stdout.encoding)
units = ["s", "ms", "\xb5s", "ns"]
except Exception:
pass
scaling = [1, 1e3, 1e6, 1e9]
if timespan > 0.0:
order = min(-int(math.floor(math.log10(timespan)) // 3), 3)
else:
order = 3
return "{1:.{0}g} {2}".format(precision, timespan * scaling[order], units[order]) |


def timeit_alias(args, stdin=None):
"""Runs timing study on arguments."""
if not args:
print("Usage: timeit! <expression>")
return -1
# some real args
number = 0
quiet = False
repeat = 3
precision = 3
# setup
ctx = XSH.ctx
timer = Timer(timer=clock)
stmt = " ".join(args)
innerstr = INNER_TEMPLATE.format(stmt=stmt)
# Track compilation time so it can be reported if too long
# Minimum time above which compilation time will be reported
tc_min = 0.1
t0 = clock()
innercode = XSH.builtins.compilex(
innerstr, filename="<xonsh-timeit>", mode="exec", glbs=ctx
)
tc = clock() - t0
# get inner func
ns = {}
XSH.builtins.execx(innercode, glbs=ctx, locs=ns, mode="exec")
timer.inner = ns["inner"]
# Check if there is a huge difference between the best and worst timings.
worst_tuning = 0
if number == 0:
# determine number so that 0.2 <= total time < 2.0
number = 1
for _ in range(1, 10):
time_number = timer.timeit(number)
worst_tuning = max(worst_tuning, time_number / number)
if time_number >= 0.2:
break
number *= 10
all_runs = timer.repeat(repeat, number)
best = min(all_runs) / number
# print some debug info
if not quiet:
worst = max(all_runs) / number
if worst_tuning:
worst = max(worst, worst_tuning)
# Check best timing is greater than zero to avoid a
# ZeroDivisionError.
# In cases where the slowest timing is less than 10 microseconds
# we assume that it does not really matter if the fastest
# timing is 4 times faster than the slowest timing or not.
if worst > 4 * best and best > 0 and worst > 1e-5:
print(
f"The slowest run took {worst / best:0.2f} times longer than the "
"fastest. This could mean that an intermediate result "
"is being cached."
)
print(
f"{number} loops, best of {repeat}: {format_time(best, precision)} per loop"
)
if tc > tc_min:
print(f"Compiler time: {tc:.2f} s")
    return


def untokenize(iterable):
"""Transform tokens back into Python source code.
It returns a bytes object, encoded using the ENCODING
token, which is the first token sequence output by tokenize.
Each element returned by the iterable must be a token sequence
with at least two elements, a token number and token value. If
only two tokens are passed, the resulting output is poor.
Round-trip invariant for full input:
Untokenized source will match input source exactly
    Round-trip invariant for limited input:
        # Output bytes will tokenize back to the input
t1 = [tok[:2] for tok in tokenize(f.readline)]
newcode = untokenize(t1)
readline = BytesIO(newcode).readline
t2 = [tok[:2] for tok in tokenize(readline)]
assert t1 == t2
"""
ut = Untokenizer()
out = ut.untokenize(iterable)
if ut.encoding is not None:
out = out.encode(ut.encoding)
    return out


def _get_normal_name(orig_enc):
"""Imitates get_normal_name in tokenizer.c."""
# Only care about the first 12 characters.
enc = orig_enc[:12].lower().replace("_", "-")
if enc == "utf-8" or enc.startswith("utf-8-"):
return "utf-8"
if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or enc.startswith(
("latin-1-", "iso-8859-1-", "iso-latin-1-")
):
return "iso-8859-1"
    return orig_enc


def detect_encoding(readline):
"""
The detect_encoding() function is used to detect the encoding that should
be used to decode a Python source file. It requires one argument, readline,
in the same way as the tokenize() generator.
It will call readline a maximum of twice, and return the encoding used
(as a string) and a list of any lines (left as bytes) it has read in.
It detects the encoding from the presence of a utf-8 bom or an encoding
cookie as specified in pep-0263. If both a bom and a cookie are present,
but disagree, a SyntaxError will be raised. If the encoding cookie is an
invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found,
'utf-8-sig' is returned.
If no encoding is specified, then the default of 'utf-8' will be returned.
"""
try:
filename = readline.__self__.name
except AttributeError:
filename = None
bom_found = False
encoding = None
default = "utf-8"
def read_or_stop():
try:
return readline()
except StopIteration:
return b""
def find_cookie(line):
try:
# Decode as UTF-8. Either the line is an encoding declaration,
# in which case it should be pure ASCII, or it must be UTF-8
# per default encoding.
line_string = line.decode("utf-8")
except UnicodeDecodeError:
msg = "invalid or missing encoding declaration"
if filename is not None:
msg = f"{msg} for {filename!r}"
raise SyntaxError(msg)
match = cookie_re.match(line_string)
if not match:
return None
encoding = _get_normal_name(match.group(1))
try:
codecs.lookup(encoding)
except LookupError:
# This behaviour mimics the Python interpreter
if filename is None:
msg = "unknown encoding: " + encoding
else:
msg = f"unknown encoding for {filename!r}: {encoding}"
raise SyntaxError(msg)
if bom_found:
if encoding != "utf-8":
# This behaviour mimics the Python interpreter
if filename is None:
msg = "encoding problem: utf-8"
else:
msg = f"encoding problem for {filename!r}: utf-8"
raise SyntaxError(msg)
encoding += "-sig"
return encoding
first = read_or_stop()
if first.startswith(codecs.BOM_UTF8):
bom_found = True
first = first[3:]
default = "utf-8-sig"
if not first:
return default, []
encoding = find_cookie(first)
if encoding:
return encoding, [first]
if not blank_re.match(first):
return default, [first]
second = read_or_stop()
if not second:
return default, [first]
encoding = find_cookie(second)
if encoding:
return encoding, [first, second]
    return default, [first, second]
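

# Illustration: a PEP 263 coding cookie on the first line is detected and
# normalized, and the consumed line is handed back for re-reading.
def _example_detect_encoding():
    import io as _io
    buf = _io.BytesIO(b"# -*- coding: latin-1 -*-\nx = 1\n")
    enc, lines = detect_encoding(buf.readline)
    assert enc == "iso-8859-1"
    assert lines == [b"# -*- coding: latin-1 -*-\n"]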


def tokopen(filename):
"""Open a file in read only mode using the encoding detected by
detect_encoding().
"""
buffer = builtins.open(filename, "rb")
try:
encoding, lines = detect_encoding(buffer.readline)
buffer.seek(0)
text = io.TextIOWrapper(buffer, encoding, line_buffering=True)
text.mode = "r"
return text
except Exception:
buffer.close()
        raise


def tokenize(readline, tolerant=False, tokenize_ioredirects=True):
"""
The tokenize() generator requires one argument, readline, which
must be a callable object which provides the same interface as the
readline() method of built-in file objects. Each call to the function
should return one line of input as bytes. Alternately, readline
can be a callable function terminating with StopIteration:
readline = open(myfile, 'rb').__next__ # Example of alternate readline
The generator produces 5-tuples with these members: the token type; the
token string; a 2-tuple (srow, scol) of ints specifying the row and
column where the token begins in the source; a 2-tuple (erow, ecol) of
ints specifying the row and column where the token ends in the source;
and the line on which the token was found. The line passed is the
logical line; continuation lines are included.
The first token sequence will always be an ENCODING token
which tells you which encoding was used to decode the bytes stream.
If ``tolerant`` is True, yield ERRORTOKEN with the erroneous string instead of
throwing an exception when encountering an error.
If ``tokenize_ioredirects`` is True, produce IOREDIRECT tokens for special
io-redirection operators like ``2>``. Otherwise, treat code like ``2>`` as
regular Python code.
"""
encoding, consumed = detect_encoding(readline)
rl_gen = iter(readline, b"")
empty = itertools.repeat(b"")
return _tokenize(
itertools.chain(consumed, rl_gen, empty).__next__,
encoding,
tolerant,
tokenize_ioredirects,
) |
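A minimal sketch of consuming the generator from an in-memory buffer; the input line is hypothetical:

import io

# Each yielded item carries the token type, the token string, (row, col)
# start and end positions, and the source line it came from.
readline = io.BytesIO(b"x = 1\n").readline
for tok in tokenize(readline, tolerant=True):
    print(tok)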
Takes a string path and expands ~ to home if expand_user is set
and environment vars if EXPAND_ENV_VARS is set. | def expand_path(s, expand_user=True):
"""Takes a string path and expands ~ to home if expand_user is set
and environment vars if EXPAND_ENV_VARS is set."""
env = xsh.env or os_environ
if env.get("EXPAND_ENV_VARS", False):
s = expandvars(s)
if expand_user:
# expand ~ according to Bash unquoted rules "Each variable assignment is
# checked for unquoted tilde-prefixes immediately following a ':' or the
# first '='". See the following for more details.
# https://www.gnu.org/software/bash/manual/html_node/Tilde-Expansion.html
pre, char, post = s.partition("=")
if char:
s = expanduser(pre) + char
s += os.pathsep.join(map(expanduser, post.split(os.pathsep)))
else:
s = expanduser(s)
return s |
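Illustrative behavior, assuming a POSIX system with HOME=/home/me and EXPAND_ENV_VARS set in the session environment (all values hypothetical):

#   expand_path("~/src")             ->  "/home/me/src"
#   expand_path("$HOME/src")         ->  "/home/me/src"
#   expand_path("PATH=~/bin:~/opt")  ->  "PATH=/home/me/bin:/home/me/opt"
#
# The last case follows the Bash rule cited above: tildes after the '='
# and after each ':' are expanded independently.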
Performs environment variable / user expansion on a given path
if EXPAND_ENV_VARS is set. | def _expandpath(path):
"""Performs environment variable / user expansion on a given path
if EXPAND_ENV_VARS is set.
"""
env = xsh.env or os_environ
expand_user = env.get("EXPAND_ENV_VARS", False)
return expand_path(path, expand_user=expand_user) |
Returns a random element from a list containing fewer than 1 million elements. | def simple_random_choice(lst):
"""Returns random element from the list with length less than 1 million elements."""
size = len(lst)
if size > 1000000: # microsecond maximum
raise ValueError("The list is too long.")
return lst[datetime.datetime.now().microsecond % size] |
Tries to decode the bytes using XONSH_ENCODING if available,
otherwise using sys.getdefaultencoding(). | def decode_bytes(b):
"""Tries to decode the bytes using XONSH_ENCODING if available,
otherwise using sys.getdefaultencoding().
"""
env = xsh.env or os_environ
enc = env.get("XONSH_ENCODING") or DEFAULT_ENCODING
err = env.get("XONSH_ENCODING_ERRORS") or "strict"
return b.decode(encoding=enc, errors=err) |
Finds whichever of the given substrings occurs first in the given string
and returns the index and that substring, or (len(s), None) if no such strings occur. | def findfirst(s, substrs):
"""Finds whichever of the given substrings occurs first in the given string
and returns that substring, or returns None if no such strings occur.
"""
i = len(s)
result = None
for substr in substrs:
pos = s.find(substr)
if -1 < pos < i:
i = pos
result = substr
return i, result |
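A quick check of the return convention; note that the function returns an (index, substring) pair rather than just the substring:

assert findfirst("ls > out.txt >> log", (">>", ">")) == (3, ">")
assert findfirst("echo hi", (">", "<")) == (7, None)  # no match: (len(s), None)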
Tests if an RPAREN token is matched with something other than a plain old
LPAREN type. | def _is_not_lparen_and_rparen(lparens, rtok):
"""Tests if an RPAREN token is matched with something other than a plain old
LPAREN type.
"""
# note that any([]) is False, so this covers len(lparens) == 0
return rtok.type == "RPAREN" and any(x != "LPAREN" for x in lparens) |
Determines if parentheses are balanced in an expression. | def balanced_parens(line, mincol=0, maxcol=None, lexer=None):
"""Determines if parentheses are balanced in an expression."""
line = line[mincol:maxcol]
if lexer is None:
lexer = xsh.execer.parser.lexer
if "(" not in line and ")" not in line:
return True
cnt = 0
lexer.input(line)
for tok in lexer:
if tok.type in LPARENS:
cnt += 1
elif tok.type == "RPAREN":
cnt -= 1
elif tok.type == "ERRORTOKEN" and ")" in tok.value:
cnt -= 1
return cnt == 0 |
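Illustrative calls; the default lexer comes from the running xonsh session, so these assume an initialized interpreter:

#   balanced_parens("print(1)")  ->  True
#   balanced_parens("print(1")   ->  False
#   balanced_parens("echo hi")   ->  True   (no parens at all)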
Determines whether a line ends with a colon token, ignoring comments. | def ends_with_colon_token(line, lexer=None):
"""Determines whether a line ends with a colon token, ignoring comments."""
if lexer is None:
lexer = xsh.execer.parser.lexer
lexer.input(line)
toks = list(lexer)
return len(toks) > 0 and toks[-1].type == "COLON" |
Returns the column number of the next logical break in subproc mode.
This function may be useful in finding the maxcol argument of
subproc_toks(). | def find_next_break(line, mincol=0, lexer=None):
"""Returns the column number of the next logical break in subproc mode.
This function may be useful in finding the maxcol argument of
subproc_toks().
"""
if mincol >= 1:
line = line[mincol:]
if lexer is None:
lexer = xsh.execer.parser.lexer
if RE_END_TOKS.search(line) is None:
return None
maxcol = None
lparens = []
lexer.input(line)
for tok in lexer:
if tok.type in LPARENS:
lparens.append(tok.type)
elif tok.type in END_TOK_TYPES:
if _is_not_lparen_and_rparen(lparens, tok):
lparens.pop()
else:
maxcol = tok.lexpos + mincol + 1
break
elif tok.type == "ERRORTOKEN" and ")" in tok.value:
maxcol = tok.lexpos + mincol + 1
break
elif tok.type == "BANG":
maxcol = mincol + len(line) + 1
break
return maxcol |
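An illustrative call, assuming xonsh's lexer emits "&&" as a single AND token (one of END_TOK_TYPES):

#   find_next_break("ls && echo a", 0)  ->  4
#
# i.e. one column past the start of the "&&" break token.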
Encapsulates tokens in a source code line in an uncaptured
subprocess ![] starting at a minimum column. If there are no tokens
(i.e. in a comment line) this returns None. If greedy is True, it will encapsulate
normal parentheses. Greedy is False by default. | def subproc_toks(
line, mincol=-1, maxcol=None, lexer=None, returnline=False, greedy=False
):
"""Encapsulates tokens in a source code line in a uncaptured
subprocess ![] starting at a minimum column. If there are no tokens
(ie in a comment line) this returns None. If greedy is True, it will encapsulate
normal parentheses. Greedy is False by default.
"""
if lexer is None:
lexer = xsh.execer.parser.lexer
if maxcol is None:
maxcol = len(line) + 1
lexer.reset()
lexer.input(line)
toks = []
lparens = []
saw_macro = False
end_offset = 0
for tok in lexer:
pos = tok.lexpos
if tok.type not in END_TOK_TYPES and pos >= maxcol:
break
if tok.type == "BANG":
saw_macro = True
if saw_macro and tok.type not in ("NEWLINE", "DEDENT"):
toks.append(tok)
continue
if tok.type in LPARENS:
lparens.append(tok.type)
if greedy and len(lparens) > 0 and "LPAREN" in lparens:
toks.append(tok)
if tok.type == "RPAREN":
lparens.pop()
continue
if len(toks) == 0 and tok.type in BEG_TOK_SKIPS:
continue # handle indentation
elif len(toks) > 0 and toks[-1].type in END_TOK_TYPES:
if _is_not_lparen_and_rparen(lparens, toks[-1]):
lparens.pop() # don't continue or break
elif pos < maxcol and tok.type not in ("NEWLINE", "DEDENT", "WS"):
if not greedy:
toks.clear()
if tok.type in BEG_TOK_SKIPS:
continue
else:
break
if pos < mincol:
continue
toks.append(tok)
if tok.type == "WS" and tok.value == "\\":
pass # line continuation
elif tok.type == "NEWLINE":
break
elif tok.type == "DEDENT":
# fake a newline when dedenting without a newline
tok.type = "NEWLINE"
tok.value = "\n"
tok.lineno -= 1
if len(toks) >= 2:
prev_tok_end = toks[-2].lexpos + len(toks[-2].value)
else:
prev_tok_end = len(line)
if "#" in line[prev_tok_end:]:
tok.lexpos = prev_tok_end # prevents wrapping comments
else:
tok.lexpos = len(line)
break
elif check_bad_str_token(tok):
return
else:
if len(toks) > 0 and toks[-1].type in END_TOK_TYPES:
if _is_not_lparen_and_rparen(lparens, toks[-1]):
pass
elif greedy and toks[-1].type == "RPAREN":
pass
else:
toks.pop()
if len(toks) == 0:
return # handle comment lines
tok = toks[-1]
pos = tok.lexpos
if isinstance(tok.value, str):
end_offset = len(tok.value.rstrip())
else:
el = line[pos:].split("#")[0].rstrip()
end_offset = len(el)
if len(toks) == 0:
return # handle comment lines
elif saw_macro or greedy:
end_offset = len(toks[-1].value.rstrip()) + 1
if toks[0].lineno != toks[-1].lineno:
# handle multiline cases
end_offset += _offset_from_prev_lines(line, toks[-1].lineno)
beg, end = toks[0].lexpos, (toks[-1].lexpos + end_offset)
end = len(line[:end].rstrip())
rtn = "![" + line[beg:end] + "]"
if returnline:
rtn = line[:beg] + rtn + line[end:]
return rtn |
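Illustrative calls, assuming an initialized xonsh lexer:

#   subproc_toks("ls -l")    ->  "![ls -l]"
#   subproc_toks("# note")   ->  None        (comment line: nothing to wrap)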
Checks if a token is a bad string. | def check_bad_str_token(tok):
"""Checks if a token is a bad string."""
if tok.type == "ERRORTOKEN" and tok.value == "EOF in multi-line string":
return True
elif isinstance(tok.value, str) and not check_quotes(tok.value):
return True
else:
return False |
Checks a string to make sure that if it starts with quotes, it also
ends with quotes. | def check_quotes(s):
"""Checks a string to make sure that if it starts with quotes, it also
ends with quotes.
"""
starts_as_str = RE_BEGIN_STRING.match(s) is not None
ends_as_str = s.endswith('"') or s.endswith("'")
if not starts_as_str and not ends_as_str:
ok = True
elif starts_as_str and not ends_as_str:
ok = False
elif not starts_as_str and ends_as_str:
ok = False
else:
m = RE_COMPLETE_STRING.match(s)
ok = m is not None
return ok |
The line continuation characters used in subproc mode. In interactive
mode on Windows the backslash must be preceded by a space. This is because
paths on Windows may end in a backslash. | def get_line_continuation():
"""The line continuation characters used in subproc mode. In interactive
mode on Windows the backslash must be preceded by a space. This is because
paths on Windows may end in a backslash.
"""
if ON_WINDOWS:
env = getattr(xsh, "env", None) or {}
if env.get("XONSH_INTERACTIVE", False):
return " \\"
return "\\" |
Returns a single logical line (i.e. one without line continuations)
from a list of lines. This line should begin at index idx. This also
returns the number of physical lines the logical line spans. The lines
should not contain newlines. | def get_logical_line(lines, idx):
"""Returns a single logical line (i.e. one without line continuations)
from a list of lines. This line should begin at index idx. This also
returns the number of physical lines the logical line spans. The lines
    should not contain newlines.
"""
n = 1
nlines = len(lines)
linecont = get_line_continuation()
while idx > 0 and lines[idx - 1].endswith(linecont):
idx -= 1
start = idx
line = lines[idx]
open_triple = _have_open_triple_quotes(line)
while (line.endswith(linecont) or open_triple) and idx < nlines - 1:
n += 1
idx += 1
if line.endswith(linecont):
line = line[:-1] + lines[idx]
else:
line = line + "\n" + lines[idx]
open_triple = _have_open_triple_quotes(line)
return line, n, start |
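A small sketch on a non-interactive POSIX setup, where the continuation marker is a bare backslash:

lines = ["echo a \\", "b", "echo c"]
line, n, start = get_logical_line(lines, 0)
# line == "echo a b", n == 2, start == 0
# Starting from idx=1 backtracks to the same logical line, so start == 0
# there as well.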
Replaces lines at idx that may end in line continuation with a logical
line that spans n lines. | def replace_logical_line(lines, logical, idx, n):
"""Replaces lines at idx that may end in line continuation with a logical
line that spans n lines.
"""
linecont = get_line_continuation()
if n == 1:
lines[idx] = logical
return
space = " "
for i in range(idx, idx + n - 1):
a = len(lines[i])
b = logical.find(space, a - 1)
if b < 0:
# no space found
lines[i] = logical
logical = ""
else:
# found space to split on
lines[i] = logical[:b] + linecont
logical = logical[b:]
lines[idx + n - 1] = logical |
Determines whether an expression has balanced opening and closing tokens. | def is_balanced(expr, ltok, rtok):
"""Determines whether an expression has unbalanced opening and closing tokens."""
lcnt = expr.count(ltok)
if lcnt == 0:
return True
    rcnt = expr.count(rtok)
    return lcnt == rcnt
Attempts to pull out a valid subexpression for unbalanced grouping,
based on opening tokens, e.g. '(', and closing tokens, e.g. ')'. This
does not do full tokenization, but should be good enough for tab
completion. | def subexpr_from_unbalanced(expr, ltok, rtok):
"""Attempts to pull out a valid subexpression for unbalanced grouping,
    based on opening tokens, e.g. '(', and closing tokens, e.g. ')'. This
does not do full tokenization, but should be good enough for tab
completion.
"""
if is_balanced(expr, ltok, rtok):
return expr
subexpr = expr.rsplit(ltok, 1)[-1]
subexpr = subexpr.rsplit(",", 1)[-1]
subexpr = subexpr.rsplit(":", 1)[-1]
return subexpr |
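Illustrative, in the tab-completion setting the docstring describes:

assert subexpr_from_unbalanced("f(a, b[c", "[", "]") == "c"
assert subexpr_from_unbalanced("a + b", "(", ")") == "a + b"  # already balanced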
Obtains the expression prior to last unbalanced left token. | def subexpr_before_unbalanced(expr, ltok, rtok):
"""Obtains the expression prior to last unbalanced left token."""
subexpr, _, post = expr.rpartition(ltok)
nrtoks_in_post = post.count(rtok)
while nrtoks_in_post != 0:
for _ in range(nrtoks_in_post):
subexpr, _, post = subexpr.rpartition(ltok)
nrtoks_in_post = post.count(rtok)
_, _, subexpr = subexpr.rpartition(rtok)
_, _, subexpr = subexpr.rpartition(ltok)
return subexpr |
Returns the whitespace at the start of a string. | def starting_whitespace(s):
"""Returns the whitespace at the start of a string"""
return STARTING_WHITESPACE_RE.match(s).group(1) |
In recent versions of Python, hasattr() only catches AttributeError.
This catches all errors. | def safe_hasattr(obj, attr):
"""In recent versions of Python, hasattr() only catches AttributeError.
This catches all errors.
"""
try:
getattr(obj, attr)
return True
except Exception:
return False |
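A quick demonstration of the difference from built-in hasattr(), which propagates exceptions other than AttributeError raised by properties:

class _Flaky:
    @property
    def value(self):
        raise ValueError("broken property")

assert safe_hasattr(_Flaky(), "value") is False  # hasattr() would raise ValueError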
Indent a string a given number of spaces or tabstops.
indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
Parameters
----------
instr : str
The string to be indented.
nspaces : int (default: 4)
The number of spaces to be indented.
ntabs : int (default: 0)
The number of tabs to be indented.
flatten : bool (default: False)
Whether to scrub existing indentation. If True, all lines will be
aligned to the same indentation. If False, existing indentation will
be strictly increased.
Returns
-------
outstr : string indented by ntabs and nspaces. | def indent(instr, nspaces=4, ntabs=0, flatten=False):
"""Indent a string a given number of spaces or tabstops.
indent(str,nspaces=4,ntabs=0) -> indent str by ntabs+nspaces.
Parameters
----------
    instr : str
The string to be indented.
nspaces : int (default: 4)
The number of spaces to be indented.
ntabs : int (default: 0)
The number of tabs to be indented.
flatten : bool (default: False)
Whether to scrub existing indentation. If True, all lines will be
aligned to the same indentation. If False, existing indentation will
be strictly increased.
Returns
-------
outstr : string indented by ntabs and nspaces.
"""
if instr is None:
return
ind = "\t" * ntabs + " " * nspaces
if flatten:
pat = re.compile(r"^\s*", re.MULTILINE)
else:
pat = re.compile(r"^", re.MULTILINE)
outstr = re.sub(pat, ind, instr)
if outstr.endswith(os.linesep + ind):
return outstr[: -len(ind)]
else:
return outstr |
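Illustrative, contrasting the default with flatten=True:

s = "if x:\n    pass"
assert indent(s) == "    if x:\n        pass"            # indentation increased
assert indent(s, flatten=True) == "    if x:\n    pass"  # all lines realigned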
Returns the appropriate filepath separator char depending on OS and
xonsh options set. | def get_sep():
"""Returns the appropriate filepath separator char depending on OS and
    xonsh options set.
"""
if ON_WINDOWS and xsh.env.get("FORCE_POSIX_PATHS"):
return os.altsep
else:
return os.sep |
Decorator for returning the object if cond is true and a backup if cond
is false. | def fallback(cond, backup):
"""Decorator for returning the object if cond is true and a backup if cond
is false.
"""
def dec(obj):
return obj if cond else backup
return dec |
Yields file names of executable files in path. | def _yield_accessible_unix_file_names(path):
"""yield file names of executable files in path."""
if not os.path.exists(path):
return
for file_ in os.scandir(path):
try:
if file_.is_file() and os.access(file_.path, os.X_OK):
yield file_.name
except OSError:
            # broken symlinks are neither dirs nor files
pass |
Returns a generator of files in path that the user could execute. | def executables_in(path) -> tp.Iterable[str]:
"""Returns a generator of files in path that the user could execute."""
if ON_WINDOWS:
func = _executables_in_windows
else:
func = _executables_in_posix
try:
yield from func(path)
except PermissionError:
return |
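A sketch for collecting every executable name visible on the search path; the environment lookup is illustrative:

import os

names = set()
for p in os.environ.get("PATH", "").split(os.pathsep):
    names.update(executables_in(p))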
Uses the debian/ubuntu command-not-found utility to suggest packages for a
command that cannot currently be found. | def debian_command_not_found(cmd):
"""Uses the debian/ubuntu command-not-found utility to suggest packages for a
command that cannot currently be found.
"""
if not ON_LINUX:
return ""
cnf = xsh.commands_cache.lazyget(
"command-not-found", ("/usr/lib/command-not-found",)
)[0]
if not os.path.isfile(cnf):
return ""
c = "{0} {1}; exit 0"
s = subprocess.check_output(
c.format(cnf, shlex.quote(cmd)),
text=True,
stderr=subprocess.STDOUT,
shell=True,
)
s = "\n".join(s.rstrip().splitlines()).strip()
return s |
Uses conda-suggest to suggest packages for a command that cannot
currently be found. | def conda_suggest_command_not_found(cmd, env):
"""Uses conda-suggest to suggest packages for a command that cannot
currently be found.
"""
try:
from conda_suggest import find
except ImportError:
return ""
return find.message_string(
cmd, conda_suggest_path=env.get("CONDA_SUGGEST_PATH", None)
) |