diff --git a/.venv/lib/python3.11/site-packages/numpy/_utils/__init__.py b/.venv/lib/python3.11/site-packages/numpy/_utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..388dd9174f356c74d6cdd6ad9a8b1ad603234420
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/_utils/__init__.py
@@ -0,0 +1,29 @@
+"""
+This is a module for defining private helpers which do not depend on the
+rest of NumPy.
+
+Everything in here must be self-contained so that it can be
+imported anywhere else without creating circular imports.
+If a utility requires the import of NumPy, it probably belongs
+in ``numpy.core``.
+"""
+
+from ._convertions import asunicode, asbytes
+
+
+def set_module(module):
+    """Private decorator for overriding __module__ on a function or class.
+
+    Example usage::
+
+        @set_module('numpy')
+        def example():
+            pass
+
+        assert example.__module__ == 'numpy'
+    """
+    def decorator(func):
+        if module is not None:
+            func.__module__ = module
+        return func
+    return decorator
diff --git a/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..800cca173795bc59be879938a4d122789d52bfe8
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/__init__.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_convertions.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_convertions.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b70d617fd7d3db9d4db2fc07affdb16c310c3732
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_convertions.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_inspect.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_inspect.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c73bb26a2d9e4a3e7bd0c3a92cb924c4eeb2d4c
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_inspect.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_pep440.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_pep440.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e8b2c4180cabc829004b0b638100b43ada18c701
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_pep440.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/_utils/_convertions.py b/.venv/lib/python3.11/site-packages/numpy/_utils/_convertions.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab15a8ba019f1b6a40ac3f562897fa4581323efc
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/_utils/_convertions.py
@@ -0,0 +1,18 @@
+"""
+A set of methods retained from np.compat module that
+are still used across codebase.
+"""
+
+__all__ = ["asunicode", "asbytes"]
+
+
+def asunicode(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
+
+
+def asbytes(s):
+    if isinstance(s, bytes):
+        return s
+    return str(s).encode('latin1')
diff --git a/.venv/lib/python3.11/site-packages/numpy/_utils/_inspect.py b/.venv/lib/python3.11/site-packages/numpy/_utils/_inspect.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a874a71dd0a53e25a671c51bfdceec850702bfe
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/_utils/_inspect.py
@@ -0,0 +1,191 @@
+"""Subset of inspect module from upstream python
+
+We use this instead of upstream because upstream inspect is slow to import, and
+significantly contributes to numpy import times. Importing this copy has almost
+no overhead.
+
+"""
+import types
+
+__all__ = ['getargspec', 'formatargspec']
+
+# ----------------------------------------------------------- type-checking
+def ismethod(object):
+    """Return true if the object is an instance method.
+
+    Instance method objects provide these attributes:
+        __doc__         documentation string
+        __name__        name with which this method was defined
+        im_class        class object in which this method belongs
+        im_func         function object containing implementation of method
+        im_self         instance to which this method is bound, or None
+
+    """
+    return isinstance(object, types.MethodType)
+
+def isfunction(object):
+    """Return true if the object is a user-defined function.
+
+    Function objects provide these attributes:
+        __doc__         documentation string
+        __name__        name with which this function was defined
+        func_code       code object containing compiled function bytecode
+        func_defaults   tuple of any default values for arguments
+        func_doc        (same as __doc__)
+        func_globals    global namespace in which this function was defined
+        func_name       (same as __name__)
+
+    """
+    return isinstance(object, types.FunctionType)
+
+def iscode(object):
+    """Return true if the object is a code object.
+
+    Code objects provide these attributes:
+        co_argcount     number of arguments (not including * or ** args)
+        co_code         string of raw compiled bytecode
+        co_consts       tuple of constants used in the bytecode
+        co_filename     name of file in which this code object was created
+        co_firstlineno  number of first line in Python source code
+        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
+        co_lnotab       encoded mapping of line numbers to bytecode indices
+        co_name         name with which this code object was defined
+        co_names        tuple of names of local variables
+        co_nlocals      number of local variables
+        co_stacksize    virtual machine stack space required
+        co_varnames     tuple of names of arguments and local variables
+
+    """
+    return isinstance(object, types.CodeType)
+
+# ------------------------------------------------ argument list extraction
+# These constants are from Python's compile.h.
+CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
+
+def getargs(co):
+    """Get information about the arguments accepted by a code object.
+
+    Three things are returned: (args, varargs, varkw), where 'args' is
+    a list of argument names (possibly containing nested lists), and
+    'varargs' and 'varkw' are the names of the * and ** arguments or None.
+
+    """
+
+    if not iscode(co):
+        raise TypeError('arg is not a code object')
+
+    nargs = co.co_argcount
+    names = co.co_varnames
+    args = list(names[:nargs])
+
+    # The following acrobatics are for anonymous (tuple) arguments.
+    # Which we do not need to support, so remove to avoid importing
+    # the dis module.
+    for i in range(nargs):
+        if args[i][:1] in ['', '.']:
+            raise TypeError("tuple function arguments are not supported")
+    varargs = None
+    if co.co_flags & CO_VARARGS:
+        varargs = co.co_varnames[nargs]
+        nargs = nargs + 1
+    varkw = None
+    if co.co_flags & CO_VARKEYWORDS:
+        varkw = co.co_varnames[nargs]
+    return args, varargs, varkw
+
+def getargspec(func):
+    """Get the names and default values of a function's arguments.
+
+    A tuple of four things is returned: (args, varargs, varkw, defaults).
+    'args' is a list of the argument names (it may contain nested lists).
+    'varargs' and 'varkw' are the names of the * and ** arguments or None.
+    'defaults' is an n-tuple of the default values of the last n arguments.
+
+    """
+
+    if ismethod(func):
+        func = func.__func__
+    if not isfunction(func):
+        raise TypeError('arg is not a Python function')
+    args, varargs, varkw = getargs(func.__code__)
+    return args, varargs, varkw, func.__defaults__
+
+def getargvalues(frame):
+    """Get information about arguments passed into a particular frame.
+
+    A tuple of four things is returned: (args, varargs, varkw, locals).
+    'args' is a list of the argument names (it may contain nested lists).
+    'varargs' and 'varkw' are the names of the * and ** arguments or None.
+    'locals' is the locals dictionary of the given frame.
+
+    """
+    args, varargs, varkw = getargs(frame.f_code)
+    return args, varargs, varkw, frame.f_locals
+
+def joinseq(seq):
+    if len(seq) == 1:
+        return '(' + seq[0] + ',)'
+    else:
+        return '(' + ', '.join(seq) + ')'
+
+def strseq(object, convert, join=joinseq):
+    """Recursively walk a sequence, stringifying each element.
+
+    """
+    if type(object) in [list, tuple]:
+        return join([strseq(_o, convert, join) for _o in object])
+    else:
+        return convert(object)
+
+def formatargspec(args, varargs=None, varkw=None, defaults=None,
+                  formatarg=str,
+                  formatvarargs=lambda name: '*' + name,
+                  formatvarkw=lambda name: '**' + name,
+                  formatvalue=lambda value: '=' + repr(value),
+                  join=joinseq):
+    """Format an argument spec from the 4 values returned by getargspec.
+
+    The first four arguments are (args, varargs, varkw, defaults). The
+    other four arguments are the corresponding optional formatting functions
+    that are called to turn names and values into strings. The ninth
+    argument is an optional function to format the sequence of arguments.
+
+    """
+    specs = []
+    if defaults:
+        firstdefault = len(args) - len(defaults)
+    for i in range(len(args)):
+        spec = strseq(args[i], formatarg, join)
+        if defaults and i >= firstdefault:
+            spec = spec + formatvalue(defaults[i - firstdefault])
+        specs.append(spec)
+    if varargs is not None:
+        specs.append(formatvarargs(varargs))
+    if varkw is not None:
+        specs.append(formatvarkw(varkw))
+    return '(' + ', '.join(specs) + ')'
+
+def formatargvalues(args, varargs, varkw, locals,
+                    formatarg=str,
+                    formatvarargs=lambda name: '*' + name,
+                    formatvarkw=lambda name: '**' + name,
+                    formatvalue=lambda value: '=' + repr(value),
+                    join=joinseq):
+    """Format an argument spec from the 4 values returned by getargvalues.
+
+    The first four arguments are (args, varargs, varkw, locals). The
+    next four arguments are the corresponding optional formatting functions
+    that are called to turn names and values into strings. The ninth
+    argument is an optional function to format the sequence of arguments.
+
+    """
+    def convert(name, locals=locals,
+                formatarg=formatarg, formatvalue=formatvalue):
+        return formatarg(name) + formatvalue(locals[name])
+    specs = [strseq(arg, convert, join) for arg in args]
+
+    if varargs:
+        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
+    if varkw:
+        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
+    return '(' + ', '.join(specs) + ')'
diff --git a/.venv/lib/python3.11/site-packages/numpy/_utils/_pep440.py b/.venv/lib/python3.11/site-packages/numpy/_utils/_pep440.py
new file mode 100644
index 0000000000000000000000000000000000000000..73d0afb5e95f099f8b04253177e8a3ab3d80d0c4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/_utils/_pep440.py
@@ -0,0 +1,487 @@
+"""Utility to compare pep440 compatible version strings.
+
+The LooseVersion and StrictVersion classes that distutils provides don't
+work; they don't recognize anything like alpha/beta/rc/dev versions.
+"""
+
+# Copyright (c) Donald Stufft and individual contributors.
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# 1. Redistributions of source code must retain the above copyright notice,
+#    this list of conditions and the following disclaimer.
+
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import collections
+import itertools
+import re
+
+
+__all__ = [
+    "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN",
+]
+
+
+# BEGIN packaging/_structures.py
+
+
+class Infinity:
+    def __repr__(self):
+        return "Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return False
+
+    def __le__(self, other):
+        return False
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return True
+
+    def __ge__(self, other):
+        return True
+
+    def __neg__(self):
+        return NegativeInfinity
+
+
+Infinity = Infinity()
+
+
+class NegativeInfinity:
+    def __repr__(self):
+        return "-Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return True
+
+    def __le__(self, other):
+        return True
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return False
+
+    def __ge__(self, other):
+        return False
+
+    def __neg__(self):
+        return Infinity
+
+
+# BEGIN packaging/version.py
+
+
+NegativeInfinity = NegativeInfinity()
+
+_Version = collections.namedtuple(
+    "_Version",
+    ["epoch", "release", "dev", "pre", "post", "local"],
+)
+
+
+def parse(version):
+    """
+    Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object depending on if the given version is
+    a valid PEP 440 version or a legacy version.
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found, users should refer to PEP 440.
+    """
+
+
+class _BaseVersion:
+
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+    def __init__(self, version):
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+    def __str__(self):
+        return self._version
+
+    def __repr__(self):
+        return "<LegacyVersion({0})>".format(repr(str(self)))
+
+    @property
+    def public(self):
+        return self._version
+
+    @property
+    def base_version(self):
+        return self._version
+
+    @property
+    def local(self):
+        return None
+
+    @property
+    def is_prerelease(self):
+        return False
+
+    @property
+    def is_postrelease(self):
+        return False
+
+
+_legacy_version_component_re = re.compile(
+    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version):
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0. This will effectively put the LegacyVersion,
+    # which uses the defacto standard originally implemented by setuptools,
+    # as before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version setuptools prior to
+    # its adoption of the packaging library.
+    parts = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+    parts = tuple(parts)
+
+    return epoch, parts
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return "<Version({0})>".format(repr(str(self)))
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(".post{0}".format(self._version.post[1]))
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(".dev{0}".format(self._version.dev[1]))
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                "+{0}".format(".".join(str(x) for x in self._version.local))
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We assume there is an implicit 0 in a pre-release if there is
+        # no numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower-case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not given a letter,
+        # then this is using the implicit post release syntax (e.g., 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_seperators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll use a reverse the list, drop all the now
+    # leading zeros until we come to something non-zero, then take the rest,
+    # re-reverse it back into the correct order, and make it a tuple and use
+    # that for our sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre-segment, but we _only_ want to do this
+    # if there is no pre- or a post-segment. If we have one of those, then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post-segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
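Illustrative sketch (editorial, not part of the patch): how the vendored _pep440 module above can be used to compare PEP 440 version strings; the version numbers are made-up examples.

    from numpy._utils._pep440 import Version, parse

    # Pre-releases sort before the corresponding final release,
    # post-releases sort after it.
    assert Version("1.25.0rc1") < Version("1.25.0") < Version("1.25.0.post1")

    # Strings that are not valid PEP 440 fall back to LegacyVersion.
    legacy = parse("not-a-pep440-version")
    assert type(legacy).__name__ == "LegacyVersion"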
diff --git a/.venv/lib/python3.11/site-packages/numpy/compat/__init__.py b/.venv/lib/python3.11/site-packages/numpy/compat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..504f8b0030ba10511f47d8512acb2feff7d807b0
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/compat/__init__.py
@@ -0,0 +1,19 @@
+"""
+Compatibility module.
+
+This module contains duplicated code from Python itself or 3rd party
+extensions, which may be included for the following reasons:
+
+  * compatibility
+  * we may only need a small subset of the copied library/module
+
+"""
+
+from .._utils import _inspect
+from .._utils._inspect import getargspec, formatargspec
+from . import py3k
+from .py3k import *
+
+__all__ = []
+__all__.extend(_inspect.__all__)
+__all__.extend(py3k.__all__)
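Illustrative sketch (editorial, not part of the patch): the lightweight inspect subset re-exported by numpy.compat above, applied to a throwaway function f.

    from numpy.compat import getargspec, formatargspec

    def f(a, b=1, *args, **kwargs):
        pass

    # getargspec returns (args, varargs, varkw, defaults) without importing
    # the stdlib inspect module.
    spec = getargspec(f)
    assert spec == (['a', 'b'], 'args', 'kwargs', (1,))
    assert formatargspec(*spec) == '(a, b=1, *args, **kwargs)'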
diff --git a/.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b5a33ea1ba3669f04ea8f0e79fa2473d2ff8dd8
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/__init__.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/py3k.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/py3k.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6174e493a5ade08dadc10dd01375712dbd6ffb4e
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/py3k.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/setup.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/setup.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..532d9b2dd6758f5dba087cd71e19c0e9d42b10c2
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/setup.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/compat/py3k.py b/.venv/lib/python3.11/site-packages/numpy/compat/py3k.py
new file mode 100644
index 0000000000000000000000000000000000000000..d02c9f8fe341859202319f9b7ed65818f139e269
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/compat/py3k.py
@@ -0,0 +1,145 @@
+"""
+Python 3.X compatibility tools.
+
+While this file was originally intended for Python 2 -> 3 transition,
+it is now used to create a compatibility layer between different
+minor versions of Python 3.
+
+While the active version of numpy may not support a given version of python, we
+allow downstream libraries to continue to use these shims for forward
+compatibility with numpy while they transition their code to newer versions of
+Python.
+"""
+__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
+           'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
+           'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
+           'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
+           'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
+
+import sys
+import os
+from pathlib import Path
+import io
+try:
+    import pickle5 as pickle
+except ImportError:
+    import pickle
+
+long = int
+integer_types = (int,)
+basestring = str
+unicode = str
+bytes = bytes
+
+def asunicode(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
+
+def asbytes(s):
+    if isinstance(s, bytes):
+        return s
+    return str(s).encode('latin1')
+
+def asstr(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
+
+def isfileobj(f):
+    if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)):
+        return False
+    try:
+        # BufferedReader/Writer may raise OSError when
+        # fetching `fileno()` (e.g. when wrapping BytesIO).
+        f.fileno()
+        return True
+    except OSError:
+        return False
+
+def open_latin1(filename, mode='r'):
+    return open(filename, mode=mode, encoding='iso-8859-1')
+
+def sixu(s):
+    return s
+
+strchar = 'U'
+
+def getexception():
+    return sys.exc_info()[1]
+
+def asbytes_nested(x):
+    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+        return [asbytes_nested(y) for y in x]
+    else:
+        return asbytes(x)
+
+def asunicode_nested(x):
+    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+        return [asunicode_nested(y) for y in x]
+    else:
+        return asunicode(x)
+
+def is_pathlib_path(obj):
+    """
+    Check whether obj is a `pathlib.Path` object.
+
+    Prefer using ``isinstance(obj, os.PathLike)`` instead of this function.
+    """
+    return isinstance(obj, Path)
+
+# from Python 3.7
+class contextlib_nullcontext:
+    """Context manager that does no additional processing.
+
+    Used as a stand-in for a normal context manager, when a particular
+    block of code is only sometimes used with a normal context manager:
+
+    cm = optional_cm if condition else nullcontext()
+    with cm:
+        # Perform operation, using optional_cm if condition is True
+
+    .. note::
+        Prefer using `contextlib.nullcontext` instead of this context manager.
+    """
+
+    def __init__(self, enter_result=None):
+        self.enter_result = enter_result
+
+    def __enter__(self):
+        return self.enter_result
+
+    def __exit__(self, *excinfo):
+        pass
+
+
+def npy_load_module(name, fn, info=None):
+    """
+    Load a module. Uses ``load_module`` which will be deprecated in python
+    3.12. An alternative that uses ``exec_module`` is in
+    numpy.distutils.misc_util.exec_mod_from_location
+
+    .. versionadded:: 1.11.2
+
+    Parameters
+    ----------
+    name : str
+        Full module name.
+    fn : str
+        Path to module file.
+    info : tuple, optional
+        Only here for backward compatibility with Python 2.*.
+
+    Returns
+    -------
+    mod : module
+
+    """
+    # Explicitly lazy import this to avoid paying the cost
+    # of importing importlib at startup
+    from importlib.machinery import SourceFileLoader
+    return SourceFileLoader(name, fn).load_module()
+
+
+os_fspath = os.fspath
+os_PathLike = os.PathLike
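Illustrative sketch (editorial, not part of the patch): the string shims and the nullcontext stand-in defined in py3k.py above.

    from numpy.compat.py3k import asbytes, asunicode, contextlib_nullcontext

    assert asbytes('abc') == b'abc'      # str -> latin-1 encoded bytes
    assert asunicode(b'abc') == 'abc'    # bytes -> str
    with contextlib_nullcontext(42) as result:
        assert result == 42              # no-op context manager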
diff --git a/.venv/lib/python3.11/site-packages/numpy/compat/setup.py b/.venv/lib/python3.11/site-packages/numpy/compat/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1b34a2cc9528b859e9f40d4eaee8eb35dbf64d6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/compat/setup.py
@@ -0,0 +1,10 @@
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+
+    config = Configuration('compat', parent_package, top_path)
+    config.add_subpackage('tests')
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
diff --git a/.venv/lib/python3.11/site-packages/numpy/compat/tests/__init__.py b/.venv/lib/python3.11/site-packages/numpy/compat/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/.venv/lib/python3.11/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d46d36a2ff7561bb2a22cc5a789972894a80c3ab
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/compat/tests/__pycache__/test_compat.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/compat/tests/__pycache__/test_compat.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ef98b1a19dcce29e4affe2efb9704d7f6a49ec98
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/compat/tests/__pycache__/test_compat.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/compat/tests/test_compat.py b/.venv/lib/python3.11/site-packages/numpy/compat/tests/test_compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4391565ee074a797a8e05cc108f570e958804d7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/compat/tests/test_compat.py
@@ -0,0 +1,22 @@
+from os.path import join
+from io import BufferedReader, BytesIO
+
+from numpy.compat import isfileobj
+from numpy.testing import assert_
+from numpy.testing import tempdir
+
+
+def test_isfileobj():
+    with tempdir(prefix="numpy_test_compat_") as folder:
+        filename = join(folder, 'a.bin')
+
+        with open(filename, 'wb') as f:
+            assert_(isfileobj(f))
+
+        with open(filename, 'ab') as f:
+            assert_(isfileobj(f))
+
+        with open(filename, 'rb') as f:
+            assert_(isfileobj(f))
+
+        assert_(isfileobj(BufferedReader(BytesIO())) is False)
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/__init__.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..d3553bbcca7ba16703f7229c051aadfbe3a34b4d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/__init__.pyi
@@ -0,0 +1,245 @@
+import math as math
+from typing import Any
+
+from numpy._pytesttester import PytestTester
+
+from numpy import (
+    ndenumerate as ndenumerate,
+    ndindex as ndindex,
+)
+
+from numpy.version import version
+
+from numpy.lib import (
+    format as format,
+    mixins as mixins,
+    scimath as scimath,
+    stride_tricks as stride_tricks,
+)
+
+from numpy.lib._version import (
+    NumpyVersion as NumpyVersion,
+)
+
+from numpy.lib.arraypad import (
+    pad as pad,
+)
+
+from numpy.lib.arraysetops import (
+    ediff1d as ediff1d,
+    intersect1d as intersect1d,
+    setxor1d as setxor1d,
+    union1d as union1d,
+    setdiff1d as setdiff1d,
+    unique as unique,
+    in1d as in1d,
+    isin as isin,
+)
+
+from numpy.lib.arrayterator import (
+    Arrayterator as Arrayterator,
+)
+
+from numpy.lib.function_base import (
+    select as select,
+    piecewise as piecewise,
+    trim_zeros as trim_zeros,
+    copy as copy,
+    iterable as iterable,
+    percentile as percentile,
+    diff as diff,
+    gradient as gradient,
+    angle as angle,
+    unwrap as unwrap,
+    sort_complex as sort_complex,
+    disp as disp,
+    flip as flip,
+    rot90 as rot90,
+    extract as extract,
+    place as place,
+    vectorize as vectorize,
+    asarray_chkfinite as asarray_chkfinite,
+    average as average,
+    bincount as bincount,
+    digitize as digitize,
+    cov as cov,
+    corrcoef as corrcoef,
+    median as median,
+    sinc as sinc,
+    hamming as hamming,
+    hanning as hanning,
+    bartlett as bartlett,
+    blackman as blackman,
+    kaiser as kaiser,
+    trapz as trapz,
+    i0 as i0,
+    add_newdoc as add_newdoc,
+    add_docstring as add_docstring,
+    meshgrid as meshgrid,
+    delete as delete,
+    insert as insert,
+    append as append,
+    interp as interp,
+    add_newdoc_ufunc as add_newdoc_ufunc,
+    quantile as quantile,
+)
+
+from numpy.lib.histograms import (
+    histogram_bin_edges as histogram_bin_edges,
+    histogram as histogram,
+    histogramdd as histogramdd,
+)
+
+from numpy.lib.index_tricks import (
+    ravel_multi_index as ravel_multi_index,
+    unravel_index as unravel_index,
+    mgrid as mgrid,
+    ogrid as ogrid,
+    r_ as r_,
+    c_ as c_,
+    s_ as s_,
+    index_exp as index_exp,
+    ix_ as ix_,
+    fill_diagonal as fill_diagonal,
+    diag_indices as diag_indices,
+    diag_indices_from as diag_indices_from,
+)
+
+from numpy.lib.nanfunctions import (
+    nansum as nansum,
+    nanmax as nanmax,
+    nanmin as nanmin,
+    nanargmax as nanargmax,
+    nanargmin as nanargmin,
+    nanmean as nanmean,
+    nanmedian as nanmedian,
+    nanpercentile as nanpercentile,
+    nanvar as nanvar,
+    nanstd as nanstd,
+    nanprod as nanprod,
+    nancumsum as nancumsum,
+    nancumprod as nancumprod,
+    nanquantile as nanquantile,
+)
+
+from numpy.lib.npyio import (
+    savetxt as savetxt,
+    loadtxt as loadtxt,
+    genfromtxt as genfromtxt,
+    recfromtxt as recfromtxt,
+    recfromcsv as recfromcsv,
+    load as load,
+    save as save,
+    savez as savez,
+    savez_compressed as savez_compressed,
+    packbits as packbits,
+    unpackbits as unpackbits,
+    fromregex as fromregex,
+    DataSource as DataSource,
+)
+
+from numpy.lib.polynomial import (
+    poly as poly,
+    roots as roots,
+    polyint as polyint,
+    polyder as polyder,
+    polyadd as polyadd,
+    polysub as polysub,
+    polymul as polymul,
+    polydiv as polydiv,
+    polyval as polyval,
+    polyfit as polyfit,
+    RankWarning as RankWarning,
+    poly1d as poly1d,
+)
+
+from numpy.lib.shape_base import (
+    column_stack as column_stack,
+    row_stack as row_stack,
+    dstack as dstack,
+    array_split as array_split,
+    split as split,
+    hsplit as hsplit,
+    vsplit as vsplit,
+    dsplit as dsplit,
+    apply_over_axes as apply_over_axes,
+    expand_dims as expand_dims,
+    apply_along_axis as apply_along_axis,
+    kron as kron,
+    tile as tile,
+    get_array_wrap as get_array_wrap,
+    take_along_axis as take_along_axis,
+    put_along_axis as put_along_axis,
+)
+
+from numpy.lib.stride_tricks import (
+    broadcast_to as broadcast_to,
+    broadcast_arrays as broadcast_arrays,
+    broadcast_shapes as broadcast_shapes,
+)
+
+from numpy.lib.twodim_base import (
+    diag as diag,
+    diagflat as diagflat,
+    eye as eye,
+    fliplr as fliplr,
+    flipud as flipud,
+    tri as tri,
+    triu as triu,
+    tril as tril,
+    vander as vander,
+    histogram2d as histogram2d,
+    mask_indices as mask_indices,
+    tril_indices as tril_indices,
+    tril_indices_from as tril_indices_from,
+    triu_indices as triu_indices,
+    triu_indices_from as triu_indices_from,
+)
+
+from numpy.lib.type_check import (
+    mintypecode as mintypecode,
+    asfarray as asfarray,
+    real as real,
+    imag as imag,
+    iscomplex as iscomplex,
+    isreal as isreal,
+    iscomplexobj as iscomplexobj,
+    isrealobj as isrealobj,
+    nan_to_num as nan_to_num,
+    real_if_close as real_if_close,
+    typename as typename,
+    common_type as common_type,
+)
+
+from numpy.lib.ufunclike import (
+    fix as fix,
+    isposinf as isposinf,
+    isneginf as isneginf,
+)
+
+from numpy.lib.utils import (
+    issubclass_ as issubclass_,
+    issubsctype as issubsctype,
+    issubdtype as issubdtype,
+    deprecate as deprecate,
+    deprecate_with_doc as deprecate_with_doc,
+    get_include as get_include,
+    info as info,
+    source as source,
+    who as who,
+    lookfor as lookfor,
+    byte_bounds as byte_bounds,
+    safe_eval as safe_eval,
+    show_runtime as show_runtime,
+)
+
+from numpy.core.multiarray import (
+    tracemalloc_domain as tracemalloc_domain,
+)
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
+
+__version__ = version
+emath = scimath
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/_datasource.py b/.venv/lib/python3.11/site-packages/numpy/lib/_datasource.py
new file mode 100644
index 0000000000000000000000000000000000000000..613733fa51675360caf33fd97b0b038c6fb6dfa2
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/_datasource.py
@@ -0,0 +1,704 @@
+"""A file interface for handling local and remote data files.
+
+The goal of datasource is to abstract some of the file system operations
+when dealing with data files so the researcher doesn't have to know all the
+low-level details.  Through datasource, a researcher can obtain and use a
+file with one function call, regardless of location of the file.
+
+DataSource is meant to augment standard python libraries, not replace them.
+It should work seamlessly with standard file IO operations and the os
+module.
+
+DataSource files can originate locally or remotely:
+
+- local files : '/home/guido/src/local/data.txt'
+- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
+
+DataSource files can also be compressed or uncompressed.  Currently only
+gzip, bz2 and xz are supported.
+
+Example::
+
+    >>> # Create a DataSource, use os.curdir (default) for local storage.
+    >>> from numpy import DataSource
+    >>> ds = DataSource()
+    >>>
+    >>> # Open a remote file.
+    >>> # DataSource downloads the file, stores it locally in:
+    >>> #     './www.google.com/index.html'
+    >>> # opens the file and returns a file object.
+    >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP
+    >>>
+    >>> # Use the file as you normally would
+    >>> fp.read() # doctest: +SKIP
+    >>> fp.close() # doctest: +SKIP
+
+"""
+import os
+import io
+
+from .._utils import set_module
+
+
+_open = open
+
+
+def _check_mode(mode, encoding, newline):
+    """Check mode and that encoding and newline are compatible.
+
+    Parameters
+    ----------
+    mode : str
+        File open mode.
+    encoding : str
+        File encoding.
+    newline : str
+        Newline for text files.
+
+    """
+    if "t" in mode:
+        if "b" in mode:
+            raise ValueError("Invalid mode: %r" % (mode,))
+    else:
+        if encoding is not None:
+            raise ValueError("Argument 'encoding' not supported in binary mode")
+        if newline is not None:
+            raise ValueError("Argument 'newline' not supported in binary mode")
+
+
+# Using a class instead of a module-level dictionary
+# to reduce the initial 'import numpy' overhead by
+# deferring the import of lzma, bz2 and gzip until needed
+
+# TODO: .zip support, .tar support?
+class _FileOpeners:
+    """
+    Container for different methods to open (un-)compressed files.
+
+    `_FileOpeners` contains a dictionary that holds one method for each
+    supported file format. Attribute lookup is implemented in such a way
+    that an instance of `_FileOpeners` itself can be indexed with the keys
+    of that dictionary. Currently uncompressed files as well as files
+    compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.
+
+    Notes
+    -----
+    `_file_openers`, an instance of `_FileOpeners`, is made available for
+    use in the `_datasource` module.
+
+    Examples
+    --------
+    >>> import gzip
+    >>> np.lib._datasource._file_openers.keys()
+    [None, '.bz2', '.gz', '.xz', '.lzma']
+    >>> np.lib._datasource._file_openers['.gz'] is gzip.open
+    True
+
+    """
+
+    def __init__(self):
+        self._loaded = False
+        self._file_openers = {None: io.open}
+
+    def _load(self):
+        if self._loaded:
+            return
+
+        try:
+            import bz2
+            self._file_openers[".bz2"] = bz2.open
+        except ImportError:
+            pass
+
+        try:
+            import gzip
+            self._file_openers[".gz"] = gzip.open
+        except ImportError:
+            pass
+
+        try:
+            import lzma
+            self._file_openers[".xz"] = lzma.open
+            self._file_openers[".lzma"] = lzma.open
+        except (ImportError, AttributeError):
+            # There are incompatible backports of lzma that do not have the
+            # lzma.open attribute, so catch that as well as ImportError.
+            pass
+
+        self._loaded = True
+
+    def keys(self):
+        """
+        Return the keys of currently supported file openers.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        keys : list
+            The keys are None for uncompressed files and the file extension
+            strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression
+            methods.
+
+        """
+        self._load()
+        return list(self._file_openers.keys())
+
+    def __getitem__(self, key):
+        self._load()
+        return self._file_openers[key]
+
+_file_openers = _FileOpeners()
+
+def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
+    """
+    Open `path` with `mode` and return the file object.
+
+    If ``path`` is an URL, it will be downloaded, stored in the
+    `DataSource` `destpath` directory and opened from there.
+
+    Parameters
+    ----------
+    path : str
+        Local file path or URL to open.
+    mode : str, optional
+        Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
+        append. Available modes depend on the type of object specified by
+        path.  Default is 'r'.
+    destpath : str, optional
+        Path to the directory where the source file gets downloaded to for
+        use.  If `destpath` is None, a temporary directory will be created.
+        The default path is the current directory.
+    encoding : {None, str}, optional
+        Open text file with given encoding. The default encoding will be
+        what `io.open` uses.
+    newline : {None, str}, optional
+        Newline to use when reading text file.
+
+    Returns
+    -------
+    out : file object
+        The opened file.
+
+    Notes
+    -----
+    This is a convenience function that instantiates a `DataSource` and
+    returns the file object from ``DataSource.open(path)``.
+
+    """
+
+    ds = DataSource(destpath)
+    return ds.open(path, mode, encoding=encoding, newline=newline)
+
+
+@set_module('numpy')
+class DataSource:
+    """
+    DataSource(destpath='.')
+
+    A generic data source file (file, http, ftp, ...).
+
+    DataSources can be local files or remote files/URLs.  The files may
+    also be compressed or uncompressed. DataSource hides some of the
+    low-level details of downloading the file, allowing you to simply pass
+    in a valid file path (or URL) and obtain a file object.
+
+    Parameters
+    ----------
+    destpath : str or None, optional
+        Path to the directory where the source file gets downloaded to for
+        use.  If `destpath` is None, a temporary directory will be created.
+        The default path is the current directory.
+
+    Notes
+    -----
+    URLs require a scheme string (``http://``) to be used, without it they
+    will fail::
+
+        >>> repos = np.DataSource()
+        >>> repos.exists('www.google.com/index.html')
+        False
+        >>> repos.exists('http://www.google.com/index.html')
+        True
+
+    Temporary directories are deleted when the DataSource is deleted.
+
+    Examples
+    --------
+    ::
+
+        >>> ds = np.DataSource('/home/guido')
+        >>> urlname = 'http://www.google.com/'
+        >>> gfile = ds.open('http://www.google.com/')
+        >>> ds.abspath(urlname)
+        '/home/guido/www.google.com/index.html'
+
+        >>> ds = np.DataSource(None)  # use with temporary file
+        >>> ds.open('/home/guido/foobar.txt')
+        <_io.TextIOWrapper (closed)>
+        >>> ds.abspath('/home/guido/foobar.txt')
+        '/tmp/.../home/guido/foobar.txt'
+
+    """
+
+    def __init__(self, destpath=os.curdir):
+        """Create a DataSource with a local path at destpath."""
+        if destpath:
+            self._destpath = os.path.abspath(destpath)
+            self._istmpdest = False
+        else:
+            import tempfile  # deferring import to improve startup time
+            self._destpath = tempfile.mkdtemp()
+            self._istmpdest = True
+
+    def __del__(self):
+        # Remove temp directories
+        if hasattr(self, '_istmpdest') and self._istmpdest:
+            import shutil
+
+            shutil.rmtree(self._destpath)
+
+    def _iszip(self, filename):
+        """Test if the filename is a zip file by looking at the file extension.
+
+        """
+        fname, ext = os.path.splitext(filename)
+        return ext in _file_openers.keys()
+
+    def _iswritemode(self, mode):
+        """Test if the given mode will open a file for writing."""
+
+        # Currently only used to test the bz2 files.
+        _writemodes = ("w", "+")
+        for c in mode:
+            if c in _writemodes:
+                return True
+        return False
+
+    def _splitzipext(self, filename):
+        """Split zip extension from filename and return filename.
+
+        Returns
+        -------
+        base, zip_ext : {tuple}
+
+        """
+
+        if self._iszip(filename):
+            return os.path.splitext(filename)
+        else:
+            return filename, None
+
+    def _possible_names(self, filename):
+        """Return a tuple containing compressed filename variations."""
+        names = [filename]
+        if not self._iszip(filename):
+            for zipext in _file_openers.keys():
+                if zipext:
+                    names.append(filename+zipext)
+        return names
+
+    def _isurl(self, path):
+        """Test if path is a net location.  Tests the scheme and netloc."""
+
+        # We do this here to reduce the 'import numpy' initial import time.
+        from urllib.parse import urlparse
+
+        # BUG : URLs require a scheme string ('http://') to be used.
+        #       www.google.com will fail.
+        #       Should we prepend the scheme for those that don't have it and
+        #       test that also?  Similar to the way we append .gz and test for
+        #       for compressed versions of files.
+
+        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+        return bool(scheme and netloc)
+
+    def _cache(self, path):
+        """Cache the file specified by path.
+
+        Creates a copy of the file in the datasource cache.
+
+        """
+        # We import these here because importing them is slow and
+        # a significant fraction of numpy's total import time.
+        import shutil
+        from urllib.request import urlopen
+
+        upath = self.abspath(path)
+
+        # ensure directory exists
+        if not os.path.exists(os.path.dirname(upath)):
+            os.makedirs(os.path.dirname(upath))
+
+        # TODO: Doesn't handle compressed files!
+        if self._isurl(path):
+            with urlopen(path) as openedurl:
+                with _open(upath, 'wb') as f:
+                    shutil.copyfileobj(openedurl, f)
+        else:
+            shutil.copyfile(path, upath)
+        return upath
+
+    def _findfile(self, path):
+        """Searches for ``path`` and returns full path if found.
+
+        If path is an URL, _findfile will cache a local copy and return the
+        path to the cached file.  If path is a local file, _findfile will
+        return a path to that local file.
+
+        The search will include possible compressed versions of the file
+        and return the first occurrence found.
+
+        """
+
+        # Build list of possible local file paths
+        if not self._isurl(path):
+            # Valid local paths
+            filelist = self._possible_names(path)
+            # Paths in self._destpath
+            filelist += self._possible_names(self.abspath(path))
+        else:
+            # Cached URLs in self._destpath
+            filelist = self._possible_names(self.abspath(path))
+            # Remote URLs
+            filelist = filelist + self._possible_names(path)
+
+        for name in filelist:
+            if self.exists(name):
+                if self._isurl(name):
+                    name = self._cache(name)
+                return name
+        return None
+
+    def abspath(self, path):
+        """
+        Return absolute path of file in the DataSource directory.
+
+        If `path` is an URL, then `abspath` will return either the location
+        the file exists locally or the location it would exist when opened
+        using the `open` method.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL.
+
+        Returns
+        -------
+        out : str
+            Complete path, including the `DataSource` destination directory.
+
+        Notes
+        -----
+        The functionality is based on `os.path.abspath`.
+
+        """
+        # We do this here to reduce the 'import numpy' initial import time.
+        from urllib.parse import urlparse
+
+        # TODO:  This should be more robust.  Handles case where path includes
+        #        the destpath, but not other sub-paths. Failing case:
+        #        path = /home/guido/datafile.txt
+        #        destpath = /home/alex/
+        #        upath = self.abspath(path)
+        #        upath == '/home/alex/home/guido/datafile.txt'
+
+        # handle case where path includes self._destpath
+        splitpath = path.split(self._destpath, 2)
+        if len(splitpath) > 1:
+            path = splitpath[1]
+        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+        netloc = self._sanitize_relative_path(netloc)
+        upath = self._sanitize_relative_path(upath)
+        return os.path.join(self._destpath, netloc, upath)
+
+    def _sanitize_relative_path(self, path):
+        """Return a sanitised relative path for which
+        os.path.abspath(os.path.join(base, path)).startswith(base)
+        """
+        last = None
+        path = os.path.normpath(path)
+        while path != last:
+            last = path
+            # Note: os.path.join treats '/' as os.sep on Windows
+            path = path.lstrip(os.sep).lstrip('/')
+            path = path.lstrip(os.pardir).lstrip('..')
+            drive, path = os.path.splitdrive(path)  # for Windows
+        return path
+
+    def exists(self, path):
+        """
+        Test if path exists.
+
+        Test if `path` exists as (and in this order):
+
+        - a local file.
+        - a remote URL that has been downloaded and stored locally in the
+          `DataSource` directory.
+        - a remote URL that has not been downloaded, but is valid and
+          accessible.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL.
+
+        Returns
+        -------
+        out : bool
+            True if `path` exists.
+
+        Notes
+        -----
+        When `path` is an URL, `exists` will return True if it's either
+        stored locally in the `DataSource` directory, or is a valid remote
+        URL.  `DataSource` does not discriminate between the two, the file
+        is accessible if it exists in either location.
+
+        """
+
+        # First test for local path
+        if os.path.exists(path):
+            return True
+
+        # We import this here because importing urllib is slow and
+        # a significant fraction of numpy's total import time.
+        from urllib.request import urlopen
+        from urllib.error import URLError
+
+        # Test cached url
+        upath = self.abspath(path)
+        if os.path.exists(upath):
+            return True
+
+        # Test remote url
+        if self._isurl(path):
+            try:
+                netfile = urlopen(path)
+                netfile.close()
+                del(netfile)
+                return True
+            except URLError:
+                return False
+        return False
+
+    def open(self, path, mode='r', encoding=None, newline=None):
+        """
+        Open and return file-like object.
+
+        If `path` is an URL, it will be downloaded, stored in the
+        `DataSource` directory and opened from there.
+
+        Parameters
+        ----------
+        path : str
+            Local file path or URL to open.
+        mode : {'r', 'w', 'a'}, optional
+            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
+            'a' to append. Available modes depend on the type of object
+            specified by `path`. Default is 'r'.
+        encoding : {None, str}, optional
+            Open text file with given encoding. The default encoding will be
+            what `io.open` uses.
+        newline : {None, str}, optional
+            Newline to use when reading text file.
+
+        Returns
+        -------
+        out : file object
+            File object.
+
+        """
+
+        # TODO: There is no support for opening a file for writing which
+        #       doesn't exist yet (creating a file).  Should there be?
+
+        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
+        #       used to store URLs in self._destpath.
+
+        if self._isurl(path) and self._iswritemode(mode):
+            raise ValueError("URLs are not writeable")
+
+        # NOTE: _findfile will fail on a new file opened for writing.
+        found = self._findfile(path)
+        if found:
+            _fname, ext = self._splitzipext(found)
+            if ext == 'bz2':
+                mode = mode.replace("+", "")  # bz2 does not support '+' modes
+            return _file_openers[ext](found, mode=mode,
+                                      encoding=encoding, newline=newline)
+        else:
+            raise FileNotFoundError(f"{path} not found.")
+
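A short usage sketch for ``open`` (not part of the patch; the file name and
URL are illustrative and assume the resources exist)::

    import numpy as np

    ds = np.lib._datasource.DataSource(destpath='/tmp/cache')
    # Local files are found via _findfile and opened directly; the matching
    # opener from _file_openers handles .gz/.bz2 extensions transparently.
    with ds.open('measurements.csv') as fh:
        header = fh.readline()
    # URLs are downloaded into destpath first; write modes raise ValueError.
    # fh = ds.open('https://example.com/measurements.csv.gz')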
+
+class Repository (DataSource):
+    """
+    Repository(baseurl, destpath='.')
+
+    A data repository where multiple DataSource instances share a base
+    URL/directory.
+
+    `Repository` extends `DataSource` by prepending a base URL (or
+    directory) to all the files it handles. Use `Repository` when you will
+    be working with multiple files from one base URL.  Initialize
+    `Repository` with the base URL, then refer to each file by its filename
+    only.
+
+    Parameters
+    ----------
+    baseurl : str
+        Path to the local directory or remote location that contains the
+        data files.
+    destpath : str or None, optional
+        Path to the directory where the source file gets downloaded to for
+        use.  If `destpath` is None, a temporary directory will be created.
+        The default path is the current directory.
+
+    Examples
+    --------
+    To analyze all files in the repository, do something like this
+    (note: this is not self-contained code)::
+
+        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
+        >>> for filename in filelist:
+        ...     fp = repos.open(filename)
+        ...     fp.analyze()
+        ...     fp.close()
+
+    Similarly you could use a URL for a repository::
+
+        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
+
+    """
+
+    def __init__(self, baseurl, destpath=os.curdir):
+        """Create a Repository with a shared url or directory of baseurl."""
+        DataSource.__init__(self, destpath=destpath)
+        self._baseurl = baseurl
+
+    def __del__(self):
+        DataSource.__del__(self)
+
+    def _fullpath(self, path):
+        """Return complete path for path.  Prepends baseurl if necessary."""
+        splitpath = path.split(self._baseurl, 2)
+        if len(splitpath) == 1:
+            result = os.path.join(self._baseurl, path)
+        else:
+            result = path    # path contains baseurl already
+        return result
+
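How ``_fullpath`` behaves, as a small sketch (not part of the patch; paths are
illustrative, join shown for POSIX)::

    import numpy as np

    repos = np.lib._datasource.Repository('https://example.com/data')
    repos._fullpath('file1.txt')
    # -> 'https://example.com/data/file1.txt'   (baseurl prepended)
    repos._fullpath('https://example.com/data/file1.txt')
    # -> unchanged, the path already contains baseurl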
+    def _findfile(self, path):
+        """Extend DataSource method to prepend baseurl to ``path``."""
+        return DataSource._findfile(self, self._fullpath(path))
+
+    def abspath(self, path):
+        """
+        Return absolute path of file in the Repository directory.
+
+        If `path` is a URL, then `abspath` will return either the location
+        where the file exists locally or the location where it would exist
+        after being opened with the `open` method.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL. This may, but does not
+            have to, include the `baseurl` with which the `Repository` was
+            initialized.
+
+        Returns
+        -------
+        out : str
+            Complete path, including the `DataSource` destination directory.
+
+        """
+        return DataSource.abspath(self, self._fullpath(path))
+
+    def exists(self, path):
+        """
+        Test if path exists, prepending the Repository base URL to path.
+
+        Test if `path` exists as (and in this order):
+
+        - a local file.
+        - a remote URL that has been downloaded and stored locally in the
+          `DataSource` directory.
+        - a remote URL that has not been downloaded, but is valid and
+          accessible.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL. This may, but does not
+            have to, include the `baseurl` with which the `Repository` was
+            initialized.
+
+        Returns
+        -------
+        out : bool
+            True if `path` exists.
+
+        Notes
+        -----
+        When `path` is a URL, `exists` will return True if it's either
+        stored locally in the `DataSource` directory, or is a valid remote
+        URL.  `DataSource` does not discriminate between the two; the file
+        is accessible if it exists in either location.
+
+        """
+        return DataSource.exists(self, self._fullpath(path))
+
+    def open(self, path, mode='r', encoding=None, newline=None):
+        """
+        Open and return file-like object prepending Repository base URL.
+
+        If `path` is a URL, it will be downloaded, stored in the
+        DataSource directory, and opened from there.
+
+        Parameters
+        ----------
+        path : str
+            Local file path or URL to open. This may, but does not have to,
+            include the `baseurl` with which the `Repository` was
+            initialized.
+        mode : {'r', 'w', 'a'}, optional
+            Mode to open `path`.  Mode 'r' for reading, 'w' for writing,
+            'a' to append. Available modes depend on the type of object
+            specified by `path`. Default is 'r'.
+        encoding : {None, str}, optional
+            Open text file with given encoding. The default encoding will be
+            what `io.open` uses.
+        newline : {None, str}, optional
+            Newline to use when reading text file.
+
+        Returns
+        -------
+        out : file object
+            File object.
+
+        """
+        return DataSource.open(self, self._fullpath(path), mode,
+                               encoding=encoding, newline=newline)
+
+    def listdir(self):
+        """
+        List files in the source Repository.
+
+        Returns
+        -------
+        files : list of str
+            List of file names (not containing a directory part).
+
+        Notes
+        -----
+        Does not currently work for remote repositories.
+
+        """
+        if self._isurl(self._baseurl):
+            raise NotImplementedError(
+                  "Directory listing of URLs, not supported yet.")
+        else:
+            return os.listdir(self._baseurl)
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/_iotools.py b/.venv/lib/python3.11/site-packages/numpy/lib/_iotools.py
new file mode 100644
index 0000000000000000000000000000000000000000..534d1b3eea636d4f68151531945ea9132d304872
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/_iotools.py
@@ -0,0 +1,897 @@
+"""A collection of functions designed to help I/O with ascii files.
+
+"""
+__docformat__ = "restructuredtext en"
+
+import numpy as np
+import numpy.core.numeric as nx
+from numpy.compat import asbytes, asunicode
+
+
+def _decode_line(line, encoding=None):
+    """Decode bytes from binary input streams.
+
+    Defaults to decoding from 'latin1'. That differs from the behavior of
+    np.compat.asunicode that decodes from 'ascii'.
+
+    Parameters
+    ----------
+    line : str or bytes
+         Line to be decoded.
+    encoding : str
+         Encoding used to decode `line`.
+
+    Returns
+    -------
+    decoded_line : str
+
+    """
+    if type(line) is bytes:
+        if encoding is None:
+            encoding = "latin1"
+        line = line.decode(encoding)
+
+    return line
+
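Behaviour of ``_decode_line`` in a nutshell (a sketch, not part of the patch)::

    from numpy.lib._iotools import _decode_line

    _decode_line('abc')                              # str passes through: 'abc'
    _decode_line(b'caf\xe9')                         # bytes decode as latin1: 'café'
    _decode_line(b'caf\xc3\xa9', encoding='utf-8')   # explicit encoding: 'café'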
+
+def _is_string_like(obj):
+    """
+    Check whether obj behaves like a string.
+    """
+    try:
+        obj + ''
+    except (TypeError, ValueError):
+        return False
+    return True
+
+
+def _is_bytes_like(obj):
+    """
+    Check whether obj behaves like a bytes object.
+    """
+    try:
+        obj + b''
+    except (TypeError, ValueError):
+        return False
+    return True
+
+
+def has_nested_fields(ndtype):
+    """
+    Returns whether one or several fields of a dtype are nested.
+
+    Parameters
+    ----------
+    ndtype : dtype
+        Data-type of a structured array.
+
+    Raises
+    ------
+    AttributeError
+        If `ndtype` does not have a `names` attribute.
+
+    Examples
+    --------
+    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
+    >>> np.lib._iotools.has_nested_fields(dt)
+    False
+
+    """
+    for name in ndtype.names or ():
+        if ndtype[name].names is not None:
+            return True
+    return False
+
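The docstring only shows the flat case; a nested dtype returning True, as a
sketch (not part of the patch)::

    import numpy as np
    from numpy.lib._iotools import has_nested_fields

    dt = np.dtype([('pos', [('x', float), ('y', float)]), ('t', float)])
    has_nested_fields(dt)      # True: the 'pos' field is itself structured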
+
+def flatten_dtype(ndtype, flatten_base=False):
+    """
+    Unpack a structured data-type by collapsing nested fields and/or fields
+    with a shape.
+
+    Note that the field names are lost.
+
+    Parameters
+    ----------
+    ndtype : dtype
+        The datatype to collapse
+    flatten_base : bool, optional
+       If True, transform a field with a shape into several fields. Default is
+       False.
+
+    Examples
+    --------
+    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
+    ...                ('block', int, (2, 3))])
+    >>> np.lib._iotools.flatten_dtype(dt)
+    [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]
+    >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
+    [dtype('S4'),
+     dtype('float64'),
+     dtype('float64'),
+     dtype('int64'),
+     dtype('int64'),
+     dtype('int64'),
+     dtype('int64'),
+     dtype('int64'),
+     dtype('int64')]
+
+    """
+    names = ndtype.names
+    if names is None:
+        if flatten_base:
+            return [ndtype.base] * int(np.prod(ndtype.shape))
+        return [ndtype.base]
+    else:
+        types = []
+        for field in names:
+            info = ndtype.fields[field]
+            flat_dt = flatten_dtype(info[0], flatten_base)
+            types.extend(flat_dt)
+        return types
+
+
+class LineSplitter:
+    """
+    Object to split a string at a given delimiter or at given places.
+
+    Parameters
+    ----------
+    delimiter : str, int, or sequence of ints, optional
+        If a string, character used to delimit consecutive fields.
+        If an integer or a sequence of integers, width(s) of each field.
+    comments : str, optional
+        Character used to mark the beginning of a comment. Default is '#'.
+    autostrip : bool, optional
+        Whether to strip each individual field. Default is True.
+
+    """
+
+    def autostrip(self, method):
+        """
+        Wrapper to strip each member of the output of `method`.
+
+        Parameters
+        ----------
+        method : function
+            Function that takes a single argument and returns a sequence of
+            strings.
+
+        Returns
+        -------
+        wrapped : function
+            The result of wrapping `method`. `wrapped` takes a single input
+            argument and returns a list of strings that are stripped of
+            white-space.
+
+        """
+        return lambda input: [_.strip() for _ in method(input)]
+
+    def __init__(self, delimiter=None, comments='#', autostrip=True,
+                 encoding=None):
+        delimiter = _decode_line(delimiter)
+        comments = _decode_line(comments)
+
+        self.comments = comments
+
+        # Delimiter is a character
+        if (delimiter is None) or isinstance(delimiter, str):
+            delimiter = delimiter or None
+            _handyman = self._delimited_splitter
+        # Delimiter is a list of field widths
+        elif hasattr(delimiter, '__iter__'):
+            _handyman = self._variablewidth_splitter
+            idx = np.cumsum([0] + list(delimiter))
+            delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
+        # Delimiter is a single integer
+        elif int(delimiter):
+            (_handyman, delimiter) = (
+                    self._fixedwidth_splitter, int(delimiter))
+        else:
+            (_handyman, delimiter) = (self._delimited_splitter, None)
+        self.delimiter = delimiter
+        if autostrip:
+            self._handyman = self.autostrip(_handyman)
+        else:
+            self._handyman = _handyman
+        self.encoding = encoding
+
+    def _delimited_splitter(self, line):
+        """Chop off comments, strip, and split at delimiter. """
+        if self.comments is not None:
+            line = line.split(self.comments)[0]
+        line = line.strip(" \r\n")
+        if not line:
+            return []
+        return line.split(self.delimiter)
+
+    def _fixedwidth_splitter(self, line):
+        if self.comments is not None:
+            line = line.split(self.comments)[0]
+        line = line.strip("\r\n")
+        if not line:
+            return []
+        fixed = self.delimiter
+        slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
+        return [line[s] for s in slices]
+
+    def _variablewidth_splitter(self, line):
+        if self.comments is not None:
+            line = line.split(self.comments)[0]
+        if not line:
+            return []
+        slices = self.delimiter
+        return [line[s] for s in slices]
+
+    def __call__(self, line):
+        return self._handyman(_decode_line(line, self.encoding))
+
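A short usage sketch for ``LineSplitter`` (not part of the patch)::

    from numpy.lib._iotools import LineSplitter

    LineSplitter(',')("1, 2, 3  # trailing comment")
    # ['1', '2', '3']          comment chopped, fields autostripped
    LineSplitter(delimiter=[3, 3, 6])("  1  2   3.4")
    # ['1', '2', '3.4']        fixed column widths of 3, 3 and 6 characters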
+
+class NameValidator:
+    """
+    Object to validate a list of strings to use as field names.
+
+    The strings are stripped of any non alphanumeric character, and spaces
+    are replaced by '_'. During instantiation, the user can define a list
+    of names to exclude, as well as a list of invalid characters. Names in
+    the exclusion list have a '_' character appended.
+
+    Once an instance has been created, it can be called with a list of
+    names, and a list of valid names will be created.  The `__call__`
+    method accepts an optional keyword "default" that sets the default name
+    in case of ambiguity. By default this is 'f', so that names will
+    default to `f0`, `f1`, etc.
+
+    Parameters
+    ----------
+    excludelist : sequence, optional
+        A list of names to exclude. This list is appended to the default
+        list ['return', 'file', 'print']. Excluded names are appended an
+        underscore: for example, `file` becomes `file_` if supplied.
+    deletechars : str, optional
+        A string combining invalid characters that must be deleted from the
+        names.
+    case_sensitive : {True, False, 'upper', 'lower'}, optional
+        * If True, field names are case-sensitive.
+        * If False or 'upper', field names are converted to upper case.
+        * If 'lower', field names are converted to lower case.
+
+        The default value is True.
+    replace_space : '_', optional
+        Character(s) used in replacement of white spaces.
+
+    Notes
+    -----
+    Calling an instance of `NameValidator` is the same as calling its
+    method `validate`.
+
+    Examples
+    --------
+    >>> validator = np.lib._iotools.NameValidator()
+    >>> validator(['file', 'field2', 'with space', 'CaSe'])
+    ('file_', 'field2', 'with_space', 'CaSe')
+
+    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
+    ...                                           deletechars='q',
+    ...                                           case_sensitive=False)
+    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
+    ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')
+
+    """
+
+    defaultexcludelist = ['return', 'file', 'print']
+    defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
+
+    def __init__(self, excludelist=None, deletechars=None,
+                 case_sensitive=None, replace_space='_'):
+        # Process the exclusion list ..
+        if excludelist is None:
+            excludelist = []
+        excludelist.extend(self.defaultexcludelist)
+        self.excludelist = excludelist
+        # Process the list of characters to delete
+        if deletechars is None:
+            delete = self.defaultdeletechars
+        else:
+            delete = set(deletechars)
+        delete.add('"')
+        self.deletechars = delete
+        # Process the case option .....
+        if (case_sensitive is None) or (case_sensitive is True):
+            self.case_converter = lambda x: x
+        elif (case_sensitive is False) or case_sensitive.startswith('u'):
+            self.case_converter = lambda x: x.upper()
+        elif case_sensitive.startswith('l'):
+            self.case_converter = lambda x: x.lower()
+        else:
+            msg = 'unrecognized case_sensitive value %s.' % case_sensitive
+            raise ValueError(msg)
+
+        self.replace_space = replace_space
+
+    def validate(self, names, defaultfmt="f%i", nbfields=None):
+        """
+        Validate a list of strings as field names for a structured array.
+
+        Parameters
+        ----------
+        names : sequence of str
+            Strings to be validated.
+        defaultfmt : str, optional
+            Default format string, used if validating a given string
+            reduces its length to zero.
+        nbfields : integer, optional
+            Final number of validated names, used to expand or shrink the
+            initial list of names.
+
+        Returns
+        -------
+        validatednames : list of str
+            The list of validated field names.
+
+        Notes
+        -----
+        A `NameValidator` instance can be called directly, which is the
+        same as calling `validate`. For examples, see `NameValidator`.
+
+        """
+        # Initial checks ..............
+        if (names is None):
+            if (nbfields is None):
+                return None
+            names = []
+        if isinstance(names, str):
+            names = [names, ]
+        if nbfields is not None:
+            nbnames = len(names)
+            if (nbnames < nbfields):
+                names = list(names) + [''] * (nbfields - nbnames)
+            elif (nbnames > nbfields):
+                names = names[:nbfields]
+        # Set some shortcuts ...........
+        deletechars = self.deletechars
+        excludelist = self.excludelist
+        case_converter = self.case_converter
+        replace_space = self.replace_space
+        # Initializes some variables ...
+        validatednames = []
+        seen = dict()
+        nbempty = 0
+
+        for item in names:
+            item = case_converter(item).strip()
+            if replace_space:
+                item = item.replace(' ', replace_space)
+            item = ''.join([c for c in item if c not in deletechars])
+            if item == '':
+                item = defaultfmt % nbempty
+                while item in names:
+                    nbempty += 1
+                    item = defaultfmt % nbempty
+                nbempty += 1
+            elif item in excludelist:
+                item += '_'
+            cnt = seen.get(item, 0)
+            if cnt > 0:
+                validatednames.append(item + '_%d' % cnt)
+            else:
+                validatednames.append(item)
+            seen[item] = cnt + 1
+        return tuple(validatednames)
+
+    def __call__(self, names, defaultfmt="f%i", nbfields=None):
+        return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
+
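Beyond the docstring examples, duplicate and empty names interact with
``nbfields`` like this (a sketch, not part of the patch)::

    from numpy.lib._iotools import NameValidator

    NameValidator().validate(['a', 'a', ''], nbfields=4)
    # ('a', 'a_1', 'f0', 'f1')   duplicates get a counter suffix; the empty
    #                            and padded slots fall back to defaultfmt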
+
+def str2bool(value):
+    """
+    Tries to transform a string supposed to represent a boolean to a boolean.
+
+    Parameters
+    ----------
+    value : str
+        The string that is transformed to a boolean.
+
+    Returns
+    -------
+    boolval : bool
+        The boolean representation of `value`.
+
+    Raises
+    ------
+    ValueError
+        If the string is not 'True' or 'False' (case independent)
+
+    Examples
+    --------
+    >>> np.lib._iotools.str2bool('TRUE')
+    True
+    >>> np.lib._iotools.str2bool('false')
+    False
+
+    """
+    value = value.upper()
+    if value == 'TRUE':
+        return True
+    elif value == 'FALSE':
+        return False
+    else:
+        raise ValueError("Invalid boolean")
+
+
+class ConverterError(Exception):
+    """
+    Exception raised when an error occurs in a converter for string values.
+
+    """
+    pass
+
+
+class ConverterLockError(ConverterError):
+    """
+    Exception raised when an attempt is made to upgrade a locked converter.
+
+    """
+    pass
+
+
+class ConversionWarning(UserWarning):
+    """
+    Warning issued when a string converter has a problem.
+
+    Notes
+    -----
+    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
+    is explicitly suppressed with the "invalid_raise" keyword.
+
+    """
+    pass
+
+
+class StringConverter:
+    """
+    Factory class for functions transforming a string into another object
+    (int, float).
+
+    After initialization, an instance can be called to transform a string
+    into another object. If the string is recognized as representing a
+    missing value, a default value is returned.
+
+    Attributes
+    ----------
+    func : function
+        Function used for the conversion.
+    default : any
+        Default value to return when the input corresponds to a missing
+        value.
+    type : type
+        Type of the output.
+    _status : int
+        Integer representing the order of the conversion.
+    _mapper : sequence of tuples
+        Sequence of tuples (dtype, function, default value) to evaluate in
+        order.
+    _locked : bool
+        Holds `locked` parameter.
+
+    Parameters
+    ----------
+    dtype_or_func : {None, dtype, function}, optional
+        If a `dtype`, specifies the input data type, used to define a basic
+        function and a default value for missing data. For example, when
+        `dtype` is float, the `func` attribute is set to `float` and the
+        default value to `np.nan`.  If a function, this function is used to
+        convert a string to another object. In this case, it is recommended
+        to give an associated default value as input.
+    default : any, optional
+        Value to return by default, that is, when the string to be
+        converted is flagged as missing. If not given, `StringConverter`
+        tries to supply a reasonable default value.
+    missing_values : {None, sequence of str}, optional
+        ``None`` or sequence of strings indicating a missing value. If ``None``
+        then missing values are indicated by empty entries. The default is
+        ``None``.
+    locked : bool, optional
+        Whether the StringConverter should be locked to prevent automatic
+        upgrade or not. Default is False.
+
+    """
+    _mapper = [(nx.bool_, str2bool, False),
+               (nx.int_, int, -1),]
+
+    # On 32-bit systems, we need to make sure that we explicitly include
+    # nx.int64 since nx.int_ is nx.int32.
+    if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
+        _mapper.append((nx.int64, int, -1))
+
+    _mapper.extend([(nx.float64, float, nx.nan),
+                    (nx.complex128, complex, nx.nan + 0j),
+                    (nx.longdouble, nx.longdouble, nx.nan),
+                    # If a non-default dtype is passed, fall back to generic
+                    # ones (should only be used for the converter)
+                    (nx.integer, int, -1),
+                    (nx.floating, float, nx.nan),
+                    (nx.complexfloating, complex, nx.nan + 0j),
+                    # Last, try with the string types (must be last, because
+                    # `_mapper[-1]` is used as default in some cases)
+                    (nx.str_, asunicode, '???'),
+                    (nx.bytes_, asbytes, '???'),
+                    ])
+
+    @classmethod
+    def _getdtype(cls, val):
+        """Returns the dtype of the input variable."""
+        return np.array(val).dtype
+
+    @classmethod
+    def _getsubdtype(cls, val):
+        """Returns the type of the dtype of the input variable."""
+        return np.array(val).dtype.type
+
+    @classmethod
+    def _dtypeortype(cls, dtype):
+        """Returns dtype for datetime64 and type of dtype otherwise."""
+
+        # This is a bit annoying. We want to return the "general" type in most
+        # cases (ie. "string" rather than "S10"), but we want to return the
+        # specific type for datetime64 (ie. "datetime64[us]" rather than
+        # "datetime64").
+        if dtype.type == np.datetime64:
+            return dtype
+        return dtype.type
+
+    @classmethod
+    def upgrade_mapper(cls, func, default=None):
+        """
+        Upgrade the mapper of a StringConverter by adding a new function and
+        its corresponding default.
+
+        The input function (or sequence of functions) and its associated
+        default value (if any) is inserted in penultimate position of the
+        mapper.  The corresponding type is estimated from the dtype of the
+        default value.
+
+        Parameters
+        ----------
+        func : var
+            Function, or sequence of functions
+
+        Examples
+        --------
+        >>> import dateutil.parser
+        >>> import datetime
+        >>> dateparser = dateutil.parser.parse
+        >>> defaultdate = datetime.date(2000, 1, 1)
+        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
+        """
+        # Func is a single function
+        if hasattr(func, '__call__'):
+            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
+            return
+        elif hasattr(func, '__iter__'):
+            if isinstance(func[0], (tuple, list)):
+                for _ in func:
+                    cls._mapper.insert(-1, _)
+                return
+            if default is None:
+                default = [None] * len(func)
+            else:
+                default = list(default)
+                # pad with None so every function in `func` gets a default
+                default.extend([None] * (len(func) - len(default)))
+            for fct, dft in zip(func, default):
+                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
+
+    @classmethod
+    def _find_map_entry(cls, dtype):
+        # if a converter for the specific dtype is available use that
+        for i, (deftype, func, default_def) in enumerate(cls._mapper):
+            if dtype.type == deftype:
+                return i, (deftype, func, default_def)
+
+        # otherwise find an inexact match
+        for i, (deftype, func, default_def) in enumerate(cls._mapper):
+            if np.issubdtype(dtype.type, deftype):
+                return i, (deftype, func, default_def)
+
+        raise LookupError
+
+    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
+                 locked=False):
+        # Defines a lock for upgrade
+        self._locked = bool(locked)
+        # No input dtype: minimal initialization
+        if dtype_or_func is None:
+            self.func = str2bool
+            self._status = 0
+            self.default = default or False
+            dtype = np.dtype('bool')
+        else:
+            # Is the input a np.dtype ?
+            try:
+                self.func = None
+                dtype = np.dtype(dtype_or_func)
+            except TypeError:
+                # dtype_or_func must be a function, then
+                if not hasattr(dtype_or_func, '__call__'):
+                    errmsg = ("The input argument `dtype` is neither a"
+                              " function nor a dtype (got '%s' instead)")
+                    raise TypeError(errmsg % type(dtype_or_func))
+                # Set the function
+                self.func = dtype_or_func
+                # If we don't have a default, try to guess it or set it to
+                # None
+                if default is None:
+                    try:
+                        default = self.func('0')
+                    except ValueError:
+                        default = None
+                dtype = self._getdtype(default)
+
+            # find the best match in our mapper
+            try:
+                self._status, (_, func, default_def) = self._find_map_entry(dtype)
+            except LookupError:
+                # no match
+                self.default = default
+                _, func, _ = self._mapper[-1]
+                self._status = 0
+            else:
+                # use the found default only if we did not already have one
+                if default is None:
+                    self.default = default_def
+                else:
+                    self.default = default
+
+            # If the input was a dtype, set the function to the last we saw
+            if self.func is None:
+                self.func = func
+
+            # If the status is 1 (int), change the function to
+            # something more robust.
+            if self.func == self._mapper[1][1]:
+                if issubclass(dtype.type, np.uint64):
+                    self.func = np.uint64
+                elif issubclass(dtype.type, np.int64):
+                    self.func = np.int64
+                else:
+                    self.func = lambda x: int(float(x))
+        # Store the list of strings corresponding to missing values.
+        if missing_values is None:
+            self.missing_values = {''}
+        else:
+            if isinstance(missing_values, str):
+                missing_values = missing_values.split(",")
+            self.missing_values = set(list(missing_values) + [''])
+
+        self._callingfunction = self._strict_call
+        self.type = self._dtypeortype(dtype)
+        self._checked = False
+        self._initial_default = default
+
+    def _loose_call(self, value):
+        try:
+            return self.func(value)
+        except ValueError:
+            return self.default
+
+    def _strict_call(self, value):
+        try:
+
+            # We check if we can convert the value using the current function
+            new_value = self.func(value)
+
+            # In addition to having to check whether func can convert the
+            # value, we also have to make sure that we don't get overflow
+            # errors for integers.
+            if self.func is int:
+                try:
+                    np.array(value, dtype=self.type)
+                except OverflowError:
+                    raise ValueError
+
+            # We're still here so we can now return the new value
+            return new_value
+
+        except ValueError:
+            if value.strip() in self.missing_values:
+                if not self._status:
+                    self._checked = False
+                return self.default
+            raise ValueError("Cannot convert string '%s'" % value)
+
+    def __call__(self, value):
+        return self._callingfunction(value)
+
+    def _do_upgrade(self):
+        # Raise an exception if we locked the converter...
+        if self._locked:
+            errmsg = "Converter is locked and cannot be upgraded"
+            raise ConverterLockError(errmsg)
+        _statusmax = len(self._mapper)
+        # Complain if we try to upgrade past the maximum
+        _status = self._status
+        if _status == _statusmax:
+            errmsg = "Could not find a valid conversion function"
+            raise ConverterError(errmsg)
+        elif _status < _statusmax - 1:
+            _status += 1
+        self.type, self.func, default = self._mapper[_status]
+        self._status = _status
+        if self._initial_default is not None:
+            self.default = self._initial_default
+        else:
+            self.default = default
+
+    def upgrade(self, value):
+        """
+        Find the best converter for a given string, and return the result.
+
+        The supplied string `value` is converted by testing different
+        converters in order. First the `func` method of the
+        `StringConverter` instance is tried, if this fails other available
+        converters are tried.  The order in which these other converters
+        are tried is determined by the `_status` attribute of the instance.
+
+        Parameters
+        ----------
+        value : str
+            The string to convert.
+
+        Returns
+        -------
+        out : any
+            The result of converting `value` with the appropriate converter.
+
+        """
+        self._checked = True
+        try:
+            return self._strict_call(value)
+        except ValueError:
+            self._do_upgrade()
+            return self.upgrade(value)
+
+    def iterupgrade(self, value):
+        self._checked = True
+        if not hasattr(value, '__iter__'):
+            value = (value,)
+        _strict_call = self._strict_call
+        try:
+            for _m in value:
+                _strict_call(_m)
+        except ValueError:
+            self._do_upgrade()
+            self.iterupgrade(value)
+
+    def update(self, func, default=None, testing_value=None,
+               missing_values='', locked=False):
+        """
+        Set StringConverter attributes directly.
+
+        Parameters
+        ----------
+        func : function
+            Conversion function.
+        default : any, optional
+            Value to return by default, that is, when the string to be
+            converted is flagged as missing. If not given,
+            `StringConverter` tries to supply a reasonable default value.
+        testing_value : str, optional
+            A string representing a standard input value of the converter.
+            This string is used to help defining a reasonable default
+            value.
+        missing_values : {sequence of str, None}, optional
+            Sequence of strings indicating a missing value. If ``None``, then
+            the existing `missing_values` are cleared. The default is `''`.
+        locked : bool, optional
+            Whether the StringConverter should be locked to prevent
+            automatic upgrade or not. Default is False.
+
+        Notes
+        -----
+        `update` takes the same parameters as the constructor of
+        `StringConverter`, except that `func` does not accept a `dtype`
+        whereas `dtype_or_func` in the constructor does.
+
+        """
+        self.func = func
+        self._locked = locked
+
+        # Don't reset the default to None if we can avoid it
+        if default is not None:
+            self.default = default
+            self.type = self._dtypeortype(self._getdtype(default))
+        else:
+            try:
+                tester = func(testing_value or '1')
+            except (TypeError, ValueError):
+                tester = None
+            self.type = self._dtypeortype(self._getdtype(tester))
+
+        # Add the missing values to the existing set or clear it.
+        if missing_values is None:
+            # Clear all missing values even though the ctor initializes it to
+            # set(['']) when the argument is None.
+            self.missing_values = set()
+        else:
+            if not np.iterable(missing_values):
+                missing_values = [missing_values]
+            if not all(isinstance(v, str) for v in missing_values):
+                raise TypeError("missing_values must be strings or unicode")
+            self.missing_values.update(missing_values)
+
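A small sketch of the upgrade machinery (not part of the patch; the exact
numpy scalar types involved depend on the platform's default integer)::

    from numpy.lib._iotools import StringConverter

    conv = StringConverter(int, default=-1)
    conv('42')              # 42, converted with the integer function
    conv.upgrade('3.14')    # int fails, so the converter upgrades itself
    # 3.14
    conv.type               # now a floating-point type (e.g. numpy.float64)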
+
+def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
+    """
+    Convenience function to create a `np.dtype` object.
+
+    The function processes the input `dtype` and matches it with the given
+    names.
+
+    Parameters
+    ----------
+    ndtype : var
+        Definition of the dtype. Can be any string or dictionary recognized
+        by the `np.dtype` function, or a sequence of types.
+    names : str or sequence, optional
+        Sequence of strings to use as field names for a structured dtype.
+        For convenience, `names` can be a string of a comma-separated list
+        of names.
+    defaultfmt : str, optional
+        Format string used to define missing names, such as ``"f%i"``
+        (default) or ``"fields_%02i"``.
+    validationargs : optional
+        A series of optional arguments used to initialize a
+        `NameValidator`.
+
+    Examples
+    --------
+    >>> np.lib._iotools.easy_dtype(float)
+    dtype('float64')
+    >>> np.lib._iotools.easy_dtype("i4, f8")
+    dtype([('f0', '<i4'), ('f1', '<f8')])
+    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
+    dtype([('field_000', '<i4'), ('field_001', '<f8')])
+
+    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
+    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
+    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
+    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
+    def __init__(self, vstring: str) -> None: ...
+    def __lt__(self, other: str | NumpyVersion) -> bool: ...
+    def __le__(self, other: str | NumpyVersion) -> bool: ...
+    def __eq__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
+    def __ne__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
+    def __gt__(self, other: str | NumpyVersion) -> bool: ...
+    def __ge__(self, other: str | NumpyVersion) -> bool: ...
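These comparison stubs back the usual version-gating idiom, sketched here
(not part of the patch)::

    import numpy as np
    from numpy.lib import NumpyVersion

    if NumpyVersion(np.__version__) >= '1.24.0':
        pass  # use a feature that needs numpy >= 1.24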
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/arraypad.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/arraypad.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..1ac6fc7d91c868ba077235b8229cd00869386660
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/arraypad.pyi
@@ -0,0 +1,85 @@
+from typing import (
+    Literal as L,
+    Any,
+    overload,
+    TypeVar,
+    Protocol,
+)
+
+from numpy import generic
+
+from numpy._typing import (
+    ArrayLike,
+    NDArray,
+    _ArrayLikeInt,
+    _ArrayLike,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+
+class _ModeFunc(Protocol):
+    def __call__(
+        self,
+        vector: NDArray[Any],
+        iaxis_pad_width: tuple[int, int],
+        iaxis: int,
+        kwargs: dict[str, Any],
+        /,
+    ) -> None: ...
+
+_ModeKind = L[
+    "constant",
+    "edge",
+    "linear_ramp",
+    "maximum",
+    "mean",
+    "median",
+    "minimum",
+    "reflect",
+    "symmetric",
+    "wrap",
+    "empty",
+]
+
+__all__: list[str]
+
+# TODO: In practice each keyword argument is exclusive to one or more
+# specific modes. Consider adding more overloads to express this in the future.
+
+# Expand `**kwargs` into explicit keyword-only arguments
+@overload
+def pad(
+    array: _ArrayLike[_SCT],
+    pad_width: _ArrayLikeInt,
+    mode: _ModeKind = ...,
+    *,
+    stat_length: None | _ArrayLikeInt = ...,
+    constant_values: ArrayLike = ...,
+    end_values: ArrayLike = ...,
+    reflect_type: L["odd", "even"] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def pad(
+    array: ArrayLike,
+    pad_width: _ArrayLikeInt,
+    mode: _ModeKind = ...,
+    *,
+    stat_length: None | _ArrayLikeInt = ...,
+    constant_values: ArrayLike = ...,
+    end_values: ArrayLike = ...,
+    reflect_type: L["odd", "even"] = ...,
+) -> NDArray[Any]: ...
+@overload
+def pad(
+    array: _ArrayLike[_SCT],
+    pad_width: _ArrayLikeInt,
+    mode: _ModeFunc,
+    **kwargs: Any,
+) -> NDArray[_SCT]: ...
+@overload
+def pad(
+    array: ArrayLike,
+    pad_width: _ArrayLikeInt,
+    mode: _ModeFunc,
+    **kwargs: Any,
+) -> NDArray[Any]: ...
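The two overload families above correspond to the string modes and the
callable mode of ``np.pad``; a brief runtime sketch (not part of the patch;
``pad_with`` is adapted from the ``numpy.pad`` docstring example)::

    import numpy as np

    np.pad(np.array([1, 2, 3]), (1, 2), mode='constant', constant_values=0)
    # array([0, 1, 2, 3, 0, 0])

    def pad_with(vector, pad_width, iaxis, kwargs):
        # a _ModeFunc: fill the padded margins of each 1-D slice in place
        pad_value = kwargs.get('padder', 10)
        vector[:pad_width[0]] = pad_value
        vector[-pad_width[1]:] = pad_value

    np.pad(np.arange(6).reshape(2, 3), 1, pad_with, padder=-1)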
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/arraysetops.py b/.venv/lib/python3.11/site-packages/numpy/lib/arraysetops.py
new file mode 100644
index 0000000000000000000000000000000000000000..300bbda26ceb547752857e26a5871fa802ca6a6d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/arraysetops.py
@@ -0,0 +1,981 @@
+"""
+Set operations for arrays based on sorting.
+
+Notes
+-----
+
+For floating point arrays, inaccurate results may appear due to usual round-off
+and floating point comparison issues.
+
+Speed could be gained in some operations by an implementation of
+`numpy.sort` that directly provides the permutation vectors, thus avoiding
+calls to `numpy.argsort`.
+
+Original author: Robert Cimrman
+
+"""
+import functools
+
+import numpy as np
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+    'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
+    'in1d', 'isin'
+    ]
+
+
+def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
+    return (ary, to_end, to_begin)
+
+
+@array_function_dispatch(_ediff1d_dispatcher)
+def ediff1d(ary, to_end=None, to_begin=None):
+    """
+    The differences between consecutive elements of an array.
+
+    Parameters
+    ----------
+    ary : array_like
+        If necessary, will be flattened before the differences are taken.
+    to_end : array_like, optional
+        Number(s) to append at the end of the returned differences.
+    to_begin : array_like, optional
+        Number(s) to prepend at the beginning of the returned differences.
+
+    Returns
+    -------
+    ediff1d : ndarray
+        The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
+
+    See Also
+    --------
+    diff, gradient
+
+    Notes
+    -----
+    When applied to masked arrays, this function drops the mask information
+    if the `to_begin` and/or `to_end` parameters are used.
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 4, 7, 0])
+    >>> np.ediff1d(x)
+    array([ 1,  2,  3, -7])
+
+    >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+    array([-99,   1,   2, ...,  -7,  88,  99])
+
+    The returned array is always 1D.
+
+    >>> y = [[1, 2, 4], [1, 6, 24]]
+    >>> np.ediff1d(y)
+    array([ 1,  2, -3,  5, 18])
+
+    """
+    # force a 1d array
+    ary = np.asanyarray(ary).ravel()
+
+    # enforce that the dtype of `ary` is used for the output
+    dtype_req = ary.dtype
+
+    # fast track default case
+    if to_begin is None and to_end is None:
+        return ary[1:] - ary[:-1]
+
+    if to_begin is None:
+        l_begin = 0
+    else:
+        to_begin = np.asanyarray(to_begin)
+        if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
+            raise TypeError("dtype of `to_begin` must be compatible "
+                            "with input `ary` under the `same_kind` rule.")
+
+        to_begin = to_begin.ravel()
+        l_begin = len(to_begin)
+
+    if to_end is None:
+        l_end = 0
+    else:
+        to_end = np.asanyarray(to_end)
+        if not np.can_cast(to_end, dtype_req, casting="same_kind"):
+            raise TypeError("dtype of `to_end` must be compatible "
+                            "with input `ary` under the `same_kind` rule.")
+
+        to_end = to_end.ravel()
+        l_end = len(to_end)
+
+    # do the calculation in place and copy to_begin and to_end
+    l_diff = max(len(ary) - 1, 0)
+    result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype)
+    result = ary.__array_wrap__(result)
+    if l_begin > 0:
+        result[:l_begin] = to_begin
+    if l_end > 0:
+        result[l_begin + l_diff:] = to_end
+    np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
+    return result
+
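The ``same_kind`` casting check above is what rejects mismatched prepend and
append values; a small sketch (not part of the patch)::

    import numpy as np

    x = np.array([1, 2, 4], dtype=np.int64)
    np.ediff1d(x, to_end=10)       # fine: array([ 1,  2, 10])
    # np.ediff1d(x, to_end=0.5)    # TypeError: float64 cannot be cast to
    #                              # int64 under the 'same_kind' rule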
+
+def _unpack_tuple(x):
+    """ Unpacks one-element tuples for use as return values """
+    if len(x) == 1:
+        return x[0]
+    else:
+        return x
+
+
+def _unique_dispatcher(ar, return_index=None, return_inverse=None,
+                       return_counts=None, axis=None, *, equal_nan=None):
+    return (ar,)
+
+
+@array_function_dispatch(_unique_dispatcher)
+def unique(ar, return_index=False, return_inverse=False,
+           return_counts=False, axis=None, *, equal_nan=True):
+    """
+    Find the unique elements of an array.
+
+    Returns the sorted unique elements of an array. There are three optional
+    outputs in addition to the unique elements:
+
+    * the indices of the input array that give the unique values
+    * the indices of the unique array that reconstruct the input array
+    * the number of times each unique value comes up in the input array
+
+    Parameters
+    ----------
+    ar : array_like
+        Input array. Unless `axis` is specified, this will be flattened if it
+        is not already 1-D.
+    return_index : bool, optional
+        If True, also return the indices of `ar` (along the specified axis,
+        if provided, or in the flattened array) that result in the unique array.
+    return_inverse : bool, optional
+        If True, also return the indices of the unique array (for the specified
+        axis, if provided) that can be used to reconstruct `ar`.
+    return_counts : bool, optional
+        If True, also return the number of times each unique item appears
+        in `ar`.
+    axis : int or None, optional
+        The axis to operate on. If None, `ar` will be flattened. If an integer,
+        the subarrays indexed by the given axis will be flattened and treated
+        as the elements of a 1-D array with the dimension of the given axis,
+        see the notes for more details.  Object arrays or structured arrays
+        that contain objects are not supported if the `axis` kwarg is used. The
+        default is None.
+
+        .. versionadded:: 1.13.0
+
+    equal_nan : bool, optional
+        If True, collapses multiple NaN values in the returned array into one.
+
+        .. versionadded:: 1.24
+
+    Returns
+    -------
+    unique : ndarray
+        The sorted unique values.
+    unique_indices : ndarray, optional
+        The indices of the first occurrences of the unique values in the
+        original array. Only provided if `return_index` is True.
+    unique_inverse : ndarray, optional
+        The indices to reconstruct the original array from the
+        unique array. Only provided if `return_inverse` is True.
+    unique_counts : ndarray, optional
+        The number of times each of the unique values comes up in the
+        original array. Only provided if `return_counts` is True.
+
+        .. versionadded:: 1.9.0
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+    repeat : Repeat elements of an array.
+
+    Notes
+    -----
+    When an axis is specified the subarrays indexed by the axis are sorted.
+    This is done by making the specified axis the first dimension of the array
+    (move the axis to the first dimension to keep the order of the other axes)
+    and then flattening the subarrays in C order. The flattened subarrays are
+    then viewed as a structured type with each element given a label, with the
+    effect that we end up with a 1-D array of structured types that can be
+    treated in the same way as any other 1-D array. The result is that the
+    flattened subarrays are sorted in lexicographic order starting with the
+    first element.
+
+    .. versionchanged:: 1.21
+        If nan values are in the input array, a single nan is put
+        to the end of the sorted unique values.
+
+        Also for complex arrays all NaN values are considered equivalent
+        (no matter whether the NaN is in the real or imaginary part).
+        The smallest one in lexicographical order is chosen as the
+        representative in the returned array - see np.sort for how the
+        lexicographical order is defined for complex arrays.
+
+    Examples
+    --------
+    >>> np.unique([1, 1, 2, 2, 3, 3])
+    array([1, 2, 3])
+    >>> a = np.array([[1, 1], [2, 3]])
+    >>> np.unique(a)
+    array([1, 2, 3])
+
+    Return the unique rows of a 2D array
+
+    >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
+    >>> np.unique(a, axis=0)
+    array([[1, 0, 0], [2, 3, 4]])
+
+    Return the indices of the original array that give the unique values:
+
+    >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
+    >>> u, indices = np.unique(a, return_index=True)
+    >>> u
+    array(['a', 'b', 'c'], dtype='<U1')
+    >>> indices
+    array([0, 1, 3])
+    >>> a[indices]
+    array(['a', 'b', 'c'], dtype='<U1')
+
+    Reconstruct the input array from the unique values and inverse:
+
+    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+    >>> u, indices = np.unique(a, return_inverse=True)
+    >>> u
+    array([1, 2, 3, 4, 6])
+    >>> indices
+    array([0, 1, 4, 3, 1, 2, 1])
+    >>> u[indices]
+    array([1, 2, 6, 4, 2, 3, 2])
+
+    Reconstruct the input values from the unique values and counts:
+
+    >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
+    >>> values, counts = np.unique(a, return_counts=True)
+    >>> values
+    array([1, 2, 3, 4, 6])
+    >>> counts
+    array([1, 3, 1, 1, 1])
+    >>> np.repeat(values, counts)
+    array([1, 2, 2, 2, 3, 4, 6])    # original order not preserved
+
+    """
+    ar = np.asanyarray(ar)
+    if axis is None:
+        ret = _unique1d(ar, return_index, return_inverse, return_counts, 
+                        equal_nan=equal_nan)
+        return _unpack_tuple(ret)
+
+    # axis was specified and not None
+    try:
+        ar = np.moveaxis(ar, axis, 0)
+    except np.AxisError:
+        # this removes the "axis1" or "axis2" prefix from the error message
+        raise np.AxisError(axis, ar.ndim) from None
+
+    # Must reshape to a contiguous 2D array for this to work...
+    orig_shape, orig_dtype = ar.shape, ar.dtype
+    ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
+    ar = np.ascontiguousarray(ar)
+    dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
+
+    # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
+    # data type with `m` fields where each field has the data type of `ar`.
+    # In the following, we create the array `consolidated`, which has
+    # shape `(n,)` with data type `dtype`.
+    try:
+        if ar.shape[1] > 0:
+            consolidated = ar.view(dtype)
+        else:
+            # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
+            # a data type with itemsize 0, and the call `ar.view(dtype)` will
+            # fail.  Instead, we'll use `np.empty` to explicitly create the
+            # array with shape `(len(ar),)`.  Because `dtype` in this case has
+            # itemsize 0, the total size of the result is still 0 bytes.
+            consolidated = np.empty(len(ar), dtype=dtype)
+    except TypeError as e:
+        # There's no good way to do this for object arrays, etc...
+        msg = 'The axis argument to unique is not supported for dtype {dt}'
+        raise TypeError(msg.format(dt=ar.dtype)) from e
+
+    def reshape_uniq(uniq):
+        n = len(uniq)
+        uniq = uniq.view(orig_dtype)
+        uniq = uniq.reshape(n, *orig_shape[1:])
+        uniq = np.moveaxis(uniq, 0, axis)
+        return uniq
+
+    output = _unique1d(consolidated, return_index,
+                       return_inverse, return_counts, equal_nan=equal_nan)
+    output = (reshape_uniq(output[0]),) + output[1:]
+    return _unpack_tuple(output)
+
+
+def _unique1d(ar, return_index=False, return_inverse=False,
+              return_counts=False, *, equal_nan=True):
+    """
+    Find the unique elements of an array, ignoring shape.
+    """
+    ar = np.asanyarray(ar).flatten()
+
+    optional_indices = return_index or return_inverse
+
+    if optional_indices:
+        perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
+        aux = ar[perm]
+    else:
+        ar.sort()
+        aux = ar
+    mask = np.empty(aux.shape, dtype=np.bool_)
+    mask[:1] = True
+    if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and
+            np.isnan(aux[-1])):
+        if aux.dtype.kind == "c":  # for complex all NaNs are considered equivalent
+            aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left')
+        else:
+            aux_firstnan = np.searchsorted(aux, aux[-1], side='left')
+        if aux_firstnan > 0:
+            mask[1:aux_firstnan] = (
+                aux[1:aux_firstnan] != aux[:aux_firstnan - 1])
+        mask[aux_firstnan] = True
+        mask[aux_firstnan + 1:] = False
+    else:
+        mask[1:] = aux[1:] != aux[:-1]
+
+    ret = (aux[mask],)
+    if return_index:
+        ret += (perm[mask],)
+    if return_inverse:
+        imask = np.cumsum(mask) - 1
+        inv_idx = np.empty(mask.shape, dtype=np.intp)
+        inv_idx[perm] = imask
+        ret += (inv_idx,)
+    if return_counts:
+        idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
+        ret += (np.diff(idx),)
+    return ret
+
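The heart of ``_unique1d`` is the sort-then-compare-neighbours mask; stripped
of the NaN handling it reduces to this (a re-derivation, not part of the
patch)::

    import numpy as np

    a = np.array([2, 1, 2, 3, 1])
    aux = np.sort(a)                        # [1 1 2 2 3]
    mask = np.empty(aux.shape, dtype=bool)
    mask[:1] = True
    mask[1:] = aux[1:] != aux[:-1]          # True where a new value starts
    aux[mask]                               # array([1, 2, 3])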
+
+def _intersect1d_dispatcher(
+        ar1, ar2, assume_unique=None, return_indices=None):
+    return (ar1, ar2)
+
+
+@array_function_dispatch(_intersect1d_dispatcher)
+def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
+    """
+    Find the intersection of two arrays.
+
+    Return the sorted, unique values that are in both of the input arrays.
+
+    Parameters
+    ----------
+    ar1, ar2 : array_like
+        Input arrays. Will be flattened if not already 1D.
+    assume_unique : bool
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation.  If True but ``ar1`` or ``ar2`` are not
+        unique, incorrect results and out-of-bounds indices could result.
+        Default is False.
+    return_indices : bool
+        If True, the indices which correspond to the intersection of the two
+        arrays are returned. The first instance of a value is used if there are
+        multiple. Default is False.
+
+        .. versionadded:: 1.15.0
+
+    Returns
+    -------
+    intersect1d : ndarray
+        Sorted 1D array of common and unique elements.
+    comm1 : ndarray
+        The indices of the first occurrences of the common values in `ar1`.
+        Only provided if `return_indices` is True.
+    comm2 : ndarray
+        The indices of the first occurrences of the common values in `ar2`.
+        Only provided if `return_indices` is True.
+
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Examples
+    --------
+    >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
+    array([1, 3])
+
+    To intersect more than two arrays, use functools.reduce:
+
+    >>> from functools import reduce
+    >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
+    array([3])
+
+    To return the indices of the values common to the input arrays
+    along with the intersected values:
+
+    >>> x = np.array([1, 1, 2, 3, 4])
+    >>> y = np.array([2, 1, 4, 6])
+    >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
+    >>> x_ind, y_ind
+    (array([0, 2, 4]), array([1, 0, 2]))
+    >>> xy, x[x_ind], y[y_ind]
+    (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
+
+    """
+    ar1 = np.asanyarray(ar1)
+    ar2 = np.asanyarray(ar2)
+
+    if not assume_unique:
+        if return_indices:
+            ar1, ind1 = unique(ar1, return_index=True)
+            ar2, ind2 = unique(ar2, return_index=True)
+        else:
+            ar1 = unique(ar1)
+            ar2 = unique(ar2)
+    else:
+        ar1 = ar1.ravel()
+        ar2 = ar2.ravel()
+
+    aux = np.concatenate((ar1, ar2))
+    if return_indices:
+        aux_sort_indices = np.argsort(aux, kind='mergesort')
+        aux = aux[aux_sort_indices]
+    else:
+        aux.sort()
+
+    mask = aux[1:] == aux[:-1]
+    int1d = aux[:-1][mask]
+
+    if return_indices:
+        ar1_indices = aux_sort_indices[:-1][mask]
+        ar2_indices = aux_sort_indices[1:][mask] - ar1.size
+        if not assume_unique:
+            ar1_indices = ind1[ar1_indices]
+            ar2_indices = ind2[ar2_indices]
+
+        return int1d, ar1_indices, ar2_indices
+    else:
+        return int1d
+
+
+def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
+    return (ar1, ar2)
+
+
+@array_function_dispatch(_setxor1d_dispatcher)
+def setxor1d(ar1, ar2, assume_unique=False):
+    """
+    Find the set exclusive-or of two arrays.
+
+    Return the sorted, unique values that are in only one (not both) of the
+    input arrays.
+
+    Parameters
+    ----------
+    ar1, ar2 : array_like
+        Input arrays.
+    assume_unique : bool
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation.  Default is False.
+
+    Returns
+    -------
+    setxor1d : ndarray
+        Sorted 1D array of unique values that are in only one of the input
+        arrays.
+
+    Examples
+    --------
+    >>> a = np.array([1, 2, 3, 2, 4])
+    >>> b = np.array([2, 3, 5, 7, 5])
+    >>> np.setxor1d(a,b)
+    array([1, 4, 5, 7])
+
+    """
+    if not assume_unique:
+        ar1 = unique(ar1)
+        ar2 = unique(ar2)
+
+    aux = np.concatenate((ar1, ar2))
+    if aux.size == 0:
+        return aux
+
+    aux.sort()
+    flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
+    return aux[flag[1:] & flag[:-1]]
+
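The flag trick above keeps an element only if it differs from both of its
neighbours in the sorted concatenation; spelled out (a re-derivation, not
part of the patch)::

    import numpy as np

    a, b = np.array([1, 2, 3, 4]), np.array([2, 3, 5, 7])   # already unique
    aux = np.sort(np.concatenate((a, b)))     # [1 2 2 3 3 4 5 7]
    flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
    aux[flag[1:] & flag[:-1]]                 # array([1, 4, 5, 7])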
+
+def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *,
+                     kind=None):
+    return (ar1, ar2)
+
+
+@array_function_dispatch(_in1d_dispatcher)
+def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
+    """
+    Test whether each element of a 1-D array is also present in a second array.
+
+    Returns a boolean array the same length as `ar1` that is True
+    where an element of `ar1` is in `ar2` and False otherwise.
+
+    We recommend using :func:`isin` instead of `in1d` for new code.
+
+    Parameters
+    ----------
+    ar1 : (M,) array_like
+        Input array.
+    ar2 : array_like
+        The values against which to test each value of `ar1`.
+    assume_unique : bool, optional
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation.  Default is False.
+    invert : bool, optional
+        If True, the values in the returned array are inverted (that is,
+        False where an element of `ar1` is in `ar2` and True otherwise).
+        Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
+        to (but is faster than) ``np.invert(in1d(a, b))``.
+    kind : {None, 'sort', 'table'}, optional
+        The algorithm to use. This will not affect the final result,
+        but will affect the speed and memory use. The default, None,
+        will select automatically based on memory considerations.
+
+        * If 'sort', will use a mergesort-based approach. This will have
+          a memory usage of roughly 6 times the sum of the sizes of
+          `ar1` and `ar2`, not accounting for size of dtypes.
+        * If 'table', will use a lookup table approach similar
+          to a counting sort. This is only available for boolean and
+          integer arrays. This will have a memory usage of the
+          size of `ar1` plus the max-min value of `ar2`. `assume_unique`
+          has no effect when the 'table' option is used.
+        * If None, will automatically choose 'table' if
+          the required memory allocation is less than or equal to
+          6 times the sum of the sizes of `ar1` and `ar2`,
+          otherwise will use 'sort'. This is done to not use
+          a large amount of memory by default, even though
+          'table' may be faster in most cases. If 'table' is chosen,
+          `assume_unique` will have no effect.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    in1d : (M,) ndarray, bool
+        The values `ar1[in1d]` are in `ar2`.
+
+    See Also
+    --------
+    isin                  : Version of this function that preserves the
+                            shape of ar1.
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Notes
+    -----
+    `in1d` can be considered as an element-wise function version of the
+    python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
+    equivalent to ``np.array([item in b for item in a])``.
+    However, this idea fails if `ar2` is a set, or similar (non-sequence)
+    container:  As ``ar2`` is converted to an array, in those cases
+    ``asarray(ar2)`` is an object array rather than the expected array of
+    contained values.
+
+    Using ``kind='table'`` tends to be faster than `kind='sort'` if the
+    following relationship is true:
+    ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
+    but may use greater memory. The default value for `kind` will
+    be automatically selected based only on memory usage, so one may
+    manually set ``kind='table'`` if memory constraints can be relaxed.
+
+    .. versionadded:: 1.4.0
+
+    Examples
+    --------
+    >>> test = np.array([0, 1, 2, 5, 0])
+    >>> states = [0, 2]
+    >>> mask = np.in1d(test, states)
+    >>> mask
+    array([ True, False,  True, False,  True])
+    >>> test[mask]
+    array([0, 2, 0])
+    >>> mask = np.in1d(test, states, invert=True)
+    >>> mask
+    array([False,  True, False,  True, False])
+    >>> test[mask]
+    array([1, 5])
+    """
+    # Ravel both arrays, behavior for the first array could be different
+    ar1 = np.asarray(ar1).ravel()
+    ar2 = np.asarray(ar2).ravel()
+
+    # Ensure that iteration through object arrays yields size-1 arrays
+    if ar2.dtype == object:
+        ar2 = ar2.reshape(-1, 1)
+
+    if kind not in {None, 'sort', 'table'}:
+        raise ValueError(
+            f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.")
+
+    # Can use the table method if all arrays are integers or boolean:
+    is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2))
+    use_table_method = is_int_arrays and kind in {None, 'table'}
+
+    if use_table_method:
+        if ar2.size == 0:
+            if invert:
+                return np.ones_like(ar1, dtype=bool)
+            else:
+                return np.zeros_like(ar1, dtype=bool)
+
+        # Convert booleans to uint8 so we can use the fast integer algorithm
+        if ar1.dtype == bool:
+            ar1 = ar1.astype(np.uint8)
+        if ar2.dtype == bool:
+            ar2 = ar2.astype(np.uint8)
+
+        ar2_min = np.min(ar2)
+        ar2_max = np.max(ar2)
+
+        ar2_range = int(ar2_max) - int(ar2_min)
+
+        # Constraints on whether we can actually use the table method:
+        #  1. Assert memory usage is not too large
+        below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)
+        #  2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype
+        range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max
+        #  3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype
+        if ar1.size > 0:
+            ar1_min = np.min(ar1)
+            ar1_max = np.max(ar1)
+
+            # After masking, the range of ar1 is guaranteed to be
+            # within the range of ar2:
+            ar1_upper = min(int(ar1_max), int(ar2_max))
+            ar1_lower = max(int(ar1_min), int(ar2_min))
+
+            range_safe_from_overflow &= all((
+                ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max,
+                ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min
+            ))
+
+        # Optimal performance is for approximately
+        # log10(size) > (log10(range) - 2.27) / 0.927.
+        # However, here we set the requirement that by default
+        # the intermediate array can only be 6x
+        # the combined memory allocation of the original
+        # arrays. See discussion on 
+        # https://github.com/numpy/numpy/pull/12065.
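+        # (Worked example: if `ar2` spans values 0..10**6 while
+        # ar1.size + ar2.size == 10**4, the lookup table would need about
+        # 10**6 entries, which exceeds 6 * 10**4, so the automatic choice
+        # falls back to the sort-based path unless kind='table' is given.)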
+
+        if (
+            range_safe_from_overflow and 
+            (below_memory_constraint or kind == 'table')
+        ):
+
+            if invert:
+                outgoing_array = np.ones_like(ar1, dtype=bool)
+            else:
+                outgoing_array = np.zeros_like(ar1, dtype=bool)
+
+            # Make elements 1 where the integer exists in ar2
+            if invert:
+                isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)
+                isin_helper_ar[ar2 - ar2_min] = 0
+            else:
+                isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)
+                isin_helper_ar[ar2 - ar2_min] = 1
+
+            # Mask out elements we know won't work
+            basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)
+            outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] -
+                                                        ar2_min]
+
+            return outgoing_array
+        elif kind == 'table':  # not range_safe_from_overflow
+            raise RuntimeError(
+                "You have specified kind='table', "
+                "but the range of values in `ar2` or `ar1` exceed the "
+                "maximum integer of the datatype. "
+                "Please set `kind` to None or 'sort'."
+            )
+    elif kind == 'table':
+        raise ValueError(
+            "The 'table' method is only "
+            "supported for boolean or integer arrays. "
+            "Please select 'sort' or None for kind."
+        )
+
+
+    # Check if one of the arrays may contain arbitrary objects
+    contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject
+
+    # This code is run when
+    # a) the first condition is true, making the code significantly faster
+    # b) the second condition is true (i.e. `ar1` or `ar2` may contain
+    #    arbitrary objects), since then sorting is not guaranteed to work
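+    # (For example, with len(ar1) == 1_000_000 the threshold is roughly
+    # 10 * 1e6**0.145 ~= 74 elements in `ar2`.)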
+    if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
+        if invert:
+            mask = np.ones(len(ar1), dtype=bool)
+            for a in ar2:
+                mask &= (ar1 != a)
+        else:
+            mask = np.zeros(len(ar1), dtype=bool)
+            for a in ar2:
+                mask |= (ar1 == a)
+        return mask
+
+    # Otherwise use sorting
+    if not assume_unique:
+        ar1, rev_idx = np.unique(ar1, return_inverse=True)
+        ar2 = np.unique(ar2)
+
+    ar = np.concatenate((ar1, ar2))
+    # We need this to be a stable sort, so always use 'mergesort'
+    # here. The values from the first array should always come before
+    # the values from the second array.
+    order = ar.argsort(kind='mergesort')
+    sar = ar[order]
+    if invert:
+        bool_ar = (sar[1:] != sar[:-1])
+    else:
+        bool_ar = (sar[1:] == sar[:-1])
+    flag = np.concatenate((bool_ar, [invert]))
+    ret = np.empty(ar.shape, dtype=bool)
+    ret[order] = flag
+
+    if assume_unique:
+        return ret[:len(ar1)]
+    else:
+        return ret[rev_idx]
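+
+# Illustrative sketch (not part of the upstream implementation): for integer
+# data the 'sort' and 'table' kinds return identical results; 'table' merely
+# trades memory proportional to max(ar2) - min(ar2) for speed.
+# >>> a = np.array([0, 1, 2, 5, 0])
+# >>> b = np.array([0, 2])
+# >>> np.array_equal(np.in1d(a, b, kind='sort'), np.in1d(a, b, kind='table'))
+# True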
+
+
+def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None,
+                     *, kind=None):
+    return (element, test_elements)
+
+
+@array_function_dispatch(_isin_dispatcher)
+def isin(element, test_elements, assume_unique=False, invert=False, *,
+         kind=None):
+    """
+    Calculates ``element in test_elements``, broadcasting over `element` only.
+    Returns a boolean array of the same shape as `element` that is True
+    where an element of `element` is in `test_elements` and False otherwise.
+
+    Parameters
+    ----------
+    element : array_like
+        Input array.
+    test_elements : array_like
+        The values against which to test each value of `element`.
+        This argument is flattened if it is an array or array_like.
+        See notes for behavior with non-array-like parameters.
+    assume_unique : bool, optional
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation.  Default is False.
+    invert : bool, optional
+        If True, the values in the returned array are inverted, as if
+        calculating `element not in test_elements`. Default is False.
+        ``np.isin(a, b, invert=True)`` is equivalent to (but faster
+        than) ``np.invert(np.isin(a, b))``.
+    kind : {None, 'sort', 'table'}, optional
+        The algorithm to use. This will not affect the final result,
+        but will affect the speed and memory use. The default, None,
+        will select automatically based on memory considerations.
+
+        * If 'sort', will use a mergesort-based approach. This will have
+          a memory usage of roughly 6 times the sum of the sizes of
+          `ar1` and `ar2`, not accounting for size of dtypes.
+        * If 'table', will use a lookup table approach similar
+          to a counting sort. This is only available for boolean and
+          integer arrays. This will have a memory usage of the
+          size of `ar1` plus the max-min value of `ar2`. `assume_unique`
+          has no effect when the 'table' option is used.
+        * If None, will automatically choose 'table' if
+          the required memory allocation is less than or equal to
+          6 times the sum of the sizes of `ar1` and `ar2`,
+          otherwise will use 'sort'. This is done to not use
+          a large amount of memory by default, even though
+          'table' may be faster in most cases. If 'table' is chosen,
+          `assume_unique` will have no effect.
+
+
+    Returns
+    -------
+    isin : ndarray, bool
+        Has the same shape as `element`. The values `element[isin]`
+        are in `test_elements`.
+
+    See Also
+    --------
+    in1d                  : Flattened version of this function.
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Notes
+    -----
+
+    `isin` is an element-wise function version of the python keyword `in`.
+    ``isin(a, b)`` is roughly equivalent to
+    ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
+
+    `element` and `test_elements` are converted to arrays if they are not
+    already. If `test_elements` is a set (or other non-sequence collection)
+    it will be converted to an object array with one element, rather than an
+    array of the values contained in `test_elements`. This is a consequence
+    of the `array` constructor's way of handling non-sequence collections.
+    Converting the set to a list usually gives the desired behavior.
+
+    Using ``kind='table'`` tends to be faster than `kind='sort'` if the
+    following relationship is true:
+    ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
+    but may use greater memory. The default value for `kind` will
+    be automatically selected based only on memory usage, so one may
+    manually set ``kind='table'`` if memory constraints can be relaxed.
+
+    .. versionadded:: 1.13.0
+
+    Examples
+    --------
+    >>> element = 2*np.arange(4).reshape((2, 2))
+    >>> element
+    array([[0, 2],
+           [4, 6]])
+    >>> test_elements = [1, 2, 4, 8]
+    >>> mask = np.isin(element, test_elements)
+    >>> mask
+    array([[False,  True],
+           [ True, False]])
+    >>> element[mask]
+    array([2, 4])
+
+    The indices of the matched values can be obtained with `nonzero`:
+
+    >>> np.nonzero(mask)
+    (array([0, 1]), array([1, 0]))
+
+    The test can also be inverted:
+
+    >>> mask = np.isin(element, test_elements, invert=True)
+    >>> mask
+    array([[ True, False],
+           [False,  True]])
+    >>> element[mask]
+    array([0, 6])
+
+    Because of how `array` handles sets, the following does not
+    work as expected:
+
+    >>> test_set = {1, 2, 4, 8}
+    >>> np.isin(element, test_set)
+    array([[False, False],
+           [False, False]])
+
+    Casting the set to a list gives the expected result:
+
+    >>> np.isin(element, list(test_set))
+    array([[False,  True],
+           [ True, False]])
+    """
+    element = np.asarray(element)
+    return in1d(element, test_elements, assume_unique=assume_unique,
+                invert=invert, kind=kind).reshape(element.shape)
+
+
+def _union1d_dispatcher(ar1, ar2):
+    return (ar1, ar2)
+
+
+@array_function_dispatch(_union1d_dispatcher)
+def union1d(ar1, ar2):
+    """
+    Find the union of two arrays.
+
+    Return the unique, sorted array of values that are in either of the two
+    input arrays.
+
+    Parameters
+    ----------
+    ar1, ar2 : array_like
+        Input arrays. They are flattened if they are not already 1D.
+
+    Returns
+    -------
+    union1d : ndarray
+        Unique, sorted union of the input arrays.
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Examples
+    --------
+    >>> np.union1d([-1, 0, 1], [-2, 0, 2])
+    array([-2, -1,  0,  1,  2])
+
+    To find the union of more than two arrays, use functools.reduce:
+
+    >>> from functools import reduce
+    >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
+    array([1, 2, 3, 4, 6])
+    """
+    return unique(np.concatenate((ar1, ar2), axis=None))
+
+
+def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
+    return (ar1, ar2)
+
+
+@array_function_dispatch(_setdiff1d_dispatcher)
+def setdiff1d(ar1, ar2, assume_unique=False):
+    """
+    Find the set difference of two arrays.
+
+    Return the unique values in `ar1` that are not in `ar2`.
+
+    Parameters
+    ----------
+    ar1 : array_like
+        Input array.
+    ar2 : array_like
+        Input comparison array.
+    assume_unique : bool
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation.  Default is False.
+
+    Returns
+    -------
+    setdiff1d : ndarray
+        1D array of values in `ar1` that are not in `ar2`. The result
+        is sorted when `assume_unique=False`, but otherwise only sorted
+        if the input is sorted.
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Examples
+    --------
+    >>> a = np.array([1, 2, 3, 2, 4, 1])
+    >>> b = np.array([3, 4, 5, 6])
+    >>> np.setdiff1d(a, b)
+    array([1, 2])
+
+    """
+    if assume_unique:
+        ar1 = np.asarray(ar1).ravel()
+    else:
+        ar1 = unique(ar1)
+        ar2 = unique(ar2)
+    return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
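+
+# Illustrative sketch (not part of the upstream implementation): with
+# assume_unique=True the inputs are used as given, so the result keeps the
+# order of `ar1` instead of being sorted:
+# >>> np.setdiff1d([4, 1, 3], [3], assume_unique=True)
+# array([4, 1])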
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/arrayterator.py b/.venv/lib/python3.11/site-packages/numpy/lib/arrayterator.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9ea21f8e49f60461416962fc6e2a2ca625c04cd
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/arrayterator.py
@@ -0,0 +1,219 @@
+"""
+A buffered iterator for big arrays.
+
+This module solves the problem of iterating over a big file-based array
+without having to read it into memory. The `Arrayterator` class wraps
+an array object, and when iterated it will return sub-arrays with at most
+a user-specified number of elements.
+
+"""
+from operator import mul
+from functools import reduce
+
+__all__ = ['Arrayterator']
+
+
+class Arrayterator:
+    """
+    Buffered iterator for big arrays.
+
+    `Arrayterator` creates a buffered iterator for reading big arrays in small
+    contiguous blocks. The class is useful for objects stored in the
+    file system. It allows iteration over the object *without* reading
+    everything in memory; instead, small blocks are read and iterated over.
+
+    `Arrayterator` can be used with any object that supports multidimensional
+    slices. This includes NumPy arrays, but also variables from
+    Scientific.IO.NetCDF or pynetcdf for example.
+
+    Parameters
+    ----------
+    var : array_like
+        The object to iterate over.
+    buf_size : int, optional
+        The buffer size. If `buf_size` is supplied, the maximum amount of
+        data that will be read into memory is `buf_size` elements.
+        Default is None, which will read as many elements as possible
+        into memory.
+
+    Attributes
+    ----------
+    var
+    buf_size
+    start
+    stop
+    step
+    shape
+    flat
+
+    See Also
+    --------
+    ndenumerate : Multidimensional array iterator.
+    flatiter : Flat array iterator.
+    memmap : Create a memory-map to an array stored in a binary file on disk.
+
+    Notes
+    -----
+    The algorithm works by first finding a "running dimension", along which
+    the blocks will be extracted. Given an array of dimensions
+    ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
+    first dimension will be used. If, on the other hand,
+    ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
+    Blocks are extracted along this dimension, and when the last block is
+    returned the process continues from the next dimension, until all
+    elements have been read.
+
+    Examples
+    --------
+    >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+    >>> a_itor = np.lib.Arrayterator(a, 2)
+    >>> a_itor.shape
+    (3, 4, 5, 6)
+
+    Now we can iterate over ``a_itor``, and it will return arrays of size
+    two. Since `buf_size` was smaller than any dimension, the first
+    dimension will be iterated over first:
+
+    >>> for subarr in a_itor:
+    ...     if not subarr.all():
+    ...         print(subarr, subarr.shape) # doctest: +SKIP
+    >>> # [[[[0 1]]]] (1, 1, 1, 2)
+
+    """
+
+    def __init__(self, var, buf_size=None):
+        self.var = var
+        self.buf_size = buf_size
+
+        self.start = [0 for dim in var.shape]
+        self.stop = [dim for dim in var.shape]
+        self.step = [1 for dim in var.shape]
+
+    def __getattr__(self, attr):
+        return getattr(self.var, attr)
+
+    def __getitem__(self, index):
+        """
+        Return a new arrayterator.
+
+        """
+        # Fix index, handling ellipsis and incomplete slices.
+        if not isinstance(index, tuple):
+            index = (index,)
+        fixed = []
+        length, dims = len(index), self.ndim
+        for slice_ in index:
+            if slice_ is Ellipsis:
+                fixed.extend([slice(None)] * (dims-length+1))
+                length = len(fixed)
+            elif isinstance(slice_, int):
+                fixed.append(slice(slice_, slice_+1, 1))
+            else:
+                fixed.append(slice_)
+        index = tuple(fixed)
+        if len(index) < dims:
+            index += (slice(None),) * (dims-len(index))
+
+        # Return a new arrayterator object.
+        out = self.__class__(self.var, self.buf_size)
+        for i, (start, stop, step, slice_) in enumerate(
+                zip(self.start, self.stop, self.step, index)):
+            out.start[i] = start + (slice_.start or 0)
+            out.step[i] = step * (slice_.step or 1)
+            out.stop[i] = start + (slice_.stop or stop-start)
+            out.stop[i] = min(stop, out.stop[i])
+        return out
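+
+    # Illustrative note (not part of the upstream implementation):
+    # ``Arrayterator(a, 2)[0]`` returns a new Arrayterator restricted to
+    # ``a[0:1]`` along the first axis; no data is read until the result is
+    # iterated or passed to ``numpy.asarray``.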
+
+    def __array__(self):
+        """
+        Return corresponding data.
+
+        """
+        slice_ = tuple(slice(*t) for t in zip(
+                self.start, self.stop, self.step))
+        return self.var[slice_]
+
+    @property
+    def flat(self):
+        """
+        A 1-D flat iterator for Arrayterator objects.
+
+        This iterator returns elements of the array to be iterated over in
+        `Arrayterator` one by one. It is similar to `flatiter`.
+
+        See Also
+        --------
+        Arrayterator
+        flatiter
+
+        Examples
+        --------
+        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+        >>> a_itor = np.lib.Arrayterator(a, 2)
+
+        >>> for subarr in a_itor.flat:
+        ...     if not subarr:
+        ...         print(subarr, type(subarr))
+        ...
+        0 <class 'numpy.int64'>
+
+        """
+        for block in self:
+            yield from block.flat
+
+    @property
+    def shape(self):
+        """
+        The shape of the array to be iterated over.
+
+        For an example, see `Arrayterator`.
+
+        """
+        return tuple(((stop-start-1)//step+1) for start, stop, step in
+                zip(self.start, self.stop, self.step))
+
+    def __iter__(self):
+        # Skip arrays with degenerate dimensions
+        if [dim for dim in self.shape if dim <= 0]:
+            return
+
+        start = self.start[:]
+        stop = self.stop[:]
+        step = self.step[:]
+        ndims = self.var.ndim
+
+        while True:
+            count = self.buf_size or reduce(mul, self.shape)
+
+            # iterate over each dimension, looking for the
+            # running dimension (ie, the dimension along which
+            # the blocks will be built from)
+            rundim = 0
+            for i in range(ndims-1, -1, -1):
+                # if count is zero we ran out of elements to read
+                # along higher dimensions, so we read only a single position
+                if count == 0:
+                    stop[i] = start[i]+1
+                elif count <= self.shape[i]:
+                    # limit along this dimension
+                    stop[i] = start[i] + count*step[i]
+                    rundim = i
+                else:
+                    # read everything along this dimension
+                    stop[i] = self.stop[i]
+                stop[i] = min(self.stop[i], stop[i])
+                count = count//self.shape[i]
+
+            # yield a block
+            slice_ = tuple(slice(*t) for t in zip(start, stop, step))
+            yield self.var[slice_]
+
+            # Update start position, taking care of overflow to
+            # other dimensions
+            start[rundim] = stop[rundim]  # start where we stopped
+            for i in range(ndims-1, 0, -1):
+                if start[i] >= self.stop[i]:
+                    start[i] = self.start[i]
+                    start[i-1] += self.step[i-1]
+            if start[0] >= self.stop[0]:
+                return
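+
+# Illustrative sketch (not part of the upstream implementation): for the
+# docstring array of shape (3, 4, 5, 6), a 30-element buffer yields blocks
+# of shape (1, 1, 5, 6), i.e. 30 elements per read.
+# >>> import numpy as np
+# >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+# >>> next(iter(np.lib.Arrayterator(a, 30))).shape
+# (1, 1, 5, 6)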
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/arrayterator.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/arrayterator.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..aa192fb7c40ffeaddc8b082d86755eb3722b8634
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/arrayterator.pyi
@@ -0,0 +1,49 @@
+from collections.abc import Generator
+from typing import (
+    Any,
+    TypeVar,
+    Union,
+    overload,
+)
+
+from numpy import ndarray, dtype, generic
+from numpy._typing import DTypeLike
+
+# TODO: Set a shape bound once we've got proper shape support
+_Shape = TypeVar("_Shape", bound=Any)
+_DType = TypeVar("_DType", bound=dtype[Any])
+_ScalarType = TypeVar("_ScalarType", bound=generic)
+
+_Index = Union[
+    Union[ellipsis, int, slice],
+    tuple[Union[ellipsis, int, slice], ...],
+]
+
+__all__: list[str]
+
+# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,
+# but its ``__getattr__`` method does wrap around the former and thus has
+# access to all its methods
+
+class Arrayterator(ndarray[_Shape, _DType]):
+    var: ndarray[_Shape, _DType]  # type: ignore[assignment]
+    buf_size: None | int
+    start: list[int]
+    stop: list[int]
+    step: list[int]
+
+    @property  # type: ignore[misc]
+    def shape(self) -> tuple[int, ...]: ...
+    @property
+    def flat(  # type: ignore[override]
+        self: ndarray[Any, dtype[_ScalarType]]
+    ) -> Generator[_ScalarType, None, None]: ...
+    def __init__(
+        self, var: ndarray[_Shape, _DType], buf_size: None | int = ...
+    ) -> None: ...
+    @overload
+    def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ...
+    @overload
+    def __array__(self, dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ...
+    def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ...
+    def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/format.py b/.venv/lib/python3.11/site-packages/numpy/lib/format.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5b3fbac23ab6e680510cbc3d47387cdee2c6048
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/format.py
@@ -0,0 +1,976 @@
+"""
+Binary serialization
+
+NPY format
+==========
+
+A simple format for saving numpy arrays to disk with the full
+information about them.
+
+The ``.npy`` format is the standard binary file format in NumPy for
+persisting a *single* arbitrary NumPy array on disk. The format stores all
+of the shape and dtype information necessary to reconstruct the array
+correctly even on another machine with a different architecture.
+The format is designed to be as simple as possible while achieving
+its limited goals.
+
+The ``.npz`` format is the standard format for persisting *multiple* NumPy
+arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
+files, one for each array.
+
+Capabilities
+------------
+
+- Can represent all NumPy arrays including nested record arrays and
+  object arrays.
+
+- Represents the data in its native binary form.
+
+- Supports Fortran-contiguous arrays directly.
+
+- Stores all of the necessary information to reconstruct the array
+  including shape and dtype on a machine of a different
+  architecture.  Both little-endian and big-endian arrays are
+  supported, and a file with little-endian numbers will yield
+  a little-endian array on any machine reading the file. The
+  types are described in terms of their actual sizes. For example,
+  if a machine with a 64-bit C "long int" writes out an array with
+  "long ints", a reading machine with 32-bit C "long ints" will yield
+  an array with 64-bit integers.
+
+- Is straightforward to reverse engineer. Datasets often live longer than
+  the programs that created them. A competent developer should be
+  able to create a solution in their preferred programming language to
+  read most ``.npy`` files that they have been given without much
+  documentation.
+
+- Allows memory-mapping of the data. See `open_memmap`.
+
+- Can be read from a filelike stream object instead of an actual file.
+
+- Stores object arrays, i.e. arrays containing elements that are arbitrary
+  Python objects. Files with object arrays cannot be memory-mapped, but
+  they can be read and written to disk.
+
+Limitations
+-----------
+
+- Arbitrary subclasses of numpy.ndarray are not completely preserved.
+  Subclasses will be accepted for writing, but only the array data will
+  be written out. A regular numpy.ndarray object will be created
+  upon reading the file.
+
+.. warning::
+
+  Due to limitations in the interpretation of structured dtypes, dtypes
+  with fields with empty names will have the names replaced by 'f0', 'f1',
+  etc. Such arrays will not round-trip through the format entirely
+  accurately. The data is intact; only the field names will differ. We are
+  working on a fix for this. This fix will not require a change in the
+  file format. The arrays with such structures can still be saved and
+  restored, and the correct dtype may be restored by using the
+  ``loadedarray.view(correct_dtype)`` method.
+
+File extensions
+---------------
+
+We recommend using the ``.npy`` and ``.npz`` extensions for files saved
+in this format. This is by no means a requirement; applications may wish
+to use these file formats but use an extension specific to the
+application. In the absence of an obvious alternative, however,
+we suggest using ``.npy`` and ``.npz``.
+
+Version numbering
+-----------------
+
+The version numbering of these formats is independent of NumPy version
+numbering. If the format is upgraded, the code in `numpy.io` will still
+be able to read and write Version 1.0 files.
+
+Format Version 1.0
+------------------
+
+The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
+
+The next 1 byte is an unsigned byte: the major version number of the file
+format, e.g. ``\\x01``.
+
+The next 1 byte is an unsigned byte: the minor version number of the file
+format, e.g. ``\\x00``. Note: the version of the file format is not tied
+to the version of the numpy package.
+
+The next 2 bytes form a little-endian unsigned short int: the length of
+the header data HEADER_LEN.
+
+The next HEADER_LEN bytes form the header data describing the array's
+format. It is an ASCII string which contains a Python literal expression
+of a dictionary. It is terminated by a newline (``\\n``) and padded with
+spaces (``\\x20``) to make the total of
+``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
+by 64 for alignment purposes.
+
+The dictionary contains three keys:
+
+    "descr" : dtype.descr
+      An object that can be passed as an argument to the `numpy.dtype`
+      constructor to create the array's dtype.
+    "fortran_order" : bool
+      Whether the array data is Fortran-contiguous or not. Since
+      Fortran-contiguous arrays are a common form of non-C-contiguity,
+      we allow them to be written directly to disk for efficiency.
+    "shape" : tuple of int
+      The shape of the array.
+
+For repeatability and readability, the dictionary keys are sorted in
+alphabetic order. This is for convenience only. A writer SHOULD implement
+this if possible. A reader MUST NOT depend on this.
+
+Following the header comes the array data. If the dtype contains Python
+objects (i.e. ``dtype.hasobject is True``), then the data is a Python
+pickle of the array. Otherwise the data is the contiguous (either C-
+or Fortran-, depending on ``fortran_order``) bytes of the array.
+Consumers can figure out the number of bytes by multiplying the number
+of elements given by the shape (noting that ``shape=()`` means there is
+1 element) by ``dtype.itemsize``.
+
+Format Version 2.0
+------------------
+
+The version 1.0 format only allowed the array header to have a total size of
+65535 bytes.  This can be exceeded by structured arrays with a large number of
+columns.  The version 2.0 format extends the header size to 4 GiB.
+`numpy.save` will automatically save in 2.0 format if the data requires it,
+else it will always use the more compatible 1.0 format.
+
+The description of the fourth element of the header therefore has become:
+"The next 4 bytes form a little-endian unsigned int: the length of the header
+data HEADER_LEN."
+
+Format Version 3.0
+------------------
+
+This version replaces the ASCII string (which in practice was latin1) with
+a utf8-encoded string, so supports structured types with any unicode field
+names.
+
+Notes
+-----
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the
+:doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
+evolved with time and this document is more current.
+
+"""
+import numpy
+import warnings
+from numpy.lib.utils import safe_eval, drop_metadata
+from numpy.compat import (
+    isfileobj, os_fspath, pickle
+    )
+
+
+__all__ = []
+
+
+EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
+MAGIC_PREFIX = b'\x93NUMPY'
+MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
+BUFFER_SIZE = 2**18  # size of buffer for reading npz files in bytes
+# allow growth within the address space of a 64 bit machine along one axis
+GROWTH_AXIS_MAX_DIGITS = 21  # = len(str(8*2**64-1)) hypothetical int1 dtype
+
+# difference between version 1.0 and 2.0 is a 4 byte (I) header length
+# instead of 2 bytes (H) allowing storage of large structured arrays
+_header_size_info = {
+    (1, 0): ('<H', 'latin1'),
+    (2, 0): ('<I', 'latin1'),
+    (3, 0): ('<I', 'utf8'),
+}
+
+# Python's literal_eval is not actually safe for large inputs, since parsing
+# may become slow or even cause interpreter crashes.
+# This is an arbitrary, low limit which should make it safe in practice.
+_MAX_HEADER_SIZE = 10000
+
+
+def _check_version(version):
+    if version not in [(1, 0), (2, 0), (3, 0), None]:
+        msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
+        raise ValueError(msg % (version,))
+
+
+def magic(major, minor):
+    """ Return the magic string for the given file format version.
+
+    Parameters
+    ----------
+    major : int in [0, 255]
+    minor : int in [0, 255]
+
+    Returns
+    -------
+    magic : str
+
+    Raises
+    ------
+    ValueError if the version cannot be formatted.
+    """
+    if major < 0 or major > 255:
+        raise ValueError("major version must be 0 <= major < 256")
+    if minor < 0 or minor > 255:
+        raise ValueError("minor version must be 0 <= minor < 256")
+    return MAGIC_PREFIX + bytes([major, minor])
+
+def read_magic(fp):
+    """ Read the magic string to get the version of the file format.
+
+    Parameters
+    ----------
+    fp : filelike object
+
+    Returns
+    -------
+    major : int
+    minor : int
+    """
+    magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
+    if magic_str[:-2] != MAGIC_PREFIX:
+        msg = "the magic string is not correct; expected %r, got %r"
+        raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
+    major, minor = magic_str[-2:]
+    return major, minor
+
+
+def dtype_to_descr(dtype):
+    """
+    Get a serializable descriptor from the dtype.
+
+    The .descr attribute of a dtype object cannot be round-tripped through
+    the dtype() constructor. Simple types, like dtype('float32'), have
+    a descr which looks like a record array with one field with '' as
+    a name. The dtype() constructor interprets this as a request to give
+    a default name.  Instead, we construct a descriptor that can be passed to
+    dtype().
+
+    Parameters
+    ----------
+    dtype : dtype
+        The dtype of the array that will be written to disk.
+
+    Returns
+    -------
+    descr : object
+        An object that can be passed to `numpy.dtype()` in order to
+        replicate the input dtype.
+
+    """
+    # NOTE: that drop_metadata may not return the right dtype e.g. for user
+    #       dtypes.  In that case our code below would fail the same, though.
+    new_dtype = drop_metadata(dtype)
+    if new_dtype is not dtype:
+        warnings.warn("metadata on a dtype is not saved to an npy/npz. "
+                      "Use another format (such as pickle) to store it.",
+                      UserWarning, stacklevel=2)
+    if dtype.names is not None:
+        # This is a record array. The .descr is fine.  XXX: parts of the
+        # record array with an empty name, like padding bytes, still get
+        # fiddled with. This needs to be fixed in the C implementation of
+        # dtype().
+        return dtype.descr
+    else:
+        return dtype.str
+
+def descr_to_dtype(descr):
+    """
+    Returns a dtype based off the given description.
+
+    This is essentially the reverse of `dtype_to_descr()`. It will remove
+    the valueless padding fields (void fields with an empty name, i.e.
+    padding bytes) and then convert the description to its corresponding
+    dtype.
+
+    Parameters
+    ----------
+    descr : object
+        The object retrieved by dtype.descr. Can be passed to
+        `numpy.dtype()` in order to replicate the input dtype.
+
+    Returns
+    -------
+    dtype : dtype
+        The dtype constructed by the description.
+
+    """
+    if isinstance(descr, str):
+        # No padding removal needed
+        return numpy.dtype(descr)
+    elif isinstance(descr, tuple):
+        # subtype, will always have a shape descr[1]
+        dt = descr_to_dtype(descr[0])
+        return numpy.dtype((dt, descr[1]))
+
+    titles = []
+    names = []
+    formats = []
+    offsets = []
+    offset = 0
+    for field in descr:
+        if len(field) == 2:
+            name, descr_str = field
+            dt = descr_to_dtype(descr_str)
+        else:
+            name, descr_str, shape = field
+            dt = numpy.dtype((descr_to_dtype(descr_str), shape))
+
+        # Ignore padding bytes, which will be void bytes with '' as name
+        # Once support for blank names is removed, only "if name == ''"
+        # will be needed here.
+        is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
+        if not is_pad:
+            title, name = name if isinstance(name, tuple) else (None, name)
+            titles.append(title)
+            names.append(name)
+            formats.append(dt)
+            offsets.append(offset)
+        offset += dt.itemsize
+
+    return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
+                        'offsets': offsets, 'itemsize': offset})
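+
+# Illustrative round trip (a sketch, not part of the upstream module):
+# >>> dt = numpy.dtype([('x', '<f4'), ('y', '<i8')])
+# >>> descr_to_dtype(dtype_to_descr(dt)) == dt
+# True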
+
+def header_data_from_array_1_0(array):
+    """ Get the dictionary of header metadata from a numpy.ndarray.
+
+    Parameters
+    ----------
+    array : numpy.ndarray
+
+    Returns
+    -------
+    d : dict
+        This has the appropriate entries for writing its string representation
+        to the header of the file.
+    """
+    d = {'shape': array.shape}
+    if array.flags.c_contiguous:
+        d['fortran_order'] = False
+    elif array.flags.f_contiguous:
+        d['fortran_order'] = True
+    else:
+        # Totally non-contiguous data. We will have to make it C-contiguous
+        # before writing. Note that we need to test for C_CONTIGUOUS first
+        # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
+        d['fortran_order'] = False
+
+    d['descr'] = dtype_to_descr(array.dtype)
+    return d
+
+
+def _wrap_header(header, version):
+    """
+    Takes a stringified header, and attaches the prefix and padding to it
+    """
+    import struct
+    assert version is not None
+    fmt, encoding = _header_size_info[version]
+    header = header.encode(encoding)
+    hlen = len(header) + 1
+    padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
+    try:
+        header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
+    except struct.error:
+        msg = "Header length {} too big for version={}".format(hlen, version)
+        raise ValueError(msg) from None
+
+    # Pad the header with spaces and a final newline such that the magic
+    # string, the header-length short and the header are aligned on a
+    # ARRAY_ALIGN byte boundary.  This supports memory mapping of dtypes
+    # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
+    # offset must be page-aligned (i.e. the beginning of the file).
+    return header_prefix + header + b' '*padlen + b'\n'
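+
+# (Worked example for version (1, 0): the prefix is 6 magic bytes + 2 version
+# bytes + a 2-byte length field = 10 bytes, so a 45-byte header is followed
+# by 8 spaces and a newline, making the whole block 64 bytes.)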
+
+
+def _wrap_header_guess_version(header):
+    """
+    Like `_wrap_header`, but chooses an appropriate version given the contents
+    """
+    try:
+        return _wrap_header(header, (1, 0))
+    except ValueError:
+        pass
+
+    try:
+        ret = _wrap_header(header, (2, 0))
+    except UnicodeEncodeError:
+        pass
+    else:
+        warnings.warn("Stored array in format 2.0. It can only be"
+                      "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+        return ret
+
+    header = _wrap_header(header, (3, 0))
+    warnings.warn("Stored array in format 3.0. It can only be "
+                  "read by NumPy >= 1.17", UserWarning, stacklevel=2)
+    return header
+
+
+def _write_array_header(fp, d, version=None):
+    """ Write the header for an array and returns the version used
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string representation
+        to the header of the file.
+    version : tuple or None
+        None means use oldest that works. Providing an explicit version will
+        raise a ValueError if the format does not allow saving this data.
+        Default: None
+    """
+    header = ["{"]
+    for key, value in sorted(d.items()):
+        # Need to use repr here, since we eval these when reading
+        header.append("'%s': %s, " % (key, repr(value)))
+    header.append("}")
+    header = "".join(header)
+
+    # Add some spare space so that the array header can be modified in-place
+    # when changing the array size, e.g. when growing it by appending data at
+    # the end.
+    shape = d['shape']
+    header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
+        shape[-1 if d['fortran_order'] else 0]
+    ))) if len(shape) > 0 else 0)
+
+    if version is None:
+        header = _wrap_header_guess_version(header)
+    else:
+        header = _wrap_header(header, version)
+    fp.write(header)
+
+def write_array_header_1_0(fp, d):
+    """ Write the header for an array using the 1.0 format.
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string
+        representation to the header of the file.
+    """
+    _write_array_header(fp, d, (1, 0))
+
+
+def write_array_header_2_0(fp, d):
+    """ Write the header for an array using the 2.0 format.
+        The 2.0 format allows storing very large structured arrays.
+
+    .. versionadded:: 1.9.0
+
+    Parameters
+    ----------
+    fp : filelike object
+    d : dict
+        This has the appropriate entries for writing its string
+        representation to the header of the file.
+    """
+    _write_array_header(fp, d, (2, 0))
+
+def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):
+    """
+    Read an array header from a filelike object using the 1.0 file format
+    version.
+
+    This will leave the file object located just after the header.
+
+    Parameters
+    ----------
+    fp : filelike object
+        A file object or something with a `.read()` method like a file.
+    max_header_size : int, optional
+        Maximum allowed size of the header.  Large headers may not be safe
+        to load securely and thus require explicitly passing a larger value.
+        See :py:func:`ast.literal_eval()` for details.
+
+    Returns
+    -------
+    shape : tuple of int
+        The shape of the array.
+    fortran_order : bool
+        The array data will be written out directly if it is either
+        C-contiguous or Fortran-contiguous. Otherwise, it will be made
+        contiguous before writing it out.
+    dtype : dtype
+        The dtype of the file's data.
+
+    Raises
+    ------
+    ValueError
+        If the data is invalid.
+
+    """
+    return _read_array_header(
+            fp, version=(1, 0), max_header_size=max_header_size)
+
+def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):
+    """
+    Read an array header from a filelike object using the 2.0 file format
+    version.
+
+    This will leave the file object located just after the header.
+
+    .. versionadded:: 1.9.0
+
+    Parameters
+    ----------
+    fp : filelike object
+        A file object or something with a `.read()` method like a file.
+    max_header_size : int, optional
+        Maximum allowed size of the header.  Large headers may not be safe
+        to load securely and thus require explicitly passing a larger value.
+        See :py:func:`ast.literal_eval()` for details.
+
+    Returns
+    -------
+    shape : tuple of int
+        The shape of the array.
+    fortran_order : bool
+        The array data will be written out directly if it is either
+        C-contiguous or Fortran-contiguous. Otherwise, it will be made
+        contiguous before writing it out.
+    dtype : dtype
+        The dtype of the file's data.
+
+    Raises
+    ------
+    ValueError
+        If the data is invalid.
+
+    """
+    return _read_array_header(
+            fp, version=(2, 0), max_header_size=max_header_size)
+
+
+def _filter_header(s):
+    """Clean up 'L' in npz header ints.
+
+    Cleans up the 'L' in strings representing integers. Needed to allow npz
+    headers produced in Python2 to be read in Python3.
+
+    Parameters
+    ----------
+    s : string
+        Npy file header.
+
+    Returns
+    -------
+    header : str
+        Cleaned up header.
+
+    """
+    import tokenize
+    from io import StringIO
+
+    tokens = []
+    last_token_was_number = False
+    for token in tokenize.generate_tokens(StringIO(s).readline):
+        token_type = token[0]
+        token_string = token[1]
+        if (last_token_was_number and
+                token_type == tokenize.NAME and
+                token_string == "L"):
+            continue
+        else:
+            tokens.append(token)
+        last_token_was_number = (token_type == tokenize.NUMBER)
+    return tokenize.untokenize(tokens)
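+
+# Illustrative sketch (not part of the upstream module): a Python 2 header
+# such as "{'shape': (2L, 3L)}" has the integer 'L' suffixes stripped so it
+# can be parsed on Python 3:
+# >>> safe_eval(_filter_header("{'shape': (2L, 3L)}"))['shape']
+# (2, 3)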
+
+
+def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE):
+    """
+    see read_array_header_1_0
+    """
+    # Read an unsigned, little-endian short int which has the length of the
+    # header.
+    import struct
+    hinfo = _header_size_info.get(version)
+    if hinfo is None:
+        raise ValueError("Invalid version {!r}".format(version))
+    hlength_type, encoding = hinfo
+
+    hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
+    header_length = struct.unpack(hlength_type, hlength_str)[0]
+    header = _read_bytes(fp, header_length, "array header")
+    header = header.decode(encoding)
+    if len(header) > max_header_size:
+        raise ValueError(
+            f"Header info length ({len(header)}) is large and may not be safe "
+            "to load securely.\n"
+            "To allow loading, adjust `max_header_size` or fully trust "
+            "the `.npy` file using `allow_pickle=True`.\n"
+            "For safety against large resource use or crashes, sandboxing "
+            "may be necessary.")
+
+    # The header is a pretty-printed string representation of a literal
+    # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte
+    # boundary. The keys are strings.
+    #   "shape" : tuple of int
+    #   "fortran_order" : bool
+    #   "descr" : dtype.descr
+    # Versions (2, 0) and (1, 0) could have been created by a Python 2
+    # implementation before header filtering was implemented.
+    #
+    # For performance reasons, we try without _filter_header first though
+    try:
+        d = safe_eval(header)
+    except SyntaxError as e:
+        if version <= (2, 0):
+            header = _filter_header(header)
+            try:
+                d = safe_eval(header)
+            except SyntaxError as e2:
+                msg = "Cannot parse header: {!r}"
+                raise ValueError(msg.format(header)) from e2
+            else:
+                warnings.warn(
+                    "Reading `.npy` or `.npz` file required additional "
+                    "header parsing as it was created on Python 2. Save the "
+                    "file again to speed up loading and avoid this warning.",
+                    UserWarning, stacklevel=4)
+        else:
+            msg = "Cannot parse header: {!r}"
+            raise ValueError(msg.format(header)) from e
+    if not isinstance(d, dict):
+        msg = "Header is not a dictionary: {!r}"
+        raise ValueError(msg.format(d))
+
+    if EXPECTED_KEYS != d.keys():
+        keys = sorted(d.keys())
+        msg = "Header does not contain the correct keys: {!r}"
+        raise ValueError(msg.format(keys))
+
+    # Sanity-check the values.
+    if (not isinstance(d['shape'], tuple) or
+            not all(isinstance(x, int) for x in d['shape'])):
+        msg = "shape is not valid: {!r}"
+        raise ValueError(msg.format(d['shape']))
+    if not isinstance(d['fortran_order'], bool):
+        msg = "fortran_order is not a valid bool: {!r}"
+        raise ValueError(msg.format(d['fortran_order']))
+    try:
+        dtype = descr_to_dtype(d['descr'])
+    except TypeError as e:
+        msg = "descr is not a valid dtype descriptor: {!r}"
+        raise ValueError(msg.format(d['descr'])) from e
+
+    return d['shape'], d['fortran_order'], dtype
+
+def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
+    """
+    Write an array to an NPY file, including a header.
+
+    If the array is neither C-contiguous nor Fortran-contiguous AND the
+    file_like object is not a real file object, this function will have to
+    copy data in memory.
+
+    Parameters
+    ----------
+    fp : file_like object
+        An open, writable file object, or similar object with a
+        ``.write()`` method.
+    array : ndarray
+        The array to write to disk.
+    version : (int, int) or None, optional
+        The version number of the format. None means use the oldest
+        supported version that is able to store the data.  Default: None
+    allow_pickle : bool, optional
+        Whether to allow writing pickled data. Default: True
+    pickle_kwargs : dict, optional
+        Additional keyword arguments to pass to pickle.dump, excluding
+        'protocol'. These are only useful when pickling objects in object
+        arrays on Python 3 to Python 2 compatible format.
+
+    Raises
+    ------
+    ValueError
+        If the array cannot be persisted. This includes the case of
+        allow_pickle=False and array being an object array.
+    Various other errors
+        If the array contains Python objects as part of its dtype, the
+        process of pickling them may raise various errors if the objects
+        are not picklable.
+
+    """
+    _check_version(version)
+    _write_array_header(fp, header_data_from_array_1_0(array), version)
+
+    if array.itemsize == 0:
+        buffersize = 0
+    else:
+        # Set buffer size to 16 MiB to hide the Python loop overhead.
+        buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
+
+    if array.dtype.hasobject:
+        # We contain Python objects so we cannot write out the data
+        # directly.  Instead, we will pickle it out
+        if not allow_pickle:
+            raise ValueError("Object arrays cannot be saved when "
+                             "allow_pickle=False")
+        if pickle_kwargs is None:
+            pickle_kwargs = {}
+        pickle.dump(array, fp, protocol=3, **pickle_kwargs)
+    elif array.flags.f_contiguous and not array.flags.c_contiguous:
+        if isfileobj(fp):
+            array.T.tofile(fp)
+        else:
+            for chunk in numpy.nditer(
+                    array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+                    buffersize=buffersize, order='F'):
+                fp.write(chunk.tobytes('C'))
+    else:
+        if isfileobj(fp):
+            array.tofile(fp)
+        else:
+            for chunk in numpy.nditer(
+                    array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+                    buffersize=buffersize, order='C'):
+                fp.write(chunk.tobytes('C'))
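+
+# Illustrative round trip (a sketch, not part of the upstream module;
+# 'example.npy' is a hypothetical path):
+# >>> with open('example.npy', 'wb') as f:
+# ...     write_array(f, numpy.arange(10))
+# >>> with open('example.npy', 'rb') as f:
+# ...     read_array(f)
+# array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])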
+
+
+def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
+               max_header_size=_MAX_HEADER_SIZE):
+    """
+    Read an array from an NPY file.
+
+    Parameters
+    ----------
+    fp : file_like object
+        If this is not a real file object, then this may take extra memory
+        and time.
+    allow_pickle : bool, optional
+        Whether to allow reading pickled data. Default: False
+
+        .. versionchanged:: 1.16.3
+            Made default False in response to CVE-2019-6446.
+
+    pickle_kwargs : dict
+        Additional keyword arguments to pass to pickle.load. These are only
+        useful when loading object arrays saved on Python 2 when using
+        Python 3.
+    max_header_size : int, optional
+        Maximum allowed size of the header.  Large headers may not be safe
+        to load securely and thus require explicitly passing a larger value.
+        See :py:func:`ast.literal_eval()` for details.
+        This option is ignored when `allow_pickle` is passed.  In that case
+        the file is by definition trusted and the limit is unnecessary.
+
+    Returns
+    -------
+    array : ndarray
+        The array from the data on disk.
+
+    Raises
+    ------
+    ValueError
+        If the data is invalid, or allow_pickle=False and the file contains
+        an object array.
+
+    """
+    if allow_pickle:
+        # Effectively ignore max_header_size, since `allow_pickle` indicates
+        # that the input is fully trusted.
+        max_header_size = 2**64
+
+    version = read_magic(fp)
+    _check_version(version)
+    shape, fortran_order, dtype = _read_array_header(
+            fp, version, max_header_size=max_header_size)
+    if len(shape) == 0:
+        count = 1
+    else:
+        count = numpy.multiply.reduce(shape, dtype=numpy.int64)
+
+    # Now read the actual data.
+    if dtype.hasobject:
+        # The array contained Python objects. We need to unpickle the data.
+        if not allow_pickle:
+            raise ValueError("Object arrays cannot be loaded when "
+                             "allow_pickle=False")
+        if pickle_kwargs is None:
+            pickle_kwargs = {}
+        try:
+            array = pickle.load(fp, **pickle_kwargs)
+        except UnicodeError as err:
+            # Friendlier error message
+            raise UnicodeError("Unpickling a python object failed: %r\n"
+                               "You may need to pass the encoding= option "
+                               "to numpy.load" % (err,)) from err
+    else:
+        if isfileobj(fp):
+            # We can use the fast fromfile() function.
+            array = numpy.fromfile(fp, dtype=dtype, count=count)
+        else:
+            # This is not a real file. We have to read it the
+            # memory-intensive way.
+            # crc32 module fails on reads greater than 2 ** 32 bytes,
+            # breaking large reads from gzip streams. Chunk reads to
+            # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
+            # of the read. In non-chunked case count < max_read_count, so
+            # only one read is performed.
+
+            # Use np.ndarray instead of np.empty since the latter does
+            # not correctly instantiate zero-width string dtypes; see
+            # https://github.com/numpy/numpy/pull/6430
+            array = numpy.ndarray(count, dtype=dtype)
+
+            if dtype.itemsize > 0:
+                # If dtype.itemsize == 0 then there's nothing more to read
+                max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
+
+                for i in range(0, count, max_read_count):
+                    read_count = min(max_read_count, count - i)
+                    read_size = int(read_count * dtype.itemsize)
+                    data = _read_bytes(fp, read_size, "array data")
+                    array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
+                                                             count=read_count)
+
+        if fortran_order:
+            array.shape = shape[::-1]
+            array = array.transpose()
+        else:
+            array.shape = shape
+
+    return array
+
+
+def open_memmap(filename, mode='r+', dtype=None, shape=None,
+                fortran_order=False, version=None, *,
+                max_header_size=_MAX_HEADER_SIZE):
+    """
+    Open a .npy file as a memory-mapped array.
+
+    This may be used to read an existing file or create a new one.
+
+    Parameters
+    ----------
+    filename : str or path-like
+        The name of the file on disk.  This may *not* be a file-like
+        object.
+    mode : str, optional
+        The mode in which to open the file; the default is 'r+'.  In
+        addition to the standard file modes, 'c' is also accepted to mean
+        "copy on write."  See `memmap` for the available mode strings.
+    dtype : data-type, optional
+        The data type of the array if we are creating a new file in "write"
+        mode; otherwise, `dtype` is ignored.  The default value is None, which
+        results in a data-type of `float64`.
+    shape : tuple of int
+        The shape of the array if we are creating a new file in "write"
+        mode, in which case this parameter is required.  Otherwise, this
+        parameter is ignored and is thus optional.
+    fortran_order : bool, optional
+        Whether the array should be Fortran-contiguous (True) or
+        C-contiguous (False, the default) if we are creating a new file in
+        "write" mode.
+    version : tuple of int (major, minor) or None
+        If the mode is a "write" mode, then this is the version of the file
+        format used to create the file.  None means use the oldest
+        supported version that is able to store the data.  Default: None
+    max_header_size : int, optional
+        Maximum allowed size of the header.  Large headers may not be safe
+        to load securely and thus require explicitly passing a larger value.
+        See :py:func:`ast.literal_eval()` for details.
+
+    Returns
+    -------
+    marray : memmap
+        The memory-mapped array.
+
+    Raises
+    ------
+    ValueError
+        If the data or the mode is invalid.
+    OSError
+        If the file is not found or cannot be opened correctly.
+
+    See Also
+    --------
+    numpy.memmap
+
+    """
+    if isfileobj(filename):
+        raise ValueError("Filename must be a string or a path-like object."
+                         "  Memmap cannot use existing file handles.")
+
+    if 'w' in mode:
+        # We are creating the file, not reading it.
+        # Check if we ought to create the file.
+        _check_version(version)
+        # Ensure that the given dtype is an authentic dtype object rather
+        # than just something that can be interpreted as a dtype object.
+        dtype = numpy.dtype(dtype)
+        if dtype.hasobject:
+            msg = "Array can't be memory-mapped: Python objects in dtype."
+            raise ValueError(msg)
+        d = dict(
+            descr=dtype_to_descr(dtype),
+            fortran_order=fortran_order,
+            shape=shape,
+        )
+        # If we got here, then it should be safe to create the file.
+        with open(os_fspath(filename), mode+'b') as fp:
+            _write_array_header(fp, d, version)
+            offset = fp.tell()
+    else:
+        # Read the header of the file first.
+        with open(os_fspath(filename), 'rb') as fp:
+            version = read_magic(fp)
+            _check_version(version)
+
+            shape, fortran_order, dtype = _read_array_header(
+                    fp, version, max_header_size=max_header_size)
+            if dtype.hasobject:
+                msg = "Array can't be memory-mapped: Python objects in dtype."
+                raise ValueError(msg)
+            offset = fp.tell()
+
+    if fortran_order:
+        order = 'F'
+    else:
+        order = 'C'
+
+    # We need to change a write-only mode to a read-write mode since we've
+    # already written data to the file.
+    if mode == 'w+':
+        mode = 'r+'
+
+    marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
+        mode=mode, offset=offset)
+
+    return marray
+
+
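+# Illustrative sketch of creating a memory-mapped ``.npy`` file and reopening
+# it read-only (the path is an assumption; any writable location works):
+#
+#     >>> import numpy as np
+#     >>> from numpy.lib.format import open_memmap
+#     >>> m = open_memmap('/tmp/example.npy', mode='w+',
+#     ...                 dtype=np.float64, shape=(3, 4))
+#     >>> m[:] = 1.0
+#     >>> m.flush()
+#     >>> open_memmap('/tmp/example.npy', mode='r').shape
+#     (3, 4)
+
+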
+def _read_bytes(fp, size, error_template="ran out of data"):
+    """
+    Read from file-like object until size bytes are read.
+    Raises ValueError if EOF is encountered before size bytes are read.
+    Non-blocking objects are only supported if they derive from io objects.
+
+    Required because e.g. ZipExtFile in Python 2.6 can return less data than
+    requested.
+    """
+    data = bytes()
+    while True:
+        # io files (default in python3) return None or raise on
+        # would-block, python2 file will truncate, probably nothing can be
+        # done about that.  note that regular files can't be non-blocking
+        try:
+            r = fp.read(size - len(data))
+            data += r
+            if len(r) == 0 or len(data) == size:
+                break
+        except BlockingIOError:
+            pass
+    if len(data) != size:
+        msg = "EOF: reading %s, expected %d bytes got %d"
+        raise ValueError(msg % (error_template, size, len(data)))
+    else:
+        return data
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/format.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/format.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..a4468f52f4646b8b9413f279b09f85cd201aaf51
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/format.pyi
@@ -0,0 +1,22 @@
+from typing import Any, Literal, Final
+
+__all__: list[str]
+
+EXPECTED_KEYS: Final[set[str]]
+MAGIC_PREFIX: Final[bytes]
+MAGIC_LEN: Literal[8]
+ARRAY_ALIGN: Literal[64]
+BUFFER_SIZE: Literal[262144]  # 2**18
+
+def magic(major, minor): ...
+def read_magic(fp): ...
+def dtype_to_descr(dtype): ...
+def descr_to_dtype(descr): ...
+def header_data_from_array_1_0(array): ...
+def write_array_header_1_0(fp, d): ...
+def write_array_header_2_0(fp, d): ...
+def read_array_header_1_0(fp): ...
+def read_array_header_2_0(fp): ...
+def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...
+def read_array(fp, allow_pickle=..., pickle_kwargs=...): ...
+def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/function_base.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/function_base.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..687e4ab1708bf2667f1ff4fc8344bab9786cefc9
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/function_base.pyi
@@ -0,0 +1,697 @@
+import sys
+from collections.abc import Sequence, Iterator, Callable, Iterable
+from typing import (
+    Literal as L,
+    Any,
+    TypeVar,
+    overload,
+    Protocol,
+    SupportsIndex,
+    SupportsInt,
+)
+
+if sys.version_info >= (3, 10):
+    from typing import TypeGuard
+else:
+    from typing_extensions import TypeGuard
+
+from numpy import (
+    vectorize as vectorize,
+    ufunc,
+    generic,
+    floating,
+    complexfloating,
+    intp,
+    float64,
+    complex128,
+    timedelta64,
+    datetime64,
+    object_,
+    _OrderKACF,
+)
+
+from numpy._typing import (
+    NDArray,
+    ArrayLike,
+    DTypeLike,
+    _ShapeLike,
+    _ScalarLike_co,
+    _DTypeLike,
+    _ArrayLike,
+    _ArrayLikeInt_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeTD64_co,
+    _ArrayLikeDT64_co,
+    _ArrayLikeObject_co,
+    _FloatLike_co,
+    _ComplexLike_co,
+)
+
+from numpy.core.function_base import (
+    add_newdoc as add_newdoc,
+)
+
+from numpy.core.multiarray import (
+    add_docstring as add_docstring,
+    bincount as bincount,
+)
+
+from numpy.core.umath import _add_newdoc_ufunc
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+_2Tuple = tuple[_T, _T]
+
+class _TrimZerosSequence(Protocol[_T_co]):
+    def __len__(self) -> int: ...
+    def __getitem__(self, key: slice, /) -> _T_co: ...
+    def __iter__(self) -> Iterator[Any]: ...
+
+class _SupportsWriteFlush(Protocol):
+    def write(self, s: str, /) -> object: ...
+    def flush(self) -> object: ...
+
+__all__: list[str]
+
+# NOTE: This is in reality a re-export of `np.core.umath._add_newdoc_ufunc`
+def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None: ...
+
+@overload
+def rot90(
+    m: _ArrayLike[_SCT],
+    k: int = ...,
+    axes: tuple[int, int] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def rot90(
+    m: ArrayLike,
+    k: int = ...,
+    axes: tuple[int, int] = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def flip(m: _SCT, axis: None = ...) -> _SCT: ...
+@overload
+def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
+@overload
+def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
+@overload
+def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ...
+
+def iterable(y: object) -> TypeGuard[Iterable[Any]]: ...
+
+@overload
+def average(
+    a: _ArrayLikeFloat_co,
+    axis: None = ...,
+    weights: None | _ArrayLikeFloat_co= ...,
+    returned: L[False] = ...,
+    keepdims: L[False] = ...,
+) -> floating[Any]: ...
+@overload
+def average(
+    a: _ArrayLikeComplex_co,
+    axis: None = ...,
+    weights: None | _ArrayLikeComplex_co = ...,
+    returned: L[False] = ...,
+    keepdims: L[False] = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def average(
+    a: _ArrayLikeObject_co,
+    axis: None = ...,
+    weights: None | Any = ...,
+    returned: L[False] = ...,
+    keepdims: L[False] = ...,
+) -> Any: ...
+@overload
+def average(
+    a: _ArrayLikeFloat_co,
+    axis: None = ...,
+    weights: None | _ArrayLikeFloat_co= ...,
+    returned: L[True] = ...,
+    keepdims: L[False] = ...,
+) -> _2Tuple[floating[Any]]: ...
+@overload
+def average(
+    a: _ArrayLikeComplex_co,
+    axis: None = ...,
+    weights: None | _ArrayLikeComplex_co = ...,
+    returned: L[True] = ...,
+    keepdims: L[False] = ...,
+) -> _2Tuple[complexfloating[Any, Any]]: ...
+@overload
+def average(
+    a: _ArrayLikeObject_co,
+    axis: None = ...,
+    weights: None | Any = ...,
+    returned: L[True] = ...,
+    keepdims: L[False] = ...,
+) -> _2Tuple[Any]: ...
+@overload
+def average(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    weights: None | Any = ...,
+    returned: L[False] = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+@overload
+def average(
+    a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    weights: None | Any = ...,
+    returned: L[True] = ...,
+    keepdims: bool = ...,
+) -> _2Tuple[Any]: ...
+
+@overload
+def asarray_chkfinite(
+    a: _ArrayLike[_SCT],
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray_chkfinite(
+    a: object,
+    dtype: None = ...,
+    order: _OrderKACF = ...,
+) -> NDArray[Any]: ...
+@overload
+def asarray_chkfinite(
+    a: Any,
+    dtype: _DTypeLike[_SCT],
+    order: _OrderKACF = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def asarray_chkfinite(
+    a: Any,
+    dtype: DTypeLike,
+    order: _OrderKACF = ...,
+) -> NDArray[Any]: ...
+
+# TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate`
+# xref python/mypy#8645
+@overload
+def piecewise(
+    x: _ArrayLike[_SCT],
+    condlist: ArrayLike,
+    funclist: Sequence[Any | Callable[..., Any]],
+    *args: Any,
+    **kw: Any,
+) -> NDArray[_SCT]: ...
+@overload
+def piecewise(
+    x: ArrayLike,
+    condlist: ArrayLike,
+    funclist: Sequence[Any | Callable[..., Any]],
+    *args: Any,
+    **kw: Any,
+) -> NDArray[Any]: ...
+
+def select(
+    condlist: Sequence[ArrayLike],
+    choicelist: Sequence[ArrayLike],
+    default: ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def copy(
+    a: _ArrayType,
+    order: _OrderKACF,
+    subok: L[True],
+) -> _ArrayType: ...
+@overload
+def copy(
+    a: _ArrayType,
+    order: _OrderKACF = ...,
+    *,
+    subok: L[True],
+) -> _ArrayType: ...
+@overload
+def copy(
+    a: _ArrayLike[_SCT],
+    order: _OrderKACF = ...,
+    subok: L[False] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def copy(
+    a: ArrayLike,
+    order: _OrderKACF = ...,
+    subok: L[False] = ...,
+) -> NDArray[Any]: ...
+
+def gradient(
+    f: ArrayLike,
+    *varargs: ArrayLike,
+    axis: None | _ShapeLike = ...,
+    edge_order: L[1, 2] = ...,
+) -> Any: ...
+
+@overload
+def diff(
+    a: _T,
+    n: L[0],
+    axis: SupportsIndex = ...,
+    prepend: ArrayLike = ...,
+    append: ArrayLike = ...,
+) -> _T: ...
+@overload
+def diff(
+    a: ArrayLike,
+    n: int = ...,
+    axis: SupportsIndex = ...,
+    prepend: ArrayLike = ...,
+    append: ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def interp(
+    x: _ArrayLikeFloat_co,
+    xp: _ArrayLikeFloat_co,
+    fp: _ArrayLikeFloat_co,
+    left: None | _FloatLike_co = ...,
+    right: None | _FloatLike_co = ...,
+    period: None | _FloatLike_co = ...,
+) -> NDArray[float64]: ...
+@overload
+def interp(
+    x: _ArrayLikeFloat_co,
+    xp: _ArrayLikeFloat_co,
+    fp: _ArrayLikeComplex_co,
+    left: None | _ComplexLike_co = ...,
+    right: None | _ComplexLike_co = ...,
+    period: None | _FloatLike_co = ...,
+) -> NDArray[complex128]: ...
+
+@overload
+def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ...
+@overload
+def angle(z: object_, deg: bool = ...) -> Any: ...
+@overload
+def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating[Any]]: ...
+@overload
+def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ...
+
+@overload
+def unwrap(
+    p: _ArrayLikeFloat_co,
+    discont: None | float = ...,
+    axis: int = ...,
+    *,
+    period: float = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def unwrap(
+    p: _ArrayLikeObject_co,
+    discont: None | float = ...,
+    axis: int = ...,
+    *,
+    period: float = ...,
+) -> NDArray[object_]: ...
+
+def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ...
+
+def trim_zeros(
+    filt: _TrimZerosSequence[_T],
+    trim: L["f", "b", "fb", "bf"] = ...,
+) -> _T: ...
+
+@overload
+def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...
+
+def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...
+
+def disp(
+    mesg: object,
+    device: None | _SupportsWriteFlush = ...,
+    linefeed: bool = ...,
+) -> None: ...
+
+@overload
+def cov(
+    m: _ArrayLikeFloat_co,
+    y: None | _ArrayLikeFloat_co = ...,
+    rowvar: bool = ...,
+    bias: bool = ...,
+    ddof: None | SupportsIndex | SupportsInt = ...,
+    fweights: None | ArrayLike = ...,
+    aweights: None | ArrayLike = ...,
+    *,
+    dtype: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def cov(
+    m: _ArrayLikeComplex_co,
+    y: None | _ArrayLikeComplex_co = ...,
+    rowvar: bool = ...,
+    bias: bool = ...,
+    ddof: None | SupportsIndex | SupportsInt = ...,
+    fweights: None | ArrayLike = ...,
+    aweights: None | ArrayLike = ...,
+    *,
+    dtype: None = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def cov(
+    m: _ArrayLikeComplex_co,
+    y: None | _ArrayLikeComplex_co = ...,
+    rowvar: bool = ...,
+    bias: bool = ...,
+    ddof: None | SupportsIndex | SupportsInt = ...,
+    fweights: None | ArrayLike = ...,
+    aweights: None | ArrayLike = ...,
+    *,
+    dtype: _DTypeLike[_SCT],
+) -> NDArray[_SCT]: ...
+@overload
+def cov(
+    m: _ArrayLikeComplex_co,
+    y: None | _ArrayLikeComplex_co = ...,
+    rowvar: bool = ...,
+    bias: bool = ...,
+    ddof: None | SupportsIndex | SupportsInt = ...,
+    fweights: None | ArrayLike = ...,
+    aweights: None | ArrayLike = ...,
+    *,
+    dtype: DTypeLike,
+) -> NDArray[Any]: ...
+
+# NOTE `bias` and `ddof` have been deprecated
+@overload
+def corrcoef(
+    m: _ArrayLikeFloat_co,
+    y: None | _ArrayLikeFloat_co = ...,
+    rowvar: bool = ...,
+    *,
+    dtype: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def corrcoef(
+    m: _ArrayLikeComplex_co,
+    y: None | _ArrayLikeComplex_co = ...,
+    rowvar: bool = ...,
+    *,
+    dtype: None = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def corrcoef(
+    m: _ArrayLikeComplex_co,
+    y: None | _ArrayLikeComplex_co = ...,
+    rowvar: bool = ...,
+    *,
+    dtype: _DTypeLike[_SCT],
+) -> NDArray[_SCT]: ...
+@overload
+def corrcoef(
+    m: _ArrayLikeComplex_co,
+    y: None | _ArrayLikeComplex_co = ...,
+    rowvar: bool = ...,
+    *,
+    dtype: DTypeLike,
+) -> NDArray[Any]: ...
+
+def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+
+def kaiser(
+    M: _FloatLike_co,
+    beta: _FloatLike_co,
+) -> NDArray[floating[Any]]: ...
+
+@overload
+def sinc(x: _FloatLike_co) -> floating[Any]: ...
+@overload
+def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+@overload
+def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+@overload
+def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+# NOTE: Deprecated
+# def msort(a: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def median(
+    a: _ArrayLikeFloat_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    keepdims: L[False] = ...,
+) -> floating[Any]: ...
+@overload
+def median(
+    a: _ArrayLikeComplex_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    keepdims: L[False] = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def median(
+    a: _ArrayLikeTD64_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    keepdims: L[False] = ...,
+) -> timedelta64: ...
+@overload
+def median(
+    a: _ArrayLikeObject_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    keepdims: L[False] = ...,
+) -> Any: ...
+@overload
+def median(
+    a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+@overload
+def median(
+    a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+    axis: None | _ShapeLike = ...,
+    out: _ArrayType = ...,
+    overwrite_input: bool = ...,
+    keepdims: bool = ...,
+) -> _ArrayType: ...
+
+_MethodKind = L[
+    "inverted_cdf",
+    "averaged_inverted_cdf",
+    "closest_observation",
+    "interpolated_inverted_cdf",
+    "hazen",
+    "weibull",
+    "linear",
+    "median_unbiased",
+    "normal_unbiased",
+    "lower",
+    "higher",
+    "midpoint",
+    "nearest",
+]
+
+@overload
+def percentile(
+    a: _ArrayLikeFloat_co,
+    q: _FloatLike_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: L[False] = ...,
+) -> floating[Any]: ...
+@overload
+def percentile(
+    a: _ArrayLikeComplex_co,
+    q: _FloatLike_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: L[False] = ...,
+) -> complexfloating[Any, Any]: ...
+@overload
+def percentile(
+    a: _ArrayLikeTD64_co,
+    q: _FloatLike_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: L[False] = ...,
+) -> timedelta64: ...
+@overload
+def percentile(
+    a: _ArrayLikeDT64_co,
+    q: _FloatLike_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: L[False] = ...,
+) -> datetime64: ...
+@overload
+def percentile(
+    a: _ArrayLikeObject_co,
+    q: _FloatLike_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: L[False] = ...,
+) -> Any: ...
+@overload
+def percentile(
+    a: _ArrayLikeFloat_co,
+    q: _ArrayLikeFloat_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: L[False] = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def percentile(
+    a: _ArrayLikeComplex_co,
+    q: _ArrayLikeFloat_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: L[False] = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def percentile(
+    a: _ArrayLikeTD64_co,
+    q: _ArrayLikeFloat_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: L[False] = ...,
+) -> NDArray[timedelta64]: ...
+@overload
+def percentile(
+    a: _ArrayLikeDT64_co,
+    q: _ArrayLikeFloat_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: L[False] = ...,
+) -> NDArray[datetime64]: ...
+@overload
+def percentile(
+    a: _ArrayLikeObject_co,
+    q: _ArrayLikeFloat_co,
+    axis: None = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: L[False] = ...,
+) -> NDArray[object_]: ...
+@overload
+def percentile(
+    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+    q: _ArrayLikeFloat_co,
+    axis: None | _ShapeLike = ...,
+    out: None = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: bool = ...,
+) -> Any: ...
+@overload
+def percentile(
+    a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+    q: _ArrayLikeFloat_co,
+    axis: None | _ShapeLike = ...,
+    out: _ArrayType = ...,
+    overwrite_input: bool = ...,
+    method: _MethodKind = ...,
+    keepdims: bool = ...,
+) -> _ArrayType: ...
+
+# NOTE: Not an alias, but they do have identical signatures
+# (that we can reuse)
+quantile = percentile
+
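+# Sketch of how a type checker is expected to resolve the overloads above
+# (illustrative only): a scalar ``q`` with floating-point input matches the
+# ``floating[Any]`` overload, while an array-like ``q`` matches the
+# ``NDArray[floating[Any]]`` overload.
+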
+# TODO: Returns a scalar for <= 1D array-likes; returns an ndarray otherwise
+def trapz(
+    y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+    x: None | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co = ...,
+    dx: float = ...,
+    axis: SupportsIndex = ...,
+) -> Any: ...
+
+def meshgrid(
+    *xi: ArrayLike,
+    copy: bool = ...,
+    sparse: bool = ...,
+    indexing: L["xy", "ij"] = ...,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def delete(
+    arr: _ArrayLike[_SCT],
+    obj: slice | _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def delete(
+    arr: ArrayLike,
+    obj: slice | _ArrayLikeInt_co,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def insert(
+    arr: _ArrayLike[_SCT],
+    obj: slice | _ArrayLikeInt_co,
+    values: ArrayLike,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def insert(
+    arr: ArrayLike,
+    obj: slice | _ArrayLikeInt_co,
+    values: ArrayLike,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+def append(
+    arr: ArrayLike,
+    values: ArrayLike,
+    axis: None | SupportsIndex = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def digitize(
+    x: _FloatLike_co,
+    bins: _ArrayLikeFloat_co,
+    right: bool = ...,
+) -> intp: ...
+@overload
+def digitize(
+    x: _ArrayLikeFloat_co,
+    bins: _ArrayLikeFloat_co,
+    right: bool = ...,
+) -> NDArray[intp]: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/histograms.py b/.venv/lib/python3.11/site-packages/numpy/lib/histograms.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ac65b726928bb21432a7a6edcbf73fbeaedb137
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/histograms.py
@@ -0,0 +1,1072 @@
+"""
+Histogram-related functions
+"""
+import contextlib
+import functools
+import operator
+import warnings
+
+import numpy as np
+from numpy.core import overrides
+
+__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+# range is a keyword argument to many functions, so save the builtin so they can
+# use it.
+_range = range
+
+
+def _ptp(x):
+    """Peak-to-peak value of x.
+
+    This implementation avoids the problem of signed integer arrays having a
+    peak-to-peak value that cannot be represented with the array's data type.
+    This function returns an unsigned value for signed integer arrays.
+    """
+    return _unsigned_subtract(x.max(), x.min())
+
+
+def _hist_bin_sqrt(x, range):
+    """
+    Square root histogram bin estimator.
+
+    Bin width is inversely proportional to the square root of the data size.
+    Used by many programs for its simplicity.
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    return _ptp(x) / np.sqrt(x.size)
+
+
+def _hist_bin_sturges(x, range):
+    """
+    Sturges histogram bin estimator.
+
+    A very simplistic estimator based on the assumption of normality of
+    the data. This estimator has poor performance for non-normal data,
+    which becomes especially obvious for large data sets. The estimate
+    depends only on the size of the data.
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    return _ptp(x) / (np.log2(x.size) + 1.0)
+
+
+def _hist_bin_rice(x, range):
+    """
+    Rice histogram bin estimator.
+
+    Another simple estimator with no normality assumption. It has better
+    performance for large data than Sturges, but tends to overestimate
+    the number of bins. The number of bins is proportional to the cube
+    root of data size (asymptotically optimal). The estimate depends
+    only on the size of the data.
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
+
+
+def _hist_bin_scott(x, range):
+    """
+    Scott histogram bin estimator.
+
+    The binwidth is proportional to the standard deviation of the data
+    and inversely proportional to the cube root of data size
+    (asymptotically optimal).
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
+
+
+def _hist_bin_stone(x, range):
+    """
+    Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
+
+    The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
+    The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
+    https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
+
+    This paper by Stone appears to be the origination of this rule.
+    http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+    range : (float, float)
+        The lower and upper range of the bins.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+
+    n = x.size
+    ptp_x = _ptp(x)
+    if n <= 1 or ptp_x == 0:
+        return 0
+
+    def jhat(nbins):
+        hh = ptp_x / nbins
+        p_k = np.histogram(x, bins=nbins, range=range)[0] / n
+        return (2 - (n + 1) * p_k.dot(p_k)) / hh
+
+    nbins_upper_bound = max(100, int(np.sqrt(n)))
+    nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
+    if nbins == nbins_upper_bound:
+        warnings.warn("The number of bins estimated may be suboptimal.",
+                      RuntimeWarning, stacklevel=3)
+    return ptp_x / nbins
+
+
+def _hist_bin_doane(x, range):
+    """
+    Doane's histogram bin estimator.
+
+    Improved version of Sturges' formula which works better for
+    non-normal data. See
+    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    if x.size > 2:
+        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
+        sigma = np.std(x)
+        if sigma > 0.0:
+            # These three operations add up to
+            # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
+            # but use only one temp array instead of three
+            temp = x - np.mean(x)
+            np.true_divide(temp, sigma, temp)
+            np.power(temp, 3, temp)
+            g1 = np.mean(temp)
+            return _ptp(x) / (1.0 + np.log2(x.size) +
+                                    np.log2(1.0 + np.absolute(g1) / sg1))
+    return 0.0
+
+
+def _hist_bin_fd(x, range):
+    """
+    The Freedman-Diaconis histogram bin estimator.
+
+    The Freedman-Diaconis rule uses interquartile range (IQR) to
+    estimate binwidth. It is considered a variation of the Scott rule
+    with more robustness as the IQR is less affected by outliers than
+    the standard deviation. However, the IQR depends on fewer points
+    than the standard deviation, so it is less accurate, especially for
+    long tailed distributions.
+
+    If the IQR is 0, this function returns 0 for the bin width.
+    Binwidth is inversely proportional to the cube root of data size
+    (asymptotically optimal).
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+    """
+    del range  # unused
+    iqr = np.subtract(*np.percentile(x, [75, 25]))
+    return 2.0 * iqr * x.size ** (-1.0 / 3.0)
+
+
+def _hist_bin_auto(x, range):
+    """
+    Histogram bin estimator that uses the minimum width of the
+    Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.
+    If the bin width from the FD estimator is 0, the Sturges estimator is used.
+
+    The FD estimator is usually the most robust method, but its width
+    estimate tends to be too large for small `x` and bad for data with limited
+    variance. The Sturges estimator is quite good for small (<1000) datasets
+    and is the default in the R language. This method gives good off-the-shelf
+    behaviour.
+
+    .. versionchanged:: 1.15.0
+    If there is limited variance the IQR can be 0, which results in the
+    FD bin width being 0 too. This is not a valid bin width, so
+    ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
+    If the IQR is 0, it's unlikely any variance-based estimators will be of
+    use, so we revert to the Sturges estimator, which only uses the size of the
+    dataset in its calculation.
+
+    Parameters
+    ----------
+    x : array_like
+        Input data that is to be histogrammed, trimmed to range. May not
+        be empty.
+
+    Returns
+    -------
+    h : An estimate of the optimal bin width for the given data.
+
+    See Also
+    --------
+    _hist_bin_fd, _hist_bin_sturges
+    """
+    fd_bw = _hist_bin_fd(x, range)
+    sturges_bw = _hist_bin_sturges(x, range)
+    del range  # unused
+    if fd_bw:
+        return min(fd_bw, sturges_bw)
+    else:
+        # limited variance, so we return a len dependent bw estimator
+        return sturges_bw
+
+# Private dict initialized at module load time
+_hist_bin_selectors = {'stone': _hist_bin_stone,
+                       'auto': _hist_bin_auto,
+                       'doane': _hist_bin_doane,
+                       'fd': _hist_bin_fd,
+                       'rice': _hist_bin_rice,
+                       'scott': _hist_bin_scott,
+                       'sqrt': _hist_bin_sqrt,
+                       'sturges': _hist_bin_sturges}
+
+
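+# The keys above are the strings accepted as ``bins=`` by ``histogram`` and
+# ``histogram_bin_edges``.  A small illustrative check (the value assumes the
+# Sturges formula as implemented above):
+#
+#     >>> x = np.arange(1000.)
+#     >>> len(np.histogram_bin_edges(x, bins='sturges')) - 1   # number of bins
+#     11
+
+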
+def _ravel_and_check_weights(a, weights):
+    """ Check a and weights have matching shapes, and ravel both """
+    a = np.asarray(a)
+
+    # Ensure that the array is a "subtractable" dtype
+    if a.dtype == np.bool_:
+        warnings.warn("Converting input from {} to {} for compatibility."
+                      .format(a.dtype, np.uint8),
+                      RuntimeWarning, stacklevel=3)
+        a = a.astype(np.uint8)
+
+    if weights is not None:
+        weights = np.asarray(weights)
+        if weights.shape != a.shape:
+            raise ValueError(
+                'weights should have the same shape as a.')
+        weights = weights.ravel()
+    a = a.ravel()
+    return a, weights
+
+
+def _get_outer_edges(a, range):
+    """
+    Determine the outer bin edges to use, from either the data or the range
+    argument
+    """
+    if range is not None:
+        first_edge, last_edge = range
+        if first_edge > last_edge:
+            raise ValueError(
+                'max must be larger than min in range parameter.')
+        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+            raise ValueError(
+                "supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
+    elif a.size == 0:
+        # handle empty arrays. Can't determine range, so use 0-1.
+        first_edge, last_edge = 0, 1
+    else:
+        first_edge, last_edge = a.min(), a.max()
+        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+            raise ValueError(
+                "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
+
+    # expand empty range to avoid divide by zero
+    if first_edge == last_edge:
+        first_edge = first_edge - 0.5
+        last_edge = last_edge + 0.5
+
+    return first_edge, last_edge
+
+
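+# Illustrative sketch: a constant array gets its empty range expanded by half
+# a unit on each side so that a single non-degenerate bin can be built:
+#
+#     >>> _get_outer_edges(np.array([3, 3, 3]), None)
+#     (2.5, 3.5)
+
+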
+def _unsigned_subtract(a, b):
+    """
+    Subtract two values where a >= b, and produce an unsigned result
+
+    This is needed when finding the difference between the upper and lower
+    bound of an int16 histogram
+    """
+    # coerce to a single type
+    signed_to_unsigned = {
+        np.byte: np.ubyte,
+        np.short: np.ushort,
+        np.intc: np.uintc,
+        np.int_: np.uint,
+        np.longlong: np.ulonglong
+    }
+    dt = np.result_type(a, b)
+    try:
+        dt = signed_to_unsigned[dt.type]
+    except KeyError:
+        return np.subtract(a, b, dtype=dt)
+    else:
+        # we know the inputs are integers, and we are deliberately casting
+        # signed to unsigned
+        return np.subtract(a, b, casting='unsafe', dtype=dt)
+
+
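+# Illustrative sketch of why the unsigned cast matters: the full int8 range
+# does not fit in a signed int8 difference, but it does fit unsigned:
+#
+#     >>> int(_unsigned_subtract(np.int8(127), np.int8(-128)))
+#     255
+
+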
+def _get_bin_edges(a, bins, range, weights):
+    """
+    Computes the bins used internally by `histogram`.
+
+    Parameters
+    ==========
+    a : ndarray
+        Ravelled data array
+    bins, range
+        Forwarded arguments from `histogram`.
+    weights : ndarray, optional
+        Ravelled weights array, or None
+
+    Returns
+    =======
+    bin_edges : ndarray
+        Array of bin edges
+    uniform_bins : (Number, Number, int)
+        The lower bound, upper bound, and number of bins, used in the
+        optimized implementation of `histogram` that works on uniform bins.
+    """
+    # parse the overloaded bins argument
+    n_equal_bins = None
+    bin_edges = None
+
+    if isinstance(bins, str):
+        bin_name = bins
+        # if `bins` is a string for an automatic method,
+        # this will replace it with the number of bins calculated
+        if bin_name not in _hist_bin_selectors:
+            raise ValueError(
+                "{!r} is not a valid estimator for `bins`".format(bin_name))
+        if weights is not None:
+            raise TypeError("Automated estimation of the number of "
+                            "bins is not supported for weighted data")
+
+        first_edge, last_edge = _get_outer_edges(a, range)
+
+        # truncate the range if needed
+        if range is not None:
+            keep = (a >= first_edge)
+            keep &= (a <= last_edge)
+            if not np.logical_and.reduce(keep):
+                a = a[keep]
+
+        if a.size == 0:
+            n_equal_bins = 1
+        else:
+            # Do not call selectors on empty arrays
+            width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
+            if width:
+                n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
+            else:
+                # Width can be zero for some estimators, e.g. FD when
+                # the IQR of the data is zero.
+                n_equal_bins = 1
+
+    elif np.ndim(bins) == 0:
+        try:
+            n_equal_bins = operator.index(bins)
+        except TypeError as e:
+            raise TypeError(
+                '`bins` must be an integer, a string, or an array') from e
+        if n_equal_bins < 1:
+            raise ValueError('`bins` must be positive, when an integer')
+
+        first_edge, last_edge = _get_outer_edges(a, range)
+
+    elif np.ndim(bins) == 1:
+        bin_edges = np.asarray(bins)
+        if np.any(bin_edges[:-1] > bin_edges[1:]):
+            raise ValueError(
+                '`bins` must increase monotonically, when an array')
+
+    else:
+        raise ValueError('`bins` must be 1d, when an array')
+
+    if n_equal_bins is not None:
+        # gh-10322 means that type resolution rules are dependent on array
+        # shapes. To avoid this causing problems, we pick a type now and stick
+        # with it throughout.
+        bin_type = np.result_type(first_edge, last_edge, a)
+        if np.issubdtype(bin_type, np.integer):
+            bin_type = np.result_type(bin_type, float)
+
+        # bin edges must be computed
+        bin_edges = np.linspace(
+            first_edge, last_edge, n_equal_bins + 1,
+            endpoint=True, dtype=bin_type)
+        return bin_edges, (first_edge, last_edge, n_equal_bins)
+    else:
+        return bin_edges, None
+
+
+def _search_sorted_inclusive(a, v):
+    """
+    Like `searchsorted`, but where the last item in `v` is placed on the right.
+
+    In the context of a histogram, this makes the last bin edge inclusive
+    """
+    return np.concatenate((
+        a.searchsorted(v[:-1], 'left'),
+        a.searchsorted(v[-1:], 'right')
+    ))
+
+
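+# Illustrative sketch of the inclusive right edge (values equal to the last
+# edge land in the final bin):
+#
+#     >>> a = np.array([1, 2, 3, 4])
+#     >>> _search_sorted_inclusive(a, np.array([1, 2, 4]))
+#     array([0, 1, 4])
+
+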
+def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
+    return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_bin_edges_dispatcher)
+def histogram_bin_edges(a, bins=10, range=None, weights=None):
+    r"""
+    Function to calculate only the edges of the bins used by the `histogram`
+    function.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data. The histogram is computed over the flattened array.
+    bins : int or sequence of scalars or str, optional
+        If `bins` is an int, it defines the number of equal-width
+        bins in the given range (10, by default). If `bins` is a
+        sequence, it defines the bin edges, including the rightmost
+        edge, allowing for non-uniform bin widths.
+
+        If `bins` is a string from the list below, `histogram_bin_edges` will use
+        the method chosen to calculate the optimal bin width and
+        consequently the number of bins (see `Notes` for more detail on
+        the estimators) from the data that falls within the requested
+        range. While the bin width will be optimal for the actual data
+        in the range, the number of bins will be computed to fill the
+        entire range, including the empty portions. For visualisation,
+        using the 'auto' option is suggested. Weighted data is not
+        supported for automated bin size selection.
+
+        'auto'
+            Maximum of the 'sturges' and 'fd' estimators. Provides good
+            all around performance.
+
+        'fd' (Freedman Diaconis Estimator)
+            Robust (resilient to outliers) estimator that takes into
+            account data variability and data size.
+
+        'doane'
+            An improved version of Sturges' estimator that works better
+            with non-normal datasets.
+
+        'scott'
+            Less robust estimator that takes into account data variability
+            and data size.
+
+        'stone'
+            Estimator based on leave-one-out cross-validation estimate of
+            the integrated squared error. Can be regarded as a generalization
+            of Scott's rule.
+
+        'rice'
+            Estimator does not take variability into account, only data
+            size. Commonly overestimates number of bins required.
+
+        'sturges'
+            R's default method, only accounts for data size. Only
+            optimal for gaussian data and underestimates number of bins
+            for large non-gaussian datasets.
+
+        'sqrt'
+            Square root (of data size) estimator, used by Excel and
+            other programs for its speed and simplicity.
+
+    range : (float, float), optional
+        The lower and upper range of the bins.  If not provided, range
+        is simply ``(a.min(), a.max())``.  Values outside the range are
+        ignored. The first element of the range must be less than or
+        equal to the second. `range` affects the automatic bin
+        computation as well. While bin width is computed to be optimal
+        based on the actual data within `range`, the bin count will fill
+        the entire range including portions containing no data.
+
+    weights : array_like, optional
+        An array of weights, of the same shape as `a`.  Each value in
+        `a` only contributes its associated weight towards the bin count
+        (instead of 1). This is currently not used by any of the bin estimators,
+        but may be in the future.
+
+    Returns
+    -------
+    bin_edges : array of dtype float
+        The edges to pass into `histogram`
+
+    See Also
+    --------
+    histogram
+
+    Notes
+    -----
+    The methods to estimate the optimal number of bins are well founded
+    in literature, and are inspired by the choices R provides for
+    histogram visualisation. Note that having the number of bins
+    proportional to :math:`n^{1/3}` is asymptotically optimal, which is
+    why it appears in most estimators. These are simply plug-in methods
+    that give good starting points for number of bins. In the equations
+    below, :math:`h` is the binwidth and :math:`n_h` is the number of
+    bins. All estimators that compute bin counts are recast to bin width
+    using the `ptp` of the data. The final bin count is obtained from
+    ``np.round(np.ceil(range / h))``. The final bin width is often less
+    than what is returned by the estimators below.
+
+    'auto' (maximum of the 'sturges' and 'fd' estimators)
+        A compromise to get a good value. For small datasets the Sturges
+        value will usually be chosen, while larger datasets will usually
+        default to FD.  Avoids the overly conservative behaviour of FD
+        and Sturges for small and large datasets respectively.
+        Switchover point is usually :math:`a.size \approx 1000`.
+
+    'fd' (Freedman Diaconis Estimator)
+        .. math:: h = 2 \frac{IQR}{n^{1/3}}
+
+        The binwidth is proportional to the interquartile range (IQR)
+        and inversely proportional to cube root of a.size. Can be too
+        conservative for small datasets, but is quite good for large
+        datasets. The IQR is very robust to outliers.
+
+    'scott'
+        .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}}
+
+        The binwidth is proportional to the standard deviation of the
+        data and inversely proportional to cube root of ``x.size``. Can
+        be too conservative for small datasets, but is quite good for
+        large datasets. The standard deviation is not very robust to
+        outliers. Values are very similar to the Freedman-Diaconis
+        estimator in the absence of outliers.
+
+    'rice'
+        .. math:: n_h = 2n^{1/3}
+
+        The number of bins is only proportional to cube root of
+        ``a.size``. It tends to overestimate the number of bins and it
+        does not take into account data variability.
+
+    'sturges'
+        .. math:: n_h = \log _{2}(n) + 1
+
+        The number of bins is the base 2 log of ``a.size``.  This
+        estimator assumes normality of data and is too conservative for
+        larger, non-normal datasets. This is the default method in R's
+        ``hist`` method.
+
+    'doane'
+        .. math:: n_h = 1 + \log_{2}(n) +
+                        \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right)
+
+            g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right]
+
+            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
+
+        An improved version of Sturges' formula that produces better
+        estimates for non-normal datasets. This estimator attempts to
+        account for the skew of the data.
+
+    'sqrt'
+        .. math:: n_h = \sqrt n
+
+        The simplest and fastest estimator. Only takes into account the
+        data size.
+
+    Examples
+    --------
+    >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
+    >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
+    array([0.  , 0.25, 0.5 , 0.75, 1.  ])
+    >>> np.histogram_bin_edges(arr, bins=2)
+    array([0. , 2.5, 5. ])
+
+    For consistency with histogram, an array of pre-computed bins is
+    passed through unmodified:
+
+    >>> np.histogram_bin_edges(arr, [1, 2])
+    array([1, 2])
+
+    This function allows one set of bins to be computed, and reused across
+    multiple histograms:
+
+    >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
+    >>> shared_bins
+    array([0., 1., 2., 3., 4., 5.])
+
+    >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
+    >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
+    >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
+
+    >>> hist_0; hist_1
+    array([1, 1, 0, 1, 0])
+    array([2, 0, 1, 1, 2])
+
+    Which gives more easily comparable results than using separate bins for
+    each histogram:
+
+    >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
+    >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
+    >>> hist_0; hist_1
+    array([1, 1, 1])
+    array([2, 1, 1, 2])
+    >>> bins_0; bins_1
+    array([0., 1., 2., 3.])
+    array([0.  , 1.25, 2.5 , 3.75, 5.  ])
+
+    """
+    a, weights = _ravel_and_check_weights(a, weights)
+    bin_edges, _ = _get_bin_edges(a, bins, range, weights)
+    return bin_edges
+
+
+def _histogram_dispatcher(
+        a, bins=None, range=None, density=None, weights=None):
+    return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_dispatcher)
+def histogram(a, bins=10, range=None, density=None, weights=None):
+    r"""
+    Compute the histogram of a dataset.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data. The histogram is computed over the flattened array.
+    bins : int or sequence of scalars or str, optional
+        If `bins` is an int, it defines the number of equal-width
+        bins in the given range (10, by default). If `bins` is a
+        sequence, it defines a monotonically increasing array of bin edges,
+        including the rightmost edge, allowing for non-uniform bin widths.
+
+        .. versionadded:: 1.11.0
+
+        If `bins` is a string, it defines the method used to calculate the
+        optimal bin width, as defined by `histogram_bin_edges`.
+
+    range : (float, float), optional
+        The lower and upper range of the bins.  If not provided, range
+        is simply ``(a.min(), a.max())``.  Values outside the range are
+        ignored. The first element of the range must be less than or
+        equal to the second. `range` affects the automatic bin
+        computation as well. While bin width is computed to be optimal
+        based on the actual data within `range`, the bin count will fill
+        the entire range including portions containing no data.
+    weights : array_like, optional
+        An array of weights, of the same shape as `a`.  Each value in
+        `a` only contributes its associated weight towards the bin count
+        (instead of 1). If `density` is True, the weights are
+        normalized, so that the integral of the density over the range
+        remains 1.
+    density : bool, optional
+        If ``False``, the result will contain the number of samples in
+        each bin. If ``True``, the result is the value of the
+        probability *density* function at the bin, normalized such that
+        the *integral* over the range is 1. Note that the sum of the
+        histogram values will not be equal to 1 unless bins of unity
+        width are chosen; it is not a probability *mass* function.
+
+    Returns
+    -------
+    hist : array
+        The values of the histogram. See `density` and `weights` for a
+        description of the possible semantics.
+    bin_edges : array of dtype float
+        Return the bin edges ``(length(hist)+1)``.
+
+
+    See Also
+    --------
+    histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
+
+    Notes
+    -----
+    All but the last (righthand-most) bin is half-open.  In other words,
+    if `bins` is::
+
+      [1, 2, 3, 4]
+
+    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
+    the second ``[2, 3)``.  The last bin, however, is ``[3, 4]``, which
+    *includes* 4.
+
+
+    Examples
+    --------
+    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
+    (array([0, 2, 1]), array([0, 1, 2, 3]))
+    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
+    (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
+    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
+    (array([1, 4, 1]), array([0, 1, 2, 3]))
+
+    >>> a = np.arange(5)
+    >>> hist, bin_edges = np.histogram(a, density=True)
+    >>> hist
+    array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
+    >>> hist.sum()
+    2.4999999999999996
+    >>> np.sum(hist * np.diff(bin_edges))
+    1.0
+
+    .. versionadded:: 1.11.0
+
+    Automated Bin Selection Methods example, using 2 peak random data
+    with 2000 points:
+
+    >>> import matplotlib.pyplot as plt
+    >>> rng = np.random.RandomState(10)  # deterministic random data
+    >>> a = np.hstack((rng.normal(size=1000),
+    ...                rng.normal(loc=5, scale=2, size=1000)))
+    >>> _ = plt.hist(a, bins='auto')  # arguments are passed to np.histogram
+    >>> plt.title("Histogram with 'auto' bins")
+    Text(0.5, 1.0, "Histogram with 'auto' bins")
+    >>> plt.show()
+
+    """
+    a, weights = _ravel_and_check_weights(a, weights)
+
+    bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
+
+    # Histogram is an integer or a float array depending on the weights.
+    if weights is None:
+        ntype = np.dtype(np.intp)
+    else:
+        ntype = weights.dtype
+
+    # We set a block size, as this allows us to iterate over chunks when
+    # computing histograms, to minimize memory usage.
+    BLOCK = 65536
+
+    # The fast path uses bincount, but that only works for certain types
+    # of weight
+    simple_weights = (
+        weights is None or
+        np.can_cast(weights.dtype, np.double) or
+        np.can_cast(weights.dtype, complex)
+    )
+
+    if uniform_bins is not None and simple_weights:
+        # Fast algorithm for equal bins
+        # We now convert values of a to bin indices, under the assumption of
+        # equal bin widths (which is valid here).
+        first_edge, last_edge, n_equal_bins = uniform_bins
+
+        # Initialize empty histogram
+        n = np.zeros(n_equal_bins, ntype)
+
+        # Pre-compute histogram scaling factor
+        norm_numerator = n_equal_bins
+        norm_denom = _unsigned_subtract(last_edge, first_edge)
+
+        # We iterate over blocks here for two reasons: the first is that for
+        # large arrays, it is actually faster (for example for a 10^8 array it
+        # is 2x as fast) and it results in a memory footprint 3x lower in the
+        # limit of large arrays.
+        for i in _range(0, len(a), BLOCK):
+            tmp_a = a[i:i+BLOCK]
+            if weights is None:
+                tmp_w = None
+            else:
+                tmp_w = weights[i:i + BLOCK]
+
+            # Only include values in the right range
+            keep = (tmp_a >= first_edge)
+            keep &= (tmp_a <= last_edge)
+            if not np.logical_and.reduce(keep):
+                tmp_a = tmp_a[keep]
+                if tmp_w is not None:
+                    tmp_w = tmp_w[keep]
+
+            # This cast ensures no type promotions occur below, which gh-10322
+            # make unpredictable. Getting it wrong leads to precision errors
+            # like gh-8123.
+            tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
+
+            # Compute the bin indices, and for values that lie exactly on
+            # last_edge we need to subtract one
+            f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom)
+                         * norm_numerator)
+            indices = f_indices.astype(np.intp)
+            indices[indices == n_equal_bins] -= 1
+
+            # The index computation is not guaranteed to give exactly
+            # consistent results within ~1 ULP of the bin edges.
+            decrement = tmp_a < bin_edges[indices]
+            indices[decrement] -= 1
+            # The last bin includes the right edge. The other bins do not.
+            increment = ((tmp_a >= bin_edges[indices + 1])
+                         & (indices != n_equal_bins - 1))
+            indices[increment] += 1
+
+            # We now compute the histogram using bincount
+            if ntype.kind == 'c':
+                n.real += np.bincount(indices, weights=tmp_w.real,
+                                      minlength=n_equal_bins)
+                n.imag += np.bincount(indices, weights=tmp_w.imag,
+                                      minlength=n_equal_bins)
+            else:
+                n += np.bincount(indices, weights=tmp_w,
+                                 minlength=n_equal_bins).astype(ntype)
+    else:
+        # Compute via cumulative histogram
+        cum_n = np.zeros(bin_edges.shape, ntype)
+        if weights is None:
+            for i in _range(0, len(a), BLOCK):
+                sa = np.sort(a[i:i+BLOCK])
+                cum_n += _search_sorted_inclusive(sa, bin_edges)
+        else:
+            zero = np.zeros(1, dtype=ntype)
+            for i in _range(0, len(a), BLOCK):
+                tmp_a = a[i:i+BLOCK]
+                tmp_w = weights[i:i+BLOCK]
+                sorting_index = np.argsort(tmp_a)
+                sa = tmp_a[sorting_index]
+                sw = tmp_w[sorting_index]
+                cw = np.concatenate((zero, sw.cumsum()))
+                bin_index = _search_sorted_inclusive(sa, bin_edges)
+                cum_n += cw[bin_index]
+
+        n = np.diff(cum_n)
+
+    if density:
+        db = np.array(np.diff(bin_edges), float)
+        return n/db/n.sum(), bin_edges
+
+    return n, bin_edges
+
+
+def _histogramdd_dispatcher(sample, bins=None, range=None, density=None,
+                            weights=None):
+    if hasattr(sample, 'shape'):  # same condition as used in histogramdd
+        yield sample
+    else:
+        yield from sample
+    with contextlib.suppress(TypeError):
+        yield from bins
+    yield weights
+
+
+@array_function_dispatch(_histogramdd_dispatcher)
+def histogramdd(sample, bins=10, range=None, density=None, weights=None):
+    """
+    Compute the multidimensional histogram of some data.
+
+    Parameters
+    ----------
+    sample : (N, D) array, or (N, D) array_like
+        The data to be histogrammed.
+
+        Note the unusual interpretation of sample when an array_like:
+
+        * When an array, each row is a coordinate in a D-dimensional space -
+          such as ``histogramdd(np.array([p1, p2, p3]))``.
+        * When an array_like, each element is the list of values for a single
+          coordinate - such as ``histogramdd((X, Y, Z))``.
+
+        The first form should be preferred.
+
+    bins : sequence or int, optional
+        The bin specification:
+
+        * A sequence of arrays describing the monotonically increasing bin
+          edges along each dimension.
+        * The number of bins for each dimension (nx, ny, ... = bins).
+        * The number of bins for all dimensions (nx=ny=...=bins).
+
+    range : sequence, optional
+        A sequence of length D, each an optional (lower, upper) tuple giving
+        the outer bin edges to be used if the edges are not given explicitly in
+        `bins`.
+        An entry of None in the sequence results in the minimum and maximum
+        values being used for the corresponding dimension.
+        The default, None, is equivalent to passing a tuple of D None values.
+    density : bool, optional
+        If False, the default, returns the number of samples in each bin.
+        If True, returns the probability *density* function at the bin,
+        ``bin_count / sample_count / bin_volume``.
+    weights : (N,) array_like, optional
+        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
+        Weights are normalized to 1 if density is True. If density is False,
+        the values of the returned histogram are equal to the sum of the
+        weights belonging to the samples falling into each bin.
+
+    Returns
+    -------
+    H : ndarray
+        The multidimensional histogram of sample x. See density and weights
+        for the different possible semantics.
+    edges : list
+        A list of D arrays describing the bin edges for each dimension.
+
+    See Also
+    --------
+    histogram: 1-D histogram
+    histogram2d: 2-D histogram
+
+    Examples
+    --------
+    >>> r = np.random.randn(100,3)
+    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
+    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
+    ((5, 8, 4), 6, 9, 5)
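+
+    Using a sequence of explicit bin edges for each dimension:
+
+    >>> sample = np.array([[0.5, 0.5], [1.5, 2.5]])
+    >>> H, edges = np.histogramdd(sample, bins=[[0, 1, 2], [0, 1, 2, 3]])
+    >>> H
+    array([[1., 0., 0.],
+           [0., 0., 1.]])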
+
+    """
+
+    try:
+        # Sample is an ND-array.
+        N, D = sample.shape
+    except (AttributeError, ValueError):
+        # Sample is a sequence of 1D arrays.
+        sample = np.atleast_2d(sample).T
+        N, D = sample.shape
+
+    nbin = np.empty(D, np.intp)
+    edges = D*[None]
+    dedges = D*[None]
+    if weights is not None:
+        weights = np.asarray(weights)
+
+    try:
+        M = len(bins)
+        if M != D:
+            raise ValueError(
+                'The dimension of bins must be equal to the dimension of the '
+                'sample x.')
+    except TypeError:
+        # bins is an integer
+        bins = D*[bins]
+
+    # normalize the range argument
+    if range is None:
+        range = (None,) * D
+    elif len(range) != D:
+        raise ValueError('range argument must have one entry per dimension')
+
+    # Create edge arrays
+    for i in _range(D):
+        if np.ndim(bins[i]) == 0:
+            if bins[i] < 1:
+                raise ValueError(
+                    '`bins[{}]` must be positive, when an integer'.format(i))
+            smin, smax = _get_outer_edges(sample[:,i], range[i])
+            try:
+                n = operator.index(bins[i])
+
+            except TypeError as e:
+                raise TypeError(
+                    "`bins[{}]` must be an integer, when a scalar".format(i)
+                ) from e
+
+            edges[i] = np.linspace(smin, smax, n + 1)
+        elif np.ndim(bins[i]) == 1:
+            edges[i] = np.asarray(bins[i])
+            if np.any(edges[i][:-1] > edges[i][1:]):
+                raise ValueError(
+                    '`bins[{}]` must be monotonically increasing, when an array'
+                    .format(i))
+        else:
+            raise ValueError(
+                '`bins[{}]` must be a scalar or 1d array'.format(i))
+
+        nbin[i] = len(edges[i]) + 1  # includes an outlier on each end
+        dedges[i] = np.diff(edges[i])
+
+    # Compute the bin number each sample falls into.
+    Ncount = tuple(
+        # avoid np.digitize to work around gh-11022
+        np.searchsorted(edges[i], sample[:, i], side='right')
+        for i in _range(D)
+    )
+
+    # Using digitize, values that fall on an edge are put in the right bin.
+    # For the rightmost bin, we want values equal to the right edge to be
+    # counted in the last bin, and not as an outlier.
+    for i in _range(D):
+        # Find which points are on the rightmost edge.
+        on_edge = (sample[:, i] == edges[i][-1])
+        # Shift these points one bin to the left.
+        Ncount[i][on_edge] -= 1
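+
+    # With side='right', every in-range sample now has a bin number between 1
+    # and len(edges[i]) - 1; bin numbers 0 and len(edges[i]) are the under-
+    # and overflow bins that the `core` slice strips off again below.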
+
+    # Compute the sample indices in the flattened histogram matrix.
+    # This raises an error if the array is too large.
+    xy = np.ravel_multi_index(Ncount, nbin)
+
+    # Compute the number of repetitions in xy and assign it to the
+    # flattened histmat.
+    hist = np.bincount(xy, weights, minlength=nbin.prod())
+
+    # Shape into a proper matrix
+    hist = hist.reshape(nbin)
+
+    # This preserves the (bad) behavior observed in gh-7845, for now.
+    hist = hist.astype(float, casting='safe')
+
+    # Remove outliers (indices 0 and -1 for each dimension).
+    core = D*(slice(1, -1),)
+    hist = hist[core]
+
+    if density:
+        # calculate the probability density function
+        s = hist.sum()
+        for i in _range(D):
+            shape = np.ones(D, int)
+            shape[i] = nbin[i] - 2
+            hist = hist / dedges[i].reshape(shape)
+        hist /= s
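+        # Each count is thereby divided by the D-dimensional bin volume (the
+        # product of the per-axis widths) and by the total count `s`.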
+
+    if (hist.shape != nbin - 2).any():
+        raise RuntimeError(
+            "Internal Shape Error")
+    return hist, edges
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/histograms.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/histograms.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..ce02718adcd5be7129dee85ffcd8d9c43ee8bc00
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/histograms.pyi
@@ -0,0 +1,47 @@
+from collections.abc import Sequence
+from typing import (
+    Literal as L,
+    Any,
+    SupportsIndex,
+)
+
+from numpy._typing import (
+    NDArray,
+    ArrayLike,
+)
+
+_BinKind = L[
+    "stone",
+    "auto",
+    "doane",
+    "fd",
+    "rice",
+    "scott",
+    "sqrt",
+    "sturges",
+]
+
+__all__: list[str]
+
+def histogram_bin_edges(
+    a: ArrayLike,
+    bins: _BinKind | SupportsIndex | ArrayLike = ...,
+    range: None | tuple[float, float] = ...,
+    weights: None | ArrayLike = ...,
+) -> NDArray[Any]: ...
+
+def histogram(
+    a: ArrayLike,
+    bins: _BinKind | SupportsIndex | ArrayLike = ...,
+    range: None | tuple[float, float] = ...,
+    density: bool = ...,
+    weights: None | ArrayLike = ...,
+) -> tuple[NDArray[Any], NDArray[Any]]: ...
+
+def histogramdd(
+    sample: ArrayLike,
+    bins: SupportsIndex | ArrayLike = ...,
+    range: Sequence[tuple[float, float]] = ...,
+    density: None | bool = ...,
+    weights: None | ArrayLike = ...,
+) -> tuple[NDArray[Any], list[NDArray[Any]]]: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/index_tricks.py b/.venv/lib/python3.11/site-packages/numpy/lib/index_tricks.py
new file mode 100644
index 0000000000000000000000000000000000000000..6913d2b95b76521f9f9d532b2762400e1bd68f43
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/index_tricks.py
@@ -0,0 +1,1046 @@
+import functools
+import sys
+import math
+import warnings
+
+import numpy as np
+from .._utils import set_module
+import numpy.core.numeric as _nx
+from numpy.core.numeric import ScalarType, array
+from numpy.core.numerictypes import issubdtype
+
+import numpy.matrixlib as matrixlib
+from .function_base import diff
+from numpy.core.multiarray import ravel_multi_index, unravel_index
+from numpy.core import overrides, linspace
+from numpy.lib.stride_tricks import as_strided
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+    'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
+    's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
+    'diag_indices', 'diag_indices_from'
+]
+
+
+def _ix__dispatcher(*args):
+    return args
+
+
+@array_function_dispatch(_ix__dispatcher)
+def ix_(*args):
+    """
+    Construct an open mesh from multiple sequences.
+
+    This function takes N 1-D sequences and returns N outputs with N
+    dimensions each, such that the shape is 1 in all but one dimension
+    and the dimension with the non-unit shape value cycles through all
+    N dimensions.
+
+    Using `ix_` one can quickly construct index arrays that will index
+    the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
+    ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
+
+    Parameters
+    ----------
+    args : 1-D sequences
+        Each sequence should be of integer or boolean type.
+        Boolean sequences will be interpreted as boolean masks for the
+        corresponding dimension (equivalent to passing in
+        ``np.nonzero(boolean_sequence)``).
+
+    Returns
+    -------
+    out : tuple of ndarrays
+        N arrays with N dimensions each, with N the number of input
+        sequences. Together these arrays form an open mesh.
+
+    See Also
+    --------
+    ogrid, mgrid, meshgrid
+
+    Examples
+    --------
+    >>> a = np.arange(10).reshape(2, 5)
+    >>> a
+    array([[0, 1, 2, 3, 4],
+           [5, 6, 7, 8, 9]])
+    >>> ixgrid = np.ix_([0, 1], [2, 4])
+    >>> ixgrid
+    (array([[0],
+           [1]]), array([[2, 4]]))
+    >>> ixgrid[0].shape, ixgrid[1].shape
+    ((2, 1), (1, 2))
+    >>> a[ixgrid]
+    array([[2, 4],
+           [7, 9]])
+
+    >>> ixgrid = np.ix_([True, True], [2, 4])
+    >>> a[ixgrid]
+    array([[2, 4],
+           [7, 9]])
+    >>> ixgrid = np.ix_([True, True], [False, False, True, False, True])
+    >>> a[ixgrid]
+    array([[2, 4],
+           [7, 9]])
+
+    """
+    out = []
+    nd = len(args)
+    for k, new in enumerate(args):
+        if not isinstance(new, _nx.ndarray):
+            new = np.asarray(new)
+            if new.size == 0:
+                # Explicitly type empty arrays to avoid float default
+                new = new.astype(_nx.intp)
+        if new.ndim != 1:
+            raise ValueError("Cross index must be 1 dimensional")
+        if issubdtype(new.dtype, _nx.bool_):
+            new, = new.nonzero()
+        new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
+        out.append(new)
+    return tuple(out)
+
+
+class nd_grid:
+    """
+    Construct a multi-dimensional "meshgrid".
+
+    ``grid = nd_grid()`` creates an instance which will return a mesh-grid
+    when indexed.  The dimension and number of the output arrays are equal
+    to the number of indexing dimensions.  If the step length is not a
+    complex number, then the stop is not inclusive.
+
+    However, if the step length is a **complex number** (e.g. 5j), then the
+    integer part of its magnitude is interpreted as specifying the
+    number of points to create between the start and stop values, where
+    the stop value **is inclusive**.
+
+    If instantiated with an argument of ``sparse=True``, the mesh-grid is
+    open (or not fleshed out) so that only one dimension of each returned
+    argument is greater than 1.
+
+    Parameters
+    ----------
+    sparse : bool, optional
+        Whether the grid is sparse or not. Default is False.
+
+    Notes
+    -----
+    Two instances of `nd_grid` are made available in the NumPy namespace,
+    `mgrid` and `ogrid`, approximately defined as::
+
+        mgrid = nd_grid(sparse=False)
+        ogrid = nd_grid(sparse=True)
+
+    Users should use these pre-defined instances instead of using `nd_grid`
+    directly.
+    """
+
+    def __init__(self, sparse=False):
+        self.sparse = sparse
+
+    def __getitem__(self, key):
+        try:
+            size = []
+            # Mimic the behavior of `np.arange` and use a data type
+            # which is at least as large as `np.int_`
+            num_list = [0]
+            for k in range(len(key)):
+                step = key[k].step
+                start = key[k].start
+                stop = key[k].stop
+                if start is None:
+                    start = 0
+                if step is None:
+                    step = 1
+                if isinstance(step, (_nx.complexfloating, complex)):
+                    step = abs(step)
+                    size.append(int(step))
+                else:
+                    size.append(
+                        int(math.ceil((stop - start) / (step*1.0))))
+                num_list += [start, stop, step]
+            typ = _nx.result_type(*num_list)
+            if self.sparse:
+                nn = [_nx.arange(_x, dtype=_t)
+                      for _x, _t in zip(size, (typ,)*len(size))]
+            else:
+                nn = _nx.indices(size, typ)
+            for k, kk in enumerate(key):
+                step = kk.step
+                start = kk.start
+                if start is None:
+                    start = 0
+                if step is None:
+                    step = 1
+                if isinstance(step, (_nx.complexfloating, complex)):
+                    step = int(abs(step))
+                    if step != 1:
+                        step = (kk.stop - start) / float(step - 1)
+                nn[k] = (nn[k]*step+start)
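+                # For a complex step such as 0:5:5j the effective step becomes
+                # (5 - 0) / 4 = 1.25, so the axis samples 0, 1.25, 2.5, 3.75, 5
+                # with the stop value included.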
+            if self.sparse:
+                slobj = [_nx.newaxis]*len(size)
+                for k in range(len(size)):
+                    slobj[k] = slice(None, None)
+                    nn[k] = nn[k][tuple(slobj)]
+                    slobj[k] = _nx.newaxis
+            return nn
+        except (IndexError, TypeError):
+            step = key.step
+            stop = key.stop
+            start = key.start
+            if start is None:
+                start = 0
+            if isinstance(step, (_nx.complexfloating, complex)):
+                # Prevent the (potential) creation of integer arrays
+                step_float = abs(step)
+                step = length = int(step_float)
+                if step != 1:
+                    step = (key.stop-start)/float(step-1)
+                typ = _nx.result_type(start, stop, step_float)
+                return _nx.arange(0, length, 1, dtype=typ)*step + start
+            else:
+                return _nx.arange(start, stop, step)
+
+
+class MGridClass(nd_grid):
+    """
+    An instance which returns a dense multi-dimensional "meshgrid".
+
+    An instance which returns a dense (or fleshed out) mesh-grid
+    when indexed, so that each returned argument has the same shape.
+    The dimensions and number of the output arrays are equal to the
+    number of indexing dimensions.  If the step length is not a complex
+    number, then the stop is not inclusive.
+
+    However, if the step length is a **complex number** (e.g. 5j), then
+    the integer part of its magnitude is interpreted as specifying the
+    number of points to create between the start and stop values, where
+    the stop value **is inclusive**.
+
+    Returns
+    -------
+    mesh-grid `ndarrays` all of the same dimensions
+
+    See Also
+    --------
+    ogrid : like `mgrid` but returns open (not fleshed out) mesh grids
+    meshgrid: return coordinate matrices from coordinate vectors
+    r_ : array concatenator
+    :ref:`how-to-partition`
+
+    Examples
+    --------
+    >>> np.mgrid[0:5, 0:5]
+    array([[[0, 0, 0, 0, 0],
+            [1, 1, 1, 1, 1],
+            [2, 2, 2, 2, 2],
+            [3, 3, 3, 3, 3],
+            [4, 4, 4, 4, 4]],
+           [[0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4],
+            [0, 1, 2, 3, 4]]])
+    >>> np.mgrid[-1:1:5j]
+    array([-1. , -0.5,  0. ,  0.5,  1. ])
+
+    """
+
+    def __init__(self):
+        super().__init__(sparse=False)
+
+
+mgrid = MGridClass()
+
+
+class OGridClass(nd_grid):
+    """
+    An instance which returns an open multi-dimensional "meshgrid".
+
+    An instance which returns an open (i.e. not fleshed out) mesh-grid
+    when indexed, so that only one dimension of each returned array is
+    greater than 1.  The dimension and number of the output arrays are
+    equal to the number of indexing dimensions.  If the step length is
+    not a complex number, then the stop is not inclusive.
+
+    However, if the step length is a **complex number** (e.g. 5j), then
+    the integer part of its magnitude is interpreted as specifying the
+    number of points to create between the start and stop values, where
+    the stop value **is inclusive**.
+
+    Returns
+    -------
+    mesh-grid
+        `ndarrays` with only one dimension not equal to 1
+
+    See Also
+    --------
+    mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
+    meshgrid: return coordinate matrices from coordinate vectors
+    r_ : array concatenator
+    :ref:`how-to-partition`
+
+    Examples
+    --------
+    >>> from numpy import ogrid
+    >>> ogrid[-1:1:5j]
+    array([-1. , -0.5,  0. ,  0.5,  1. ])
+    >>> ogrid[0:5,0:5]
+    [array([[0],
+            [1],
+            [2],
+            [3],
+            [4]]), array([[0, 1, 2, 3, 4]])]
+
+    """
+
+    def __init__(self):
+        super().__init__(sparse=True)
+
+
+ogrid = OGridClass()
+
+
+class AxisConcatenator:
+    """
+    Translates slice objects to concatenation along an axis.
+
+    For detailed documentation on usage, see `r_`.
+    """
+    # allow ma.mr_ to override this
+    concatenate = staticmethod(_nx.concatenate)
+    makemat = staticmethod(matrixlib.matrix)
+
+    def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
+        self.axis = axis
+        self.matrix = matrix
+        self.trans1d = trans1d
+        self.ndmin = ndmin
+
+    def __getitem__(self, key):
+        # handle matrix builder syntax
+        if isinstance(key, str):
+            frame = sys._getframe().f_back
+            mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)
+            return mymat
+
+        if not isinstance(key, tuple):
+            key = (key,)
+
+        # copy attributes, since they can be overridden in the first argument
+        trans1d = self.trans1d
+        ndmin = self.ndmin
+        matrix = self.matrix
+        axis = self.axis
+
+        objs = []
+        # dtypes or scalars for weak scalar handling in result_type
+        result_type_objs = []
+
+        for k, item in enumerate(key):
+            scalar = False
+            if isinstance(item, slice):
+                step = item.step
+                start = item.start
+                stop = item.stop
+                if start is None:
+                    start = 0
+                if step is None:
+                    step = 1
+                if isinstance(step, (_nx.complexfloating, complex)):
+                    size = int(abs(step))
+                    newobj = linspace(start, stop, num=size)
+                else:
+                    newobj = _nx.arange(start, stop, step)
+                if ndmin > 1:
+                    newobj = array(newobj, copy=False, ndmin=ndmin)
+                    if trans1d != -1:
+                        newobj = newobj.swapaxes(-1, trans1d)
+            elif isinstance(item, str):
+                if k != 0:
+                    raise ValueError("special directives must be the "
+                                     "first entry.")
+                if item in ('r', 'c'):
+                    matrix = True
+                    col = (item == 'c')
+                    continue
+                if ',' in item:
+                    vec = item.split(',')
+                    try:
+                        axis, ndmin = [int(x) for x in vec[:2]]
+                        if len(vec) == 3:
+                            trans1d = int(vec[2])
+                        continue
+                    except Exception as e:
+                        raise ValueError(
+                            "unknown special directive {!r}".format(item)
+                        ) from e
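+                # e.g. the directive '0,2,-1' sets axis=0, ndmin=2 and
+                # trans1d=-1; a bare integer string such as '1' only sets
+                # the concatenation axis (handled below).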
+                try:
+                    axis = int(item)
+                    continue
+                except (ValueError, TypeError) as e:
+                    raise ValueError("unknown special directive") from e
+            elif type(item) in ScalarType:
+                scalar = True
+                newobj = item
+            else:
+                item_ndim = np.ndim(item)
+                newobj = array(item, copy=False, subok=True, ndmin=ndmin)
+                if trans1d != -1 and item_ndim < ndmin:
+                    k2 = ndmin - item_ndim
+                    k1 = trans1d
+                    if k1 < 0:
+                        k1 += k2 + 1
+                    defaxes = list(range(ndmin))
+                    axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
+                    newobj = newobj.transpose(axes)
+
+            objs.append(newobj)
+            if scalar:
+                result_type_objs.append(item)
+            else:
+                result_type_objs.append(newobj.dtype)
+
+        # Ensure that scalars won't up-cast unless warranted, for 0, drops
+        # through to error in concatenate.
+        if len(result_type_objs) != 0:
+            final_dtype = _nx.result_type(*result_type_objs)
+            # concatenate could do the cast, but that can be overridden:
+            objs = [array(obj, copy=False, subok=True,
+                          ndmin=ndmin, dtype=final_dtype) for obj in objs]
+
+        res = self.concatenate(tuple(objs), axis=axis)
+
+        if matrix:
+            oldndim = res.ndim
+            res = self.makemat(res)
+            if oldndim == 1 and col:
+                res = res.T
+        return res
+
+    def __len__(self):
+        return 0
+
+# separate classes are used here instead of just making r_ = concatenator(0),
+# etc. because otherwise we couldn't get the doc string to come out right
+# in help(r_)
+
+
+class RClass(AxisConcatenator):
+    """
+    Translates slice objects to concatenation along the first axis.
+
+    This is a simple way to build up arrays quickly. There are two use cases.
+
+    1. If the index expression contains comma separated arrays, then stack
+       them along their first axis.
+    2. If the index expression contains slice notation or scalars then create
+       a 1-D array with a range indicated by the slice notation.
+
+    If slice notation is used, the syntax ``start:stop:step`` is equivalent
+    to ``np.arange(start, stop, step)`` inside of the brackets. However, if
+    ``step`` is an imaginary number (e.g. 100j) then its integer portion is
+    interpreted as a number-of-points desired and the start and stop are
+    inclusive. In other words ``start:stop:stepj`` is interpreted as
+    ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
+    After expansion of slice notation, all comma separated sequences are
+    concatenated together.
+
+    Optional character strings placed as the first element of the index
+    expression can be used to change the output. The strings 'r' or 'c' result
+    in matrix output. If the result is 1-D and 'r' is specified, a 1 x N (row)
+    matrix is produced. If the result is 1-D and 'c' is specified, then an N x 1
+    (column) matrix is produced. If the result is 2-D then both provide the
+    same matrix result.
+
+    A string integer specifies which axis to stack multiple comma separated
+    arrays along. A string of two comma-separated integers allows indication
+    of the minimum number of dimensions to force each entry into as the
+    second integer (the axis to concatenate along is still the first integer).
+
+    A string with three comma-separated integers allows specification of the
+    axis to concatenate along, the minimum number of dimensions to force the
+    entries to, and which axis should contain the start of the arrays which
+    are less than the specified number of dimensions. In other words the third
+    integer allows you to specify where the 1's should be placed in the shape
+    of the arrays that have their shapes upgraded. By default, they are placed
+    in the front of the shape tuple. The third argument allows you to specify
+    where the start of the array should be instead. Thus, a third argument of
+    '0' would place the 1's at the end of the array shape. Negative integers
+    specify where in the new shape tuple the last dimension of upgraded arrays
+    should be placed, so the default is '-1'.
+
+    Parameters
+    ----------
+    Not a function, so takes no parameters
+
+
+    Returns
+    -------
+    A concatenated ndarray or matrix.
+
+    See Also
+    --------
+    concatenate : Join a sequence of arrays along an existing axis.
+    c_ : Translates slice objects to concatenation along the second axis.
+
+    Examples
+    --------
+    >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
+    array([1, 2, 3, ..., 4, 5, 6])
+    >>> np.r_[-1:1:6j, [0]*3, 5, 6]
+    array([-1. , -0.6, -0.2,  0.2,  0.6,  1. ,  0. ,  0. ,  0. ,  5. ,  6. ])
+
+    String integers specify the axis to concatenate along or the minimum
+    number of dimensions to force entries into.
+
+    >>> a = np.array([[0, 1, 2], [3, 4, 5]])
+    >>> np.r_['-1', a, a] # concatenate along last axis
+    array([[0, 1, 2, 0, 1, 2],
+           [3, 4, 5, 3, 4, 5]])
+    >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
+    array([[1, 2, 3],
+           [4, 5, 6]])
+
+    >>> np.r_['0,2,0', [1,2,3], [4,5,6]]
+    array([[1],
+           [2],
+           [3],
+           [4],
+           [5],
+           [6]])
+    >>> np.r_['1,2,0', [1,2,3], [4,5,6]]
+    array([[1, 4],
+           [2, 5],
+           [3, 6]])
+
+    Using 'r' or 'c' as a first string argument creates a matrix.
+
+    >>> np.r_['r',[1,2,3], [4,5,6]]
+    matrix([[1, 2, 3, 4, 5, 6]])
+
+    """
+
+    def __init__(self):
+        AxisConcatenator.__init__(self, 0)
+
+
+r_ = RClass()
+
+
+class CClass(AxisConcatenator):
+    """
+    Translates slice objects to concatenation along the second axis.
+
+    This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
+    useful because of its common occurrence. In particular, arrays will be
+    stacked along their last axis after being upgraded to at least 2-D with
+    1's post-pended to the shape (column vectors made out of 1-D arrays).
+
+    See Also
+    --------
+    column_stack : Stack 1-D arrays as columns into a 2-D array.
+    r_ : For more detailed documentation.
+
+    Examples
+    --------
+    >>> np.c_[np.array([1,2,3]), np.array([4,5,6])]
+    array([[1, 4],
+           [2, 5],
+           [3, 6]])
+    >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
+    array([[1, 2, 3, ..., 4, 5, 6]])
+
+    """
+
+    def __init__(self):
+        AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
+
+
+c_ = CClass()
+
+
+@set_module('numpy')
+class ndenumerate:
+    """
+    Multidimensional index iterator.
+
+    Return an iterator yielding pairs of array coordinates and values.
+
+    Parameters
+    ----------
+    arr : ndarray
+      Input array.
+
+    See Also
+    --------
+    ndindex, flatiter
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> for index, x in np.ndenumerate(a):
+    ...     print(index, x)
+    (0, 0) 1
+    (0, 1) 2
+    (1, 0) 3
+    (1, 1) 4
+
+    """
+
+    def __init__(self, arr):
+        self.iter = np.asarray(arr).flat
+
+    def __next__(self):
+        """
+        Standard iterator method, returns the index tuple and array value.
+
+        Returns
+        -------
+        coords : tuple of ints
+            The indices of the current iteration.
+        val : scalar
+            The array element of the current iteration.
+
+        """
+        return self.iter.coords, next(self.iter)
+
+    def __iter__(self):
+        return self
+
+
+@set_module('numpy')
+class ndindex:
+    """
+    An N-dimensional iterator object to index arrays.
+
+    Given the shape of an array, an `ndindex` instance iterates over
+    the N-dimensional index of the array. At each iteration a tuple
+    of indices is returned, the last dimension is iterated over first.
+
+    Parameters
+    ----------
+    shape : ints, or a single tuple of ints
+        The size of each dimension of the array can be passed as
+        individual parameters or as the elements of a tuple.
+
+    See Also
+    --------
+    ndenumerate, flatiter
+
+    Examples
+    --------
+    Dimensions as individual arguments
+
+    >>> for index in np.ndindex(3, 2, 1):
+    ...     print(index)
+    (0, 0, 0)
+    (0, 1, 0)
+    (1, 0, 0)
+    (1, 1, 0)
+    (2, 0, 0)
+    (2, 1, 0)
+
+    Same dimensions - but in a tuple ``(3, 2, 1)``
+
+    >>> for index in np.ndindex((3, 2, 1)):
+    ...     print(index)
+    (0, 0, 0)
+    (0, 1, 0)
+    (1, 0, 0)
+    (1, 1, 0)
+    (2, 0, 0)
+    (2, 1, 0)
+
+    """
+
+    def __init__(self, *shape):
+        if len(shape) == 1 and isinstance(shape[0], tuple):
+            shape = shape[0]
+        x = as_strided(_nx.zeros(1), shape=shape,
+                       strides=_nx.zeros_like(shape))
+        self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
+                              order='C')
+
+    def __iter__(self):
+        return self
+
+    def ndincr(self):
+        """
+        Increment the multi-dimensional index by one.
+
+        This method is for backward compatibility only: do not use.
+
+        .. deprecated:: 1.20.0
+            This method has been advised against since numpy 1.8.0, but only
+            started emitting DeprecationWarning as of this version.
+        """
+        # NumPy 1.20.0, 2020-09-08
+        warnings.warn(
+            "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead",
+            DeprecationWarning, stacklevel=2)
+        next(self)
+
+    def __next__(self):
+        """
+        Standard iterator method, updates the index and returns the index
+        tuple.
+
+        Returns
+        -------
+        val : tuple of ints
+            Returns a tuple containing the indices of the current
+            iteration.
+
+        """
+        next(self._it)
+        return self._it.multi_index
+
+
+# You can do all this with slice() plus a few special objects,
+# but there's a lot to remember. This version is simpler because
+# it uses the standard array indexing syntax.
+#
+# Written by Konrad Hinsen 
+# last revision: 1999-7-23
+#
+# Cosmetic changes by T. Oliphant 2001
+#
+#
+
+class IndexExpression:
+    """
+    A nicer way to build up index tuples for arrays.
+
+    .. note::
+       Use one of the two predefined instances `index_exp` or `s_`
+       rather than directly using `IndexExpression`.
+
+    For any index combination, including slicing and axis insertion,
+    ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
+    array `a`. However, ``np.index_exp[indices]`` can be used anywhere
+    in Python code and returns a tuple of slice objects that can be
+    used in the construction of complex index expressions.
+
+    Parameters
+    ----------
+    maketuple : bool
+        If True, always returns a tuple.
+
+    See Also
+    --------
+    index_exp : Predefined instance that always returns a tuple:
+       `index_exp = IndexExpression(maketuple=True)`.
+    s_ : Predefined instance without tuple conversion:
+       `s_ = IndexExpression(maketuple=False)`.
+
+    Notes
+    -----
+    You can do all this with `slice()` plus a few special objects,
+    but there's a lot to remember and this version is simpler because
+    it uses the standard array indexing syntax.
+
+    Examples
+    --------
+    >>> np.s_[2::2]
+    slice(2, None, 2)
+    >>> np.index_exp[2::2]
+    (slice(2, None, 2),)
+
+    >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
+    array([2, 4])
+
+    """
+
+    def __init__(self, maketuple):
+        self.maketuple = maketuple
+
+    def __getitem__(self, item):
+        if self.maketuple and not isinstance(item, tuple):
+            return (item,)
+        else:
+            return item
+
+
+index_exp = IndexExpression(maketuple=True)
+s_ = IndexExpression(maketuple=False)
+
+# End contribution from Konrad.
+
+
+# The following functions complement those in twodim_base, but are
+# applicable to N-dimensions.
+
+
+def _fill_diagonal_dispatcher(a, val, wrap=None):
+    return (a,)
+
+
+@array_function_dispatch(_fill_diagonal_dispatcher)
+def fill_diagonal(a, val, wrap=False):
+    """Fill the main diagonal of the given array of any dimensionality.
+
+    For an array `a` with ``a.ndim >= 2``, the diagonal is the list of
+    locations with indices ``a[i, ..., i]`` all identical. This function
+    modifies the input array in-place; it does not return a value.
+
+    Parameters
+    ----------
+    a : array, at least 2-D.
+      Array whose diagonal is to be filled, it gets modified in-place.
+
+    val : scalar or array_like
+      Value(s) to write on the diagonal. If `val` is scalar, the value is
+      written along the diagonal. If array-like, the flattened `val` is
+      written along the diagonal, repeating if necessary to fill all
+      diagonal entries.
+
+    wrap : bool
+      For tall matrices in NumPy versions up to 1.6.2, the
+      diagonal "wrapped" after N columns. Pass ``wrap=True`` to restore
+      this behavior. This affects only tall matrices.
+
+    See also
+    --------
+    diag_indices, diag_indices_from
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    This functionality can be obtained via `diag_indices`, but internally
+    this version uses a much faster implementation that never constructs the
+    indices and uses simple slicing.
+
+    Examples
+    --------
+    >>> a = np.zeros((3, 3), int)
+    >>> np.fill_diagonal(a, 5)
+    >>> a
+    array([[5, 0, 0],
+           [0, 5, 0],
+           [0, 0, 5]])
+
+    The same function can operate on a 4-D array:
+
+    >>> a = np.zeros((3, 3, 3, 3), int)
+    >>> np.fill_diagonal(a, 4)
+
+    We only show a few blocks for clarity:
+
+    >>> a[0, 0]
+    array([[4, 0, 0],
+           [0, 0, 0],
+           [0, 0, 0]])
+    >>> a[1, 1]
+    array([[0, 0, 0],
+           [0, 4, 0],
+           [0, 0, 0]])
+    >>> a[2, 2]
+    array([[0, 0, 0],
+           [0, 0, 0],
+           [0, 0, 4]])
+
+    The wrap option affects only tall matrices:
+
+    >>> # tall matrices no wrap
+    >>> a = np.zeros((5, 3), int)
+    >>> np.fill_diagonal(a, 4)
+    >>> a
+    array([[4, 0, 0],
+           [0, 4, 0],
+           [0, 0, 4],
+           [0, 0, 0],
+           [0, 0, 0]])
+
+    >>> # tall matrices wrap
+    >>> a = np.zeros((5, 3), int)
+    >>> np.fill_diagonal(a, 4, wrap=True)
+    >>> a
+    array([[4, 0, 0],
+           [0, 4, 0],
+           [0, 0, 4],
+           [0, 0, 0],
+           [4, 0, 0]])
+
+    >>> # wide matrices
+    >>> a = np.zeros((3, 5), int)
+    >>> np.fill_diagonal(a, 4, wrap=True)
+    >>> a
+    array([[4, 0, 0, 0, 0],
+           [0, 4, 0, 0, 0],
+           [0, 0, 4, 0, 0]])
+
+    The anti-diagonal can be filled by reversing the order of elements
+    using either `numpy.flipud` or `numpy.fliplr`.
+
+    >>> a = np.zeros((3, 3), int);
+    >>> np.fill_diagonal(np.fliplr(a), [1,2,3])  # Horizontal flip
+    >>> a
+    array([[0, 0, 1],
+           [0, 2, 0],
+           [3, 0, 0]])
+    >>> np.fill_diagonal(np.flipud(a), [1,2,3])  # Vertical flip
+    >>> a
+    array([[0, 0, 3],
+           [0, 2, 0],
+           [1, 0, 0]])
+
+    Note that the order in which the diagonal is filled varies depending
+    on the flip function.
+    """
+    if a.ndim < 2:
+        raise ValueError("array must be at least 2-d")
+    end = None
+    if a.ndim == 2:
+        # Explicit, fast formula for the common case.  For 2-d arrays, we
+        # accept rectangular ones.
+        step = a.shape[1] + 1
+        # This is needed so that tall matrices don't have the diagonal wrap.
+        if not wrap:
+            end = a.shape[1] * a.shape[1]
+    else:
+        # For more than d=2, the strided formula is only valid for arrays with
+        # all dimensions equal, so we check first.
+        if not np.all(diff(a.shape) == 0):
+            raise ValueError("All dimensions of input must be of equal length")
+        step = 1 + (np.cumprod(a.shape[:-1])).sum()
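+        # e.g. for a C-contiguous (3, 3, 3) array this gives
+        # step = 1 + (3 + 9) = 13, so a.flat[::13] visits a[0, 0, 0],
+        # a[1, 1, 1] and a[2, 2, 2].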
+
+    # Write the value out into the diagonal.
+    a.flat[:end:step] = val
+
+
+@set_module('numpy')
+def diag_indices(n, ndim=2):
+    """
+    Return the indices to access the main diagonal of an array.
+
+    This returns a tuple of indices that can be used to access the main
+    diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
+    (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
+    ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
+    for ``i = [0..n-1]``.
+
+    Parameters
+    ----------
+    n : int
+      The size, along each dimension, of the arrays for which the returned
+      indices can be used.
+
+    ndim : int, optional
+      The number of dimensions.
+
+    See Also
+    --------
+    diag_indices_from
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    Examples
+    --------
+    Create a set of indices to access the diagonal of a (4, 4) array:
+
+    >>> di = np.diag_indices(4)
+    >>> di
+    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+    >>> a[di] = 100
+    >>> a
+    array([[100,   1,   2,   3],
+           [  4, 100,   6,   7],
+           [  8,   9, 100,  11],
+           [ 12,  13,  14, 100]])
+
+    Now, we create indices to manipulate a 3-D array:
+
+    >>> d3 = np.diag_indices(2, 3)
+    >>> d3
+    (array([0, 1]), array([0, 1]), array([0, 1]))
+
+    And use it to set the diagonal of an array of zeros to 1:
+
+    >>> a = np.zeros((2, 2, 2), dtype=int)
+    >>> a[d3] = 1
+    >>> a
+    array([[[1, 0],
+            [0, 0]],
+           [[0, 0],
+            [0, 1]]])
+
+    """
+    idx = np.arange(n)
+    return (idx,) * ndim
+
+
+def _diag_indices_from(arr):
+    return (arr,)
+
+
+@array_function_dispatch(_diag_indices_from)
+def diag_indices_from(arr):
+    """
+    Return the indices to access the main diagonal of an n-dimensional array.
+
+    See `diag_indices` for full details.
+
+    Parameters
+    ----------
+    arr : array, at least 2-D
+
+    See Also
+    --------
+    diag_indices
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    Examples
+    --------
+    
+    Create a 4 by 4 array.
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+    
+    Get the indices of the diagonal elements.
+
+    >>> di = np.diag_indices_from(a)
+    >>> di
+    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+
+    >>> a[di]
+    array([ 0,  5, 10, 15])
+
+    This is simply syntactic sugar for diag_indices.
+
+    >>> np.diag_indices(a.shape[0])
+    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+
+    """
+
+    if not arr.ndim >= 2:
+        raise ValueError("input array must be at least 2-d")
+    # For more than d=2, the returned indices are only valid for arrays with
+    # all dimensions equal, so we check first.
+    if not np.all(diff(arr.shape) == 0):
+        raise ValueError("All dimensions of input must be of equal length")
+
+    return diag_indices(arr.shape[0], arr.ndim)
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/index_tricks.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/index_tricks.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..29a6b9e2b9f95c260b5123cef75c9a1d0b34833b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/index_tricks.pyi
@@ -0,0 +1,162 @@
+from collections.abc import Sequence
+from typing import (
+    Any,
+    TypeVar,
+    Generic,
+    overload,
+    Literal,
+    SupportsIndex,
+)
+
+from numpy import (
+    # Circumvent a naming conflict with `AxisConcatenator.matrix`
+    matrix as _Matrix,
+    ndenumerate as ndenumerate,
+    ndindex as ndindex,
+    ndarray,
+    dtype,
+    integer,
+    str_,
+    bytes_,
+    bool_,
+    int_,
+    float_,
+    complex_,
+    intp,
+    _OrderCF,
+    _ModeKind,
+)
+from numpy._typing import (
+    # Arrays
+    ArrayLike,
+    _NestedSequence,
+    _FiniteNestedSequence,
+    NDArray,
+    _ArrayLikeInt,
+
+    # DTypes
+    DTypeLike,
+    _SupportsDType,
+
+    # Shapes
+    _ShapeLike,
+)
+
+from numpy.core.multiarray import (
+    unravel_index as unravel_index,
+    ravel_multi_index as ravel_multi_index,
+)
+
+_T = TypeVar("_T")
+_DType = TypeVar("_DType", bound=dtype[Any])
+_BoolType = TypeVar("_BoolType", Literal[True], Literal[False])
+_TupType = TypeVar("_TupType", bound=tuple[Any, ...])
+_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
+
+__all__: list[str]
+
+@overload
+def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ...
+@overload
+def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ...
+@overload
+def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ...
+@overload
+def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[bool_], ...]: ...
+@overload
+def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ...
+@overload
+def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float_], ...]: ...
+@overload
+def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex_], ...]: ...
+
+class nd_grid(Generic[_BoolType]):
+    sparse: _BoolType
+    def __init__(self, sparse: _BoolType = ...) -> None: ...
+    @overload
+    def __getitem__(
+        self: nd_grid[Literal[False]],
+        key: slice | Sequence[slice],
+    ) -> NDArray[Any]: ...
+    @overload
+    def __getitem__(
+        self: nd_grid[Literal[True]],
+        key: slice | Sequence[slice],
+    ) -> list[NDArray[Any]]: ...
+
+class MGridClass(nd_grid[Literal[False]]):
+    def __init__(self) -> None: ...
+
+mgrid: MGridClass
+
+class OGridClass(nd_grid[Literal[True]]):
+    def __init__(self) -> None: ...
+
+ogrid: OGridClass
+
+class AxisConcatenator:
+    axis: int
+    matrix: bool
+    ndmin: int
+    trans1d: int
+    def __init__(
+        self,
+        axis: int = ...,
+        matrix: bool = ...,
+        ndmin: int = ...,
+        trans1d: int = ...,
+    ) -> None: ...
+    @staticmethod
+    @overload
+    def concatenate(  # type: ignore[misc]
+        *a: ArrayLike, axis: SupportsIndex = ..., out: None = ...
+    ) -> NDArray[Any]: ...
+    @staticmethod
+    @overload
+    def concatenate(
+        *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ...
+    ) -> _ArrayType: ...
+    @staticmethod
+    def makemat(
+        data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ...
+    ) -> _Matrix[Any, Any]: ...
+
+    # TODO: Sort out this `__getitem__` method
+    def __getitem__(self, key: Any) -> Any: ...
+
+class RClass(AxisConcatenator):
+    axis: Literal[0]
+    matrix: Literal[False]
+    ndmin: Literal[1]
+    trans1d: Literal[-1]
+    def __init__(self) -> None: ...
+
+r_: RClass
+
+class CClass(AxisConcatenator):
+    axis: Literal[-1]
+    matrix: Literal[False]
+    ndmin: Literal[2]
+    trans1d: Literal[0]
+    def __init__(self) -> None: ...
+
+c_: CClass
+
+class IndexExpression(Generic[_BoolType]):
+    maketuple: _BoolType
+    def __init__(self, maketuple: _BoolType) -> None: ...
+    @overload
+    def __getitem__(self, item: _TupType) -> _TupType: ...  # type: ignore[misc]
+    @overload
+    def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ...
+    @overload
+    def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ...
+
+index_exp: IndexExpression[Literal[True]]
+s_: IndexExpression[Literal[False]]
+
+def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None: ...
+def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ...
+def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ...
+
+# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex`
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/mixins.py b/.venv/lib/python3.11/site-packages/numpy/lib/mixins.py
new file mode 100644
index 0000000000000000000000000000000000000000..117cc785187be45e8597af48d26f723eb0024d23
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/mixins.py
@@ -0,0 +1,177 @@
+"""Mixin classes for custom array types that don't inherit from ndarray."""
+from numpy.core import umath as um
+
+
+__all__ = ['NDArrayOperatorsMixin']
+
+
+def _disables_array_ufunc(obj):
+    """True when __array_ufunc__ is set to None."""
+    try:
+        return obj.__array_ufunc__ is None
+    except AttributeError:
+        return False
+
+
+def _binary_method(ufunc, name):
+    """Implement a forward binary method with a ufunc, e.g., __add__."""
+    def func(self, other):
+        if _disables_array_ufunc(other):
+            return NotImplemented
+        return ufunc(self, other)
+    func.__name__ = '__{}__'.format(name)
+    return func
+
+
+def _reflected_binary_method(ufunc, name):
+    """Implement a reflected binary method with a ufunc, e.g., __radd__."""
+    def func(self, other):
+        if _disables_array_ufunc(other):
+            return NotImplemented
+        return ufunc(other, self)
+    func.__name__ = '__r{}__'.format(name)
+    return func
+
+
+def _inplace_binary_method(ufunc, name):
+    """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
+    def func(self, other):
+        return ufunc(self, other, out=(self,))
+    func.__name__ = '__i{}__'.format(name)
+    return func
+
+
+def _numeric_methods(ufunc, name):
+    """Implement forward, reflected and inplace binary methods with a ufunc."""
+    return (_binary_method(ufunc, name),
+            _reflected_binary_method(ufunc, name),
+            _inplace_binary_method(ufunc, name))
+
+
+def _unary_method(ufunc, name):
+    """Implement a unary special method with a ufunc."""
+    def func(self):
+        return ufunc(self)
+    func.__name__ = '__{}__'.format(name)
+    return func
+
+
+class NDArrayOperatorsMixin:
+    """Mixin defining all operator special methods using __array_ufunc__.
+
+    This class implements the special methods for almost all of Python's
+    builtin operators defined in the `operator` module, including comparisons
+    (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
+    deferring to the ``__array_ufunc__`` method, which subclasses must
+    implement.
+
+    It is useful for writing classes that do not inherit from `numpy.ndarray`,
+    but that should support arithmetic and numpy universal functions like
+    arrays as described in `A Mechanism for Overriding Ufuncs
+    `_.
+
+    As a trivial example, consider this implementation of an ``ArrayLike``
+    class that simply wraps a NumPy array and ensures that the result of any
+    arithmetic operation is also an ``ArrayLike`` object::
+
+        class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+            def __init__(self, value):
+                self.value = np.asarray(value)
+
+            # One might also consider adding the built-in list type to this
+            # list, to support operations like np.add(array_like, list)
+            _HANDLED_TYPES = (np.ndarray, numbers.Number)
+
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                out = kwargs.get('out', ())
+                for x in inputs + out:
+                    # Only support operations with instances of _HANDLED_TYPES.
+                    # Use ArrayLike instead of type(self) for isinstance to
+                    # allow subclasses that don't override __array_ufunc__ to
+                    # handle ArrayLike objects.
+                    if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
+                        return NotImplemented
+
+                # Defer to the implementation of the ufunc on unwrapped values.
+                inputs = tuple(x.value if isinstance(x, ArrayLike) else x
+                               for x in inputs)
+                if out:
+                    kwargs['out'] = tuple(
+                        x.value if isinstance(x, ArrayLike) else x
+                        for x in out)
+                result = getattr(ufunc, method)(*inputs, **kwargs)
+
+                if type(result) is tuple:
+                    # multiple return values
+                    return tuple(type(self)(x) for x in result)
+                elif method == 'at':
+                    # no return value
+                    return None
+                else:
+                    # one return value
+                    return type(self)(result)
+
+            def __repr__(self):
+                return '%s(%r)' % (type(self).__name__, self.value)
+
+    In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
+    the result is always another ``ArrayLike``:
+
+        >>> x = ArrayLike([1, 2, 3])
+        >>> x - 1
+        ArrayLike(array([0, 1, 2]))
+        >>> 1 - x
+        ArrayLike(array([ 0, -1, -2]))
+        >>> np.arange(3) - x
+        ArrayLike(array([-1, -1, -1]))
+        >>> x - np.arange(3)
+        ArrayLike(array([1, 1, 1]))
+
+    Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
+    with arbitrary, unrecognized types. This ensures that interactions with
+    ArrayLike preserve a well-defined casting hierarchy.
+
+    .. versionadded:: 1.13
+    """
+    __slots__ = ()
+    # Like np.ndarray, this mixin class implements "Option 1" from the ufunc
+    # overrides NEP.
+
+    # comparisons don't have reflected and in-place versions
+    __lt__ = _binary_method(um.less, 'lt')
+    __le__ = _binary_method(um.less_equal, 'le')
+    __eq__ = _binary_method(um.equal, 'eq')
+    __ne__ = _binary_method(um.not_equal, 'ne')
+    __gt__ = _binary_method(um.greater, 'gt')
+    __ge__ = _binary_method(um.greater_equal, 'ge')
+
+    # numeric methods
+    __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
+    __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
+    __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
+    __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
+        um.matmul, 'matmul')
+    # Python 3 does not use __div__, __rdiv__, or __idiv__
+    __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
+        um.true_divide, 'truediv')
+    __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
+        um.floor_divide, 'floordiv')
+    __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')
+    __divmod__ = _binary_method(um.divmod, 'divmod')
+    __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')
+    # __idivmod__ does not exist
+    # TODO: handle the optional third argument for __pow__?
+    __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow')
+    __lshift__, __rlshift__, __ilshift__ = _numeric_methods(
+        um.left_shift, 'lshift')
+    __rshift__, __rrshift__, __irshift__ = _numeric_methods(
+        um.right_shift, 'rshift')
+    __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and')
+    __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor')
+    __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or')
+
+    # unary methods
+    __neg__ = _unary_method(um.negative, 'neg')
+    __pos__ = _unary_method(um.positive, 'pos')
+    __abs__ = _unary_method(um.absolute, 'abs')
+    __invert__ = _unary_method(um.invert, 'invert')
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/mixins.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/mixins.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..c5744213372cf746fcba3a3b711b49730629e28c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/mixins.pyi
@@ -0,0 +1,74 @@
+from abc import ABCMeta, abstractmethod
+from typing import Literal as L, Any
+
+from numpy import ufunc
+
+__all__: list[str]
+
+# NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass,
+# even though it's reliant on subclasses implementing `__array_ufunc__`
+
+# NOTE: The accepted input- and output-types of the various dunders are
+# completely dependent on how `__array_ufunc__` is implemented.
+# As such, only limited type safety can be provided here.
+
+class NDArrayOperatorsMixin(metaclass=ABCMeta):
+    @abstractmethod
+    def __array_ufunc__(
+        self,
+        ufunc: ufunc,
+        method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
+        *inputs: Any,
+        **kwargs: Any,
+    ) -> Any: ...
+    def __lt__(self, other: Any) -> Any: ...
+    def __le__(self, other: Any) -> Any: ...
+    def __eq__(self, other: Any) -> Any: ...
+    def __ne__(self, other: Any) -> Any: ...
+    def __gt__(self, other: Any) -> Any: ...
+    def __ge__(self, other: Any) -> Any: ...
+    def __add__(self, other: Any) -> Any: ...
+    def __radd__(self, other: Any) -> Any: ...
+    def __iadd__(self, other: Any) -> Any: ...
+    def __sub__(self, other: Any) -> Any: ...
+    def __rsub__(self, other: Any) -> Any: ...
+    def __isub__(self, other: Any) -> Any: ...
+    def __mul__(self, other: Any) -> Any: ...
+    def __rmul__(self, other: Any) -> Any: ...
+    def __imul__(self, other: Any) -> Any: ...
+    def __matmul__(self, other: Any) -> Any: ...
+    def __rmatmul__(self, other: Any) -> Any: ...
+    def __imatmul__(self, other: Any) -> Any: ...
+    def __truediv__(self, other: Any) -> Any: ...
+    def __rtruediv__(self, other: Any) -> Any: ...
+    def __itruediv__(self, other: Any) -> Any: ...
+    def __floordiv__(self, other: Any) -> Any: ...
+    def __rfloordiv__(self, other: Any) -> Any: ...
+    def __ifloordiv__(self, other: Any) -> Any: ...
+    def __mod__(self, other: Any) -> Any: ...
+    def __rmod__(self, other: Any) -> Any: ...
+    def __imod__(self, other: Any) -> Any: ...
+    def __divmod__(self, other: Any) -> Any: ...
+    def __rdivmod__(self, other: Any) -> Any: ...
+    def __pow__(self, other: Any) -> Any: ...
+    def __rpow__(self, other: Any) -> Any: ...
+    def __ipow__(self, other: Any) -> Any: ...
+    def __lshift__(self, other: Any) -> Any: ...
+    def __rlshift__(self, other: Any) -> Any: ...
+    def __ilshift__(self, other: Any) -> Any: ...
+    def __rshift__(self, other: Any) -> Any: ...
+    def __rrshift__(self, other: Any) -> Any: ...
+    def __irshift__(self, other: Any) -> Any: ...
+    def __and__(self, other: Any) -> Any: ...
+    def __rand__(self, other: Any) -> Any: ...
+    def __iand__(self, other: Any) -> Any: ...
+    def __xor__(self, other: Any) -> Any: ...
+    def __rxor__(self, other: Any) -> Any: ...
+    def __ixor__(self, other: Any) -> Any: ...
+    def __or__(self, other: Any) -> Any: ...
+    def __ror__(self, other: Any) -> Any: ...
+    def __ior__(self, other: Any) -> Any: ...
+    def __neg__(self) -> Any: ...
+    def __pos__(self) -> Any: ...
+    def __abs__(self) -> Any: ...
+    def __invert__(self) -> Any: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/npyio.py b/.venv/lib/python3.11/site-packages/numpy/lib/npyio.py
new file mode 100644
index 0000000000000000000000000000000000000000..339b1dc6211377442f7c01b78c8b3c65c65be2b7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/npyio.py
@@ -0,0 +1,2547 @@
+import os
+import re
+import functools
+import itertools
+import warnings
+import weakref
+import contextlib
+import operator
+from operator import itemgetter, index as opindex, methodcaller
+from collections.abc import Mapping
+
+import numpy as np
+from . import format
+from ._datasource import DataSource
+from numpy.core import overrides
+from numpy.core.multiarray import packbits, unpackbits
+from numpy.core._multiarray_umath import _load_from_filelike
+from numpy.core.overrides import set_array_function_like_doc, set_module
+from ._iotools import (
+    LineSplitter, NameValidator, StringConverter, ConverterError,
+    ConverterLockError, ConversionWarning, _is_string_like,
+    has_nested_fields, flatten_dtype, easy_dtype, _decode_line
+    )
+
+from numpy.compat import (
+    asbytes, asstr, asunicode, os_fspath, os_PathLike,
+    pickle
+    )
+
+
+__all__ = [
+    'savetxt', 'loadtxt', 'genfromtxt',
+    'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
+    'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
+    ]
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+class BagObj:
+    """
+    BagObj(obj)
+
+    Convert attribute look-ups to getitems on the object passed in.
+
+    Parameters
+    ----------
+    obj : class instance
+        Object on which attribute look-up is performed.
+
+    Examples
+    --------
+    >>> from numpy.lib.npyio import BagObj as BO
+    >>> class BagDemo:
+    ...     def __getitem__(self, key): # An instance of BagObj(BagDemo)
+    ...                                 # will call this method when any
+    ...                                 # attribute look-up is required
+    ...         result = "Doesn't matter what you want, "
+    ...         return result + "you're gonna get this"
+    ...
+    >>> demo_obj = BagDemo()
+    >>> bagobj = BO(demo_obj)
+    >>> bagobj.hello_there
+    "Doesn't matter what you want, you're gonna get this"
+    >>> bagobj.I_can_be_anything
+    "Doesn't matter what you want, you're gonna get this"
+
+    """
+
+    def __init__(self, obj):
+        # Use weakref to make NpzFile objects collectable by refcount
+        self._obj = weakref.proxy(obj)
+
+    def __getattribute__(self, key):
+        try:
+            return object.__getattribute__(self, '_obj')[key]
+        except KeyError:
+            raise AttributeError(key) from None
+
+    def __dir__(self):
+        """
+        Enables dir(bagobj) to list the files in an NpzFile.
+
+        This also enables tab-completion in an interpreter or IPython.
+        """
+        return list(object.__getattribute__(self, '_obj').keys())
+
+
+def zipfile_factory(file, *args, **kwargs):
+    """
+    Create a ZipFile.
+
+    Allows for Zip64, and the `file` argument can accept file, str, or
+    pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
+    constructor.
+    """
+    if not hasattr(file, 'read'):
+        file = os_fspath(file)
+    import zipfile
+    kwargs['allowZip64'] = True
+    return zipfile.ZipFile(file, *args, **kwargs)
+
+
+class NpzFile(Mapping):
+    """
+    NpzFile(fid)
+
+    A dictionary-like object with lazy-loading of files in the zipped
+    archive provided on construction.
+
+    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
+    format. It assumes that files in the archive have a ``.npy`` extension;
+    other files are ignored.
+
+    The arrays and file strings are lazily loaded on either
+    getitem access using ``obj['key']`` or attribute lookup using
+    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
+    be obtained with ``obj.files`` and the ZipFile object itself using
+    ``obj.zip``.
+
+    Attributes
+    ----------
+    files : list of str
+        List of all files in the archive with a ``.npy`` extension.
+    zip : ZipFile instance
+        The ZipFile object initialized with the zipped archive.
+    f : BagObj instance
+        An object on which attribute lookup can be performed as an alternative
+        to getitem access on the `NpzFile` instance itself.
+    allow_pickle : bool, optional
+        Allow loading pickled data. Default: False
+
+        .. versionchanged:: 1.16.3
+            Made default False in response to CVE-2019-6446.
+
+    pickle_kwargs : dict, optional
+        Additional keyword arguments to pass on to pickle.load.
+        These are only useful when loading object arrays saved on
+        Python 2 when using Python 3.
+    max_header_size : int, optional
+        Maximum allowed size of the header.  Large headers may not be safe
+        to load securely and thus require explicitly passing a larger value.
+        See :py:func:`ast.literal_eval()` for details.
+        This option is ignored when `allow_pickle` is passed.  In that case
+        the file is by definition trusted and the limit is unnecessary.
+
+    Parameters
+    ----------
+    fid : file or str
+        The zipped archive to open. This is either a file-like object
+        or a string containing the path to the archive.
+    own_fid : bool, optional
+        Whether NpzFile should close the file handle.
+        Requires that `fid` is a file-like object.
+
+    Examples
+    --------
+    >>> from tempfile import TemporaryFile
+    >>> outfile = TemporaryFile()
+    >>> x = np.arange(10)
+    >>> y = np.sin(x)
+    >>> np.savez(outfile, x=x, y=y)
+    >>> _ = outfile.seek(0)
+
+    >>> npz = np.load(outfile)
+    >>> isinstance(npz, np.lib.npyio.NpzFile)
+    True
+    >>> npz
+    NpzFile 'object' with keys x, y
+    >>> sorted(npz.files)
+    ['x', 'y']
+    >>> npz['x']  # getitem access
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+    >>> npz.f.x  # attribute lookup
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+    """
+    # Make __exit__ safe if zipfile_factory raises an exception
+    zip = None
+    fid = None
+    _MAX_REPR_ARRAY_COUNT = 5
+
+    def __init__(self, fid, own_fid=False, allow_pickle=False,
+                 pickle_kwargs=None, *,
+                 max_header_size=format._MAX_HEADER_SIZE):
+        # Import is postponed to here since zipfile depends on gzip, an
+        # optional component of the so-called standard library.
+        _zip = zipfile_factory(fid)
+        self._files = _zip.namelist()
+        self.files = []
+        self.allow_pickle = allow_pickle
+        self.max_header_size = max_header_size
+        self.pickle_kwargs = pickle_kwargs
+        for x in self._files:
+            if x.endswith('.npy'):
+                self.files.append(x[:-4])
+            else:
+                self.files.append(x)
+        self.zip = _zip
+        self.f = BagObj(self)
+        if own_fid:
+            self.fid = fid
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.close()
+
+    def close(self):
+        """
+        Close the file.
+
+        """
+        if self.zip is not None:
+            self.zip.close()
+            self.zip = None
+        if self.fid is not None:
+            self.fid.close()
+            self.fid = None
+        self.f = None  # break reference cycle
+
+    def __del__(self):
+        self.close()
+
+    # Implement the Mapping ABC
+    def __iter__(self):
+        return iter(self.files)
+
+    def __len__(self):
+        return len(self.files)
+
+    def __getitem__(self, key):
+        # FIXME: This seems like it will copy strings around
+        #   more than is strictly necessary.  The zipfile
+        #   will read the string and then
+        #   the format.read_array will copy the string
+        #   to another place in memory.
+        #   It would be better if the zipfile could read
+        #   (or at least uncompress) the data
+        #   directly into the array memory.
+        member = False
+        if key in self._files:
+            member = True
+        elif key in self.files:
+            member = True
+            key += '.npy'
+        if member:
+            bytes = self.zip.open(key)
+            magic = bytes.read(len(format.MAGIC_PREFIX))
+            bytes.close()
+            if magic == format.MAGIC_PREFIX:
+                bytes = self.zip.open(key)
+                return format.read_array(bytes,
+                                         allow_pickle=self.allow_pickle,
+                                         pickle_kwargs=self.pickle_kwargs,
+                                         max_header_size=self.max_header_size)
+            else:
+                return self.zip.read(key)
+        else:
+            raise KeyError(f"{key} is not a file in the archive")
+
+    def __contains__(self, key):
+        return (key in self._files or key in self.files)
+
+    def __repr__(self):
+        # Get filename or default to `object`
+        if isinstance(self.fid, str):
+            filename = self.fid
+        else:
+            filename = getattr(self.fid, "name", "object")
+
+        # Get the name of arrays
+        array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])
+        if len(self.files) > self._MAX_REPR_ARRAY_COUNT:
+            array_names += "..."
+        return f"NpzFile {filename!r} with keys: {array_names}"
+
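+# NOTE (editorial annotation, not part of the upstream NumPy source): a minimal
+# sketch of the access patterns NpzFile provides -- membership tests, getitem,
+# and attribute lookup via ``.f`` (backed by BagObj). The path used here is
+# illustrative only.
+#
+#     >>> import numpy as np
+#     >>> np.savez('/tmp/example.npz', x=np.arange(3))
+#     >>> npz = np.load('/tmp/example.npz')
+#     >>> 'x' in npz
+#     True
+#     >>> npz.f.x
+#     array([0, 1, 2])
+#     >>> npz.close()
+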
+
+@set_module('numpy')
+def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
+         encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE):
+    """
+    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
+
+    .. warning:: Loading files that contain object arrays uses the ``pickle``
+                 module, which is not secure against erroneous or maliciously
+                 constructed data. Consider passing ``allow_pickle=False`` to
+                 load data that is known not to contain object arrays for the
+                 safer handling of untrusted sources.
+
+    Parameters
+    ----------
+    file : file-like object, string, or pathlib.Path
+        The file to read. File-like objects must support the
+        ``seek()`` and ``read()`` methods and must always
+        be opened in binary mode.  Pickled files require that the
+        file-like object support the ``readline()`` method as well.
+    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
+        If not None, then memory-map the file, using the given mode (see
+        `numpy.memmap` for a detailed description of the modes).  A
+        memory-mapped array is kept on disk. However, it can be accessed
+        and sliced like any ndarray.  Memory mapping is especially useful
+        for accessing small fragments of large files without reading the
+        entire file into memory.
+    allow_pickle : bool, optional
+        Allow loading pickled object arrays stored in npy files. Reasons for
+        disallowing pickles include security, as loading pickled data can
+        execute arbitrary code. If pickles are disallowed, loading object
+        arrays will fail. Default: False
+
+        .. versionchanged:: 1.16.3
+            Made default False in response to CVE-2019-6446.
+
+    fix_imports : bool, optional
+        Only useful when loading Python 2 generated pickled files on Python 3,
+        which includes npy/npz files containing object arrays. If `fix_imports`
+        is True, pickle will try to map the old Python 2 names to the new names
+        used in Python 3.
+    encoding : str, optional
+        What encoding to use when reading Python 2 strings. Only useful when
+        loading Python 2 generated pickled files in Python 3, which includes
+        npy/npz files containing object arrays. Values other than 'latin1',
+        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
+        data. Default: 'ASCII'
+    max_header_size : int, optional
+        Maximum allowed size of the header.  Large headers may not be safe
+        to load securely and thus require explicitly passing a larger value.
+        See :py:func:`ast.literal_eval()` for details.
+        This option is ignored when `allow_pickle` is passed.  In that case
+        the file is by definition trusted and the limit is unnecessary.
+
+    Returns
+    -------
+    result : array, tuple, dict, etc.
+        Data stored in the file. For ``.npz`` files, the returned instance
+        of NpzFile class must be closed to avoid leaking file descriptors.
+
+    Raises
+    ------
+    OSError
+        If the input file does not exist or cannot be read.
+    UnpicklingError
+        If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
+    ValueError
+        The file contains an object array, but ``allow_pickle=False`` given.
+    EOFError
+        When calling ``np.load`` multiple times on the same file handle,
+        if all data has already been read
+
+    See Also
+    --------
+    save, savez, savez_compressed, loadtxt
+    memmap : Create a memory-map to an array stored in a file on disk.
+    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
+
+    Notes
+    -----
+    - If the file contains pickle data, then whatever object is stored
+      in the pickle is returned.
+    - If the file is a ``.npy`` file, then a single array is returned.
+    - If the file is a ``.npz`` file, then a dictionary-like object is
+      returned, containing ``{filename: array}`` key-value pairs, one for
+      each file in the archive.
+    - If the file is a ``.npz`` file, the returned value supports the
+      context manager protocol in a similar fashion to the open function::
+
+        with load('foo.npz') as data:
+            a = data['a']
+
+      The underlying file descriptor is closed when exiting the 'with'
+      block.
+
+    Examples
+    --------
+    Store data to disk, and load it again:
+
+    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
+    >>> np.load('/tmp/123.npy')
+    array([[1, 2, 3],
+           [4, 5, 6]])
+
+    Store compressed data to disk, and load it again:
+
+    >>> a=np.array([[1, 2, 3], [4, 5, 6]])
+    >>> b=np.array([1, 2])
+    >>> np.savez('/tmp/123.npz', a=a, b=b)
+    >>> data = np.load('/tmp/123.npz')
+    >>> data['a']
+    array([[1, 2, 3],
+           [4, 5, 6]])
+    >>> data['b']
+    array([1, 2])
+    >>> data.close()
+
+    Mem-map the stored array, and then access the second row
+    directly from disk:
+
+    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
+    >>> X[1, :]
+    memmap([4, 5, 6])
+
+    """
+    if encoding not in ('ASCII', 'latin1', 'bytes'):
+        # The 'encoding' value for pickle also affects what encoding
+        # the serialized binary data of NumPy arrays is loaded
+        # in. Pickle does not pass on the encoding information to
+        # NumPy. The unpickling code in numpy.core.multiarray is
+        # written to assume that unicode data appearing where binary
+        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
+        #
+        # Other encoding values can corrupt binary data, and we
+        # purposefully disallow them. For the same reason, the errors=
+        # argument is not exposed, as values other than 'strict'
+        # result can similarly silently corrupt numerical data.
+        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
+
+    pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
+
+    with contextlib.ExitStack() as stack:
+        if hasattr(file, 'read'):
+            fid = file
+            own_fid = False
+        else:
+            fid = stack.enter_context(open(os_fspath(file), "rb"))
+            own_fid = True
+
+        # Code to distinguish NumPy binary files from pickles.
+        _ZIP_PREFIX = b'PK\x03\x04'
+        _ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
+        N = len(format.MAGIC_PREFIX)
+        magic = fid.read(N)
+        if not magic:
+            raise EOFError("No data left in file")
+        # If the file size is less than N, we need to make sure not
+        # to seek past the beginning of the file
+        fid.seek(-min(N, len(magic)), 1)  # back-up
+        if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
+            # zip-file (assume .npz)
+            # Potentially transfer file ownership to NpzFile
+            stack.pop_all()
+            ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
+                          pickle_kwargs=pickle_kwargs,
+                          max_header_size=max_header_size)
+            return ret
+        elif magic == format.MAGIC_PREFIX:
+            # .npy file
+            if mmap_mode:
+                if allow_pickle:
+                    max_header_size = 2**64
+                return format.open_memmap(file, mode=mmap_mode,
+                                          max_header_size=max_header_size)
+            else:
+                return format.read_array(fid, allow_pickle=allow_pickle,
+                                         pickle_kwargs=pickle_kwargs,
+                                         max_header_size=max_header_size)
+        else:
+            # Try a pickle
+            if not allow_pickle:
+                raise ValueError("Cannot load file containing pickled data "
+                                 "when allow_pickle=False")
+            try:
+                return pickle.load(fid, **pickle_kwargs)
+            except Exception as e:
+                raise pickle.UnpicklingError(
+                    f"Failed to interpret file {file!r} as a pickle") from e
+
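+# NOTE (editorial annotation, not part of the upstream NumPy source): a rough
+# sketch of the dispatch above -- ``.npz`` archives yield an NpzFile, ``.npy``
+# files yield an ndarray (or a memmap when ``mmap_mode`` is given), and
+# anything else is only unpickled when ``allow_pickle=True``. Paths are
+# illustrative.
+#
+#     >>> import numpy as np
+#     >>> np.save('/tmp/a.npy', np.arange(4))
+#     >>> np.load('/tmp/a.npy')
+#     array([0, 1, 2, 3])
+#     >>> np.load('/tmp/a.npy', mmap_mode='r')
+#     memmap([0, 1, 2, 3])
+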
+
+def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
+    return (arr,)
+
+
+@array_function_dispatch(_save_dispatcher)
+def save(file, arr, allow_pickle=True, fix_imports=True):
+    """
+    Save an array to a binary file in NumPy ``.npy`` format.
+
+    Parameters
+    ----------
+    file : file, str, or pathlib.Path
+        File or filename to which the data is saved.  If file is a file-object,
+        then the filename is unchanged.  If file is a string or Path, a ``.npy``
+        extension will be appended to the filename if it does not already
+        have one.
+    arr : array_like
+        Array data to be saved.
+    allow_pickle : bool, optional
+        Allow saving object arrays using Python pickles. Reasons for disallowing
+        pickles include security (loading pickled data can execute arbitrary
+        code) and portability (pickled objects may not be loadable on different
+        Python installations, for example if the stored objects require libraries
+        that are not available, and not all pickled data is compatible between
+        Python 2 and Python 3).
+        Default: True
+    fix_imports : bool, optional
+        Only useful in forcing objects in object arrays on Python 3 to be
+        pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
+        will try to map the new Python 3 names to the old module names used in
+        Python 2, so that the pickle data stream is readable with Python 2.
+
+    See Also
+    --------
+    savez : Save several arrays into a ``.npz`` archive
+    savetxt, load
+
+    Notes
+    -----
+    For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+    Any data saved to the file is appended to the end of the file.
+
+    Examples
+    --------
+    >>> from tempfile import TemporaryFile
+    >>> outfile = TemporaryFile()
+
+    >>> x = np.arange(10)
+    >>> np.save(outfile, x)
+
+    >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
+    >>> np.load(outfile)
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+
+    >>> with open('test.npy', 'wb') as f:
+    ...     np.save(f, np.array([1, 2]))
+    ...     np.save(f, np.array([1, 3]))
+    >>> with open('test.npy', 'rb') as f:
+    ...     a = np.load(f)
+    ...     b = np.load(f)
+    >>> print(a, b)
+    # [1 2] [1 3]
+    """
+    if hasattr(file, 'write'):
+        file_ctx = contextlib.nullcontext(file)
+    else:
+        file = os_fspath(file)
+        if not file.endswith('.npy'):
+            file = file + '.npy'
+        file_ctx = open(file, "wb")
+
+    with file_ctx as fid:
+        arr = np.asanyarray(arr)
+        format.write_array(fid, arr, allow_pickle=allow_pickle,
+                           pickle_kwargs=dict(fix_imports=fix_imports))
+
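+# NOTE (editorial annotation, not part of the upstream NumPy source): when
+# ``file`` is a path without a ``.npy`` suffix, ``save`` appends one, so the
+# round trip below loads ``'/tmp/xyz.npy'`` rather than ``'/tmp/xyz'``. Paths
+# are illustrative.
+#
+#     >>> import numpy as np
+#     >>> np.save('/tmp/xyz', np.eye(2))
+#     >>> np.load('/tmp/xyz.npy')
+#     array([[1., 0.],
+#            [0., 1.]])
+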
+
+def _savez_dispatcher(file, *args, **kwds):
+    yield from args
+    yield from kwds.values()
+
+
+@array_function_dispatch(_savez_dispatcher)
+def savez(file, *args, **kwds):
+    """Save several arrays into a single file in uncompressed ``.npz`` format.
+
+    Provide arrays as keyword arguments to store them under the
+    corresponding name in the output file: ``savez(fn, x=x, y=y)``.
+
+    If arrays are specified as positional arguments, i.e., ``savez(fn,
+    x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+    Parameters
+    ----------
+    file : str or file
+        Either the filename (string) or an open file (file-like object)
+        where the data will be saved. If file is a string or a Path, the
+        ``.npz`` extension will be appended to the filename if it is not
+        already there.
+    args : Arguments, optional
+        Arrays to save to the file. Please use keyword arguments (see
+        `kwds` below) to assign names to arrays.  Arrays specified as
+        args will be named "arr_0", "arr_1", and so on.
+    kwds : Keyword arguments, optional
+        Arrays to save to the file. Each array will be saved to the
+        output file with its corresponding keyword name.
+
+    Returns
+    -------
+    None
+
+    See Also
+    --------
+    save : Save a single array to a binary file in NumPy format.
+    savetxt : Save an array to a file as plain text.
+    savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+    Notes
+    -----
+    The ``.npz`` file format is a zipped archive of files named after the
+    variables they contain.  The archive is not compressed and each file
+    in the archive contains one variable in ``.npy`` format. For a
+    description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
+    returned. This is a dictionary-like object which can be queried for
+    its list of arrays (with the ``.files`` attribute), and for the arrays
+    themselves.
+
+    Keys passed in `kwds` are used as filenames inside the ZIP archive.
+    Therefore, keys should be valid filenames; e.g., avoid keys that begin with
+    ``/`` or contain ``.``.
+
+    When naming variables with keyword arguments, it is not possible to name a
+    variable ``file``, as this would cause the ``file`` argument to be defined
+    twice in the call to ``savez``.
+
+    Examples
+    --------
+    >>> from tempfile import TemporaryFile
+    >>> outfile = TemporaryFile()
+    >>> x = np.arange(10)
+    >>> y = np.sin(x)
+
+    Using `savez` with \\*args, the arrays are saved with default names.
+
+    >>> np.savez(outfile, x, y)
+    >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
+    >>> npzfile = np.load(outfile)
+    >>> npzfile.files
+    ['arr_0', 'arr_1']
+    >>> npzfile['arr_0']
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+    Using `savez` with \\**kwds, the arrays are saved with the keyword names.
+
+    >>> outfile = TemporaryFile()
+    >>> np.savez(outfile, x=x, y=y)
+    >>> _ = outfile.seek(0)
+    >>> npzfile = np.load(outfile)
+    >>> sorted(npzfile.files)
+    ['x', 'y']
+    >>> npzfile['x']
+    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+    """
+    _savez(file, args, kwds, False)
+
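+# NOTE (editorial annotation, not part of the upstream NumPy source): positional
+# arrays are stored under ``arr_0``, ``arr_1``, ... while keyword arrays keep
+# their names, so the two styles can be mixed. Path is illustrative.
+#
+#     >>> import numpy as np
+#     >>> np.savez('/tmp/mixed.npz', np.zeros(2), y=np.ones(2))
+#     >>> sorted(np.load('/tmp/mixed.npz').files)
+#     ['arr_0', 'y']
+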
+
+def _savez_compressed_dispatcher(file, *args, **kwds):
+    yield from args
+    yield from kwds.values()
+
+
+@array_function_dispatch(_savez_compressed_dispatcher)
+def savez_compressed(file, *args, **kwds):
+    """
+    Save several arrays into a single file in compressed ``.npz`` format.
+
+    Provide arrays as keyword arguments to store them under the
+    corresponding name in the output file: ``savez(fn, x=x, y=y)``.
+
+    If arrays are specified as positional arguments, i.e., ``savez(fn,
+    x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+    Parameters
+    ----------
+    file : str or file
+        Either the filename (string) or an open file (file-like object)
+        where the data will be saved. If file is a string or a Path, the
+        ``.npz`` extension will be appended to the filename if it is not
+        already there.
+    args : Arguments, optional
+        Arrays to save to the file. Please use keyword arguments (see
+        `kwds` below) to assign names to arrays.  Arrays specified as
+        args will be named "arr_0", "arr_1", and so on.
+    kwds : Keyword arguments, optional
+        Arrays to save to the file. Each array will be saved to the
+        output file with its corresponding keyword name.
+
+    Returns
+    -------
+    None
+
+    See Also
+    --------
+    numpy.save : Save a single array to a binary file in NumPy format.
+    numpy.savetxt : Save an array to a file as plain text.
+    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
+    numpy.load : Load the files created by savez_compressed.
+
+    Notes
+    -----
+    The ``.npz`` file format is a zipped archive of files named after the
+    variables they contain.  The archive is compressed with
+    ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
+    in ``.npy`` format. For a description of the ``.npy`` format, see
+    :py:mod:`numpy.lib.format`.
+
+
+    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
+    returned. This is a dictionary-like object which can be queried for
+    its list of arrays (with the ``.files`` attribute), and for the arrays
+    themselves.
+
+    Examples
+    --------
+    >>> test_array = np.random.rand(3, 2)
+    >>> test_vector = np.random.rand(4)
+    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
+    >>> loaded = np.load('/tmp/123.npz')
+    >>> print(np.array_equal(test_array, loaded['a']))
+    True
+    >>> print(np.array_equal(test_vector, loaded['b']))
+    True
+
+    """
+    _savez(file, args, kwds, True)
+
+
+def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
+    # Import is postponed to here since zipfile depends on gzip, an optional
+    # component of the so-called standard library.
+    import zipfile
+
+    if not hasattr(file, 'write'):
+        file = os_fspath(file)
+        if not file.endswith('.npz'):
+            file = file + '.npz'
+
+    namedict = kwds
+    for i, val in enumerate(args):
+        key = 'arr_%d' % i
+        if key in namedict.keys():
+            raise ValueError(
+                "Cannot use un-named variables and keyword %s" % key)
+        namedict[key] = val
+
+    if compress:
+        compression = zipfile.ZIP_DEFLATED
+    else:
+        compression = zipfile.ZIP_STORED
+
+    zipf = zipfile_factory(file, mode="w", compression=compression)
+
+    for key, val in namedict.items():
+        fname = key + '.npy'
+        val = np.asanyarray(val)
+        # always force zip64, gh-10776
+        with zipf.open(fname, 'w', force_zip64=True) as fid:
+            format.write_array(fid, val,
+                               allow_pickle=allow_pickle,
+                               pickle_kwargs=pickle_kwargs)
+
+    zipf.close()
+
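+# NOTE (editorial annotation, not part of the upstream NumPy source): because
+# positional arrays are keyed as ``arr_%d``, an explicit keyword argument with
+# the same name collides and the check above raises. Path is illustrative.
+#
+#     >>> import numpy as np
+#     >>> np.savez('/tmp/clash.npz', np.zeros(1), arr_0=np.ones(1))
+#     Traceback (most recent call last):
+#         ...
+#     ValueError: Cannot use un-named variables and keyword arr_0
+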
+
+def _ensure_ndmin_ndarray_check_param(ndmin):
+    """Just checks if the param ndmin is supported on
+        _ensure_ndmin_ndarray. It is intended to be used as
+        verification before running anything expensive.
+        e.g. loadtxt, genfromtxt
+    """
+    # Check correctness of the values of `ndmin`
+    if ndmin not in [0, 1, 2]:
+        raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
+
+def _ensure_ndmin_ndarray(a, *, ndmin: int):
+    """This is a helper function of loadtxt and genfromtxt to ensure
+        proper minimum dimension as requested
+
+        ndmin : int. Supported values 0, 1, 2
+                    ^^ whenever this changes, keep in sync with
+                       _ensure_ndmin_ndarray_check_param
+    """
+    # Verify that the array has at least dimensions `ndmin`.
+    # Tweak the size and shape of the arrays - remove extraneous dimensions
+    if a.ndim > ndmin:
+        a = np.squeeze(a)
+    # and ensure we have the minimum number of dimensions asked for
+    # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
+    if a.ndim < ndmin:
+        if ndmin == 1:
+            a = np.atleast_1d(a)
+        elif ndmin == 2:
+            a = np.atleast_2d(a).T
+
+    return a
+
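+# NOTE (editorial annotation, not part of the upstream NumPy source): a small
+# sketch of the helper above -- extra singleton dimensions are squeezed first,
+# then the result is padded back up to ``ndmin``; ``ndmin=2`` promotes a 1-D
+# result to a column vector via the transpose.
+#
+#     >>> import numpy as np
+#     >>> from numpy.lib.npyio import _ensure_ndmin_ndarray
+#     >>> _ensure_ndmin_ndarray(np.array([[1., 2., 3.]]), ndmin=1)
+#     array([1., 2., 3.])
+#     >>> _ensure_ndmin_ndarray(np.array([1., 2., 3.]), ndmin=2)
+#     array([[1.],
+#            [2.],
+#            [3.]])
+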
+
+# amount of lines loadtxt reads in one chunk, can be overridden for testing
+_loadtxt_chunksize = 50000
+
+
+def _check_nonneg_int(value, name="argument"):
+    try:
+        operator.index(value)
+    except TypeError:
+        raise TypeError(f"{name} must be an integer") from None
+    if value < 0:
+        raise ValueError(f"{name} must be nonnegative")
+
+
+def _preprocess_comments(iterable, comments, encoding):
+    """
+    Generator that consumes a line-yielding iterable and strips out the
+    multiple (or multi-character) comments from each line.
+    This is a pre-processing step to achieve feature parity with loadtxt
+    (we assume that this feature is a niche feature).
+    """
+    for line in iterable:
+        if isinstance(line, bytes):
+            # Need to handle conversion here, or the splitting would fail
+            line = line.decode(encoding)
+
+        for c in comments:
+            line = line.split(c, 1)[0]
+
+        yield line
+
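+# NOTE (editorial annotation, not part of the upstream NumPy source): a minimal
+# sketch of the generator above stripping several comment markers from each
+# line before the text reaches the main parser.
+#
+#     >>> from numpy.lib.npyio import _preprocess_comments
+#     >>> list(_preprocess_comments(["a, b  # note", "c // note", "d"],
+#     ...                           comments=["#", "//"], encoding="utf-8"))
+#     ['a, b  ', 'c ', 'd']
+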
+
+# The number of rows we read in one go if confronted with a parametric dtype
+_loadtxt_chunksize = 50000
+
+
+def _read(fname, *, delimiter=',', comment='#', quote='"',
+          imaginary_unit='j', usecols=None, skiplines=0,
+          max_rows=None, converters=None, ndmin=None, unpack=False,
+          dtype=np.float64, encoding="bytes"):
+    r"""
+    Read a NumPy array from a text file.
+
+    Parameters
+    ----------
+    fname : str or file object
+        The filename or the file to be read.
+    delimiter : str, optional
+        Field delimiter of the fields in line of the file.
+        Default is a comma, ','.  If None any sequence of whitespace is
+        considered a delimiter.
+    comment : str or sequence of str or None, optional
+        Character that begins a comment.  All text from the comment
+        character to the end of the line is ignored.
+        Multiple comments or multiple-character comment strings are supported,
+        but may be slower and `quote` must be empty if used.
+        Use None to disable all use of comments.
+    quote : str or None, optional
+        Character that is used to quote string fields. Default is '"'
+        (a double quote). Use None to disable quote support.
+    imaginary_unit : str, optional
+        Character that represents the imaginary unit `sqrt(-1)`.
+        Default is 'j'.
+    usecols : array_like, optional
+        A one-dimensional array of integer column numbers.  These are the
+        columns from the file to be included in the array.  If this value
+        is not given, all the columns are used.
+    skiplines : int, optional
+        Number of lines to skip before interpreting the data in the file.
+    max_rows : int, optional
+        Maximum number of rows of data to read.  Default is to read the
+        entire file.
+    converters : dict or callable, optional
+        A function to parse all columns strings into the desired value, or
+        a dictionary mapping column number to a parser function.
+        E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
+        Converters can also be used to provide a default value for missing
+        data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
+        convert empty fields to 0.
+        Default: None
+    ndmin : int, optional
+        Minimum dimension of the array returned.
+        Allowed values are 0, 1 or 2.  Default is 0.
+    unpack : bool, optional
+        If True, the returned array is transposed, so that arguments may be
+        unpacked using ``x, y, z = read(...)``.  When used with a structured
+        data-type, arrays are returned for each field.  Default is False.
+    dtype : numpy data type
+        A NumPy dtype instance, can be a structured dtype to map to the
+        columns of the file.
+    encoding : str, optional
+        Encoding used to decode the inputfile. The special value 'bytes'
+        (the default) enables backwards-compatible behavior for `converters`,
+        ensuring that inputs to the converter functions are encoded
+        bytes objects. The special value 'bytes' has no additional effect if
+        ``converters=None``. If encoding is ``'bytes'`` or ``None``, the
+        default system encoding is used.
+
+    Returns
+    -------
+    ndarray
+        NumPy array.
+
+    Examples
+    --------
+    First we create a file for the example.
+
+    >>> s1 = '1.0,2.0,3.0\n4.0,5.0,6.0\n'
+    >>> with open('example1.csv', 'w') as f:
+    ...     f.write(s1)
+    >>> a1 = read_from_filename('example1.csv')
+    >>> a1
+    array([[1., 2., 3.],
+           [4., 5., 6.]])
+
+    The second example has columns with different data types, so a
+    one-dimensional array with a structured data type is returned.
+    The tab character is used as the field delimiter.
+
+    >>> s2 = '1.0\t10\talpha\n2.3\t25\tbeta\n4.5\t16\tgamma\n'
+    >>> with open('example2.tsv', 'w') as f:
+    ...     f.write(s2)
+    >>> a2 = read_from_filename('example2.tsv', delimiter='\t')
+    >>> a2
+    array([(1. , 10, b'alpha'), (2.3, 25, b'beta'), (4.5, 16, b'gamma')],
+          dtype=[('f0', '<f8'), ('f1', '<i8'), ('f2', 'S5')])
+                if max_rows >= 0:
+                    max_rows -= chunk_size
+                if len(next_arr) < chunk_size:
+                    # There was less data than requested, so we are done.
+                    break
+
+            # Need at least one chunk, but if empty, the last one may have
+            # the wrong shape.
+            if len(chunks) > 1 and len(chunks[-1]) == 0:
+                del chunks[-1]
+            if len(chunks) == 1:
+                arr = chunks[0]
+            else:
+                arr = np.concatenate(chunks, axis=0)
+
+    # NOTE: ndmin works as advertised for structured dtypes, but normally
+    #       these would return a 1D result plus the structured dimension,
+    #       so ndmin=2 adds a third dimension even when no squeezing occurs.
+    #       A `squeeze=False` could be a better solution (pandas uses squeeze).
+    arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)
+
+    if arr.shape:
+        if arr.shape[0] == 0:
+            warnings.warn(
+                f'loadtxt: input contained no data: "{fname}"',
+                category=UserWarning,
+                stacklevel=3
+            )
+
+    if unpack:
+        # Unpack structured dtypes if requested:
+        dt = arr.dtype
+        if dt.names is not None:
+            # For structured arrays, return an array for each field.
+            return [arr[field] for field in dt.names]
+        else:
+            return arr.T
+    else:
+        return arr
+
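+# NOTE (editorial annotation, not part of the upstream NumPy source): the
+# ``unpack`` branch above returns one array per field for structured dtypes,
+# which is what lets callers tuple-unpack mixed-type columns. A rough
+# illustration through the public ``loadtxt`` wrapper:
+#
+#     >>> from io import StringIO
+#     >>> import numpy as np
+#     >>> labels, values = np.loadtxt(StringIO("a 1\nb 2"),
+#     ...                             dtype=[("label", "U1"), ("value", int)],
+#     ...                             unpack=True)
+#     >>> labels
+#     array(['a', 'b'], dtype='<U1')
+#     >>> values
+#     array([1, 2])
+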
+
+@set_array_function_like_doc
+@set_module('numpy')
+def loadtxt(fname, dtype=float, comments='#', delimiter=None,
+            converters=None, skiprows=0, usecols=None, unpack=False,
+            ndmin=0, encoding='bytes', max_rows=None, *, quotechar=None,
+            like=None):
+    r"""
+    Load data from a text file.
+
+    Parameters
+    ----------
+    fname : file, str, pathlib.Path, list of str, generator
+        File, filename, list, or generator to read.  If the filename
+        extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
+        that generators must return bytes or strings. The strings
+        in a list or produced by a generator are treated as lines.
+    dtype : data-type, optional
+        Data-type of the resulting array; default: float.  If this is a
+        structured data-type, the resulting array will be 1-dimensional, and
+        each row will be interpreted as an element of the array.  In this
+        case, the number of columns used must match the number of fields in
+        the data-type.
+    comments : str or sequence of str or None, optional
+        The characters or list of characters used to indicate the start of a
+        comment. None implies no comments. For backwards compatibility, byte
+        strings will be decoded as 'latin1'. The default is '#'.
+    delimiter : str, optional
+        The character used to separate the values. For backwards compatibility,
+        byte strings will be decoded as 'latin1'. The default is whitespace.
+
+        .. versionchanged:: 1.23.0
+           Only single character delimiters are supported. Newline characters
+           cannot be used as the delimiter.
+
+    converters : dict or callable, optional
+        Converter functions to customize value parsing. If `converters` is
+        callable, the function is applied to all columns, else it must be a
+        dict that maps column number to a parser function.
+        See examples for further details.
+        Default: None.
+
+        .. versionchanged:: 1.23.0
+           The ability to pass a single callable to be applied to all columns
+           was added.
+
+    skiprows : int, optional
+        Skip the first `skiprows` lines, including comments; default: 0.
+    usecols : int or sequence, optional
+        Which columns to read, with 0 being the first. For example,
+        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
+        The default, None, results in all columns being read.
+
+        .. versionchanged:: 1.11.0
+            When a single column has to be read it is possible to use
+            an integer instead of a tuple. E.g ``usecols = 3`` reads the
+            fourth column the same way as ``usecols = (3,)`` would.
+    unpack : bool, optional
+        If True, the returned array is transposed, so that arguments may be
+        unpacked using ``x, y, z = loadtxt(...)``.  When used with a
+        structured data-type, arrays are returned for each field.
+        Default is False.
+    ndmin : int, optional
+        The returned array will have at least `ndmin` dimensions.
+        Otherwise mono-dimensional axes will be squeezed.
+        Legal values: 0 (default), 1 or 2.
+
+        .. versionadded:: 1.6.0
+    encoding : str, optional
+        Encoding used to decode the inputfile. Does not apply to input streams.
+        The special value 'bytes' enables backward compatibility workarounds
+        that ensure you receive byte arrays as results if possible and pass
+        'latin1' encoded strings to converters. Override this value to receive
+        unicode arrays and pass strings as input to converters.  If set to None
+        the system default is used. The default value is 'bytes'.
+
+        .. versionadded:: 1.14.0
+    max_rows : int, optional
+        Read `max_rows` rows of content after `skiprows` lines. The default is
+        to read all the rows. Note that empty rows containing no data such as
+        empty lines and comment lines are not counted towards `max_rows`,
+        while such lines are counted in `skiprows`.
+
+        .. versionadded:: 1.16.0
+
+        .. versionchanged:: 1.23.0
+            Lines containing no data, including comment lines (e.g., lines
+            starting with '#' or as specified via `comments`) are not counted
+            towards `max_rows`.
+    quotechar : unicode character or None, optional
+        The character used to denote the start and end of a quoted item.
+        Occurrences of the delimiter or comment characters are ignored within
+        a quoted item. The default value is ``quotechar=None``, which means
+        quoting support is disabled.
+
+        If two consecutive instances of `quotechar` are found within a quoted
+        field, the first is treated as an escape character. See examples.
+
+        .. versionadded:: 1.23.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file.
+
+    See Also
+    --------
+    load, fromstring, fromregex
+    genfromtxt : Load data with missing values handled as specified.
+    scipy.io.loadmat : reads MATLAB data files
+
+    Notes
+    -----
+    This function aims to be a fast reader for simply formatted files.  The
+    `genfromtxt` function provides more sophisticated handling of, e.g.,
+    lines with missing values.
+
+    Each row in the input text file must have the same number of values to be
+    able to read all values. If all rows do not have the same number of values,
+    a subset of up to n columns (where n is the least number of values present
+    in all rows) can be read by specifying the columns via `usecols`.
+
+    .. versionadded:: 1.10.0
+
+    The strings produced by the Python float.hex method can be used as
+    input for floats.
+
+    Examples
+    --------
+    >>> from io import StringIO   # StringIO behaves like a file object
+    >>> c = StringIO("0 1\n2 3")
+    >>> np.loadtxt(c)
+    array([[0., 1.],
+           [2., 3.]])
+
+    >>> d = StringIO("M 21 72\nF 35 58")
+    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
+    ...                      'formats': ('S1', 'i4', 'f4')})
+    array([(b'M', 21, 72.), (b'F', 35, 58.)],
+          dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
+
+    >>> c = StringIO("1,0,2\n3,0,4")
+    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
+    >>> x
+    array([1., 3.])
+    >>> y
+    array([2., 4.])
+
+    The `converters` argument is used to specify functions to preprocess the
+    text prior to parsing. `converters` can be a dictionary that maps
+    preprocessing functions to each column:
+
+    >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
+    >>> conv = {
+    ...     0: lambda x: np.floor(float(x)),  # conversion fn for column 0
+    ...     1: lambda x: np.ceil(float(x)),  # conversion fn for column 1
+    ... }
+    >>> np.loadtxt(s, delimiter=",", converters=conv)
+    array([[1., 3.],
+           [3., 5.]])
+
+    `converters` can be a callable instead of a dictionary, in which case it
+    is applied to all columns:
+
+    >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
+    >>> import functools
+    >>> conv = functools.partial(int, base=16)
+    >>> np.loadtxt(s, converters=conv)
+    array([[222., 173.],
+           [192., 222.]])
+
+    This example shows how `converters` can be used to convert a field
+    with a trailing minus sign into a negative number.
+
+    >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
+    >>> def conv(fld):
+    ...     return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
+    ...
+    >>> np.loadtxt(s, converters=conv)
+    array([[ 10.01, -31.25],
+           [ 19.22,  64.31],
+           [-17.57,  63.94]])
+
+    Using a callable as the converter can be particularly useful for handling
+    values with different formatting, e.g. floats with underscores:
+
+    >>> s = StringIO("1 2.7 100_000")
+    >>> np.loadtxt(s, converters=float)
+    array([1.e+00, 2.7e+00, 1.e+05])
+
+    This idea can be extended to automatically handle values specified in
+    many different formats:
+
+    >>> def conv(val):
+    ...     try:
+    ...         return float(val)
+    ...     except ValueError:
+    ...         return float.fromhex(val)
+    >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
+    >>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None)
+    array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
+
+    Note that with the default ``encoding="bytes"``, the inputs to the
+    converter function are latin-1 encoded byte strings. To deactivate the
+    implicit encoding prior to conversion, use ``encoding=None``
+
+    >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
+    >>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x)
+    >>> np.loadtxt(s, converters=conv, encoding=None)
+    array([[ 10.01, -31.25],
+           [ 19.22,  64.31],
+           [-17.57,  63.94]])
+
+    Support for quoted fields is enabled with the `quotechar` parameter.
+    Comment and delimiter characters are ignored when they appear within a
+    quoted item delineated by `quotechar`:
+
+    >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
+    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+    >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
+    array([('alpha, #42', 10.), ('beta, #64',  2.)],
+          dtype=[('label', '<U12'), ('value', '<f8')])
+
+    Quoted fields can be separated by multiple whitespace characters:
+
+    >>> s = StringIO('"alpha, #42"       10.0\n"beta, #64" 2.0\n')
+    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+    >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
+    array([('alpha, #42', 10.), ('beta, #64',  2.)],
+          dtype=[('label', '<U12'), ('value', '<f8')])
+
+    Two consecutive quote characters within a quoted field are treated as a
+    single escaped character:
+
+    >>> s = StringIO('"Hello, my name is ""Monty""!"')
+    >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
+    array('Hello, my name is "Monty"!', dtype='<U26')
+
+    Read subset of columns when all rows do not contain equal number of values:
+
+    >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
+    >>> np.loadtxt(d, usecols=(0, 1))
+    array([[ 1.,  2.],
+           [ 2.,  4.],
+           [ 3.,  9.],
+           [ 4., 16.]])
+
+    """
+
+    if like is not None:
+        return _loadtxt_with_like(
+            like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
+            converters=converters, skiprows=skiprows, usecols=usecols,
+            unpack=unpack, ndmin=ndmin, encoding=encoding,
+            max_rows=max_rows
+        )
+
+    if isinstance(delimiter, bytes):
+        delimiter.decode("latin1")
+
+    if dtype is None:
+        dtype = np.float64
+
+    comment = comments
+    # Control character type conversions for Py3 convenience
+    if comment is not None:
+        if isinstance(comment, (str, bytes)):
+            comment = [comment]
+        comment = [
+            x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
+    if isinstance(delimiter, bytes):
+        delimiter = delimiter.decode('latin1')
+
+    arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
+                converters=converters, skiplines=skiprows, usecols=usecols,
+                unpack=unpack, ndmin=ndmin, encoding=encoding,
+                max_rows=max_rows, quote=quotechar)
+
+    return arr
+
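+# NOTE (editorial annotation, not part of the upstream NumPy source): byte
+# ``comments`` and ``delimiter`` values are decoded as latin1 above before
+# being handed to ``_read``, so byte and str arguments behave the same.
+#
+#     >>> from io import StringIO
+#     >>> import numpy as np
+#     >>> np.loadtxt(StringIO("1;2# note\n3;4"), delimiter=b";", comments=b"#")
+#     array([[1., 2.],
+#            [3., 4.]])
+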
+
+_loadtxt_with_like = array_function_dispatch()(loadtxt)
+
+
+def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
+                        header=None, footer=None, comments=None,
+                        encoding=None):
+    return (X,)
+
+
+@array_function_dispatch(_savetxt_dispatcher)
+def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
+            footer='', comments='# ', encoding=None):
+    """
+    Save an array to a text file.
+
+    Parameters
+    ----------
+    fname : filename or file handle
+        If the filename ends in ``.gz``, the file is automatically saved in
+        compressed gzip format.  `loadtxt` understands gzipped files
+        transparently.
+    X : 1D or 2D array_like
+        Data to be saved to a text file.
+    fmt : str or sequence of strs, optional
+        A single format (%10.5f), a sequence of formats, or a
+        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
+        case `delimiter` is ignored. For complex `X`, the legal options
+        for `fmt` are:
+
+        * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
+          like `' (%s+%sj)' % (fmt, fmt)`
+        * a full string specifying every real and imaginary part, e.g.
+          `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
+        * a list of specifiers, one per column - in this case, the real
+          and imaginary part must have separate specifiers,
+          e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
+    delimiter : str, optional
+        String or character separating columns.
+    newline : str, optional
+        String or character separating lines.
+
+        .. versionadded:: 1.5.0
+    header : str, optional
+        String that will be written at the beginning of the file.
+
+        .. versionadded:: 1.7.0
+    footer : str, optional
+        String that will be written at the end of the file.
+
+        .. versionadded:: 1.7.0
+    comments : str, optional
+        String that will be prepended to the ``header`` and ``footer`` strings,
+        to mark them as comments. Default: '# ',  as expected by e.g.
+        ``numpy.loadtxt``.
+
+        .. versionadded:: 1.7.0
+    encoding : {None, str}, optional
+        Encoding used to encode the outputfile. Does not apply to output
+        streams. If the encoding is something other than 'bytes' or 'latin1'
+        you will not be able to load the file in NumPy versions < 1.14. Default
+        is 'latin1'.
+
+        .. versionadded:: 1.14.0
+
+
+    See Also
+    --------
+    save : Save an array to a binary file in NumPy ``.npy`` format
+    savez : Save several arrays into an uncompressed ``.npz`` archive
+    savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+    Notes
+    -----
+    Further explanation of the `fmt` parameter
+    (``%[flag]width[.precision]specifier``):
+
+    flags:
+        ``-`` : left justify
+
+        ``+`` : Forces to precede result with + or -.
+
+        ``0`` : Left pad the number with zeros instead of space (see width).
+
+    width:
+        Minimum number of characters to be printed. The value is not truncated
+        if it has more characters.
+
+    precision:
+        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
+          digits.
+        - For ``e, E`` and ``f`` specifiers, the number of digits to print
+          after the decimal point.
+        - For ``g`` and ``G``, the maximum number of significant digits.
+        - For ``s``, the maximum number of characters.
+
+    specifiers:
+        ``c`` : character
+
+        ``d`` or ``i`` : signed decimal integer
+
+        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
+
+        ``f`` : decimal floating point
+
+        ``g,G`` : use the shorter of ``e,E`` or ``f``
+
+        ``o`` : signed octal
+
+        ``s`` : string of characters
+
+        ``u`` : unsigned decimal integer
+
+        ``x,X`` : unsigned hexadecimal integer
+
+    This explanation of ``fmt`` is not complete, for an exhaustive
+    specification see [1]_.
+
+    References
+    ----------
+    .. [1] `Format Specification Mini-Language
+           <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
+           Python Documentation.
+
+    Examples
+    --------
+    >>> x = y = z = np.arange(0.0,5.0,1.0)
+    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
+    >>> np.savetxt('test.out', (x,y,z))   # x,y,z equal sized 1D arrays
+    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
+
+    """
+
+    # Py3 conversions first
+    if isinstance(fmt, bytes):
+        fmt = asstr(fmt)
+    delimiter = asstr(delimiter)
+
+    class WriteWrap:
+        """Convert to bytes on bytestream inputs.
+
+        """
+        def __init__(self, fh, encoding):
+            self.fh = fh
+            self.encoding = encoding
+            self.do_write = self.first_write
+
+        def close(self):
+            self.fh.close()
+
+        def write(self, v):
+            self.do_write(v)
+
+        def write_bytes(self, v):
+            if isinstance(v, bytes):
+                self.fh.write(v)
+            else:
+                self.fh.write(v.encode(self.encoding))
+
+        def write_normal(self, v):
+            self.fh.write(asunicode(v))
+
+        def first_write(self, v):
+            try:
+                self.write_normal(v)
+                self.write = self.write_normal
+            except TypeError:
+                # input is probably a bytestream
+                self.write_bytes(v)
+                self.write = self.write_bytes
+
+    own_fh = False
+    if isinstance(fname, os_PathLike):
+        fname = os_fspath(fname)
+    if _is_string_like(fname):
+        # datasource doesn't support creating a new file ...
+        open(fname, 'wt').close()
+        fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
+        own_fh = True
+    elif hasattr(fname, 'write'):
+        # wrap to handle byte output streams
+        fh = WriteWrap(fname, encoding or 'latin1')
+    else:
+        raise ValueError('fname must be a string or file handle')
+
+    try:
+        X = np.asarray(X)
+
+        # Handle 1-dimensional arrays
+        if X.ndim == 0 or X.ndim > 2:
+            raise ValueError(
+                "Expected 1D or 2D array, got %dD array instead" % X.ndim)
+        elif X.ndim == 1:
+            # Common case -- 1d array of numbers
+            if X.dtype.names is None:
+                X = np.atleast_2d(X).T
+                ncol = 1
+
+            # Complex dtype -- each field indicates a separate column
+            else:
+                ncol = len(X.dtype.names)
+        else:
+            ncol = X.shape[1]
+
+        iscomplex_X = np.iscomplexobj(X)
+        # `fmt` can be a string with multiple insertion points or a
+        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
+        if type(fmt) in (list, tuple):
+            if len(fmt) != ncol:
+                raise AttributeError('fmt has wrong shape.  %s' % str(fmt))
+            format = asstr(delimiter).join(map(asstr, fmt))
+        elif isinstance(fmt, str):
+            n_fmt_chars = fmt.count('%')
+            error = ValueError('fmt has wrong number of %% formats:  %s' % fmt)
+            if n_fmt_chars == 1:
+                if iscomplex_X:
+                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
+                else:
+                    fmt = [fmt, ] * ncol
+                format = delimiter.join(fmt)
+            elif iscomplex_X and n_fmt_chars != (2 * ncol):
+                raise error
+            elif ((not iscomplex_X) and n_fmt_chars != ncol):
+                raise error
+            else:
+                format = fmt
+        else:
+            raise ValueError('invalid fmt: %r' % (fmt,))
+
+        if len(header) > 0:
+            header = header.replace('\n', '\n' + comments)
+            fh.write(comments + header + newline)
+        if iscomplex_X:
+            for row in X:
+                row2 = []
+                for number in row:
+                    row2.append(number.real)
+                    row2.append(number.imag)
+                s = format % tuple(row2) + newline
+                fh.write(s.replace('+-', '-'))
+        else:
+            for row in X:
+                try:
+                    v = format % tuple(row) + newline
+                except TypeError as e:
+                    raise TypeError("Mismatch between array dtype ('%s') and "
+                                    "format specifier ('%s')"
+                                    % (str(X.dtype), format)) from e
+                fh.write(v)
+
+        if len(footer) > 0:
+            footer = footer.replace('\n', '\n' + comments)
+            fh.write(comments + footer + newline)
+    finally:
+        if own_fh:
+            fh.close()
+
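+# NOTE (editorial annotation, not part of the upstream NumPy source): a small
+# sketch of the ``fmt`` handling above -- a multi-format string is used as-is
+# (``delimiter`` is ignored), while a sequence must supply one specifier per
+# column; the ``header`` line is prefixed with ``comments``. Path is
+# illustrative.
+#
+#     >>> import numpy as np
+#     >>> data = np.array([[1, 2.5], [3, 4.25]])
+#     >>> np.savetxt('/tmp/out.txt', data, fmt='%d: %.2f', header='col pair')
+#     >>> print(open('/tmp/out.txt').read(), end='')
+#     # col pair
+#     1: 2.50
+#     3: 4.25
+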
+
+@set_module('numpy')
+def fromregex(file, regexp, dtype, encoding=None):
+    r"""
+    Construct an array from a text file, using regular expression parsing.
+
+    The returned array is always a structured array, and is constructed from
+    all matches of the regular expression in the file. Groups in the regular
+    expression are converted to fields of the structured array.
+
+    Parameters
+    ----------
+    file : path or file
+        Filename or file object to read.
+
+        .. versionchanged:: 1.22.0
+            Now accepts `os.PathLike` implementations.
+    regexp : str or regexp
+        Regular expression used to parse the file.
+        Groups in the regular expression correspond to fields in the dtype.
+    dtype : dtype or list of dtypes
+        Dtype for the structured array; must be a structured datatype.
+    encoding : str, optional
+        Encoding used to decode the inputfile. Does not apply to input streams.
+
+        .. versionadded:: 1.14.0
+
+    Returns
+    -------
+    output : ndarray
+        The output array, containing the part of the content of `file` that
+        was matched by `regexp`. `output` is always a structured array.
+
+    Raises
+    ------
+    TypeError
+        When `dtype` is not a valid dtype for a structured array.
+
+    See Also
+    --------
+    fromstring, loadtxt
+
+    Notes
+    -----
+    Dtypes for structured arrays can be specified in several forms, but all
+    forms specify at least the data type and field name. For details see
+    `basics.rec`.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> text = StringIO("1312 foo\n1534  bar\n444   qux")
+
+    >>> regexp = r"(\d+)\s+(...)"  # match [digits, whitespace, anything]
+    >>> output = np.fromregex(text, regexp,
+    ...                       [('num', np.int64), ('key', 'S3')])
+    >>> output
+    array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
+          dtype=[('num', '<i8'), ('key', 'S3')])
+    >>> output['num']
+    array([1312, 1534,  444])
+
+    """
+    own_fh = False
+    if not hasattr(file, "read"):
+        file = os.fspath(file)
+        file = np.lib._datasource.open(file, 'rt', encoding=encoding)
+        own_fh = True
+
+    try:
+        if not isinstance(dtype, np.dtype):
+            dtype = np.dtype(dtype)
+        if dtype.names is None:
+            raise TypeError('dtype must be a structured datatype.')
+
+        content = file.read()
+        if isinstance(content, bytes) and isinstance(regexp, str):
+            regexp = asbytes(regexp)
+        elif isinstance(content, str) and isinstance(regexp, bytes):
+            regexp = asstr(regexp)
+
+        if not hasattr(regexp, 'match'):
+            regexp = re.compile(regexp)
+        seq = regexp.findall(content)
+        if seq and not isinstance(seq[0], tuple):
+            # Only one group is in the regexp.
+            # Create the new array as a single data-type and then
+            #   re-interpret as a single-field structured array.
+            newdtype = np.dtype(dtype[dtype.names[0]])
+            output = np.array(seq, dtype=newdtype)
+            output.dtype = dtype
+        else:
+            output = np.array(seq, dtype=dtype)
+
+        return output
+    finally:
+        if own_fh:
+            file.close()
+
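+# A minimal usage sketch (illustrative only, not part of the library source):
+# with a single-group pattern, the matches are first read as a plain array and
+# then re-viewed as a one-field structured array, as in the branch above, e.g.
+#
+#     from io import StringIO
+#     out = fromregex(StringIO("10 20 30"), r"(\d+)", [('num', 'i8')])
+#     # out -> array([(10,), (20,), (30,)], dtype=[('num', '<i8')])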
+
+#####--------------------------------------------------------------------------
+#---- --- ASCII functions ---
+#####--------------------------------------------------------------------------
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
+               skip_header=0, skip_footer=0, converters=None,
+               missing_values=None, filling_values=None, usecols=None,
+               names=None, excludelist=None,
+               deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
+               replace_space='_', autostrip=False, case_sensitive=True,
+               defaultfmt="f%i", unpack=None, usemask=False, loose=True,
+               invalid_raise=True, max_rows=None, encoding='bytes',
+               *, ndmin=0, like=None):
+    """
+    Load data from a text file, with missing values handled as specified.
+
+    Each line past the first `skip_header` lines is split at the `delimiter`
+    character, and characters following the `comments` character are discarded.
+
+    Parameters
+    ----------
+    fname : file, str, pathlib.Path, list of str, generator
+        File, filename, list, or generator to read.  If the filename
+        extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
+        that generators must return bytes or strings. The strings
+        in a list or produced by a generator are treated as lines.
+    dtype : dtype, optional
+        Data type of the resulting array.
+        If None, the dtypes will be determined by the contents of each
+        column, individually.
+    comments : str, optional
+        The character used to indicate the start of a comment.
+        All the characters occurring on a line after a comment are discarded.
+    delimiter : str, int, or sequence, optional
+        The string used to separate values.  By default, any consecutive
+        whitespaces act as delimiter.  An integer or sequence of integers
+        can also be provided as width(s) of each field.
+    skiprows : int, optional
+        `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
+    skip_header : int, optional
+        The number of lines to skip at the beginning of the file.
+    skip_footer : int, optional
+        The number of lines to skip at the end of the file.
+    converters : variable, optional
+        The set of functions that convert the data of a column to a value.
+        The converters can also be used to provide a default value
+        for missing data: ``converters = {3: lambda s: float(s or 0)}``.
+    missing : variable, optional
+        `missing` was removed in numpy 1.10. Please use `missing_values`
+        instead.
+    missing_values : variable, optional
+        The set of strings corresponding to missing data.
+    filling_values : variable, optional
+        The set of values to be used as default when the data are missing.
+    usecols : sequence, optional
+        Which columns to read, with 0 being the first.  For example,
+        ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
+    names : {None, True, str, sequence}, optional
+        If `names` is True, the field names are read from the first line after
+        the first `skip_header` lines. This line can optionally be preceded
+        by a comment delimiter. If `names` is a sequence or a single string of
+        comma-separated names, the names will be used to define the field names
+        in a structured dtype. If `names` is None, the names of the dtype
+        fields will be used, if any.
+    excludelist : sequence, optional
+        A list of names to exclude. This list is appended to the default list
+        ['return','file','print']. Excluded names are appended with an
+        underscore: for example, `file` would become `file_`.
+    deletechars : str, optional
+        A string combining invalid characters that must be deleted from the
+        names.
+    defaultfmt : str, optional
+        A format used to define default field names, such as "f%i" or "f_%02i".
+    autostrip : bool, optional
+        Whether to automatically strip white spaces from the variables.
+    replace_space : char, optional
+        Character(s) used in replacement of white spaces in the variable
+        names. By default, use a '_'.
+    case_sensitive : {True, False, 'upper', 'lower'}, optional
+        If True, field names are case sensitive.
+        If False or 'upper', field names are converted to upper case.
+        If 'lower', field names are converted to lower case.
+    unpack : bool, optional
+        If True, the returned array is transposed, so that arguments may be
+        unpacked using ``x, y, z = genfromtxt(...)``.  When used with a
+        structured data-type, arrays are returned for each field.
+        Default is False.
+    usemask : bool, optional
+        If True, return a masked array.
+        If False, return a regular array.
+    loose : bool, optional
+        If True, do not raise errors for invalid values.
+    invalid_raise : bool, optional
+        If True, an exception is raised if an inconsistency is detected in the
+        number of columns.
+        If False, a warning is emitted and the offending lines are skipped.
+    max_rows : int, optional
+        The maximum number of rows to read. Must not be used with skip_footer
+        at the same time.  If given, the value must be at least 1. Default is
+        to read the entire file.
+
+        .. versionadded:: 1.10.0
+    encoding : str, optional
+        Encoding used to decode the input file. Does not apply when `fname` is
+        a file object.  The special value 'bytes' enables backward compatibility
+        workarounds that ensure that you receive byte arrays when possible
+        and passes latin1 encoded strings to converters. Override this value to
+        receive unicode arrays and pass strings as input to converters.  If set
+        to None the system default is used. The default value is 'bytes'.
+
+        .. versionadded:: 1.14.0
+    ndmin : int, optional
+        Same parameter as `loadtxt`
+
+        .. versionadded:: 1.23.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    out : ndarray
+        Data read from the text file. If `usemask` is True, this is a
+        masked array.
+
+    See Also
+    --------
+    numpy.loadtxt : equivalent function when no data is missing.
+
+    Notes
+    -----
+    * When spaces are used as delimiters, or when no delimiter has been given
+      as input, there should not be any missing data between two fields.
+    * When the variables are named (either by a flexible dtype or with `names`),
+      there must not be any header in the file (else a ValueError
+      exception is raised).
+    * Individual values are not stripped of spaces by default.
+      When using a custom converter, make sure the function does remove spaces.
+
+    References
+    ----------
+    .. [1] NumPy User Guide, section `I/O with NumPy
+           <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
+
+    Examples
+    --------
+    >>> from io import StringIO
+    >>> import numpy as np
+
+    Comma delimited file with mixed dtype
+
+    >>> s = StringIO(u"1,1.3,abcde")
+    >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
+    ... ('mystring','S5')], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    Using dtype = None
+
+    >>> _ = s.seek(0) # needed for StringIO example only
+    >>> data = np.genfromtxt(s, dtype=None,
+    ... names = ['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    Specifying dtype and names
+
+    >>> _ = s.seek(0)
+    >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
+    ... names=['myint','myfloat','mystring'], delimiter=",")
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
+
+    An example with fixed-width columns
+
+    >>> s = StringIO(u"11.3abcde")
+    >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
+    ...     delimiter=[1,3,5])
+    >>> data
+    array((1, 1.3, b'abcde'),
+          dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
+
+    An example to show comments
+
+    >>> f = StringIO('''
+    ... text,# of chars
+    ... hello world,11
+    ... numpy,5''')
+    >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
+    array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
+      dtype=[('f0', 'S12'), ('f1', 'S12')])
+
+    """
+
+    if like is not None:
+        return _genfromtxt_with_like(
+            like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
+            skip_header=skip_header, skip_footer=skip_footer,
+            converters=converters, missing_values=missing_values,
+            filling_values=filling_values, usecols=usecols, names=names,
+            excludelist=excludelist, deletechars=deletechars,
+            replace_space=replace_space, autostrip=autostrip,
+            case_sensitive=case_sensitive, defaultfmt=defaultfmt,
+            unpack=unpack, usemask=usemask, loose=loose,
+            invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
+            ndmin=ndmin,
+        )
+
+    _ensure_ndmin_ndarray_check_param(ndmin)
+
+    if max_rows is not None:
+        if skip_footer:
+            raise ValueError(
+                    "The keywords 'skip_footer' and 'max_rows' can not be "
+                    "specified at the same time.")
+        if max_rows < 1:
+            raise ValueError("'max_rows' must be at least 1.")
+
+    if usemask:
+        from numpy.ma import MaskedArray, make_mask_descr
+    # Check the input dictionary of converters
+    user_converters = converters or {}
+    if not isinstance(user_converters, dict):
+        raise TypeError(
+            "The input argument 'converter' should be a valid dictionary "
+            "(got '%s' instead)" % type(user_converters))
+
+    if encoding == 'bytes':
+        encoding = None
+        byte_converters = True
+    else:
+        byte_converters = False
+
+    # Initialize the filehandle, the LineSplitter and the NameValidator
+    if isinstance(fname, os_PathLike):
+        fname = os_fspath(fname)
+    if isinstance(fname, str):
+        fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+        fid_ctx = contextlib.closing(fid)
+    else:
+        fid = fname
+        fid_ctx = contextlib.nullcontext(fid)
+    try:
+        fhd = iter(fid)
+    except TypeError as e:
+        raise TypeError(
+            "fname must be a string, a filehandle, a sequence of strings,\n"
+            f"or an iterator of strings. Got {type(fname)} instead."
+        ) from e
+    with fid_ctx:
+        split_line = LineSplitter(delimiter=delimiter, comments=comments,
+                                  autostrip=autostrip, encoding=encoding)
+        validate_names = NameValidator(excludelist=excludelist,
+                                       deletechars=deletechars,
+                                       case_sensitive=case_sensitive,
+                                       replace_space=replace_space)
+
+        # Skip the first `skip_header` rows
+        try:
+            for i in range(skip_header):
+                next(fhd)
+
+            # Keep on until we find the first valid values
+            first_values = None
+
+            while not first_values:
+                first_line = _decode_line(next(fhd), encoding)
+                if (names is True) and (comments is not None):
+                    if comments in first_line:
+                        first_line = (
+                            ''.join(first_line.split(comments)[1:]))
+                first_values = split_line(first_line)
+        except StopIteration:
+            # return an empty array if the datafile is empty
+            first_line = ''
+            first_values = []
+            warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
+
+        # Should we take the first values as names ?
+        if names is True:
+            fval = first_values[0].strip()
+            if comments is not None:
+                if fval in comments:
+                    del first_values[0]
+
+        # Check the columns to use: make sure `usecols` is a list
+        if usecols is not None:
+            try:
+                usecols = [_.strip() for _ in usecols.split(",")]
+            except AttributeError:
+                try:
+                    usecols = list(usecols)
+                except TypeError:
+                    usecols = [usecols, ]
+        nbcols = len(usecols or first_values)
+
+        # Check the names and overwrite the dtype.names if needed
+        if names is True:
+            names = validate_names([str(_.strip()) for _ in first_values])
+            first_line = ''
+        elif _is_string_like(names):
+            names = validate_names([_.strip() for _ in names.split(',')])
+        elif names:
+            names = validate_names(names)
+        # Get the dtype
+        if dtype is not None:
+            dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
+                               excludelist=excludelist,
+                               deletechars=deletechars,
+                               case_sensitive=case_sensitive,
+                               replace_space=replace_space)
+        # Make sure the names is a list (for 2.5)
+        if names is not None:
+            names = list(names)
+
+        if usecols:
+            for (i, current) in enumerate(usecols):
+                # if usecols is a list of names, convert to a list of indices
+                if _is_string_like(current):
+                    usecols[i] = names.index(current)
+                elif current < 0:
+                    usecols[i] = current + len(first_values)
+            # If the dtype is not None, make sure we update it
+            if (dtype is not None) and (len(dtype) > nbcols):
+                descr = dtype.descr
+                dtype = np.dtype([descr[_] for _ in usecols])
+                names = list(dtype.names)
+            # If `names` is not None, update the names
+            elif (names is not None) and (len(names) > nbcols):
+                names = [names[_] for _ in usecols]
+        elif (names is not None) and (dtype is not None):
+            names = list(dtype.names)
+
+        # Process the missing values ...............................
+        # Rename missing_values for convenience
+        user_missing_values = missing_values or ()
+        if isinstance(user_missing_values, bytes):
+            user_missing_values = user_missing_values.decode('latin1')
+
+        # Define the list of missing_values (one column: one list)
+        missing_values = [list(['']) for _ in range(nbcols)]
+
+        # We have a dictionary: process it field by field
+        if isinstance(user_missing_values, dict):
+            # Loop on the items
+            for (key, val) in user_missing_values.items():
+                # Is the key a string ?
+                if _is_string_like(key):
+                    try:
+                        # Transform it into an integer
+                        key = names.index(key)
+                    except ValueError:
+                        # We couldn't find it: the name must have been dropped
+                        continue
+                # Redefine the key as needed if it's a column number
+                if usecols:
+                    try:
+                        key = usecols.index(key)
+                    except ValueError:
+                        pass
+                # Transform the value as a list of string
+                if isinstance(val, (list, tuple)):
+                    val = [str(_) for _ in val]
+                else:
+                    val = [str(val), ]
+                # Add the value(s) to the current list of missing
+                if key is None:
+                    # None acts as default
+                    for miss in missing_values:
+                        miss.extend(val)
+                else:
+                    missing_values[key].extend(val)
+        # We have a sequence : each item matches a column
+        elif isinstance(user_missing_values, (list, tuple)):
+            for (value, entry) in zip(user_missing_values, missing_values):
+                value = str(value)
+                if value not in entry:
+                    entry.append(value)
+        # We have a string : apply it to all entries
+        elif isinstance(user_missing_values, str):
+            user_value = user_missing_values.split(",")
+            for entry in missing_values:
+                entry.extend(user_value)
+        # We have something else: apply it to all entries
+        else:
+            for entry in missing_values:
+                entry.extend([str(user_missing_values)])
+
+        # Process the filling_values ...............................
+        # Rename the input for convenience
+        user_filling_values = filling_values
+        if user_filling_values is None:
+            user_filling_values = []
+        # Define the default
+        filling_values = [None] * nbcols
+        # We have a dictionary : update each entry individually
+        if isinstance(user_filling_values, dict):
+            for (key, val) in user_filling_values.items():
+                if _is_string_like(key):
+                    try:
+                        # Transform it into an integer
+                        key = names.index(key)
+                    except ValueError:
+                        # We couldn't find it: the name must have been dropped,
+                        continue
+                # Redefine the key if it's a column number and usecols is defined
+                if usecols:
+                    try:
+                        key = usecols.index(key)
+                    except ValueError:
+                        pass
+                # Add the value to the list
+                filling_values[key] = val
+        # We have a sequence : update on a one-to-one basis
+        elif isinstance(user_filling_values, (list, tuple)):
+            n = len(user_filling_values)
+            if (n <= nbcols):
+                filling_values[:n] = user_filling_values
+            else:
+                filling_values = user_filling_values[:nbcols]
+        # We have something else : use it for all entries
+        else:
+            filling_values = [user_filling_values] * nbcols
+
+        # Initialize the converters ................................
+        if dtype is None:
+            # Note: we can't use a [...]*nbcols, as we would have 3 times the same
+            # ... converter, instead of 3 different converters.
+            converters = [StringConverter(None, missing_values=miss, default=fill)
+                          for (miss, fill) in zip(missing_values, filling_values)]
+        else:
+            dtype_flat = flatten_dtype(dtype, flatten_base=True)
+            # Initialize the converters
+            if len(dtype_flat) > 1:
+                # Flexible type : get a converter from each dtype
+                zipit = zip(dtype_flat, missing_values, filling_values)
+                converters = [StringConverter(dt, locked=True,
+                                              missing_values=miss, default=fill)
+                              for (dt, miss, fill) in zipit]
+            else:
+                # Set to a default converter (but w/ different missing values)
+                zipit = zip(missing_values, filling_values)
+                converters = [StringConverter(dtype, locked=True,
+                                              missing_values=miss, default=fill)
+                              for (miss, fill) in zipit]
+        # Update the converters to use the user-defined ones
+        uc_update = []
+        for (j, conv) in user_converters.items():
+            # If the converter is specified by column names, use the index instead
+            if _is_string_like(j):
+                try:
+                    j = names.index(j)
+                    i = j
+                except ValueError:
+                    continue
+            elif usecols:
+                try:
+                    i = usecols.index(j)
+                except ValueError:
+                    # Unused converter specified
+                    continue
+            else:
+                i = j
+            # Find the value to test - first_line is not filtered by usecols:
+            if len(first_line):
+                testing_value = first_values[j]
+            else:
+                testing_value = None
+            if conv is bytes:
+                user_conv = asbytes
+            elif byte_converters:
+                # converters may use decode to workaround numpy's old behaviour,
+                # so encode the string again before passing to the user converter
+                def tobytes_first(x, conv):
+                    if type(x) is bytes:
+                        return conv(x)
+                    return conv(x.encode("latin1"))
+                user_conv = functools.partial(tobytes_first, conv=conv)
+            else:
+                user_conv = conv
+            converters[i].update(user_conv, locked=True,
+                                 testing_value=testing_value,
+                                 default=filling_values[i],
+                                 missing_values=missing_values[i],)
+            uc_update.append((i, user_conv))
+        # Make sure we have the corrected keys in user_converters...
+        user_converters.update(uc_update)
+
+        # Fixme: possible error as following variable never used.
+        # miss_chars = [_.missing_values for _ in converters]
+
+        # Initialize the output lists ...
+        # ... rows
+        rows = []
+        append_to_rows = rows.append
+        # ... masks
+        if usemask:
+            masks = []
+            append_to_masks = masks.append
+        # ... invalid
+        invalid = []
+        append_to_invalid = invalid.append
+
+        # Parse each line
+        for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
+            values = split_line(line)
+            nbvalues = len(values)
+            # Skip an empty line
+            if nbvalues == 0:
+                continue
+            if usecols:
+                # Select only the columns we need
+                try:
+                    values = [values[_] for _ in usecols]
+                except IndexError:
+                    append_to_invalid((i + skip_header + 1, nbvalues))
+                    continue
+            elif nbvalues != nbcols:
+                append_to_invalid((i + skip_header + 1, nbvalues))
+                continue
+            # Store the values
+            append_to_rows(tuple(values))
+            if usemask:
+                append_to_masks(tuple([v.strip() in m
+                                       for (v, m) in zip(values,
+                                                         missing_values)]))
+            if len(rows) == max_rows:
+                break
+
+    # Upgrade the converters (if needed)
+    if dtype is None:
+        for (i, converter) in enumerate(converters):
+            current_column = [itemgetter(i)(_m) for _m in rows]
+            try:
+                converter.iterupgrade(current_column)
+            except ConverterLockError:
+                errmsg = "Converter #%i is locked and cannot be upgraded: " % i
+                current_column = map(itemgetter(i), rows)
+                for (j, value) in enumerate(current_column):
+                    try:
+                        converter.upgrade(value)
+                    except (ConverterError, ValueError):
+                        errmsg += "(occurred line #%i for value '%s')"
+                        errmsg %= (j + 1 + skip_header, value)
+                        raise ConverterError(errmsg)
+
+    # Check that we don't have invalid values
+    nbinvalid = len(invalid)
+    if nbinvalid > 0:
+        nbrows = len(rows) + nbinvalid - skip_footer
+        # Construct the error message
+        template = "    Line #%%i (got %%i columns instead of %i)" % nbcols
+        if skip_footer > 0:
+            nbinvalid_skipped = len([_ for _ in invalid
+                                     if _[0] > nbrows + skip_header])
+            invalid = invalid[:nbinvalid - nbinvalid_skipped]
+            skip_footer -= nbinvalid_skipped
+#
+#            nbrows -= skip_footer
+#            errmsg = [template % (i, nb)
+#                      for (i, nb) in invalid if i < nbrows]
+#        else:
+        errmsg = [template % (i, nb)
+                  for (i, nb) in invalid]
+        if len(errmsg):
+            errmsg.insert(0, "Some errors were detected !")
+            errmsg = "\n".join(errmsg)
+            # Raise an exception ?
+            if invalid_raise:
+                raise ValueError(errmsg)
+            # Issue a warning ?
+            else:
+                warnings.warn(errmsg, ConversionWarning, stacklevel=2)
+
+    # Strip the last skip_footer data
+    if skip_footer > 0:
+        rows = rows[:-skip_footer]
+        if usemask:
+            masks = masks[:-skip_footer]
+
+    # Convert each value according to the converter:
+    # We want to modify the list in place to avoid creating a new one...
+    if loose:
+        rows = list(
+            zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
+                  for (i, conv) in enumerate(converters)]))
+    else:
+        rows = list(
+            zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
+                  for (i, conv) in enumerate(converters)]))
+
+    # Reset the dtype
+    data = rows
+    if dtype is None:
+        # Get the dtypes from the types of the converters
+        column_types = [conv.type for conv in converters]
+        # Find the columns with strings...
+        strcolidx = [i for (i, v) in enumerate(column_types)
+                     if v == np.str_]
+
+        if byte_converters and strcolidx:
+            # convert strings back to bytes for backward compatibility
+            warnings.warn(
+                "Reading unicode strings without specifying the encoding "
+                "argument is deprecated. Set the encoding, use None for the "
+                "system default.",
+                np.VisibleDeprecationWarning, stacklevel=2)
+            def encode_unicode_cols(row_tup):
+                row = list(row_tup)
+                for i in strcolidx:
+                    row[i] = row[i].encode('latin1')
+                return tuple(row)
+
+            try:
+                data = [encode_unicode_cols(r) for r in data]
+            except UnicodeEncodeError:
+                pass
+            else:
+                for i in strcolidx:
+                    column_types[i] = np.bytes_
+
+        # Update string types to be the right length
+        sized_column_types = column_types[:]
+        for i, col_type in enumerate(column_types):
+            if np.issubdtype(col_type, np.character):
+                n_chars = max(len(row[i]) for row in data)
+                sized_column_types[i] = (col_type, n_chars)
+
+        if names is None:
+            # If the dtype is uniform (before sizing strings)
+            base = {
+                c_type
+                for c, c_type in zip(converters, column_types)
+                if c._checked}
+            if len(base) == 1:
+                uniform_type, = base
+                (ddtype, mdtype) = (uniform_type, bool)
+            else:
+                ddtype = [(defaultfmt % i, dt)
+                          for (i, dt) in enumerate(sized_column_types)]
+                if usemask:
+                    mdtype = [(defaultfmt % i, bool)
+                              for (i, dt) in enumerate(sized_column_types)]
+        else:
+            ddtype = list(zip(names, sized_column_types))
+            mdtype = list(zip(names, [bool] * len(sized_column_types)))
+        output = np.array(data, dtype=ddtype)
+        if usemask:
+            outputmask = np.array(masks, dtype=mdtype)
+    else:
+        # Overwrite the initial dtype names if needed
+        if names and dtype.names is not None:
+            dtype.names = names
+        # Case 1. We have a structured type
+        if len(dtype_flat) > 1:
+            # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
+            # First, create the array using a flattened dtype:
+            # [('a', int), ('b1', int), ('b2', float)]
+            # Then, view the array using the specified dtype.
+            if 'O' in (_.char for _ in dtype_flat):
+                if has_nested_fields(dtype):
+                    raise NotImplementedError(
+                        "Nested fields involving objects are not supported...")
+                else:
+                    output = np.array(data, dtype=dtype)
+            else:
+                rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
+                output = rows.view(dtype)
+            # Now, process the rowmasks the same way
+            if usemask:
+                rowmasks = np.array(
+                    masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
+                # Construct the new dtype
+                mdtype = make_mask_descr(dtype)
+                outputmask = rowmasks.view(mdtype)
+        # Case #2. We have a basic dtype
+        else:
+            # We used some user-defined converters
+            if user_converters:
+                ishomogeneous = True
+                descr = []
+                for i, ttype in enumerate([conv.type for conv in converters]):
+                    # Keep the dtype of the current converter
+                    if i in user_converters:
+                        ishomogeneous &= (ttype == dtype.type)
+                        if np.issubdtype(ttype, np.character):
+                            ttype = (ttype, max(len(row[i]) for row in data))
+                        descr.append(('', ttype))
+                    else:
+                        descr.append(('', dtype))
+                # So we changed the dtype ?
+                if not ishomogeneous:
+                    # We have more than one field
+                    if len(descr) > 1:
+                        dtype = np.dtype(descr)
+                    # We have only one field: drop the name if not needed.
+                    else:
+                        dtype = np.dtype(ttype)
+            #
+            output = np.array(data, dtype)
+            if usemask:
+                if dtype.names is not None:
+                    mdtype = [(_, bool) for _ in dtype.names]
+                else:
+                    mdtype = bool
+                outputmask = np.array(masks, dtype=mdtype)
+    # Try to take care of the missing data we missed
+    names = output.dtype.names
+    if usemask and names:
+        for (name, conv) in zip(names, converters):
+            missing_values = [conv(_) for _ in conv.missing_values
+                              if _ != '']
+            for mval in missing_values:
+                outputmask[name] |= (output[name] == mval)
+    # Construct the final array
+    if usemask:
+        output = output.view(MaskedArray)
+        output._mask = outputmask
+
+    output = _ensure_ndmin_ndarray(output, ndmin=ndmin)
+
+    if unpack:
+        if names is None:
+            return output.T
+        elif len(names) == 1:
+            # squeeze single-name dtypes too
+            return output[names[0]]
+        else:
+            # For structured arrays with multiple fields,
+            # return an array for each field.
+            return [output[field] for field in names]
+    return output
+
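+# A minimal usage sketch (illustrative only, not part of the library source):
+# per-column missing entries are recognised by the ``missing_values`` /
+# ``filling_values`` machinery above and replaced by the chosen filler, e.g.
+#
+#     from io import StringIO
+#     np.genfromtxt(StringIO("1,N/A\n2,3"), delimiter=",",
+#                   missing_values="N/A", filling_values=-1)
+#     # -> array([[ 1., -1.],
+#     #           [ 2.,  3.]])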
+
+_genfromtxt_with_like = array_function_dispatch()(genfromtxt)
+
+
+def recfromtxt(fname, **kwargs):
+    """
+    Load ASCII data from a file and return it in a record array.
+
+    If ``usemask=False`` a standard `recarray` is returned,
+    if ``usemask=True`` a MaskedRecords array is returned.
+
+    Parameters
+    ----------
+    fname, kwargs : For a description of input parameters, see `genfromtxt`.
+
+    See Also
+    --------
+    numpy.genfromtxt : generic function
+
+    Notes
+    -----
+    By default, `dtype` is None, which means that the data-type of the output
+    array will be determined from the data.
+
+    """
+    kwargs.setdefault("dtype", None)
+    usemask = kwargs.get('usemask', False)
+    output = genfromtxt(fname, **kwargs)
+    if usemask:
+        from numpy.ma.mrecords import MaskedRecords
+        output = output.view(MaskedRecords)
+    else:
+        output = output.view(np.recarray)
+    return output
+
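+# A minimal usage sketch (illustrative only, not part of the library source):
+# with the default ``usemask=False`` and ``dtype=None``, column types are
+# inferred and the result is a plain recarray with attribute access, e.g.
+#
+#     from io import StringIO
+#     r = recfromtxt(StringIO("1 2.5\n3 4.0"))
+#     # r.f0 -> array([1, 3]),  r.f1 -> array([2.5, 4. ])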
+
+def recfromcsv(fname, **kwargs):
+    """
+    Load ASCII data stored in a comma-separated file.
+
+    The returned array is a record array (if ``usemask=False``, see
+    `recarray`) or a masked record array (if ``usemask=True``,
+    see `ma.mrecords.MaskedRecords`).
+
+    Parameters
+    ----------
+    fname, kwargs : For a description of input parameters, see `genfromtxt`.
+
+    See Also
+    --------
+    numpy.genfromtxt : generic function to load ASCII data.
+
+    Notes
+    -----
+    By default, `dtype` is None, which means that the data-type of the output
+    array will be determined from the data.
+
+    """
+    # Set default kwargs for genfromtxt as relevant to csv import.
+    kwargs.setdefault("case_sensitive", "lower")
+    kwargs.setdefault("names", True)
+    kwargs.setdefault("delimiter", ",")
+    kwargs.setdefault("dtype", None)
+    output = genfromtxt(fname, **kwargs)
+
+    usemask = kwargs.get("usemask", False)
+    if usemask:
+        from numpy.ma.mrecords import MaskedRecords
+        output = output.view(MaskedRecords)
+    else:
+        output = output.view(np.recarray)
+    return output
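+
+
+# A minimal usage sketch (illustrative only, not part of the library source):
+# the defaults set above read the field names from the CSV header and
+# lower-case them, so columns are reachable as recarray attributes, e.g.
+#
+#     from io import StringIO
+#     r = recfromcsv(StringIO("A,B\n1,2.5\n3,4.0"))
+#     # r.a -> array([1, 3]),  r.b -> array([2.5, 4. ])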
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/npyio.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/npyio.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..ef0f2a5f177f8d6726795c7af51ada2b6d97243c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/npyio.pyi
@@ -0,0 +1,330 @@
+import os
+import sys
+import zipfile
+import types
+from re import Pattern
+from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable
+from typing import (
+    Literal as L,
+    Any,
+    TypeVar,
+    Generic,
+    IO,
+    overload,
+    Protocol,
+)
+
+from numpy import (
+    DataSource as DataSource,
+    ndarray,
+    recarray,
+    dtype,
+    generic,
+    float64,
+    void,
+    record,
+)
+
+from numpy.ma.mrecords import MaskedRecords
+from numpy._typing import (
+    ArrayLike,
+    DTypeLike,
+    NDArray,
+    _DTypeLike,
+    _SupportsArrayFunc,
+)
+
+from numpy.core.multiarray import (
+    packbits as packbits,
+    unpackbits as unpackbits,
+)
+
+_T = TypeVar("_T")
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True)
+_CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True)
+
+class _SupportsGetItem(Protocol[_T_contra, _T_co]):
+    def __getitem__(self, key: _T_contra, /) -> _T_co: ...
+
+class _SupportsRead(Protocol[_CharType_co]):
+    def read(self) -> _CharType_co: ...
+
+class _SupportsReadSeek(Protocol[_CharType_co]):
+    def read(self, n: int, /) -> _CharType_co: ...
+    def seek(self, offset: int, whence: int, /) -> object: ...
+
+class _SupportsWrite(Protocol[_CharType_contra]):
+    def write(self, s: _CharType_contra, /) -> object: ...
+
+__all__: list[str]
+
+class BagObj(Generic[_T_co]):
+    def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ...
+    def __getattribute__(self, key: str) -> _T_co: ...
+    def __dir__(self) -> list[str]: ...
+
+class NpzFile(Mapping[str, NDArray[Any]]):
+    zip: zipfile.ZipFile
+    fid: None | IO[str]
+    files: list[str]
+    allow_pickle: bool
+    pickle_kwargs: None | Mapping[str, Any]
+    _MAX_REPR_ARRAY_COUNT: int
+    # Represent `f` as a mutable property so we can access the type of `self`
+    @property
+    def f(self: _T) -> BagObj[_T]: ...
+    @f.setter
+    def f(self: _T, value: BagObj[_T]) -> None: ...
+    def __init__(
+        self,
+        fid: IO[str],
+        own_fid: bool = ...,
+        allow_pickle: bool = ...,
+        pickle_kwargs: None | Mapping[str, Any] = ...,
+    ) -> None: ...
+    def __enter__(self: _T) -> _T: ...
+    def __exit__(
+        self,
+        exc_type: None | type[BaseException],
+        exc_value: None | BaseException,
+        traceback: None | types.TracebackType,
+        /,
+    ) -> None: ...
+    def close(self) -> None: ...
+    def __del__(self) -> None: ...
+    def __iter__(self) -> Iterator[str]: ...
+    def __len__(self) -> int: ...
+    def __getitem__(self, key: str) -> NDArray[Any]: ...
+    def __contains__(self, key: str) -> bool: ...
+    def __repr__(self) -> str: ...
+
+# NOTE: Returns a `NpzFile` if file is a zip file;
+# returns an `ndarray`/`memmap` otherwise
+def load(
+    file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes],
+    mmap_mode: L[None, "r+", "r", "w+", "c"] = ...,
+    allow_pickle: bool = ...,
+    fix_imports: bool = ...,
+    encoding: L["ASCII", "latin1", "bytes"] = ...,
+) -> Any: ...
+
+def save(
+    file: str | os.PathLike[str] | _SupportsWrite[bytes],
+    arr: ArrayLike,
+    allow_pickle: bool = ...,
+    fix_imports: bool = ...,
+) -> None: ...
+
+def savez(
+    file: str | os.PathLike[str] | _SupportsWrite[bytes],
+    *args: ArrayLike,
+    **kwds: ArrayLike,
+) -> None: ...
+
+def savez_compressed(
+    file: str | os.PathLike[str] | _SupportsWrite[bytes],
+    *args: ArrayLike,
+    **kwds: ArrayLike,
+) -> None: ...
+
+# File-like objects only have to implement `__iter__` and,
+# optionally, `encoding`
+@overload
+def loadtxt(
+    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+    dtype: None = ...,
+    comments: None | str | Sequence[str] = ...,
+    delimiter: None | str = ...,
+    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+    skiprows: int = ...,
+    usecols: int | Sequence[int] = ...,
+    unpack: bool = ...,
+    ndmin: L[0, 1, 2] = ...,
+    encoding: None | str = ...,
+    max_rows: None | int = ...,
+    *,
+    quotechar: None | str = ...,
+    like: None | _SupportsArrayFunc = ...
+) -> NDArray[float64]: ...
+@overload
+def loadtxt(
+    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+    dtype: _DTypeLike[_SCT],
+    comments: None | str | Sequence[str] = ...,
+    delimiter: None | str = ...,
+    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+    skiprows: int = ...,
+    usecols: int | Sequence[int] = ...,
+    unpack: bool = ...,
+    ndmin: L[0, 1, 2] = ...,
+    encoding: None | str = ...,
+    max_rows: None | int = ...,
+    *,
+    quotechar: None | str = ...,
+    like: None | _SupportsArrayFunc = ...
+) -> NDArray[_SCT]: ...
+@overload
+def loadtxt(
+    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+    dtype: DTypeLike,
+    comments: None | str | Sequence[str] = ...,
+    delimiter: None | str = ...,
+    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+    skiprows: int = ...,
+    usecols: int | Sequence[int] = ...,
+    unpack: bool = ...,
+    ndmin: L[0, 1, 2] = ...,
+    encoding: None | str = ...,
+    max_rows: None | int = ...,
+    *,
+    quotechar: None | str = ...,
+    like: None | _SupportsArrayFunc = ...
+) -> NDArray[Any]: ...
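+
+# Overload resolution sketch (comment only, an illustration rather than part of
+# the stub): with no dtype the result is typed as float64, while a concrete
+# scalar-type dtype narrows it, e.g. under a type checker:
+#   reveal_type(np.loadtxt("data.txt"))                  # ndarray[Any, dtype[float64]]
+#   reveal_type(np.loadtxt("data.txt", dtype=np.int64))  # ndarray[Any, dtype[int64]]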
+
+def savetxt(
+    fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes],
+    X: ArrayLike,
+    fmt: str | Sequence[str] = ...,
+    delimiter: str = ...,
+    newline: str = ...,
+    header: str = ...,
+    footer: str = ...,
+    comments: str = ...,
+    encoding: None | str = ...,
+) -> None: ...
+
+@overload
+def fromregex(
+    file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
+    regexp: str | bytes | Pattern[Any],
+    dtype: _DTypeLike[_SCT],
+    encoding: None | str = ...
+) -> NDArray[_SCT]: ...
+@overload
+def fromregex(
+    file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
+    regexp: str | bytes | Pattern[Any],
+    dtype: DTypeLike,
+    encoding: None | str = ...
+) -> NDArray[Any]: ...
+
+@overload
+def genfromtxt(
+    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+    dtype: None = ...,
+    comments: str = ...,
+    delimiter: None | str | int | Iterable[int] = ...,
+    skip_header: int = ...,
+    skip_footer: int = ...,
+    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+    missing_values: Any = ...,
+    filling_values: Any = ...,
+    usecols: None | Sequence[int] = ...,
+    names: L[None, True] | str | Collection[str] = ...,
+    excludelist: None | Sequence[str] = ...,
+    deletechars: str = ...,
+    replace_space: str = ...,
+    autostrip: bool = ...,
+    case_sensitive: bool | L['upper', 'lower'] = ...,
+    defaultfmt: str = ...,
+    unpack: None | bool = ...,
+    usemask: bool = ...,
+    loose: bool = ...,
+    invalid_raise: bool = ...,
+    max_rows: None | int = ...,
+    encoding: str = ...,
+    *,
+    ndmin: L[0, 1, 2] = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+@overload
+def genfromtxt(
+    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+    dtype: _DTypeLike[_SCT],
+    comments: str = ...,
+    delimiter: None | str | int | Iterable[int] = ...,
+    skip_header: int = ...,
+    skip_footer: int = ...,
+    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+    missing_values: Any = ...,
+    filling_values: Any = ...,
+    usecols: None | Sequence[int] = ...,
+    names: L[None, True] | str | Collection[str] = ...,
+    excludelist: None | Sequence[str] = ...,
+    deletechars: str = ...,
+    replace_space: str = ...,
+    autostrip: bool = ...,
+    case_sensitive: bool | L['upper', 'lower'] = ...,
+    defaultfmt: str = ...,
+    unpack: None | bool = ...,
+    usemask: bool = ...,
+    loose: bool = ...,
+    invalid_raise: bool = ...,
+    max_rows: None | int = ...,
+    encoding: str = ...,
+    *,
+    ndmin: L[0, 1, 2] = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def genfromtxt(
+    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+    dtype: DTypeLike,
+    comments: str = ...,
+    delimiter: None | str | int | Iterable[int] = ...,
+    skip_header: int = ...,
+    skip_footer: int = ...,
+    converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
+    missing_values: Any = ...,
+    filling_values: Any = ...,
+    usecols: None | Sequence[int] = ...,
+    names: L[None, True] | str | Collection[str] = ...,
+    excludelist: None | Sequence[str] = ...,
+    deletechars: str = ...,
+    replace_space: str = ...,
+    autostrip: bool = ...,
+    case_sensitive: bool | L['upper', 'lower'] = ...,
+    defaultfmt: str = ...,
+    unpack: None | bool = ...,
+    usemask: bool = ...,
+    loose: bool = ...,
+    invalid_raise: bool = ...,
+    max_rows: None | int = ...,
+    encoding: str = ...,
+    *,
+    ndmin: L[0, 1, 2] = ...,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def recfromtxt(
+    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+    *,
+    usemask: L[False] = ...,
+    **kwargs: Any,
+) -> recarray[Any, dtype[record]]: ...
+@overload
+def recfromtxt(
+    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+    *,
+    usemask: L[True],
+    **kwargs: Any,
+) -> MaskedRecords[Any, dtype[void]]: ...
+
+@overload
+def recfromcsv(
+    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+    *,
+    usemask: L[False] = ...,
+    **kwargs: Any,
+) -> recarray[Any, dtype[record]]: ...
+@overload
+def recfromcsv(
+    fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
+    *,
+    usemask: L[True],
+    **kwargs: Any,
+) -> MaskedRecords[Any, dtype[void]]: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/polynomial.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/polynomial.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..14bbaf39d24944fb565cf543002ce1a8ba06ffe0
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/polynomial.pyi
@@ -0,0 +1,303 @@
+from typing import (
+    Literal as L,
+    overload,
+    Any,
+    SupportsInt,
+    SupportsIndex,
+    TypeVar,
+    NoReturn,
+)
+
+from numpy import (
+    RankWarning as RankWarning,
+    poly1d as poly1d,
+    unsignedinteger,
+    signedinteger,
+    floating,
+    complexfloating,
+    bool_,
+    int32,
+    int64,
+    float64,
+    complex128,
+    object_,
+)
+
+from numpy._typing import (
+    NDArray,
+    ArrayLike,
+    _ArrayLikeBool_co,
+    _ArrayLikeUInt_co,
+    _ArrayLikeInt_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeObject_co,
+)
+
+_T = TypeVar("_T")
+
+_2Tup = tuple[_T, _T]
+_5Tup = tuple[
+    _T,
+    NDArray[float64],
+    NDArray[int32],
+    NDArray[float64],
+    NDArray[float64],
+]
+
+__all__: list[str]
+
+def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ...
+
+# Returns either a float or complex array depending on the input values.
+# See `np.linalg.eigvals`.
+def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ...
+
+@overload
+def polyint(
+    p: poly1d,
+    m: SupportsInt | SupportsIndex = ...,
+    k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
+) -> poly1d: ...
+@overload
+def polyint(
+    p: _ArrayLikeFloat_co,
+    m: SupportsInt | SupportsIndex = ...,
+    k: None | _ArrayLikeFloat_co = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyint(
+    p: _ArrayLikeComplex_co,
+    m: SupportsInt | SupportsIndex = ...,
+    k: None | _ArrayLikeComplex_co = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyint(
+    p: _ArrayLikeObject_co,
+    m: SupportsInt | SupportsIndex = ...,
+    k: None | _ArrayLikeObject_co = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def polyder(
+    p: poly1d,
+    m: SupportsInt | SupportsIndex = ...,
+) -> poly1d: ...
+@overload
+def polyder(
+    p: _ArrayLikeFloat_co,
+    m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyder(
+    p: _ArrayLikeComplex_co,
+    m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyder(
+    p: _ArrayLikeObject_co,
+    m: SupportsInt | SupportsIndex = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def polyfit(
+    x: _ArrayLikeFloat_co,
+    y: _ArrayLikeFloat_co,
+    deg: SupportsIndex | SupportsInt,
+    rcond: None | float = ...,
+    full: L[False] = ...,
+    w: None | _ArrayLikeFloat_co = ...,
+    cov: L[False] = ...,
+) -> NDArray[float64]: ...
+@overload
+def polyfit(
+    x: _ArrayLikeComplex_co,
+    y: _ArrayLikeComplex_co,
+    deg: SupportsIndex | SupportsInt,
+    rcond: None | float = ...,
+    full: L[False] = ...,
+    w: None | _ArrayLikeFloat_co = ...,
+    cov: L[False] = ...,
+) -> NDArray[complex128]: ...
+@overload
+def polyfit(
+    x: _ArrayLikeFloat_co,
+    y: _ArrayLikeFloat_co,
+    deg: SupportsIndex | SupportsInt,
+    rcond: None | float = ...,
+    full: L[False] = ...,
+    w: None | _ArrayLikeFloat_co = ...,
+    cov: L[True, "unscaled"] = ...,
+) -> _2Tup[NDArray[float64]]: ...
+@overload
+def polyfit(
+    x: _ArrayLikeComplex_co,
+    y: _ArrayLikeComplex_co,
+    deg: SupportsIndex | SupportsInt,
+    rcond: None | float = ...,
+    full: L[False] = ...,
+    w: None | _ArrayLikeFloat_co = ...,
+    cov: L[True, "unscaled"] = ...,
+) -> _2Tup[NDArray[complex128]]: ...
+@overload
+def polyfit(
+    x: _ArrayLikeFloat_co,
+    y: _ArrayLikeFloat_co,
+    deg: SupportsIndex | SupportsInt,
+    rcond: None | float = ...,
+    full: L[True] = ...,
+    w: None | _ArrayLikeFloat_co = ...,
+    cov: bool | L["unscaled"] = ...,
+) -> _5Tup[NDArray[float64]]: ...
+@overload
+def polyfit(
+    x: _ArrayLikeComplex_co,
+    y: _ArrayLikeComplex_co,
+    deg: SupportsIndex | SupportsInt,
+    rcond: None | float = ...,
+    full: L[True] = ...,
+    w: None | _ArrayLikeFloat_co = ...,
+    cov: bool | L["unscaled"] = ...,
+) -> _5Tup[NDArray[complex128]]: ...
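+
+# Overload resolution sketch (comment only, an illustration rather than part of
+# the stub): the plain call returns just the coefficient array, ``cov=True``
+# adds the covariance matrix (the 2-tuple above), and ``full=True`` selects the
+# 5-tuple of diagnostic output, e.g.
+#   np.polyfit(x, y, 2)               # NDArray[float64]
+#   np.polyfit(x, y, 2, cov=True)     # (coefficients, covariance)
+#   np.polyfit(x, y, 2, full=True)    # (coefficients, residuals, rank, singular values, rcond)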
+
+@overload
+def polyval(
+    p: _ArrayLikeBool_co,
+    x: _ArrayLikeBool_co,
+) -> NDArray[int64]: ...
+@overload
+def polyval(
+    p: _ArrayLikeUInt_co,
+    x: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def polyval(
+    p: _ArrayLikeInt_co,
+    x: _ArrayLikeInt_co,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def polyval(
+    p: _ArrayLikeFloat_co,
+    x: _ArrayLikeFloat_co,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyval(
+    p: _ArrayLikeComplex_co,
+    x: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyval(
+    p: _ArrayLikeObject_co,
+    x: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+@overload
+def polyadd(
+    a1: poly1d,
+    a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> poly1d: ...
+@overload
+def polyadd(
+    a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    a2: poly1d,
+) -> poly1d: ...
+@overload
+def polyadd(
+    a1: _ArrayLikeBool_co,
+    a2: _ArrayLikeBool_co,
+) -> NDArray[bool_]: ...
+@overload
+def polyadd(
+    a1: _ArrayLikeUInt_co,
+    a2: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def polyadd(
+    a1: _ArrayLikeInt_co,
+    a2: _ArrayLikeInt_co,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def polyadd(
+    a1: _ArrayLikeFloat_co,
+    a2: _ArrayLikeFloat_co,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polyadd(
+    a1: _ArrayLikeComplex_co,
+    a2: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polyadd(
+    a1: _ArrayLikeObject_co,
+    a2: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+@overload
+def polysub(
+    a1: poly1d,
+    a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> poly1d: ...
+@overload
+def polysub(
+    a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    a2: poly1d,
+) -> poly1d: ...
+@overload
+def polysub(
+    a1: _ArrayLikeBool_co,
+    a2: _ArrayLikeBool_co,
+) -> NoReturn: ...
+@overload
+def polysub(
+    a1: _ArrayLikeUInt_co,
+    a2: _ArrayLikeUInt_co,
+) -> NDArray[unsignedinteger[Any]]: ...
+@overload
+def polysub(
+    a1: _ArrayLikeInt_co,
+    a2: _ArrayLikeInt_co,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def polysub(
+    a1: _ArrayLikeFloat_co,
+    a2: _ArrayLikeFloat_co,
+) -> NDArray[floating[Any]]: ...
+@overload
+def polysub(
+    a1: _ArrayLikeComplex_co,
+    a2: _ArrayLikeComplex_co,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def polysub(
+    a1: _ArrayLikeObject_co,
+    a2: _ArrayLikeObject_co,
+) -> NDArray[object_]: ...
+
+# NOTE: Not an alias, but they do have the same signature (that we can reuse)
+polymul = polyadd
+
+@overload
+def polydiv(
+    u: poly1d,
+    v: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+) -> _2Tup[poly1d]: ...
+@overload
+def polydiv(
+    u: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+    v: poly1d,
+) -> _2Tup[poly1d]: ...
+@overload
+def polydiv(
+    u: _ArrayLikeFloat_co,
+    v: _ArrayLikeFloat_co,
+) -> _2Tup[NDArray[floating[Any]]]: ...
+@overload
+def polydiv(
+    u: _ArrayLikeComplex_co,
+    v: _ArrayLikeComplex_co,
+) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ...
+@overload
+def polydiv(
+    u: _ArrayLikeObject_co,
+    v: _ArrayLikeObject_co,
+) -> _2Tup[NDArray[Any]]: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/recfunctions.py b/.venv/lib/python3.11/site-packages/numpy/lib/recfunctions.py
new file mode 100644
index 0000000000000000000000000000000000000000..83ae413c6032bceec05c7e4dce17e16113f7625c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/recfunctions.py
@@ -0,0 +1,1673 @@
+"""
+Collection of utilities to manipulate structured arrays.
+
+Most of these functions were initially implemented by John Hunter for
+matplotlib.  They have been rewritten and extended for convenience.
+
+"""
+import itertools
+import numpy as np
+import numpy.ma as ma
+from numpy import ndarray, recarray
+from numpy.ma import MaskedArray
+from numpy.ma.mrecords import MaskedRecords
+from numpy.core.overrides import array_function_dispatch
+from numpy.lib._iotools import _is_string_like
+
+_check_fill_value = np.ma.core._check_fill_value
+
+
+__all__ = [
+    'append_fields', 'apply_along_fields', 'assign_fields_by_name',
+    'drop_fields', 'find_duplicates', 'flatten_descr',
+    'get_fieldstructure', 'get_names', 'get_names_flat',
+    'join_by', 'merge_arrays', 'rec_append_fields',
+    'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
+    'rename_fields', 'repack_fields', 'require_fields',
+    'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
+    ]
+
+
+def _recursive_fill_fields_dispatcher(input, output):
+    return (input, output)
+
+
+@array_function_dispatch(_recursive_fill_fields_dispatcher)
+def recursive_fill_fields(input, output):
+    """
+    Fills fields from output with fields from input,
+    with support for nested structures.
+
+    Parameters
+    ----------
+    input : ndarray
+        Input array.
+    output : ndarray
+        Output array.
+
+    Notes
+    -----
+    * `output` should be at least the same size as `input`
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
+    >>> b = np.zeros((3,), dtype=a.dtype)
+    >>> rfn.recursive_fill_fields(a, b)
+    array([(1, 10.), (2, 20.), (0,  0.)], dtype=[('A', '<i8'), ('B', '<f8')])
+
+    """
+    newdtype = output.dtype
+    for field in newdtype.names:
+        try:
+            current = input[field]
+        except ValueError:
+            continue
+        if current.dtype.names is not None:
+            recursive_fill_fields(current, output[field])
+        else:
+            output[field][:len(current)] = current
+    return output
+
+
+def _get_fieldspec(dtype):
+    """
+    Produce a list of name/dtype pairs corresponding to the dtype fields
+
+    Similar to dtype.descr, but the second item of each tuple is a dtype, not a
+    string. As a result, this handles subarray dtypes
+
+    Can be passed to the dtype constructor to reconstruct the dtype, noting that
+    this (deliberately) ignores field offsets.
+
+    Examples
+    --------
+    >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
+    >>> dt.descr
+    [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
+    >>> _get_fieldspec(dt)
+    [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
+
+    """
+    if dtype.names is None:
+        # .descr returns a nameless field, so we should too
+        return [('', dtype)]
+    else:
+        fields = ((name, dtype.fields[name]) for name in dtype.names)
+        # keep any titles, if present
+        return [
+            (name if len(f) == 2 else (f[2], name), f[0])
+            for name, f in fields
+        ]
+
+
+def get_names(adtype):
+    """
+    Returns the field names of the input datatype as a tuple. Input datatype
+    must have fields, otherwise an error is raised.
+
+    Parameters
+    ----------
+    adtype : dtype
+        Input datatype
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
+    ('A',)
+    >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)
+    ('A', 'B')
+    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+    >>> rfn.get_names(adtype)
+    ('a', ('b', ('ba', 'bb')))
+    """
+    listnames = []
+    names = adtype.names
+    for name in names:
+        current = adtype[name]
+        if current.names is not None:
+            listnames.append((name, tuple(get_names(current))))
+        else:
+            listnames.append(name)
+    return tuple(listnames)
+
+
+def get_names_flat(adtype):
+    """
+    Returns the field names of the input datatype as a tuple. Input datatype
+    must have fields, otherwise an error is raised.
+    Nested structures are flattened beforehand.
+
+    Parameters
+    ----------
+    adtype : dtype
+        Input datatype
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
+    False
+    >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
+    ('A', 'B')
+    >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
+    >>> rfn.get_names_flat(adtype)
+    ('a', 'b', 'ba', 'bb')
+    """
+    listnames = []
+    names = adtype.names
+    for name in names:
+        listnames.append(name)
+        current = adtype[name]
+        if current.names is not None:
+            listnames.extend(get_names_flat(current))
+    return tuple(listnames)
+
+
+def flatten_descr(ndtype):
+    """
+    Flatten a structured data-type description.
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
+    >>> rfn.flatten_descr(ndtype)
+    (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
+
+    """
+    names = ndtype.names
+    if names is None:
+        return (('', ndtype),)
+    else:
+        descr = []
+        for field in names:
+            (typ, _) = ndtype.fields[field]
+            if typ.names is not None:
+                descr.extend(flatten_descr(typ))
+            else:
+                descr.append((field, typ))
+        return tuple(descr)
+
+
+def _zip_dtype(seqarrays, flatten=False):
+    newdtype = []
+    if flatten:
+        for a in seqarrays:
+            newdtype.extend(flatten_descr(a.dtype))
+    else:
+        for a in seqarrays:
+            current = a.dtype
+            if current.names is not None and len(current.names) == 1:
+                # special case - dtypes of 1 field are flattened
+                newdtype.extend(_get_fieldspec(current))
+            else:
+                newdtype.append(('', current))
+    return np.dtype(newdtype)
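+
+# A minimal, hypothetical sketch of the single-field special case above (not
+# part of the library): a one-field structured dtype keeps its own field name,
+# while plain dtypes are added as anonymous fields.
+#
+#   >>> a = np.zeros(2, dtype=[('x', 'i4')])
+#   >>> b = np.zeros(2, dtype='f8')
+#   >>> _zip_dtype((a, b))
+#   dtype([('x', '<i4'), ('f1', '<f8')])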
+
+
+def _zip_descr(seqarrays, flatten=False):
+    """
+    Combine the dtype description of a series of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays
+    flatten : {boolean}, optional
+        Whether to collapse nested descriptions.
+    """
+    return _zip_dtype(seqarrays, flatten=flatten).descr
+
+
+def get_fieldstructure(adtype, lastname=None, parents=None,):
+    """
+    Returns a dictionary with fields indexing lists of their parent fields.
+
+    This function is used to simplify access to fields nested in other fields.
+
+    Parameters
+    ----------
+    adtype : np.dtype
+        Input datatype
+    lastname : optional
+        Last processed field name (used internally during recursion).
+    parents : dictionary
+        Dictionary of parent fields (used internally during recursion).
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype =  np.dtype([('A', int),
+    ...                     ('B', [('BA', int),
+    ...                            ('BB', [('BBA', int), ('BBB', int)])])])
+    >>> rfn.get_fieldstructure(ndtype)
+    ... # XXX: possible regression, order of BBA and BBB is swapped
+    {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
+
+    """
+    if parents is None:
+        parents = {}
+    names = adtype.names
+    for name in names:
+        current = adtype[name]
+        if current.names is not None:
+            if lastname:
+                parents[name] = [lastname, ]
+            else:
+                parents[name] = []
+            parents.update(get_fieldstructure(current, name, parents))
+        else:
+            lastparent = [_ for _ in (parents.get(lastname, []) or [])]
+            if lastparent:
+                lastparent.append(lastname)
+            elif lastname:
+                lastparent = [lastname, ]
+            parents[name] = lastparent or []
+    return parents
+
+
+def _izip_fields_flat(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays,
+    collapsing any nested structure.
+
+    """
+    for element in iterable:
+        if isinstance(element, np.void):
+            yield from _izip_fields_flat(tuple(element))
+        else:
+            yield element
+
+
+def _izip_fields(iterable):
+    """
+    Returns an iterator of concatenated fields from a sequence of arrays.
+
+    """
+    for element in iterable:
+        if (hasattr(element, '__iter__') and
+                not isinstance(element, str)):
+            yield from _izip_fields(element)
+        elif isinstance(element, np.void) and len(tuple(element)) == 1:
+            # same action as in the previous branch
+            yield from _izip_fields(element)
+        else:
+            yield element
+
+
+def _izip_records(seqarrays, fill_value=None, flatten=True):
+    """
+    Returns an iterator of concatenated items from a sequence of arrays.
+
+    Parameters
+    ----------
+    seqarrays : sequence of arrays
+        Sequence of arrays.
+    fill_value : {None, integer}
+        Value used to pad shorter iterables.
+    flatten : {True, False}, optional
+        Whether to flatten the items (collapse nested fields).
+    """
+
+    # Should we flatten the items, or just use a nested approach
+    if flatten:
+        zipfunc = _izip_fields_flat
+    else:
+        zipfunc = _izip_fields
+
+    for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
+        yield tuple(zipfunc(tup))
+
+
+def _fix_output(output, usemask=True, asrecarray=False):
+    """
+    Private function: return a recarray, a ndarray, a MaskedArray
+    or a MaskedRecords depending on the input parameters
+    """
+    if not isinstance(output, MaskedArray):
+        usemask = False
+    if usemask:
+        if asrecarray:
+            output = output.view(MaskedRecords)
+    else:
+        output = ma.filled(output)
+        if asrecarray:
+            output = output.view(recarray)
+    return output
+
+
+def _fix_defaults(output, defaults=None):
+    """
+    Update the fill_value and masked data of `output`
+    from the default given in a dictionary defaults.
+    """
+    names = output.dtype.names
+    (data, mask, fill_value) = (output.data, output.mask, output.fill_value)
+    for (k, v) in (defaults or {}).items():
+        if k in names:
+            fill_value[k] = v
+            data[k][mask[k]] = v
+    return output
+
+
+def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
+                             usemask=None, asrecarray=None):
+    return seqarrays
+
+
+@array_function_dispatch(_merge_arrays_dispatcher)
+def merge_arrays(seqarrays, fill_value=-1, flatten=False,
+                 usemask=False, asrecarray=False):
+    """
+    Merge arrays field by field.
+
+    Parameters
+    ----------
+    seqarrays : sequence of ndarrays
+        Sequence of arrays
+    fill_value : {float}, optional
+        Filling value used to pad missing data on the shorter arrays.
+    flatten : {False, True}, optional
+        Whether to collapse nested fields.
+    usemask : {False, True}, optional
+        Whether to return a masked array or not.
+    asrecarray : {False, True}, optional
+        Whether to return a recarray (MaskedRecords) or not.
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
+    array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+          dtype=[('f0', '<i8'), ('f1', '<f8')])
+
+    >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
+    ...         np.array([10., 20., 30.])), usemask=False)
+     array([(1, 10.0), (2, 20.0), (-1, 30.0)],
+             dtype=[('f0', '<i8'), ('f1', '<f8')])
+    >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
+    ...               np.array([10., 20., 30.])),
+    ...              usemask=False, asrecarray=True)
+    rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+              dtype=[('a', '>> from numpy.lib import recfunctions as rfn
+    >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
+    ...   dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
+    >>> rfn.drop_fields(a, 'a')
+    array([((2., 3),), ((5., 6),)],
+          dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])
+    >>> rfn.drop_fields(a, 'ba')
+    array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])
+    >>> rfn.drop_fields(a, ['ba', 'bb'])
+    array([(1,), (4,)], dtype=[('a', '>> from numpy.lib import recfunctions as rfn
+    >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
+    ...   dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
+    >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
+    array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
+          dtype=[('A', ' 1:
+        data = merge_arrays(data, flatten=True, usemask=usemask,
+                            fill_value=fill_value)
+    else:
+        data = data.pop()
+    #
+    output = ma.masked_all(
+        max(len(base), len(data)),
+        dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
+    output = recursive_fill_fields(base, output)
+    output = recursive_fill_fields(data, output)
+    #
+    return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
+
+
+def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
+    yield base
+    yield from data
+
+
+@array_function_dispatch(_rec_append_fields_dispatcher)
+def rec_append_fields(base, names, data, dtypes=None):
+    """
+    Add new fields to an existing array.
+
+    The names of the new fields are given with the `names` argument,
+    the corresponding values with the `data` argument.
+    If a single field is appended, `names`, `data` and `dtypes` do not have
+    to be lists; single values are accepted.
+
+    Parameters
+    ----------
+    base : array
+        Input array to extend.
+    names : string, sequence
+        String or sequence of strings corresponding to the names
+        of the new fields.
+    data : array or sequence of arrays
+        Array or sequence of arrays storing the fields to add to the base.
+    dtypes : sequence of datatypes, optional
+        Datatype or sequence of datatypes.
+        If None, the datatypes are estimated from the `data`.
+
+    See Also
+    --------
+    append_fields
+
+    Returns
+    -------
+    appended_array : np.recarray
+    """
+    return append_fields(base, names, data=data, dtypes=dtypes,
+                         asrecarray=True, usemask=False)
+
+
+def _repack_fields_dispatcher(a, align=None, recurse=None):
+    return (a,)
+
+
+@array_function_dispatch(_repack_fields_dispatcher)
+def repack_fields(a, align=False, recurse=False):
+    """
+    Re-pack the fields of a structured array or dtype in memory.
+
+    The memory layout of structured datatypes allows fields at arbitrary
+    byte offsets. This means the fields can be separated by padding bytes,
+    their offsets can be non-monotonically increasing, and they can overlap.
+
+    This method removes any overlaps and reorders the fields in memory so they
+    have increasing byte offsets, and adds or removes padding bytes depending
+    on the `align` option, which behaves like the `align` option to
+    `numpy.dtype`.
+
+    If `align=False`, this method produces a "packed" memory layout in which
+    each field starts at the byte the previous field ended, and any padding
+    bytes are removed.
+
+    If `align=True`, this method produces an "aligned" memory layout in which
+    each field's offset is a multiple of its alignment, and the total itemsize
+    is a multiple of the largest alignment, by adding padding bytes as needed.
+
+    Parameters
+    ----------
+    a : ndarray or dtype
+       array or dtype for which to repack the fields.
+    align : boolean
+       If true, use an "aligned" memory layout, otherwise use a "packed" layout.
+    recurse : boolean
+       If True, also repack nested structures.
+
+    Returns
+    -------
+    repacked : ndarray or dtype
+       Copy of `a` with fields repacked, or `a` itself if no repacking was
+       needed.
+
+    Examples
+    --------
+
+    >>> from numpy.lib import recfunctions as rfn
+    >>> def print_offsets(d):
+    ...     print("offsets:", [d.fields[name][1] for name in d.names])
+    ...     print("itemsize:", d.itemsize)
+    ...
+    >>> dt = np.dtype('u1, <i8, <f8', align=True)
+    >>> dt
+    dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f8'], 'offsets': [0, 8, 16], 'itemsize': 24}, align=True)
+    >>> print_offsets(dt)
+    offsets: [0, 8, 16]
+    itemsize: 24
+    >>> packed_dt = rfn.repack_fields(dt)
+    >>> packed_dt
+    dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
+    >>> print_offsets(packed_dt)
+    offsets: [0, 1, 9]
+    itemsize: 17
+
+    """
+    if not isinstance(a, np.dtype):
+        dt = repack_fields(a.dtype, align=align, recurse=recurse)
+        return a.astype(dt, copy=False)
+
+    if a.names is None:
+        return a
+
+    fieldinfo = []
+    for name in a.names:
+        tup = a.fields[name]
+        if recurse:
+            fmt = repack_fields(tup[0], align=align, recurse=True)
+        else:
+            fmt = tup[0]
+
+        if len(tup) == 3:
+            name = (tup[2], name)
+
+        fieldinfo.append((name, fmt))
+
+    dt = np.dtype(fieldinfo, align=align)
+    return np.dtype((a.type, dt))
+
+def _get_fields_and_offsets(dt, offset=0):
+    """
+    Returns a flat list of (dtype, count, offset) tuples of all the
+    scalar fields in the dtype "dt", including nested fields, in left
+    to right order.
+    """
+
+    # counts up elements in subarrays, including nested subarrays, and returns
+    # base dtype and count
+    def count_elem(dt):
+        count = 1
+        while dt.shape != ():
+            for size in dt.shape:
+                count *= size
+            dt = dt.base
+        return dt, count
+
+    fields = []
+    for name in dt.names:
+        field = dt.fields[name]
+        f_dt, f_offset = field[0], field[1]
+        f_dt, n = count_elem(f_dt)
+
+        if f_dt.names is None:
+            fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
+        else:
+            subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
+            size = f_dt.itemsize
+
+            for i in range(n):
+                if i == 0:
+                    # optimization: avoid list comprehension if no subarray
+                    fields.extend(subfields)
+                else:
+                    fields.extend([(d, c, o + i*size) for d, c, o in subfields])
+    return fields
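+
+# Hedged illustration (not part of the library) of the flat (dtype, count,
+# offset) list produced above; the field names 'x', 'ya', 'yb' are arbitrary:
+#
+#   >>> dt = np.dtype([('x', 'i4'), ('y', [('ya', 'f8'), ('yb', 'f8')])])
+#   >>> [(f.base, c, o) for f, c, o in _get_fields_and_offsets(dt)]
+#   [(dtype('int32'), 1, 0), (dtype('float64'), 1, 4), (dtype('float64'), 1, 12)]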
+
+def _common_stride(offsets, counts, itemsize):
+    """
+    Returns the stride between the fields, or None if the stride is not
+    constant. The values in "counts" designate the lengths of
+    subarrays. Subarrays are treated as runs of contiguous fields whose
+    stride is always positive.
+    """
+    if len(offsets) <= 1:
+        return itemsize
+
+    negative = offsets[1] < offsets[0]  # negative stride
+    if negative:
+        # reverse, so offsets will be ascending
+        it = zip(reversed(offsets), reversed(counts))
+    else:
+        it = zip(offsets, counts)
+
+    prev_offset = None
+    stride = None
+    for offset, count in it:
+        if count != 1:  # subarray: always c-contiguous
+            if negative:
+                return None  # subarrays can never have a negative stride
+            if stride is None:
+                stride = itemsize
+            if stride != itemsize:
+                return None
+            end_offset = offset + (count - 1) * itemsize
+        else:
+            end_offset = offset
+
+        if prev_offset is not None:
+            new_stride = offset - prev_offset
+            if stride is None:
+                stride = new_stride
+            if stride != new_stride:
+                return None
+
+        prev_offset = end_offset
+
+    if negative:
+        return -stride
+    return stride
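+
+# Hedged illustration (not part of the library) of how _common_stride reports
+# the spacing between scalar fields:
+#
+#   >>> _common_stride([0, 4, 8], [1, 1, 1], 4)    # evenly spaced fields
+#   4
+#   >>> _common_stride([8, 4, 0], [1, 1, 1], 4)    # descending offsets
+#   -4
+#   >>> _common_stride([0, 4, 12], [1, 1, 1], 4) is None   # uneven spacing
+#   True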
+
+
+def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
+                                           casting=None):
+    return (arr,)
+
+@array_function_dispatch(_structured_to_unstructured_dispatcher)
+def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
+    """
+    Converts an n-D structured array into an (n+1)-D unstructured array.
+
+    The new array will have a new last dimension equal in size to the
+    number of field-elements of the input array. If not supplied, the output
+    datatype is determined from the numpy type promotion rules applied to all
+    the field datatypes.
+
+    Nested fields, as well as each element of any subarray fields, all count
+    as single field-elements.
+
+    Parameters
+    ----------
+    arr : ndarray
+       Structured array or dtype to convert. Cannot contain object datatype.
+    dtype : dtype, optional
+       The dtype of the output unstructured array.
+    copy : bool, optional
+        If true, always return a copy. If false, a view is returned if
+        possible, such as when the `dtype` and strides of the fields are
+        suitable and the array subtype is one of `np.ndarray`, `np.recarray`
+        or `np.memmap`.
+
+        .. versionchanged:: 1.25.0
+            A view can now be returned if the fields are separated by a
+            uniform stride.
+
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        See casting argument of `numpy.ndarray.astype`. Controls what kind of
+        data casting may occur.
+
+    Returns
+    -------
+    unstructured : ndarray
+       Unstructured array with one more dimension.
+
+    Examples
+    --------
+
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+    >>> a
+    array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
+           (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
+          dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+    >>> rfn.structured_to_unstructured(a)
+    array([[0., 0., 0., 0., 0.],
+           [0., 0., 0., 0., 0.],
+           [0., 0., 0., 0., 0.],
+           [0., 0., 0., 0., 0.]])
+
+    >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+    ...              dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+    >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
+    array([ 3. ,  5.5,  9. , 11. ])
+
+    """
+    if arr.dtype.names is None:
+        raise ValueError('arr must be a structured array')
+
+    fields = _get_fields_and_offsets(arr.dtype)
+    n_fields = len(fields)
+    if n_fields == 0 and dtype is None:
+        raise ValueError("arr has no fields. Unable to guess dtype")
+    elif n_fields == 0:
+        # too many bugs elsewhere for this to work now
+        raise NotImplementedError("arr with no fields is not supported")
+
+    dts, counts, offsets = zip(*fields)
+    names = ['f{}'.format(n) for n in range(n_fields)]
+
+    if dtype is None:
+        out_dtype = np.result_type(*[dt.base for dt in dts])
+    else:
+        out_dtype = np.dtype(dtype)
+
+    # Use a series of views and casts to convert to an unstructured array:
+
+    # first view using flattened fields (doesn't work for object arrays)
+    # Note: dts may include a shape for subarrays
+    flattened_fields = np.dtype({'names': names,
+                                 'formats': dts,
+                                 'offsets': offsets,
+                                 'itemsize': arr.dtype.itemsize})
+    arr = arr.view(flattened_fields)
+
+    # we only allow a few types to be unstructured by manipulating the
+    # strides, because we know it won't work with, for example, np.matrix nor
+    # np.ma.MaskedArray.
+    can_view = type(arr) in (np.ndarray, np.recarray, np.memmap)
+    if (not copy) and can_view and all(dt.base == out_dtype for dt in dts):
+        # all elements have the right dtype already; if they have a common
+        # stride, we can just return a view
+        common_stride = _common_stride(offsets, counts, out_dtype.itemsize)
+        if common_stride is not None:
+            wrap = arr.__array_wrap__
+
+            new_shape = arr.shape + (sum(counts), out_dtype.itemsize)
+            new_strides = arr.strides + (abs(common_stride), 1)
+
+            arr = arr[..., np.newaxis].view(np.uint8)  # view as bytes
+            arr = arr[..., min(offsets):]  # remove the leading unused data
+            arr = np.lib.stride_tricks.as_strided(arr,
+                                                  new_shape,
+                                                  new_strides,
+                                                  subok=True)
+
+            # cast and drop the last dimension again
+            arr = arr.view(out_dtype)[..., 0]
+
+            if common_stride < 0:
+                arr = arr[..., ::-1]  # reverse, if the stride was negative
+            if type(arr) is not type(wrap.__self__):
+                # Some types (e.g. recarray) turn into an ndarray along the
+                # way, so we have to wrap it again in order to match the
+                # behavior with copy=True.
+                arr = wrap(arr)
+            return arr
+
+    # next cast to a packed format with all fields converted to new dtype
+    packed_fields = np.dtype({'names': names,
+                              'formats': [(out_dtype, dt.shape) for dt in dts]})
+    arr = arr.astype(packed_fields, copy=copy, casting=casting)
+
+    # finally it is safe to view the packed fields as the unstructured type
+    return arr.view((out_dtype, (sum(counts),)))
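+
+# Hedged sketch (not from the upstream docstring) of the copy=False fast path
+# above: when every field already has the output dtype and a uniform stride,
+# a view of the original data is returned, so writes propagate back.
+#
+#   >>> x = np.zeros(3, dtype=[('a', 'f8'), ('b', 'f8')])
+#   >>> u = structured_to_unstructured(x, copy=False)
+#   >>> u[0, 0] = 99.0
+#   >>> x['a'][0]
+#   99.0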
+
+
+def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
+                                           align=None, copy=None, casting=None):
+    return (arr,)
+
+@array_function_dispatch(_unstructured_to_structured_dispatcher)
+def unstructured_to_structured(arr, dtype=None, names=None, align=False,
+                               copy=False, casting='unsafe'):
+    """
+    Converts an n-D unstructured array into an (n-1)-D structured array.
+
+    The last dimension of the input array is converted into a structure, with
+    number of field-elements equal to the size of the last dimension of the
+    input array. By default all output fields have the input array's dtype, but
+    an output structured dtype with an equal number of field-elements can be
+    supplied instead.
+
+    Nested fields, as well as each element of any subarray fields, all count
+    towards the number of field-elements.
+
+    Parameters
+    ----------
+    arr : ndarray
+       Unstructured array or dtype to convert.
+    dtype : dtype, optional
+       The structured dtype of the output array
+    names : list of strings, optional
+       If dtype is not supplied, this specifies the field names for the output
+       dtype, in order. The field dtypes will be the same as the input array.
+    align : boolean, optional
+       Whether to create an aligned memory layout.
+    copy : bool, optional
+        See copy argument to `numpy.ndarray.astype`. If true, always return a
+        copy. If false, and `dtype` requirements are satisfied, a view is
+        returned.
+    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+        See casting argument of `numpy.ndarray.astype`. Controls what kind of
+        data casting may occur.
+
+    Returns
+    -------
+    structured : ndarray
+       Structured array with fewer dimensions.
+
+    Examples
+    --------
+
+    >>> from numpy.lib import recfunctions as rfn
+    >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+    >>> a = np.arange(20).reshape((4,5))
+    >>> a
+    array([[ 0,  1,  2,  3,  4],
+           [ 5,  6,  7,  8,  9],
+           [10, 11, 12, 13, 14],
+           [15, 16, 17, 18, 19]])
+    >>> rfn.unstructured_to_structured(a, dt)
+    array([( 0, ( 1.,  2), [ 3.,  4.]), ( 5, ( 6.,  7), [ 8.,  9.]),
+           (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
+          dtype=[('a', '>> from numpy.lib import recfunctions as rfn
+    >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+    ...              dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+    >>> rfn.apply_along_fields(np.mean, b)
+    array([ 2.66666667,  5.33333333,  8.66666667, 11.        ])
+    >>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
+    array([ 3. ,  5.5,  9. , 11. ])
+
+    """
+    if arr.dtype.names is None:
+        raise ValueError('arr must be a structured array')
+
+    uarr = structured_to_unstructured(arr)
+    return func(uarr, axis=-1)
+    # works and avoids axis requirement, but very, very slow:
+    #return np.apply_along_axis(func, -1, uarr)
+
+def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
+    return dst, src
+
+@array_function_dispatch(_assign_fields_by_name_dispatcher)
+def assign_fields_by_name(dst, src, zero_unassigned=True):
+    """
+    Assigns values from one structured array to another by field name.
+
+    Normally in numpy >= 1.14, assignment of one structured array to another
+    copies fields "by position", meaning that the first field from the src is
+    copied to the first field of the dst, and so on, regardless of field name.
+
+    This function instead copies "by field name", such that fields in the dst
+    are assigned from the identically named field in the src. This applies
+    recursively for nested structures. This is how structure assignment worked
+    from numpy 1.6 through 1.13.
+
+    Parameters
+    ----------
+    dst : ndarray
+    src : ndarray
+        The source and destination arrays during assignment.
+    zero_unassigned : bool, optional
+        If True, fields in the dst for which there was no matching
+        field in the src are filled with the value 0 (zero). This
+        was the behavior of numpy <= 1.13. If False, those fields
+        are not modified.
+    """
+
+    if dst.dtype.names is None:
+        dst[...] = src
+        return
+
+    for name in dst.dtype.names:
+        if name not in src.dtype.names:
+            if zero_unassigned:
+                dst[name] = 0
+        else:
+            assign_fields_by_name(dst[name], src[name],
+                                  zero_unassigned)
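+
+# Hypothetical usage sketch (field names are arbitrary): fields are copied by
+# matching name, and fields missing from `src` are zeroed by default.
+#
+#   >>> dst = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8')])
+#   >>> src = np.array([(10.5,), (20.5,)], dtype=[('b', 'f8')])
+#   >>> assign_fields_by_name(dst, src)
+#   >>> dst
+#   array([(0, 10.5), (0, 20.5)], dtype=[('a', '<i4'), ('b', '<f8')])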
+
+def _require_fields_dispatcher(array, required_dtype):
+    return (array,)
+
+@array_function_dispatch(_require_fields_dispatcher)
+def require_fields(array, required_dtype):
+    """
+    Casts a structured array to a new dtype using assignment by field-name.
+
+    This function assigns from the old to the new array by name, so the
+    value of a field in the output array is the value of the field with the
+    same name in the source array. This has the effect of creating a new
+    ndarray containing only the fields "required" by the required_dtype.
+
+    If a field name in the required_dtype does not exist in the
+    input array, that field is created and set to 0 in the output array.
+
+    Parameters
+    ----------
+    array : ndarray
+       array to cast
+    required_dtype : dtype
+       datatype for output array
+
+    Returns
+    -------
+    out : ndarray
+        array with the new dtype, with field values copied from the fields in
+        the input array with the same name
+
+    Examples
+    --------
+
+    >>> from numpy.lib import recfunctions as rfn
+    >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+    >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
+    array([(1., 1), (1., 1), (1., 1), (1., 1)],
+      dtype=[('b', '<f4'), ('c', 'u1')])
+    >>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
+    array([(1., 0), (1., 0), (1., 0), (1., 0)],
+      dtype=[('b', '>> from numpy.lib import recfunctions as rfn
+    >>> x = np.array([1, 2,])
+    >>> rfn.stack_arrays(x) is x
+    True
+    >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
+    >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
+    ...   dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
+    >>> test = rfn.stack_arrays((z,zz))
+    >>> test
+    masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
+                       (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
+                 mask=[(False, False,  True), (False, False,  True),
+                       (False, False, False), (False, False, False),
+                       (False, False, False)],
+           fill_value=(b'N/A', 1.e+20, 1.e+20),
+                dtype=[('A', 'S3'), ('B', ' '%s'" %
+                                    (cdtype, fdtype))
+    # Only one field: use concatenate
+    if len(newdescr) == 1:
+        output = ma.concatenate(seqarrays)
+    else:
+        #
+        output = ma.masked_all((np.sum(nrecords),), newdescr)
+        offset = np.cumsum(np.r_[0, nrecords])
+        seen = []
+        for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
+            names = a.dtype.names
+            if names is None:
+                output['f%i' % len(seen)][i:j] = a
+            else:
+                for name in n:
+                    output[name][i:j] = a[name]
+                    if name not in seen:
+                        seen.append(name)
+    #
+    return _fix_output(_fix_defaults(output, defaults),
+                       usemask=usemask, asrecarray=asrecarray)
+
+
+def _find_duplicates_dispatcher(
+        a, key=None, ignoremask=None, return_index=None):
+    return (a,)
+
+
+@array_function_dispatch(_find_duplicates_dispatcher)
+def find_duplicates(a, key=None, ignoremask=True, return_index=False):
+    """
+    Find the duplicates in a structured array along a given key
+
+    Parameters
+    ----------
+    a : array-like
+        Input array
+    key : {string, None}, optional
+        Name of the fields along which to check the duplicates.
+        If None, the search is performed by records
+    ignoremask : {True, False}, optional
+        Whether masked data should be discarded or considered as duplicates.
+    return_index : {False, True}, optional
+        Whether to return the indices of the duplicated values.
+
+    Examples
+    --------
+    >>> from numpy.lib import recfunctions as rfn
+    >>> ndtype = [('a', int)]
+    >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
+    ...         mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
+    >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
+    (masked_array(data=[(1,), (1,), (2,), (2,)],
+                 mask=[(False,), (False,), (False,), (False,)],
+           fill_value=(999999,),
+                dtype=[('a', '= nb1)] - nb1
+    (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
+    if jointype == 'inner':
+        (r1spc, r2spc) = (0, 0)
+    elif jointype == 'outer':
+        idx_out = idx_sort[~flag_in]
+        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+        idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
+        (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
+    elif jointype == 'leftouter':
+        idx_out = idx_sort[~flag_in]
+        idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+        (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
+    # Select the entries from each input
+    (s1, s2) = (r1[idx_1], r2[idx_2])
+    #
+    # Build the new description of the output array .......
+    # Start with the key fields
+    ndtype = _get_fieldspec(r1k.dtype)
+
+    # Add the fields from r1
+    for fname, fdtype in _get_fieldspec(r1.dtype):
+        if fname not in key:
+            ndtype.append((fname, fdtype))
+
+    # Add the fields from r2
+    for fname, fdtype in _get_fieldspec(r2.dtype):
+        # Have we seen the current name already?
+        # we need to rebuild this list every time
+        names = list(name for name, dtype in ndtype)
+        try:
+            nameidx = names.index(fname)
+        except ValueError:
+            #... we haven't: just add the description to the current list
+            ndtype.append((fname, fdtype))
+        else:
+            # collision
+            _, cdtype = ndtype[nameidx]
+            if fname in key:
+                # The current field is part of the key: take the largest dtype
+                ndtype[nameidx] = (fname, max(fdtype, cdtype))
+            else:
+                # The current field is not part of the key: add the suffixes,
+                # and place the new field adjacent to the old one
+                ndtype[nameidx:nameidx + 1] = [
+                    (fname + r1postfix, cdtype),
+                    (fname + r2postfix, fdtype)
+                ]
+    # Rebuild a dtype from the new fields
+    ndtype = np.dtype(ndtype)
+    # Find the largest nb of common fields :
+    # r1cmn and r2cmn should be equal, but...
+    cmn = max(r1cmn, r2cmn)
+    # Construct an empty array
+    output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
+    names = output.dtype.names
+    for f in r1names:
+        selected = s1[f]
+        if f not in names or (f in r2names and not r2postfix and f not in key):
+            f += r1postfix
+        current = output[f]
+        current[:r1cmn] = selected[:r1cmn]
+        if jointype in ('outer', 'leftouter'):
+            current[cmn:cmn + r1spc] = selected[r1cmn:]
+    for f in r2names:
+        selected = s2[f]
+        if f not in names or (f in r1names and not r1postfix and f not in key):
+            f += r2postfix
+        current = output[f]
+        current[:r2cmn] = selected[:r2cmn]
+        if (jointype == 'outer') and r2spc:
+            current[-r2spc:] = selected[r2cmn:]
+    # Sort and finalize the output
+    output.sort(order=key)
+    kwargs = dict(usemask=usemask, asrecarray=asrecarray)
+    return _fix_output(_fix_defaults(output, defaults), **kwargs)
+
+
+def _rec_join_dispatcher(
+        key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+        defaults=None):
+    return (r1, r2)
+
+
+@array_function_dispatch(_rec_join_dispatcher)
+def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
+             defaults=None):
+    """
+    Join arrays `r1` and `r2` on keys.
+    Alternative to join_by that always returns a np.recarray.
+
+    See Also
+    --------
+    join_by : equivalent function
+    """
+    kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
+                  defaults=defaults, usemask=False, asrecarray=True)
+    return join_by(key, r1, r2, **kwargs)
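+
+# Hedged usage sketch for join_by/rec_join (array contents and field names are
+# made up): rows are matched on the key, and non-key fields shared by both
+# inputs receive the r1postfix/r2postfix suffixes.
+#
+#   >>> from numpy.lib import recfunctions as rfn
+#   >>> r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', 'i8'), ('val', 'f8')])
+#   >>> r2 = np.array([(2, 200.), (3, 300.)], dtype=[('key', 'i8'), ('val', 'f8')])
+#   >>> rfn.rec_join('key', r1, r2, jointype='inner')
+#   rec.array([(2, 20., 200.)],
+#             dtype=[('key', '<i8'), ('val1', '<f8'), ('val2', '<f8')])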
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/scimath.py b/.venv/lib/python3.11/site-packages/numpy/lib/scimath.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7ef0d7109c63cffc7c30f59d97389a4a4a230f7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/scimath.py
@@ -0,0 +1,625 @@
+"""
+Wrapper functions for more user-friendly calling of certain math functions
+whose output data-type differs from the input data-type in certain
+domains of the input.
+
+For example, for functions like `log` with branch cuts, the versions in this
+module provide the mathematically valid answers in the complex plane::
+
+  >>> import math
+  >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)
+  True
+
+Similarly, `sqrt`, other base logarithms, `power` and trig functions are
+correctly handled.  See their respective docstrings for specific examples.
+
+Functions
+---------
+
+.. autosummary::
+   :toctree: generated/
+
+   sqrt
+   log
+   log2
+   logn
+   log10
+   power
+   arccos
+   arcsin
+   arctanh
+
+"""
+import numpy.core.numeric as nx
+import numpy.core.numerictypes as nt
+from numpy.core.numeric import asarray, any
+from numpy.core.overrides import array_function_dispatch
+from numpy.lib.type_check import isreal
+
+
+__all__ = [
+    'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
+    'arctanh'
+    ]
+
+
+_ln2 = nx.log(2.0)
+
+
+def _tocomplex(arr):
+    """Convert its input `arr` to a complex array.
+
+    The input is returned as a complex array of the smallest type that will fit
+    the original data: types like single, byte, short, etc. become csingle,
+    while others become cdouble.
+
+    A copy of the input is always made.
+
+    Parameters
+    ----------
+    arr : array
+
+    Returns
+    -------
+    array
+        An array with the same input data as the input but in complex form.
+
+    Examples
+    --------
+
+    First, consider an input of type short:
+
+    >>> a = np.array([1,2,3],np.short)
+
+    >>> ac = np.lib.scimath._tocomplex(a); ac
+    array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
+
+    >>> ac.dtype
+    dtype('complex64')
+
+    If the input is of type double, the output is correspondingly of the
+    complex double type as well:
+
+    >>> b = np.array([1,2,3],np.double)
+
+    >>> bc = np.lib.scimath._tocomplex(b); bc
+    array([1.+0.j, 2.+0.j, 3.+0.j])
+
+    >>> bc.dtype
+    dtype('complex128')
+
+    Note that even if the input was complex to begin with, a copy is still
+    made, since the astype() method always copies:
+
+    >>> c = np.array([1,2,3],np.csingle)
+
+    >>> cc = np.lib.scimath._tocomplex(c); cc
+    array([1.+0.j,  2.+0.j,  3.+0.j], dtype=complex64)
+
+    >>> c *= 2; c
+    array([2.+0.j,  4.+0.j,  6.+0.j], dtype=complex64)
+
+    >>> cc
+    array([1.+0.j,  2.+0.j,  3.+0.j], dtype=complex64)
+    """
+    if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
+                                   nt.ushort, nt.csingle)):
+        return arr.astype(nt.csingle)
+    else:
+        return arr.astype(nt.cdouble)
+
+
+def _fix_real_lt_zero(x):
+    """Convert `x` to complex if it has real, negative components.
+
+    Otherwise, output is just the array version of the input (via asarray).
+
+    Parameters
+    ----------
+    x : array_like
+
+    Returns
+    -------
+    array
+
+    Examples
+    --------
+    >>> np.lib.scimath._fix_real_lt_zero([1,2])
+    array([1, 2])
+
+    >>> np.lib.scimath._fix_real_lt_zero([-1,2])
+    array([-1.+0.j,  2.+0.j])
+
+    """
+    x = asarray(x)
+    if any(isreal(x) & (x < 0)):
+        x = _tocomplex(x)
+    return x
+
+
+def _fix_int_lt_zero(x):
+    """Convert `x` to double if it has real, negative components.
+
+    Otherwise, output is just the array version of the input (via asarray).
+
+    Parameters
+    ----------
+    x : array_like
+
+    Returns
+    -------
+    array
+
+    Examples
+    --------
+    >>> np.lib.scimath._fix_int_lt_zero([1,2])
+    array([1, 2])
+
+    >>> np.lib.scimath._fix_int_lt_zero([-1,2])
+    array([-1.,  2.])
+    """
+    x = asarray(x)
+    if any(isreal(x) & (x < 0)):
+        x = x * 1.0
+    return x
+
+
+def _fix_real_abs_gt_1(x):
+    """Convert `x` to complex if it has real components x_i with abs(x_i)>1.
+
+    Otherwise, output is just the array version of the input (via asarray).
+
+    Parameters
+    ----------
+    x : array_like
+
+    Returns
+    -------
+    array
+
+    Examples
+    --------
+    >>> np.lib.scimath._fix_real_abs_gt_1([0,1])
+    array([0, 1])
+
+    >>> np.lib.scimath._fix_real_abs_gt_1([0,2])
+    array([0.+0.j, 2.+0.j])
+    """
+    x = asarray(x)
+    if any(isreal(x) & (abs(x) > 1)):
+        x = _tocomplex(x)
+    return x
+
+
+def _unary_dispatcher(x):
+    return (x,)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def sqrt(x):
+    """
+    Compute the square root of x.
+
+    For negative input elements, a complex value is returned
+    (unlike `numpy.sqrt` which returns NaN).
+
+    Parameters
+    ----------
+    x : array_like
+       The input value(s).
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The square root of `x`. If `x` was a scalar, so is `out`,
+       otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.sqrt
+
+    Examples
+    --------
+    For real, non-negative inputs this works just like `numpy.sqrt`:
+
+    >>> np.emath.sqrt(1)
+    1.0
+    >>> np.emath.sqrt([1, 4])
+    array([1.,  2.])
+
+    But it automatically handles negative inputs:
+
+    >>> np.emath.sqrt(-1)
+    1j
+    >>> np.emath.sqrt([-1,4])
+    array([0.+1.j, 2.+0.j])
+
+    Different results are expected because floating-point 0.0 and -0.0
+    are distinct.
+
+    For more control, explicitly use complex() as follows:
+
+    >>> np.emath.sqrt(complex(-4.0, 0.0))
+    2j
+    >>> np.emath.sqrt(complex(-4.0, -0.0))
+    -2j
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.sqrt(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log(x):
+    """
+    Compute the natural logarithm of `x`.
+
+    Return the "principal value" (for a description of this, see `numpy.log`)
+    of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
+    returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
+    complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like
+       The value(s) whose log is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The log of the `x` value(s). If `x` was a scalar, so is `out`,
+       otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.log
+
+    Notes
+    -----
+    For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
+    (note, however, that otherwise `numpy.log` and this `log` are identical,
+    i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
+    notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+    >>> np.emath.log(np.exp(1))
+    1.0
+
+    Negative arguments are handled "correctly" (recall that
+    ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):
+
+    >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
+    True
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log10(x):
+    """
+    Compute the logarithm base 10 of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
+    is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
+    returns ``inf``). Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+       The value(s) whose log base 10 is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
+       otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.log10
+
+    Notes
+    -----
+    For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
+    (note, however, that otherwise `numpy.log10` and this `log10` are
+    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+    and, notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+
+    (We set the printing precision so the example can be auto-tested)
+
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.log10(10**1)
+    1.0
+
+    >>> np.emath.log10([-10**1, -10**2, 10**2])
+    array([1.+1.3644j, 2.+1.3644j, 2.+0.j    ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log10(x)
+
+
+def _logn_dispatcher(n, x):
+    return (n, x,)
+
+
+@array_function_dispatch(_logn_dispatcher)
+def logn(n, x):
+    """
+    Take log base n of x.
+
+    If `x` contains negative inputs, the answer is computed and returned in the
+    complex domain.
+
+    Parameters
+    ----------
+    n : array_like
+       The integer base(s) in which the log is taken.
+    x : array_like
+       The value(s) whose log base `n` is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The log base `n` of the `x` value(s). If `x` was a scalar, so is
+       `out`, otherwise an array is returned.
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.logn(2, [4, 8])
+    array([2., 3.])
+    >>> np.emath.logn(2, [-4, -8, 8])
+    array([2.+4.5324j, 3.+4.5324j, 3.+0.j    ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    n = _fix_real_lt_zero(n)
+    return nx.log(x)/nx.log(n)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def log2(x):
+    """
+    Compute the logarithm base 2 of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
+    a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
+    ``inf``). Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like
+       The value(s) whose log base 2 is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
+       otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.log2
+
+    Notes
+    -----
+    For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
+    (note, however, that otherwise `numpy.log2` and this `log2` are
+    identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
+    and, notably, the complex principal value if ``x.imag != 0``).
+
+    Examples
+    --------
+    We set the printing precision so the example can be auto-tested:
+
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.log2(8)
+    3.0
+    >>> np.emath.log2([-4, -8, 8])
+    array([2.+4.5324j, 3.+4.5324j, 3.+0.j    ])
+
+    """
+    x = _fix_real_lt_zero(x)
+    return nx.log2(x)
+
+
+def _power_dispatcher(x, p):
+    return (x, p)
+
+
+@array_function_dispatch(_power_dispatcher)
+def power(x, p):
+    """
+    Return x to the power p, (x**p).
+
+    If `x` contains negative values, the output is converted to the
+    complex domain.
+
+    Parameters
+    ----------
+    x : array_like
+        The input value(s).
+    p : array_like of ints
+        The power(s) to which `x` is raised. If `x` contains multiple values,
+        `p` has to either be a scalar, or contain the same number of values
+        as `x`. In the latter case, the result is
+        ``x[0]**p[0], x[1]**p[1], ...``.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
+        otherwise an array is returned.
+
+    See Also
+    --------
+    numpy.power
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.power([2, 4], 2)
+    array([ 4, 16])
+    >>> np.emath.power([2, 4], -2)
+    array([0.25  ,  0.0625])
+    >>> np.emath.power([-2, 4], 2)
+    array([ 4.-0.j, 16.+0.j])
+
+    """
+    x = _fix_real_lt_zero(x)
+    p = _fix_int_lt_zero(p)
+    return nx.power(x, p)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arccos(x):
+    """
+    Compute the inverse cosine of x.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
+    `abs(x) <= 1`, this is a real number in the closed interval
+    :math:`[0, \\pi]`.  Otherwise, the complex principal value is returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+       The value(s) whose arccos is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
+       is `out`, otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.arccos
+
+    Notes
+    -----
+    For an arccos() that returns ``NAN`` when real `x` is not in the
+    interval ``[-1,1]``, use `numpy.arccos`.
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.arccos(1) # a scalar is returned
+    0.0
+
+    >>> np.emath.arccos([1,2])
+    array([0.-0.j   , 0.-1.317j])
+
+    """
+    x = _fix_real_abs_gt_1(x)
+    return nx.arccos(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arcsin(x):
+    """
+    Compute the inverse sine of x.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
+    `abs(x) <= 1`, this is a real number in the closed interval
+    :math:`[-\\pi/2, \\pi/2]`.  Otherwise, the complex principal value is
+    returned.
+
+    Parameters
+    ----------
+    x : array_like or scalar
+       The value(s) whose arcsin is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
+       is `out`, otherwise an array object is returned.
+
+    See Also
+    --------
+    numpy.arcsin
+
+    Notes
+    -----
+    For an arcsin() that returns ``NAN`` when real `x` is not in the
+    interval ``[-1,1]``, use `numpy.arcsin`.
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> np.emath.arcsin(0)
+    0.0
+
+    >>> np.emath.arcsin([0,1])
+    array([0.    , 1.5708])
+
+    """
+    x = _fix_real_abs_gt_1(x)
+    return nx.arcsin(x)
+
+
+@array_function_dispatch(_unary_dispatcher)
+def arctanh(x):
+    """
+    Compute the inverse hyperbolic tangent of `x`.
+
+    Return the "principal value" (for a description of this, see
+    `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
+    ``abs(x) < 1``, this is a real number.  If `abs(x) > 1`, or if `x` is
+    complex, the result is complex. Finally, ``x = 1`` returns ``inf`` and
+    ``x = -1`` returns ``-inf``.
+
+    Parameters
+    ----------
+    x : array_like
+       The value(s) whose arctanh is (are) required.
+
+    Returns
+    -------
+    out : ndarray or scalar
+       The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
+       a scalar so is `out`, otherwise an array is returned.
+
+
+    See Also
+    --------
+    numpy.arctanh
+
+    Notes
+    -----
+    For an arctanh() that returns ``NAN`` when real `x` is not in the
+    interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
+    return +/-inf for ``x = +/-1``).
+
+    Examples
+    --------
+    >>> np.set_printoptions(precision=4)
+
+    >>> from numpy.testing import suppress_warnings
+    >>> with suppress_warnings() as sup:
+    ...     sup.filter(RuntimeWarning)
+    ...     np.emath.arctanh(np.eye(2))
+    array([[inf,  0.],
+           [ 0., inf]])
+    >>> np.emath.arctanh([1j])
+    array([0.+0.7854j])
+
+    """
+    x = _fix_real_abs_gt_1(x)
+    return nx.arctanh(x)
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/setup.py b/.venv/lib/python3.11/site-packages/numpy/lib/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..7520b72d7ac02d17b8baecd98918082397632a14
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/setup.py
@@ -0,0 +1,12 @@
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+
+    config = Configuration('lib', parent_package, top_path)
+    config.add_subpackage('tests')
+    config.add_data_dir('tests/data')
+    config.add_data_files('*.pyi')
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/shape_base.py b/.venv/lib/python3.11/site-packages/numpy/lib/shape_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d8a41bfe4a9c6d0c5666968a31c78b7c27497dd
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/shape_base.py
@@ -0,0 +1,1274 @@
+import functools
+
+import numpy.core.numeric as _nx
+from numpy.core.numeric import asarray, zeros, array, asanyarray
+from numpy.core.fromnumeric import reshape, transpose
+from numpy.core.multiarray import normalize_axis_index
+from numpy.core import overrides
+from numpy.core import vstack, atleast_3d
+from numpy.core.numeric import normalize_axis_tuple
+from numpy.core.shape_base import _arrays_for_stack_dispatcher
+from numpy.lib.index_tricks import ndindex
+from numpy.matrixlib.defmatrix import matrix  # this raises all the right alarm bells
+
+
+__all__ = [
+    'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
+    'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
+    'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis',
+    'put_along_axis'
+    ]
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+def _make_along_axis_idx(arr_shape, indices, axis):
+    # compute dimensions to iterate over
+    if not _nx.issubdtype(indices.dtype, _nx.integer):
+        raise IndexError('`indices` must be an integer array')
+    if len(arr_shape) != indices.ndim:
+        raise ValueError(
+            "`indices` and `arr` must have the same number of dimensions")
+    shape_ones = (1,) * indices.ndim
+    dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
+
+    # build a fancy index, consisting of orthogonal aranges, with the
+    # requested index inserted at the right location
+    fancy_index = []
+    for dim, n in zip(dest_dims, arr_shape):
+        if dim is None:
+            fancy_index.append(indices)
+        else:
+            ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
+            fancy_index.append(_nx.arange(n).reshape(ind_shape))
+
+    return tuple(fancy_index)
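+
+# Hedged sketch (not part of numpy) of the fancy index built above for a 2-D
+# array with axis=1: an orthogonal arange over axis 0, broadcast against the
+# given indices along axis 1.
+#
+#   >>> arr = np.array([[10, 30, 20], [60, 40, 50]])
+#   >>> indices = np.argsort(arr, axis=1)
+#   >>> idx = _make_along_axis_idx(arr.shape, indices, axis=1)
+#   >>> idx[0].shape, idx[1].shape
+#   ((2, 1), (2, 3))
+#   >>> arr[idx]
+#   array([[10, 20, 30],
+#          [40, 50, 60]])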
+
+
+def _take_along_axis_dispatcher(arr, indices, axis):
+    return (arr, indices)
+
+
+@array_function_dispatch(_take_along_axis_dispatcher)
+def take_along_axis(arr, indices, axis):
+    """
+    Take values from the input array by matching 1d index and data slices.
+
+    This iterates over matching 1d slices oriented along the specified axis in
+    the index and data arrays, and uses the former to look up values in the
+    latter. These slices can be different lengths.
+
+    Functions returning an index along an axis, like `argsort` and
+    `argpartition`, produce suitable indices for this function.
+
+    .. versionadded:: 1.15.0
+
+    Parameters
+    ----------
+    arr : ndarray (Ni..., M, Nk...)
+        Source array
+    indices : ndarray (Ni..., J, Nk...)
+        Indices to take along each 1d slice of `arr`. This must match the
+        dimension of arr, but dimensions Ni and Nj only need to broadcast
+        against `arr`.
+    axis : int
+        The axis to take 1d slices along. If axis is None, the input array is
+        treated as if it had first been flattened to 1d, for consistency with
+        `sort` and `argsort`.
+
+    Returns
+    -------
+    out: ndarray (Ni..., J, Nk...)
+        The indexed result.
+
+    Notes
+    -----
+    This is equivalent to (but faster than) the following use of `ndindex` and
+    `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+        Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+        J = indices.shape[axis]  # Need not equal M
+        out = np.empty(Ni + (J,) + Nk)
+
+        for ii in ndindex(Ni):
+            for kk in ndindex(Nk):
+                a_1d       = a      [ii + s_[:,] + kk]
+                indices_1d = indices[ii + s_[:,] + kk]
+                out_1d     = out    [ii + s_[:,] + kk]
+                for j in range(J):
+                    out_1d[j] = a_1d[indices_1d[j]]
+
+    Equivalently, eliminating the inner loop, the last two lines would be::
+
+                out_1d[:] = a_1d[indices_1d]
+
+    See Also
+    --------
+    take : Take along an axis, using the same indices for every 1d slice
+    put_along_axis :
+        Put values into the destination array by matching 1d index and data slices
+
+    Examples
+    --------
+
+    For this sample array
+
+    >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+    We can sort either by using sort directly, or argsort and this function
+
+    >>> np.sort(a, axis=1)
+    array([[10, 20, 30],
+           [40, 50, 60]])
+    >>> ai = np.argsort(a, axis=1)
+    >>> ai
+    array([[0, 2, 1],
+           [1, 2, 0]])
+    >>> np.take_along_axis(a, ai, axis=1)
+    array([[10, 20, 30],
+           [40, 50, 60]])
+
+    The same works for max and min, if you maintain the trivial dimension
+    with ``keepdims``:
+
+    >>> np.max(a, axis=1, keepdims=True)
+    array([[30],
+           [60]])
+    >>> ai = np.argmax(a, axis=1, keepdims=True)
+    >>> ai
+    array([[1],
+           [0]])
+    >>> np.take_along_axis(a, ai, axis=1)
+    array([[30],
+           [60]])
+
+    If we want to get the max and min at the same time, we can stack the
+    indices first
+
+    >>> ai_min = np.argmin(a, axis=1, keepdims=True)
+    >>> ai_max = np.argmax(a, axis=1, keepdims=True)
+    >>> ai = np.concatenate([ai_min, ai_max], axis=1)
+    >>> ai
+    array([[0, 1],
+           [1, 0]])
+    >>> np.take_along_axis(a, ai, axis=1)
+    array([[10, 30],
+           [40, 60]])
+    """
+    # normalize inputs
+    if axis is None:
+        arr = arr.flat
+        arr_shape = (len(arr),)  # flatiter has no .shape
+        axis = 0
+    else:
+        axis = normalize_axis_index(axis, arr.ndim)
+        arr_shape = arr.shape
+
+    # use the fancy index
+    return arr[_make_along_axis_idx(arr_shape, indices, axis)]
+
+
+def _put_along_axis_dispatcher(arr, indices, values, axis):
+    return (arr, indices, values)
+
+
+@array_function_dispatch(_put_along_axis_dispatcher)
+def put_along_axis(arr, indices, values, axis):
+    """
+    Put values into the destination array by matching 1d index and data slices.
+
+    This iterates over matching 1d slices oriented along the specified axis in
+    the index and data arrays, and uses the former to place values into the
+    latter. These slices can be different lengths.
+
+    Functions returning an index along an axis, like `argsort` and
+    `argpartition`, produce suitable indices for this function.
+
+    .. versionadded:: 1.15.0
+
+    Parameters
+    ----------
+    arr : ndarray (Ni..., M, Nk...)
+        Destination array.
+    indices : ndarray (Ni..., J, Nk...)
+        Indices to change along each 1d slice of `arr`. This must match the
+        dimension of `arr`, but dimensions in Ni and Nk may be 1 to broadcast
+        against `arr`.
+    values : array_like (Ni..., J, Nk...)
+        Values to insert at those indices. Its shape and dimension are
+        broadcast to match that of `indices`.
+    axis : int
+        The axis to take 1d slices along. If axis is None, the destination
+        array is treated as if a flattened 1d view of it had been created.
+
+    Notes
+    -----
+    This is equivalent to (but faster than) the following use of `ndindex` and
+    `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+        Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+        J = indices.shape[axis]  # Need not equal M
+
+        for ii in ndindex(Ni):
+            for kk in ndindex(Nk):
+                a_1d       = a      [ii + s_[:,] + kk]
+                indices_1d = indices[ii + s_[:,] + kk]
+                values_1d  = values [ii + s_[:,] + kk]
+                for j in range(J):
+                    a_1d[indices_1d[j]] = values_1d[j]
+
+    Equivalently, eliminating the inner loop, the last two lines would be::
+
+                a_1d[indices_1d] = values_1d
+
+    See Also
+    --------
+    take_along_axis :
+        Take values from the input array by matching 1d index and data slices
+
+    Examples
+    --------
+
+    For this sample array
+
+    >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+    We can replace the maximum values as follows:
+
+    >>> ai = np.argmax(a, axis=1, keepdims=True)
+    >>> ai
+    array([[1],
+           [0]])
+    >>> np.put_along_axis(a, ai, 99, axis=1)
+    >>> a
+    array([[10, 99, 20],
+           [99, 40, 50]])
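+
+    As a further sketch, the row minima can be overwritten in the same way
+    (recreating `a` first, since the previous example modified it in place);
+    the replacement values form a column so each row receives its own value:
+
+    >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+    >>> ai = np.argmin(a, axis=1, keepdims=True)
+    >>> np.put_along_axis(a, ai, np.array([[0], [1]]), axis=1)
+    >>> a
+    array([[ 0, 30, 20],
+           [60,  1, 50]])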
+
+    """
+    # normalize inputs
+    if axis is None:
+        arr = arr.flat
+        axis = 0
+        arr_shape = (len(arr),)  # flatiter has no .shape
+    else:
+        axis = normalize_axis_index(axis, arr.ndim)
+        arr_shape = arr.shape
+
+    # use the fancy index
+    arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
+
+
+def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):
+    return (arr,)
+
+
+@array_function_dispatch(_apply_along_axis_dispatcher)
+def apply_along_axis(func1d, axis, arr, *args, **kwargs):
+    """
+    Apply a function to 1-D slices along the given axis.
+
+    Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays
+    and `a` is a 1-D slice of `arr` along `axis`.
+
+    This is equivalent to (but faster than) the following use of `ndindex` and
+    `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::
+
+        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+        for ii in ndindex(Ni):
+            for kk in ndindex(Nk):
+                f = func1d(arr[ii + s_[:,] + kk])
+                Nj = f.shape
+                for jj in ndindex(Nj):
+                    out[ii + jj + kk] = f[jj]
+
+    Equivalently, eliminating the inner loop, this can be expressed as::
+
+        Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+        for ii in ndindex(Ni):
+            for kk in ndindex(Nk):
+                out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])
+
+    Parameters
+    ----------
+    func1d : function (M,) -> (Nj...)
+        This function should accept 1-D arrays. It is applied to 1-D
+        slices of `arr` along the specified axis.
+    axis : integer
+        Axis along which `arr` is sliced.
+    arr : ndarray (Ni..., M, Nk...)
+        Input array.
+    args : any
+        Additional arguments to `func1d`.
+    kwargs : any
+        Additional named arguments to `func1d`.
+
+        .. versionadded:: 1.9.0
+
+
+    Returns
+    -------
+    out : ndarray  (Ni..., Nj..., Nk...)
+        The output array. The shape of `out` is identical to the shape of
+        `arr`, except along the `axis` dimension. This axis is removed, and
+        replaced with new dimensions equal to the shape of the return value
+        of `func1d`. So if `func1d` returns a scalar, `out` will have one
+        fewer dimension than `arr`.
+
+    See Also
+    --------
+    apply_over_axes : Apply a function repeatedly over multiple axes.
+
+    Examples
+    --------
+    >>> def my_func(a):
+    ...     \"\"\"Average first and last element of a 1-D array\"\"\"
+    ...     return (a[0] + a[-1]) * 0.5
+    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
+    >>> np.apply_along_axis(my_func, 0, b)
+    array([4., 5., 6.])
+    >>> np.apply_along_axis(my_func, 1, b)
+    array([2.,  5.,  8.])
+
+    For a function that returns a 1D array, the number of dimensions in
+    `outarr` is the same as `arr`.
+
+    >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
+    >>> np.apply_along_axis(sorted, 1, b)
+    array([[1, 7, 8],
+           [3, 4, 9],
+           [2, 5, 6]])
+
+    For a function that returns a higher dimensional array, those dimensions
+    are inserted in place of the `axis` dimension.
+
+    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
+    >>> np.apply_along_axis(np.diag, -1, b)
+    array([[[1, 0, 0],
+            [0, 2, 0],
+            [0, 0, 3]],
+           [[4, 0, 0],
+            [0, 5, 0],
+            [0, 0, 6]],
+           [[7, 0, 0],
+            [0, 8, 0],
+            [0, 0, 9]]])
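+
+    Extra positional and keyword arguments are forwarded to `func1d`; a small
+    sketch with a hypothetical helper:
+
+    >>> def scaled_sum(a, factor=1):
+    ...     return a.sum() * factor
+    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
+    >>> np.apply_along_axis(scaled_sum, 1, b, factor=10)
+    array([ 60, 150, 240])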
+    """
+    # handle negative axes
+    arr = asanyarray(arr)
+    nd = arr.ndim
+    axis = normalize_axis_index(axis, nd)
+
+    # arr, with the iteration axis at the end
+    in_dims = list(range(nd))
+    inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis])
+
+    # compute indices for the iteration axes, and append a trailing ellipsis to
+    # prevent 0d arrays decaying to scalars, which fixes gh-8642
+    inds = ndindex(inarr_view.shape[:-1])
+    inds = (ind + (Ellipsis,) for ind in inds)
+
+    # invoke the function on the first item
+    try:
+        ind0 = next(inds)
+    except StopIteration:
+        raise ValueError(
+            'Cannot apply_along_axis when any iteration dimensions are 0'
+        ) from None
+    res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
+
+    # build a buffer for storing evaluations of func1d.
+    # remove the requested axis, and add the new ones on the end.
+    # laid out so that each write is contiguous.
+    # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
+    buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)
+
+    # permutation of axes such that out = buff.transpose(buff_permute)
+    buff_dims = list(range(buff.ndim))
+    buff_permute = (
+        buff_dims[0 : axis] +
+        buff_dims[buff.ndim-res.ndim : buff.ndim] +
+        buff_dims[axis : buff.ndim-res.ndim]
+    )
+
+    # matrices have a nasty __array_prepare__ and __array_wrap__
+    if not isinstance(res, matrix):
+        buff = res.__array_prepare__(buff)
+
+    # save the first result, then compute and save all remaining results
+    buff[ind0] = res
+    for ind in inds:
+        buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))
+
+    if not isinstance(res, matrix):
+        # wrap the array, to preserve subclasses
+        buff = res.__array_wrap__(buff)
+
+        # finally, rotate the inserted axes back to where they belong
+        return transpose(buff, buff_permute)
+
+    else:
+        # matrices have to be transposed first, because they collapse dimensions!
+        out_arr = transpose(buff, buff_permute)
+        return res.__array_wrap__(out_arr)
+
+
+def _apply_over_axes_dispatcher(func, a, axes):
+    return (a,)
+
+
+@array_function_dispatch(_apply_over_axes_dispatcher)
+def apply_over_axes(func, a, axes):
+    """
+    Apply a function repeatedly over multiple axes.
+
+    `func` is called as `res = func(a, axis)`, where `axis` is the first
+    element of `axes`.  The result `res` of the function call must have
+    either the same dimensions as `a` or one less dimension.  If `res`
+    has one less dimension than `a`, a dimension is inserted before
+    `axis`.  The call to `func` is then repeated for each axis in `axes`,
+    with `res` as the first argument.
+
+    Parameters
+    ----------
+    func : function
+        This function must take two arguments, `func(a, axis)`.
+    a : array_like
+        Input array.
+    axes : array_like
+        Axes over which `func` is applied; the elements must be integers.
+
+    Returns
+    -------
+    apply_over_axis : ndarray
+        The output array.  The number of dimensions is the same as `a`,
+        but the shape can be different.  This depends on whether `func`
+        changes the shape of its output with respect to its input.
+
+    See Also
+    --------
+    apply_along_axis :
+        Apply a function to 1-D slices of an array along the given axis.
+
+    Notes
+    -----
+    This function is equivalent to tuple axis arguments to reorderable ufuncs
+    with keepdims=True. Tuple axis arguments to ufuncs have been available since
+    version 1.7.0.
+
+    Examples
+    --------
+    >>> a = np.arange(24).reshape(2,3,4)
+    >>> a
+    array([[[ 0,  1,  2,  3],
+            [ 4,  5,  6,  7],
+            [ 8,  9, 10, 11]],
+           [[12, 13, 14, 15],
+            [16, 17, 18, 19],
+            [20, 21, 22, 23]]])
+
+    Sum over axes 0 and 2. The result has the same number of dimensions
+    as the original array:
+
+    >>> np.apply_over_axes(np.sum, a, [0,2])
+    array([[[ 60],
+            [ 92],
+            [124]]])
+
+    Tuple axis arguments to ufuncs are equivalent:
+
+    >>> np.sum(a, axis=(0,2), keepdims=True)
+    array([[[ 60],
+            [ 92],
+            [124]]])
+
+    """
+    val = asarray(a)
+    N = a.ndim
+    if array(axes).ndim == 0:
+        axes = (axes,)
+    for axis in axes:
+        if axis < 0:
+            axis = N + axis
+        args = (val, axis)
+        res = func(*args)
+        if res.ndim == val.ndim:
+            val = res
+        else:
+            res = expand_dims(res, axis)
+            if res.ndim == val.ndim:
+                val = res
+            else:
+                raise ValueError("function is not returning "
+                                 "an array of the correct shape")
+    return val
+
+
+def _expand_dims_dispatcher(a, axis):
+    return (a,)
+
+
+@array_function_dispatch(_expand_dims_dispatcher)
+def expand_dims(a, axis):
+    """
+    Expand the shape of an array.
+
+    Insert a new axis that will appear at the `axis` position in the expanded
+    array shape.
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    axis : int or tuple of ints
+        Position in the expanded axes where the new axis (or axes) is placed.
+
+        .. deprecated:: 1.13.0
+            Passing an axis where ``axis > a.ndim`` will be treated as
+            ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will
+            be treated as ``axis == 0``. This behavior is deprecated.
+
+        .. versionchanged:: 1.18.0
+            A tuple of axes is now supported.  Out of range axes as
+            described above are now forbidden and raise an `AxisError`.
+
+    Returns
+    -------
+    result : ndarray
+        View of `a` with the number of dimensions increased.
+
+    See Also
+    --------
+    squeeze : The inverse operation, removing singleton dimensions
+    reshape : Insert, remove, and combine dimensions, and resize existing ones
+    doc.indexing, atleast_1d, atleast_2d, atleast_3d
+
+    Examples
+    --------
+    >>> x = np.array([1, 2])
+    >>> x.shape
+    (2,)
+
+    The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``:
+
+    >>> y = np.expand_dims(x, axis=0)
+    >>> y
+    array([[1, 2]])
+    >>> y.shape
+    (1, 2)
+
+    The following is equivalent to ``x[:, np.newaxis]``:
+
+    >>> y = np.expand_dims(x, axis=1)
+    >>> y
+    array([[1],
+           [2]])
+    >>> y.shape
+    (2, 1)
+
+    ``axis`` may also be a tuple:
+
+    >>> y = np.expand_dims(x, axis=(0, 1))
+    >>> y
+    array([[[1, 2]]])
+
+    >>> y = np.expand_dims(x, axis=(2, 0))
+    >>> y
+    array([[[1],
+            [2]]])
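+
+    Negative values count from the end of the expanded shape, so ``axis=-1``
+    appends a trailing axis:
+
+    >>> y = np.expand_dims(x, axis=-1)
+    >>> y.shape
+    (2, 1)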
+
+    Note that some examples may use ``None`` instead of ``np.newaxis``.  These
+    are the same objects:
+
+    >>> np.newaxis is None
+    True
+
+    """
+    if isinstance(a, matrix):
+        a = asarray(a)
+    else:
+        a = asanyarray(a)
+
+    if type(axis) not in (tuple, list):
+        axis = (axis,)
+
+    out_ndim = len(axis) + a.ndim
+    axis = normalize_axis_tuple(axis, out_ndim)
+
+    shape_it = iter(a.shape)
+    shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
+
+    return a.reshape(shape)
+
+
+row_stack = vstack
+
+
+def _column_stack_dispatcher(tup):
+    return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_column_stack_dispatcher)
+def column_stack(tup):
+    """
+    Stack 1-D arrays as columns into a 2-D array.
+
+    Take a sequence of 1-D arrays and stack them as columns
+    to make a single 2-D array. 2-D arrays are stacked as-is,
+    just like with `hstack`.  1-D arrays are turned into 2-D columns
+    first.
+
+    Parameters
+    ----------
+    tup : sequence of 1-D or 2-D arrays.
+        Arrays to stack. All of them must have the same first dimension.
+
+    Returns
+    -------
+    stacked : 2-D array
+        The array formed by stacking the given arrays.
+
+    See Also
+    --------
+    stack, hstack, vstack, concatenate
+
+    Examples
+    --------
+    >>> a = np.array((1,2,3))
+    >>> b = np.array((2,3,4))
+    >>> np.column_stack((a,b))
+    array([[1, 2],
+           [2, 3],
+           [3, 4]])
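+
+    A 2-D input is stacked as-is, so 1-D and 2-D inputs can be mixed:
+
+    >>> c = np.array([[5, 6], [7, 8], [9, 10]])
+    >>> np.column_stack((a, c))
+    array([[ 1,  5,  6],
+           [ 2,  7,  8],
+           [ 3,  9, 10]])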
+
+    """
+    arrays = []
+    for v in tup:
+        arr = asanyarray(v)
+        if arr.ndim < 2:
+            arr = array(arr, copy=False, subok=True, ndmin=2).T
+        arrays.append(arr)
+    return _nx.concatenate(arrays, 1)
+
+
+def _dstack_dispatcher(tup):
+    return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_dstack_dispatcher)
+def dstack(tup):
+    """
+    Stack arrays in sequence depth wise (along third axis).
+
+    This is equivalent to concatenation along the third axis after 2-D arrays
+    of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
+    `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
+    `dsplit`.
+
+    This function makes most sense for arrays with up to 3 dimensions. For
+    instance, for pixel-data with a height (first axis), width (second axis),
+    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+    `block` provide more general stacking and concatenation operations.
+
+    Parameters
+    ----------
+    tup : sequence of arrays
+        The arrays must have the same shape along all but the third axis.
+        1-D or 2-D arrays must have the same shape.
+
+    Returns
+    -------
+    stacked : ndarray
+        The array formed by stacking the given arrays, which will be at least 3-D.
+
+    See Also
+    --------
+    concatenate : Join a sequence of arrays along an existing axis.
+    stack : Join a sequence of arrays along a new axis.
+    block : Assemble an nd-array from nested lists of blocks.
+    vstack : Stack arrays in sequence vertically (row wise).
+    hstack : Stack arrays in sequence horizontally (column wise).
+    column_stack : Stack 1-D arrays as columns into a 2-D array.
+    dsplit : Split array along third axis.
+
+    Examples
+    --------
+    >>> a = np.array((1,2,3))
+    >>> b = np.array((2,3,4))
+    >>> np.dstack((a,b))
+    array([[[1, 2],
+            [2, 3],
+            [3, 4]]])
+
+    >>> a = np.array([[1],[2],[3]])
+    >>> b = np.array([[2],[3],[4]])
+    >>> np.dstack((a,b))
+    array([[[1, 2]],
+           [[2, 3]],
+           [[3, 4]]])
+
+    """
+    arrs = atleast_3d(*tup)
+    if not isinstance(arrs, list):
+        arrs = [arrs]
+    return _nx.concatenate(arrs, 2)
+
+
+def _replace_zero_by_x_arrays(sub_arys):
+    for i in range(len(sub_arys)):
+        if _nx.ndim(sub_arys[i]) == 0:
+            sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
+        elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
+            sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
+    return sub_arys
+
+
+def _array_split_dispatcher(ary, indices_or_sections, axis=None):
+    return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_array_split_dispatcher)
+def array_split(ary, indices_or_sections, axis=0):
+    """
+    Split an array into multiple sub-arrays.
+
+    Please refer to the ``split`` documentation.  The only difference
+    between these functions is that ``array_split`` allows
+    `indices_or_sections` to be an integer that does *not* equally
+    divide the axis. For an array of length l that should be split
+    into n sections, it returns l % n sub-arrays of size l//n + 1
+    and the rest of size l//n.
+
+    See Also
+    --------
+    split : Split array into multiple sub-arrays of equal size.
+
+    Examples
+    --------
+    >>> x = np.arange(8.0)
+    >>> np.array_split(x, 3)
+    [array([0.,  1.,  2.]), array([3.,  4.,  5.]), array([6.,  7.])]
+
+    >>> x = np.arange(9)
+    >>> np.array_split(x, 4)
+    [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])]
+
+    """
+    try:
+        Ntotal = ary.shape[axis]
+    except AttributeError:
+        Ntotal = len(ary)
+    try:
+        # handle array case.
+        Nsections = len(indices_or_sections) + 1
+        div_points = [0] + list(indices_or_sections) + [Ntotal]
+    except TypeError:
+        # indices_or_sections is a scalar, not an array.
+        Nsections = int(indices_or_sections)
+        if Nsections <= 0:
+            raise ValueError('number of sections must be larger than 0.') from None
+        Neach_section, extras = divmod(Ntotal, Nsections)
+        section_sizes = ([0] +
+                         extras * [Neach_section+1] +
+                         (Nsections-extras) * [Neach_section])
+        div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()
+
+    sub_arys = []
+    sary = _nx.swapaxes(ary, axis, 0)
+    for i in range(Nsections):
+        st = div_points[i]
+        end = div_points[i + 1]
+        sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
+
+    return sub_arys
+
+
+def _split_dispatcher(ary, indices_or_sections, axis=None):
+    return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_split_dispatcher)
+def split(ary, indices_or_sections, axis=0):
+    """
+    Split an array into multiple sub-arrays as views into `ary`.
+
+    Parameters
+    ----------
+    ary : ndarray
+        Array to be divided into sub-arrays.
+    indices_or_sections : int or 1-D array
+        If `indices_or_sections` is an integer, N, the array will be divided
+        into N equal arrays along `axis`.  If such a split is not possible,
+        an error is raised.
+
+        If `indices_or_sections` is a 1-D array of sorted integers, the entries
+        indicate where along `axis` the array is split.  For example,
+        ``[2, 3]`` would, for ``axis=0``, result in
+
+          - ary[:2]
+          - ary[2:3]
+          - ary[3:]
+
+        If an index exceeds the dimension of the array along `axis`,
+        an empty sub-array is returned correspondingly.
+    axis : int, optional
+        The axis along which to split, default is 0.
+
+    Returns
+    -------
+    sub-arrays : list of ndarrays
+        A list of sub-arrays as views into `ary`.
+
+    Raises
+    ------
+    ValueError
+        If `indices_or_sections` is given as an integer, but
+        a split does not result in equal division.
+
+    See Also
+    --------
+    array_split : Split an array into multiple sub-arrays of equal or
+                  near-equal size.  Does not raise an exception if
+                  an equal division cannot be made.
+    hsplit : Split array into multiple sub-arrays horizontally (column-wise).
+    vsplit : Split array into multiple sub-arrays vertically (row wise).
+    dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+    concatenate : Join a sequence of arrays along an existing axis.
+    stack : Join a sequence of arrays along a new axis.
+    hstack : Stack arrays in sequence horizontally (column wise).
+    vstack : Stack arrays in sequence vertically (row wise).
+    dstack : Stack arrays in sequence depth wise (along third dimension).
+
+    Examples
+    --------
+    >>> x = np.arange(9.0)
+    >>> np.split(x, 3)
+    [array([0.,  1.,  2.]), array([3.,  4.,  5.]), array([6.,  7.,  8.])]
+
+    >>> x = np.arange(8.0)
+    >>> np.split(x, [3, 5, 6, 10])
+    [array([0.,  1.,  2.]),
+     array([3.,  4.]),
+     array([5.]),
+     array([6.,  7.]),
+     array([], dtype=float64)]
+
+    """
+    try:
+        len(indices_or_sections)
+    except TypeError:
+        sections = indices_or_sections
+        N = ary.shape[axis]
+        if N % sections:
+            raise ValueError(
+                'array split does not result in an equal division') from None
+    return array_split(ary, indices_or_sections, axis)
+
+
+def _hvdsplit_dispatcher(ary, indices_or_sections):
+    return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_hvdsplit_dispatcher)
+def hsplit(ary, indices_or_sections):
+    """
+    Split an array into multiple sub-arrays horizontally (column-wise).
+
+    Please refer to the `split` documentation.  `hsplit` is equivalent
+    to `split` with ``axis=1``; the array is always split along the second
+    axis except for 1-D arrays, where it is split along ``axis=0``.
+
+    See Also
+    --------
+    split : Split an array into multiple sub-arrays of equal size.
+
+    Examples
+    --------
+    >>> x = np.arange(16.0).reshape(4, 4)
+    >>> x
+    array([[ 0.,   1.,   2.,   3.],
+           [ 4.,   5.,   6.,   7.],
+           [ 8.,   9.,  10.,  11.],
+           [12.,  13.,  14.,  15.]])
+    >>> np.hsplit(x, 2)
+    [array([[  0.,   1.],
+           [  4.,   5.],
+           [  8.,   9.],
+           [12.,  13.]]),
+     array([[  2.,   3.],
+           [  6.,   7.],
+           [10.,  11.],
+           [14.,  15.]])]
+    >>> np.hsplit(x, np.array([3, 6]))
+    [array([[ 0.,   1.,   2.],
+           [ 4.,   5.,   6.],
+           [ 8.,   9.,  10.],
+           [12.,  13.,  14.]]),
+     array([[ 3.],
+           [ 7.],
+           [11.],
+           [15.]]),
+     array([], shape=(4, 0), dtype=float64)]
+
+    With a higher dimensional array the split is still along the second axis.
+
+    >>> x = np.arange(8.0).reshape(2, 2, 2)
+    >>> x
+    array([[[0.,  1.],
+            [2.,  3.]],
+           [[4.,  5.],
+            [6.,  7.]]])
+    >>> np.hsplit(x, 2)
+    [array([[[0.,  1.]],
+           [[4.,  5.]]]),
+     array([[[2.,  3.]],
+           [[6.,  7.]]])]
+
+    With a 1-D array, the split is along axis 0.
+
+    >>> x = np.array([0, 1, 2, 3, 4, 5])
+    >>> np.hsplit(x, 2)
+    [array([0, 1, 2]), array([3, 4, 5])]
+
+    """
+    if _nx.ndim(ary) == 0:
+        raise ValueError('hsplit only works on arrays of 1 or more dimensions')
+    if ary.ndim > 1:
+        return split(ary, indices_or_sections, 1)
+    else:
+        return split(ary, indices_or_sections, 0)
+
+
+@array_function_dispatch(_hvdsplit_dispatcher)
+def vsplit(ary, indices_or_sections):
+    """
+    Split an array into multiple sub-arrays vertically (row-wise).
+
+    Please refer to the ``split`` documentation.  ``vsplit`` is equivalent
+    to ``split`` with ``axis=0`` (default); the array is always split along the
+    first axis regardless of the array dimension.
+
+    See Also
+    --------
+    split : Split an array into multiple sub-arrays of equal size.
+
+    Examples
+    --------
+    >>> x = np.arange(16.0).reshape(4, 4)
+    >>> x
+    array([[ 0.,   1.,   2.,   3.],
+           [ 4.,   5.,   6.,   7.],
+           [ 8.,   9.,  10.,  11.],
+           [12.,  13.,  14.,  15.]])
+    >>> np.vsplit(x, 2)
+    [array([[0., 1., 2., 3.],
+           [4., 5., 6., 7.]]), array([[ 8.,  9., 10., 11.],
+           [12., 13., 14., 15.]])]
+    >>> np.vsplit(x, np.array([3, 6]))
+    [array([[ 0.,  1.,  2.,  3.],
+           [ 4.,  5.,  6.,  7.],
+           [ 8.,  9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)]
+
+    With a higher dimensional array the split is still along the first axis.
+
+    >>> x = np.arange(8.0).reshape(2, 2, 2)
+    >>> x
+    array([[[0.,  1.],
+            [2.,  3.]],
+           [[4.,  5.],
+            [6.,  7.]]])
+    >>> np.vsplit(x, 2)
+    [array([[[0., 1.],
+            [2., 3.]]]), array([[[4., 5.],
+            [6., 7.]]])]
+
+    """
+    if _nx.ndim(ary) < 2:
+        raise ValueError('vsplit only works on arrays of 2 or more dimensions')
+    return split(ary, indices_or_sections, 0)
+
+
+@array_function_dispatch(_hvdsplit_dispatcher)
+def dsplit(ary, indices_or_sections):
+    """
+    Split array into multiple sub-arrays along the 3rd axis (depth).
+
+    Please refer to the `split` documentation.  `dsplit` is equivalent
+    to `split` with ``axis=2``; the array is always split along the third
+    axis provided the array dimension is greater than or equal to 3.
+
+    See Also
+    --------
+    split : Split an array into multiple sub-arrays of equal size.
+
+    Examples
+    --------
+    >>> x = np.arange(16.0).reshape(2, 2, 4)
+    >>> x
+    array([[[ 0.,   1.,   2.,   3.],
+            [ 4.,   5.,   6.,   7.]],
+           [[ 8.,   9.,  10.,  11.],
+            [12.,  13.,  14.,  15.]]])
+    >>> np.dsplit(x, 2)
+    [array([[[ 0.,  1.],
+            [ 4.,  5.]],
+           [[ 8.,  9.],
+            [12., 13.]]]), array([[[ 2.,  3.],
+            [ 6.,  7.]],
+           [[10., 11.],
+            [14., 15.]]])]
+    >>> np.dsplit(x, np.array([3, 6]))
+    [array([[[ 0.,   1.,   2.],
+            [ 4.,   5.,   6.]],
+           [[ 8.,   9.,  10.],
+            [12.,  13.,  14.]]]),
+     array([[[ 3.],
+            [ 7.]],
+           [[11.],
+            [15.]]]),
+    array([], shape=(2, 2, 0), dtype=float64)]
+    """
+    if _nx.ndim(ary) < 3:
+        raise ValueError('dsplit only works on arrays of 3 or more dimensions')
+    return split(ary, indices_or_sections, 2)
+
+
+def get_array_prepare(*args):
+    """Find the wrapper for the array with the highest priority.
+
+    In case of ties, leftmost wins. If no wrapper is found, return None
+    """
+    wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
+                 x.__array_prepare__) for i, x in enumerate(args)
+                                   if hasattr(x, '__array_prepare__'))
+    if wrappers:
+        return wrappers[-1][-1]
+    return None
+
+
+def get_array_wrap(*args):
+    """Find the wrapper for the array with the highest priority.
+
+    In case of ties, leftmost wins. If no wrapper is found, return None
+    """
+    wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
+                 x.__array_wrap__) for i, x in enumerate(args)
+                                   if hasattr(x, '__array_wrap__'))
+    if wrappers:
+        return wrappers[-1][-1]
+    return None
+
+
+def _kron_dispatcher(a, b):
+    return (a, b)
+
+
+@array_function_dispatch(_kron_dispatcher)
+def kron(a, b):
+    """
+    Kronecker product of two arrays.
+
+    Computes the Kronecker product, a composite array made of blocks of the
+    second array scaled by the first.
+
+    Parameters
+    ----------
+    a, b : array_like
+
+    Returns
+    -------
+    out : ndarray
+
+    See Also
+    --------
+    outer : The outer product
+
+    Notes
+    -----
+    The function assumes that the number of dimensions of `a` and `b`
+    is the same, if necessary prepending the smaller shape with ones.
+    If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``,
+    the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
+    The elements are products of elements from `a` and `b`, organized
+    explicitly by::
+
+        kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
+
+    where::
+
+        kt = it * st + jt,  t = 0,...,N
+
+    In the common 2-D case (N=1), the block structure can be visualized::
+
+        [[ a[0,0]*b,   a[0,1]*b,  ... , a[0,-1]*b  ],
+         [  ...                              ...   ],
+         [ a[-1,0]*b,  a[-1,1]*b, ... , a[-1,-1]*b ]]
+
+
+    Examples
+    --------
+    >>> np.kron([1,10,100], [5,6,7])
+    array([  5,   6,   7, ..., 500, 600, 700])
+    >>> np.kron([5,6,7], [1,10,100])
+    array([  5,  50, 500, ...,   7,  70, 700])
+
+    >>> np.kron(np.eye(2), np.ones((2,2)))
+    array([[1.,  1.,  0.,  0.],
+           [1.,  1.,  0.,  0.],
+           [0.,  0.,  1.,  1.],
+           [0.,  0.,  1.,  1.]])
+
+    >>> a = np.arange(100).reshape((2,5,2,5))
+    >>> b = np.arange(24).reshape((2,3,4))
+    >>> c = np.kron(a,b)
+    >>> c.shape
+    (2, 10, 6, 20)
+    >>> I = (1,3,0,2)
+    >>> J = (0,2,1)
+    >>> J1 = (0,) + J             # extend to ndim=4
+    >>> S1 = (1,) + b.shape
+    >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
+    >>> c[K] == a[I]*b[J]
+    True
+
+    """
+    # Working:
+    # 1. Equalise the shapes by prepending the smaller array with 1s
+    # 2. Expand shapes of both the arrays by adding new axes at
+    #    odd positions for the 1st array and even positions for the 2nd
+    # 3. Compute the product of the modified arrays
+    # 4. The innermost array elements now contain the rows of
+    #    the Kronecker product
+    # 5. Reshape the result to kron's shape, which is the same as the
+    #    product of the shapes of the two arrays.
+    b = asanyarray(b)
+    a = array(a, copy=False, subok=True, ndmin=b.ndim)
+    is_any_mat = isinstance(a, matrix) or isinstance(b, matrix)
+    ndb, nda = b.ndim, a.ndim
+    nd = max(ndb, nda)
+
+    if (nda == 0 or ndb == 0):
+        return _nx.multiply(a, b)
+
+    as_ = a.shape
+    bs = b.shape
+    if not a.flags.contiguous:
+        a = reshape(a, as_)
+    if not b.flags.contiguous:
+        b = reshape(b, bs)
+
+    # Equalise the shapes by prepending smaller one with 1s
+    as_ = (1,)*max(0, ndb-nda) + as_
+    bs = (1,)*max(0, nda-ndb) + bs
+
+    # Insert empty dimensions
+    a_arr = expand_dims(a, axis=tuple(range(ndb-nda)))
+    b_arr = expand_dims(b, axis=tuple(range(nda-ndb)))
+
+    # Compute the product
+    a_arr = expand_dims(a_arr, axis=tuple(range(1, nd*2, 2)))
+    b_arr = expand_dims(b_arr, axis=tuple(range(0, nd*2, 2)))
+    # In case of `mat`, convert result to `array`
+    result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat))
+
+    # Reshape back
+    result = result.reshape(_nx.multiply(as_, bs))
+
+    return result if not is_any_mat else matrix(result, copy=False)
+
+
+def _tile_dispatcher(A, reps):
+    return (A, reps)
+
+
+@array_function_dispatch(_tile_dispatcher)
+def tile(A, reps):
+    """
+    Construct an array by repeating A the number of times given by reps.
+
+    If `reps` has length ``d``, the result will have dimension of
+    ``max(d, A.ndim)``.
+
+    If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
+    axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
+    or shape (1, 1, 3) for 3-D replication. If this is not the desired
+    behavior, promote `A` to d-dimensions manually before calling this
+    function.
+
+    If ``A.ndim > d``, `reps` is promoted to ``A.ndim`` by prepending 1's to it.
+    Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
+    (1, 1, 2, 2).
+
+    Note: Although tile may be used for broadcasting, it is strongly
+    recommended to use numpy's broadcasting operations and functions.
+
+    Parameters
+    ----------
+    A : array_like
+        The input array.
+    reps : array_like
+        The number of repetitions of `A` along each axis.
+
+    Returns
+    -------
+    c : ndarray
+        The tiled output array.
+
+    See Also
+    --------
+    repeat : Repeat elements of an array.
+    broadcast_to : Broadcast an array to a new shape
+
+    Examples
+    --------
+    >>> a = np.array([0, 1, 2])
+    >>> np.tile(a, 2)
+    array([0, 1, 2, 0, 1, 2])
+    >>> np.tile(a, (2, 2))
+    array([[0, 1, 2, 0, 1, 2],
+           [0, 1, 2, 0, 1, 2]])
+    >>> np.tile(a, (2, 1, 2))
+    array([[[0, 1, 2, 0, 1, 2]],
+           [[0, 1, 2, 0, 1, 2]]])
+
+    >>> b = np.array([[1, 2], [3, 4]])
+    >>> np.tile(b, 2)
+    array([[1, 2, 1, 2],
+           [3, 4, 3, 4]])
+    >>> np.tile(b, (2, 1))
+    array([[1, 2],
+           [3, 4],
+           [1, 2],
+           [3, 4]])
+
+    >>> c = np.array([1,2,3,4])
+    >>> np.tile(c,(4,1))
+    array([[1, 2, 3, 4],
+           [1, 2, 3, 4],
+           [1, 2, 3, 4],
+           [1, 2, 3, 4]])
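+
+    As described above, a short `reps` is prepended with 1's, which is easiest
+    to see from the shape of the result:
+
+    >>> d = np.ones((2, 3, 4, 5))
+    >>> np.tile(d, (2, 2)).shape
+    (2, 3, 8, 10)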
+    """
+    try:
+        tup = tuple(reps)
+    except TypeError:
+        tup = (reps,)
+    d = len(tup)
+    if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
+        # Fixes the problem that the function does not make a copy if A is a
+        # numpy array and the repetitions are 1 in all dimensions
+        return _nx.array(A, copy=True, subok=True, ndmin=d)
+    else:
+        # Note that no copy of zero-sized arrays is made. However since they
+        # have no data there is no risk of an inadvertent overwrite.
+        c = _nx.array(A, copy=False, subok=True, ndmin=d)
+    if (d < c.ndim):
+        tup = (1,)*(c.ndim-d) + tup
+    shape_out = tuple(s*t for s, t in zip(c.shape, tup))
+    n = c.size
+    if n > 0:
+        for dim_in, nrep in zip(c.shape, tup):
+            if nrep != 1:
+                c = c.reshape(-1, n).repeat(nrep, 0)
+            n //= dim_in
+    return c.reshape(shape_out)
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/shape_base.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/shape_base.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..7cd9608b42fc6fb4f4566a823147daa99580fdcf
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/shape_base.pyi
@@ -0,0 +1,220 @@
+import sys
+from collections.abc import Callable, Sequence
+from typing import TypeVar, Any, overload, SupportsIndex, Protocol
+
+if sys.version_info >= (3, 10):
+    from typing import ParamSpec, Concatenate
+else:
+    from typing_extensions import ParamSpec, Concatenate
+
+from numpy import (
+    generic,
+    integer,
+    ufunc,
+    bool_,
+    unsignedinteger,
+    signedinteger,
+    floating,
+    complexfloating,
+    object_,
+)
+
+from numpy._typing import (
+    ArrayLike,
+    NDArray,
+    _ShapeLike,
+    _ArrayLike,
+    _ArrayLikeBool_co,
+    _ArrayLikeUInt_co,
+    _ArrayLikeInt_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeObject_co,
+)
+
+from numpy.core.shape_base import vstack
+
+_P = ParamSpec("_P")
+_SCT = TypeVar("_SCT", bound=generic)
+
+# The signatures of `__array_wrap__` and `__array_prepare__` are the same;
+# give them unique names for the sake of clarity
+class _ArrayWrap(Protocol):
+    def __call__(
+        self,
+        array: NDArray[Any],
+        context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
+        /,
+    ) -> Any: ...
+
+class _ArrayPrepare(Protocol):
+    def __call__(
+        self,
+        array: NDArray[Any],
+        context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
+        /,
+    ) -> Any: ...
+
+class _SupportsArrayWrap(Protocol):
+    @property
+    def __array_wrap__(self) -> _ArrayWrap: ...
+
+class _SupportsArrayPrepare(Protocol):
+    @property
+    def __array_prepare__(self) -> _ArrayPrepare: ...
+
+__all__: list[str]
+
+row_stack = vstack
+
+def take_along_axis(
+    arr: _SCT | NDArray[_SCT],
+    indices: NDArray[integer[Any]],
+    axis: None | int,
+) -> NDArray[_SCT]: ...
+
+def put_along_axis(
+    arr: NDArray[_SCT],
+    indices: NDArray[integer[Any]],
+    values: ArrayLike,
+    axis: None | int,
+) -> None: ...
+
+@overload
+def apply_along_axis(
+    func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_SCT]],
+    axis: SupportsIndex,
+    arr: ArrayLike,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> NDArray[_SCT]: ...
+@overload
+def apply_along_axis(
+    func1d: Callable[Concatenate[NDArray[Any], _P], ArrayLike],
+    axis: SupportsIndex,
+    arr: ArrayLike,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> NDArray[Any]: ...
+
+def apply_over_axes(
+    func: Callable[[NDArray[Any], int], NDArray[_SCT]],
+    a: ArrayLike,
+    axes: int | Sequence[int],
+) -> NDArray[_SCT]: ...
+
+@overload
+def expand_dims(
+    a: _ArrayLike[_SCT],
+    axis: _ShapeLike,
+) -> NDArray[_SCT]: ...
+@overload
+def expand_dims(
+    a: ArrayLike,
+    axis: _ShapeLike,
+) -> NDArray[Any]: ...
+
+@overload
+def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
+@overload
+def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
+
+@overload
+def dstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
+@overload
+def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
+
+@overload
+def array_split(
+    ary: _ArrayLike[_SCT],
+    indices_or_sections: _ShapeLike,
+    axis: SupportsIndex = ...,
+) -> list[NDArray[_SCT]]: ...
+@overload
+def array_split(
+    ary: ArrayLike,
+    indices_or_sections: _ShapeLike,
+    axis: SupportsIndex = ...,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def split(
+    ary: _ArrayLike[_SCT],
+    indices_or_sections: _ShapeLike,
+    axis: SupportsIndex = ...,
+) -> list[NDArray[_SCT]]: ...
+@overload
+def split(
+    ary: ArrayLike,
+    indices_or_sections: _ShapeLike,
+    axis: SupportsIndex = ...,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def hsplit(
+    ary: _ArrayLike[_SCT],
+    indices_or_sections: _ShapeLike,
+) -> list[NDArray[_SCT]]: ...
+@overload
+def hsplit(
+    ary: ArrayLike,
+    indices_or_sections: _ShapeLike,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def vsplit(
+    ary: _ArrayLike[_SCT],
+    indices_or_sections: _ShapeLike,
+) -> list[NDArray[_SCT]]: ...
+@overload
+def vsplit(
+    ary: ArrayLike,
+    indices_or_sections: _ShapeLike,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def dsplit(
+    ary: _ArrayLike[_SCT],
+    indices_or_sections: _ShapeLike,
+) -> list[NDArray[_SCT]]: ...
+@overload
+def dsplit(
+    ary: ArrayLike,
+    indices_or_sections: _ShapeLike,
+) -> list[NDArray[Any]]: ...
+
+@overload
+def get_array_prepare(*args: _SupportsArrayPrepare) -> _ArrayPrepare: ...
+@overload
+def get_array_prepare(*args: object) -> None | _ArrayPrepare: ...
+
+@overload
+def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ...
+@overload
+def get_array_wrap(*args: object) -> None | _ArrayWrap: ...
+
+@overload
+def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[bool_]: ...  # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+@overload
+def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ...
+@overload
+def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ...
+
+@overload
+def tile(
+    A: _ArrayLike[_SCT],
+    reps: int | Sequence[int],
+) -> NDArray[_SCT]: ...
+@overload
+def tile(
+    A: ArrayLike,
+    reps: int | Sequence[int],
+) -> NDArray[Any]: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/stride_tricks.py b/.venv/lib/python3.11/site-packages/numpy/lib/stride_tricks.py
new file mode 100644
index 0000000000000000000000000000000000000000..6794ad557a2e309e3ca7e652e0c5cc093d34e615
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/stride_tricks.py
@@ -0,0 +1,547 @@
+"""
+Utilities that manipulate strides to achieve desirable effects.
+
+An explanation of strides can be found in the "ndarray.rst" file in the
+NumPy reference guide.
+
+"""
+import numpy as np
+from numpy.core.numeric import normalize_axis_tuple
+from numpy.core.overrides import array_function_dispatch, set_module
+
+__all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes']
+
+
+class DummyArray:
+    """Dummy object that just exists to hang __array_interface__ dictionaries
+    and possibly keep alive a reference to a base array.
+    """
+
+    def __init__(self, interface, base=None):
+        self.__array_interface__ = interface
+        self.base = base
+
+
+def _maybe_view_as_subclass(original_array, new_array):
+    if type(original_array) is not type(new_array):
+        # if input was an ndarray subclass and subclasses were OK,
+        # then view the result as that subclass.
+        new_array = new_array.view(type=type(original_array))
+        # Since we have done something akin to a view from original_array, we
+        # should let the subclass finalize (if it has it implemented, i.e., is
+        # not None).
+        if new_array.__array_finalize__:
+            new_array.__array_finalize__(original_array)
+    return new_array
+
+
+def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
+    """
+    Create a view into the array with the given shape and strides.
+
+    .. warning:: This function has to be used with extreme care, see notes.
+
+    Parameters
+    ----------
+    x : ndarray
+        Array to create a new view into.
+    shape : sequence of int, optional
+        The shape of the new array. Defaults to ``x.shape``.
+    strides : sequence of int, optional
+        The strides of the new array. Defaults to ``x.strides``.
+    subok : bool, optional
+        .. versionadded:: 1.10
+
+        If True, subclasses are preserved.
+    writeable : bool, optional
+        .. versionadded:: 1.12
+
+        If set to False, the returned array will always be readonly.
+        Otherwise it will be writable if the original array was. It
+        is advisable to set this to False if possible (see Notes).
+
+    Returns
+    -------
+    view : ndarray
+
+    See also
+    --------
+    broadcast_to : broadcast an array to a given shape.
+    reshape : reshape an array.
+    lib.stride_tricks.sliding_window_view :
+        user-friendly and safe function for the creation of sliding window views.
+
+    Notes
+    -----
+    ``as_strided`` creates a view into the array given the exact strides
+    and shape. This means it manipulates the internal data structure of
+    ndarray and, if done incorrectly, the array elements can point to
+    invalid memory and can corrupt results or crash your program.
+    It is advisable to always use the original ``x.strides`` when
+    calculating new strides to avoid reliance on a contiguous memory
+    layout.
+
+    Furthermore, arrays created with this function often contain
+    self-overlapping memory, so that two or more elements refer to the
+    same memory location.
+    Vectorized write operations on such arrays will typically be
+    unpredictable. They may even give different results for small, large,
+    or transposed arrays.
+
+    Since writing to these arrays has to be tested and done with great
+    care, you may want to use ``writeable=False`` to avoid accidental write
+    operations.
+
+    For these reasons it is advisable to avoid ``as_strided`` when
+    possible.
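+
+    Examples
+    --------
+    A minimal sliding-window sketch built from the original strides; the
+    rows overlap in memory, so ``writeable=False`` guards against accidental
+    writes:
+
+    >>> x = np.arange(5)
+    >>> np.lib.stride_tricks.as_strided(
+    ...     x, shape=(3, 3), strides=(x.strides[0], x.strides[0]),
+    ...     writeable=False)
+    array([[0, 1, 2],
+           [1, 2, 3],
+           [2, 3, 4]])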
+    """
+    # first convert input to array, possibly keeping subclass
+    x = np.array(x, copy=False, subok=subok)
+    interface = dict(x.__array_interface__)
+    if shape is not None:
+        interface['shape'] = tuple(shape)
+    if strides is not None:
+        interface['strides'] = tuple(strides)
+
+    array = np.asarray(DummyArray(interface, base=x))
+    # The route via `__interface__` does not preserve structured
+    # dtypes. Since dtype should remain unchanged, we set it explicitly.
+    array.dtype = x.dtype
+
+    view = _maybe_view_as_subclass(x, array)
+
+    if view.flags.writeable and not writeable:
+        view.flags.writeable = False
+
+    return view
+
+
+def _sliding_window_view_dispatcher(x, window_shape, axis=None, *,
+                                    subok=None, writeable=None):
+    return (x,)
+
+
+@array_function_dispatch(_sliding_window_view_dispatcher)
+def sliding_window_view(x, window_shape, axis=None, *,
+                        subok=False, writeable=False):
+    """
+    Create a sliding window view into the array with the given window shape.
+
+    Also known as rolling or moving window, the window slides across all
+    dimensions of the array and extracts subsets of the array at all window
+    positions.
+    
+    .. versionadded:: 1.20.0
+
+    Parameters
+    ----------
+    x : array_like
+        Array to create the sliding window view from.
+    window_shape : int or tuple of int
+        Size of window over each axis that takes part in the sliding window.
+        If `axis` is not present, must have same length as the number of input
+        array dimensions. Single integers `i` are treated as if they were the
+        tuple `(i,)`.
+    axis : int or tuple of int, optional
+        Axis or axes along which the sliding window is applied.
+        By default, the sliding window is applied to all axes and
+        `window_shape[i]` will refer to axis `i` of `x`.
+        If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to
+        the axis `axis[i]` of `x`.
+        Single integers `i` are treated as if they were the tuple `(i,)`.
+    subok : bool, optional
+        If True, sub-classes will be passed-through, otherwise the returned
+        array will be forced to be a base-class array (default).
+    writeable : bool, optional
+        When true, allow writing to the returned view. The default is false,
+        as this should be used with caution: the returned view contains the
+        same memory location multiple times, so writing to one location will
+        cause others to change.
+
+    Returns
+    -------
+    view : ndarray
+        Sliding window view of the array. The sliding window dimensions are
+        inserted at the end, and the original dimensions are trimmed as
+        required by the size of the sliding window.
+        That is, ``view.shape = x_shape_trimmed + window_shape``, where
+        ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less
+        than the corresponding window size.
+
+    See Also
+    --------
+    lib.stride_tricks.as_strided: A lower-level and less safe routine for
+        creating arbitrary views from custom shape and strides.
+    broadcast_to: broadcast an array to a given shape.
+
+    Notes
+    -----
+    For many applications using a sliding window view can be convenient, but
+    potentially very slow. Often specialized solutions exist, for example:
+
+    - `scipy.signal.fftconvolve`
+
+    - filtering functions in `scipy.ndimage`
+
+    - moving window functions provided by
+      `bottleneck <https://github.com/pydata/bottleneck>`_.
+
+    As a rough estimate, a sliding window approach with an input size of `N`
+    and a window size of `W` will scale as `O(N*W)` where frequently a special
+    algorithm can achieve `O(N)`. That means that the sliding window variant
+    for a window size of 100 can be a 100 times slower than a more specialized
+    version.
+
+    Nevertheless, for small window sizes, when no custom algorithm exists, or
+    as a prototyping and developing tool, this function can be a good solution.
+
+    Examples
+    --------
+    >>> x = np.arange(6)
+    >>> x.shape
+    (6,)
+    >>> v = sliding_window_view(x, 3)
+    >>> v.shape
+    (4, 3)
+    >>> v
+    array([[0, 1, 2],
+           [1, 2, 3],
+           [2, 3, 4],
+           [3, 4, 5]])
+
+    This also works in more dimensions, e.g.
+
+    >>> i, j = np.ogrid[:3, :4]
+    >>> x = 10*i + j
+    >>> x.shape
+    (3, 4)
+    >>> x
+    array([[ 0,  1,  2,  3],
+           [10, 11, 12, 13],
+           [20, 21, 22, 23]])
+    >>> shape = (2,2)
+    >>> v = sliding_window_view(x, shape)
+    >>> v.shape
+    (2, 3, 2, 2)
+    >>> v
+    array([[[[ 0,  1],
+             [10, 11]],
+            [[ 1,  2],
+             [11, 12]],
+            [[ 2,  3],
+             [12, 13]]],
+           [[[10, 11],
+             [20, 21]],
+            [[11, 12],
+             [21, 22]],
+            [[12, 13],
+             [22, 23]]]])
+
+    The axis can be specified explicitly:
+
+    >>> v = sliding_window_view(x, 3, 0)
+    >>> v.shape
+    (1, 4, 3)
+    >>> v
+    array([[[ 0, 10, 20],
+            [ 1, 11, 21],
+            [ 2, 12, 22],
+            [ 3, 13, 23]]])
+
+    The same axis can be used several times. In that case, every use reduces
+    the corresponding original dimension:
+
+    >>> v = sliding_window_view(x, (2, 3), (1, 1))
+    >>> v.shape
+    (3, 1, 2, 3)
+    >>> v
+    array([[[[ 0,  1,  2],
+             [ 1,  2,  3]]],
+           [[[10, 11, 12],
+             [11, 12, 13]]],
+           [[[20, 21, 22],
+             [21, 22, 23]]]])
+
+    Combining with stepped slicing (`::step`), this can be used to take sliding
+    views which skip elements:
+
+    >>> x = np.arange(7)
+    >>> sliding_window_view(x, 5)[:, ::2]
+    array([[0, 2, 4],
+           [1, 3, 5],
+           [2, 4, 6]])
+
+    or views which move by multiple elements
+
+    >>> x = np.arange(7)
+    >>> sliding_window_view(x, 3)[::2, :]
+    array([[0, 1, 2],
+           [2, 3, 4],
+           [4, 5, 6]])
+
+    A common application of `sliding_window_view` is the calculation of running
+    statistics. The simplest example is the
+    `moving average <https://en.wikipedia.org/wiki/Moving_average>`_:
+
+    >>> x = np.arange(6)
+    >>> x.shape
+    (6,)
+    >>> v = sliding_window_view(x, 3)
+    >>> v.shape
+    (4, 3)
+    >>> v
+    array([[0, 1, 2],
+           [1, 2, 3],
+           [2, 3, 4],
+           [3, 4, 5]])
+    >>> moving_average = v.mean(axis=-1)
+    >>> moving_average
+    array([1., 2., 3., 4.])
+
+    Note that a sliding window approach is often **not** optimal (see Notes).
+    """
+    window_shape = (tuple(window_shape)
+                    if np.iterable(window_shape)
+                    else (window_shape,))
+    # first convert input to array, possibly keeping subclass
+    x = np.array(x, copy=False, subok=subok)
+
+    window_shape_array = np.array(window_shape)
+    if np.any(window_shape_array < 0):
+        raise ValueError('`window_shape` cannot contain negative values')
+
+    if axis is None:
+        axis = tuple(range(x.ndim))
+        if len(window_shape) != len(axis):
+            raise ValueError(f'Since axis is `None`, must provide '
+                             f'window_shape for all dimensions of `x`; '
+                             f'got {len(window_shape)} window_shape elements '
+                             f'and `x.ndim` is {x.ndim}.')
+    else:
+        axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)
+        if len(window_shape) != len(axis):
+            raise ValueError(f'Must provide matching length window_shape and '
+                             f'axis; got {len(window_shape)} window_shape '
+                             f'elements and {len(axis)} axes elements.')
+
+    out_strides = x.strides + tuple(x.strides[ax] for ax in axis)
+
+    # note: same axis can be windowed repeatedly
+    x_shape_trimmed = list(x.shape)
+    for ax, dim in zip(axis, window_shape):
+        if x_shape_trimmed[ax] < dim:
+            raise ValueError(
+                'window shape cannot be larger than input array shape')
+        x_shape_trimmed[ax] -= dim - 1
+    out_shape = tuple(x_shape_trimmed) + window_shape
+    return as_strided(x, strides=out_strides, shape=out_shape,
+                      subok=subok, writeable=writeable)
+
+
+def _broadcast_to(array, shape, subok, readonly):
+    shape = tuple(shape) if np.iterable(shape) else (shape,)
+    array = np.array(array, copy=False, subok=subok)
+    if not shape and array.shape:
+        raise ValueError('cannot broadcast a non-scalar to a scalar array')
+    if any(size < 0 for size in shape):
+        raise ValueError('all elements of broadcast shape must be non-'
+                         'negative')
+    extras = []
+    it = np.nditer(
+        (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
+        op_flags=['readonly'], itershape=shape, order='C')
+    with it:
+        # never really has writebackifcopy semantics
+        broadcast = it.itviews[0]
+    result = _maybe_view_as_subclass(array, broadcast)
+    # In a future version this will go away
+    if not readonly and array.flags._writeable_no_warn:
+        result.flags.writeable = True
+        result.flags._warn_on_write = True
+    return result
+
+
+def _broadcast_to_dispatcher(array, shape, subok=None):
+    return (array,)
+
+
+@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
+def broadcast_to(array, shape, subok=False):
+    """Broadcast an array to a new shape.
+
+    Parameters
+    ----------
+    array : array_like
+        The array to broadcast.
+    shape : tuple or int
+        The shape of the desired array. A single integer ``i`` is interpreted
+        as ``(i,)``.
+    subok : bool, optional
+        If True, then sub-classes will be passed-through, otherwise
+        the returned array will be forced to be a base-class array (default).
+
+    Returns
+    -------
+    broadcast : array
+        A readonly view on the original array with the given shape. It is
+        typically not contiguous. Furthermore, more than one element of a
+        broadcasted array may refer to a single memory location.
+
+    Raises
+    ------
+    ValueError
+        If the array is not compatible with the new shape according to NumPy's
+        broadcasting rules.
+
+    See Also
+    --------
+    broadcast
+    broadcast_arrays
+    broadcast_shapes
+
+    Notes
+    -----
+    .. versionadded:: 1.10.0
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3])
+    >>> np.broadcast_to(x, (3, 3))
+    array([[1, 2, 3],
+           [1, 2, 3],
+           [1, 2, 3]])
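+
+    A scalar is broadcast in the same way:
+
+    >>> np.broadcast_to(5, (2, 2))
+    array([[5, 5],
+           [5, 5]])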
+    """
+    return _broadcast_to(array, shape, subok=subok, readonly=True)
+
+
+def _broadcast_shape(*args):
+    """Returns the shape of the arrays that would result from broadcasting the
+    supplied arrays against each other.
+    """
+    # use the old-iterator because np.nditer does not handle size 0 arrays
+    # consistently
+    b = np.broadcast(*args[:32])
+    # unfortunately, it cannot handle 32 or more arguments directly
+    for pos in range(32, len(args), 31):
+        # ironically, np.broadcast does not properly handle np.broadcast
+        # objects (it treats them as scalars)
+        # use broadcasting to avoid allocating the full array
+        b = broadcast_to(0, b.shape)
+        b = np.broadcast(b, *args[pos:(pos + 31)])
+    return b.shape
+
+
+@set_module('numpy')
+def broadcast_shapes(*args):
+    """
+    Broadcast the input shapes into a single shape.
+
+    :ref:`Learn more about broadcasting here <basics.broadcasting>`.
+
+    .. versionadded:: 1.20.0
+
+    Parameters
+    ----------
+    `*args` : tuples of ints, or ints
+        The shapes to be broadcast against each other.
+
+    Returns
+    -------
+    tuple
+        Broadcasted shape.
+
+    Raises
+    ------
+    ValueError
+        If the shapes are not compatible and cannot be broadcast according
+        to NumPy's broadcasting rules.
+
+    See Also
+    --------
+    broadcast
+    broadcast_arrays
+    broadcast_to
+
+    Examples
+    --------
+    >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2))
+    (3, 2)
+
+    >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7))
+    (5, 6, 7)
+    """
+    arrays = [np.empty(x, dtype=[]) for x in args]
+    return _broadcast_shape(*arrays)
+
+
+def _broadcast_arrays_dispatcher(*args, subok=None):
+    return args
+
+
+@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
+def broadcast_arrays(*args, subok=False):
+    """
+    Broadcast any number of arrays against each other.
+
+    Parameters
+    ----------
+    `*args` : array_likes
+        The arrays to broadcast.
+
+    subok : bool, optional
+        If True, then sub-classes will be passed-through, otherwise
+        the returned arrays will be forced to be a base-class array (default).
+
+    Returns
+    -------
+    broadcasted : list of arrays
+        These arrays are views on the original arrays.  They are typically
+        not contiguous.  Furthermore, more than one element of a
+        broadcasted array may refer to a single memory location. If you need
+        to write to the arrays, make copies first. While you can set the
+        ``writable`` flag True, writing to a single output value may end up
+        changing more than one location in the output array.
+
+        .. deprecated:: 1.17
+            The output is currently marked so that if written to, a deprecation
+            warning will be emitted. A future version will set the
+            ``writable`` flag False so writing to it will raise an error.
+
+    See Also
+    --------
+    broadcast
+    broadcast_to
+    broadcast_shapes
+
+    Examples
+    --------
+    >>> x = np.array([[1,2,3]])
+    >>> y = np.array([[4],[5]])
+    >>> np.broadcast_arrays(x, y)
+    [array([[1, 2, 3],
+           [1, 2, 3]]), array([[4, 4, 4],
+           [5, 5, 5]])]
+
+    Here is a useful idiom for getting contiguous copies instead of
+    non-contiguous views.
+
+    >>> [np.array(a) for a in np.broadcast_arrays(x, y)]
+    [array([[1, 2, 3],
+           [1, 2, 3]]), array([[4, 4, 4],
+           [5, 5, 5]])]
+
+    """
+    # nditer is not used here to avoid the limit of 32 arrays.
+    # Otherwise, something like the following one-liner would suffice:
+    # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
+    #                  order='C').itviews
+
+    args = [np.array(_m, copy=False, subok=subok) for _m in args]
+
+    shape = _broadcast_shape(*args)
+
+    if all(array.shape == shape for array in args):
+        # Common case where nothing needs to be broadcasted.
+        return args
+
+    return [_broadcast_to(array, shape, subok=subok, readonly=False)
+            for array in args]
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/stride_tricks.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/stride_tricks.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..4c9a98e85f7849ad262ca9e8a3d43f548234d8bb
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/stride_tricks.pyi
@@ -0,0 +1,80 @@
+from collections.abc import Iterable
+from typing import Any, TypeVar, overload, SupportsIndex
+
+from numpy import generic
+from numpy._typing import (
+    NDArray,
+    ArrayLike,
+    _ShapeLike,
+    _Shape,
+    _ArrayLike
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+
+__all__: list[str]
+
+class DummyArray:
+    __array_interface__: dict[str, Any]
+    base: None | NDArray[Any]
+    def __init__(
+        self,
+        interface: dict[str, Any],
+        base: None | NDArray[Any] = ...,
+    ) -> None: ...
+
+@overload
+def as_strided(
+    x: _ArrayLike[_SCT],
+    shape: None | Iterable[int] = ...,
+    strides: None | Iterable[int] = ...,
+    subok: bool = ...,
+    writeable: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def as_strided(
+    x: ArrayLike,
+    shape: None | Iterable[int] = ...,
+    strides: None | Iterable[int] = ...,
+    subok: bool = ...,
+    writeable: bool = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def sliding_window_view(
+    x: _ArrayLike[_SCT],
+    window_shape: int | Iterable[int],
+    axis: None | SupportsIndex = ...,
+    *,
+    subok: bool = ...,
+    writeable: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def sliding_window_view(
+    x: ArrayLike,
+    window_shape: int | Iterable[int],
+    axis: None | SupportsIndex = ...,
+    *,
+    subok: bool = ...,
+    writeable: bool = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def broadcast_to(
+    array: _ArrayLike[_SCT],
+    shape: int | Iterable[int],
+    subok: bool = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def broadcast_to(
+    array: ArrayLike,
+    shape: int | Iterable[int],
+    subok: bool = ...,
+) -> NDArray[Any]: ...
+
+def broadcast_shapes(*args: _ShapeLike) -> _Shape: ...
+
+def broadcast_arrays(
+    *args: ArrayLike,
+    subok: bool = ...,
+) -> list[NDArray[Any]]: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/twodim_base.py b/.venv/lib/python3.11/site-packages/numpy/lib/twodim_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..6dcb656519342e516a6f1be0bf283bb1f326214f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/twodim_base.py
@@ -0,0 +1,1183 @@
+""" Basic functions for manipulating 2d arrays
+
+"""
+import functools
+import operator
+
+from numpy.core.numeric import (
+    asanyarray, arange, zeros, greater_equal, multiply, ones,
+    asarray, where, int8, int16, int32, int64, intp, empty, promote_types,
+    diagonal, nonzero, indices
+    )
+from numpy.core.overrides import set_array_function_like_doc, set_module
+from numpy.core import overrides
+from numpy.core import iinfo
+from numpy.lib.stride_tricks import broadcast_to
+
+
+__all__ = [
+    'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
+    'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
+    'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+i1 = iinfo(int8)
+i2 = iinfo(int16)
+i4 = iinfo(int32)
+
+
+def _min_int(low, high):
+    """ get small int that fits the range """
+    if high <= i1.max and low >= i1.min:
+        return int8
+    if high <= i2.max and low >= i2.min:
+        return int16
+    if high <= i4.max and low >= i4.min:
+        return int32
+    return int64
+
+
+def _flip_dispatcher(m):
+    return (m,)
+
+
+@array_function_dispatch(_flip_dispatcher)
+def fliplr(m):
+    """
+    Reverse the order of elements along axis 1 (left/right).
+
+    For a 2-D array, this flips the entries in each row in the left/right
+    direction. Columns are preserved, but appear in a different order than
+    before.
+
+    Parameters
+    ----------
+    m : array_like
+        Input array, must be at least 2-D.
+
+    Returns
+    -------
+    f : ndarray
+        A view of `m` with the columns reversed.  Since a view
+        is returned, this operation is :math:`\\mathcal O(1)`.
+
+    See Also
+    --------
+    flipud : Flip array in the up/down direction.
+    flip : Flip array in one or more dimensions.
+    rot90 : Rotate array counterclockwise.
+
+    Notes
+    -----
+    Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``.
+    Requires the array to be at least 2-D.
+
+    Examples
+    --------
+    >>> A = np.diag([1.,2.,3.])
+    >>> A
+    array([[1.,  0.,  0.],
+           [0.,  2.,  0.],
+           [0.,  0.,  3.]])
+    >>> np.fliplr(A)
+    array([[0.,  0.,  1.],
+           [0.,  2.,  0.],
+           [3.,  0.,  0.]])
+
+    >>> A = np.random.randn(2,3,5)
+    >>> np.all(np.fliplr(A) == A[:,::-1,...])
+    True
+
+    """
+    m = asanyarray(m)
+    if m.ndim < 2:
+        raise ValueError("Input must be >= 2-d.")
+    return m[:, ::-1]
+
+
+@array_function_dispatch(_flip_dispatcher)
+def flipud(m):
+    """
+    Reverse the order of elements along axis 0 (up/down).
+
+    For a 2-D array, this flips the entries in each column in the up/down
+    direction. Rows are preserved, but appear in a different order than before.
+
+    Parameters
+    ----------
+    m : array_like
+        Input array.
+
+    Returns
+    -------
+    out : array_like
+        A view of `m` with the rows reversed.  Since a view is
+        returned, this operation is :math:`\\mathcal O(1)`.
+
+    See Also
+    --------
+    fliplr : Flip array in the left/right direction.
+    flip : Flip array in one or more dimensions.
+    rot90 : Rotate array counterclockwise.
+
+    Notes
+    -----
+    Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``.
+    Requires the array to be at least 1-D.
+
+    Examples
+    --------
+    >>> A = np.diag([1.0, 2, 3])
+    >>> A
+    array([[1.,  0.,  0.],
+           [0.,  2.,  0.],
+           [0.,  0.,  3.]])
+    >>> np.flipud(A)
+    array([[0.,  0.,  3.],
+           [0.,  2.,  0.],
+           [1.,  0.,  0.]])
+
+    >>> A = np.random.randn(2,3,5)
+    >>> np.all(np.flipud(A) == A[::-1,...])
+    True
+
+    >>> np.flipud([1,2])
+    array([2, 1])
+
+    """
+    m = asanyarray(m)
+    if m.ndim < 1:
+        raise ValueError("Input must be >= 1-d.")
+    return m[::-1, ...]
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
+    """
+    Return a 2-D array with ones on the diagonal and zeros elsewhere.
+
+    Parameters
+    ----------
+    N : int
+      Number of rows in the output.
+    M : int, optional
+      Number of columns in the output. If None, defaults to `N`.
+    k : int, optional
+      Index of the diagonal: 0 (the default) refers to the main diagonal,
+      a positive value refers to an upper diagonal, and a negative value
+      to a lower diagonal.
+    dtype : data-type, optional
+      Data-type of the returned array.
+    order : {'C', 'F'}, optional
+        Whether the output should be stored in row-major (C-style) or
+        column-major (Fortran-style) order in memory.
+
+        .. versionadded:: 1.14.0
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    I : ndarray of shape (N,M)
+      An array where all elements are equal to zero, except for the `k`-th
+      diagonal, whose values are equal to one.
+
+    See Also
+    --------
+    identity : (almost) equivalent function
+    diag : diagonal 2-D array from a 1-D array specified by the user.
+
+    Examples
+    --------
+    >>> np.eye(2, dtype=int)
+    array([[1, 0],
+           [0, 1]])
+    >>> np.eye(3, k=1)
+    array([[0.,  1.,  0.],
+           [0.,  0.,  1.],
+           [0.,  0.,  0.]])
+
+    """
+    if like is not None:
+        return _eye_with_like(like, N, M=M, k=k, dtype=dtype, order=order)
+    if M is None:
+        M = N
+    m = zeros((N, M), dtype=dtype, order=order)
+    if k >= M:
+        return m
+    # Ensure M and k are integers, so we don't get any surprise casting
+    # results in the expressions `M-k` and `M+1` used below.  This avoids
+    # a problem with inputs with type (for example) np.uint64.
+    M = operator.index(M)
+    k = operator.index(k)
+    if k >= 0:
+        i = k
+    else:
+        i = (-k) * M
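+    # In the flattened array a step of M+1 moves one row down and one column
+    # right, so the slice below walks the k-th diagonal from flat offset i.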
+    m[:M-k].flat[i::M+1] = 1
+    return m
+
+
+_eye_with_like = array_function_dispatch()(eye)
+
+
+def _diag_dispatcher(v, k=None):
+    return (v,)
+
+
+@array_function_dispatch(_diag_dispatcher)
+def diag(v, k=0):
+    """
+    Extract a diagonal or construct a diagonal array.
+
+    See the more detailed documentation for ``numpy.diagonal`` if you use this
+    function to extract a diagonal and wish to write to the resulting array;
+    whether it returns a copy or a view depends on what version of numpy you
+    are using.
+
+    Parameters
+    ----------
+    v : array_like
+        If `v` is a 2-D array, return a copy of its `k`-th diagonal.
+        If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
+        diagonal.
+    k : int, optional
+        Diagonal in question. The default is 0. Use `k>0` for diagonals
+        above the main diagonal, and `k<0` for diagonals below the main
+        diagonal.
+
+    Returns
+    -------
+    out : ndarray
+        The extracted diagonal or constructed diagonal array.
+
+    See Also
+    --------
+    diagonal : Return specified diagonals.
+    diagflat : Create a 2-D array with the flattened input as a diagonal.
+    trace : Sum along diagonals.
+    triu : Upper triangle of an array.
+    tril : Lower triangle of an array.
+
+    Examples
+    --------
+    >>> x = np.arange(9).reshape((3,3))
+    >>> x
+    array([[0, 1, 2],
+           [3, 4, 5],
+           [6, 7, 8]])
+
+    >>> np.diag(x)
+    array([0, 4, 8])
+    >>> np.diag(x, k=1)
+    array([1, 5])
+    >>> np.diag(x, k=-1)
+    array([3, 7])
+
+    >>> np.diag(np.diag(x))
+    array([[0, 0, 0],
+           [0, 4, 0],
+           [0, 0, 8]])
+
+    """
+    v = asanyarray(v)
+    s = v.shape
+    if len(s) == 1:
+        n = s[0]+abs(k)
+        res = zeros((n, n), v.dtype)
+        if k >= 0:
+            i = k
+        else:
+            i = (-k) * n
+        res[:n-k].flat[i::n+1] = v
+        return res
+    elif len(s) == 2:
+        return diagonal(v, k)
+    else:
+        raise ValueError("Input must be 1- or 2-d.")
+
+
+@array_function_dispatch(_diag_dispatcher)
+def diagflat(v, k=0):
+    """
+    Create a two-dimensional array with the flattened input as a diagonal.
+
+    Parameters
+    ----------
+    v : array_like
+        Input data, which is flattened and set as the `k`-th
+        diagonal of the output.
+    k : int, optional
+        Diagonal to set; 0, the default, corresponds to the "main" diagonal,
+        a positive (negative) `k` giving the number of the diagonal above
+        (below) the main.
+
+    Returns
+    -------
+    out : ndarray
+        The 2-D output array.
+
+    See Also
+    --------
+    diag : MATLAB work-alike for 1-D and 2-D arrays.
+    diagonal : Return specified diagonals.
+    trace : Sum along diagonals.
+
+    Examples
+    --------
+    >>> np.diagflat([[1,2], [3,4]])
+    array([[1, 0, 0, 0],
+           [0, 2, 0, 0],
+           [0, 0, 3, 0],
+           [0, 0, 0, 4]])
+
+    >>> np.diagflat([1,2], 1)
+    array([[0, 1, 0],
+           [0, 0, 2],
+           [0, 0, 0]])
+
+    """
+    try:
+        wrap = v.__array_wrap__
+    except AttributeError:
+        wrap = None
+    v = asarray(v).ravel()
+    s = len(v)
+    n = s + abs(k)
+    res = zeros((n, n), v.dtype)
+    if (k >= 0):
+        i = arange(0, n-k, dtype=intp)
+        fi = i+k+i*n
+    else:
+        i = arange(0, n+k, dtype=intp)
+        fi = i+(i-k)*n
+    res.flat[fi] = v
+    if not wrap:
+        return res
+    return wrap(res)
+
+
+@set_array_function_like_doc
+@set_module('numpy')
+def tri(N, M=None, k=0, dtype=float, *, like=None):
+    """
+    An array with ones at and below the given diagonal and zeros elsewhere.
+
+    Parameters
+    ----------
+    N : int
+        Number of rows in the array.
+    M : int, optional
+        Number of columns in the array.
+        By default, `M` is taken equal to `N`.
+    k : int, optional
+        The sub-diagonal at and below which the array is filled.
+        `k` = 0 is the main diagonal, while `k` < 0 is below it,
+        and `k` > 0 is above.  The default is 0.
+    dtype : dtype, optional
+        Data type of the returned array.  The default is float.
+    ${ARRAY_FUNCTION_LIKE}
+
+        .. versionadded:: 1.20.0
+
+    Returns
+    -------
+    tri : ndarray of shape (N, M)
+        Array with its lower triangle filled with ones and zeros elsewhere;
+        in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
+
+    Examples
+    --------
+    >>> np.tri(3, 5, 2, dtype=int)
+    array([[1, 1, 1, 0, 0],
+           [1, 1, 1, 1, 0],
+           [1, 1, 1, 1, 1]])
+
+    >>> np.tri(3, 5, -1)
+    array([[0.,  0.,  0.,  0.,  0.],
+           [1.,  0.,  0.,  0.,  0.],
+           [1.,  1.,  0.,  0.,  0.]])
+
+    """
+    if like is not None:
+        return _tri_with_like(like, N, M=M, k=k, dtype=dtype)
+
+    if M is None:
+        M = N
+
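+    # Outer >= comparison: entry (i, j) is True exactly when i >= j - k,
+    # i.e. j <= i + k, which is the lower triangle offset by k.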
+    m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
+                            arange(-k, M-k, dtype=_min_int(-k, M - k)))
+
+    # Avoid making a copy if the requested type is already bool
+    m = m.astype(dtype, copy=False)
+
+    return m
+
+
+_tri_with_like = array_function_dispatch()(tri)
+
+
+def _trilu_dispatcher(m, k=None):
+    return (m,)
+
+
+@array_function_dispatch(_trilu_dispatcher)
+def tril(m, k=0):
+    """
+    Lower triangle of an array.
+
+    Return a copy of an array with elements above the `k`-th diagonal zeroed.
+    For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two
+    axes.
+
+    Parameters
+    ----------
+    m : array_like, shape (..., M, N)
+        Input array.
+    k : int, optional
+        Diagonal above which to zero elements.  `k = 0` (the default) is the
+        main diagonal, `k < 0` is below it and `k > 0` is above.
+
+    Returns
+    -------
+    tril : ndarray, shape (..., M, N)
+        Lower triangle of `m`, of same shape and data-type as `m`.
+
+    See Also
+    --------
+    triu : same thing, only for the upper triangle
+
+    Examples
+    --------
+    >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
+    array([[ 0,  0,  0],
+           [ 4,  0,  0],
+           [ 7,  8,  0],
+           [10, 11, 12]])
+
+    >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5))
+    array([[[ 0,  0,  0,  0,  0],
+            [ 5,  6,  0,  0,  0],
+            [10, 11, 12,  0,  0],
+            [15, 16, 17, 18,  0]],
+           [[20,  0,  0,  0,  0],
+            [25, 26,  0,  0,  0],
+            [30, 31, 32,  0,  0],
+            [35, 36, 37, 38,  0]],
+           [[40,  0,  0,  0,  0],
+            [45, 46,  0,  0,  0],
+            [50, 51, 52,  0,  0],
+            [55, 56, 57, 58,  0]]])
+
+    """
+    m = asanyarray(m)
+    mask = tri(*m.shape[-2:], k=k, dtype=bool)
+
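+    # zeros(1, m.dtype) broadcasts against m, so entries above the k-th
+    # diagonal are zeroed without allocating a full array of zeros.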
+    return where(mask, m, zeros(1, m.dtype))
+
+
+@array_function_dispatch(_trilu_dispatcher)
+def triu(m, k=0):
+    """
+    Upper triangle of an array.
+
+    Return a copy of an array with the elements below the `k`-th diagonal
+    zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the
+    final two axes.
+
+    Please refer to the documentation for `tril` for further details.
+
+    See Also
+    --------
+    tril : lower triangle of an array
+
+    Examples
+    --------
+    >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
+    array([[ 1,  2,  3],
+           [ 4,  5,  6],
+           [ 0,  8,  9],
+           [ 0,  0, 12]])
+
+    >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5))
+    array([[[ 0,  1,  2,  3,  4],
+            [ 0,  6,  7,  8,  9],
+            [ 0,  0, 12, 13, 14],
+            [ 0,  0,  0, 18, 19]],
+           [[20, 21, 22, 23, 24],
+            [ 0, 26, 27, 28, 29],
+            [ 0,  0, 32, 33, 34],
+            [ 0,  0,  0, 38, 39]],
+           [[40, 41, 42, 43, 44],
+            [ 0, 46, 47, 48, 49],
+            [ 0,  0, 52, 53, 54],
+            [ 0,  0,  0, 58, 59]]])
+
+    """
+    m = asanyarray(m)
+    mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
+
+    return where(mask, zeros(1, m.dtype), m)
+
+
+def _vander_dispatcher(x, N=None, increasing=None):
+    return (x,)
+
+
+# Originally borrowed from John Hunter and matplotlib
+@array_function_dispatch(_vander_dispatcher)
+def vander(x, N=None, increasing=False):
+    """
+    Generate a Vandermonde matrix.
+
+    The columns of the output matrix are powers of the input vector. The
+    order of the powers is determined by the `increasing` boolean argument.
+    Specifically, when `increasing` is False, the `i`-th output column is
+    the input vector raised element-wise to the power of ``N - i - 1``. Such
+    a matrix with a geometric progression in each row is named for Alexandre-
+    Theophile Vandermonde.
+
+    Parameters
+    ----------
+    x : array_like
+        1-D input array.
+    N : int, optional
+        Number of columns in the output.  If `N` is not specified, a square
+        array is returned (``N = len(x)``).
+    increasing : bool, optional
+        Order of the powers of the columns.  If True, the powers increase
+        from left to right, if False (the default) they are reversed.
+
+        .. versionadded:: 1.9.0
+
+    Returns
+    -------
+    out : ndarray
+        Vandermonde matrix.  If `increasing` is False, the first column is
+        ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
+        True, the columns are ``x^0, x^1, ..., x^(N-1)``.
+
+    See Also
+    --------
+    polynomial.polynomial.polyvander
+
+    Examples
+    --------
+    >>> x = np.array([1, 2, 3, 5])
+    >>> N = 3
+    >>> np.vander(x, N)
+    array([[ 1,  1,  1],
+           [ 4,  2,  1],
+           [ 9,  3,  1],
+           [25,  5,  1]])
+
+    >>> np.column_stack([x**(N-1-i) for i in range(N)])
+    array([[ 1,  1,  1],
+           [ 4,  2,  1],
+           [ 9,  3,  1],
+           [25,  5,  1]])
+
+    >>> x = np.array([1, 2, 3, 5])
+    >>> np.vander(x)
+    array([[  1,   1,   1,   1],
+           [  8,   4,   2,   1],
+           [ 27,   9,   3,   1],
+           [125,  25,   5,   1]])
+    >>> np.vander(x, increasing=True)
+    array([[  1,   1,   1,   1],
+           [  1,   2,   4,   8],
+           [  1,   3,   9,  27],
+           [  1,   5,  25, 125]])
+
+    The determinant of a square Vandermonde matrix is the product
+    of the differences between the values of the input vector:
+
+    >>> np.linalg.det(np.vander(x))
+    48.000000000000043 # may vary
+    >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
+    48
+
+    """
+    x = asarray(x)
+    if x.ndim != 1:
+        raise ValueError("x must be a one-dimensional array or sequence.")
+    if N is None:
+        N = len(x)
+
+    v = empty((len(x), N), dtype=promote_types(x.dtype, int))
+    tmp = v[:, ::-1] if not increasing else v
+
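+    # Work on a reversed view for decreasing powers; column 0 is set to x**0
+    # and a running product along each row turns column j into x**j.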
+    if N > 0:
+        tmp[:, 0] = 1
+    if N > 1:
+        tmp[:, 1:] = x[:, None]
+        multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
+
+    return v
+
+
+def _histogram2d_dispatcher(x, y, bins=None, range=None, density=None,
+                            weights=None):
+    yield x
+    yield y
+
+    # This terrible logic is adapted from the checks in histogram2d
+    try:
+        N = len(bins)
+    except TypeError:
+        N = 1
+    if N == 2:
+        yield from bins  # bins=[x, y]
+    else:
+        yield bins
+
+    yield weights
+
+
+@array_function_dispatch(_histogram2d_dispatcher)
+def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
+    """
+    Compute the bi-dimensional histogram of two data samples.
+
+    Parameters
+    ----------
+    x : array_like, shape (N,)
+        An array containing the x coordinates of the points to be
+        histogrammed.
+    y : array_like, shape (N,)
+        An array containing the y coordinates of the points to be
+        histogrammed.
+    bins : int or array_like or [int, int] or [array, array], optional
+        The bin specification:
+
+          * If int, the number of bins for the two dimensions (nx=ny=bins).
+          * If array_like, the bin edges for the two dimensions
+            (x_edges=y_edges=bins).
+          * If [int, int], the number of bins in each dimension
+            (nx, ny = bins).
+          * If [array, array], the bin edges in each dimension
+            (x_edges, y_edges = bins).
+          * A combination [int, array] or [array, int], where int
+            is the number of bins and array is the bin edges.
+
+    range : array_like, shape(2,2), optional
+        The leftmost and rightmost edges of the bins along each dimension
+        (if not specified explicitly in the `bins` parameters):
+        ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
+        will be considered outliers and not tallied in the histogram.
+    density : bool, optional
+        If False, the default, returns the number of samples in each bin.
+        If True, returns the probability *density* function at the bin,
+        ``bin_count / sample_count / bin_area``.
+    weights : array_like, shape(N,), optional
+        An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
+        Weights are normalized to 1 if `density` is True. If `density` is
+        False, the values of the returned histogram are equal to the sum of
+        the weights belonging to the samples falling into each bin.
+
+    Returns
+    -------
+    H : ndarray, shape(nx, ny)
+        The bi-dimensional histogram of samples `x` and `y`. Values in `x`
+        are histogrammed along the first dimension and values in `y` are
+        histogrammed along the second dimension.
+    xedges : ndarray, shape(nx+1,)
+        The bin edges along the first dimension.
+    yedges : ndarray, shape(ny+1,)
+        The bin edges along the second dimension.
+
+    See Also
+    --------
+    histogram : 1D histogram
+    histogramdd : Multidimensional histogram
+
+    Notes
+    -----
+    When `density` is True, then the returned histogram is the sample
+    density, defined such that the sum over bins of the product
+    ``bin_value * bin_area`` is 1.
+
+    Please note that the histogram does not follow the Cartesian convention
+    where `x` values are on the abscissa and `y` values on the ordinate
+    axis.  Rather, `x` is histogrammed along the first dimension of the
+    array (vertical), and `y` along the second dimension of the array
+    (horizontal).  This ensures compatibility with `histogramdd`.
+
+    Examples
+    --------
+    >>> from matplotlib.image import NonUniformImage
+    >>> import matplotlib.pyplot as plt
+
+    Construct a 2-D histogram with variable bin width. First define the bin
+    edges:
+
+    >>> xedges = [0, 1, 3, 5]
+    >>> yedges = [0, 2, 3, 4, 6]
+
+    Next we create a histogram H with random bin content:
+
+    >>> x = np.random.normal(2, 1, 100)
+    >>> y = np.random.normal(1, 1, 100)
+    >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
+    >>> # Histogram does not follow Cartesian convention (see Notes),
+    >>> # therefore transpose H for visualization purposes.
+    >>> H = H.T
+
+    :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
+
+    >>> fig = plt.figure(figsize=(7, 3))
+    >>> ax = fig.add_subplot(131, title='imshow: square bins')
+    >>> plt.imshow(H, interpolation='nearest', origin='lower',
+    ...         extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
+    <matplotlib.image.AxesImage object at 0x...>
+
+    :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
+
+    >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
+    ...         aspect='equal')
+    >>> X, Y = np.meshgrid(xedges, yedges)
+    >>> ax.pcolormesh(X, Y, H)
+    <matplotlib.collections.QuadMesh object at 0x...>
+
+    :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
+    display actual bin edges with interpolation:
+
+    >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
+    ...         aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
+    >>> im = NonUniformImage(ax, interpolation='bilinear')
+    >>> xcenters = (xedges[:-1] + xedges[1:]) / 2
+    >>> ycenters = (yedges[:-1] + yedges[1:]) / 2
+    >>> im.set_data(xcenters, ycenters, H)
+    >>> ax.add_image(im)
+    >>> plt.show()
+
+    It is also possible to construct a 2-D histogram without specifying bin
+    edges:
+
+    >>> # Generate non-symmetric test data
+    >>> n = 10000
+    >>> x = np.linspace(1, 100, n)
+    >>> y = 2*np.log(x) + np.random.rand(n) - 0.5
+    >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges
+    >>> H, yedges, xedges = np.histogram2d(y, x, bins=20)
+
+    Now we can plot the histogram using
+    :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a
+    :func:`hexbin <matplotlib.pyplot.hexbin>` for comparison.
+
+    >>> # Plot histogram using pcolormesh
+    >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)
+    >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow')
+    >>> ax1.plot(x, 2*np.log(x), 'k-')
+    >>> ax1.set_xlim(x.min(), x.max())
+    >>> ax1.set_ylim(y.min(), y.max())
+    >>> ax1.set_xlabel('x')
+    >>> ax1.set_ylabel('y')
+    >>> ax1.set_title('histogram2d')
+    >>> ax1.grid()
+
+    >>> # Create hexbin plot for comparison
+    >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow')
+    >>> ax2.plot(x, 2*np.log(x), 'k-')
+    >>> ax2.set_title('hexbin')
+    >>> ax2.set_xlim(x.min(), x.max())
+    >>> ax2.set_xlabel('x')
+    >>> ax2.grid()
+
+    >>> plt.show()
+    """
+    from numpy import histogramdd
+
+    if len(x) != len(y):
+        raise ValueError('x and y must have the same length.')
+
+    try:
+        N = len(bins)
+    except TypeError:
+        N = 1
+
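+    # bins that is neither a scalar nor a pair is a single array of edges,
+    # shared between the two dimensions.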
+    if N != 1 and N != 2:
+        xedges = yedges = asarray(bins)
+        bins = [xedges, yedges]
+    hist, edges = histogramdd([x, y], bins, range, density, weights)
+    return hist, edges[0], edges[1]
+
+
+@set_module('numpy')
+def mask_indices(n, mask_func, k=0):
+    """
+    Return the indices to access (n, n) arrays, given a masking function.
+
+    Assume `mask_func` is a function that, for a square array a of size
+    ``(n, n)`` with a possible offset argument `k`, when called as
+    ``mask_func(a, k)`` returns a new array with zeros in certain locations
+    (functions like `triu` or `tril` do precisely this). Then this function
+    returns the indices where the non-zero values would be located.
+
+    Parameters
+    ----------
+    n : int
+        The returned indices will be valid to access arrays of shape (n, n).
+    mask_func : callable
+        A function whose call signature is similar to that of `triu`, `tril`.
+        That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
+        `k` is an optional argument to the function.
+    k : scalar
+        An optional argument which is passed through to `mask_func`. Functions
+        like `triu`, `tril` take a second argument that is interpreted as an
+        offset.
+
+    Returns
+    -------
+    indices : tuple of arrays.
+        The `n` arrays of indices corresponding to the locations where
+        ``mask_func(np.ones((n, n)), k)`` is True.
+
+    See Also
+    --------
+    triu, tril, triu_indices, tril_indices
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    Examples
+    --------
+    These are the indices that would allow you to access the upper triangular
+    part of any 3x3 array:
+
+    >>> iu = np.mask_indices(3, np.triu)
+
+    For example, if `a` is a 3x3 array:
+
+    >>> a = np.arange(9).reshape(3, 3)
+    >>> a
+    array([[0, 1, 2],
+           [3, 4, 5],
+           [6, 7, 8]])
+    >>> a[iu]
+    array([0, 1, 2, 4, 5, 8])
+
+    An offset can be passed also to the masking function.  This gets us the
+    indices starting on the first diagonal right of the main one:
+
+    >>> iu1 = np.mask_indices(3, np.triu, 1)
+
+    with which we now extract only three elements:
+
+    >>> a[iu1]
+    array([1, 2, 5])
+
+    """
+    m = ones((n, n), int)
+    a = mask_func(m, k)
+    return nonzero(a != 0)
+
+
+@set_module('numpy')
+def tril_indices(n, k=0, m=None):
+    """
+    Return the indices for the lower-triangle of an (n, m) array.
+
+    Parameters
+    ----------
+    n : int
+        The row dimension of the arrays for which the returned
+        indices will be valid.
+    k : int, optional
+        Diagonal offset (see `tril` for details).
+    m : int, optional
+        .. versionadded:: 1.9.0
+
+        The column dimension of the arrays for which the returned
+        arrays will be valid.
+        By default `m` is taken equal to `n`.
+
+
+    Returns
+    -------
+    inds : tuple of arrays
+        The indices for the triangle. The returned tuple contains two arrays,
+        each with the indices along one dimension of the array.
+
+    See also
+    --------
+    triu_indices : similar function, for upper-triangular.
+    mask_indices : generic function accepting an arbitrary mask function.
+    tril, triu
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    Examples
+    --------
+    Compute two different sets of indices to access 4x4 arrays, one for the
+    lower triangular part starting at the main diagonal, and one starting two
+    diagonals further right:
+
+    >>> il1 = np.tril_indices(4)
+    >>> il2 = np.tril_indices(4, 2)
+
+    Here is how they can be used with a sample array:
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Both for indexing:
+
+    >>> a[il1]
+    array([ 0,  4,  5, ..., 13, 14, 15])
+
+    And for assigning values:
+
+    >>> a[il1] = -1
+    >>> a
+    array([[-1,  1,  2,  3],
+           [-1, -1,  6,  7],
+           [-1, -1, -1, 11],
+           [-1, -1, -1, -1]])
+
+    These cover almost the whole array (two diagonals right of the main one):
+
+    >>> a[il2] = -10
+    >>> a
+    array([[-10, -10, -10,   3],
+           [-10, -10, -10, -10],
+           [-10, -10, -10, -10],
+           [-10, -10, -10, -10]])
+
+    """
+    tri_ = tri(n, m, k=k, dtype=bool)
+
+    return tuple(broadcast_to(inds, tri_.shape)[tri_]
+                 for inds in indices(tri_.shape, sparse=True))
+
+
+def _trilu_indices_form_dispatcher(arr, k=None):
+    return (arr,)
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
+def tril_indices_from(arr, k=0):
+    """
+    Return the indices for the lower-triangle of arr.
+
+    See `tril_indices` for full details.
+
+    Parameters
+    ----------
+    arr : array_like
+        The indices will be valid for square arrays whose dimensions are
+        the same as arr.
+    k : int, optional
+        Diagonal offset (see `tril` for details).
+
+    Examples
+    --------
+
+    Create a 4 by 4 array.
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Pass the array to get the indices of the lower triangular elements.
+
+    >>> trili = np.tril_indices_from(a)
+    >>> trili
+    (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+    >>> a[trili]
+    array([ 0,  4,  5,  8,  9, 10, 12, 13, 14, 15])
+
+    This is syntactic sugar for tril_indices().
+
+    >>> np.tril_indices(a.shape[0])
+    (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
+
+    Use the `k` parameter to return the indices for the lower triangular array
+    up to the k-th diagonal.
+
+    >>> trili1 = np.tril_indices_from(a, k=1)
+    >>> a[trili1]
+    array([ 0,  1,  4,  5,  6,  8,  9, 10, 11, 12, 13, 14, 15])
+
+    See Also
+    --------
+    tril_indices, tril, triu_indices_from
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    """
+    if arr.ndim != 2:
+        raise ValueError("input array must be 2-d")
+    return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
+
+
+@set_module('numpy')
+def triu_indices(n, k=0, m=None):
+    """
+    Return the indices for the upper-triangle of an (n, m) array.
+
+    Parameters
+    ----------
+    n : int
+        The size of the arrays for which the returned indices will
+        be valid.
+    k : int, optional
+        Diagonal offset (see `triu` for details).
+    m : int, optional
+        .. versionadded:: 1.9.0
+
+        The column dimension of the arrays for which the returned
+        arrays will be valid.
+        By default `m` is taken equal to `n`.
+
+
+    Returns
+    -------
+    inds : tuple, shape(2) of ndarrays, shape(`n`)
+        The indices for the triangle. The returned tuple contains two arrays,
+        each with the indices along one dimension of the array.  Can be used
+        to slice a ndarray of shape(`n`, `n`).
+
+    See also
+    --------
+    tril_indices : similar function, for lower-triangular.
+    mask_indices : generic function accepting an arbitrary mask function.
+    triu, tril
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    Examples
+    --------
+    Compute two different sets of indices to access 4x4 arrays, one for the
+    upper triangular part starting at the main diagonal, and one starting two
+    diagonals further right:
+
+    >>> iu1 = np.triu_indices(4)
+    >>> iu2 = np.triu_indices(4, 2)
+
+    Here is how they can be used with a sample array:
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Both for indexing:
+
+    >>> a[iu1]
+    array([ 0,  1,  2, ..., 10, 11, 15])
+
+    And for assigning values:
+
+    >>> a[iu1] = -1
+    >>> a
+    array([[-1, -1, -1, -1],
+           [ 4, -1, -1, -1],
+           [ 8,  9, -1, -1],
+           [12, 13, 14, -1]])
+
+    These cover only a small part of the whole array (two diagonals right
+    of the main one):
+
+    >>> a[iu2] = -10
+    >>> a
+    array([[ -1,  -1, -10, -10],
+           [  4,  -1,  -1, -10],
+           [  8,   9,  -1,  -1],
+           [ 12,  13,  14,  -1]])
+
+    """
+    tri_ = ~tri(n, m, k=k - 1, dtype=bool)
+
+    return tuple(broadcast_to(inds, tri_.shape)[tri_]
+                 for inds in indices(tri_.shape, sparse=True))
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
+def triu_indices_from(arr, k=0):
+    """
+    Return the indices for the upper-triangle of arr.
+
+    See `triu_indices` for full details.
+
+    Parameters
+    ----------
+    arr : ndarray, shape(N, N)
+        The indices will be valid for square arrays.
+    k : int, optional
+        Diagonal offset (see `triu` for details).
+
+    Returns
+    -------
+    triu_indices_from : tuple, shape(2) of ndarray, shape(N)
+        Indices for the upper-triangle of `arr`.
+
+    Examples
+    --------
+
+    Create a 4 by 4 array.
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Pass the array to get the indices of the upper triangular elements.
+
+    >>> triui = np.triu_indices_from(a)
+    >>> triui
+    (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+    >>> a[triui]
+    array([ 0,  1,  2,  3,  5,  6,  7, 10, 11, 15])
+
+    This is syntactic sugar for triu_indices().
+
+    >>> np.triu_indices(a.shape[0])
+    (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
+
+    Use the `k` parameter to return the indices for the upper triangular array
+    from the k-th diagonal.
+
+    >>> triuim1 = np.triu_indices_from(a, k=1)
+    >>> a[triuim1]
+    array([ 1,  2,  3,  6,  7, 11])
+
+
+    See Also
+    --------
+    triu_indices, triu, tril_indices_from
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    """
+    if arr.ndim != 2:
+        raise ValueError("input array must be 2-d")
+    return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/twodim_base.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/twodim_base.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..1b3b94bd5cba589f3d16c7e9a66ab261f0bd97cf
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/twodim_base.pyi
@@ -0,0 +1,239 @@
+from collections.abc import Callable, Sequence
+from typing import (
+    Any,
+    overload,
+    TypeVar,
+    Union,
+)
+
+from numpy import (
+    generic,
+    number,
+    bool_,
+    timedelta64,
+    datetime64,
+    int_,
+    intp,
+    float64,
+    signedinteger,
+    floating,
+    complexfloating,
+    object_,
+    _OrderCF,
+)
+
+from numpy._typing import (
+    DTypeLike,
+    _DTypeLike,
+    ArrayLike,
+    _ArrayLike,
+    NDArray,
+    _SupportsArrayFunc,
+    _ArrayLikeInt_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeComplex_co,
+    _ArrayLikeObject_co,
+)
+
+_T = TypeVar("_T")
+_SCT = TypeVar("_SCT", bound=generic)
+
+# The returned arrays dtype must be compatible with `np.equal`
+_MaskFunc = Callable[
+    [NDArray[int_], _T],
+    NDArray[Union[number[Any], bool_, timedelta64, datetime64, object_]],
+]
+
+__all__: list[str]
+
+@overload
+def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def fliplr(m: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def flipud(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+@overload
+def flipud(m: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def eye(
+    N: int,
+    M: None | int = ...,
+    k: int = ...,
+    dtype: None = ...,
+    order: _OrderCF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[float64]: ...
+@overload
+def eye(
+    N: int,
+    M: None | int = ...,
+    k: int = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    order: _OrderCF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def eye(
+    N: int,
+    M: None | int = ...,
+    k: int = ...,
+    dtype: DTypeLike = ...,
+    order: _OrderCF = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def diag(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def diagflat(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def tri(
+    N: int,
+    M: None | int = ...,
+    k: int = ...,
+    dtype: None = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...
+) -> NDArray[float64]: ...
+@overload
+def tri(
+    N: int,
+    M: None | int = ...,
+    k: int = ...,
+    dtype: _DTypeLike[_SCT] = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...
+) -> NDArray[_SCT]: ...
+@overload
+def tri(
+    N: int,
+    M: None | int = ...,
+    k: int = ...,
+    dtype: DTypeLike = ...,
+    *,
+    like: None | _SupportsArrayFunc = ...
+) -> NDArray[Any]: ...
+
+@overload
+def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+@overload
+def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+@overload
+def vander(  # type: ignore[misc]
+    x: _ArrayLikeInt_co,
+    N: None | int = ...,
+    increasing: bool = ...,
+) -> NDArray[signedinteger[Any]]: ...
+@overload
+def vander(  # type: ignore[misc]
+    x: _ArrayLikeFloat_co,
+    N: None | int = ...,
+    increasing: bool = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def vander(
+    x: _ArrayLikeComplex_co,
+    N: None | int = ...,
+    increasing: bool = ...,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def vander(
+    x: _ArrayLikeObject_co,
+    N: None | int = ...,
+    increasing: bool = ...,
+) -> NDArray[object_]: ...
+
+@overload
+def histogram2d(  # type: ignore[misc]
+    x: _ArrayLikeFloat_co,
+    y: _ArrayLikeFloat_co,
+    bins: int | Sequence[int] = ...,
+    range: None | _ArrayLikeFloat_co = ...,
+    density: None | bool = ...,
+    weights: None | _ArrayLikeFloat_co = ...,
+) -> tuple[
+    NDArray[float64],
+    NDArray[floating[Any]],
+    NDArray[floating[Any]],
+]: ...
+@overload
+def histogram2d(
+    x: _ArrayLikeComplex_co,
+    y: _ArrayLikeComplex_co,
+    bins: int | Sequence[int] = ...,
+    range: None | _ArrayLikeFloat_co = ...,
+    density: None | bool = ...,
+    weights: None | _ArrayLikeFloat_co = ...,
+) -> tuple[
+    NDArray[float64],
+    NDArray[complexfloating[Any, Any]],
+    NDArray[complexfloating[Any, Any]],
+]: ...
+@overload  # TODO: Sort out `bins`
+def histogram2d(
+    x: _ArrayLikeComplex_co,
+    y: _ArrayLikeComplex_co,
+    bins: Sequence[_ArrayLikeInt_co],
+    range: None | _ArrayLikeFloat_co = ...,
+    density: None | bool = ...,
+    weights: None | _ArrayLikeFloat_co = ...,
+) -> tuple[
+    NDArray[float64],
+    NDArray[Any],
+    NDArray[Any],
+]: ...
+
+# NOTE: we're assuming/demanding here the `mask_func` returns
+# an ndarray of shape `(n, n)`; otherwise there is the possibility
+# of the output tuple having more or less than 2 elements
+@overload
+def mask_indices(
+    n: int,
+    mask_func: _MaskFunc[int],
+    k: int = ...,
+) -> tuple[NDArray[intp], NDArray[intp]]: ...
+@overload
+def mask_indices(
+    n: int,
+    mask_func: _MaskFunc[_T],
+    k: _T,
+) -> tuple[NDArray[intp], NDArray[intp]]: ...
+
+def tril_indices(
+    n: int,
+    k: int = ...,
+    m: None | int = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+def tril_indices_from(
+    arr: NDArray[Any],
+    k: int = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+def triu_indices(
+    n: int,
+    k: int = ...,
+    m: None | int = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+def triu_indices_from(
+    arr: NDArray[Any],
+    k: int = ...,
+) -> tuple[NDArray[int_], NDArray[int_]]: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/type_check.py b/.venv/lib/python3.11/site-packages/numpy/lib/type_check.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f84b80e5860c5bbd9e73790f8a5d21715ab9b6d
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/type_check.py
@@ -0,0 +1,735 @@
+"""Automatically adapted for numpy Sep 19, 2005 by convertcode.py
+
+"""
+import functools
+
+__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
+           'isreal', 'nan_to_num', 'real', 'real_if_close',
+           'typename', 'asfarray', 'mintypecode',
+           'common_type']
+
+from .._utils import set_module
+import numpy.core.numeric as _nx
+from numpy.core.numeric import asarray, asanyarray, isnan, zeros
+from numpy.core import overrides, getlimits
+from .ufunclike import isneginf, isposinf
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
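+# Priority order used by mintypecode: min() keyed on position in this string
+# returns the earliest (most general) matching character.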
+_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
+
+
+@set_module('numpy')
+def mintypecode(typechars, typeset='GDFgdf', default='d'):
+    """
+    Return the character for the minimum-size type to which given types can
+    be safely cast.
+
+    The returned type character must represent the smallest size dtype such
+    that an array of the returned type can handle the data from an array of
+    all types in `typechars` (or if `typechars` is an array, then its
+    dtype.char).
+
+    Parameters
+    ----------
+    typechars : list of str or array_like
+        If a list of strings, each string should represent a dtype.
+        If array_like, the character representation of the array dtype is used.
+    typeset : str or list of str, optional
+        The set of characters that the returned character is chosen from.
+        The default set is 'GDFgdf'.
+    default : str, optional
+        The default character, this is returned if none of the characters in
+        `typechars` matches a character in `typeset`.
+
+    Returns
+    -------
+    typechar : str
+        The character representing the minimum-size type that was found.
+
+    See Also
+    --------
+    dtype, sctype2char, maximum_sctype
+
+    Examples
+    --------
+    >>> np.mintypecode(['d', 'f', 'S'])
+    'd'
+    >>> x = np.array([1.1, 2-3.j])
+    >>> np.mintypecode(x)
+    'D'
+
+    >>> np.mintypecode('abceh', default='G')
+    'G'
+
+    """
+    typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char
+                 for t in typechars)
+    intersection = set(t for t in typecodes if t in typeset)
+    if not intersection:
+        return default
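+    # complex64 ('F') cannot represent float64 ('d') values exactly, so this
+    # particular pair is promoted to complex128 ('D').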
+    if 'F' in intersection and 'd' in intersection:
+        return 'D'
+    return min(intersection, key=_typecodes_by_elsize.index)
+
+
+def _asfarray_dispatcher(a, dtype=None):
+    return (a,)
+
+
+@array_function_dispatch(_asfarray_dispatcher)
+def asfarray(a, dtype=_nx.float_):
+    """
+    Return an array converted to a float type.
+
+    Parameters
+    ----------
+    a : array_like
+        The input array.
+    dtype : str or dtype object, optional
+        Float type code to coerce input array `a`.  If `dtype` is one of the
+        'int' dtypes, it is replaced with float64.
+
+    Returns
+    -------
+    out : ndarray
+        The input `a` as a float ndarray.
+
+    Examples
+    --------
+    >>> np.asfarray([2, 3])
+    array([2.,  3.])
+    >>> np.asfarray([2, 3], dtype='float')
+    array([2.,  3.])
+    >>> np.asfarray([2, 3], dtype='int8')
+    array([2.,  3.])
+
+    """
+    if not _nx.issubdtype(dtype, _nx.inexact):
+        dtype = _nx.float_
+    return asarray(a, dtype=dtype)
+
+
+def _real_dispatcher(val):
+    return (val,)
+
+
+@array_function_dispatch(_real_dispatcher)
+def real(val):
+    """
+    Return the real part of the complex argument.
+
+    Parameters
+    ----------
+    val : array_like
+        Input array.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The real component of the complex argument. If `val` is real, the type
+        of `val` is used for the output.  If `val` has complex elements, the
+        returned type is float.
+
+    See Also
+    --------
+    real_if_close, imag, angle
+
+    Examples
+    --------
+    >>> a = np.array([1+2j, 3+4j, 5+6j])
+    >>> a.real
+    array([1.,  3.,  5.])
+    >>> a.real = 9
+    >>> a
+    array([9.+2.j,  9.+4.j,  9.+6.j])
+    >>> a.real = np.array([9, 8, 7])
+    >>> a
+    array([9.+2.j,  8.+4.j,  7.+6.j])
+    >>> np.real(1 + 1j)
+    1.0
+
+    """
+    try:
+        return val.real
+    except AttributeError:
+        return asanyarray(val).real
+
+
+def _imag_dispatcher(val):
+    return (val,)
+
+
+@array_function_dispatch(_imag_dispatcher)
+def imag(val):
+    """
+    Return the imaginary part of the complex argument.
+
+    Parameters
+    ----------
+    val : array_like
+        Input array.
+
+    Returns
+    -------
+    out : ndarray or scalar
+        The imaginary component of the complex argument. If `val` is real,
+        the type of `val` is used for the output.  If `val` has complex
+        elements, the returned type is float.
+
+    See Also
+    --------
+    real, angle, real_if_close
+
+    Examples
+    --------
+    >>> a = np.array([1+2j, 3+4j, 5+6j])
+    >>> a.imag
+    array([2.,  4.,  6.])
+    >>> a.imag = np.array([8, 10, 12])
+    >>> a
+    array([1. +8.j,  3.+10.j,  5.+12.j])
+    >>> np.imag(1 + 1j)
+    1.0
+
+    """
+    try:
+        return val.imag
+    except AttributeError:
+        return asanyarray(val).imag
+
+
+def _is_type_dispatcher(x):
+    return (x,)
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def iscomplex(x):
+    """
+    Returns a bool array that is True where the input element is complex.
+
+    What is tested is whether the input has a non-zero imaginary part, not if
+    the input type is complex.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+
+    Returns
+    -------
+    out : ndarray of bools
+        Output array.
+
+    See Also
+    --------
+    isreal
+    iscomplexobj : Return True if x is a complex type or an array of complex
+                   numbers.
+
+    Examples
+    --------
+    >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
+    array([ True, False, False, False, False,  True])
+
+    """
+    ax = asanyarray(x)
+    if issubclass(ax.dtype.type, _nx.complexfloating):
+        return ax.imag != 0
+    res = zeros(ax.shape, bool)
+    return res[()]   # convert to scalar if needed
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def isreal(x):
+    """
+    Returns a bool array that is True where the input element is real.
+
+    If element has complex type with zero complex part, the return value
+    for that element is True.
+
+    Parameters
+    ----------
+    x : array_like
+        Input array.
+
+    Returns
+    -------
+    out : ndarray, bool
+        Boolean array of same shape as `x`.
+
+    Notes
+    -----
+    `isreal` may behave unexpectedly for string or object arrays (see examples)
+
+    See Also
+    --------
+    iscomplex
+    isrealobj : Return True if x is not a complex type.
+
+    Examples
+    --------
+    >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex)
+    >>> np.isreal(a)
+    array([False,  True,  True,  True,  True, False])
+
+    The function does not work on string arrays.
+
+    >>> a = np.array([2j, "a"], dtype="U")
+    >>> np.isreal(a)  # Warns about non-elementwise comparison
+    False
+
+    Returns True for all elements in input array of ``dtype=object`` even if
+    any of the elements is complex.
+
+    >>> a = np.array([1, "2", 3+4j], dtype=object)
+    >>> np.isreal(a)
+    array([ True,  True,  True])
+
+    isreal should not be used with object arrays
+
+    >>> a = np.array([1+2j, 2+1j], dtype=object)
+    >>> np.isreal(a)
+    array([ True,  True])
+
+    """
+    return imag(x) == 0
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def iscomplexobj(x):
+    """
+    Check for a complex type or an array of complex numbers.
+
+    The type of the input is checked, not the value. Even if the input
+    has an imaginary part equal to zero, `iscomplexobj` evaluates to True.
+
+    Parameters
+    ----------
+    x : any
+        The input can be of any type and shape.
+
+    Returns
+    -------
+    iscomplexobj : bool
+        The return value, True if `x` is of a complex type or has at least
+        one complex element.
+
+    See Also
+    --------
+    isrealobj, iscomplex
+
+    Examples
+    --------
+    >>> np.iscomplexobj(1)
+    False
+    >>> np.iscomplexobj(1+0j)
+    True
+    >>> np.iscomplexobj([3, 1+0j, True])
+    True
+
+    """
+    try:
+        dtype = x.dtype
+        type_ = dtype.type
+    except AttributeError:
+        type_ = asarray(x).dtype.type
+    return issubclass(type_, _nx.complexfloating)
+
+
+@array_function_dispatch(_is_type_dispatcher)
+def isrealobj(x):
+    """
+    Return True if x is not of a complex type and not an array of complex numbers.
+
+    The type of the input is checked, not the value. So even if the input
+    has an imaginary part equal to zero, `isrealobj` evaluates to False
+    if the data type is complex.
+
+    Parameters
+    ----------
+    x : any
+        The input can be of any type and shape.
+
+    Returns
+    -------
+    y : bool
+        The return value, False if `x` is of a complex type.
+
+    See Also
+    --------
+    iscomplexobj, isreal
+
+    Notes
+    -----
+    The function is only meant for arrays with numerical values but it
+    accepts all other objects. Since it assumes array input, the return
+    value of other objects may be True.
+
+    >>> np.isrealobj('A string')
+    True
+    >>> np.isrealobj(False)
+    True
+    >>> np.isrealobj(None)
+    True
+
+    Examples
+    --------
+    >>> np.isrealobj(1)
+    True
+    >>> np.isrealobj(1+0j)
+    False
+    >>> np.isrealobj([3, 1+0j, True])
+    False
+
+    """
+    return not iscomplexobj(x)
+
+#-----------------------------------------------------------------------------
+
+def _getmaxmin(t):
+    from numpy.core import getlimits
+    f = getlimits.finfo(t)
+    return f.max, f.min
+
+
+def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):
+    return (x,)
+
+
+@array_function_dispatch(_nan_to_num_dispatcher)
+def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
+    """
+    Replace NaN with zero and infinity with large finite numbers (default
+    behaviour) or with the numbers defined by the user using the `nan`,
+    `posinf` and/or `neginf` keywords.
+
+    If `x` is inexact, NaN is replaced by zero or by the user defined value in
+    `nan` keyword, infinity is replaced by the largest finite floating point
+    values representable by ``x.dtype`` or by the user defined value in
+    `posinf` keyword and -infinity is replaced by the most negative finite
+    floating point values representable by ``x.dtype`` or by the user defined
+    value in `neginf` keyword.
+
+    For complex dtypes, the above is applied to each of the real and
+    imaginary components of `x` separately.
+
+    If `x` is not inexact, then no replacements are made.
+
+    Parameters
+    ----------
+    x : scalar or array_like
+        Input data.
+    copy : bool, optional
+        Whether to create a copy of `x` (True) or to replace values
+        in-place (False). The in-place operation only occurs if
+        casting to an array does not require a copy.
+        Default is True.
+
+        .. versionadded:: 1.13
+    nan : int, float, optional
+        Value to be used to fill NaN values. If no value is passed
+        then NaN values will be replaced with 0.0.
+
+        .. versionadded:: 1.17
+    posinf : int, float, optional
+        Value to be used to fill positive infinity values. If no value is
+        passed then positive infinity values will be replaced with a very
+        large number.
+
+        .. versionadded:: 1.17
+    neginf : int, float, optional
+        Value to be used to fill negative infinity values. If no value is
+        passed then negative infinity values will be replaced with a very
+        small (or negative) number.
+
+        .. versionadded:: 1.17
+
+
+
+    Returns
+    -------
+    out : ndarray
+        `x`, with the non-finite values replaced. If `copy` is False, this may
+        be `x` itself.
+
+    See Also
+    --------
+    isinf : Shows which elements are positive or negative infinity.
+    isneginf : Shows which elements are negative infinity.
+    isposinf : Shows which elements are positive infinity.
+    isnan : Shows which elements are Not a Number (NaN).
+    isfinite : Shows which elements are finite (not NaN, not infinity)
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+
+    Examples
+    --------
+    >>> np.nan_to_num(np.inf)
+    1.7976931348623157e+308
+    >>> np.nan_to_num(-np.inf)
+    -1.7976931348623157e+308
+    >>> np.nan_to_num(np.nan)
+    0.0
+    >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
+    >>> np.nan_to_num(x)
+    array([ 1.79769313e+308, -1.79769313e+308,  0.00000000e+000, # may vary
+           -1.28000000e+002,  1.28000000e+002])
+    >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
+    array([ 3.3333333e+07,  3.3333333e+07, -9.9990000e+03,
+           -1.2800000e+02,  1.2800000e+02])
+    >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
+    >>> np.nan_to_num(y)
+    array([  1.79769313e+308 +0.00000000e+000j, # may vary
+             0.00000000e+000 +0.00000000e+000j,
+             0.00000000e+000 +1.79769313e+308j])
+    >>> np.nan_to_num(y, nan=111111, posinf=222222)
+    array([222222.+111111.j, 111111.     +0.j, 111111.+222222.j])
+    """
+    x = _nx.array(x, subok=True, copy=copy)
+    xtype = x.dtype.type
+
+    isscalar = (x.ndim == 0)
+
+    if not issubclass(xtype, _nx.inexact):
+        return x[()] if isscalar else x
+
+    iscomplex = issubclass(xtype, _nx.complexfloating)
+
+    dest = (x.real, x.imag) if iscomplex else (x,)
+    maxf, minf = _getmaxmin(x.real.dtype)
+    if posinf is not None:
+        maxf = posinf
+    if neginf is not None:
+        minf = neginf
+    for d in dest:
+        idx_nan = isnan(d)
+        idx_posinf = isposinf(d)
+        idx_neginf = isneginf(d)
+        _nx.copyto(d, nan, where=idx_nan)
+        _nx.copyto(d, maxf, where=idx_posinf)
+        _nx.copyto(d, minf, where=idx_neginf)
+    return x[()] if isscalar else x
+
+#-----------------------------------------------------------------------------
+
+def _real_if_close_dispatcher(a, tol=None):
+    return (a,)
+
+
+@array_function_dispatch(_real_if_close_dispatcher)
+def real_if_close(a, tol=100):
+    """
+    If input is complex with all imaginary parts close to zero, return
+    real parts.
+
+    "Close to zero" is defined as `tol` * (machine epsilon of the type for
+    `a`).
+
+    Parameters
+    ----------
+    a : array_like
+        Input array.
+    tol : float
+        Tolerance in machine epsilons for the complex part of the elements
+        in the array. If the tolerance is <=1, then the absolute tolerance
+        is used.
+
+    Returns
+    -------
+    out : ndarray
+        If `a` is real, the type of `a` is used for the output.  If `a`
+        has complex elements, the returned type is float.
+
+    See Also
+    --------
+    real, imag, angle
+
+    Notes
+    -----
+    Machine epsilon varies from machine to machine and between data types
+    but Python floats on most platforms have a machine epsilon equal to
+    2.2204460492503131e-16.  You can use 'np.finfo(float).eps' to print
+    out the machine epsilon for floats.
+
+    Examples
+    --------
+    >>> np.finfo(float).eps
+    2.2204460492503131e-16 # may vary
+
+    >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000)
+    array([2.1, 5.2])
+    >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000)
+    array([2.1+4.e-13j, 5.2+3.e-15j])
+
+    """
+    a = asanyarray(a)
+    type_ = a.dtype.type
+    if not issubclass(type_, _nx.complexfloating):
+        return a
+    if tol > 1:
+        f = getlimits.finfo(type_)
+        tol = f.eps * tol
+    if _nx.all(_nx.absolute(a.imag) < tol):
+        a = a.real
+    return a
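+
+# Illustration: with the default tol=100 and float64 data, every imaginary part
+# must satisfy |a.imag| < 100 * np.finfo(float).eps, roughly 2.2e-14, for the
+# real parts to be returned.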
+
+
+#-----------------------------------------------------------------------------
+
+_namefromtype = {'S1': 'character',
+                 '?': 'bool',
+                 'b': 'signed char',
+                 'B': 'unsigned char',
+                 'h': 'short',
+                 'H': 'unsigned short',
+                 'i': 'integer',
+                 'I': 'unsigned integer',
+                 'l': 'long integer',
+                 'L': 'unsigned long integer',
+                 'q': 'long long integer',
+                 'Q': 'unsigned long long integer',
+                 'f': 'single precision',
+                 'd': 'double precision',
+                 'g': 'long precision',
+                 'F': 'complex single precision',
+                 'D': 'complex double precision',
+                 'G': 'complex long double precision',
+                 'S': 'string',
+                 'U': 'unicode',
+                 'V': 'void',
+                 'O': 'object'
+                 }
+
+@set_module('numpy')
+def typename(char):
+    """
+    Return a description for the given data type code.
+
+    Parameters
+    ----------
+    char : str
+        Data type code.
+
+    Returns
+    -------
+    out : str
+        Description of the input data type code.
+
+    See Also
+    --------
+    dtype, typecodes
+
+    Examples
+    --------
+    >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
+    ...              'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
+    >>> for typechar in typechars:
+    ...     print(typechar, ' : ', np.typename(typechar))
+    ...
+    S1  :  character
+    ?  :  bool
+    B  :  unsigned char
+    D  :  complex double precision
+    G  :  complex long double precision
+    F  :  complex single precision
+    I  :  unsigned integer
+    H  :  unsigned short
+    L  :  unsigned long integer
+    O  :  object
+    Q  :  unsigned long long integer
+    S  :  string
+    U  :  unicode
+    V  :  void
+    b  :  signed char
+    d  :  double precision
+    g  :  long precision
+    f  :  single precision
+    i  :  integer
+    h  :  short
+    l  :  long integer
+    q  :  long long integer
+
+    """
+    return _namefromtype[char]
+
+#-----------------------------------------------------------------------------
+
+# Determine the "minimum common type" for a group of arrays.
+array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble],
+              [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]]
+array_precision = {_nx.half: 0,
+                   _nx.single: 1,
+                   _nx.double: 2,
+                   _nx.longdouble: 3,
+                   _nx.csingle: 1,
+                   _nx.cdouble: 2,
+                   _nx.clongdouble: 3}
+
+
+def _common_type_dispatcher(*arrays):
+    return arrays
+
+
+@array_function_dispatch(_common_type_dispatcher)
+def common_type(*arrays):
+    """
+    Return a scalar type which is common to the input arrays.
+
+    The return type will always be an inexact (i.e. floating point) scalar
+    type, even if all the arrays are integer arrays. If one of the inputs is
+    an integer array, the minimum precision type that is returned is a
+    64-bit floating point dtype.
+
+    All input arrays except int64 and uint64 can be safely cast to the
+    returned dtype without loss of information.
+
+    Parameters
+    ----------
+    array1, array2, ... : ndarrays
+        Input arrays.
+
+    Returns
+    -------
+    out : data type code
+        Data type code.
+
+    See Also
+    --------
+    dtype, mintypecode
+
+    Examples
+    --------
+    >>> np.common_type(np.arange(2, dtype=np.float32))
+    <class 'numpy.float32'>
+    >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
+    <class 'numpy.float64'>
+    >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
+    <class 'numpy.complex128'>
+
+    """
+    is_complex = False
+    precision = 0
+    for a in arrays:
+        t = a.dtype.type
+        if iscomplexobj(a):
+            is_complex = True
+        if issubclass(t, _nx.integer):
+            p = 2  # array_precision[_nx.double]
+        else:
+            p = array_precision.get(t, None)
+            if p is None:
+                raise TypeError("can't get common type for non-numeric array")
+        precision = max(precision, p)
+    if is_complex:
+        return array_type[1][precision]
+    else:
+        return array_type[0][precision]
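+
+# Illustrative promotions following the table above (assuming default platform
+# types):
+#   common_type(np.arange(3, dtype=np.int32))    -> np.float64 (integers promote to double)
+#   common_type(np.arange(3, dtype=np.float32),
+#               np.array([1j], dtype=np.complex64)) -> np.complex64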
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/type_check.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/type_check.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..b04da21d44b6bad5cb50ea8abaa091b5a753da11
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/type_check.pyi
@@ -0,0 +1,222 @@
+from collections.abc import Container, Iterable
+from typing import (
+    Literal as L,
+    Any,
+    overload,
+    TypeVar,
+    Protocol,
+)
+
+from numpy import (
+    dtype,
+    generic,
+    bool_,
+    floating,
+    float64,
+    complexfloating,
+    integer,
+)
+
+from numpy._typing import (
+    ArrayLike,
+    DTypeLike,
+    NBitBase,
+    NDArray,
+    _64Bit,
+    _SupportsDType,
+    _ScalarLike_co,
+    _ArrayLike,
+    _DTypeLikeComplex,
+)
+
+_T = TypeVar("_T")
+_T_co = TypeVar("_T_co", covariant=True)
+_SCT = TypeVar("_SCT", bound=generic)
+_NBit1 = TypeVar("_NBit1", bound=NBitBase)
+_NBit2 = TypeVar("_NBit2", bound=NBitBase)
+
+class _SupportsReal(Protocol[_T_co]):
+    @property
+    def real(self) -> _T_co: ...
+
+class _SupportsImag(Protocol[_T_co]):
+    @property
+    def imag(self) -> _T_co: ...
+
+__all__: list[str]
+
+def mintypecode(
+    typechars: Iterable[str | ArrayLike],
+    typeset: Container[str] = ...,
+    default: str = ...,
+) -> str: ...
+
+# `asfarray` ignores dtypes if they're not inexact
+
+@overload
+def asfarray(
+    a: object,
+    dtype: None | type[float] = ...,
+) -> NDArray[float64]: ...
+@overload
+def asfarray(  # type: ignore[misc]
+    a: Any,
+    dtype: _DTypeLikeComplex,
+) -> NDArray[complexfloating[Any, Any]]: ...
+@overload
+def asfarray(
+    a: Any,
+    dtype: DTypeLike,
+) -> NDArray[floating[Any]]: ...
+
+@overload
+def real(val: _SupportsReal[_T]) -> _T: ...
+@overload
+def real(val: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def imag(val: _SupportsImag[_T]) -> _T: ...
+@overload
+def imag(val: ArrayLike) -> NDArray[Any]: ...
+
+@overload
+def iscomplex(x: _ScalarLike_co) -> bool_: ...  # type: ignore[misc]
+@overload
+def iscomplex(x: ArrayLike) -> NDArray[bool_]: ...
+
+@overload
+def isreal(x: _ScalarLike_co) -> bool_: ...  # type: ignore[misc]
+@overload
+def isreal(x: ArrayLike) -> NDArray[bool_]: ...
+
+def iscomplexobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
+
+def isrealobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
+
+@overload
+def nan_to_num(  # type: ignore[misc]
+    x: _SCT,
+    copy: bool = ...,
+    nan: float = ...,
+    posinf: None | float = ...,
+    neginf: None | float = ...,
+) -> _SCT: ...
+@overload
+def nan_to_num(
+    x: _ScalarLike_co,
+    copy: bool = ...,
+    nan: float = ...,
+    posinf: None | float = ...,
+    neginf: None | float = ...,
+) -> Any: ...
+@overload
+def nan_to_num(
+    x: _ArrayLike[_SCT],
+    copy: bool = ...,
+    nan: float = ...,
+    posinf: None | float = ...,
+    neginf: None | float = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def nan_to_num(
+    x: ArrayLike,
+    copy: bool = ...,
+    nan: float = ...,
+    posinf: None | float = ...,
+    neginf: None | float = ...,
+) -> NDArray[Any]: ...
+
+# If one passes a complex array to `real_if_close`, then one is reasonably
+# expected to verify the output dtype (so we can return an unsafe union here)
+
+@overload
+def real_if_close(  # type: ignore[misc]
+    a: _ArrayLike[complexfloating[_NBit1, _NBit1]],
+    tol: float = ...,
+) -> NDArray[floating[_NBit1]] | NDArray[complexfloating[_NBit1, _NBit1]]: ...
+@overload
+def real_if_close(
+    a: _ArrayLike[_SCT],
+    tol: float = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def real_if_close(
+    a: ArrayLike,
+    tol: float = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def typename(char: L['S1']) -> L['character']: ...
+@overload
+def typename(char: L['?']) -> L['bool']: ...
+@overload
+def typename(char: L['b']) -> L['signed char']: ...
+@overload
+def typename(char: L['B']) -> L['unsigned char']: ...
+@overload
+def typename(char: L['h']) -> L['short']: ...
+@overload
+def typename(char: L['H']) -> L['unsigned short']: ...
+@overload
+def typename(char: L['i']) -> L['integer']: ...
+@overload
+def typename(char: L['I']) -> L['unsigned integer']: ...
+@overload
+def typename(char: L['l']) -> L['long integer']: ...
+@overload
+def typename(char: L['L']) -> L['unsigned long integer']: ...
+@overload
+def typename(char: L['q']) -> L['long long integer']: ...
+@overload
+def typename(char: L['Q']) -> L['unsigned long long integer']: ...
+@overload
+def typename(char: L['f']) -> L['single precision']: ...
+@overload
+def typename(char: L['d']) -> L['double precision']: ...
+@overload
+def typename(char: L['g']) -> L['long precision']: ...
+@overload
+def typename(char: L['F']) -> L['complex single precision']: ...
+@overload
+def typename(char: L['D']) -> L['complex double precision']: ...
+@overload
+def typename(char: L['G']) -> L['complex long double precision']: ...
+@overload
+def typename(char: L['S']) -> L['string']: ...
+@overload
+def typename(char: L['U']) -> L['unicode']: ...
+@overload
+def typename(char: L['V']) -> L['void']: ...
+@overload
+def typename(char: L['O']) -> L['object']: ...
+
+@overload
+def common_type(  # type: ignore[misc]
+    *arrays: _SupportsDType[dtype[
+        integer[Any]
+    ]]
+) -> type[floating[_64Bit]]: ...
+@overload
+def common_type(  # type: ignore[misc]
+    *arrays: _SupportsDType[dtype[
+        floating[_NBit1]
+    ]]
+) -> type[floating[_NBit1]]: ...
+@overload
+def common_type(  # type: ignore[misc]
+    *arrays: _SupportsDType[dtype[
+        integer[Any] | floating[_NBit1]
+    ]]
+) -> type[floating[_NBit1 | _64Bit]]: ...
+@overload
+def common_type(  # type: ignore[misc]
+    *arrays: _SupportsDType[dtype[
+        floating[_NBit1] | complexfloating[_NBit2, _NBit2]
+    ]]
+) -> type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]: ...
+@overload
+def common_type(
+    *arrays: _SupportsDType[dtype[
+        integer[Any] | floating[_NBit1] | complexfloating[_NBit2, _NBit2]
+    ]]
+) -> type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/ufunclike.py b/.venv/lib/python3.11/site-packages/numpy/lib/ufunclike.py
new file mode 100644
index 0000000000000000000000000000000000000000..05fe60c5b105cd92d1ab1932522a61e0b7bc9d1f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/ufunclike.py
@@ -0,0 +1,210 @@
+"""
+Module of functions that are like ufuncs in acting on arrays and optionally
+storing results in an output array.
+
+"""
+__all__ = ['fix', 'isneginf', 'isposinf']
+
+import numpy.core.numeric as nx
+from numpy.core.overrides import array_function_dispatch
+import warnings
+import functools
+
+
+def _dispatcher(x, out=None):
+    return (x, out)
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+def fix(x, out=None):
+    """
+    Round to nearest integer towards zero.
+
+    Round an array of floats element-wise to nearest integer towards zero.
+    The rounded values are returned as floats.
+
+    Parameters
+    ----------
+    x : array_like
+        An array of floats to be rounded
+    out : ndarray, optional
+        A location into which the result is stored. If provided, it must have
+        a shape that the input broadcasts to. If not provided or None, a
+        freshly-allocated array is returned.
+
+    Returns
+    -------
+    out : ndarray of floats
+        A float array with the same dimensions as the input.
+        If second argument is not supplied then a float array is returned
+        with the rounded values.
+
+        If a second argument is supplied the result is stored there.
+        The return value `out` is then a reference to that array.
+
+    See Also
+    --------
+    rint, trunc, floor, ceil
+    around : Round to given number of decimals
+
+    Examples
+    --------
+    >>> np.fix(3.14)
+    3.0
+    >>> np.fix(3)
+    3.0
+    >>> np.fix([2.1, 2.9, -2.1, -2.9])
+    array([ 2.,  2., -2., -2.])
+
+    """
+    # promote back to an array if flattened
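+    # ceil() already truncates toward zero for negative inputs; floor() is then
+    # applied only where x >= 0, so positive values are truncated as well.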
+    res = nx.asanyarray(nx.ceil(x, out=out))
+    res = nx.floor(x, out=res, where=nx.greater_equal(x, 0))
+
+    # when no out argument is passed and no subclasses are involved, flatten
+    # scalars
+    if out is None and type(res) is nx.ndarray:
+        res = res[()]
+    return res
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+def isposinf(x, out=None):
+    """
+    Test element-wise for positive infinity, return result as bool array.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    out : array_like, optional
+        A location into which the result is stored. If provided, it must have a
+        shape that the input broadcasts to. If not provided or None, a
+        freshly-allocated boolean array is returned.
+
+    Returns
+    -------
+    out : ndarray
+        A boolean array with the same dimensions as the input.
+        If second argument is not supplied then a boolean array is returned
+        with values True where the corresponding element of the input is
+        positive infinity and values False where the element of the input is
+        not positive infinity.
+
+        If a second argument is supplied the result is stored there. If the
+        type of that array is a numeric type the result is represented as zeros
+        and ones, if the type is boolean then as False and True.
+        The return value `out` is then a reference to that array.
+
+    See Also
+    --------
+    isinf, isneginf, isfinite, isnan
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754).
+
+    Errors result if the second argument is also supplied when x is a scalar
+    input, if first and second arguments have different shapes, or if the
+    first argument has complex values.
+
+    Examples
+    --------
+    >>> np.isposinf(np.PINF)
+    True
+    >>> np.isposinf(np.inf)
+    True
+    >>> np.isposinf(np.NINF)
+    False
+    >>> np.isposinf([-np.inf, 0., np.inf])
+    array([False, False,  True])
+
+    >>> x = np.array([-np.inf, 0., np.inf])
+    >>> y = np.array([2, 2, 2])
+    >>> np.isposinf(x, y)
+    array([0, 0, 1])
+    >>> y
+    array([0, 0, 1])
+
+    """
+    is_inf = nx.isinf(x)
+    try:
+        signbit = ~nx.signbit(x)
+    except TypeError as e:
+        dtype = nx.asanyarray(x).dtype
+        raise TypeError(f'This operation is not supported for {dtype} values '
+                        'because it would be ambiguous.') from e
+    else:
+        return nx.logical_and(is_inf, signbit, out)
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+def isneginf(x, out=None):
+    """
+    Test element-wise for negative infinity, return result as bool array.
+
+    Parameters
+    ----------
+    x : array_like
+        The input array.
+    out : array_like, optional
+        A location into which the result is stored. If provided, it must have a
+        shape that the input broadcasts to. If not provided or None, a
+        freshly-allocated boolean array is returned.
+
+    Returns
+    -------
+    out : ndarray
+        A boolean array with the same dimensions as the input.
+        If second argument is not supplied then a numpy boolean array is
+        returned with values True where the corresponding element of the
+        input is negative infinity and values False where the element of
+        the input is not negative infinity.
+
+        If a second argument is supplied the result is stored there. If the
+        type of that array is a numeric type the result is represented as
+        zeros and ones, if the type is boolean then as False and True. The
+        return value `out` is then a reference to that array.
+
+    See Also
+    --------
+    isinf, isposinf, isnan, isfinite
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+    (IEEE 754).
+
+    Errors result if the second argument is also supplied when x is a scalar
+    input, if first and second arguments have different shapes, or if the
+    first argument has complex values.
+
+    Examples
+    --------
+    >>> np.isneginf(np.NINF)
+    True
+    >>> np.isneginf(np.inf)
+    False
+    >>> np.isneginf(np.PINF)
+    False
+    >>> np.isneginf([-np.inf, 0., np.inf])
+    array([ True, False, False])
+
+    >>> x = np.array([-np.inf, 0., np.inf])
+    >>> y = np.array([2, 2, 2])
+    >>> np.isneginf(x, y)
+    array([1, 0, 0])
+    >>> y
+    array([1, 0, 0])
+
+    """
+    is_inf = nx.isinf(x)
+    try:
+        signbit = nx.signbit(x)
+    except TypeError as e:
+        dtype = nx.asanyarray(x).dtype
+        raise TypeError(f'This operation is not supported for {dtype} values '
+                        'because it would be ambiguous.') from e
+    else:
+        return nx.logical_and(is_inf, signbit, out)
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/ufunclike.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/ufunclike.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..82537e2acd953e3ce82541b04cdca0dfba1963b4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/ufunclike.pyi
@@ -0,0 +1,66 @@
+from typing import Any, overload, TypeVar
+
+from numpy import floating, bool_, object_, ndarray
+from numpy._typing import (
+    NDArray,
+    _FloatLike_co,
+    _ArrayLikeFloat_co,
+    _ArrayLikeObject_co,
+)
+
+_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
+
+__all__: list[str]
+
+@overload
+def fix(  # type: ignore[misc]
+    x: _FloatLike_co,
+    out: None = ...,
+) -> floating[Any]: ...
+@overload
+def fix(
+    x: _ArrayLikeFloat_co,
+    out: None = ...,
+) -> NDArray[floating[Any]]: ...
+@overload
+def fix(
+    x: _ArrayLikeObject_co,
+    out: None = ...,
+) -> NDArray[object_]: ...
+@overload
+def fix(
+    x: _ArrayLikeFloat_co | _ArrayLikeObject_co,
+    out: _ArrayType,
+) -> _ArrayType: ...
+
+@overload
+def isposinf(  # type: ignore[misc]
+    x: _FloatLike_co,
+    out: None = ...,
+) -> bool_: ...
+@overload
+def isposinf(
+    x: _ArrayLikeFloat_co,
+    out: None = ...,
+) -> NDArray[bool_]: ...
+@overload
+def isposinf(
+    x: _ArrayLikeFloat_co,
+    out: _ArrayType,
+) -> _ArrayType: ...
+
+@overload
+def isneginf(  # type: ignore[misc]
+    x: _FloatLike_co,
+    out: None = ...,
+) -> bool_: ...
+@overload
+def isneginf(
+    x: _ArrayLikeFloat_co,
+    out: None = ...,
+) -> NDArray[bool_]: ...
+@overload
+def isneginf(
+    x: _ArrayLikeFloat_co,
+    out: _ArrayType,
+) -> _ArrayType: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/user_array.py b/.venv/lib/python3.11/site-packages/numpy/lib/user_array.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e96b477ef7456e5ce575b17698323b7ff479dcd
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/user_array.py
@@ -0,0 +1,286 @@
+"""
+Standard container-class for easy multiple-inheritance.
+
+Try to inherit from the ndarray instead of using this class as this is not
+complete.
+
+"""
+from numpy.core import (
+    array, asarray, absolute, add, subtract, multiply, divide,
+    remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
+    bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
+    greater_equal, shape, reshape, arange, sin, sqrt, transpose
+)
+
+
+class container:
+    """
+    container(data, dtype=None, copy=True)
+
+    Standard container-class for easy multiple-inheritance.
+
+    Methods
+    -------
+    copy
+    tostring
+    byteswap
+    astype
+
+    """
+    def __init__(self, data, dtype=None, copy=True):
+        self.array = array(data, dtype, copy=copy)
+
+    def __repr__(self):
+        if self.ndim > 0:
+            return self.__class__.__name__ + repr(self.array)[len("array"):]
+        else:
+            return self.__class__.__name__ + "(" + repr(self.array) + ")"
+
+    def __array__(self, t=None):
+        if t:
+            return self.array.astype(t)
+        return self.array
+
+    # Array as sequence
+    def __len__(self):
+        return len(self.array)
+
+    def __getitem__(self, index):
+        return self._rc(self.array[index])
+
+    def __setitem__(self, index, value):
+        self.array[index] = asarray(value, self.dtype)
+
+    def __abs__(self):
+        return self._rc(absolute(self.array))
+
+    def __neg__(self):
+        return self._rc(-self.array)
+
+    def __add__(self, other):
+        return self._rc(self.array + asarray(other))
+
+    __radd__ = __add__
+
+    def __iadd__(self, other):
+        add(self.array, other, self.array)
+        return self
+
+    def __sub__(self, other):
+        return self._rc(self.array - asarray(other))
+
+    def __rsub__(self, other):
+        return self._rc(asarray(other) - self.array)
+
+    def __isub__(self, other):
+        subtract(self.array, other, self.array)
+        return self
+
+    def __mul__(self, other):
+        return self._rc(multiply(self.array, asarray(other)))
+
+    __rmul__ = __mul__
+
+    def __imul__(self, other):
+        multiply(self.array, other, self.array)
+        return self
+
+    def __div__(self, other):
+        return self._rc(divide(self.array, asarray(other)))
+
+    def __rdiv__(self, other):
+        return self._rc(divide(asarray(other), self.array))
+
+    def __idiv__(self, other):
+        divide(self.array, other, self.array)
+        return self
+
+    def __mod__(self, other):
+        return self._rc(remainder(self.array, other))
+
+    def __rmod__(self, other):
+        return self._rc(remainder(other, self.array))
+
+    def __imod__(self, other):
+        remainder(self.array, other, self.array)
+        return self
+
+    def __divmod__(self, other):
+        return (self._rc(divide(self.array, other)),
+                self._rc(remainder(self.array, other)))
+
+    def __rdivmod__(self, other):
+        return (self._rc(divide(other, self.array)),
+                self._rc(remainder(other, self.array)))
+
+    def __pow__(self, other):
+        return self._rc(power(self.array, asarray(other)))
+
+    def __rpow__(self, other):
+        return self._rc(power(asarray(other), self.array))
+
+    def __ipow__(self, other):
+        power(self.array, other, self.array)
+        return self
+
+    def __lshift__(self, other):
+        return self._rc(left_shift(self.array, other))
+
+    def __rshift__(self, other):
+        return self._rc(right_shift(self.array, other))
+
+    def __rlshift__(self, other):
+        return self._rc(left_shift(other, self.array))
+
+    def __rrshift__(self, other):
+        return self._rc(right_shift(other, self.array))
+
+    def __ilshift__(self, other):
+        left_shift(self.array, other, self.array)
+        return self
+
+    def __irshift__(self, other):
+        right_shift(self.array, other, self.array)
+        return self
+
+    def __and__(self, other):
+        return self._rc(bitwise_and(self.array, other))
+
+    def __rand__(self, other):
+        return self._rc(bitwise_and(other, self.array))
+
+    def __iand__(self, other):
+        bitwise_and(self.array, other, self.array)
+        return self
+
+    def __xor__(self, other):
+        return self._rc(bitwise_xor(self.array, other))
+
+    def __rxor__(self, other):
+        return self._rc(bitwise_xor(other, self.array))
+
+    def __ixor__(self, other):
+        bitwise_xor(self.array, other, self.array)
+        return self
+
+    def __or__(self, other):
+        return self._rc(bitwise_or(self.array, other))
+
+    def __ror__(self, other):
+        return self._rc(bitwise_or(other, self.array))
+
+    def __ior__(self, other):
+        bitwise_or(self.array, other, self.array)
+        return self
+
+    def __pos__(self):
+        return self._rc(self.array)
+
+    def __invert__(self):
+        return self._rc(invert(self.array))
+
+    def _scalarfunc(self, func):
+        if self.ndim == 0:
+            return func(self[0])
+        else:
+            raise TypeError(
+                "only rank-0 arrays can be converted to Python scalars.")
+
+    def __complex__(self):
+        return self._scalarfunc(complex)
+
+    def __float__(self):
+        return self._scalarfunc(float)
+
+    def __int__(self):
+        return self._scalarfunc(int)
+
+    def __hex__(self):
+        return self._scalarfunc(hex)
+
+    def __oct__(self):
+        return self._scalarfunc(oct)
+
+    def __lt__(self, other):
+        return self._rc(less(self.array, other))
+
+    def __le__(self, other):
+        return self._rc(less_equal(self.array, other))
+
+    def __eq__(self, other):
+        return self._rc(equal(self.array, other))
+
+    def __ne__(self, other):
+        return self._rc(not_equal(self.array, other))
+
+    def __gt__(self, other):
+        return self._rc(greater(self.array, other))
+
+    def __ge__(self, other):
+        return self._rc(greater_equal(self.array, other))
+
+    def copy(self):
+        ""
+        return self._rc(self.array.copy())
+
+    def tostring(self):
+        ""
+        return self.array.tostring()
+
+    def tobytes(self):
+        ""
+        return self.array.tobytes()
+
+    def byteswap(self):
+        ""
+        return self._rc(self.array.byteswap())
+
+    def astype(self, typecode):
+        ""
+        return self._rc(self.array.astype(typecode))
+
+    def _rc(self, a):
+        if len(shape(a)) == 0:
+            return a
+        else:
+            return self.__class__(a)
+
+    def __array_wrap__(self, *args):
+        return self.__class__(args[0])
+
+    def __setattr__(self, attr, value):
+        if attr == 'array':
+            object.__setattr__(self, attr, value)
+            return
+        try:
+            self.array.__setattr__(attr, value)
+        except AttributeError:
+            object.__setattr__(self, attr, value)
+
+    # Only called after other approaches fail.
+    def __getattr__(self, attr):
+        if (attr == 'array'):
+            return object.__getattribute__(self, attr)
+        return self.array.__getattribute__(attr)
+
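+# Minimal usage sketch (illustrative, using a hypothetical subclass name):
+#
+#     class MyArray(container):
+#         pass
+#
+#     m = MyArray([1, 2, 3])
+#     m + 1          # arithmetic goes through _rc(), so the result is a MyArray
+#     m.shape        # unknown attributes are forwarded to the wrapped ndarray
+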
+#############################################################
+# Test of class container
+#############################################################
+if __name__ == '__main__':
+    temp = reshape(arange(10000), (100, 100))
+
+    ua = container(temp)
+    # new object created begin test
+    print(dir(ua))
+    print(shape(ua), ua.shape)  # I have changed Numeric.py
+
+    ua_small = ua[:3, :5]
+    print(ua_small)
+    # this did not change ua[0,0], which is not normal behavior
+    ua_small[0, 0] = 10
+    print(ua_small[0, 0], ua[0, 0])
+    print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
+    print(less(ua_small, 103), type(less(ua_small, 103)))
+    print(type(ua_small * reshape(arange(15), shape(ua_small))))
+    print(reshape(ua_small, (5, 3)))
+    print(transpose(ua_small))
diff --git a/.venv/lib/python3.11/site-packages/numpy/lib/utils.pyi b/.venv/lib/python3.11/site-packages/numpy/lib/utils.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..52ca92774975c010cae5b8485eb8e162c5bd22ae
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/lib/utils.pyi
@@ -0,0 +1,91 @@
+from ast import AST
+from collections.abc import Callable, Mapping, Sequence
+from typing import (
+    Any,
+    overload,
+    TypeVar,
+    Protocol,
+)
+
+from numpy import ndarray, generic
+
+from numpy.core.numerictypes import (
+    issubclass_ as issubclass_,
+    issubdtype as issubdtype,
+    issubsctype as issubsctype,
+)
+
+_T_contra = TypeVar("_T_contra", contravariant=True)
+_FuncType = TypeVar("_FuncType", bound=Callable[..., Any])
+
+# A file-like object opened in `w` mode
+class _SupportsWrite(Protocol[_T_contra]):
+    def write(self, s: _T_contra, /) -> Any: ...
+
+__all__: list[str]
+
+class _Deprecate:
+    old_name: None | str
+    new_name: None | str
+    message: None | str
+    def __init__(
+        self,
+        old_name: None | str = ...,
+        new_name: None | str = ...,
+        message: None | str = ...,
+    ) -> None: ...
+    # NOTE: `__call__` can in principle take arbitrary `*args` and `**kwargs`,
+    # even though they aren't used for anything
+    def __call__(self, func: _FuncType) -> _FuncType: ...
+
+def get_include() -> str: ...
+
+@overload
+def deprecate(
+    *,
+    old_name: None | str = ...,
+    new_name: None | str = ...,
+    message: None | str = ...,
+) -> _Deprecate: ...
+@overload
+def deprecate(
+    func: _FuncType,
+    /,
+    old_name: None | str = ...,
+    new_name: None | str = ...,
+    message: None | str = ...,
+) -> _FuncType: ...
+
+def deprecate_with_doc(msg: None | str) -> _Deprecate: ...
+
+# NOTE: In practice `byte_bounds` can (potentially) take any object
+# implementing the `__array_interface__` protocol. The caveat is
+# that certain keys, marked as optional in the spec, must be present for
+#  `byte_bounds`. This concerns `"strides"` and `"data"`.
+def byte_bounds(a: generic | ndarray[Any, Any]) -> tuple[int, int]: ...
+
+def who(vardict: None | Mapping[str, ndarray[Any, Any]] = ...) -> None: ...
+
+def info(
+    object: object = ...,
+    maxwidth: int = ...,
+    output: None | _SupportsWrite[str] = ...,
+    toplevel: str = ...,
+) -> None: ...
+
+def source(
+    object: object,
+    output: None | _SupportsWrite[str] = ...,
+) -> None: ...
+
+def lookfor(
+    what: str,
+    module: None | str | Sequence[str] = ...,
+    import_modules: bool = ...,
+    regenerate: bool = ...,
+    output: None | _SupportsWrite[str] = ...,
+) -> None: ...
+
+def safe_eval(source: str | AST) -> Any: ...
+
+def show_runtime() -> None: ...
diff --git a/.venv/lib/python3.11/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..73b1c5c8b759c05696dfd53471404d5e6f638bdb
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/ma/tests/__pycache__/__init__.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..893e24d3d308eade9fc9ecc94d6dd480fc447c56
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/ma/tests/__pycache__/test_subclassing.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/ma/tests/test_extras.py b/.venv/lib/python3.11/site-packages/numpy/ma/tests/test_extras.py
new file mode 100644
index 0000000000000000000000000000000000000000..d09a50fecd4a62e06e202a2c07443d9a58332e4a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/ma/tests/test_extras.py
@@ -0,0 +1,1870 @@
+# pylint: disable-msg=W0611, W0612, W0511
+"""Tests suite for MaskedArray.
+Adapted from the original test_ma by Pierre Gerard-Marchant
+
+:author: Pierre Gerard-Marchant
+:contact: pierregm_at_uga_dot_edu
+:version: $Id: test_extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
+
+"""
+import warnings
+import itertools
+import pytest
+
+import numpy as np
+from numpy.core.numeric import normalize_axis_tuple
+from numpy.testing import (
+    assert_warns, suppress_warnings
+    )
+from numpy.ma.testutils import (
+    assert_, assert_array_equal, assert_equal, assert_almost_equal
+    )
+from numpy.ma.core import (
+    array, arange, masked, MaskedArray, masked_array, getmaskarray, shape,
+    nomask, ones, zeros, count
+    )
+from numpy.ma.extras import (
+    atleast_1d, atleast_2d, atleast_3d, mr_, dot, polyfit, cov, corrcoef,
+    median, average, unique, setxor1d, setdiff1d, union1d, intersect1d, in1d,
+    ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols,
+    mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous,
+    notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin,
+    diagflat, ndenumerate, stack, vstack
+    )
+
+
+class TestGeneric:
+    #
+    def test_masked_all(self):
+        # Tests masked_all
+        # Standard dtype
+        test = masked_all((2,), dtype=float)
+        control = array([1, 1], mask=[1, 1], dtype=float)
+        assert_equal(test, control)
+        # Flexible dtype
+        dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']})
+        test = masked_all((2,), dtype=dt)
+        control = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt)
+        assert_equal(test, control)
+        test = masked_all((2, 2), dtype=dt)
+        control = array([[(0, 0), (0, 0)], [(0, 0), (0, 0)]],
+                        mask=[[(1, 1), (1, 1)], [(1, 1), (1, 1)]],
+                        dtype=dt)
+        assert_equal(test, control)
+        # Nested dtype
+        dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
+        test = masked_all((2,), dtype=dt)
+        control = array([(1, (1, 1)), (1, (1, 1))],
+                        mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
+        assert_equal(test, control)
+        test = masked_all((2,), dtype=dt)
+        control = array([(1, (1, 1)), (1, (1, 1))],
+                        mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
+        assert_equal(test, control)
+        test = masked_all((1, 1), dtype=dt)
+        control = array([[(1, (1, 1))]], mask=[[(1, (1, 1))]], dtype=dt)
+        assert_equal(test, control)
+
+    def test_masked_all_with_object_nested(self):
+        # Test masked_all works with nested array with dtype of an 'object'
+        # refers to issue #15895
+        my_dtype = np.dtype([('b', ([('c', object)], (1,)))])
+        masked_arr = np.ma.masked_all((1,), my_dtype)
+
+        assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray)
+        assert_equal(type(masked_arr['b']['c']), np.ma.core.MaskedArray)
+        assert_equal(len(masked_arr['b']['c']), 1)
+        assert_equal(masked_arr['b']['c'].shape, (1, 1))
+        assert_equal(masked_arr['b']['c']._fill_value.shape, ())
+
+    def test_masked_all_with_object(self):
+        # same as above except that the array is not nested
+        my_dtype = np.dtype([('b', (object, (1,)))])
+        masked_arr = np.ma.masked_all((1,), my_dtype)
+
+        assert_equal(type(masked_arr['b']), np.ma.core.MaskedArray)
+        assert_equal(len(masked_arr['b']), 1)
+        assert_equal(masked_arr['b'].shape, (1, 1))
+        assert_equal(masked_arr['b']._fill_value.shape, ())
+
+    def test_masked_all_like(self):
+        # Tests masked_all
+        # Standard dtype
+        base = array([1, 2], dtype=float)
+        test = masked_all_like(base)
+        control = array([1, 1], mask=[1, 1], dtype=float)
+        assert_equal(test, control)
+        # Flexible dtype
+        dt = np.dtype({'names': ['a', 'b'], 'formats': ['f', 'f']})
+        base = array([(0, 0), (0, 0)], mask=[(1, 1), (1, 1)], dtype=dt)
+        test = masked_all_like(base)
+        control = array([(10, 10), (10, 10)], mask=[(1, 1), (1, 1)], dtype=dt)
+        assert_equal(test, control)
+        # Nested dtype
+        dt = np.dtype([('a', 'f'), ('b', [('ba', 'f'), ('bb', 'f')])])
+        control = array([(1, (1, 1)), (1, (1, 1))],
+                        mask=[(1, (1, 1)), (1, (1, 1))], dtype=dt)
+        test = masked_all_like(control)
+        assert_equal(test, control)
+
+    def check_clump(self, f):
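+        # Exhaustive check: for every array length i and mask bit-pattern j,
+        # the slices returned by f must cover exactly the masked (clump_masked)
+        # or unmasked (clump_unmasked) elements, verified by comparing the data
+        # summed over the slices against the matching compressed sum.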
+        for i in range(1, 7):
+            for j in range(2**i):
+                k = np.arange(i, dtype=int)
+                ja = np.full(i, j, dtype=int)
+                a = masked_array(2**k)
+                a.mask = (ja & (2**k)) != 0
+                s = 0
+                for sl in f(a):
+                    s += a.data[sl].sum()
+                if f == clump_unmasked:
+                    assert_equal(a.compressed().sum(), s)
+                else:
+                    a.mask = ~a.mask
+                    assert_equal(a.compressed().sum(), s)
+
+    def test_clump_masked(self):
+        # Test clump_masked
+        a = masked_array(np.arange(10))
+        a[[0, 1, 2, 6, 8, 9]] = masked
+        #
+        test = clump_masked(a)
+        control = [slice(0, 3), slice(6, 7), slice(8, 10)]
+        assert_equal(test, control)
+
+        self.check_clump(clump_masked)
+
+    def test_clump_unmasked(self):
+        # Test clump_unmasked
+        a = masked_array(np.arange(10))
+        a[[0, 1, 2, 6, 8, 9]] = masked
+        test = clump_unmasked(a)
+        control = [slice(3, 6), slice(7, 8), ]
+        assert_equal(test, control)
+
+        self.check_clump(clump_unmasked)
+
+    def test_flatnotmasked_contiguous(self):
+        # Test flatnotmasked_contiguous
+        a = arange(10)
+        # No mask
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(0, a.size)])
+        # mask of all false
+        a.mask = np.zeros(10, dtype=bool)
+        assert_equal(test, [slice(0, a.size)])
+        # Some mask
+        a[(a < 3) | (a > 8) | (a == 5)] = masked
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [slice(3, 5), slice(6, 9)])
+        #
+        a[:] = masked
+        test = flatnotmasked_contiguous(a)
+        assert_equal(test, [])
+
+
+class TestAverage:
+    # Several tests of average. Why so many? Good point...
+    def test_testAverage1(self):
+        # Test of average.
+        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+        assert_equal(2.0, average(ott, axis=0))
+        assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
+        result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
+        assert_equal(2.0, result)
+        assert_(wts == 4.0)
+        ott[:] = masked
+        assert_equal(average(ott, axis=0).mask, [True])
+        ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
+        ott = ott.reshape(2, 2)
+        ott[:, 1] = masked
+        assert_equal(average(ott, axis=0), [2.0, 0.0])
+        assert_equal(average(ott, axis=1).mask[0], [True])
+        assert_equal([2., 0.], average(ott, axis=0))
+        result, wts = average(ott, axis=0, returned=True)
+        assert_equal(wts, [1., 0.])
+
+    def test_testAverage2(self):
+        # More tests of average.
+        w1 = [0, 1, 1, 1, 1, 0]
+        w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
+        x = arange(6, dtype=np.float_)
+        assert_equal(average(x, axis=0), 2.5)
+        assert_equal(average(x, axis=0, weights=w1), 2.5)
+        y = array([arange(6, dtype=np.float_), 2.0 * arange(6)])
+        assert_equal(average(y, None), np.add.reduce(np.arange(6)) * 3. / 12.)
+        assert_equal(average(y, axis=0), np.arange(6) * 3. / 2.)
+        assert_equal(average(y, axis=1),
+                     [average(x, axis=0), average(x, axis=0) * 2.0])
+        assert_equal(average(y, None, weights=w2), 20. / 6.)
+        assert_equal(average(y, axis=0, weights=w2),
+                     [0., 1., 2., 3., 4., 10.])
+        assert_equal(average(y, axis=1),
+                     [average(x, axis=0), average(x, axis=0) * 2.0])
+        m1 = zeros(6)
+        m2 = [0, 0, 1, 1, 0, 0]
+        m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
+        m4 = ones(6)
+        m5 = [0, 1, 1, 1, 1, 1]
+        assert_equal(average(masked_array(x, m1), axis=0), 2.5)
+        assert_equal(average(masked_array(x, m2), axis=0), 2.5)
+        assert_equal(average(masked_array(x, m4), axis=0).mask, [True])
+        assert_equal(average(masked_array(x, m5), axis=0), 0.0)
+        assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
+        z = masked_array(y, m3)
+        assert_equal(average(z, None), 20. / 6.)
+        assert_equal(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
+        assert_equal(average(z, axis=1), [2.5, 5.0])
+        assert_equal(average(z, axis=0, weights=w2),
+                     [0., 1., 99., 99., 4.0, 10.0])
+
+    def test_testAverage3(self):
+        # Yet more tests of average!
+        a = arange(6)
+        b = arange(6) * 3
+        r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
+        assert_equal(shape(r1), shape(w1))
+        assert_equal(r1.shape, w1.shape)
+        r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)
+        assert_equal(shape(w2), shape(r2))
+        r2, w2 = average(ones((2, 2, 3)), returned=True)
+        assert_equal(shape(w2), shape(r2))
+        r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)
+        assert_equal(shape(w2), shape(r2))
+        a2d = array([[1, 2], [0, 4]], float)
+        a2dm = masked_array(a2d, [[False, False], [True, False]])
+        a2da = average(a2d, axis=0)
+        assert_equal(a2da, [0.5, 3.0])
+        a2dma = average(a2dm, axis=0)
+        assert_equal(a2dma, [1.0, 3.0])
+        a2dma = average(a2dm, axis=None)
+        assert_equal(a2dma, 7. / 3.)
+        a2dma = average(a2dm, axis=1)
+        assert_equal(a2dma, [1.5, 4.0])
+
+    def test_testAverage4(self):
+        # Test that `keepdims` works with average
+        x = np.array([2, 3, 4]).reshape(3, 1)
+        b = np.ma.array(x, mask=[[False], [False], [True]])
+        w = np.array([4, 5, 6]).reshape(3, 1)
+        actual = average(b, weights=w, axis=1, keepdims=True)
+        desired = masked_array([[2.], [3.], [4.]], [[False], [False], [True]])
+        assert_equal(actual, desired)
+
+    def test_onintegers_with_mask(self):
+        # Test average on integers with mask
+        a = average(array([1, 2]))
+        assert_equal(a, 1.5)
+        a = average(array([1, 2, 3, 4], mask=[False, False, True, True]))
+        assert_equal(a, 1.5)
+
+    def test_complex(self):
+        # Test with complex data.
+        # (Regression test for https://github.com/numpy/numpy/issues/2684)
+        mask = np.array([[0, 0, 0, 1, 0],
+                         [0, 1, 0, 0, 0]], dtype=bool)
+        a = masked_array([[0, 1+2j, 3+4j, 5+6j, 7+8j],
+                          [9j, 0+1j, 2+3j, 4+5j, 7+7j]],
+                         mask=mask)
+
+        av = average(a)
+        expected = np.average(a.compressed())
+        assert_almost_equal(av.real, expected.real)
+        assert_almost_equal(av.imag, expected.imag)
+
+        av0 = average(a, axis=0)
+        expected0 = average(a.real, axis=0) + average(a.imag, axis=0)*1j
+        assert_almost_equal(av0.real, expected0.real)
+        assert_almost_equal(av0.imag, expected0.imag)
+
+        av1 = average(a, axis=1)
+        expected1 = average(a.real, axis=1) + average(a.imag, axis=1)*1j
+        assert_almost_equal(av1.real, expected1.real)
+        assert_almost_equal(av1.imag, expected1.imag)
+
+        # Test with the 'weights' argument.
+        wts = np.array([[0.5, 1.0, 2.0, 1.0, 0.5],
+                        [1.0, 1.0, 1.0, 1.0, 1.0]])
+        wav = average(a, weights=wts)
+        expected = np.average(a.compressed(), weights=wts[~mask])
+        assert_almost_equal(wav.real, expected.real)
+        assert_almost_equal(wav.imag, expected.imag)
+
+        wav0 = average(a, weights=wts, axis=0)
+        expected0 = (average(a.real, weights=wts, axis=0) +
+                     average(a.imag, weights=wts, axis=0)*1j)
+        assert_almost_equal(wav0.real, expected0.real)
+        assert_almost_equal(wav0.imag, expected0.imag)
+
+        wav1 = average(a, weights=wts, axis=1)
+        expected1 = (average(a.real, weights=wts, axis=1) +
+                     average(a.imag, weights=wts, axis=1)*1j)
+        assert_almost_equal(wav1.real, expected1.real)
+        assert_almost_equal(wav1.imag, expected1.imag)
+
+    @pytest.mark.parametrize(
+        'x, axis, expected_avg, weights, expected_wavg, expected_wsum',
+        [([1, 2, 3], None, [2.0], [3, 4, 1], [1.75], [8.0]),
+         ([[1, 2, 5], [1, 6, 11]], 0, [[1.0, 4.0, 8.0]],
+          [1, 3], [[1.0, 5.0, 9.5]], [[4, 4, 4]])],
+    )
+    def test_basic_keepdims(self, x, axis, expected_avg,
+                            weights, expected_wavg, expected_wsum):
+        avg = np.ma.average(x, axis=axis, keepdims=True)
+        assert avg.shape == np.shape(expected_avg)
+        assert_array_equal(avg, expected_avg)
+
+        wavg = np.ma.average(x, axis=axis, weights=weights, keepdims=True)
+        assert wavg.shape == np.shape(expected_wavg)
+        assert_array_equal(wavg, expected_wavg)
+
+        wavg, wsum = np.ma.average(x, axis=axis, weights=weights,
+                                   returned=True, keepdims=True)
+        assert wavg.shape == np.shape(expected_wavg)
+        assert_array_equal(wavg, expected_wavg)
+        assert wsum.shape == np.shape(expected_wsum)
+        assert_array_equal(wsum, expected_wsum)
+
+    def test_masked_weights(self):
+        # Test with masked weights.
+        # (Regression test for https://github.com/numpy/numpy/issues/10438)
+        a = np.ma.array(np.arange(9).reshape(3, 3),
+                        mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]])
+        weights_unmasked = masked_array([5, 28, 31], mask=False)
+        weights_masked = masked_array([5, 28, 31], mask=[1, 0, 0])
+
+        avg_unmasked = average(a, axis=0,
+                               weights=weights_unmasked, returned=False)
+        expected_unmasked = np.array([6.0, 5.21875, 6.21875])
+        assert_almost_equal(avg_unmasked, expected_unmasked)
+
+        avg_masked = average(a, axis=0, weights=weights_masked, returned=False)
+        expected_masked = np.array([6.0, 5.576271186440678, 6.576271186440678])
+        assert_almost_equal(avg_masked, expected_masked)
+
+        # weights should be masked if needed
+        # depending on the array mask. This is to avoid summing
+        # masked nan or other values that are not cancelled by a zero
+        a = np.ma.array([1.0,   2.0,   3.0,  4.0],
+                   mask=[False, False, True, True])
+        avg_unmasked = average(a, weights=[1, 1, 1, np.nan])
+
+        assert_almost_equal(avg_unmasked, 1.5)
+
+        a = np.ma.array([
+            [1.0, 2.0, 3.0, 4.0],
+            [5.0, 6.0, 7.0, 8.0],
+            [9.0, 1.0, 2.0, 3.0],
+        ], mask=[
+            [False, True, True, False],
+            [True, False, True, True],
+            [True, False, True, False],
+        ])
+
+        avg_masked = np.ma.average(a, weights=[1, np.nan, 1], axis=0)
+        avg_expected = np.ma.array([1.0, np.nan, np.nan, 3.5],
+                              mask=[False, True, True, False])
+
+        assert_almost_equal(avg_masked, avg_expected)
+        assert_equal(avg_masked.mask, avg_expected.mask)
+
+
+class TestConcatenator:
+    # Tests for mr_, the equivalent of r_ for masked arrays.
+
+    def test_1d(self):
+        # Tests mr_ on 1D arrays.
+        assert_array_equal(mr_[1, 2, 3, 4, 5, 6], array([1, 2, 3, 4, 5, 6]))
+        b = ones(5)
+        m = [1, 0, 0, 0, 0]
+        d = masked_array(b, mask=m)
+        c = mr_[d, 0, 0, d]
+        assert_(isinstance(c, MaskedArray))
+        assert_array_equal(c, [1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1])
+        assert_array_equal(c.mask, mr_[m, 0, 0, m])
+
+    def test_2d(self):
+        # Tests mr_ on 2D arrays.
+        a_1 = np.random.rand(5, 5)
+        a_2 = np.random.rand(5, 5)
+        m_1 = np.round(np.random.rand(5, 5), 0)
+        m_2 = np.round(np.random.rand(5, 5), 0)
+        b_1 = masked_array(a_1, mask=m_1)
+        b_2 = masked_array(a_2, mask=m_2)
+        # append columns
+        d = mr_['1', b_1, b_2]
+        assert_(d.shape == (5, 10))
+        assert_array_equal(d[:, :5], b_1)
+        assert_array_equal(d[:, 5:], b_2)
+        assert_array_equal(d.mask, np.r_['1', m_1, m_2])
+        d = mr_[b_1, b_2]
+        assert_(d.shape == (10, 5))
+        assert_array_equal(d[:5,:], b_1)
+        assert_array_equal(d[5:,:], b_2)
+        assert_array_equal(d.mask, np.r_[m_1, m_2])
+
+    def test_masked_constant(self):
+        actual = mr_[np.ma.masked, 1]
+        assert_equal(actual.mask, [True, False])
+        assert_equal(actual.data[1], 1)
+
+        actual = mr_[[1, 2], np.ma.masked]
+        assert_equal(actual.mask, [False, False, True])
+        assert_equal(actual.data[:2], [1, 2])
+
+
+class TestNotMasked:
+    # Tests notmasked_edges and notmasked_contiguous.
+
+    def test_edges(self):
+        # Tests unmasked_edges
+        data = masked_array(np.arange(25).reshape(5, 5),
+                            mask=[[0, 0, 1, 0, 0],
+                                  [0, 0, 0, 1, 1],
+                                  [1, 1, 0, 0, 0],
+                                  [0, 0, 0, 0, 0],
+                                  [1, 1, 1, 0, 0]],)
+        test = notmasked_edges(data, None)
+        assert_equal(test, [0, 24])
+        test = notmasked_edges(data, 0)
+        assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)])
+        assert_equal(test[1], [(3, 3, 3, 4, 4), (0, 1, 2, 3, 4)])
+        test = notmasked_edges(data, 1)
+        assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 2, 0, 3)])
+        assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 2, 4, 4, 4)])
+        #
+        test = notmasked_edges(data.data, None)
+        assert_equal(test, [0, 24])
+        test = notmasked_edges(data.data, 0)
+        assert_equal(test[0], [(0, 0, 0, 0, 0), (0, 1, 2, 3, 4)])
+        assert_equal(test[1], [(4, 4, 4, 4, 4), (0, 1, 2, 3, 4)])
+        test = notmasked_edges(data.data, -1)
+        assert_equal(test[0], [(0, 1, 2, 3, 4), (0, 0, 0, 0, 0)])
+        assert_equal(test[1], [(0, 1, 2, 3, 4), (4, 4, 4, 4, 4)])
+        #
+        data[-2] = masked
+        test = notmasked_edges(data, 0)
+        assert_equal(test[0], [(0, 0, 1, 0, 0), (0, 1, 2, 3, 4)])
+        assert_equal(test[1], [(1, 1, 2, 4, 4), (0, 1, 2, 3, 4)])
+        test = notmasked_edges(data, -1)
+        assert_equal(test[0], [(0, 1, 2, 4), (0, 0, 2, 3)])
+        assert_equal(test[1], [(0, 1, 2, 4), (4, 2, 4, 4)])
+
+    def test_contiguous(self):
+        # Tests notmasked_contiguous
+        a = masked_array(np.arange(24).reshape(3, 8),
+                         mask=[[0, 0, 0, 0, 1, 1, 1, 1],
+                               [1, 1, 1, 1, 1, 1, 1, 1],
+                               [0, 0, 0, 0, 0, 0, 1, 0]])
+        tmp = notmasked_contiguous(a, None)
+        assert_equal(tmp, [
+            slice(0, 4, None),
+            slice(16, 22, None),
+            slice(23, 24, None)
+        ])
+
+        tmp = notmasked_contiguous(a, 0)
+        assert_equal(tmp, [
+            [slice(0, 1, None), slice(2, 3, None)],
+            [slice(0, 1, None), slice(2, 3, None)],
+            [slice(0, 1, None), slice(2, 3, None)],
+            [slice(0, 1, None), slice(2, 3, None)],
+            [slice(2, 3, None)],
+            [slice(2, 3, None)],
+            [],
+            [slice(2, 3, None)]
+        ])
+        #
+        tmp = notmasked_contiguous(a, 1)
+        assert_equal(tmp, [
+            [slice(0, 4, None)],
+            [],
+            [slice(0, 6, None), slice(7, 8, None)]
+        ])
+
+
+class TestCompressFunctions:
+
+    def test_compress_nd(self):
+        # Tests compress_nd
+        x = np.array(list(range(3*4*5))).reshape(3, 4, 5)
+        m = np.zeros((3,4,5)).astype(bool)
+        m[1,1,1] = True
+        x = array(x, mask=m)
+
+        # axis=None
+        a = compress_nd(x)
+        assert_equal(a, [[[ 0,  2,  3,  4],
+                          [10, 12, 13, 14],
+                          [15, 17, 18, 19]],
+                         [[40, 42, 43, 44],
+                          [50, 52, 53, 54],
+                          [55, 57, 58, 59]]])
+
+        # axis=0
+        a = compress_nd(x, 0)
+        assert_equal(a, [[[ 0,  1,  2,  3,  4],
+                          [ 5,  6,  7,  8,  9],
+                          [10, 11, 12, 13, 14],
+                          [15, 16, 17, 18, 19]],
+                         [[40, 41, 42, 43, 44],
+                          [45, 46, 47, 48, 49],
+                          [50, 51, 52, 53, 54],
+                          [55, 56, 57, 58, 59]]])
+
+        # axis=1
+        a = compress_nd(x, 1)
+        assert_equal(a, [[[ 0,  1,  2,  3,  4],
+                          [10, 11, 12, 13, 14],
+                          [15, 16, 17, 18, 19]],
+                         [[20, 21, 22, 23, 24],
+                          [30, 31, 32, 33, 34],
+                          [35, 36, 37, 38, 39]],
+                         [[40, 41, 42, 43, 44],
+                          [50, 51, 52, 53, 54],
+                          [55, 56, 57, 58, 59]]])
+
+        a2 = compress_nd(x, (1,))
+        a3 = compress_nd(x, -2)
+        a4 = compress_nd(x, (-2,))
+        assert_equal(a, a2)
+        assert_equal(a, a3)
+        assert_equal(a, a4)
+
+        # axis=2
+        a = compress_nd(x, 2)
+        assert_equal(a, [[[ 0, 2,  3,  4],
+                          [ 5, 7,  8,  9],
+                          [10, 12, 13, 14],
+                          [15, 17, 18, 19]],
+                         [[20, 22, 23, 24],
+                          [25, 27, 28, 29],
+                          [30, 32, 33, 34],
+                          [35, 37, 38, 39]],
+                         [[40, 42, 43, 44],
+                          [45, 47, 48, 49],
+                          [50, 52, 53, 54],
+                          [55, 57, 58, 59]]])
+
+        a2 = compress_nd(x, (2,))
+        a3 = compress_nd(x, -1)
+        a4 = compress_nd(x, (-1,))
+        assert_equal(a, a2)
+        assert_equal(a, a3)
+        assert_equal(a, a4)
+
+        # axis=(0, 1)
+        a = compress_nd(x, (0, 1))
+        assert_equal(a, [[[ 0,  1,  2,  3,  4],
+                          [10, 11, 12, 13, 14],
+                          [15, 16, 17, 18, 19]],
+                         [[40, 41, 42, 43, 44],
+                          [50, 51, 52, 53, 54],
+                          [55, 56, 57, 58, 59]]])
+        a2 = compress_nd(x, (0, -2))
+        assert_equal(a, a2)
+
+        # axis=(1, 2)
+        a = compress_nd(x, (1, 2))
+        assert_equal(a, [[[ 0,  2,  3,  4],
+                          [10, 12, 13, 14],
+                          [15, 17, 18, 19]],
+                         [[20, 22, 23, 24],
+                          [30, 32, 33, 34],
+                          [35, 37, 38, 39]],
+                         [[40, 42, 43, 44],
+                          [50, 52, 53, 54],
+                          [55, 57, 58, 59]]])
+
+        a2 = compress_nd(x, (-2, 2))
+        a3 = compress_nd(x, (1, -1))
+        a4 = compress_nd(x, (-2, -1))
+        assert_equal(a, a2)
+        assert_equal(a, a3)
+        assert_equal(a, a4)
+
+        # axis=(0, 2)
+        a = compress_nd(x, (0, 2))
+        assert_equal(a, [[[ 0,  2,  3,  4],
+                          [ 5,  7,  8,  9],
+                          [10, 12, 13, 14],
+                          [15, 17, 18, 19]],
+                         [[40, 42, 43, 44],
+                          [45, 47, 48, 49],
+                          [50, 52, 53, 54],
+                          [55, 57, 58, 59]]])
+
+        a2 = compress_nd(x, (0, -1))
+        assert_equal(a, a2)
+
+    def test_compress_rowcols(self):
+        # Tests compress_rowcols
+        x = array(np.arange(9).reshape(3, 3),
+                  mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+        assert_equal(compress_rowcols(x), [[4, 5], [7, 8]])
+        assert_equal(compress_rowcols(x, 0), [[3, 4, 5], [6, 7, 8]])
+        assert_equal(compress_rowcols(x, 1), [[1, 2], [4, 5], [7, 8]])
+        x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
+        assert_equal(compress_rowcols(x), [[0, 2], [6, 8]])
+        assert_equal(compress_rowcols(x, 0), [[0, 1, 2], [6, 7, 8]])
+        assert_equal(compress_rowcols(x, 1), [[0, 2], [3, 5], [6, 8]])
+        x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]])
+        assert_equal(compress_rowcols(x), [[8]])
+        assert_equal(compress_rowcols(x, 0), [[6, 7, 8]])
+        assert_equal(compress_rowcols(x, 1,), [[2], [5], [8]])
+        x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+        assert_equal(compress_rowcols(x).size, 0)
+        assert_equal(compress_rowcols(x, 0).size, 0)
+        assert_equal(compress_rowcols(x, 1).size, 0)
+
+    def test_mask_rowcols(self):
+        # Tests mask_rowcols.
+        x = array(np.arange(9).reshape(3, 3),
+                  mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+        assert_equal(mask_rowcols(x).mask,
+                     [[1, 1, 1], [1, 0, 0], [1, 0, 0]])
+        assert_equal(mask_rowcols(x, 0).mask,
+                     [[1, 1, 1], [0, 0, 0], [0, 0, 0]])
+        assert_equal(mask_rowcols(x, 1).mask,
+                     [[1, 0, 0], [1, 0, 0], [1, 0, 0]])
+        x = array(x._data, mask=[[0, 0, 0], [0, 1, 0], [0, 0, 0]])
+        assert_equal(mask_rowcols(x).mask,
+                     [[0, 1, 0], [1, 1, 1], [0, 1, 0]])
+        assert_equal(mask_rowcols(x, 0).mask,
+                     [[0, 0, 0], [1, 1, 1], [0, 0, 0]])
+        assert_equal(mask_rowcols(x, 1).mask,
+                     [[0, 1, 0], [0, 1, 0], [0, 1, 0]])
+        x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 0]])
+        assert_equal(mask_rowcols(x).mask,
+                     [[1, 1, 1], [1, 1, 1], [1, 1, 0]])
+        assert_equal(mask_rowcols(x, 0).mask,
+                     [[1, 1, 1], [1, 1, 1], [0, 0, 0]])
+        assert_equal(mask_rowcols(x, 1,).mask,
+                     [[1, 1, 0], [1, 1, 0], [1, 1, 0]])
+        x = array(x._data, mask=[[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+        assert_(mask_rowcols(x).all() is masked)
+        assert_(mask_rowcols(x, 0).all() is masked)
+        assert_(mask_rowcols(x, 1).all() is masked)
+        assert_(mask_rowcols(x).mask.all())
+        assert_(mask_rowcols(x, 0).mask.all())
+        assert_(mask_rowcols(x, 1).mask.all())
+
+    @pytest.mark.parametrize("axis", [None, 0, 1])
+    @pytest.mark.parametrize(["func", "rowcols_axis"],
+                             [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)])
+    def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis):
+        # Test deprecation of the axis argument to `mask_rows` and `mask_cols`
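+        # (note) mask_rows(x) and mask_cols(x) are documented shortcuts for
+        # mask_rowcols(x, axis=0) and mask_rowcols(x, axis=1), so the
+        # deprecated `axis` keyword is checked against the matching
+        # mask_rowcols call below.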
+        x = array(np.arange(9).reshape(3, 3),
+                  mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+
+        with assert_warns(DeprecationWarning):
+            res = func(x, axis=axis)
+            assert_equal(res, mask_rowcols(x, rowcols_axis))
+
+    def test_dot(self):
+        # Tests dot product
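+        # (note) with strict=True, np.ma.dot propagates masks: any output
+        # entry whose sum involves a masked input is itself masked.  With
+        # strict=False the computation runs on data with masked entries
+        # filled with 0, which is why the checks below compare against
+        # np.dot(a.filled(0), b.filled(0)).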
+        n = np.arange(1, 7)
+        #
+        m = [1, 0, 0, 0, 0, 0]
+        a = masked_array(n, mask=m).reshape(2, 3)
+        b = masked_array(n, mask=m).reshape(3, 2)
+        c = dot(a, b, strict=True)
+        assert_equal(c.mask, [[1, 1], [1, 0]])
+        c = dot(b, a, strict=True)
+        assert_equal(c.mask, [[1, 1, 1], [1, 0, 0], [1, 0, 0]])
+        c = dot(a, b, strict=False)
+        assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+        c = dot(b, a, strict=False)
+        assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+        #
+        m = [0, 0, 0, 0, 0, 1]
+        a = masked_array(n, mask=m).reshape(2, 3)
+        b = masked_array(n, mask=m).reshape(3, 2)
+        c = dot(a, b, strict=True)
+        assert_equal(c.mask, [[0, 1], [1, 1]])
+        c = dot(b, a, strict=True)
+        assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [1, 1, 1]])
+        c = dot(a, b, strict=False)
+        assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+        assert_equal(c, dot(a, b))
+        c = dot(b, a, strict=False)
+        assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+        #
+        m = [0, 0, 0, 0, 0, 0]
+        a = masked_array(n, mask=m).reshape(2, 3)
+        b = masked_array(n, mask=m).reshape(3, 2)
+        c = dot(a, b)
+        assert_equal(c.mask, nomask)
+        c = dot(b, a)
+        assert_equal(c.mask, nomask)
+        #
+        a = masked_array(n, mask=[1, 0, 0, 0, 0, 0]).reshape(2, 3)
+        b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2)
+        c = dot(a, b, strict=True)
+        assert_equal(c.mask, [[1, 1], [0, 0]])
+        c = dot(a, b, strict=False)
+        assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+        c = dot(b, a, strict=True)
+        assert_equal(c.mask, [[1, 0, 0], [1, 0, 0], [1, 0, 0]])
+        c = dot(b, a, strict=False)
+        assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+        #
+        a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3)
+        b = masked_array(n, mask=[0, 0, 0, 0, 0, 0]).reshape(3, 2)
+        c = dot(a, b, strict=True)
+        assert_equal(c.mask, [[0, 0], [1, 1]])
+        c = dot(a, b)
+        assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+        c = dot(b, a, strict=True)
+        assert_equal(c.mask, [[0, 0, 1], [0, 0, 1], [0, 0, 1]])
+        c = dot(b, a, strict=False)
+        assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+        #
+        a = masked_array(n, mask=[0, 0, 0, 0, 0, 1]).reshape(2, 3)
+        b = masked_array(n, mask=[0, 0, 1, 0, 0, 0]).reshape(3, 2)
+        c = dot(a, b, strict=True)
+        assert_equal(c.mask, [[1, 0], [1, 1]])
+        c = dot(a, b, strict=False)
+        assert_equal(c, np.dot(a.filled(0), b.filled(0)))
+        c = dot(b, a, strict=True)
+        assert_equal(c.mask, [[0, 0, 1], [1, 1, 1], [0, 0, 1]])
+        c = dot(b, a, strict=False)
+        assert_equal(c, np.dot(b.filled(0), a.filled(0)))
+        #
+        a = masked_array(np.arange(8).reshape(2, 2, 2),
+                         mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+        b = masked_array(np.arange(8).reshape(2, 2, 2),
+                         mask=[[[0, 0], [0, 0]], [[0, 0], [0, 1]]])
+        c = dot(a, b, strict=True)
+        assert_equal(c.mask,
+                     [[[[1, 1], [1, 1]], [[0, 0], [0, 1]]],
+                      [[[0, 0], [0, 1]], [[0, 0], [0, 1]]]])
+        c = dot(a, b, strict=False)
+        assert_equal(c.mask,
+                     [[[[0, 0], [0, 1]], [[0, 0], [0, 0]]],
+                      [[[0, 0], [0, 0]], [[0, 0], [0, 0]]]])
+        c = dot(b, a, strict=True)
+        assert_equal(c.mask,
+                     [[[[1, 0], [0, 0]], [[1, 0], [0, 0]]],
+                      [[[1, 0], [0, 0]], [[1, 1], [1, 1]]]])
+        c = dot(b, a, strict=False)
+        assert_equal(c.mask,
+                     [[[[0, 0], [0, 0]], [[0, 0], [0, 0]]],
+                      [[[0, 0], [0, 0]], [[1, 0], [0, 0]]]])
+        #
+        a = masked_array(np.arange(8).reshape(2, 2, 2),
+                         mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+        b = 5.
+        c = dot(a, b, strict=True)
+        assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+        c = dot(a, b, strict=False)
+        assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+        c = dot(b, a, strict=True)
+        assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+        c = dot(b, a, strict=False)
+        assert_equal(c.mask, [[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+        #
+        a = masked_array(np.arange(8).reshape(2, 2, 2),
+                         mask=[[[1, 0], [0, 0]], [[0, 0], [0, 0]]])
+        b = masked_array(np.arange(2), mask=[0, 1])
+        c = dot(a, b, strict=True)
+        assert_equal(c.mask, [[1, 1], [1, 1]])
+        c = dot(a, b, strict=False)
+        assert_equal(c.mask, [[1, 0], [0, 0]])
+
+    def test_dot_returns_maskedarray(self):
+        # See gh-6611
+        a = np.eye(3)
+        b = array(a)
+        assert_(type(dot(a, a)) is MaskedArray)
+        assert_(type(dot(a, b)) is MaskedArray)
+        assert_(type(dot(b, a)) is MaskedArray)
+        assert_(type(dot(b, b)) is MaskedArray)
+
+    def test_dot_out(self):
+        a = array(np.eye(3))
+        out = array(np.zeros((3, 3)))
+        res = dot(a, a, out=out)
+        assert_(res is out)
+        assert_equal(a, res)
+
+
+class TestApplyAlongAxis:
+    # Tests 2D functions
+    def test_3d(self):
+        a = arange(12.).reshape(2, 2, 3)
+
+        def myfunc(b):
+            return b[1]
+
+        xa = apply_along_axis(myfunc, 2, a)
+        assert_equal(xa, [[1, 4], [7, 10]])
+
+    # Tests functions that take keyword arguments
+    def test_3d_kwargs(self):
+        a = arange(12).reshape(2, 2, 3)
+
+        def myfunc(b, offset=0):
+            return b[1+offset]
+
+        xa = apply_along_axis(myfunc, 2, a, offset=1)
+        assert_equal(xa, [[2, 5], [8, 11]])
+
+
+class TestApplyOverAxes:
+    # Tests apply_over_axes
+    def test_basic(self):
+        a = arange(24).reshape(2, 3, 4)
+        test = apply_over_axes(np.sum, a, [0, 2])
+        ctrl = np.array([[[60], [92], [124]]])
+        assert_equal(test, ctrl)
+        a[(a % 2).astype(bool)] = masked
+        test = apply_over_axes(np.sum, a, [0, 2])
+        ctrl = np.array([[[28], [44], [60]]])
+        assert_equal(test, ctrl)
+
+
+class TestMedian:
+    def test_pytype(self):
+        r = np.ma.median([[np.inf, np.inf], [np.inf, np.inf]], axis=-1)
+        assert_equal(r, np.inf)
+
+    def test_inf(self):
+        # test that median handles inf values, including the all-masked case
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]]), axis=-1)
+        assert_equal(r, np.inf)
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]]), axis=None)
+        assert_equal(r, np.inf)
+        # all masked
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]], mask=True),
+                         axis=-1)
+        assert_equal(r.mask, True)
+        r = np.ma.median(np.ma.masked_array([[np.inf, np.inf],
+                                             [np.inf, np.inf]], mask=True),
+                         axis=None)
+        assert_equal(r.mask, True)
+
+    def test_non_masked(self):
+        x = np.arange(9)
+        assert_equal(np.ma.median(x), 4.)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = range(8)
+        assert_equal(np.ma.median(x), 3.5)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = 5
+        assert_equal(np.ma.median(x), 5.)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        # integer
+        x = np.arange(9 * 8).reshape(9, 8)
+        assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+        assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+        assert_(np.ma.median(x, axis=1) is not MaskedArray)
+        # float
+        x = np.arange(9 * 8.).reshape(9, 8)
+        assert_equal(np.ma.median(x, axis=0), np.median(x, axis=0))
+        assert_equal(np.ma.median(x, axis=1), np.median(x, axis=1))
+        assert_(np.ma.median(x, axis=1) is not MaskedArray)
+
+    def test_docstring_examples(self):
+        "test the examples given in the docstring of ma.median"
+        x = array(np.arange(8), mask=[0]*4 + [1]*4)
+        assert_equal(np.ma.median(x), 1.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
+        assert_equal(np.ma.median(x), 2.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        ma_x = np.ma.median(x, axis=-1, overwrite_input=True)
+        assert_equal(ma_x, [2., 5.])
+        assert_equal(ma_x.shape, (2,), "shape mismatch")
+        assert_(type(ma_x) is MaskedArray)
+
+    def test_axis_argument_errors(self):
+        msg = "mask = %s, ndim = %s, axis = %s, overwrite_input = %s"
+        for ndmin in range(5):
+            for mask in [False, True]:
+                x = array(1, ndmin=ndmin, mask=mask)
+
+                # Valid axis values should not raise exception
+                args = itertools.product(range(-ndmin, ndmin), [False, True])
+                for axis, over in args:
+                    try:
+                        np.ma.median(x, axis=axis, overwrite_input=over)
+                    except Exception:
+                        raise AssertionError(msg % (mask, ndmin, axis, over))
+
+                # Invalid axis values should raise exception
+                args = itertools.product([-(ndmin + 1), ndmin], [False, True])
+                for axis, over in args:
+                    try:
+                        np.ma.median(x, axis=axis, overwrite_input=over)
+                    except np.AxisError:
+                        pass
+                    else:
+                        raise AssertionError(msg % (mask, ndmin, axis, over))
+
+    def test_masked_0d(self):
+        # Check values
+        x = array(1, mask=False)
+        assert_equal(np.ma.median(x), 1)
+        x = array(1, mask=True)
+        assert_equal(np.ma.median(x), np.ma.masked)
+
+    def test_masked_1d(self):
+        x = array(np.arange(5), mask=True)
+        assert_equal(np.ma.median(x), np.ma.masked)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is np.ma.core.MaskedConstant)
+        x = array(np.arange(5), mask=False)
+        assert_equal(np.ma.median(x), 2.)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = array(np.arange(5), mask=[0,1,0,0,0])
+        assert_equal(np.ma.median(x), 2.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        x = array(np.arange(5), mask=[0,1,1,1,1])
+        assert_equal(np.ma.median(x), 0.)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        # integer
+        x = array(np.arange(5), mask=[0,1,1,0,0])
+        assert_equal(np.ma.median(x), 3.)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        # float
+        x = array(np.arange(5.), mask=[0,1,1,0,0])
+        assert_equal(np.ma.median(x), 3.)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        # integer
+        x = array(np.arange(6), mask=[0,1,1,1,1,0])
+        assert_equal(np.ma.median(x), 2.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        # float
+        x = array(np.arange(6.), mask=[0,1,1,1,1,0])
+        assert_equal(np.ma.median(x), 2.5)
+        assert_equal(np.ma.median(x).shape, (), "shape mismatch")
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+
+    def test_1d_shape_consistency(self):
+        assert_equal(np.ma.median(array([1,2,3],mask=[0,0,0])).shape,
+                     np.ma.median(array([1,2,3],mask=[0,1,0])).shape )
+
+    def test_2d(self):
+        # Tests median w/ 2D
+        (n, p) = (101, 30)
+        x = masked_array(np.linspace(-1., 1., n),)
+        x[:10] = x[-10:] = masked
+        z = masked_array(np.empty((n, p), dtype=float))
+        z[:, 0] = x[:]
+        idx = np.arange(len(x))
+        for i in range(1, p):
+            np.random.shuffle(idx)
+            z[:, i] = x[idx]
+        assert_equal(median(z[:, 0]), 0)
+        assert_equal(median(z), 0)
+        assert_equal(median(z, axis=0), np.zeros(p))
+        assert_equal(median(z.T, axis=1), np.zeros(p))
+
+    def test_2d_waxis(self):
+        # Tests median w/ 2D arrays along different axes.
+        x = masked_array(np.arange(30).reshape(10, 3))
+        x[:3] = x[-3:] = masked
+        assert_equal(median(x), 14.5)
+        assert_(type(np.ma.median(x)) is not MaskedArray)
+        assert_equal(median(x, axis=0), [13.5, 14.5, 15.5])
+        assert_(type(np.ma.median(x, axis=0)) is MaskedArray)
+        assert_equal(median(x, axis=1), [0, 0, 0, 10, 13, 16, 19, 0, 0, 0])
+        assert_(type(np.ma.median(x, axis=1)) is MaskedArray)
+        assert_equal(median(x, axis=1).mask, [1, 1, 1, 0, 0, 0, 0, 1, 1, 1])
+
+    def test_3d(self):
+        # Tests median w/ 3D
+        x = np.ma.arange(24).reshape(3, 4, 2)
+        x[x % 3 == 0] = masked
+        assert_equal(median(x, 0), [[12, 9], [6, 15], [12, 9], [18, 15]])
+        x.shape = (4, 3, 2)
+        assert_equal(median(x, 0), [[99, 10], [11, 99], [13, 14]])
+        x = np.ma.arange(24).reshape(4, 3, 2)
+        x[x % 5 == 0] = masked
+        assert_equal(median(x, 0), [[12, 10], [8, 9], [16, 17]])
+
+    def test_neg_axis(self):
+        x = masked_array(np.arange(30).reshape(10, 3))
+        x[:3] = x[-3:] = masked
+        assert_equal(median(x, axis=-1), median(x, axis=1))
+
+    def test_out_1d(self):
+        # integer float even odd
+        for v in (30, 30., 31, 31.):
+            x = masked_array(np.arange(v))
+            x[:3] = x[-3:] = masked
+            out = masked_array(np.ones(()))
+            r = median(x, out=out)
+            if v == 30:
+                assert_equal(out, 14.5)
+            else:
+                assert_equal(out, 15.)
+            assert_(r is out)
+            assert_(type(r) is MaskedArray)
+
+    def test_out(self):
+        # integer float even odd
+        for v in (40, 40., 30, 30.):
+            x = masked_array(np.arange(v).reshape(10, -1))
+            x[:3] = x[-3:] = masked
+            out = masked_array(np.ones(10))
+            r = median(x, axis=1, out=out)
+            if v == 30:
+                e = masked_array([0.]*3 + [10, 13, 16, 19] + [0.]*3,
+                                 mask=[True] * 3 + [False] * 4 + [True] * 3)
+            else:
+                e = masked_array([0.]*3 + [13.5, 17.5, 21.5, 25.5] + [0.]*3,
+                                 mask=[True]*3 + [False]*4 + [True]*3)
+            assert_equal(r, e)
+            assert_(r is out)
+            assert_(type(r) is MaskedArray)
+
+    @pytest.mark.parametrize(
+        argnames='axis',
+        argvalues=[
+            None,
+            1,
+            (1, ),
+            (0, 1),
+            (-3, -1),
+        ]
+    )
+    def test_keepdims_out(self, axis):
+        mask = np.zeros((3, 5, 7, 11), dtype=bool)
+        # Randomly set some elements to True:
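+        # (note) `w` holds 200 random index vectors, one row per dimension of
+        # `mask`; fancy-indexing with tuple(w) and assigning a nonzero value
+        # (np.nan casts to True for a boolean array) flips those positions.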
+        w = np.random.random((4, 200)) * np.array(mask.shape)[:, None]
+        w = w.astype(np.intp)
+        mask[tuple(w)] = np.nan
+        d = masked_array(np.ones(mask.shape), mask=mask)
+        if axis is None:
+            shape_out = (1,) * d.ndim
+        else:
+            axis_norm = normalize_axis_tuple(axis, d.ndim)
+            shape_out = tuple(
+                1 if i in axis_norm else d.shape[i] for i in range(d.ndim))
+        out = masked_array(np.empty(shape_out))
+        result = median(d, axis=axis, keepdims=True, out=out)
+        assert result is out
+        assert_equal(result.shape, shape_out)
+
+    def test_single_non_masked_value_on_axis(self):
+        data = [[1., 0.],
+                [0., 3.],
+                [0., 0.]]
+        masked_arr = np.ma.masked_equal(data, 0)
+        expected = [1., 3.]
+        assert_array_equal(np.ma.median(masked_arr, axis=0),
+                           expected)
+
+    def test_nan(self):
+        for mask in (False, np.zeros(6, dtype=bool)):
+            dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
+            dm.mask = mask
+
+            # scalar result
+            r = np.ma.median(dm, axis=None)
+            assert_(np.isscalar(r))
+            assert_array_equal(r, np.nan)
+            r = np.ma.median(dm.ravel(), axis=0)
+            assert_(np.isscalar(r))
+            assert_array_equal(r, np.nan)
+
+            r = np.ma.median(dm, axis=0)
+            assert_equal(type(r), MaskedArray)
+            assert_array_equal(r, [1, np.nan, 3])
+            r = np.ma.median(dm, axis=1)
+            assert_equal(type(r), MaskedArray)
+            assert_array_equal(r, [np.nan, 2])
+            r = np.ma.median(dm, axis=-1)
+            assert_equal(type(r), MaskedArray)
+            assert_array_equal(r, [np.nan, 2])
+
+        dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
+        dm[:, 2] = np.ma.masked
+        assert_array_equal(np.ma.median(dm, axis=None), np.nan)
+        assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3])
+        assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5])
+
+    def test_out_nan(self):
+        o = np.ma.masked_array(np.zeros((4,)))
+        d = np.ma.masked_array(np.ones((3, 4)))
+        d[2, 1] = np.nan
+        d[2, 2] = np.ma.masked
+        assert_equal(np.ma.median(d, 0, out=o), o)
+        o = np.ma.masked_array(np.zeros((3,)))
+        assert_equal(np.ma.median(d, 1, out=o), o)
+        o = np.ma.masked_array(np.zeros(()))
+        assert_equal(np.ma.median(d, out=o), o)
+
+    def test_nan_behavior(self):
+        a = np.ma.masked_array(np.arange(24, dtype=float))
+        a[::3] = np.ma.masked
+        a[2] = np.nan
+        assert_array_equal(np.ma.median(a), np.nan)
+        assert_array_equal(np.ma.median(a, axis=0), np.nan)
+
+        a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4))
+        a.mask = np.arange(a.size) % 2 == 1
+        aorig = a.copy()
+        a[1, 2, 3] = np.nan
+        a[1, 1, 2] = np.nan
+
+        # no axis
+        assert_array_equal(np.ma.median(a), np.nan)
+        assert_(np.isscalar(np.ma.median(a)))
+
+        # axis0
+        b = np.ma.median(aorig, axis=0)
+        b[2, 3] = np.nan
+        b[1, 2] = np.nan
+        assert_equal(np.ma.median(a, 0), b)
+
+        # axis1
+        b = np.ma.median(aorig, axis=1)
+        b[1, 3] = np.nan
+        b[1, 2] = np.nan
+        assert_equal(np.ma.median(a, 1), b)
+
+        # axis02
+        b = np.ma.median(aorig, axis=(0, 2))
+        b[1] = np.nan
+        b[2] = np.nan
+        assert_equal(np.ma.median(a, (0, 2)), b)
+
+    def test_ambiguous_fill(self):
+        # 255 is max value, used as filler for sort
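+        # (note) the masked sort behind np.ma.median fills masked entries
+        # with the dtype's maximum (255 for uint8) so they land at the end;
+        # this guards against genuine 255s being confused with that filler.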
+        a = np.array([[3, 3, 255], [3, 3, 255]], dtype=np.uint8)
+        a = np.ma.masked_array(a, mask=a == 3)
+        assert_array_equal(np.ma.median(a, axis=1), 255)
+        assert_array_equal(np.ma.median(a, axis=1).mask, False)
+        assert_array_equal(np.ma.median(a, axis=0), a[0])
+        assert_array_equal(np.ma.median(a), 255)
+
+    def test_special(self):
+        for inf in [np.inf, -np.inf]:
+            a = np.array([[inf,  np.nan], [np.nan, np.nan]])
+            a = np.ma.masked_array(a, mask=np.isnan(a))
+            assert_equal(np.ma.median(a, axis=0), [inf,  np.nan])
+            assert_equal(np.ma.median(a, axis=1), [inf,  np.nan])
+            assert_equal(np.ma.median(a), inf)
+
+            a = np.array([[np.nan, np.nan, inf], [np.nan, np.nan, inf]])
+            a = np.ma.masked_array(a, mask=np.isnan(a))
+            assert_array_equal(np.ma.median(a, axis=1), inf)
+            assert_array_equal(np.ma.median(a, axis=1).mask, False)
+            assert_array_equal(np.ma.median(a, axis=0), a[0])
+            assert_array_equal(np.ma.median(a), inf)
+
+            # no mask
+            a = np.array([[inf, inf], [inf, inf]])
+            assert_equal(np.ma.median(a), inf)
+            assert_equal(np.ma.median(a, axis=0), inf)
+            assert_equal(np.ma.median(a, axis=1), inf)
+
+            a = np.array([[inf, 7, -inf, -9],
+                          [-10, np.nan, np.nan, 5],
+                          [4, np.nan, np.nan, inf]],
+                          dtype=np.float32)
+            a = np.ma.masked_array(a, mask=np.isnan(a))
+            if inf > 0:
+                assert_equal(np.ma.median(a, axis=0), [4., 7., -inf, 5.])
+                assert_equal(np.ma.median(a), 4.5)
+            else:
+                assert_equal(np.ma.median(a, axis=0), [-10., 7., -inf, -9.])
+                assert_equal(np.ma.median(a), -2.5)
+            assert_equal(np.ma.median(a, axis=1), [-1., -2.5, inf])
+
+            for i in range(0, 10):
+                for j in range(1, 10):
+                    a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
+                    a = np.ma.masked_array(a, mask=np.isnan(a))
+                    assert_equal(np.ma.median(a), inf)
+                    assert_equal(np.ma.median(a, axis=1), inf)
+                    assert_equal(np.ma.median(a, axis=0),
+                                 ([np.nan] * i) + [inf] * j)
+
+    def test_empty(self):
+        # empty arrays
+        a = np.ma.masked_array(np.array([], dtype=float))
+        with suppress_warnings() as w:
+            w.record(RuntimeWarning)
+            assert_array_equal(np.ma.median(a), np.nan)
+            assert_(w.log[0].category is RuntimeWarning)
+
+        # multiple dimensions
+        a = np.ma.masked_array(np.array([], dtype=float, ndmin=3))
+        # no axis
+        with suppress_warnings() as w:
+            w.record(RuntimeWarning)
+            warnings.filterwarnings('always', '', RuntimeWarning)
+            assert_array_equal(np.ma.median(a), np.nan)
+            assert_(w.log[0].category is RuntimeWarning)
+
+        # axis 0 and 1
+        b = np.ma.masked_array(np.array([], dtype=float, ndmin=2))
+        assert_equal(np.ma.median(a, axis=0), b)
+        assert_equal(np.ma.median(a, axis=1), b)
+
+        # axis 2
+        b = np.ma.masked_array(np.array(np.nan, dtype=float, ndmin=2))
+        with warnings.catch_warnings(record=True) as w:
+            warnings.filterwarnings('always', '', RuntimeWarning)
+            assert_equal(np.ma.median(a, axis=2), b)
+            assert_(w[0].category is RuntimeWarning)
+
+    def test_object(self):
+        o = np.ma.masked_array(np.arange(7.))
+        assert_(type(np.ma.median(o.astype(object))), float)
+        o[2] = np.nan
+        assert_(type(np.ma.median(o.astype(object))), float)
+
+
+class TestCov:
+
+    def setup_method(self):
+        self.data = array(np.random.rand(12))
+
+    def test_1d_without_missing(self):
+        # Test cov on 1D variable w/o missing values
+        x = self.data
+        assert_almost_equal(np.cov(x), cov(x))
+        assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
+        assert_almost_equal(np.cov(x, rowvar=False, bias=True),
+                            cov(x, rowvar=False, bias=True))
+
+    def test_2d_without_missing(self):
+        # Test cov on one 2D variable w/o missing values
+        x = self.data.reshape(3, 4)
+        assert_almost_equal(np.cov(x), cov(x))
+        assert_almost_equal(np.cov(x, rowvar=False), cov(x, rowvar=False))
+        assert_almost_equal(np.cov(x, rowvar=False, bias=True),
+                            cov(x, rowvar=False, bias=True))
+
+    def test_1d_with_missing(self):
+        # Test cov on one 1D variable w/ missing values
+        x = self.data
+        x[-1] = masked
+        x -= x.mean()
+        nx = x.compressed()
+        assert_almost_equal(np.cov(nx), cov(x))
+        assert_almost_equal(np.cov(nx, rowvar=False), cov(x, rowvar=False))
+        assert_almost_equal(np.cov(nx, rowvar=False, bias=True),
+                            cov(x, rowvar=False, bias=True))
+        #
+        try:
+            cov(x, allow_masked=False)
+        except ValueError:
+            pass
+        #
+        # 2 1D variables w/ missing values
+        nx = x[1:-1]
+        assert_almost_equal(np.cov(nx, nx[::-1]), cov(x, x[::-1]))
+        assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False),
+                            cov(x, x[::-1], rowvar=False))
+        assert_almost_equal(np.cov(nx, nx[::-1], rowvar=False, bias=True),
+                            cov(x, x[::-1], rowvar=False, bias=True))
+
+    def test_2d_with_missing(self):
+        # Test cov on 2D variable w/ missing value
+        x = self.data
+        x[-1] = masked
+        x = x.reshape(3, 4)
+        valid = np.logical_not(getmaskarray(x)).astype(int)
+        frac = np.dot(valid, valid.T)
+        xf = (x - x.mean(1)[:, None]).filled(0)
+        assert_almost_equal(cov(x),
+                            np.cov(xf) * (x.shape[1] - 1) / (frac - 1.))
+        assert_almost_equal(cov(x, bias=True),
+                            np.cov(xf, bias=True) * x.shape[1] / frac)
+        frac = np.dot(valid.T, valid)
+        xf = (x - x.mean(0)).filled(0)
+        assert_almost_equal(cov(x, rowvar=False),
+                            (np.cov(xf, rowvar=False) *
+                             (x.shape[0] - 1) / (frac - 1.)))
+        assert_almost_equal(cov(x, rowvar=False, bias=True),
+                            (np.cov(xf, rowvar=False, bias=True) *
+                             x.shape[0] / frac))
+
+
+class TestCorrcoef:
+
+    def setup_method(self):
+        self.data = array(np.random.rand(12))
+        self.data2 = array(np.random.rand(12))
+
+    def test_ddof(self):
+        # ddof raises DeprecationWarning
+        x, y = self.data, self.data2
+        expected = np.corrcoef(x)
+        expected2 = np.corrcoef(x, y)
+        with suppress_warnings() as sup:
+            warnings.simplefilter("always")
+            assert_warns(DeprecationWarning, corrcoef, x, ddof=-1)
+            sup.filter(DeprecationWarning, "bias and ddof have no effect")
+            # ddof has no or negligible effect on the function
+            assert_almost_equal(np.corrcoef(x, ddof=0), corrcoef(x, ddof=0))
+            assert_almost_equal(corrcoef(x, ddof=-1), expected)
+            assert_almost_equal(corrcoef(x, y, ddof=-1), expected2)
+            assert_almost_equal(corrcoef(x, ddof=3), expected)
+            assert_almost_equal(corrcoef(x, y, ddof=3), expected2)
+
+    def test_bias(self):
+        x, y = self.data, self.data2
+        expected = np.corrcoef(x)
+        # bias raises DeprecationWarning
+        with suppress_warnings() as sup:
+            warnings.simplefilter("always")
+            assert_warns(DeprecationWarning, corrcoef, x, y, True, False)
+            assert_warns(DeprecationWarning, corrcoef, x, y, True, True)
+            assert_warns(DeprecationWarning, corrcoef, x, bias=False)
+            sup.filter(DeprecationWarning, "bias and ddof have no effect")
+            # bias has no or negligible effect on the function
+            assert_almost_equal(corrcoef(x, bias=1), expected)
+
+    def test_1d_without_missing(self):
+        # Test corrcoef on 1D variable w/o missing values
+        x = self.data
+        assert_almost_equal(np.corrcoef(x), corrcoef(x))
+        assert_almost_equal(np.corrcoef(x, rowvar=False),
+                            corrcoef(x, rowvar=False))
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "bias and ddof have no effect")
+            assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
+                                corrcoef(x, rowvar=False, bias=True))
+
+    def test_2d_without_missing(self):
+        # Test corrcoef on one 2D variable w/o missing values
+        x = self.data.reshape(3, 4)
+        assert_almost_equal(np.corrcoef(x), corrcoef(x))
+        assert_almost_equal(np.corrcoef(x, rowvar=False),
+                            corrcoef(x, rowvar=False))
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "bias and ddof have no effect")
+            assert_almost_equal(np.corrcoef(x, rowvar=False, bias=True),
+                                corrcoef(x, rowvar=False, bias=True))
+
+    def test_1d_with_missing(self):
+        # Test corrcoef on one 1D variable w/ missing values
+        x = self.data
+        x[-1] = masked
+        x -= x.mean()
+        nx = x.compressed()
+        assert_almost_equal(np.corrcoef(nx), corrcoef(x))
+        assert_almost_equal(np.corrcoef(nx, rowvar=False),
+                            corrcoef(x, rowvar=False))
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "bias and ddof have no effect")
+            assert_almost_equal(np.corrcoef(nx, rowvar=False, bias=True),
+                                corrcoef(x, rowvar=False, bias=True))
+        try:
+            corrcoef(x, allow_masked=False)
+        except ValueError:
+            pass
+        # 2 1D variables w/ missing values
+        nx = x[1:-1]
+        assert_almost_equal(np.corrcoef(nx, nx[::-1]), corrcoef(x, x[::-1]))
+        assert_almost_equal(np.corrcoef(nx, nx[::-1], rowvar=False),
+                            corrcoef(x, x[::-1], rowvar=False))
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "bias and ddof have no effect")
+            # ddof and bias have no or negligible effect on the function
+            assert_almost_equal(np.corrcoef(nx, nx[::-1]),
+                                corrcoef(x, x[::-1], bias=1))
+            assert_almost_equal(np.corrcoef(nx, nx[::-1]),
+                                corrcoef(x, x[::-1], ddof=2))
+
+    def test_2d_with_missing(self):
+        # Test corrcoef on 2D variable w/ missing value
+        x = self.data
+        x[-1] = masked
+        x = x.reshape(3, 4)
+
+        test = corrcoef(x)
+        control = np.corrcoef(x)
+        assert_almost_equal(test[:-1, :-1], control[:-1, :-1])
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "bias and ddof have no effect")
+            # ddof and bias have no or negligible effect on the function
+            assert_almost_equal(corrcoef(x, ddof=-2)[:-1, :-1],
+                                control[:-1, :-1])
+            assert_almost_equal(corrcoef(x, ddof=3)[:-1, :-1],
+                                control[:-1, :-1])
+            assert_almost_equal(corrcoef(x, bias=1)[:-1, :-1],
+                                control[:-1, :-1])
+
+
+class TestPolynomial:
+    #
+    def test_polyfit(self):
+        # Tests polyfit
+        # On ndarrays
+        x = np.random.rand(10)
+        y = np.random.rand(20).reshape(-1, 2)
+        assert_almost_equal(polyfit(x, y, 3), np.polyfit(x, y, 3))
+        # On 1D masked arrays
+        x = x.view(MaskedArray)
+        x[0] = masked
+        y = y.view(MaskedArray)
+        y[0, 0] = y[-1, -1] = masked
+        #
+        (C, R, K, S, D) = polyfit(x, y[:, 0], 3, full=True)
+        (c, r, k, s, d) = np.polyfit(x[1:], y[1:, 0].compressed(), 3,
+                                     full=True)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+        #
+        (C, R, K, S, D) = polyfit(x, y[:, -1], 3, full=True)
+        (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1, -1], 3, full=True)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+        #
+        (C, R, K, S, D) = polyfit(x, y, 3, full=True)
+        (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+        #
+        w = np.random.rand(10) + 1
+        wo = w.copy()
+        xs = x[1:-1]
+        ys = y[1:-1]
+        ws = w[1:-1]
+        (C, R, K, S, D) = polyfit(x, y, 3, full=True, w=w)
+        (c, r, k, s, d) = np.polyfit(xs, ys, 3, full=True, w=ws)
+        assert_equal(w, wo)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+
+    def test_polyfit_with_masked_NaNs(self):
+        x = np.random.rand(10)
+        y = np.random.rand(20).reshape(-1, 2)
+
+        x[0] = np.nan
+        y[-1,-1] = np.nan
+        x = x.view(MaskedArray)
+        y = y.view(MaskedArray)
+        x[0] = masked
+        y[-1,-1] = masked
+
+        (C, R, K, S, D) = polyfit(x, y, 3, full=True)
+        (c, r, k, s, d) = np.polyfit(x[1:-1], y[1:-1,:], 3, full=True)
+        for (a, a_) in zip((C, R, K, S, D), (c, r, k, s, d)):
+            assert_almost_equal(a, a_)
+
+
+class TestArraySetOps:
+
+    def test_unique_onlist(self):
+        # Test unique on list
+        data = [1, 1, 1, 2, 2, 3]
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_(isinstance(test[0], MaskedArray))
+        assert_equal(test[0], masked_array([1, 2, 3], mask=[0, 0, 0]))
+        assert_equal(test[1], [0, 3, 5])
+        assert_equal(test[2], [0, 0, 0, 1, 1, 2])
+
+    def test_unique_onmaskedarray(self):
+        # Test unique on masked data w/use_mask=True
+        data = masked_array([1, 1, 1, 2, 2, 3], mask=[0, 0, 1, 0, 1, 0])
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+        assert_equal(test[1], [0, 3, 5, 2])
+        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+        #
+        data.fill_value = 3
+        data = masked_array(data=[1, 1, 1, 2, 2, 3],
+                            mask=[0, 0, 1, 0, 1, 0], fill_value=3)
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, 2, 3, -1], mask=[0, 0, 0, 1]))
+        assert_equal(test[1], [0, 3, 5, 2])
+        assert_equal(test[2], [0, 0, 3, 1, 3, 2])
+
+    def test_unique_allmasked(self):
+        # Test all masked
+        data = masked_array([1, 1, 1], mask=True)
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array([1, ], mask=[True]))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0, 0, 0])
+        #
+        # Test masked
+        data = masked
+        test = unique(data, return_index=True, return_inverse=True)
+        assert_equal(test[0], masked_array(masked))
+        assert_equal(test[1], [0])
+        assert_equal(test[2], [0])
+
+    def test_ediff1d(self):
+        # Tests the masked version of ediff1d
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        control = array([1, 1, 1, 4], mask=[1, 0, 0, 1])
+        test = ediff1d(x)
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_tobegin(self):
+        # Test ediff1d w/ to_begin
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_begin=masked)
+        control = array([0, 1, 1, 1, 4], mask=[1, 1, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_begin=[1, 2, 3])
+        control = array([1, 2, 3, 1, 1, 1, 4], mask=[0, 0, 0, 1, 0, 0, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_toend(self):
+        # Test ediff1d w/ to_end
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_end=masked)
+        control = array([1, 1, 1, 4, 0], mask=[1, 0, 0, 1, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_end=[1, 2, 3])
+        control = array([1, 1, 1, 4, 1, 2, 3], mask=[1, 0, 0, 1, 0, 0, 0])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_tobegin_toend(self):
+        # Test ediff1d w/ to_begin and to_end
+        x = masked_array(np.arange(5), mask=[1, 0, 0, 0, 1])
+        test = ediff1d(x, to_end=masked, to_begin=masked)
+        control = array([0, 1, 1, 1, 4, 0], mask=[1, 1, 0, 0, 1, 1])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_end=[1, 2, 3], to_begin=masked)
+        control = array([0, 1, 1, 1, 4, 1, 2, 3],
+                        mask=[1, 1, 0, 0, 1, 0, 0, 0])
+        assert_equal(test, control)
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_ediff1d_ndarray(self):
+        # Test ediff1d w/ an ndarray
+        x = np.arange(5)
+        test = ediff1d(x)
+        control = array([1, 1, 1, 1], mask=[0, 0, 0, 0])
+        assert_equal(test, control)
+        assert_(isinstance(test, MaskedArray))
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+        #
+        test = ediff1d(x, to_end=masked, to_begin=masked)
+        control = array([0, 1, 1, 1, 1, 0], mask=[1, 0, 0, 0, 0, 1])
+        assert_(isinstance(test, MaskedArray))
+        assert_equal(test.filled(0), control.filled(0))
+        assert_equal(test.mask, control.mask)
+
+    def test_intersect1d(self):
+        # Test intersect1d
+        x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
+        y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
+        test = intersect1d(x, y)
+        control = array([1, 3, -1], mask=[0, 0, 1])
+        assert_equal(test, control)
+
+    def test_setxor1d(self):
+        # Test setxor1d
+        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+        test = setxor1d(a, b)
+        assert_equal(test, array([3, 4, 7]))
+        #
+        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+        b = [1, 2, 3, 4, 5]
+        test = setxor1d(a, b)
+        assert_equal(test, array([3, 4, 7, -1], mask=[0, 0, 0, 1]))
+        #
+        a = array([1, 2, 3])
+        b = array([6, 5, 4])
+        test = setxor1d(a, b)
+        assert_(isinstance(test, MaskedArray))
+        assert_equal(test, [1, 2, 3, 4, 5, 6])
+        #
+        a = array([1, 8, 2, 3], mask=[0, 1, 0, 0])
+        b = array([6, 5, 4, 8], mask=[0, 0, 0, 1])
+        test = setxor1d(a, b)
+        assert_(isinstance(test, MaskedArray))
+        assert_equal(test, [1, 2, 3, 4, 5, 6])
+        #
+        assert_array_equal([], setxor1d([], []))
+
+    def test_isin(self):
+        # The tests for in1d cover most of isin's behavior; if in1d is ever
+        # removed, those tests would need to be rewritten to exercise isin
+        # instead.
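+        # (note, matching the check at the end of this test) ma.isin(a, b)
+        # agrees with np.isin evaluated on the unmasked data of `b`, with
+        # masked entries of `a` reported as False.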
+        a = np.arange(24).reshape([2, 3, 4])
+        mask = np.zeros([2, 3, 4])
+        mask[1, 2, 0] = 1
+        a = array(a, mask=mask)
+        b = array(data=[0, 10, 20, 30,  1,  3, 11, 22, 33],
+                  mask=[0,  1,  0,  1,  0,  1,  0,  1,  0])
+        ec = zeros((2, 3, 4), dtype=bool)
+        ec[0, 0, 0] = True
+        ec[0, 0, 1] = True
+        ec[0, 2, 3] = True
+        c = isin(a, b)
+        assert_(isinstance(c, MaskedArray))
+        assert_array_equal(c, ec)
+        # compare results of np.isin to ma.isin
+        d = np.isin(a, b[~b.mask]) & ~a.mask
+        assert_array_equal(c, d)
+
+    def test_in1d(self):
+        # Test in1d
+        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+        test = in1d(a, b)
+        assert_equal(test, [True, True, True, False, True])
+        #
+        a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
+        b = array([1, 5, -1], mask=[0, 0, 1])
+        test = in1d(a, b)
+        assert_equal(test, [True, True, False, True, True])
+        #
+        assert_array_equal([], in1d([], []))
+
+    def test_in1d_invert(self):
+        # Test in1d's invert parameter
+        a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
+        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+        assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
+
+        a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
+        b = array([1, 5, -1], mask=[0, 0, 1])
+        assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
+
+        assert_array_equal([], in1d([], [], invert=True))
+
+    def test_union1d(self):
+        # Test union1d
+        a = array([1, 2, 5, 7, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+        b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
+        test = union1d(a, b)
+        control = array([1, 2, 3, 4, 5, 7, -1], mask=[0, 0, 0, 0, 0, 0, 1])
+        assert_equal(test, control)
+
+        # Tests gh-10340, arguments to union1d should be
+        # flattened if they are not already 1D
+        x = array([[0, 1, 2], [3, 4, 5]], mask=[[0, 0, 0], [0, 0, 1]])
+        y = array([0, 1, 2, 3, 4], mask=[0, 0, 0, 0, 1])
+        ez = array([0, 1, 2, 3, 4, 5], mask=[0, 0, 0, 0, 0, 1])
+        z = union1d(x, y)
+        assert_equal(z, ez)
+        #
+        assert_array_equal([], union1d([], []))
+
+    def test_setdiff1d(self):
+        # Test setdiff1d
+        a = array([6, 5, 4, 7, 7, 1, 2, 1], mask=[0, 0, 0, 0, 0, 0, 0, 1])
+        b = array([2, 4, 3, 3, 2, 1, 5])
+        test = setdiff1d(a, b)
+        assert_equal(test, array([6, 7, -1], mask=[0, 0, 1]))
+        #
+        a = arange(10)
+        b = arange(8)
+        assert_equal(setdiff1d(a, b), array([8, 9]))
+        a = array([], np.uint32, mask=[])
+        assert_equal(setdiff1d(a, []).dtype, np.uint32)
+
+    def test_setdiff1d_char_array(self):
+        # Test setdiff1d on character arrays
+        a = np.array(['a', 'b', 'c'])
+        b = np.array(['a', 'b', 's'])
+        assert_array_equal(setdiff1d(a, b), np.array(['c']))
+
+
+class TestShapeBase:
+
+    def test_atleast_2d(self):
+        # Test atleast_2d
+        a = masked_array([0, 1, 2], mask=[0, 1, 0])
+        b = atleast_2d(a)
+        assert_equal(b.shape, (1, 3))
+        assert_equal(b.mask.shape, b.data.shape)
+        assert_equal(a.shape, (3,))
+        assert_equal(a.mask.shape, a.data.shape)
+        assert_equal(b.mask.shape, b.data.shape)
+
+    def test_shape_scalar(self):
+        # the atleast and diagflat functions should work with scalars
+        # GitHub issue #3367
+        # Additionally, the atleast functions should accept multiple scalars
+        # correctly
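+        # (note) when called with several scalars, the masked atleast_*
+        # functions return a list with one masked array per input, which the
+        # loops below iterate over.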
+        b = atleast_1d(1.0)
+        assert_equal(b.shape, (1,))
+        assert_equal(b.mask.shape, b.shape)
+        assert_equal(b.data.shape, b.shape)
+
+        b = atleast_1d(1.0, 2.0)
+        for a in b:
+            assert_equal(a.shape, (1,))
+            assert_equal(a.mask.shape, a.shape)
+            assert_equal(a.data.shape, a.shape)
+
+        b = atleast_2d(1.0)
+        assert_equal(b.shape, (1, 1))
+        assert_equal(b.mask.shape, b.shape)
+        assert_equal(b.data.shape, b.shape)
+
+        b = atleast_2d(1.0, 2.0)
+        for a in b:
+            assert_equal(a.shape, (1, 1))
+            assert_equal(a.mask.shape, a.shape)
+            assert_equal(a.data.shape, a.shape)
+
+        b = atleast_3d(1.0)
+        assert_equal(b.shape, (1, 1, 1))
+        assert_equal(b.mask.shape, b.shape)
+        assert_equal(b.data.shape, b.shape)
+
+        b = atleast_3d(1.0, 2.0)
+        for a in b:
+            assert_equal(a.shape, (1, 1, 1))
+            assert_equal(a.mask.shape, a.shape)
+            assert_equal(a.data.shape, a.shape)
+
+        b = diagflat(1.0)
+        assert_equal(b.shape, (1, 1))
+        assert_equal(b.mask.shape, b.data.shape)
+
+
+class TestNDEnumerate:
+
+    def test_ndenumerate_nomasked(self):
+        ordinary = np.arange(6.).reshape((1, 3, 2))
+        empty_mask = np.zeros_like(ordinary, dtype=bool)
+        with_mask = masked_array(ordinary, mask=empty_mask)
+        assert_equal(list(np.ndenumerate(ordinary)),
+                     list(ndenumerate(ordinary)))
+        assert_equal(list(ndenumerate(ordinary)),
+                     list(ndenumerate(with_mask)))
+        assert_equal(list(ndenumerate(with_mask)),
+                     list(ndenumerate(with_mask, compressed=False)))
+
+    def test_ndenumerate_allmasked(self):
+        a = masked_all(())
+        b = masked_all((100,))
+        c = masked_all((2, 3, 4))
+        assert_equal(list(ndenumerate(a)), [])
+        assert_equal(list(ndenumerate(b)), [])
+        assert_equal(list(ndenumerate(b, compressed=False)),
+                     list(zip(np.ndindex((100,)), 100 * [masked])))
+        assert_equal(list(ndenumerate(c)), [])
+        assert_equal(list(ndenumerate(c, compressed=False)),
+                     list(zip(np.ndindex((2, 3, 4)), 2 * 3 * 4 * [masked])))
+
+    def test_ndenumerate_mixedmasked(self):
+        a = masked_array(np.arange(12).reshape((3, 4)),
+                         mask=[[1, 1, 1, 1],
+                               [1, 1, 0, 1],
+                               [0, 0, 0, 0]])
+        items = [((1, 2), 6),
+                 ((2, 0), 8), ((2, 1), 9), ((2, 2), 10), ((2, 3), 11)]
+        assert_equal(list(ndenumerate(a)), items)
+        assert_equal(len(list(ndenumerate(a, compressed=False))), a.size)
+        for coordinate, value in ndenumerate(a, compressed=False):
+            assert_equal(a[coordinate], value)
+
+
+class TestStack:
+
+    def test_stack_1d(self):
+        a = masked_array([0, 1, 2], mask=[0, 1, 0])
+        b = masked_array([9, 8, 7], mask=[1, 0, 0])
+
+        c = stack([a, b], axis=0)
+        assert_equal(c.shape, (2, 3))
+        assert_array_equal(a.mask, c[0].mask)
+        assert_array_equal(b.mask, c[1].mask)
+
+        d = vstack([a, b])
+        assert_array_equal(c.data, d.data)
+        assert_array_equal(c.mask, d.mask)
+
+        c = stack([a, b], axis=1)
+        assert_equal(c.shape, (3, 2))
+        assert_array_equal(a.mask, c[:, 0].mask)
+        assert_array_equal(b.mask, c[:, 1].mask)
+
+    def test_stack_masks(self):
+        a = masked_array([0, 1, 2], mask=True)
+        b = masked_array([9, 8, 7], mask=False)
+
+        c = stack([a, b], axis=0)
+        assert_equal(c.shape, (2, 3))
+        assert_array_equal(a.mask, c[0].mask)
+        assert_array_equal(b.mask, c[1].mask)
+
+        d = vstack([a, b])
+        assert_array_equal(c.data, d.data)
+        assert_array_equal(c.mask, d.mask)
+
+        c = stack([a, b], axis=1)
+        assert_equal(c.shape, (3, 2))
+        assert_array_equal(a.mask, c[:, 0].mask)
+        assert_array_equal(b.mask, c[:, 1].mask)
+
+    def test_stack_nd(self):
+        # 2D
+        shp = (3, 2)
+        d1 = np.random.randint(0, 10, shp)
+        d2 = np.random.randint(0, 10, shp)
+        m1 = np.random.randint(0, 2, shp).astype(bool)
+        m2 = np.random.randint(0, 2, shp).astype(bool)
+        a1 = masked_array(d1, mask=m1)
+        a2 = masked_array(d2, mask=m2)
+
+        c = stack([a1, a2], axis=0)
+        c_shp = (2,) + shp
+        assert_equal(c.shape, c_shp)
+        assert_array_equal(a1.mask, c[0].mask)
+        assert_array_equal(a2.mask, c[1].mask)
+
+        c = stack([a1, a2], axis=-1)
+        c_shp = shp + (2,)
+        assert_equal(c.shape, c_shp)
+        assert_array_equal(a1.mask, c[..., 0].mask)
+        assert_array_equal(a2.mask, c[..., 1].mask)
+
+        # 4D
+        shp = (3, 2, 4, 5,)
+        d1 = np.random.randint(0, 10, shp)
+        d2 = np.random.randint(0, 10, shp)
+        m1 = np.random.randint(0, 2, shp).astype(bool)
+        m2 = np.random.randint(0, 2, shp).astype(bool)
+        a1 = masked_array(d1, mask=m1)
+        a2 = masked_array(d2, mask=m2)
+
+        c = stack([a1, a2], axis=0)
+        c_shp = (2,) + shp
+        assert_equal(c.shape, c_shp)
+        assert_array_equal(a1.mask, c[0].mask)
+        assert_array_equal(a2.mask, c[1].mask)
+
+        c = stack([a1, a2], axis=-1)
+        c_shp = shp + (2,)
+        assert_equal(c.shape, c_shp)
+        assert_array_equal(a1.mask, c[..., 0].mask)
+        assert_array_equal(a2.mask, c[..., 1].mask)
diff --git a/.venv/lib/python3.11/site-packages/numpy/ma/tests/test_regression.py b/.venv/lib/python3.11/site-packages/numpy/ma/tests/test_regression.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4f32cc7a98b4567076c9ccb340f6063321ba607
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/ma/tests/test_regression.py
@@ -0,0 +1,97 @@
+import numpy as np
+from numpy.testing import (
+    assert_, assert_array_equal, assert_allclose, suppress_warnings
+    )
+
+
+class TestRegression:
+    def test_masked_array_create(self):
+        # Ticket #17
+        x = np.ma.masked_array([0, 1, 2, 3, 0, 4, 5, 6],
+                               mask=[0, 0, 0, 1, 1, 1, 0, 0])
+        assert_array_equal(np.ma.nonzero(x), [[1, 2, 6, 7]])
+
+    def test_masked_array(self):
+        # Ticket #61
+        np.ma.array(1, mask=[1])
+
+    def test_mem_masked_where(self):
+        # Ticket #62
+        from numpy.ma import masked_where, MaskType
+        a = np.zeros((1, 1))
+        b = np.zeros(a.shape, MaskType)
+        c = masked_where(b, a)
+        a-c
+
+    def test_masked_array_multiply(self):
+        # Ticket #254
+        a = np.ma.zeros((4, 1))
+        a[2, 0] = np.ma.masked
+        b = np.zeros((4, 2))
+        a*b
+        b*a
+
+    def test_masked_array_repeat(self):
+        # Ticket #271
+        np.ma.array([1], mask=False).repeat(10)
+
+    def test_masked_array_repr_unicode(self):
+        # Ticket #1256
+        repr(np.ma.array("Unicode"))
+
+    def test_atleast_2d(self):
+        # Ticket #1559
+        a = np.ma.masked_array([0.0, 1.2, 3.5], mask=[False, True, False])
+        b = np.atleast_2d(a)
+        assert_(a.mask.ndim == 1)
+        assert_(b.mask.ndim == 2)
+
+    def test_set_fill_value_unicode_py3(self):
+        # Ticket #2733
+        a = np.ma.masked_array(['a', 'b', 'c'], mask=[1, 0, 0])
+        a.fill_value = 'X'
+        assert_(a.fill_value == 'X')
+
+    def test_var_sets_maskedarray_scalar(self):
+        # Issue gh-2757
+        a = np.ma.array(np.arange(5), mask=True)
+        mout = np.ma.array(-1, dtype=float)
+        a.var(out=mout)
+        assert_(mout._data == 0)
+
+    def test_ddof_corrcoef(self):
+        # See gh-3336
+        x = np.ma.masked_equal([1, 2, 3, 4, 5], 4)
+        y = np.array([2, 2.5, 3.1, 3, 5])
+        # this test can be removed after deprecation.
+        with suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "bias and ddof have no effect")
+            r0 = np.ma.corrcoef(x, y, ddof=0)
+            r1 = np.ma.corrcoef(x, y, ddof=1)
+            # ddof should not have an effect (it gets cancelled out)
+            assert_allclose(r0.data, r1.data)
+
+    def test_mask_not_backmangled(self):
+        # See gh-10314.  Test case taken from gh-3140.
+        a = np.ma.MaskedArray([1., 2.], mask=[False, False])
+        assert_(a.mask.shape == (2,))
+        b = np.tile(a, (2, 1))
+        # Check that the above no longer changes a.shape to (1, 2)
+        assert_(a.mask.shape == (2,))
+        assert_(b.shape == (2, 2))
+        assert_(b.mask.shape == (2, 2))
+
+    def test_empty_list_on_structured(self):
+        # See gh-12464. Indexing with empty list should give empty result.
+        ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4')
+        assert_array_equal(ma[[]], ma[:0])
+
+    def test_masked_array_tobytes_fortran(self):
+        ma = np.ma.arange(4).reshape((2,2))
+        assert_array_equal(ma.tobytes(order='F'), ma.T.tobytes())
+
+    def test_structured_array(self):
+        # see gh-22041
+        np.ma.array((1, (b"", b"")),
+                    dtype=[("x", np.int_),
+                          ("y", [("i", np.void), ("j", np.void)])])
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/__init__.py b/.venv/lib/python3.11/site-packages/numpy/testing/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a34221e4dde5f8a1eeab7446193344915467769
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/testing/__init__.py
@@ -0,0 +1,22 @@
+"""Common test support for all numpy test scripts.
+
+This single module should provide all the common functionality for numpy tests
+in a single location, so that test scripts can just import it and work right
+away.
+
+"""
+from unittest import TestCase
+
+from . import _private
+from ._private.utils import *
+from ._private.utils import (_assert_valid_refcount, _gen_alignment_data)
+from ._private import extbuild
+from . import overrides
+
+__all__ = (
+    _private.utils.__all__ + ['TestCase', 'overrides']
+)
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
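Everything listed in `_private.utils.__all__` (plus `TestCase` and `overrides`) is re-exported here, so test code normally imports the assertion helpers straight from `numpy.testing`. A minimal usage sketch, with an illustrative test name:

import numpy as np
from numpy.testing import assert_allclose, assert_array_equal, assert_raises

def test_roundtrip_sketch():  # hypothetical test, not part of the package
    x = np.linspace(0.0, 1.0, 5)
    assert_array_equal(x > 0.5, [False, False, False, True, True])
    # tolerance-based comparison for floating point round-trips
    assert_allclose(np.exp(np.log(x[1:])), x[1:], rtol=1e-12)
    # assert_raises also works as a context manager
    with assert_raises(ZeroDivisionError):
        1 / 0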
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/__init__.pyi b/.venv/lib/python3.11/site-packages/numpy/testing/__init__.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..d65860ccb044c7c01ade91327f25a0e94e4c9b32
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/testing/__init__.pyi
@@ -0,0 +1,50 @@
+from numpy._pytesttester import PytestTester
+
+from unittest import (
+    TestCase as TestCase,
+)
+
+from numpy.testing._private.utils import (
+    assert_equal as assert_equal,
+    assert_almost_equal as assert_almost_equal,
+    assert_approx_equal as assert_approx_equal,
+    assert_array_equal as assert_array_equal,
+    assert_array_less as assert_array_less,
+    assert_string_equal as assert_string_equal,
+    assert_array_almost_equal as assert_array_almost_equal,
+    assert_raises as assert_raises,
+    build_err_msg as build_err_msg,
+    decorate_methods as decorate_methods,
+    jiffies as jiffies,
+    memusage as memusage,
+    print_assert_equal as print_assert_equal,
+    rundocs as rundocs,
+    runstring as runstring,
+    verbose as verbose,
+    measure as measure,
+    assert_ as assert_,
+    assert_array_almost_equal_nulp as assert_array_almost_equal_nulp,
+    assert_raises_regex as assert_raises_regex,
+    assert_array_max_ulp as assert_array_max_ulp,
+    assert_warns as assert_warns,
+    assert_no_warnings as assert_no_warnings,
+    assert_allclose as assert_allclose,
+    IgnoreException as IgnoreException,
+    clear_and_catch_warnings as clear_and_catch_warnings,
+    SkipTest as SkipTest,
+    KnownFailureException as KnownFailureException,
+    temppath as temppath,
+    tempdir as tempdir,
+    IS_PYPY as IS_PYPY,
+    IS_PYSTON as IS_PYSTON,
+    HAS_REFCOUNT as HAS_REFCOUNT,
+    suppress_warnings as suppress_warnings,
+    assert_array_compare as assert_array_compare,
+    assert_no_gc_cycles as assert_no_gc_cycles,
+    break_cycles as break_cycles,
+    HAS_LAPACK64 as HAS_LAPACK64,
+)
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..11f9341d78e30f891d9ec7c38ae686211a49fe31
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/__init__.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/overrides.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/overrides.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19cb4344afcdc295abef90d9fb06a54a8b2fa948
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/overrides.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff111ef8f5425c34225e3f88f5c6d8427ebce5f4
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/setup.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/setup.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a619d08c7a63510c6be7b45aab93c3cbf7aa01b
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/testing/__pycache__/setup.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/_private/__init__.py b/.venv/lib/python3.11/site-packages/numpy/testing/_private/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d2ee12e66fdfb7274035fd5285a6e46482cc1c17
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/_private/__pycache__/extbuild.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/testing/_private/__pycache__/extbuild.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0220a48dc48fe7e10e8b212a4b10a6a53ef24183
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/testing/_private/__pycache__/extbuild.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/_private/extbuild.py b/.venv/lib/python3.11/site-packages/numpy/testing/_private/extbuild.py
new file mode 100644
index 0000000000000000000000000000000000000000..541f551151f54b4bb649f403404325d2dd79cd7f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/testing/_private/extbuild.py
@@ -0,0 +1,248 @@
+"""
+Build a c-extension module on-the-fly in tests.
+See build_and_import_extensions for usage hints
+
+"""
+
+import os
+import pathlib
+import subprocess
+import sys
+import sysconfig
+import textwrap
+
+__all__ = ['build_and_import_extension', 'compile_extension_module']
+
+
+def build_and_import_extension(
+        modname, functions, *, prologue="", build_dir=None,
+        include_dirs=[], more_init=""):
+    """
+    Build and import a C extension module `modname` from a list of function
+    fragments `functions`.
+
+
+    Parameters
+    ----------
+    functions : list of fragments
+        Each fragment is a sequence of func_name, calling convention, snippet.
+    prologue : string
+        Code to precede the rest, usually extra ``#include`` or ``#define``
+        macros.
+    build_dir : pathlib.Path
+        Where to build the module, usually a temporary directory
+    include_dirs : list
+        Extra directories to find include files when compiling
+    more_init : string
+        Code to appear in the module PyMODINIT_FUNC
+
+    Returns
+    -------
+    out: module
+        The module will have been loaded and is ready for use
+
+    Examples
+    --------
+    >>> functions = [("test_bytes", "METH_O", \"\"\"
+        if ( !PyBytes_Check(args)) {
+            Py_RETURN_FALSE;
+        }
+        Py_RETURN_TRUE;
+    \"\"\")]
+    >>> mod = build_and_import_extension("testme", functions)
+    >>> assert not mod.test_bytes(u'abc')
+    >>> assert mod.test_bytes(b'abc')
+    """
+    body = prologue + _make_methods(functions, modname)
+    init = """PyObject *mod = PyModule_Create(&moduledef);
+           """
+    if not build_dir:
+        build_dir = pathlib.Path('.')
+    if more_init:
+        init += """#define INITERROR return NULL
+                """
+        init += more_init
+    init += "\nreturn mod;"
+    source_string = _make_source(modname, init, body)
+    try:
+        mod_so = compile_extension_module(
+            modname, build_dir, include_dirs, source_string)
+    except Exception as e:
+        # shorten the exception chain
+        raise RuntimeError(f"could not compile in {build_dir}:") from e
+    import importlib.util
+    spec = importlib.util.spec_from_file_location(modname, mod_so)
+    foo = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(foo)
+    return foo
+
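A hedged usage sketch of `build_and_import_extension` outside the docstring; the module name, C body and temporary directory are illustrative, and a working C compiler plus meson on PATH are assumed:

import pathlib, tempfile
from numpy.testing._private.extbuild import build_and_import_extension

# each fragment is (function name, calling convention, C body); with METH_O
# the generated signature receives the single argument as ``args``
functions = [("double_it", "METH_O", """
    long v = PyLong_AsLong(args);
    if (v == -1 && PyErr_Occurred()) {
        return NULL;
    }
    return PyLong_FromLong(2 * v);
""")]

with tempfile.TemporaryDirectory() as tmp:
    mod = build_and_import_extension("demo_ext", functions,
                                     build_dir=pathlib.Path(tmp))
    assert mod.double_it(21) == 42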
+
+def compile_extension_module(
+        name, builddir, include_dirs,
+        source_string, libraries=[], library_dirs=[]):
+    """
+    Build an extension module and return the filename of the resulting
+    native code file.
+
+    Parameters
+    ----------
+    name : string
+        name of the module, possibly including dots if it is a module inside a
+        package.
+    builddir : pathlib.Path
+        Where to build the module, usually a temporary directory
+    include_dirs : list
+        Extra directories to find include files when compiling
+    libraries : list
+        Libraries to link into the extension module
+    library_dirs: list
+        Where to find the libraries, ``-L`` passed to the linker
+    """
+    modname = name.split('.')[-1]
+    dirname = builddir / name
+    dirname.mkdir(exist_ok=True)
+    cfile = _convert_str_to_file(source_string, dirname)
+    include_dirs = include_dirs + [sysconfig.get_config_var('INCLUDEPY')]
+
+    return _c_compile(
+        cfile, outputfilename=dirname / modname,
+        include_dirs=include_dirs, libraries=[], library_dirs=[],
+        )
+
+
+def _convert_str_to_file(source, dirname):
+    """Helper function to create a file ``source.c`` in `dirname` that contains
+    the string in `source`. Returns the file name
+    """
+    filename = dirname / 'source.c'
+    with filename.open('w') as f:
+        f.write(str(source))
+    return filename
+
+
+def _make_methods(functions, modname):
+    """ Turns the name, signature, code in functions into complete functions
+    and lists them in a methods_table. Then turns the methods_table into a
+    ``PyMethodDef`` structure and returns the resulting code fragment ready
+    for compilation
+    """
+    methods_table = []
+    codes = []
+    for funcname, flags, code in functions:
+        cfuncname = "%s_%s" % (modname, funcname)
+        if 'METH_KEYWORDS' in flags:
+            signature = '(PyObject *self, PyObject *args, PyObject *kwargs)'
+        else:
+            signature = '(PyObject *self, PyObject *args)'
+        methods_table.append(
+            "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags))
+        func_code = """
+        static PyObject* {cfuncname}{signature}
+        {{
+        {code}
+        }}
+        """.format(cfuncname=cfuncname, signature=signature, code=code)
+        codes.append(func_code)
+
+    body = "\n".join(codes) + """
+    static PyMethodDef methods[] = {
+    %(methods)s
+    { NULL }
+    };
+    static struct PyModuleDef moduledef = {
+        PyModuleDef_HEAD_INIT,
+        "%(modname)s",  /* m_name */
+        NULL,           /* m_doc */
+        -1,             /* m_size */
+        methods,        /* m_methods */
+    };
+    """ % dict(methods='\n'.join(methods_table), modname=modname)
+    return body
+
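For illustration, `_make_methods` can be called directly to inspect the C scaffolding it emits for a fragment; the fragment below is arbitrary:

from numpy.testing._private.extbuild import _make_methods

# (function name, calling convention, C body)
fragments = [("greet", "METH_NOARGS", 'return PyUnicode_FromString("hello");')]

# the C function is renamed to demo_greet and registered in a PyMethodDef
# table, followed by the PyModuleDef for module "demo"
print(_make_methods(fragments, "demo"))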
+
+def _make_source(name, init, body):
+    """ Combines the code fragments into source code ready to be compiled
+    """
+    code = """
+    #include <Python.h>
+
+    %(body)s
+
+    PyMODINIT_FUNC
+    PyInit_%(name)s(void) {
+    %(init)s
+    }
+    """ % dict(
+        name=name, init=init, body=body,
+    )
+    return code
+
+
+def _c_compile(cfile, outputfilename, include_dirs=[], libraries=[],
+               library_dirs=[]):
+    if sys.platform == 'win32':
+        compile_extra = ["/we4013"]
+        link_extra = ["/LIBPATH:" + os.path.join(sys.base_prefix, 'libs')]
+    elif sys.platform.startswith('linux'):
+        compile_extra = [
+            "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"]
+        link_extra = []
+    else:
+        compile_extra = link_extra = []
+        pass
+    if sys.platform == 'win32':
+        link_extra = link_extra + ['/DEBUG']  # generate .pdb file
+    if sys.platform == 'darwin':
+        # support Fink & Darwinports
+        for s in ('/sw/', '/opt/local/'):
+            if (s + 'include' not in include_dirs
+                    and os.path.exists(s + 'include')):
+                include_dirs.append(s + 'include')
+            if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'):
+                library_dirs.append(s + 'lib')
+
+    outputfilename = outputfilename.with_suffix(get_so_suffix())
+    build(
+        cfile, outputfilename,
+        compile_extra, link_extra,
+        include_dirs, libraries, library_dirs)
+    return outputfilename
+
+
+def build(cfile, outputfilename, compile_extra, link_extra,
+          include_dirs, libraries, library_dirs):
+    "use meson to build"
+
+    build_dir = cfile.parent / "build"
+    os.makedirs(build_dir, exist_ok=True)
+    so_name = outputfilename.parts[-1]
+    with open(cfile.parent / "meson.build", "wt") as fid:
+        includes = ['-I' + d for d in include_dirs]
+        link_dirs = ['-L' + d for d in library_dirs]
+        fid.write(textwrap.dedent(f"""\
+            project('foo', 'c')
+            shared_module('{so_name}', '{cfile.parts[-1]}',
+                c_args: {includes} + {compile_extra},
+                link_args: {link_dirs} + {link_extra},
+                link_with: {libraries},
+                name_prefix: '',
+                name_suffix: 'dummy',
+            )
+        """))
+    if sys.platform == "win32":
+        subprocess.check_call(["meson", "setup",
+                               "--buildtype=release", 
+                               "--vsenv", ".."],
+                              cwd=build_dir,
+                              )
+    else:
+        subprocess.check_call(["meson", "setup", "--vsenv", ".."],
+                              cwd=build_dir
+                              )
+    subprocess.check_call(["meson", "compile"], cwd=build_dir)
+    os.rename(str(build_dir / so_name) + ".dummy", cfile.parent / so_name)
+
+
+def get_so_suffix():
+    ret = sysconfig.get_config_var('EXT_SUFFIX')
+    assert ret
+    return ret
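`get_so_suffix` simply exposes `sysconfig`'s ``EXT_SUFFIX``, which `_c_compile` appends to the output filename; the exact value is platform dependent (the values in the comment are examples only):

import sysconfig

# e.g. '.cpython-311-x86_64-linux-gnu.so' on CPython 3.11 / Linux,
# '.cp311-win_amd64.pyd' on Windows
print(sysconfig.get_config_var('EXT_SUFFIX'))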
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/_private/utils.py b/.venv/lib/python3.11/site-packages/numpy/testing/_private/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..28dd656c4a4d0aae97560e1858a7fdcc3c3a02b4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/testing/_private/utils.py
@@ -0,0 +1,2509 @@
+"""
+Utility function to facilitate testing.
+
+"""
+import os
+import sys
+import platform
+import re
+import gc
+import operator
+import warnings
+from functools import partial, wraps
+import shutil
+import contextlib
+from tempfile import mkdtemp, mkstemp
+from unittest.case import SkipTest
+from warnings import WarningMessage
+import pprint
+import sysconfig
+
+import numpy as np
+from numpy.core import (
+     intp, float32, empty, arange, array_repr, ndarray, isnat, array)
+from numpy import isfinite, isnan, isinf
+import numpy.linalg._umath_linalg
+
+from io import StringIO
+
+__all__ = [
+        'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
+        'assert_array_equal', 'assert_array_less', 'assert_string_equal',
+        'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
+        'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
+        'rundocs', 'runstring', 'verbose', 'measure',
+        'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
+        'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
+        'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
+        'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
+        'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare',
+        'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON',
+        '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE'
+        ]
+
+
+class KnownFailureException(Exception):
+    '''Raise this exception to mark a test as a known failing test.'''
+    pass
+
+
+KnownFailureTest = KnownFailureException  # backwards compat
+verbose = 0
+
+IS_WASM = platform.machine() in ["wasm32", "wasm64"]
+IS_PYPY = sys.implementation.name == 'pypy'
+IS_PYSTON = hasattr(sys, "pyston_version_info")
+HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON
+HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64
+
+_OLD_PROMOTION = lambda: np._get_promotion_state() == 'legacy'
+
+IS_MUSL = False
+# alternate way is
+# from packaging.tags import sys_tags
+#     _tags = list(sys_tags())
+#     if 'musllinux' in _tags[0].platform:
+_v = sysconfig.get_config_var('HOST_GNU_TYPE') or ''
+if 'musl' in _v:
+    IS_MUSL = True
+
+
+def assert_(val, msg=''):
+    """
+    Assert that works in release mode.
+    Accepts callable msg to allow deferring evaluation until failure.
+
+    The Python built-in ``assert`` does not work when executing code in
+    optimized mode (the ``-O`` flag) - no byte-code is generated for it.
+
+    For documentation on usage, refer to the Python documentation.
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    if not val:
+        try:
+            smsg = msg()
+        except TypeError:
+            smsg = msg
+        raise AssertionError(smsg)
+
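Because `msg` may be a callable, an expensive failure message can be deferred; it is only built if the assertion actually fails. A small sketch:

import numpy as np
from numpy.testing import assert_

a = np.arange(6).reshape(2, 3)
# the lambda (and hence repr(a)) only runs when the condition is falsy
assert_(a.sum() == 15, msg=lambda: f"unexpected contents: {a!r}")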
+
+if os.name == 'nt':
+    # Code "stolen" from enthought/debug/memusage.py
+    def GetPerformanceAttributes(object, counter, instance=None,
+                                 inum=-1, format=None, machine=None):
+        # NOTE: Many counters require 2 samples to give accurate results,
+        # including "% Processor Time" (as by definition, at any instant, a
+        # thread's CPU usage is either 0 or 100).  To read counters like this,
+        # you should copy this function, but keep the counter open, and call
+        # CollectQueryData() each time you need to know.
+        # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link)
+        # My older explanation for this was that the "AddCounter" process
+        # forced the CPU to 100%, but the above makes more sense :)
+        import win32pdh
+        if format is None:
+            format = win32pdh.PDH_FMT_LONG
+        path = win32pdh.MakeCounterPath( (machine, object, instance, None,
+                                          inum, counter))
+        hq = win32pdh.OpenQuery()
+        try:
+            hc = win32pdh.AddCounter(hq, path)
+            try:
+                win32pdh.CollectQueryData(hq)
+                type, val = win32pdh.GetFormattedCounterValue(hc, format)
+                return val
+            finally:
+                win32pdh.RemoveCounter(hc)
+        finally:
+            win32pdh.CloseQuery(hq)
+
+    def memusage(processName="python", instance=0):
+        # from win32pdhutil, part of the win32all package
+        import win32pdh
+        return GetPerformanceAttributes("Process", "Virtual Bytes",
+                                        processName, instance,
+                                        win32pdh.PDH_FMT_LONG, None)
+elif sys.platform[:5] == 'linux':
+
+    def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'):
+        """
+        Return virtual memory size in bytes of the running python.
+
+        """
+        try:
+            with open(_proc_pid_stat) as f:
+                l = f.readline().split(' ')
+            return int(l[22])
+        except Exception:
+            return
+else:
+    def memusage():
+        """
+        Return memory usage of running python. [Not implemented]
+
+        """
+        raise NotImplementedError
+
+
+if sys.platform[:5] == 'linux':
+    def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
+        """
+        Return number of jiffies elapsed.
+
+        Return number of jiffies (1/100ths of a second) that this
+        process has been scheduled in user mode. See man 5 proc.
+
+        """
+        import time
+        if not _load_time:
+            _load_time.append(time.time())
+        try:
+            with open(_proc_pid_stat) as f:
+                l = f.readline().split(' ')
+            return int(l[13])
+        except Exception:
+            return int(100*(time.time()-_load_time[0]))
+else:
+    # os.getpid is not available on all platforms.
+    # Using time is safe but inaccurate, especially when process
+    # was suspended or sleeping.
+    def jiffies(_load_time=[]):
+        """
+        Return number of jiffies elapsed.
+
+        Return number of jiffies (1/100ths of a second) that this
+        process has been scheduled in user mode. See man 5 proc.
+
+        """
+        import time
+        if not _load_time:
+            _load_time.append(time.time())
+        return int(100*(time.time()-_load_time[0]))
+
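Both branches report in units of 1/100th of a second, so elapsed scheduled (or, in the fallback, wall-clock) time can be estimated the same way everywhere:

from numpy.testing import jiffies

start = jiffies()
total = sum(i * i for i in range(10**6))   # some work to time
print("elapsed: %.2f s" % ((jiffies() - start) / 100.0))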
+
+def build_err_msg(arrays, err_msg, header='Items are not equal:',
+                  verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
+    msg = ['\n' + header]
+    if err_msg:
+        if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
+            msg = [msg[0] + ' ' + err_msg]
+        else:
+            msg.append(err_msg)
+    if verbose:
+        for i, a in enumerate(arrays):
+
+            if isinstance(a, ndarray):
+                # precision argument is only needed if the objects are ndarrays
+                r_func = partial(array_repr, precision=precision)
+            else:
+                r_func = repr
+
+            try:
+                r = r_func(a)
+            except Exception as exc:
+                r = f'[repr failed for <{type(a).__name__}>: {exc}]'
+            if r.count('\n') > 3:
+                r = '\n'.join(r.splitlines()[:3])
+                r += '...'
+            msg.append(f' {names[i]}: {r}')
+    return '\n'.join(msg)
+
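A sketch of the message `build_err_msg` assembles; the err_msg, names and inputs here are arbitrary. ndarray inputs are repr'd at the requested precision, and reprs longer than three lines are truncated:

import numpy as np
from numpy.testing import build_err_msg

msg = build_err_msg([np.array([1.0, 2.0]), np.array([1.0, 2.1])],
                    err_msg="values drifted",
                    names=("ACTUAL", "DESIRED"),
                    precision=3)
print(msg)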
+
+def assert_equal(actual, desired, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two objects are not equal.
+
+    Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
+    check that all elements of these objects are equal. An exception is raised
+    at the first conflicting values.
+
+    When one of `actual` and `desired` is a scalar and the other is array_like,
+    the function checks that each element of the array_like object is equal to
+    the scalar.
+
+    This function handles NaN comparisons as if NaN was a "normal" number.
+    That is, AssertionError is not raised if both objects have NaNs in the same
+    positions.  This is in contrast to the IEEE standard on NaNs, which says
+    that NaN compared to anything must return False.
+
+    Parameters
+    ----------
+    actual : array_like
+        The object to check.
+    desired : array_like
+        The expected object.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired are not equal.
+
+    Examples
+    --------
+    >>> np.testing.assert_equal([4,5], [4,6])
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Items are not equal:
+    item=1
+     ACTUAL: 5
+     DESIRED: 6
+
+    The following comparison does not raise an exception.  There are NaNs
+    in the inputs, but they are in the same positions.
+
+    >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    if isinstance(desired, dict):
+        if not isinstance(actual, dict):
+            raise AssertionError(repr(type(actual)))
+        assert_equal(len(actual), len(desired), err_msg, verbose)
+        for k, i in desired.items():
+            if k not in actual:
+                raise AssertionError(repr(k))
+            assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}',
+                         verbose)
+        return
+    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
+        assert_equal(len(actual), len(desired), err_msg, verbose)
+        for k in range(len(desired)):
+            assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}',
+                         verbose)
+        return
+    from numpy.core import ndarray, isscalar, signbit
+    from numpy.lib import iscomplexobj, real, imag
+    if isinstance(actual, ndarray) or isinstance(desired, ndarray):
+        return assert_array_equal(actual, desired, err_msg, verbose)
+    msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
+
+    # Handle complex numbers: separate into real/imag to handle
+    # nan/inf/negative zero correctly
+    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
+    try:
+        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+    except (ValueError, TypeError):
+        usecomplex = False
+
+    if usecomplex:
+        if iscomplexobj(actual):
+            actualr = real(actual)
+            actuali = imag(actual)
+        else:
+            actualr = actual
+            actuali = 0
+        if iscomplexobj(desired):
+            desiredr = real(desired)
+            desiredi = imag(desired)
+        else:
+            desiredr = desired
+            desiredi = 0
+        try:
+            assert_equal(actualr, desiredr)
+            assert_equal(actuali, desiredi)
+        except AssertionError:
+            raise AssertionError(msg)
+
+    # isscalar test to check cases such as [np.nan] != np.nan
+    if isscalar(desired) != isscalar(actual):
+        raise AssertionError(msg)
+
+    try:
+        isdesnat = isnat(desired)
+        isactnat = isnat(actual)
+        dtypes_match = (np.asarray(desired).dtype.type ==
+                        np.asarray(actual).dtype.type)
+        if isdesnat and isactnat:
+            # If both are NaT (and have the same dtype -- datetime or
+            # timedelta) they are considered equal.
+            if dtypes_match:
+                return
+            else:
+                raise AssertionError(msg)
+
+    except (TypeError, ValueError, NotImplementedError):
+        pass
+
+    # Inf/nan/negative zero handling
+    try:
+        isdesnan = isnan(desired)
+        isactnan = isnan(actual)
+        if isdesnan and isactnan:
+            return  # both nan, so equal
+
+        # handle signed zero specially for floats
+        array_actual = np.asarray(actual)
+        array_desired = np.asarray(desired)
+        if (array_actual.dtype.char in 'Mm' or
+                array_desired.dtype.char in 'Mm'):
+            # version 1.18
+            # until this version, isnan failed for datetime64 and timedelta64.
+            # Now it succeeds but comparison to scalar with a different type
+            # emits a DeprecationWarning.
+            # Avoid that by skipping the next check
+            raise NotImplementedError('cannot compare to a scalar '
+                                      'with a different type')
+
+        if desired == 0 and actual == 0:
+            if not signbit(desired) == signbit(actual):
+                raise AssertionError(msg)
+
+    except (TypeError, ValueError, NotImplementedError):
+        pass
+
+    try:
+        # Explicitly use __eq__ for comparison, gh-2552
+        if not (desired == actual):
+            raise AssertionError(msg)
+
+    except (DeprecationWarning, FutureWarning) as e:
+        # this handles the case when the two types are not even comparable
+        if 'elementwise == comparison' in e.args[0]:
+            raise AssertionError(msg)
+        else:
+            raise
+
+
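The dict and list/tuple branches above recurse element by element, and ndarrays fall through to `assert_array_equal`, so nested containers can be checked in one call. A small sketch:

import numpy as np
from numpy.testing import assert_equal

actual = {"ids": [1, 2, 3], "values": np.array([0.5, np.nan])}
desired = {"ids": [1, 2, 3], "values": [0.5, np.nan]}
# keys are matched, lists compared item by item, the array via
# assert_array_equal (NaNs in the same positions compare equal)
assert_equal(actual, desired)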
+def print_assert_equal(test_string, actual, desired):
+    """
+    Test if two objects are equal, and print an error message if test fails.
+
+    The test is performed with ``actual == desired``.
+
+    Parameters
+    ----------
+    test_string : str
+        The message supplied to AssertionError.
+    actual : object
+        The object to test for equality against `desired`.
+    desired : object
+        The expected result.
+
+    Examples
+    --------
+    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
+    >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
+    Traceback (most recent call last):
+    ...
+    AssertionError: Test XYZ of func xyz failed
+    ACTUAL:
+    [0, 1]
+    DESIRED:
+    [0, 2]
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import pprint
+
+    if not (actual == desired):
+        msg = StringIO()
+        msg.write(test_string)
+        msg.write(' failed\nACTUAL: \n')
+        pprint.pprint(actual, msg)
+        msg.write('DESIRED: \n')
+        pprint.pprint(desired, msg)
+        raise AssertionError(msg.getvalue())
+
+
+@np._no_nep50_warning()
+def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two items are not equal up to desired
+    precision.
+
+    .. note:: It is recommended to use one of `assert_allclose`,
+              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+              instead of this function for more consistent floating point
+              comparisons.
+
+    The test verifies that the elements of `actual` and `desired` satisfy:
+
+        ``abs(desired-actual) < float64(1.5 * 10**(-decimal))``
+
+    That is a looser test than originally documented, but agrees with what the
+    actual implementation in `assert_array_almost_equal` did up to rounding
+    vagaries. An exception is raised at conflicting values. For ndarrays this
+    delegates to assert_array_almost_equal
+
+    Parameters
+    ----------
+    actual : array_like
+        The object to check.
+    desired : array_like
+        The expected object.
+    decimal : int, optional
+        Desired precision, default is 7.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+      If actual and desired are not equal up to specified precision.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Examples
+    --------
+    >>> from numpy.testing import assert_almost_equal
+    >>> assert_almost_equal(2.3333333333333, 2.33333334)
+    >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not almost equal to 10 decimals
+     ACTUAL: 2.3333333333333
+     DESIRED: 2.33333334
+
+    >>> assert_almost_equal(np.array([1.0,2.3333333333333]),
+    ...                     np.array([1.0,2.33333334]), decimal=9)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not almost equal to 9 decimals
+    
+    Mismatched elements: 1 / 2 (50%)
+    Max absolute difference: 6.66669964e-09
+    Max relative difference: 2.85715698e-09
+     x: array([1.         , 2.333333333])
+     y: array([1.        , 2.33333334])
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    from numpy.core import ndarray
+    from numpy.lib import iscomplexobj, real, imag
+
+    # Handle complex numbers: separate into real/imag to handle
+    # nan/inf/negative zero correctly
+    # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
+    try:
+        usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+    except ValueError:
+        usecomplex = False
+
+    def _build_err_msg():
+        header = ('Arrays are not almost equal to %d decimals' % decimal)
+        return build_err_msg([actual, desired], err_msg, verbose=verbose,
+                             header=header)
+
+    if usecomplex:
+        if iscomplexobj(actual):
+            actualr = real(actual)
+            actuali = imag(actual)
+        else:
+            actualr = actual
+            actuali = 0
+        if iscomplexobj(desired):
+            desiredr = real(desired)
+            desiredi = imag(desired)
+        else:
+            desiredr = desired
+            desiredi = 0
+        try:
+            assert_almost_equal(actualr, desiredr, decimal=decimal)
+            assert_almost_equal(actuali, desiredi, decimal=decimal)
+        except AssertionError:
+            raise AssertionError(_build_err_msg())
+
+    if isinstance(actual, (ndarray, tuple, list)) \
+            or isinstance(desired, (ndarray, tuple, list)):
+        return assert_array_almost_equal(actual, desired, decimal, err_msg)
+    try:
+        # If one of desired/actual is not finite, handle it specially here:
+        # check that both are nan if any is a nan, and test for equality
+        # otherwise
+        if not (isfinite(desired) and isfinite(actual)):
+            if isnan(desired) or isnan(actual):
+                if not (isnan(desired) and isnan(actual)):
+                    raise AssertionError(_build_err_msg())
+            else:
+                if not desired == actual:
+                    raise AssertionError(_build_err_msg())
+            return
+    except (NotImplementedError, TypeError):
+        pass
+    if abs(desired - actual) >= np.float64(1.5 * 10.0**(-decimal)):
+        raise AssertionError(_build_err_msg())
+
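The scalar branch reduces to the single comparison on the last line above; a worked instance of that threshold, reusing the numbers from the docstring example:

import numpy as np

actual, desired, decimal = 2.33333334, 2.3333333333333, 7
diff = abs(desired - actual)                      # ~6.67e-09
threshold = np.float64(1.5 * 10.0**(-decimal))    # 1.5e-07 for decimal=7
assert diff < threshold                           # passes at 7 decimals
assert not diff < np.float64(1.5 * 10.0**(-10))   # would fail at 10 decimals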
+
+@np._no_nep50_warning()
+def assert_approx_equal(actual, desired, significant=7, err_msg='',
+                        verbose=True):
+    """
+    Raises an AssertionError if two items are not equal up to significant
+    digits.
+
+    .. note:: It is recommended to use one of `assert_allclose`,
+              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+              instead of this function for more consistent floating point
+              comparisons.
+
+    Given two numbers, check that they are approximately equal.
+    Approximately equal is defined as the number of significant digits
+    that agree.
+
+    Parameters
+    ----------
+    actual : scalar
+        The object to check.
+    desired : scalar
+        The expected object.
+    significant : int, optional
+        Desired precision, default is 7.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+      If actual and desired are not equal up to specified precision.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Examples
+    --------
+    >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
+    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
+    ...                                significant=8)
+    >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
+    ...                                significant=8)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Items are not equal to 8 significant digits:
+     ACTUAL: 1.234567e-21
+     DESIRED: 1.2345672e-21
+
+    the evaluated condition that raises the exception is
+
+    >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
+    True
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import numpy as np
+
+    (actual, desired) = map(float, (actual, desired))
+    if desired == actual:
+        return
+    # Normalize the numbers to be in the range (-10.0, 10.0)
+    # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
+    with np.errstate(invalid='ignore'):
+        scale = 0.5*(np.abs(desired) + np.abs(actual))
+        scale = np.power(10, np.floor(np.log10(scale)))
+    try:
+        sc_desired = desired/scale
+    except ZeroDivisionError:
+        sc_desired = 0.0
+    try:
+        sc_actual = actual/scale
+    except ZeroDivisionError:
+        sc_actual = 0.0
+    msg = build_err_msg(
+        [actual, desired], err_msg,
+        header='Items are not equal to %d significant digits:' % significant,
+        verbose=verbose)
+    try:
+        # If one of desired/actual is not finite, handle it specially here:
+        # check that both are nan if any is a nan, and test for equality
+        # otherwise
+        if not (isfinite(desired) and isfinite(actual)):
+            if isnan(desired) or isnan(actual):
+                if not (isnan(desired) and isnan(actual)):
+                    raise AssertionError(msg)
+            else:
+                if not desired == actual:
+                    raise AssertionError(msg)
+            return
+    except (TypeError, NotImplementedError):
+        pass
+    if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
+        raise AssertionError(msg)
+
+
+@np._no_nep50_warning()
+def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
+                         precision=6, equal_nan=True, equal_inf=True,
+                         *, strict=False):
+    __tracebackhide__ = True  # Hide traceback for py.test
+    from numpy.core import (array2string, isnan, inf, bool_, errstate,
+                            all, max, object_)
+
+    x = np.asanyarray(x)
+    y = np.asanyarray(y)
+
+    # original array for output formatting
+    ox, oy = x, y
+
+    def isnumber(x):
+        return x.dtype.char in '?bhilqpBHILQPefdgFDG'
+
+    def istime(x):
+        return x.dtype.char in "Mm"
+
+    def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
+        """Handling nan/inf.
+
+        Combine results of running func on x and y, checking that they are True
+        at the same locations.
+
+        """
+        __tracebackhide__ = True  # Hide traceback for py.test
+
+        x_id = func(x)
+        y_id = func(y)
+        # We include work-arounds here to handle three types of slightly
+        # pathological ndarray subclasses:
+        # (1) all() on `masked` array scalars can return masked arrays, so we
+        #     use != True
+        # (2) __eq__ on some ndarray subclasses returns Python booleans
+        #     instead of element-wise comparisons, so we cast to bool_() and
+        #     use isinstance(..., bool) checks
+        # (3) subclasses with bare-bones __array_function__ implementations may
+        #     not implement np.all(), so favor using the .all() method
+        # We are not committed to supporting such subclasses, but it's nice to
+        # support them if possible.
+        if bool_(x_id == y_id).all() != True:
+            msg = build_err_msg([x, y],
+                                err_msg + '\nx and y %s location mismatch:'
+                                % (hasval), verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            raise AssertionError(msg)
+        # If there is a scalar, then here we know the array has the same
+        # flag as it everywhere, so we should return the scalar flag.
+        if isinstance(x_id, bool) or x_id.ndim == 0:
+            return bool_(x_id)
+        elif isinstance(y_id, bool) or y_id.ndim == 0:
+            return bool_(y_id)
+        else:
+            return y_id
+
+    try:
+        if strict:
+            cond = x.shape == y.shape and x.dtype == y.dtype
+        else:
+            cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
+        if not cond:
+            if x.shape != y.shape:
+                reason = f'\n(shapes {x.shape}, {y.shape} mismatch)'
+            else:
+                reason = f'\n(dtypes {x.dtype}, {y.dtype} mismatch)'
+            msg = build_err_msg([x, y],
+                                err_msg
+                                + reason,
+                                verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            raise AssertionError(msg)
+
+        flagged = bool_(False)
+        if isnumber(x) and isnumber(y):
+            if equal_nan:
+                flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
+
+            if equal_inf:
+                flagged |= func_assert_same_pos(x, y,
+                                                func=lambda xy: xy == +inf,
+                                                hasval='+inf')
+                flagged |= func_assert_same_pos(x, y,
+                                                func=lambda xy: xy == -inf,
+                                                hasval='-inf')
+
+        elif istime(x) and istime(y):
+            # If one is datetime64 and the other timedelta64 there is no point
+            if equal_nan and x.dtype.type == y.dtype.type:
+                flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
+
+        if flagged.ndim > 0:
+            x, y = x[~flagged], y[~flagged]
+            # Only do the comparison if actual values are left
+            if x.size == 0:
+                return
+        elif flagged:
+            # no sense doing comparison if everything is flagged.
+            return
+
+        val = comparison(x, y)
+
+        if isinstance(val, bool):
+            cond = val
+            reduced = array([val])
+        else:
+            reduced = val.ravel()
+            cond = reduced.all()
+
+        # The below comparison is a hack to ensure that fully masked
+        # results, for which val.ravel().all() returns np.ma.masked,
+        # do not trigger a failure (np.ma.masked != True evaluates as
+        # np.ma.masked, which is falsy).
+        if cond != True:
+            n_mismatch = reduced.size - reduced.sum(dtype=intp)
+            n_elements = flagged.size if flagged.ndim != 0 else reduced.size
+            percent_mismatch = 100 * n_mismatch / n_elements
+            remarks = [
+                'Mismatched elements: {} / {} ({:.3g}%)'.format(
+                    n_mismatch, n_elements, percent_mismatch)]
+
+            with errstate(all='ignore'):
+                # ignore errors for non-numeric types
+                with contextlib.suppress(TypeError):
+                    error = abs(x - y)
+                    if np.issubdtype(x.dtype, np.unsignedinteger):
+                        error2 = abs(y - x)
+                        np.minimum(error, error2, out=error)
+                    max_abs_error = max(error)
+                    if getattr(error, 'dtype', object_) == object_:
+                        remarks.append('Max absolute difference: '
+                                       + str(max_abs_error))
+                    else:
+                        remarks.append('Max absolute difference: '
+                                       + array2string(max_abs_error))
+
+                    # note: this definition of relative error matches that one
+                    # used by assert_allclose (found in np.isclose)
+                    # Filter values where the divisor would be zero
+                    nonzero = bool_(y != 0)
+                    if all(~nonzero):
+                        max_rel_error = array(inf)
+                    else:
+                        max_rel_error = max(error[nonzero] / abs(y[nonzero]))
+                    if getattr(error, 'dtype', object_) == object_:
+                        remarks.append('Max relative difference: '
+                                       + str(max_rel_error))
+                    else:
+                        remarks.append('Max relative difference: '
+                                       + array2string(max_rel_error))
+
+            err_msg += '\n' + '\n'.join(remarks)
+            msg = build_err_msg([ox, oy], err_msg,
+                                verbose=verbose, header=header,
+                                names=('x', 'y'), precision=precision)
+            raise AssertionError(msg)
+    except ValueError:
+        import traceback
+        efmt = traceback.format_exc()
+        header = f'error during assertion:\n\n{efmt}\n\n{header}'
+
+        msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
+                            names=('x', 'y'), precision=precision)
+        raise ValueError(msg)
+
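`assert_array_compare` is the shared engine: the public helpers differ only in the `comparison` callable and header they pass (as `assert_array_equal` does with `operator.__eq__` just below). A hedged sketch with a custom predicate:

import operator
import numpy as np
from numpy.testing import assert_array_compare

x = np.array([1.0, 2.0, np.nan])
y = np.array([1.0, 5.0, np.nan])
# element-wise <=; NaNs are accepted because they occur at the same positions
assert_array_compare(operator.__le__, x, y,
                     header='Arrays are not ordered by <=')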
+
+def assert_array_equal(x, y, err_msg='', verbose=True, *, strict=False):
+    """
+    Raises an AssertionError if two array_like objects are not equal.
+
+    Given two array_like objects, check that the shape is equal and all
+    elements of these objects are equal (but see the Notes for the special
+    handling of a scalar). An exception is raised at shape mismatch or
+    conflicting values. In contrast to the standard usage in numpy, NaNs
+    are compared like numbers, no assertion is raised if both objects have
+    NaNs in the same positions.
+
+    The usual caution for verifying equality with floating point numbers is
+    advised.
+
+    Parameters
+    ----------
+    x : array_like
+        The actual object to check.
+    y : array_like
+        The desired, expected object.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+    strict : bool, optional
+        If True, raise an AssertionError when either the shape or the data
+        type of the array_like objects does not match. The special
+        handling for scalars mentioned in the Notes section is disabled.
+
+        .. versionadded:: 1.24.0
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired objects are not equal.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Notes
+    -----
+    When one of `x` and `y` is a scalar and the other is array_like, the
+    function checks that each element of the array_like object is equal to
+    the scalar. This behaviour can be disabled with the `strict` parameter.
+
+    Examples
+    --------
+    The first assert does not raise an exception:
+
+    >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
+    ...                               [np.exp(0),2.33333, np.nan])
+
+    Assert fails with numerical imprecision with floats:
+
+    >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
+    ...                               [1, np.sqrt(np.pi)**2, np.nan])
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not equal
+    
+    Mismatched elements: 1 / 3 (33.3%)
+    Max absolute difference: 4.4408921e-16
+    Max relative difference: 1.41357986e-16
+     x: array([1.      , 3.141593,      nan])
+     y: array([1.      , 3.141593,      nan])
+
+    Use `assert_allclose` or one of the nulp (number of floating point values)
+    functions for these cases instead:
+
+    >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
+    ...                            [1, np.sqrt(np.pi)**2, np.nan],
+    ...                            rtol=1e-10, atol=0)
+
+    As mentioned in the Notes section, `assert_array_equal` has special
+    handling for scalars. Here the test checks that each value in `x` is 3:
+
+    >>> x = np.full((2, 5), fill_value=3)
+    >>> np.testing.assert_array_equal(x, 3)
+
+    Use `strict` to raise an AssertionError when comparing a scalar with an
+    array:
+
+    >>> np.testing.assert_array_equal(x, 3, strict=True)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not equal
+    
+    (shapes (2, 5), () mismatch)
+     x: array([[3, 3, 3, 3, 3],
+           [3, 3, 3, 3, 3]])
+     y: array(3)
+
+    The `strict` parameter also ensures that the array data types match:
+
+    >>> x = np.array([2, 2, 2])
+    >>> y = np.array([2., 2., 2.], dtype=np.float32)
+    >>> np.testing.assert_array_equal(x, y, strict=True)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not equal
+    
+    (dtypes int64, float32 mismatch)
+     x: array([2, 2, 2])
+     y: array([2., 2., 2.], dtype=float32)
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
+                         verbose=verbose, header='Arrays are not equal',
+                         strict=strict)
+
+
+@np._no_nep50_warning()
+def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two objects are not equal up to desired
+    precision.
+
+    .. note:: It is recommended to use one of `assert_allclose`,
+              `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+              instead of this function for more consistent floating point
+              comparisons.
+
+    The test verifies identical shapes and that the elements of ``actual`` and
+    ``desired`` satisfy:
+
+        ``abs(desired-actual) < 1.5 * 10**(-decimal)``
+
+    That is a looser test than originally documented, but agrees with what the
+    actual implementation did up to rounding vagaries. An exception is raised
+    at shape mismatch or conflicting values. In contrast to the standard usage
+    in numpy, NaNs are compared like numbers, no assertion is raised if both
+    objects have NaNs in the same positions.
+
+    Parameters
+    ----------
+    x : array_like
+        The actual object to check.
+    y : array_like
+        The desired, expected object.
+    decimal : int, optional
+        Desired precision, default is 6.
+    err_msg : str, optional
+      The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired are not equal up to specified precision.
+
+    See Also
+    --------
+    assert_allclose: Compare two array_like objects for equality with desired
+                     relative and/or absolute precision.
+    assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+    Examples
+    --------
+    the first assert does not raise an exception
+
+    >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
+    ...                                      [1.0,2.333,np.nan])
+
+    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+    ...                                      [1.0,2.33339,np.nan], decimal=5)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not almost equal to 5 decimals
+    
+    Mismatched elements: 1 / 3 (33.3%)
+    Max absolute difference: 6.e-05
+    Max relative difference: 2.57136612e-05
+     x: array([1.     , 2.33333,     nan])
+     y: array([1.     , 2.33339,     nan])
+
+    >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+    ...                                      [1.0,2.33333, 5], decimal=5)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not almost equal to 5 decimals
+    
+    x and y nan location mismatch:
+     x: array([1.     , 2.33333,     nan])
+     y: array([1.     , 2.33333, 5.     ])
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    from numpy.core import number, float_, result_type
+    from numpy.core.numerictypes import issubdtype
+    from numpy.core.fromnumeric import any as npany
+
+    def compare(x, y):
+        try:
+            if npany(isinf(x)) or npany(isinf(y)):
+                xinfid = isinf(x)
+                yinfid = isinf(y)
+                if not (xinfid == yinfid).all():
+                    return False
+                # if one item, x and y is +- inf
+                if x.size == y.size == 1:
+                    return x == y
+                x = x[~xinfid]
+                y = y[~yinfid]
+        except (TypeError, NotImplementedError):
+            pass
+
+        # make sure y is an inexact type to avoid abs(MIN_INT); will cause
+        # casting of x later.
+        dtype = result_type(y, 1.)
+        y = np.asanyarray(y, dtype)
+        z = abs(x - y)
+
+        if not issubdtype(z.dtype, number):
+            z = z.astype(float_)  # handle object arrays
+
+        return z < 1.5 * 10.0**(-decimal)
+
+    assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+             header=('Arrays are not almost equal to %d decimals' % decimal),
+             precision=decimal)
+
+
+def assert_array_less(x, y, err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two array_like objects are not ordered by less
+    than.
+
+    Given two array_like objects, check that the shape is equal and all
+    elements of the first object are strictly smaller than those of the
+    second object. An exception is raised at shape mismatch or incorrectly
+    ordered values. Shape mismatch does not raise if an object has zero
+    dimension. In contrast to the standard usage in numpy, NaNs are
+    compared, no assertion is raised if both objects have NaNs in the same
+    positions.
+
+    Parameters
+    ----------
+    x : array_like
+      The smaller object to check.
+    y : array_like
+      The larger object to compare.
+    err_msg : string
+      The error message to be printed in case of failure.
+    verbose : bool
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+      If x is not strictly smaller than y, element-wise.
+
+    See Also
+    --------
+    assert_array_equal: tests objects for equality
+    assert_array_almost_equal: test objects for equality up to precision
+
+    Examples
+    --------
+    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
+    >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not less-ordered
+    
+    Mismatched elements: 1 / 3 (33.3%)
+    Max absolute difference: 1.
+    Max relative difference: 0.5
+     x: array([ 1.,  1., nan])
+     y: array([ 1.,  2., nan])
+
+    >>> np.testing.assert_array_less([1.0, 4.0], 3)
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not less-ordered
+    
+    Mismatched elements: 1 / 2 (50%)
+    Max absolute difference: 2.
+    Max relative difference: 0.66666667
+     x: array([1., 4.])
+     y: array(3)
+
+    >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
+    Traceback (most recent call last):
+        ...
+    AssertionError:
+    Arrays are not less-ordered
+    
+    (shapes (3,), (1,) mismatch)
+     x: array([1., 2., 3.])
+     y: array([4])
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
+                         verbose=verbose,
+                         header='Arrays are not less-ordered',
+                         equal_inf=False)
+
+
+def runstring(astr, dict):
+    # Execute the code string ``astr`` in the given namespace dictionary.
+    exec(astr, dict)
+
+
+def assert_string_equal(actual, desired):
+    """
+    Test if two strings are equal.
+
+    If the given strings are equal, `assert_string_equal` does nothing.
+    If they are not equal, an AssertionError is raised, and the diff
+    between the strings is shown.
+
+    Parameters
+    ----------
+    actual : str
+        The string to test for equality against the expected string.
+    desired : str
+        The expected string.
+
+    Examples
+    --------
+    >>> np.testing.assert_string_equal('abc', 'abc')
+    >>> np.testing.assert_string_equal('abc', 'abcd')
+    Traceback (most recent call last):
+      File "", line 1, in 
+    ...
+    AssertionError: Differences in strings:
+    - abc+ abcd?    +
+
+    """
+    # delay import of difflib to reduce startup time
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import difflib
+
+    if not isinstance(actual, str):
+        raise AssertionError(repr(type(actual)))
+    if not isinstance(desired, str):
+        raise AssertionError(repr(type(desired)))
+    if desired == actual:
+        return
+
+    diff = list(difflib.Differ().compare(actual.splitlines(True),
+                desired.splitlines(True)))
+    diff_list = []
+    while diff:
+        d1 = diff.pop(0)
+        if d1.startswith('  '):
+            continue
+        if d1.startswith('- '):
+            l = [d1]
+            d2 = diff.pop(0)
+            if d2.startswith('? '):
+                l.append(d2)
+                d2 = diff.pop(0)
+            if not d2.startswith('+ '):
+                raise AssertionError(repr(d2))
+            l.append(d2)
+            if diff:
+                d3 = diff.pop(0)
+                if d3.startswith('? '):
+                    l.append(d3)
+                else:
+                    diff.insert(0, d3)
+            if d2[2:] == d1[2:]:
+                continue
+            diff_list.extend(l)
+            continue
+        raise AssertionError(repr(d1))
+    if not diff_list:
+        return
+    msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}"
+    if actual != desired:
+        raise AssertionError(msg)
+
+
+def rundocs(filename=None, raise_on_error=True):
+    """
+    Run doctests found in the given file.
+
+    By default `rundocs` raises an AssertionError on failure.
+
+    Parameters
+    ----------
+    filename : str
+        The path to the file for which the doctests are run.
+    raise_on_error : bool
+        Whether to raise an AssertionError when a doctest fails. Default is
+        True.
+
+    Notes
+    -----
+    The doctests can be run by the user/developer by adding the ``doctests``
+    argument to the ``test()`` call. For example, to run all tests (including
+    doctests) for `numpy.lib`:
+
+    >>> np.lib.test(doctests=True)  # doctest: +SKIP
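+
+    A sketch of calling `rundocs` directly on a file (the path is illustrative):
+
+    >>> np.testing.rundocs('my_module_with_doctests.py')  # doctest: +SKIP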
+    """
+    from numpy.distutils.misc_util import exec_mod_from_location
+    import doctest
+    if filename is None:
+        f = sys._getframe(1)
+        filename = f.f_globals['__file__']
+    name = os.path.splitext(os.path.basename(filename))[0]
+    m = exec_mod_from_location(name, filename)
+
+    tests = doctest.DocTestFinder().find(m)
+    runner = doctest.DocTestRunner(verbose=False)
+
+    msg = []
+    if raise_on_error:
+        out = lambda s: msg.append(s)
+    else:
+        out = None
+
+    for test in tests:
+        runner.run(test, out=out)
+
+    if runner.failures > 0 and raise_on_error:
+        raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
+
+
+def check_support_sve():
+    """
+    Check whether the CPU running the tests supports the Arm SVE extension
+    (see gh-22982).
+    """
+    
+    import subprocess
+    cmd = 'lscpu'
+    try:
+        output = subprocess.run(cmd, capture_output=True, text=True)
+        return 'sve' in output.stdout
+    except OSError:
+        return False
+
+
+_SUPPORTS_SVE = check_support_sve()
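+
+# A sketch of how this flag is typically consumed (the test name below is
+# hypothetical):
+#
+#   @pytest.mark.skipif(_SUPPORTS_SVE, reason="gh-22982: behaves differently under SVE")
+#   def test_example():
+#       ...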
+
+#
+# assert_raises and assert_raises_regex are taken from unittest.
+#
+import unittest
+
+
+class _Dummy(unittest.TestCase):
+    def nop(self):
+        pass
+
+
+_d = _Dummy('nop')
+
+
+def assert_raises(*args, **kwargs):
+    """
+    assert_raises(exception_class, callable, *args, **kwargs)
+    assert_raises(exception_class)
+
+    Fail unless an exception of class exception_class is thrown
+    by callable when invoked with arguments args and keyword
+    arguments kwargs. If a different type of exception is
+    thrown, it will not be caught, and the test case will be
+    deemed to have suffered an error, exactly as for an
+    unexpected exception.
+
+    Alternatively, `assert_raises` can be used as a context manager:
+
+    >>> from numpy.testing import assert_raises
+    >>> with assert_raises(ZeroDivisionError):
+    ...     1 / 0
+
+    is equivalent to
+
+    >>> def div(x, y):
+    ...     return x / y
+    >>> assert_raises(ZeroDivisionError, div, 1, 0)
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    return _d.assertRaises(*args, **kwargs)
+
+
+def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
+    """
+    assert_raises_regex(exception_class, expected_regexp, callable, *args,
+                        **kwargs)
+    assert_raises_regex(exception_class, expected_regexp)
+
+    Fail unless an exception of class exception_class and with message that
+    matches expected_regexp is thrown by callable when invoked with arguments
+    args and keyword arguments kwargs.
+
+    Alternatively, can be used as a context manager like `assert_raises`.
+
+    Notes
+    -----
+    .. versionadded:: 1.9.0
+
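+    Examples
+    --------
+    A minimal sketch of both calling styles:
+
+    >>> from numpy.testing import assert_raises_regex
+    >>> with assert_raises_regex(ValueError, 'invalid literal'):
+    ...     int('not a number')
+    >>> assert_raises_regex(ZeroDivisionError, 'division', lambda: 1 / 0)
+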
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs)
+
+
+def decorate_methods(cls, decorator, testmatch=None):
+    """
+    Apply a decorator to all methods in a class matching a regular expression.
+
+    The given decorator is applied to all public methods of `cls` that are
+    matched by the regular expression `testmatch`
+    (``testmatch.search(methodname)``). Methods that are private, i.e. start
+    with an underscore, are ignored.
+
+    Parameters
+    ----------
+    cls : class
+        Class whose methods to decorate.
+    decorator : function
+        Decorator to apply to methods
+    testmatch : compiled regexp or str, optional
+        The regular expression. Default value is None, in which case the
+        nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
+        is used.
+        If `testmatch` is a string, it is compiled to a regular expression
+        first.
+
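+    Examples
+    --------
+    A minimal sketch; the ``traced`` decorator and ``TestLog`` class below are
+    purely illustrative:
+
+    >>> def traced(func):
+    ...     def wrapper(*args, **kwargs):
+    ...         print('calling', func.__name__)
+    ...         return func(*args, **kwargs)
+    ...     return wrapper
+    >>> class TestLog:
+    ...     def test_basic(self):
+    ...         return 1
+    >>> np.testing.decorate_methods(TestLog, traced)
+    >>> TestLog().test_basic()
+    calling test_basic
+    1
+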
+    """
+    if testmatch is None:
+        testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+    else:
+        testmatch = re.compile(testmatch)
+    cls_attr = cls.__dict__
+
+    # delayed import to reduce startup time
+    from inspect import isfunction
+
+    methods = [_m for _m in cls_attr.values() if isfunction(_m)]
+    for function in methods:
+        try:
+            if hasattr(function, 'compat_func_name'):
+                funcname = function.compat_func_name
+            else:
+                funcname = function.__name__
+        except AttributeError:
+            # not a function
+            continue
+        if testmatch.search(funcname) and not funcname.startswith('_'):
+            setattr(cls, funcname, decorator(function))
+    return
+
+
+def measure(code_str, times=1, label=None):
+    """
+    Return elapsed time for executing code in the namespace of the caller.
+
+    The supplied code string is compiled with the Python builtin ``compile``.
+    The precision of the timing is 10 milliseconds. If the code will execute
+    fast on this timescale, it can be executed many times to get reasonable
+    timing accuracy.
+
+    Parameters
+    ----------
+    code_str : str
+        The code to be timed.
+    times : int, optional
+        The number of times the code is executed. Default is 1. The code is
+        only compiled once.
+    label : str, optional
+        A label to identify `code_str` with. This is passed into ``compile``
+        as the second argument (for run-time error messages).
+
+    Returns
+    -------
+    elapsed : float
+        Total elapsed time in seconds for executing `code_str` `times` times.
+
+    Examples
+    --------
+    >>> times = 10
+    >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times)
+    >>> print("Time for a single execution : ", etime / times, "s")  # doctest: +SKIP
+    Time for a single execution :  0.005 s
+
+    """
+    frame = sys._getframe(1)
+    locs, globs = frame.f_locals, frame.f_globals
+
+    code = compile(code_str, f'Test name: {label} ', 'exec')
+    i = 0
+    elapsed = jiffies()
+    while i < times:
+        i += 1
+        exec(code, globs, locs)
+    elapsed = jiffies() - elapsed
+    return 0.01*elapsed
+
+
+def _assert_valid_refcount(op):
+    """
+    Check that ufuncs don't mishandle refcount of object `1`.
+    Used in a few regression tests.
+    """
+    if not HAS_REFCOUNT:
+        return True
+
+    import gc
+    import numpy as np
+
+    b = np.arange(100*100).reshape(100, 100)
+    c = b
+    i = 1
+
+    gc.disable()
+    try:
+        rc = sys.getrefcount(i)
+        for j in range(15):
+            d = op(b, c)
+        assert_(sys.getrefcount(i) >= rc)
+    finally:
+        gc.enable()
+    del d  # for pyflakes
+
+
+def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
+                    err_msg='', verbose=True):
+    """
+    Raises an AssertionError if two objects are not equal up to desired
+    tolerance.
+
+    Given two array_like objects, check that their shapes and all elements
+    are equal (but see the Notes for the special handling of a scalar). An
+    exception is raised if the shapes mismatch or any values conflict. In
+    contrast to the standard usage in numpy, NaNs are compared like numbers;
+    no assertion is raised if both objects have NaNs in the same positions.
+
+    The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note
+    that ``allclose`` has different default values). It compares the difference
+    between `actual` and `desired` to ``atol + rtol * abs(desired)``.
+
+    .. versionadded:: 1.5.0
+
+    Parameters
+    ----------
+    actual : array_like
+        Array obtained.
+    desired : array_like
+        Array desired.
+    rtol : float, optional
+        Relative tolerance.
+    atol : float, optional
+        Absolute tolerance.
+    equal_nan : bool, optional.
+        If True, NaNs will compare equal.
+    err_msg : str, optional
+        The error message to be printed in case of failure.
+    verbose : bool, optional
+        If True, the conflicting values are appended to the error message.
+
+    Raises
+    ------
+    AssertionError
+        If actual and desired are not equal up to specified precision.
+
+    See Also
+    --------
+    assert_array_almost_equal_nulp, assert_array_max_ulp
+
+    Notes
+    -----
+    When one of `actual` and `desired` is a scalar and the other is
+    array_like, the function checks that each element of the array_like
+    object is equal to the scalar.
+
+    Examples
+    --------
+    >>> x = [1e-5, 1e-3, 1e-1]
+    >>> y = np.arccos(np.cos(x))
+    >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0)
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import numpy as np
+
+    def compare(x, y):
+        return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
+                                       equal_nan=equal_nan)
+
+    actual, desired = np.asanyarray(actual), np.asanyarray(desired)
+    header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}'
+    assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
+                         verbose=verbose, header=header, equal_nan=equal_nan)
+
+
+def assert_array_almost_equal_nulp(x, y, nulp=1):
+    """
+    Compare two arrays relatively to their spacing.
+
+    This is a relatively robust method to compare two arrays whose amplitude
+    is variable.
+
+    Parameters
+    ----------
+    x, y : array_like
+        Input arrays.
+    nulp : int, optional
+        The maximum number of unit in the last place for tolerance (see Notes).
+        Default is 1.
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    AssertionError
+        If the spacing between `x` and `y` for one or more elements is larger
+        than `nulp`.
+
+    See Also
+    --------
+    assert_array_max_ulp : Check that all items of arrays differ in at most
+        N Units in the Last Place.
+    spacing : Return the distance between x and the nearest adjacent number.
+
+    Notes
+    -----
+    An assertion is raised if the following condition is not met::
+
+        abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))
+
+    Examples
+    --------
+    >>> x = np.array([1., 1e-10, 1e-20])
+    >>> eps = np.finfo(x.dtype).eps
+    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
+
+    >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
+    Traceback (most recent call last):
+      ...
+    AssertionError: X and Y are not equal to 1 ULP (max is 2)
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import numpy as np
+    ax = np.abs(x)
+    ay = np.abs(y)
+    ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
+    if not np.all(np.abs(x-y) <= ref):
+        if np.iscomplexobj(x) or np.iscomplexobj(y):
+            msg = "X and Y are not equal to %d ULP" % nulp
+        else:
+            max_nulp = np.max(nulp_diff(x, y))
+            msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
+        raise AssertionError(msg)
+
+
+def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
+    """
+    Check that all items of arrays differ in at most N Units in the Last Place.
+
+    Parameters
+    ----------
+    a, b : array_like
+        Input arrays to be compared.
+    maxulp : int, optional
+        The maximum number of units in the last place that elements of `a` and
+        `b` can differ. Default is 1.
+    dtype : dtype, optional
+        Data-type to convert `a` and `b` to if given. Default is None.
+
+    Returns
+    -------
+    ret : ndarray
+        Array containing number of representable floating point numbers between
+        items in `a` and `b`.
+
+    Raises
+    ------
+    AssertionError
+        If one or more elements differ by more than `maxulp`.
+
+    Notes
+    -----
+    For computing the ULP difference, this API does not differentiate between
+    various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
+    is zero).
+
+    See Also
+    --------
+    assert_array_almost_equal_nulp : Compare two arrays relatively to their
+        spacing.
+
+    Examples
+    --------
+    >>> a = np.linspace(0., 1., 100)
+    >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
+
+    """
+    __tracebackhide__ = True  # Hide traceback for py.test
+    import numpy as np
+    ret = nulp_diff(a, b, dtype)
+    if not np.all(ret <= maxulp):
+        raise AssertionError("Arrays are not almost equal up to %g "
+                             "ULP (max difference is %g ULP)" %
+                             (maxulp, np.max(ret)))
+    return ret
+
+
+def nulp_diff(x, y, dtype=None):
+    """For each item in x and y, return the number of representable floating
+    points between them.
+
+    Parameters
+    ----------
+    x : array_like
+        first input array
+    y : array_like
+        second input array
+    dtype : dtype, optional
+        Data-type to convert `x` and `y` to if given. Default is None.
+
+    Returns
+    -------
+    nulp : array_like
+        number of representable floating point numbers between each item in x
+        and y.
+
+    Notes
+    -----
+    For computing the ULP difference, this API does not differentiate between
+    various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
+    is zero).
+
+    Examples
+    --------
+    # By definition, epsilon is the smallest number such that 1 + eps != 1, so
+    # there should be exactly one ULP between 1 and 1 + eps
+    >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
+    1.0
+    """
+    import numpy as np
+    if dtype:
+        x = np.asarray(x, dtype=dtype)
+        y = np.asarray(y, dtype=dtype)
+    else:
+        x = np.asarray(x)
+        y = np.asarray(y)
+
+    t = np.common_type(x, y)
+    if np.iscomplexobj(x) or np.iscomplexobj(y):
+        raise NotImplementedError("_nulp not implemented for complex array")
+
+    x = np.array([x], dtype=t)
+    y = np.array([y], dtype=t)
+
+    x[np.isnan(x)] = np.nan
+    y[np.isnan(y)] = np.nan
+
+    if not x.shape == y.shape:
+        raise ValueError("x and y do not have the same shape: %s - %s" %
+                         (x.shape, y.shape))
+
+    def _diff(rx, ry, vdt):
+        diff = np.asarray(rx-ry, dtype=vdt)
+        return np.abs(diff)
+
+    rx = integer_repr(x)
+    ry = integer_repr(y)
+    return _diff(rx, ry, t)
+
+
+def _integer_repr(x, vdt, comp):
+    # Reinterpret binary representation of the float as sign-magnitude:
+    # take into account the two's-complement representation
+    # See also
+    # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+    rx = x.view(vdt)
+    if not (rx.size == 1):
+        rx[rx < 0] = comp - rx[rx < 0]
+    else:
+        if rx < 0:
+            rx = comp - rx
+
+    return rx
+
+
+def integer_repr(x):
+    """Return the signed-magnitude interpretation of the binary representation
+    of x."""
+    import numpy as np
+    if x.dtype == np.float16:
+        return _integer_repr(x, np.int16, np.int16(-2**15))
+    elif x.dtype == np.float32:
+        return _integer_repr(x, np.int32, np.int32(-2**31))
+    elif x.dtype == np.float64:
+        return _integer_repr(x, np.int64, np.int64(-2**63))
+    else:
+        raise ValueError(f'Unsupported dtype {x.dtype}')
+
+
+@contextlib.contextmanager
+def _assert_warns_context(warning_class, name=None):
+    __tracebackhide__ = True  # Hide traceback for py.test
+    with suppress_warnings() as sup:
+        l = sup.record(warning_class)
+        yield
+        if not len(l) > 0:
+            name_str = f' when calling {name}' if name is not None else ''
+            raise AssertionError("No warning raised" + name_str)
+
+
+def assert_warns(warning_class, *args, **kwargs):
+    """
+    Fail unless the given callable throws the specified warning.
+
+    A warning of class warning_class should be thrown by the callable when
+    invoked with arguments args and keyword arguments kwargs.
+    If a different type of warning is thrown, it will not be caught.
+
+    If called with all arguments other than the warning class omitted, may be
+    used as a context manager:
+
+        with assert_warns(SomeWarning):
+            do_something()
+
+    The ability to be used as a context manager is new in NumPy v1.11.0.
+
+    .. versionadded:: 1.4.0
+
+    Parameters
+    ----------
+    warning_class : class
+        The class defining the warning that `func` is expected to throw.
+    func : callable, optional
+        Callable to test
+    *args : Arguments
+        Arguments for `func`.
+    **kwargs : Kwargs
+        Keyword arguments for `func`.
+
+    Returns
+    -------
+    The value returned by `func`.
+
+    Examples
+    --------
+    >>> import warnings
+    >>> def deprecated_func(num):
+    ...     warnings.warn("Please upgrade", DeprecationWarning)
+    ...     return num*num
+    >>> with np.testing.assert_warns(DeprecationWarning):
+    ...     assert deprecated_func(4) == 16
+    >>> # or passing a func
+    >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4)
+    >>> assert ret == 16
+    """
+    if not args:
+        return _assert_warns_context(warning_class)
+
+    func = args[0]
+    args = args[1:]
+    with _assert_warns_context(warning_class, name=func.__name__):
+        return func(*args, **kwargs)
+
+
+@contextlib.contextmanager
+def _assert_no_warnings_context(name=None):
+    __tracebackhide__ = True  # Hide traceback for py.test
+    with warnings.catch_warnings(record=True) as l:
+        warnings.simplefilter('always')
+        yield
+        if len(l) > 0:
+            name_str = f' when calling {name}' if name is not None else ''
+            raise AssertionError(f'Got warnings{name_str}: {l}')
+
+
+def assert_no_warnings(*args, **kwargs):
+    """
+    Fail if the given callable produces any warnings.
+
+    If called with all arguments omitted, may be used as a context manager:
+
+        with assert_no_warnings():
+            do_something()
+
+    The ability to be used as a context manager is new in NumPy v1.11.0.
+
+    .. versionadded:: 1.7.0
+
+    Parameters
+    ----------
+    func : callable
+        The callable to test.
+    \\*args : Arguments
+        Arguments passed to `func`.
+    \\*\\*kwargs : Kwargs
+        Keyword arguments passed to `func`.
+
+    Returns
+    -------
+    The value returned by `func`.
+
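+    Examples
+    --------
+    A minimal sketch of both calling styles:
+
+    >>> with np.testing.assert_no_warnings():
+    ...     x = 1 + 1
+    >>> np.testing.assert_no_warnings(lambda: 2 * 2)
+    4
+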
+    """
+    if not args:
+        return _assert_no_warnings_context()
+
+    func = args[0]
+    args = args[1:]
+    with _assert_no_warnings_context(name=func.__name__):
+        return func(*args, **kwargs)
+
+
+def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
+    """
+    generator producing data with different alignment and offsets
+    to test simd vectorization
+
+    Parameters
+    ----------
+    dtype : dtype
+        data type to produce
+    type : string
+        'unary': create data for unary operations, creates one input
+                 and output array
+        'binary': create data for binary operations, creates two input
+                 and output array
+    max_size : integer
+        maximum size of data to produce
+
+    Returns
+    -------
+    if type is 'unary' yields one output array, one input array and a message
+    containing information on the data
+    if type is 'binary' yields one output array, two input arrays and a message
+    containing information on the data
+
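+    Examples
+    --------
+    A sketch of how the generated cases are typically consumed:
+
+    >>> for out, inp, msg in _gen_alignment_data(type='unary', max_size=4):  # doctest: +SKIP
+    ...     np.sqrt(inp, out=out)
+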
+    """
+    ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
+    bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
+    for o in range(3):
+        for s in range(o + 2, max(o + 3, max_size)):
+            if type == 'unary':
+                inp = lambda: arange(s, dtype=dtype)[o:]
+                out = empty((s,), dtype=dtype)[o:]
+                yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
+                d = inp()
+                yield d, d, ufmt % (o, o, s, dtype, 'in place')
+                yield out[1:], inp()[:-1], ufmt % \
+                    (o + 1, o, s - 1, dtype, 'out of place')
+                yield out[:-1], inp()[1:], ufmt % \
+                    (o, o + 1, s - 1, dtype, 'out of place')
+                yield inp()[:-1], inp()[1:], ufmt % \
+                    (o, o + 1, s - 1, dtype, 'aliased')
+                yield inp()[1:], inp()[:-1], ufmt % \
+                    (o + 1, o, s - 1, dtype, 'aliased')
+            if type == 'binary':
+                inp1 = lambda: arange(s, dtype=dtype)[o:]
+                inp2 = lambda: arange(s, dtype=dtype)[o:]
+                out = empty((s,), dtype=dtype)[o:]
+                yield out, inp1(), inp2(),  bfmt % \
+                    (o, o, o, s, dtype, 'out of place')
+                d = inp1()
+                yield d, d, inp2(), bfmt % \
+                    (o, o, o, s, dtype, 'in place1')
+                d = inp2()
+                yield d, inp1(), d, bfmt % \
+                    (o, o, o, s, dtype, 'in place2')
+                yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+                    (o + 1, o, o, s - 1, dtype, 'out of place')
+                yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+                    (o, o + 1, o, s - 1, dtype, 'out of place')
+                yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+                    (o, o, o + 1, s - 1, dtype, 'out of place')
+                yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
+                    (o + 1, o, o, s - 1, dtype, 'aliased')
+                yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
+                    (o, o + 1, o, s - 1, dtype, 'aliased')
+                yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
+                    (o, o, o + 1, s - 1, dtype, 'aliased')
+
+
+class IgnoreException(Exception):
+    "Ignoring this exception due to disabled feature"
+    pass
+
+
+@contextlib.contextmanager
+def tempdir(*args, **kwargs):
+    """Context manager to provide a temporary test folder.
+
+    All arguments are passed as-is to the underlying tempfile.mkdtemp
+    function.
+
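+    Examples
+    --------
+    A minimal sketch of typical use:
+
+    >>> import os
+    >>> with np.testing.tempdir() as tmpdir:
+    ...     assert os.path.isdir(tmpdir)
+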
+    """
+    tmpdir = mkdtemp(*args, **kwargs)
+    try:
+        yield tmpdir
+    finally:
+        shutil.rmtree(tmpdir)
+
+
+@contextlib.contextmanager
+def temppath(*args, **kwargs):
+    """Context manager for temporary files.
+
+    Context manager that returns the path to a closed temporary file. Its
+    parameters are the same as for tempfile.mkstemp and are passed directly
+    to that function. The underlying file is removed when the context is
+    exited, so it should be closed at that time.
+
+    Windows does not allow a temporary file to be opened if it is already
+    open, so the underlying file must be closed after opening before it
+    can be opened again.
+
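+    Examples
+    --------
+    A minimal sketch of typical use:
+
+    >>> with np.testing.temppath(suffix='.txt') as path:
+    ...     with open(path, 'w') as f:
+    ...         _ = f.write('some test data')
+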
+    """
+    fd, path = mkstemp(*args, **kwargs)
+    os.close(fd)
+    try:
+        yield path
+    finally:
+        os.remove(path)
+
+
+class clear_and_catch_warnings(warnings.catch_warnings):
+    """ Context manager that resets warning registry for catching warnings
+
+    Warnings can be slippery, because, whenever a warning is triggered, Python
+    adds a ``__warningregistry__`` member to the *calling* module.  This makes
+    it impossible to retrigger the warning in this module, whatever you put in
+    the warnings filters.  This context manager accepts a sequence of `modules`
+    as a keyword argument to its constructor and:
+
+    * stores and removes any ``__warningregistry__`` entries in given `modules`
+      on entry;
+    * resets ``__warningregistry__`` to its previous state on exit.
+
+    This makes it possible to trigger any warning afresh inside the context
+    manager without disturbing the state of warnings outside.
+
+    For compatibility with Python 3.0, please consider all arguments to be
+    keyword-only.
+
+    Parameters
+    ----------
+    record : bool, optional
+        Specifies whether warnings should be captured by a custom
+        implementation of ``warnings.showwarning()`` and be appended to a list
+        returned by the context manager. Otherwise None is returned by the
+        context manager. The objects appended to the list are arguments whose
+        attributes mirror the arguments to ``showwarning()``.
+    modules : sequence, optional
+        Sequence of modules for which to reset warnings registry on entry and
+        restore on exit. To work correctly, all 'ignore' filters should
+        filter by one of these modules.
+
+    Examples
+    --------
+    >>> import warnings
+    >>> with np.testing.clear_and_catch_warnings(
+    ...         modules=[np.core.fromnumeric]):
+    ...     warnings.simplefilter('always')
+    ...     warnings.filterwarnings('ignore', module='np.core.fromnumeric')
+    ...     # do something that raises a warning but ignore those in
+    ...     # np.core.fromnumeric
+    """
+    class_modules = ()
+
+    def __init__(self, record=False, modules=()):
+        self.modules = set(modules).union(self.class_modules)
+        self._warnreg_copies = {}
+        super().__init__(record=record)
+
+    def __enter__(self):
+        for mod in self.modules:
+            if hasattr(mod, '__warningregistry__'):
+                mod_reg = mod.__warningregistry__
+                self._warnreg_copies[mod] = mod_reg.copy()
+                mod_reg.clear()
+        return super().__enter__()
+
+    def __exit__(self, *exc_info):
+        super().__exit__(*exc_info)
+        for mod in self.modules:
+            if hasattr(mod, '__warningregistry__'):
+                mod.__warningregistry__.clear()
+            if mod in self._warnreg_copies:
+                mod.__warningregistry__.update(self._warnreg_copies[mod])
+
+
+class suppress_warnings:
+    """
+    Context manager and decorator doing much the same as
+    ``warnings.catch_warnings``.
+
+    However, it also provides a filter mechanism to work around
+    https://bugs.python.org/issue4180.
+
+    This bug causes Python before 3.4 to not reliably show warnings again
+    after they have been ignored once (even within catch_warnings). It
+    means that no "ignore" filter can be used easily, since following
+    tests might need to see the warning. Additionally it allows easier
+    specificity for testing warnings and can be nested.
+
+    Parameters
+    ----------
+    forwarding_rule : str, optional
+        One of "always", "once", "module", or "location". Analogous to
+        the usual warnings module filter mode, it is useful to reduce
+        noise mostly on the outermost level. Unsuppressed and unrecorded
+        warnings will be forwarded based on this rule. Defaults to "always".
+        "location" is equivalent to the warnings "default": warnings are
+        matched by the exact location they originated from.
+
+    Notes
+    -----
+    Filters added inside the context manager will be discarded again
+    when leaving it. Upon entering all filters defined outside a
+    context will be applied automatically.
+
+    When a recording filter is added, matching warnings are stored in the
+    ``log`` attribute as well as in the list returned by ``record``.
+
+    If filters are added and the ``module`` keyword is given, the
+    warning registry of this module will additionally be cleared when
+    applying it, entering the context, or exiting it. This could cause
+    warnings to appear a second time after leaving the context if they
+    were configured to be printed once (default) and were already
+    printed before the context was entered.
+
+    Nesting this context manager will work as expected when the
+    forwarding rule is "always" (default). Unfiltered and unrecorded
+    warnings will be passed out and be matched by the outer level.
+    On the outermost level they will be printed (or caught by another
+    warnings context). The forwarding rule argument can modify this
+    behaviour.
+
+    Like ``catch_warnings`` this context manager is not threadsafe.
+
+    Examples
+    --------
+
+    With a context manager::
+
+        with np.testing.suppress_warnings() as sup:
+            sup.filter(DeprecationWarning, "Some text")
+            sup.filter(module=np.ma.core)
+            log = sup.record(FutureWarning, "Does this occur?")
+            command_giving_warnings()
+            # The FutureWarning was given once, the filtered warnings were
+            # ignored. All other warnings abide outside settings (may be
+            # printed/error)
+            assert_(len(log) == 1)
+            assert_(len(sup.log) == 1)  # also stored in log attribute
+
+    Or as a decorator::
+
+        sup = np.testing.suppress_warnings()
+        sup.filter(module=np.ma.core)  # module must match exactly
+        @sup
+        def some_function():
+            # do something which causes a warning in np.ma.core
+            pass
+    """
+    def __init__(self, forwarding_rule="always"):
+        self._entered = False
+
+        # Suppressions are either instance or defined inside one with block:
+        self._suppressions = []
+
+        if forwarding_rule not in {"always", "module", "once", "location"}:
+            raise ValueError("unsupported forwarding rule.")
+        self._forwarding_rule = forwarding_rule
+
+    def _clear_registries(self):
+        if hasattr(warnings, "_filters_mutated"):
+            # clearing the registry should not be necessary on new pythons,
+            # instead the filters should be mutated.
+            warnings._filters_mutated()
+            return
+        # Simply clear the registry, this should normally be harmless,
+        # note that on new pythons it would be invalidated anyway.
+        for module in self._tmp_modules:
+            if hasattr(module, "__warningregistry__"):
+                module.__warningregistry__.clear()
+
+    def _filter(self, category=Warning, message="", module=None, record=False):
+        if record:
+            record = []  # The log where to store warnings
+        else:
+            record = None
+        if self._entered:
+            if module is None:
+                warnings.filterwarnings(
+                    "always", category=category, message=message)
+            else:
+                module_regex = module.__name__.replace('.', r'\.') + '$'
+                warnings.filterwarnings(
+                    "always", category=category, message=message,
+                    module=module_regex)
+                self._tmp_modules.add(module)
+                self._clear_registries()
+
+            self._tmp_suppressions.append(
+                (category, message, re.compile(message, re.I), module, record))
+        else:
+            self._suppressions.append(
+                (category, message, re.compile(message, re.I), module, record))
+
+        return record
+
+    def filter(self, category=Warning, message="", module=None):
+        """
+        Add a new suppressing filter or apply it if the state is entered.
+
+        Parameters
+        ----------
+        category : class, optional
+            Warning class to filter
+        message : string, optional
+            Regular expression matching the warning message.
+        module : module, optional
+            Module to filter for. Note that the module (and its file)
+            must match exactly and cannot be a submodule. This may make
+            it unreliable for external modules.
+
+        Notes
+        -----
+        When added within a context, filters are only added inside
+        the context and will be forgotten when the context is exited.
+        """
+        self._filter(category=category, message=message, module=module,
+                     record=False)
+
+    def record(self, category=Warning, message="", module=None):
+        """
+        Append a new recording filter or apply it if the state is entered.
+
+        All warnings matching will be appended to the ``log`` attribute.
+
+        Parameters
+        ----------
+        category : class, optional
+            Warning class to filter
+        message : string, optional
+            Regular expression matching the warning message.
+        module : module, optional
+            Module to filter for. Note that the module (and its file)
+            must match exactly and cannot be a submodule. This may make
+            it unreliable for external modules.
+
+        Returns
+        -------
+        log : list
+            A list which will be filled with all matched warnings.
+
+        Notes
+        -----
+        When added within a context, filters are only added inside
+        the context and will be forgotten when the context is exited.
+        """
+        return self._filter(category=category, message=message, module=module,
+                            record=True)
+
+    def __enter__(self):
+        if self._entered:
+            raise RuntimeError("cannot enter suppress_warnings twice.")
+
+        self._orig_show = warnings.showwarning
+        self._filters = warnings.filters
+        warnings.filters = self._filters[:]
+
+        self._entered = True
+        self._tmp_suppressions = []
+        self._tmp_modules = set()
+        self._forwarded = set()
+
+        self.log = []  # reset global log (no need to keep same list)
+
+        for cat, mess, _, mod, log in self._suppressions:
+            if log is not None:
+                del log[:]  # clear the log
+            if mod is None:
+                warnings.filterwarnings(
+                    "always", category=cat, message=mess)
+            else:
+                module_regex = mod.__name__.replace('.', r'\.') + '$'
+                warnings.filterwarnings(
+                    "always", category=cat, message=mess,
+                    module=module_regex)
+                self._tmp_modules.add(mod)
+        warnings.showwarning = self._showwarning
+        self._clear_registries()
+
+        return self
+
+    def __exit__(self, *exc_info):
+        warnings.showwarning = self._orig_show
+        warnings.filters = self._filters
+        self._clear_registries()
+        self._entered = False
+        del self._orig_show
+        del self._filters
+
+    def _showwarning(self, message, category, filename, lineno,
+                     *args, use_warnmsg=None, **kwargs):
+        for cat, _, pattern, mod, rec in (
+                self._suppressions + self._tmp_suppressions)[::-1]:
+            if (issubclass(category, cat) and
+                    pattern.match(message.args[0]) is not None):
+                if mod is None:
+                    # Message and category match, either recorded or ignored
+                    if rec is not None:
+                        msg = WarningMessage(message, category, filename,
+                                             lineno, **kwargs)
+                        self.log.append(msg)
+                        rec.append(msg)
+                    return
+                # Use startswith, because warnings strips the c or o from
+                # .pyc/.pyo files.
+                elif mod.__file__.startswith(filename):
+                    # The message and module (filename) match
+                    if rec is not None:
+                        msg = WarningMessage(message, category, filename,
+                                             lineno, **kwargs)
+                        self.log.append(msg)
+                        rec.append(msg)
+                    return
+
+        # There is no filter in place, so pass to the outside handler
+        # unless we should only pass it once
+        if self._forwarding_rule == "always":
+            if use_warnmsg is None:
+                self._orig_show(message, category, filename, lineno,
+                                *args, **kwargs)
+            else:
+                self._orig_showmsg(use_warnmsg)
+            return
+
+        if self._forwarding_rule == "once":
+            signature = (message.args, category)
+        elif self._forwarding_rule == "module":
+            signature = (message.args, category, filename)
+        elif self._forwarding_rule == "location":
+            signature = (message.args, category, filename, lineno)
+
+        if signature in self._forwarded:
+            return
+        self._forwarded.add(signature)
+        if use_warnmsg is None:
+            self._orig_show(message, category, filename, lineno, *args,
+                            **kwargs)
+        else:
+            self._orig_showmsg(use_warnmsg)
+
+    def __call__(self, func):
+        """
+        Function decorator to apply certain suppressions to a whole
+        function.
+        """
+        @wraps(func)
+        def new_func(*args, **kwargs):
+            with self:
+                return func(*args, **kwargs)
+
+        return new_func
+
+
+@contextlib.contextmanager
+def _assert_no_gc_cycles_context(name=None):
+    __tracebackhide__ = True  # Hide traceback for py.test
+
+    # not meaningful to test if there is no refcounting
+    if not HAS_REFCOUNT:
+        yield
+        return
+
+    assert_(gc.isenabled())
+    gc.disable()
+    gc_debug = gc.get_debug()
+    try:
+        for i in range(100):
+            if gc.collect() == 0:
+                break
+        else:
+            raise RuntimeError(
+                "Unable to fully collect garbage - perhaps a __del__ method "
+                "is creating more reference cycles?")
+
+        gc.set_debug(gc.DEBUG_SAVEALL)
+        yield
+        # gc.collect returns the number of unreachable objects in cycles that
+        # were found -- we are checking that no cycles were created in the context
+        n_objects_in_cycles = gc.collect()
+        objects_in_cycles = gc.garbage[:]
+    finally:
+        del gc.garbage[:]
+        gc.set_debug(gc_debug)
+        gc.enable()
+
+    if n_objects_in_cycles:
+        name_str = f' when calling {name}' if name is not None else ''
+        raise AssertionError(
+            "Reference cycles were found{}: {} objects were collected, "
+            "of which {} are shown below:{}"
+            .format(
+                name_str,
+                n_objects_in_cycles,
+                len(objects_in_cycles),
+                ''.join(
+                    "\n  {} object with id={}:\n    {}".format(
+                        type(o).__name__,
+                        id(o),
+                        pprint.pformat(o).replace('\n', '\n    ')
+                    ) for o in objects_in_cycles
+                )
+            )
+        )
+
+
+def assert_no_gc_cycles(*args, **kwargs):
+    """
+    Fail if the given callable produces any reference cycles.
+
+    If called with all arguments omitted, may be used as a context manager:
+
+        with assert_no_gc_cycles():
+            do_something()
+
+    .. versionadded:: 1.15.0
+
+    Parameters
+    ----------
+    func : callable
+        The callable to test.
+    \\*args : Arguments
+        Arguments passed to `func`.
+    \\*\\*kwargs : Kwargs
+        Keyword arguments passed to `func`.
+
+    Returns
+    -------
+    Nothing. The result is deliberately discarded to ensure that all cycles
+    are found.
+
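+    Examples
+    --------
+    A minimal sketch of the context-manager form:
+
+    >>> with np.testing.assert_no_gc_cycles():
+    ...     x = np.arange(10) * 2
+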
+    """
+    if not args:
+        return _assert_no_gc_cycles_context()
+
+    func = args[0]
+    args = args[1:]
+    with _assert_no_gc_cycles_context(name=func.__name__):
+        func(*args, **kwargs)
+
+
+def break_cycles():
+    """
+    Break reference cycles by calling gc.collect
+    Objects can call other objects' methods (for instance, another object's
+     __del__) inside their own __del__. On PyPy, the interpreter only runs
+    between calls to gc.collect, so multiple calls are needed to completely
+    release all cycles.
+    """
+
+    gc.collect()
+    if IS_PYPY:
+        # a few more, just to make sure all the finalizers are called
+        gc.collect()
+        gc.collect()
+        gc.collect()
+        gc.collect()
+
+
+def requires_memory(free_bytes):
+    """Decorator to skip a test if not enough memory is available"""
+    import pytest
+
+    def decorator(func):
+        @wraps(func)
+        def wrapper(*a, **kw):
+            msg = check_free_memory(free_bytes)
+            if msg is not None:
+                pytest.skip(msg)
+
+            try:
+                return func(*a, **kw)
+            except MemoryError:
+                # Probably ran out of memory regardless: don't regard as failure
+                pytest.xfail("MemoryError raised")
+
+        return wrapper
+
+    return decorator
+
+
+def check_free_memory(free_bytes):
+    """
+    Check whether `free_bytes` amount of memory is currently free.
+    Returns: None if enough memory available, otherwise error message
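+
+    A sketch of direct use (the 1 GB threshold is purely illustrative):
+
+    >>> msg = check_free_memory(1e9)  # doctest: +SKIP
+    >>> assert msg is None, msg  # doctest: +SKIP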
+    """
+    env_var = 'NPY_AVAILABLE_MEM'
+    env_value = os.environ.get(env_var)
+    if env_value is not None:
+        try:
+            mem_free = _parse_size(env_value)
+        except ValueError as exc:
+            raise ValueError(f'Invalid environment variable {env_var}: {exc}')
+
+        msg = (f'{free_bytes/1e9} GB memory required, but environment variable '
+               f'NPY_AVAILABLE_MEM={env_value} set')
+    else:
+        mem_free = _get_mem_available()
+
+        if mem_free is None:
+            msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
+                   "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
+                   "the test.")
+            mem_free = -1
+        else:
+            msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available'
+
+    return msg if mem_free < free_bytes else None
+
+
+def _parse_size(size_str):
+    """Convert memory size strings ('12 GB' etc.) to float"""
+    suffixes = {'': 1, 'b': 1,
+                'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4,
+                'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4,
+                'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4}
+
+    size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format(
+        '|'.join(suffixes.keys())), re.I)
+
+    m = size_re.match(size_str.lower())
+    if not m or m.group(2) not in suffixes:
+        raise ValueError(f'value {size_str!r} not a valid size')
+    return int(float(m.group(1)) * suffixes[m.group(2)])
+
+
+def _get_mem_available():
+    """Return available memory in bytes, or None if unknown."""
+    try:
+        import psutil
+        return psutil.virtual_memory().available
+    except (ImportError, AttributeError):
+        pass
+
+    if sys.platform.startswith('linux'):
+        info = {}
+        with open('/proc/meminfo') as f:
+            for line in f:
+                p = line.split()
+                info[p[0].strip(':').lower()] = int(p[1]) * 1024
+
+        if 'memavailable' in info:
+            # Linux >= 3.14
+            return info['memavailable']
+        else:
+            return info['memfree'] + info['cached']
+
+    return None
+
+
+def _no_tracing(func):
+    """
+    Decorator to temporarily turn off tracing for the duration of a test.
+    Needed in tests that check refcounting, otherwise the tracing itself
+    influences the refcounts
+    """
+    if not hasattr(sys, 'gettrace'):
+        return func
+    else:
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            original_trace = sys.gettrace()
+            try:
+                sys.settrace(None)
+                return func(*args, **kwargs)
+            finally:
+                sys.settrace(original_trace)
+        return wrapper
+
+
+def _get_glibc_version():
+    try:
+        ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1]
+    except Exception:
+        ver = '0.0'
+
+    return ver
+
+
+_glibcver = _get_glibc_version()
+_glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x)
+
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/_private/utils.pyi b/.venv/lib/python3.11/site-packages/numpy/testing/_private/utils.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..6baefd83bd0ae114941145349c10c583b3c43a31
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/testing/_private/utils.pyi
@@ -0,0 +1,402 @@
+import os
+import sys
+import ast
+import types
+import warnings
+import unittest
+import contextlib
+from re import Pattern
+from collections.abc import Callable, Iterable, Sequence
+from typing import (
+    Literal as L,
+    Any,
+    AnyStr,
+    ClassVar,
+    NoReturn,
+    overload,
+    type_check_only,
+    TypeVar,
+    Union,
+    Final,
+    SupportsIndex,
+)
+if sys.version_info >= (3, 10):
+    from typing import ParamSpec
+else:
+    from typing_extensions import ParamSpec
+
+from numpy import generic, dtype, number, object_, bool_, _FloatValue
+from numpy._typing import (
+    NDArray,
+    ArrayLike,
+    DTypeLike,
+    _ArrayLikeNumber_co,
+    _ArrayLikeObject_co,
+    _ArrayLikeTD64_co,
+    _ArrayLikeDT64_co,
+)
+
+from unittest.case import (
+    SkipTest as SkipTest,
+)
+
+_P = ParamSpec("_P")
+_T = TypeVar("_T")
+_ET = TypeVar("_ET", bound=BaseException)
+_FT = TypeVar("_FT", bound=Callable[..., Any])
+
+# Must return a bool or an ndarray/generic type
+# that is supported by `np.logical_and.reduce`
+_ComparisonFunc = Callable[
+    [NDArray[Any], NDArray[Any]],
+    Union[
+        bool,
+        bool_,
+        number[Any],
+        NDArray[Union[bool_, number[Any], object_]],
+    ],
+]
+
+__all__: list[str]
+
+class KnownFailureException(Exception): ...
+class IgnoreException(Exception): ...
+
+class clear_and_catch_warnings(warnings.catch_warnings):
+    class_modules: ClassVar[tuple[types.ModuleType, ...]]
+    modules: set[types.ModuleType]
+    @overload
+    def __new__(
+        cls,
+        record: L[False] = ...,
+        modules: Iterable[types.ModuleType] = ...,
+    ) -> _clear_and_catch_warnings_without_records: ...
+    @overload
+    def __new__(
+        cls,
+        record: L[True],
+        modules: Iterable[types.ModuleType] = ...,
+    ) -> _clear_and_catch_warnings_with_records: ...
+    @overload
+    def __new__(
+        cls,
+        record: bool,
+        modules: Iterable[types.ModuleType] = ...,
+    ) -> clear_and_catch_warnings: ...
+    def __enter__(self) -> None | list[warnings.WarningMessage]: ...
+    def __exit__(
+        self,
+        __exc_type: None | type[BaseException] = ...,
+        __exc_val: None | BaseException = ...,
+        __exc_tb: None | types.TracebackType = ...,
+    ) -> None: ...
+
+# Type-check only `clear_and_catch_warnings` subclasses for both values of the
+# `record` parameter. Copied from the stdlib `warnings` stubs.
+
+@type_check_only
+class _clear_and_catch_warnings_with_records(clear_and_catch_warnings):
+    def __enter__(self) -> list[warnings.WarningMessage]: ...
+
+@type_check_only
+class _clear_and_catch_warnings_without_records(clear_and_catch_warnings):
+    def __enter__(self) -> None: ...
+
+class suppress_warnings:
+    log: list[warnings.WarningMessage]
+    def __init__(
+        self,
+        forwarding_rule: L["always", "module", "once", "location"] = ...,
+    ) -> None: ...
+    def filter(
+        self,
+        category: type[Warning] = ...,
+        message: str = ...,
+        module: None | types.ModuleType = ...,
+    ) -> None: ...
+    def record(
+        self,
+        category: type[Warning] = ...,
+        message: str = ...,
+        module: None | types.ModuleType = ...,
+    ) -> list[warnings.WarningMessage]: ...
+    def __enter__(self: _T) -> _T: ...
+    def __exit__(
+        self,
+        __exc_type: None | type[BaseException] = ...,
+        __exc_val: None | BaseException = ...,
+        __exc_tb: None | types.TracebackType = ...,
+    ) -> None: ...
+    def __call__(self, func: _FT) -> _FT: ...
+
+verbose: int
+IS_PYPY: Final[bool]
+IS_PYSTON: Final[bool]
+HAS_REFCOUNT: Final[bool]
+HAS_LAPACK64: Final[bool]
+
+def assert_(val: object, msg: str | Callable[[], str] = ...) -> None: ...
+
+# Contrary to runtime we can't do `os.name` checks while type checking,
+# only `sys.platform` checks
+if sys.platform == "win32" or sys.platform == "cygwin":
+    def memusage(processName: str = ..., instance: int = ...) -> int: ...
+elif sys.platform == "linux":
+    def memusage(_proc_pid_stat: str | bytes | os.PathLike[Any] = ...) -> None | int: ...
+else:
+    def memusage() -> NoReturn: ...
+
+if sys.platform == "linux":
+    def jiffies(
+        _proc_pid_stat: str | bytes | os.PathLike[Any] = ...,
+        _load_time: list[float] = ...,
+    ) -> int: ...
+else:
+    def jiffies(_load_time: list[float] = ...) -> int: ...
+
+def build_err_msg(
+    arrays: Iterable[object],
+    err_msg: str,
+    header: str = ...,
+    verbose: bool = ...,
+    names: Sequence[str] = ...,
+    precision: None | SupportsIndex = ...,
+) -> str: ...
+
+def assert_equal(
+    actual: object,
+    desired: object,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+def print_assert_equal(
+    test_string: str,
+    actual: object,
+    desired: object,
+) -> None: ...
+
+def assert_almost_equal(
+    actual: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    desired: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    decimal: int = ...,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+# Anything that can be coerced into `builtins.float`
+def assert_approx_equal(
+    actual: _FloatValue,
+    desired: _FloatValue,
+    significant: int = ...,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+def assert_array_compare(
+    comparison: _ComparisonFunc,
+    x: ArrayLike,
+    y: ArrayLike,
+    err_msg: str = ...,
+    verbose: bool = ...,
+    header: str = ...,
+    precision: SupportsIndex = ...,
+    equal_nan: bool = ...,
+    equal_inf: bool = ...,
+    *,
+    strict: bool = ...
+) -> None: ...
+
+def assert_array_equal(
+    x: ArrayLike,
+    y: ArrayLike,
+    err_msg: str = ...,
+    verbose: bool = ...,
+    *,
+    strict: bool = ...
+) -> None: ...
+
+def assert_array_almost_equal(
+    x: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    y: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    decimal: float = ...,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+@overload
+def assert_array_less(
+    x: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    y: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_array_less(
+    x: _ArrayLikeTD64_co,
+    y: _ArrayLikeTD64_co,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_array_less(
+    x: _ArrayLikeDT64_co,
+    y: _ArrayLikeDT64_co,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+def runstring(
+    astr: str | bytes | types.CodeType,
+    dict: None | dict[str, Any],
+) -> Any: ...
+
+def assert_string_equal(actual: str, desired: str) -> None: ...
+
+def rundocs(
+    filename: None | str | os.PathLike[str] = ...,
+    raise_on_error: bool = ...,
+) -> None: ...
+
+def raises(*args: type[BaseException]) -> Callable[[_FT], _FT]: ...
+
+@overload
+def assert_raises(  # type: ignore
+    expected_exception: type[BaseException] | tuple[type[BaseException], ...],
+    callable: Callable[_P, Any],
+    /,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> None: ...
+@overload
+def assert_raises(
+    expected_exception: type[_ET] | tuple[type[_ET], ...],
+    *,
+    msg: None | str = ...,
+) -> unittest.case._AssertRaisesContext[_ET]: ...
+
+@overload
+def assert_raises_regex(
+    expected_exception: type[BaseException] | tuple[type[BaseException], ...],
+    expected_regex: str | bytes | Pattern[Any],
+    callable: Callable[_P, Any],
+    /,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> None: ...
+@overload
+def assert_raises_regex(
+    expected_exception: type[_ET] | tuple[type[_ET], ...],
+    expected_regex: str | bytes | Pattern[Any],
+    *,
+    msg: None | str = ...,
+) -> unittest.case._AssertRaisesContext[_ET]: ...
+
+def decorate_methods(
+    cls: type[Any],
+    decorator: Callable[[Callable[..., Any]], Any],
+    testmatch: None | str | bytes | Pattern[Any] = ...,
+) -> None: ...
+
+def measure(
+    code_str: str | bytes | ast.mod | ast.AST,
+    times: int = ...,
+    label: None | str = ...,
+) -> float: ...
+
+@overload
+def assert_allclose(
+    actual: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    desired: _ArrayLikeNumber_co | _ArrayLikeObject_co,
+    rtol: float = ...,
+    atol: float = ...,
+    equal_nan: bool = ...,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+@overload
+def assert_allclose(
+    actual: _ArrayLikeTD64_co,
+    desired: _ArrayLikeTD64_co,
+    rtol: float = ...,
+    atol: float = ...,
+    equal_nan: bool = ...,
+    err_msg: str = ...,
+    verbose: bool = ...,
+) -> None: ...
+
+def assert_array_almost_equal_nulp(
+    x: _ArrayLikeNumber_co,
+    y: _ArrayLikeNumber_co,
+    nulp: float = ...,
+) -> None: ...
+
+def assert_array_max_ulp(
+    a: _ArrayLikeNumber_co,
+    b: _ArrayLikeNumber_co,
+    maxulp: float = ...,
+    dtype: DTypeLike = ...,
+) -> NDArray[Any]: ...
+
+@overload
+def assert_warns(
+    warning_class: type[Warning],
+) -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_warns(
+    warning_class: type[Warning],
+    func: Callable[_P, _T],
+    /,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> _T: ...
+
+@overload
+def assert_no_warnings() -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_no_warnings(
+    func: Callable[_P, _T],
+    /,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> _T: ...
+
+@overload
+def tempdir(
+    suffix: None = ...,
+    prefix: None = ...,
+    dir: None = ...,
+) -> contextlib._GeneratorContextManager[str]: ...
+@overload
+def tempdir(
+    suffix: None | AnyStr = ...,
+    prefix: None | AnyStr = ...,
+    dir: None | AnyStr | os.PathLike[AnyStr] = ...,
+) -> contextlib._GeneratorContextManager[AnyStr]: ...
+
+@overload
+def temppath(
+    suffix: None = ...,
+    prefix: None = ...,
+    dir: None = ...,
+    text: bool = ...,
+) -> contextlib._GeneratorContextManager[str]: ...
+@overload
+def temppath(
+    suffix: None | AnyStr = ...,
+    prefix: None | AnyStr = ...,
+    dir: None | AnyStr | os.PathLike[AnyStr] = ...,
+    text: bool = ...,
+) -> contextlib._GeneratorContextManager[AnyStr]: ...
+
+@overload
+def assert_no_gc_cycles() -> contextlib._GeneratorContextManager[None]: ...
+@overload
+def assert_no_gc_cycles(
+    func: Callable[_P, Any],
+    /,
+    *args: _P.args,
+    **kwargs: _P.kwargs,
+) -> None: ...
+
+def break_cycles() -> None: ...
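+
+# Illustrative usage sketch (editorial addition, not part of the upstream
+# stub): ``tempdir`` and ``temppath`` are declared above as context managers,
+# so a typical call site looks like:
+#
+# >>> import os
+# >>> from numpy.testing import tempdir, temppath
+# >>> with tempdir() as d, temppath(suffix=".txt") as p:
+# ...     os.path.isdir(d) and p.endswith(".txt")
+# True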
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/overrides.py b/.venv/lib/python3.11/site-packages/numpy/testing/overrides.py
new file mode 100644
index 0000000000000000000000000000000000000000..edc7132c20409cae54f549f4e2c8fe2e295da504
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/testing/overrides.py
@@ -0,0 +1,83 @@
+"""Tools for testing implementations of __array_function__ and ufunc overrides
+
+
+"""
+
+from numpy.core.overrides import ARRAY_FUNCTIONS as _array_functions
+from numpy import ufunc as _ufunc
+import numpy.core.umath as _umath
+
+def get_overridable_numpy_ufuncs():
+    """List all numpy ufuncs overridable via `__array_ufunc__`
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    set
+        A set containing all overridable ufuncs in the public numpy API.
+    """
+    ufuncs = {obj for obj in _umath.__dict__.values()
+              if isinstance(obj, _ufunc)}
+    return ufuncs
+
+
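+# Illustrative usage sketch for ``get_overridable_numpy_ufuncs`` (editorial
+# addition, not upstream NumPy code): any public ufunc, e.g. ``np.add``,
+# should be contained in the returned set.
+#
+# >>> import numpy as np
+# >>> from numpy.testing.overrides import get_overridable_numpy_ufuncs
+# >>> np.add in get_overridable_numpy_ufuncs()
+# True
+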
+def allows_array_ufunc_override(func):
+    """Determine if a function can be overridden via `__array_ufunc__`
+
+    Parameters
+    ----------
+    func : callable
+        Function that may be overridable via `__array_ufunc__`
+
+    Returns
+    -------
+    bool
+        `True` if `func` is overridable via `__array_ufunc__` and
+        `False` otherwise.
+
+    Notes
+    -----
+    This function is equivalent to ``isinstance(func, np.ufunc)`` and
+    will work correctly for ufuncs defined outside of Numpy.
+
+    """
+    return isinstance(func, _ufunc)
+
+
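+# Illustrative usage sketch for ``allows_array_ufunc_override`` (editorial
+# addition, not upstream NumPy code): a ufunc such as ``np.add`` qualifies,
+# while an ordinary callable such as the builtin ``abs`` does not.
+#
+# >>> import numpy as np
+# >>> from numpy.testing.overrides import allows_array_ufunc_override
+# >>> allows_array_ufunc_override(np.add)
+# True
+# >>> allows_array_ufunc_override(abs)
+# False
+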
+def get_overridable_numpy_array_functions():
+    """List all numpy functions overridable via `__array_function__`
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    set
+        A set containing all functions in the public numpy API that are
+        overridable via `__array_function__`.
+
+    """
+    # 'import numpy' doesn't import recfunctions, so make sure it's imported
+    # so that the functions defined there show up in the listing
+    from numpy.lib import recfunctions
+    return _array_functions.copy()
+
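+# Illustrative usage sketch for ``get_overridable_numpy_array_functions``
+# (editorial addition, not upstream NumPy code): functions that go through
+# ``__array_function__`` dispatch, e.g. ``np.mean``, appear in the result.
+#
+# >>> import numpy as np
+# >>> from numpy.testing.overrides import get_overridable_numpy_array_functions
+# >>> np.mean in get_overridable_numpy_array_functions()
+# True
+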
+def allows_array_function_override(func):
+    """Determine if a Numpy function can be overridden via `__array_function__`
+
+    Parameters
+    ----------
+    func : callable
+        Function that may be overridable via `__array_function__`
+
+    Returns
+    -------
+    bool
+        `True` if `func` is a function in the Numpy API that is
+        overridable via `__array_function__` and `False` otherwise.
+    """
+    return func in _array_functions
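+
+# Illustrative usage sketch for ``allows_array_function_override`` (editorial
+# addition, not upstream NumPy code): ``np.mean`` is part of the dispatchable
+# NumPy API, whereas the builtin ``print`` is not.
+#
+# >>> import numpy as np
+# >>> from numpy.testing.overrides import allows_array_function_override
+# >>> allows_array_function_override(np.mean)
+# True
+# >>> allows_array_function_override(print)
+# False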
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/print_coercion_tables.py b/.venv/lib/python3.11/site-packages/numpy/testing/print_coercion_tables.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1d4cdff8fd0b7e9cb9b539d9a49f3374a098a11
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/testing/print_coercion_tables.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python3
+"""Prints type-coercion tables for the built-in NumPy types
+
+"""
+import numpy as np
+from collections import namedtuple
+
+# Generic object that can be added, but doesn't do anything else
+class GenericObject:
+    def __init__(self, v):
+        self.v = v
+
+    def __add__(self, other):
+        return self
+
+    def __radd__(self, other):
+        return self
+
+    dtype = np.dtype('O')
+
+def print_cancast_table(ntypes):
+    print('X', end=' ')
+    for char in ntypes:
+        print(char, end=' ')
+    print()
+    for row in ntypes:
+        print(row, end=' ')
+        for col in ntypes:
+            if np.can_cast(row, col, "equiv"):
+                cast = "#"
+            elif np.can_cast(row, col, "safe"):
+                cast = "="
+            elif np.can_cast(row, col, "same_kind"):
+                cast = "~"
+            elif np.can_cast(row, col, "unsafe"):
+                cast = "."
+            else:
+                cast = " "
+            print(cast, end=' ')
+        print()
+
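+# Illustrative note on reading the table printed by ``print_cancast_table``
+# (editorial addition, not upstream NumPy code): the first matching casting
+# rule picks the symbol, so int32 -> float64, which is not "equiv" but is
+# "safe", prints as '='.
+#
+# >>> import numpy as np
+# >>> np.can_cast(np.int32, np.float64, "equiv")
+# False
+# >>> np.can_cast(np.int32, np.float64, "safe")
+# True
+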
+def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False):
+    print('+', end=' ')
+    for char in ntypes:
+        print(char, end=' ')
+    print()
+    for row in ntypes:
+        if row == 'O':
+            rowtype = GenericObject
+        else:
+            rowtype = np.obj2sctype(row)
+
+        print(row, end=' ')
+        for col in ntypes:
+            if col == 'O':
+                coltype = GenericObject
+            else:
+                coltype = np.obj2sctype(col)
+            try:
+                if firstarray:
+                    rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype)
+                else:
+                    rowvalue = rowtype(inputfirstvalue)
+                colvalue = coltype(inputsecondvalue)
+                if use_promote_types:
+                    char = np.promote_types(rowvalue.dtype, colvalue.dtype).char
+                else:
+                    value = np.add(rowvalue, colvalue)
+                    if isinstance(value, np.ndarray):
+                        char = value.dtype.char
+                    else:
+                        char = np.dtype(type(value)).char
+            except ValueError:
+                char = '!'
+            except OverflowError:
+                char = '@'
+            except TypeError:
+                char = '#'
+            print(char, end=' ')
+        print()
+
+
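+# Illustrative sketch of a single coercion-table cell (editorial addition,
+# not upstream NumPy code): with ``use_promote_types=True`` the cell for
+# int32 and float32 is simply the character of the promoted dtype.
+#
+# >>> import numpy as np
+# >>> np.promote_types(np.int32, np.float32).char
+# 'd'
+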
+def print_new_cast_table(*, can_cast=True, legacy=False, flags=False):
+    """Prints new casts, the values given are default "can-cast" values, not
+    actual ones.
+    """
+    from numpy.core._multiarray_tests import get_all_cast_information
+
+    cast_table = {
+        -1: " ",
+        0: "#",  # No cast (classify as equivalent here)
+        1: "#",  # equivalent casting
+        2: "=",  # safe casting
+        3: "~",  # same-kind casting
+        4: ".",  # unsafe casting
+    }
+    flags_table = {
+        0 : "▗", 7: "█",
+        1: "▚", 2: "▐", 4: "▄",
+                3: "▜", 5: "▙",
+                        6: "▟",
+    }
+
+    cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"])
+    no_cast_info = cast_info(" ", " ", " ")
+
+    casts = get_all_cast_information()
+    table = {}
+    dtypes = set()
+    for cast in casts:
+        dtypes.add(cast["from"])
+        dtypes.add(cast["to"])
+
+        if cast["from"] not in table:
+            table[cast["from"]] = {}
+        to_dict = table[cast["from"]]
+
+        can_cast = cast_table[cast["casting"]]
+        legacy = "L" if cast["legacy"] else "."
+        flags = 0
+        if cast["requires_pyapi"]:
+            flags |= 1
+        if cast["supports_unaligned"]:
+            flags |= 2
+        if cast["no_floatingpoint_errors"]:
+            flags |= 4
+
+        flags = flags_table[flags]
+        to_dict[cast["to"]] = cast_info(can_cast=can_cast, legacy=legacy, flags=flags)
+
+    # The np.dtype(x.type) is a bit strange, because dtype classes do
+    # not expose much yet.
+    types = np.typecodes["All"]
+    def sorter(x):
+        # This is a bit weird hack, to get a table as close as possible to
+        # the one printing all typecodes (but expecting user-dtypes).
+        dtype = np.dtype(x.type)
+        try:
+            indx = types.index(dtype.char)
+        except ValueError:
+            indx = np.inf
+        return (indx, dtype.char)
+
+    dtypes = sorted(dtypes, key=sorter)
+
+    def print_table(field="can_cast"):
+        print('X', end=' ')
+        for dt in dtypes:
+            print(np.dtype(dt.type).char, end=' ')
+        print()
+        for from_dt in dtypes:
+            print(np.dtype(from_dt.type).char, end=' ')
+            row = table.get(from_dt, {})
+            for to_dt in dtypes:
+                print(getattr(row.get(to_dt, no_cast_info), field), end=' ')
+            print()
+
+    if can_cast:
+        # Print the actual table:
+        print()
+        print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe")
+        print()
+        print_table("can_cast")
+
+    if legacy:
+        print()
+        print("L denotes a legacy cast . a non-legacy one.")
+        print()
+        print_table("legacy")
+
+    if flags:
+        print()
+        print(f"{flags_table[0]}: no flags, {flags_table[1]}: PyAPI, "
+              f"{flags_table[2]}: supports unaligned, {flags_table[4]}: no-float-errors")
+        print()
+        print_table("flags")
+
+
+if __name__ == '__main__':
+    print("can cast")
+    print_cancast_table(np.typecodes['All'])
+    print()
+    print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
+    print()
+    print("scalar + scalar")
+    print_coercion_table(np.typecodes['All'], 0, 0, False)
+    print()
+    print("scalar + neg scalar")
+    print_coercion_table(np.typecodes['All'], 0, -1, False)
+    print()
+    print("array + scalar")
+    print_coercion_table(np.typecodes['All'], 0, 0, True)
+    print()
+    print("array + neg scalar")
+    print_coercion_table(np.typecodes['All'], 0, -1, True)
+    print()
+    print("promote_types")
+    print_coercion_table(np.typecodes['All'], 0, 0, False, True)
+    print("New casting type promotion:")
+    print_new_cast_table(can_cast=True, legacy=True, flags=True)
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/setup.py b/.venv/lib/python3.11/site-packages/numpy/testing/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f203e87271109763f2f947b711bd4124cd1138a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/testing/setup.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+    config = Configuration('testing', parent_package, top_path)
+
+    config.add_subpackage('_private')
+    config.add_subpackage('tests')
+    config.add_data_files('*.pyi')
+    config.add_data_files('_private/*.pyi')
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(maintainer="NumPy Developers",
+          maintainer_email="numpy-dev@numpy.org",
+          description="NumPy test module",
+          url="https://www.numpy.org",
+          license="NumPy License (BSD Style)",
+          configuration=configuration,
+          )
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/tests/__init__.py b/.venv/lib/python3.11/site-packages/numpy/testing/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6598021170822fe09dc5448dcb50dc260a8a0162
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/numpy/testing/tests/test_utils.py b/.venv/lib/python3.11/site-packages/numpy/testing/tests/test_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..0aaa508ee5d2e194f44c756c94ae5b3db194292e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/numpy/testing/tests/test_utils.py
@@ -0,0 +1,1626 @@
+import warnings
+import sys
+import os
+import itertools
+import pytest
+import weakref
+
+import numpy as np
+from numpy.testing import (
+    assert_equal, assert_array_equal, assert_almost_equal,
+    assert_array_almost_equal, assert_array_less, build_err_msg,
+    assert_raises, assert_warns, assert_no_warnings, assert_allclose,
+    assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
+    clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
+    tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
+    )
+
+
+class _GenericTest:
+
+    def _test_equal(self, a, b):
+        self._assert_func(a, b)
+
+    def _test_not_equal(self, a, b):
+        with assert_raises(AssertionError):
+            self._assert_func(a, b)
+
+    def test_array_rank1_eq(self):
+        """Test two equal array of rank 1 are found equal."""
+        a = np.array([1, 2])
+        b = np.array([1, 2])
+
+        self._test_equal(a, b)
+
+    def test_array_rank1_noteq(self):
+        """Test two different array of rank 1 are found not equal."""
+        a = np.array([1, 2])
+        b = np.array([2, 2])
+
+        self._test_not_equal(a, b)
+
+    def test_array_rank2_eq(self):
+        """Test two equal array of rank 2 are found equal."""
+        a = np.array([[1, 2], [3, 4]])
+        b = np.array([[1, 2], [3, 4]])
+
+        self._test_equal(a, b)
+
+    def test_array_diffshape(self):
+        """Test two arrays with different shapes are found not equal."""
+        a = np.array([1, 2])
+        b = np.array([[1, 2], [1, 2]])
+
+        self._test_not_equal(a, b)
+
+    def test_objarray(self):
+        """Test object arrays."""
+        a = np.array([1, 1], dtype=object)
+        self._test_equal(a, 1)
+
+    def test_array_likes(self):
+        self._test_equal([1, 2, 3], (1, 2, 3))
+
+
+class TestArrayEqual(_GenericTest):
+
+    def setup_method(self):
+        self._assert_func = assert_array_equal
+
+    def test_generic_rank1(self):
+        """Test rank 1 array for all dtypes."""
+        def foo(t):
+            a = np.empty(2, t)
+            a.fill(1)
+            b = a.copy()
+            c = a.copy()
+            c.fill(0)
+            self._test_equal(a, b)
+            self._test_not_equal(c, b)
+
+        # Test numeric types and object
+        for t in '?bhilqpBHILQPfdgFDG':
+            foo(t)
+
+        # Test strings
+        for t in ['S1', 'U1']:
+            foo(t)
+
+    def test_0_ndim_array(self):
+        x = np.array(473963742225900817127911193656584771)
+        y = np.array(18535119325151578301457182298393896)
+        assert_raises(AssertionError, self._assert_func, x, y)
+
+        y = x
+        self._assert_func(x, y)
+
+        x = np.array(43)
+        y = np.array(10)
+        assert_raises(AssertionError, self._assert_func, x, y)
+
+        y = x
+        self._assert_func(x, y)
+
+    def test_generic_rank3(self):
+        """Test rank 3 array for all dtypes."""
+        def foo(t):
+            a = np.empty((4, 2, 3), t)
+            a.fill(1)
+            b = a.copy()
+            c = a.copy()
+            c.fill(0)
+            self._test_equal(a, b)
+            self._test_not_equal(c, b)
+
+        # Test numeric types and object
+        for t in '?bhilqpBHILQPfdgFDG':
+            foo(t)
+
+        # Test strings
+        for t in ['S1', 'U1']:
+            foo(t)
+
+    def test_nan_array(self):
+        """Test arrays with nan values in them."""
+        a = np.array([1, 2, np.nan])
+        b = np.array([1, 2, np.nan])
+
+        self._test_equal(a, b)
+
+        c = np.array([1, 2, 3])
+        self._test_not_equal(c, b)
+
+    def test_string_arrays(self):
+        """Test two arrays with different shapes are found not equal."""
+        a = np.array(['floupi', 'floupa'])
+        b = np.array(['floupi', 'floupa'])
+
+        self._test_equal(a, b)
+
+        c = np.array(['floupipi', 'floupa'])
+
+        self._test_not_equal(c, b)
+
+    def test_recarrays(self):
+        """Test record arrays."""
+        a = np.empty(2, [('floupi', float), ('floupa', float)])
+        a['floupi'] = [1, 2]
+        a['floupa'] = [1, 2]
+        b = a.copy()
+
+        self._test_equal(a, b)
+
+        c = np.empty(2, [('floupipi', float),
+                         ('floupi', float), ('floupa', float)])
+        c['floupipi'] = a['floupi'].copy()
+        c['floupa'] = a['floupa'].copy()
+
+        with pytest.raises(TypeError):
+            self._test_not_equal(c, b)
+
+    def test_masked_nan_inf(self):
+        # Regression test for gh-11121
+        a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
+        b = np.array([3., np.nan, 6.5])
+        self._test_equal(a, b)
+        self._test_equal(b, a)
+        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
+        b = np.array([np.inf, 4., 6.5])
+        self._test_equal(a, b)
+        self._test_equal(b, a)
+
+    def test_subclass_that_overrides_eq(self):
+        # While we cannot guarantee testing functions will always work for
+        # subclasses, the tests should ideally rely only on subclasses having
+        # comparison operators, not on them being able to store booleans
+        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+        class MyArray(np.ndarray):
+            def __eq__(self, other):
+                return bool(np.equal(self, other).all())
+
+            def __ne__(self, other):
+                return not self == other
+
+        a = np.array([1., 2.]).view(MyArray)
+        b = np.array([2., 3.]).view(MyArray)
+        assert_equal(type(a == a), bool)
+        assert_(a == a)
+        assert_(a != b)
+        self._test_equal(a, a)
+        self._test_not_equal(a, b)
+        self._test_not_equal(b, a)
+
+    def test_subclass_that_does_not_implement_npall(self):
+        class MyArray(np.ndarray):
+            def __array_function__(self, *args, **kwargs):
+                return NotImplemented
+
+        a = np.array([1., 2.]).view(MyArray)
+        b = np.array([2., 3.]).view(MyArray)
+        with assert_raises(TypeError):
+            np.all(a)
+        self._test_equal(a, a)
+        self._test_not_equal(a, b)
+        self._test_not_equal(b, a)
+
+    def test_suppress_overflow_warnings(self):
+        # Based on issue #18992
+        with pytest.raises(AssertionError):
+            with np.errstate(all="raise"):
+                np.testing.assert_array_equal(
+                    np.array([1, 2, 3], np.float32),
+                    np.array([1, 1e-40, 3], np.float32))
+
+    def test_array_vs_scalar_is_equal(self):
+        """Test comparing an array with a scalar when all values are equal."""
+        a = np.array([1., 1., 1.])
+        b = 1.
+
+        self._test_equal(a, b)
+
+    def test_array_vs_scalar_not_equal(self):
+        """Test comparing an array with a scalar when not all values equal."""
+        a = np.array([1., 2., 3.])
+        b = 1.
+
+        self._test_not_equal(a, b)
+
+    def test_array_vs_scalar_strict(self):
+        """Test comparing an array with a scalar with strict option."""
+        a = np.array([1., 1., 1.])
+        b = 1.
+
+        with pytest.raises(AssertionError):
+            assert_array_equal(a, b, strict=True)
+
+    def test_array_vs_array_strict(self):
+        """Test comparing two arrays with strict option."""
+        a = np.array([1., 1., 1.])
+        b = np.array([1., 1., 1.])
+
+        assert_array_equal(a, b, strict=True)
+
+    def test_array_vs_float_array_strict(self):
+        """Test comparing two arrays with strict option."""
+        a = np.array([1, 1, 1])
+        b = np.array([1., 1., 1.])
+
+        with pytest.raises(AssertionError):
+            assert_array_equal(a, b, strict=True)
+
+
+class TestBuildErrorMessage:
+
+    def test_build_err_msg_defaults(self):
+        x = np.array([1.00001, 2.00002, 3.00003])
+        y = np.array([1.00002, 2.00003, 3.00004])
+        err_msg = 'There is a mismatch'
+
+        a = build_err_msg([x, y], err_msg)
+        b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
+             '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, '
+             '2.00003, 3.00004])')
+        assert_equal(a, b)
+
+    def test_build_err_msg_no_verbose(self):
+        x = np.array([1.00001, 2.00002, 3.00003])
+        y = np.array([1.00002, 2.00003, 3.00004])
+        err_msg = 'There is a mismatch'
+
+        a = build_err_msg([x, y], err_msg, verbose=False)
+        b = '\nItems are not equal: There is a mismatch'
+        assert_equal(a, b)
+
+    def test_build_err_msg_custom_names(self):
+        x = np.array([1.00001, 2.00002, 3.00003])
+        y = np.array([1.00002, 2.00003, 3.00004])
+        err_msg = 'There is a mismatch'
+
+        a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR'))
+        b = ('\nItems are not equal: There is a mismatch\n FOO: array(['
+             '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, '
+             '3.00004])')
+        assert_equal(a, b)
+
+    def test_build_err_msg_custom_precision(self):
+        x = np.array([1.000000001, 2.00002, 3.00003])
+        y = np.array([1.000000002, 2.00003, 3.00004])
+        err_msg = 'There is a mismatch'
+
+        a = build_err_msg([x, y], err_msg, precision=10)
+        b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
+             '1.000000001, 2.00002    , 3.00003    ])\n DESIRED: array(['
+             '1.000000002, 2.00003    , 3.00004    ])')
+        assert_equal(a, b)
+
+
+class TestEqual(TestArrayEqual):
+
+    def setup_method(self):
+        self._assert_func = assert_equal
+
+    def test_nan_items(self):
+        self._assert_func(np.nan, np.nan)
+        self._assert_func([np.nan], [np.nan])
+        self._test_not_equal(np.nan, [np.nan])
+        self._test_not_equal(np.nan, 1)
+
+    def test_inf_items(self):
+        self._assert_func(np.inf, np.inf)
+        self._assert_func([np.inf], [np.inf])
+        self._test_not_equal(np.inf, [np.inf])
+
+    def test_datetime(self):
+        self._test_equal(
+            np.datetime64("2017-01-01", "s"),
+            np.datetime64("2017-01-01", "s")
+        )
+        self._test_equal(
+            np.datetime64("2017-01-01", "s"),
+            np.datetime64("2017-01-01", "m")
+        )
+
+        # gh-10081
+        self._test_not_equal(
+            np.datetime64("2017-01-01", "s"),
+            np.datetime64("2017-01-02", "s")
+        )
+        self._test_not_equal(
+            np.datetime64("2017-01-01", "s"),
+            np.datetime64("2017-01-02", "m")
+        )
+
+    def test_nat_items(self):
+        # not a datetime
+        nadt_no_unit = np.datetime64("NaT")
+        nadt_s = np.datetime64("NaT", "s")
+        nadt_d = np.datetime64("NaT", "ns")
+        # not a timedelta
+        natd_no_unit = np.timedelta64("NaT")
+        natd_s = np.timedelta64("NaT", "s")
+        natd_d = np.timedelta64("NaT", "ns")
+
+        dts = [nadt_no_unit, nadt_s, nadt_d]
+        tds = [natd_no_unit, natd_s, natd_d]
+        for a, b in itertools.product(dts, dts):
+            self._assert_func(a, b)
+            self._assert_func([a], [b])
+            self._test_not_equal([a], b)
+
+        for a, b in itertools.product(tds, tds):
+            self._assert_func(a, b)
+            self._assert_func([a], [b])
+            self._test_not_equal([a], b)
+
+        for a, b in itertools.product(tds, dts):
+            self._test_not_equal(a, b)
+            self._test_not_equal(a, [b])
+            self._test_not_equal([a], [b])
+            self._test_not_equal([a], np.datetime64("2017-01-01", "s"))
+            self._test_not_equal([b], np.datetime64("2017-01-01", "s"))
+            self._test_not_equal([a], np.timedelta64(123, "s"))
+            self._test_not_equal([b], np.timedelta64(123, "s"))
+
+    def test_non_numeric(self):
+        self._assert_func('ab', 'ab')
+        self._test_not_equal('ab', 'abb')
+
+    def test_complex_item(self):
+        self._assert_func(complex(1, 2), complex(1, 2))
+        self._assert_func(complex(1, np.nan), complex(1, np.nan))
+        self._test_not_equal(complex(1, np.nan), complex(1, 2))
+        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
+        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
+
+    def test_negative_zero(self):
+        self._test_not_equal(np.PZERO, np.NZERO)
+
+    def test_complex(self):
+        x = np.array([complex(1, 2), complex(1, np.nan)])
+        y = np.array([complex(1, 2), complex(1, 2)])
+        self._assert_func(x, x)
+        self._test_not_equal(x, y)
+
+    def test_object(self):
+        #gh-12942
+        import datetime
+        a = np.array([datetime.datetime(2000, 1, 1),
+                      datetime.datetime(2000, 1, 2)])
+        self._test_not_equal(a, a[::-1])
+
+
+class TestArrayAlmostEqual(_GenericTest):
+
+    def setup_method(self):
+        self._assert_func = assert_array_almost_equal
+
+    def test_closeness(self):
+        # Note that in the course of time we ended up with
+        #     `abs(x - y) < 1.5 * 10**(-decimal)`
+        # instead of the previously documented
+        #     `abs(x - y) < 0.5 * 10**(-decimal)`
+        # so this check serves to preserve the wrongness.
+
+        # test scalars
+        self._assert_func(1.499999, 0.0, decimal=0)
+        assert_raises(AssertionError,
+                          lambda: self._assert_func(1.5, 0.0, decimal=0))
+
+        # test arrays
+        self._assert_func([1.499999], [0.0], decimal=0)
+        assert_raises(AssertionError,
+                          lambda: self._assert_func([1.5], [0.0], decimal=0))
+
+    def test_simple(self):
+        x = np.array([1234.2222])
+        y = np.array([1234.2223])
+
+        self._assert_func(x, y, decimal=3)
+        self._assert_func(x, y, decimal=4)
+        assert_raises(AssertionError,
+                lambda: self._assert_func(x, y, decimal=5))
+
+    def test_nan(self):
+        anan = np.array([np.nan])
+        aone = np.array([1])
+        ainf = np.array([np.inf])
+        self._assert_func(anan, anan)
+        assert_raises(AssertionError,
+                lambda: self._assert_func(anan, aone))
+        assert_raises(AssertionError,
+                lambda: self._assert_func(anan, ainf))
+        assert_raises(AssertionError,
+                lambda: self._assert_func(ainf, anan))
+
+    def test_inf(self):
+        a = np.array([[1., 2.], [3., 4.]])
+        b = a.copy()
+        a[0, 0] = np.inf
+        assert_raises(AssertionError,
+                lambda: self._assert_func(a, b))
+        b[0, 0] = -np.inf
+        assert_raises(AssertionError,
+                lambda: self._assert_func(a, b))
+
+    def test_subclass(self):
+        a = np.array([[1., 2.], [3., 4.]])
+        b = np.ma.masked_array([[1., 2.], [0., 4.]],
+                               [[False, False], [True, False]])
+        self._assert_func(a, b)
+        self._assert_func(b, a)
+        self._assert_func(b, b)
+
+        # Test fully masked as well (see gh-11123).
+        a = np.ma.MaskedArray(3.5, mask=True)
+        b = np.array([3., 4., 6.5])
+        self._test_equal(a, b)
+        self._test_equal(b, a)
+        a = np.ma.masked
+        b = np.array([3., 4., 6.5])
+        self._test_equal(a, b)
+        self._test_equal(b, a)
+        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+        b = np.array([1., 2., 3.])
+        self._test_equal(a, b)
+        self._test_equal(b, a)
+        a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
+        b = np.array(1.)
+        self._test_equal(a, b)
+        self._test_equal(b, a)
+
+    def test_subclass_that_cannot_be_bool(self):
+        # While we cannot guarantee testing functions will always work for
+        # subclasses, the tests should ideally rely only on subclasses having
+        # comparison operators, not on them being able to store booleans
+        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+        class MyArray(np.ndarray):
+            def __eq__(self, other):
+                return super().__eq__(other).view(np.ndarray)
+
+            def __lt__(self, other):
+                return super().__lt__(other).view(np.ndarray)
+
+            def all(self, *args, **kwargs):
+                raise NotImplementedError
+
+        a = np.array([1., 2.]).view(MyArray)
+        self._assert_func(a, a)
+
+
+class TestAlmostEqual(_GenericTest):
+
+    def setup_method(self):
+        self._assert_func = assert_almost_equal
+
+    def test_closeness(self):
+        # Note that in the course of time we ended up with
+        #     `abs(x - y) < 1.5 * 10**(-decimal)`
+        # instead of the previously documented
+        #     `abs(x - y) < 0.5 * 10**(-decimal)`
+        # so this check serves to preserve the wrongness.
+
+        # test scalars
+        self._assert_func(1.499999, 0.0, decimal=0)
+        assert_raises(AssertionError,
+                      lambda: self._assert_func(1.5, 0.0, decimal=0))
+
+        # test arrays
+        self._assert_func([1.499999], [0.0], decimal=0)
+        assert_raises(AssertionError,
+                      lambda: self._assert_func([1.5], [0.0], decimal=0))
+
+    def test_nan_item(self):
+        self._assert_func(np.nan, np.nan)
+        assert_raises(AssertionError,
+                      lambda: self._assert_func(np.nan, 1))
+        assert_raises(AssertionError,
+                      lambda: self._assert_func(np.nan, np.inf))
+        assert_raises(AssertionError,
+                      lambda: self._assert_func(np.inf, np.nan))
+
+    def test_inf_item(self):
+        self._assert_func(np.inf, np.inf)
+        self._assert_func(-np.inf, -np.inf)
+        assert_raises(AssertionError,
+                      lambda: self._assert_func(np.inf, 1))
+        assert_raises(AssertionError,
+                      lambda: self._assert_func(-np.inf, np.inf))
+
+    def test_simple_item(self):
+        self._test_not_equal(1, 2)
+
+    def test_complex_item(self):
+        self._assert_func(complex(1, 2), complex(1, 2))
+        self._assert_func(complex(1, np.nan), complex(1, np.nan))
+        self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
+        self._test_not_equal(complex(1, np.nan), complex(1, 2))
+        self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
+        self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
+
+    def test_complex(self):
+        x = np.array([complex(1, 2), complex(1, np.nan)])
+        z = np.array([complex(1, 2), complex(np.nan, 1)])
+        y = np.array([complex(1, 2), complex(1, 2)])
+        self._assert_func(x, x)
+        self._test_not_equal(x, y)
+        self._test_not_equal(x, z)
+
+    def test_error_message(self):
+        """Check the message is formatted correctly for the decimal value.
+           Also check the message when input includes inf or nan (gh12200)"""
+        x = np.array([1.00000000001, 2.00000000002, 3.00003])
+        y = np.array([1.00000000002, 2.00000000003, 3.00004])
+
+        # Test with a different amount of decimal digits
+        with pytest.raises(AssertionError) as exc_info:
+            self._assert_func(x, y, decimal=12)
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')
+        assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
+        assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
+        assert_equal(
+            msgs[6],
+            ' x: array([1.00000000001, 2.00000000002, 3.00003      ])')
+        assert_equal(
+            msgs[7],
+            ' y: array([1.00000000002, 2.00000000003, 3.00004      ])')
+
+        # With the default value of decimal digits, only the 3rd element
+        # differs. Note that we only check for the formatting of the arrays
+        # themselves.
+        with pytest.raises(AssertionError) as exc_info:
+            self._assert_func(x, y)
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')
+        assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
+        assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
+        assert_equal(msgs[6], ' x: array([1.     , 2.     , 3.00003])')
+        assert_equal(msgs[7], ' y: array([1.     , 2.     , 3.00004])')
+
+        # Check the error message when input includes inf
+        x = np.array([np.inf, 0])
+        y = np.array([np.inf, 1])
+        with pytest.raises(AssertionError) as exc_info:
+            self._assert_func(x, y)
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')
+        assert_equal(msgs[4], 'Max absolute difference: 1.')
+        assert_equal(msgs[5], 'Max relative difference: 1.')
+        assert_equal(msgs[6], ' x: array([inf,  0.])')
+        assert_equal(msgs[7], ' y: array([inf,  1.])')
+
+        # Check the error message when dividing by zero
+        x = np.array([1, 2])
+        y = np.array([0, 0])
+        with pytest.raises(AssertionError) as exc_info:
+            self._assert_func(x, y)
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')
+        assert_equal(msgs[4], 'Max absolute difference: 2')
+        assert_equal(msgs[5], 'Max relative difference: inf')
+
+    def test_error_message_2(self):
+        """Check the message is formatted correctly when either x or y is a scalar."""
+        x = 2
+        y = np.ones(20)
+        with pytest.raises(AssertionError) as exc_info:
+            self._assert_func(x, y)
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
+        assert_equal(msgs[4], 'Max absolute difference: 1.')
+        assert_equal(msgs[5], 'Max relative difference: 1.')
+
+        y = 2
+        x = np.ones(20)
+        with pytest.raises(AssertionError) as exc_info:
+            self._assert_func(x, y)
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
+        assert_equal(msgs[4], 'Max absolute difference: 1.')
+        assert_equal(msgs[5], 'Max relative difference: 0.5')
+
+    def test_subclass_that_cannot_be_bool(self):
+        # While we cannot guarantee testing functions will always work for
+        # subclasses, the tests should ideally rely only on subclasses having
+        # comparison operators, not on them being able to store booleans
+        # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+        class MyArray(np.ndarray):
+            def __eq__(self, other):
+                return super().__eq__(other).view(np.ndarray)
+
+            def __lt__(self, other):
+                return super().__lt__(other).view(np.ndarray)
+
+            def all(self, *args, **kwargs):
+                raise NotImplementedError
+
+        a = np.array([1., 2.]).view(MyArray)
+        self._assert_func(a, a)
+
+
+class TestApproxEqual:
+
+    def setup_method(self):
+        self._assert_func = assert_approx_equal
+
+    def test_simple_0d_arrays(self):
+        x = np.array(1234.22)
+        y = np.array(1234.23)
+
+        self._assert_func(x, y, significant=5)
+        self._assert_func(x, y, significant=6)
+        assert_raises(AssertionError,
+                      lambda: self._assert_func(x, y, significant=7))
+
+    def test_simple_items(self):
+        x = 1234.22
+        y = 1234.23
+
+        self._assert_func(x, y, significant=4)
+        self._assert_func(x, y, significant=5)
+        self._assert_func(x, y, significant=6)
+        assert_raises(AssertionError,
+                      lambda: self._assert_func(x, y, significant=7))
+
+    def test_nan_array(self):
+        anan = np.array(np.nan)
+        aone = np.array(1)
+        ainf = np.array(np.inf)
+        self._assert_func(anan, anan)
+        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
+        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
+
+    def test_nan_items(self):
+        anan = np.array(np.nan)
+        aone = np.array(1)
+        ainf = np.array(np.inf)
+        self._assert_func(anan, anan)
+        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
+        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
+
+
+class TestArrayAssertLess:
+
+    def setup_method(self):
+        self._assert_func = assert_array_less
+
+    def test_simple_arrays(self):
+        x = np.array([1.1, 2.2])
+        y = np.array([1.2, 2.3])
+
+        self._assert_func(x, y)
+        assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+        y = np.array([1.0, 2.3])
+
+        assert_raises(AssertionError, lambda: self._assert_func(x, y))
+        assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+    def test_rank2(self):
+        x = np.array([[1.1, 2.2], [3.3, 4.4]])
+        y = np.array([[1.2, 2.3], [3.4, 4.5]])
+
+        self._assert_func(x, y)
+        assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+        y = np.array([[1.0, 2.3], [3.4, 4.5]])
+
+        assert_raises(AssertionError, lambda: self._assert_func(x, y))
+        assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+    def test_rank3(self):
+        x = np.ones(shape=(2, 2, 2))
+        y = np.ones(shape=(2, 2, 2))+1
+
+        self._assert_func(x, y)
+        assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+        y[0, 0, 0] = 0
+
+        assert_raises(AssertionError, lambda: self._assert_func(x, y))
+        assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+    def test_simple_items(self):
+        x = 1.1
+        y = 2.2
+
+        self._assert_func(x, y)
+        assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+        y = np.array([2.2, 3.3])
+
+        self._assert_func(x, y)
+        assert_raises(AssertionError, lambda: self._assert_func(y, x))
+
+        y = np.array([1.0, 3.3])
+
+        assert_raises(AssertionError, lambda: self._assert_func(x, y))
+
+    def test_nan_noncompare(self):
+        anan = np.array(np.nan)
+        aone = np.array(1)
+        ainf = np.array(np.inf)
+        self._assert_func(anan, anan)
+        assert_raises(AssertionError, lambda: self._assert_func(aone, anan))
+        assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
+        assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
+
+    def test_nan_noncompare_array(self):
+        x = np.array([1.1, 2.2, 3.3])
+        anan = np.array(np.nan)
+
+        assert_raises(AssertionError, lambda: self._assert_func(x, anan))
+        assert_raises(AssertionError, lambda: self._assert_func(anan, x))
+
+        x = np.array([1.1, 2.2, np.nan])
+
+        assert_raises(AssertionError, lambda: self._assert_func(x, anan))
+        assert_raises(AssertionError, lambda: self._assert_func(anan, x))
+
+        y = np.array([1.0, 2.0, np.nan])
+
+        self._assert_func(y, x)
+        assert_raises(AssertionError, lambda: self._assert_func(x, y))
+
+    def test_inf_compare(self):
+        aone = np.array(1)
+        ainf = np.array(np.inf)
+
+        self._assert_func(aone, ainf)
+        self._assert_func(-ainf, aone)
+        self._assert_func(-ainf, ainf)
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))
+        assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))
+
+    def test_inf_compare_array(self):
+        x = np.array([1.1, 2.2, np.inf])
+        ainf = np.array(np.inf)
+
+        assert_raises(AssertionError, lambda: self._assert_func(x, ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(ainf, x))
+        assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))
+        assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))
+        self._assert_func(-ainf, x)
+
+
+class TestWarns:
+
+    def test_warn(self):
+        def f():
+            warnings.warn("yo")
+            return 3
+
+        before_filters = sys.modules['warnings'].filters[:]
+        assert_equal(assert_warns(UserWarning, f), 3)
+        after_filters = sys.modules['warnings'].filters
+
+        assert_raises(AssertionError, assert_no_warnings, f)
+        assert_equal(assert_no_warnings(lambda x: x, 1), 1)
+
+        # Check that the warnings state is unchanged
+        assert_equal(before_filters, after_filters,
+                     "assert_warns does not preserver warnings state")
+
+    def test_context_manager(self):
+
+        before_filters = sys.modules['warnings'].filters[:]
+        with assert_warns(UserWarning):
+            warnings.warn("yo")
+        after_filters = sys.modules['warnings'].filters
+
+        def no_warnings():
+            with assert_no_warnings():
+                warnings.warn("yo")
+
+        assert_raises(AssertionError, no_warnings)
+        assert_equal(before_filters, after_filters,
+                     "assert_warns does not preserver warnings state")
+
+    def test_warn_wrong_warning(self):
+        def f():
+            warnings.warn("yo", DeprecationWarning)
+
+        failed = False
+        with warnings.catch_warnings():
+            warnings.simplefilter("error", DeprecationWarning)
+            try:
+                # Should raise a DeprecationWarning
+                assert_warns(UserWarning, f)
+                failed = True
+            except DeprecationWarning:
+                pass
+
+        if failed:
+            raise AssertionError("wrong warning caught by assert_warn")
+
+
+class TestAssertAllclose:
+
+    def test_simple(self):
+        x = 1e-3
+        y = 1e-9
+
+        assert_allclose(x, y, atol=1)
+        assert_raises(AssertionError, assert_allclose, x, y)
+
+        a = np.array([x, y, x, y])
+        b = np.array([x, y, x, x])
+
+        assert_allclose(a, b, atol=1)
+        assert_raises(AssertionError, assert_allclose, a, b)
+
+        b[-1] = y * (1 + 1e-8)
+        assert_allclose(a, b)
+        assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)
+
+        assert_allclose(6, 10, rtol=0.5)
+        assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)
+
+    def test_min_int(self):
+        a = np.array([np.iinfo(np.int_).min], dtype=np.int_)
+        # Should not raise:
+        assert_allclose(a, a)
+
+    def test_report_fail_percentage(self):
+        a = np.array([1, 1, 1, 1])
+        b = np.array([1, 1, 1, 2])
+
+        with pytest.raises(AssertionError) as exc_info:
+            assert_allclose(a, b)
+        msg = str(exc_info.value)
+        assert_('Mismatched elements: 1 / 4 (25%)\n'
+                'Max absolute difference: 1\n'
+                'Max relative difference: 0.5' in msg)
+
+    def test_equal_nan(self):
+        a = np.array([np.nan])
+        b = np.array([np.nan])
+        # Should not raise:
+        assert_allclose(a, b, equal_nan=True)
+
+    def test_not_equal_nan(self):
+        a = np.array([np.nan])
+        b = np.array([np.nan])
+        assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)
+
+    def test_equal_nan_default(self):
+        # Make sure equal_nan default behavior remains unchanged. (All
+        # of these functions use assert_array_compare under the hood.)
+        # None of these should raise.
+        a = np.array([np.nan])
+        b = np.array([np.nan])
+        assert_array_equal(a, b)
+        assert_array_almost_equal(a, b)
+        assert_array_less(a, b)
+        assert_allclose(a, b)
+
+    def test_report_max_relative_error(self):
+        a = np.array([0, 1])
+        b = np.array([0, 2])
+
+        with pytest.raises(AssertionError) as exc_info:
+            assert_allclose(a, b)
+        msg = str(exc_info.value)
+        assert_('Max relative difference: 0.5' in msg)
+
+    def test_timedelta(self):
+        # see gh-18286
+        a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
+        assert_allclose(a, a)
+
+    def test_error_message_unsigned(self):
+        """Check the the message is formatted correctly when overflow can occur
+           (gh21768)"""
+        # Ensure to test for potential overflow in the case of:
+        #        x - y
+        # and
+        #        y - x
+        x = np.asarray([0, 1, 8], dtype='uint8')
+        y = np.asarray([4, 4, 4], dtype='uint8')
+        with pytest.raises(AssertionError) as exc_info:
+            assert_allclose(x, y, atol=3)
+        msgs = str(exc_info.value).split('\n')
+        assert_equal(msgs[4], 'Max absolute difference: 4')
+
+
+class TestArrayAlmostEqualNulp:
+
+    def test_float64_pass(self):
+        # The number of units of least precision
+        # In this case, use a few places above the lowest level (ie nulp=1)
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float64)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        # Addition
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+        # Subtraction
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+    def test_float64_fail(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float64)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+    def test_float64_ignore_nan(self):
+        # Ignore ULP differences between various NAN's
+        # Note that MIPS may reverse quiet and signaling nans
+        # so we use the builtin version as a base.
+        offset = np.uint64(0xffffffff)
+        nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64)
+        nan2_i64 = nan1_i64 ^ offset  # nan payload on MIPS is all ones.
+        nan1_f64 = nan1_i64.view(np.float64)
+        nan2_f64 = nan2_i64.view(np.float64)
+        assert_array_max_ulp(nan1_f64, nan2_f64, 0)
+
+    def test_float32_pass(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+    def test_float32_fail(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+    def test_float32_ignore_nan(self):
+        # Ignore ULP differences between various NAN's
+        # Note that MIPS may reverse quiet and signaling nans
+        # so we use the builtin version as a base.
+        offset = np.uint32(0xffff)
+        nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32)
+        nan2_i32 = nan1_i32 ^ offset  # nan payload on MIPS is all ones.
+        nan1_f32 = nan1_i32.view(np.float32)
+        nan2_f32 = nan2_i32.view(np.float32)
+        assert_array_max_ulp(nan1_f32, nan2_f32, 0)
+
+    def test_float16_pass(self):
+        nulp = 5
+        x = np.linspace(-4, 4, 10, dtype=np.float16)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
+        assert_array_almost_equal_nulp(x, y, nulp)
+
+    def test_float16_fail(self):
+        nulp = 5
+        x = np.linspace(-4, 4, 10, dtype=np.float16)
+        x = 10**x
+        x = np.r_[-x, x]
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      x, y, nulp)
+
+    def test_float16_ignore_nan(self):
+        # Ignore ULP differences between various NAN's
+        # Note that MIPS may reverse quiet and signaling nans
+        # so we use the builtin version as a base.
+        offset = np.uint16(0xff)
+        nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16)
+        nan2_i16 = nan1_i16 ^ offset  # nan payload on MIPS is all ones.
+        nan1_f16 = nan1_i16.view(np.float16)
+        nan2_f16 = nan2_i16.view(np.float16)
+        assert_array_max_ulp(nan1_f16, nan2_f16, 0)
+
+    def test_complex128_pass(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float64)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x*1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+        # The test condition needs to be at least a factor of sqrt(2) smaller
+        # because the real and imaginary parts both change
+        y = x + x*eps*nulp/4.
+        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
+        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+        y = x - x*epsneg*nulp/4.
+        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+    def test_complex128_fail(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float64)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x*1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y*1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x*1j, nulp)
+        # The test condition needs to be at least a factor of sqrt(2) smaller
+        # because the real and imaginary parts both change
+        y = x + x*eps*nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y*1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y*1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x*1j, nulp)
+        y = x - x*epsneg*nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y*1j, nulp)
+
+    def test_complex64_pass(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x*1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp/2.
+        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+        y = x + x*eps*nulp/4.
+        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp/2.
+        assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
+        assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
+        y = x - x*epsneg*nulp/4.
+        assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
+
+    def test_complex64_fail(self):
+        nulp = 5
+        x = np.linspace(-20, 20, 50, dtype=np.float32)
+        x = 10**x
+        x = np.r_[-x, x]
+        xi = x + x*1j
+
+        eps = np.finfo(x.dtype).eps
+        y = x + x*eps*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y*1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x*1j, nulp)
+        y = x + x*eps*nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y*1j, nulp)
+
+        epsneg = np.finfo(x.dtype).epsneg
+        y = x - x*epsneg*nulp*2.
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, x + y*1j, nulp)
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + x*1j, nulp)
+        y = x - x*epsneg*nulp
+        assert_raises(AssertionError, assert_array_almost_equal_nulp,
+                      xi, y + y*1j, nulp)
+
+
+class TestULP:
+
+    def test_equal(self):
+        x = np.random.randn(10)
+        assert_array_max_ulp(x, x, maxulp=0)
+
+    def test_single(self):
+        # Generate 1 + small deviation, check that adding eps gives a few ULP
+        x = np.ones(10).astype(np.float32)
+        x += 0.01 * np.random.randn(10).astype(np.float32)
+        eps = np.finfo(np.float32).eps
+        assert_array_max_ulp(x, x+eps, maxulp=20)
+
+    def test_double(self):
+        # Generate 1 + small deviation, check that adding eps gives a few ULP
+        x = np.ones(10).astype(np.float64)
+        x += 0.01 * np.random.randn(10).astype(np.float64)
+        eps = np.finfo(np.float64).eps
+        assert_array_max_ulp(x, x+eps, maxulp=200)
+
+    def test_inf(self):
+        for dt in [np.float32, np.float64]:
+            inf = np.array([np.inf]).astype(dt)
+            big = np.array([np.finfo(dt).max])
+            assert_array_max_ulp(inf, big, maxulp=200)
+
+    def test_nan(self):
+        # Test that nan is 'far' from small, tiny, inf, max and min
+        for dt in [np.float32, np.float64]:
+            if dt == np.float32:
+                maxulp = 1e6
+            else:
+                maxulp = 1e12
+            inf = np.array([np.inf]).astype(dt)
+            nan = np.array([np.nan]).astype(dt)
+            big = np.array([np.finfo(dt).max])
+            tiny = np.array([np.finfo(dt).tiny])
+            zero = np.array([np.PZERO]).astype(dt)
+            nzero = np.array([np.NZERO]).astype(dt)
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, inf,
+                          maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, big,
+                          maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, tiny,
+                          maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, zero,
+                          maxulp=maxulp))
+            assert_raises(AssertionError,
+                          lambda: assert_array_max_ulp(nan, nzero,
+                          maxulp=maxulp))
+
+
+class TestStringEqual:
+    def test_simple(self):
+        assert_string_equal("hello", "hello")
+        assert_string_equal("hello\nmultiline", "hello\nmultiline")
+
+        with pytest.raises(AssertionError) as exc_info:
+            assert_string_equal("foo\nbar", "hello\nbar")
+        msg = str(exc_info.value)
+        assert_equal(msg, "Differences in strings:\n- foo\n+ hello")
+
+        assert_raises(AssertionError,
+                      lambda: assert_string_equal("foo", "hello"))
+
+    def test_regex(self):
+        assert_string_equal("a+*b", "a+*b")
+
+        assert_raises(AssertionError,
+                      lambda: assert_string_equal("aaa", "a+b"))
+
+
+def assert_warn_len_equal(mod, n_in_context):
+    try:
+        mod_warns = mod.__warningregistry__
+    except AttributeError:
+        # the lack of a __warningregistry__
+        # attribute means that no warning has
+        # occurred; this can happen in a
+        # parallel test scenario, while in a
+        # serial test scenario an initial
+        # warning (and therefore the attribute)
+        # is always created first
+        mod_warns = {}
+
+    num_warns = len(mod_warns)
+
+    if 'version' in mod_warns:
+        # Python 3 adds a 'version' entry to the registry,
+        # do not count it.
+        num_warns -= 1
+
+    assert_equal(num_warns, n_in_context)
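+
+# Added note (my reading of CPython's warning machinery, for orientation):
+# after ``warnings.warn("x")`` fires in a module, that module's
+# __warningregistry__ typically looks something like
+#     {'version': <filters version>, ('x', UserWarning, <lineno>): True}
+# which is why assert_warn_len_equal discounts the 'version' entry.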
+
+
+def test_warn_len_equal_call_scenarios():
+    # assert_warn_len_equal is called under
+    # varying circumstances depending on serial
+    # vs. parallel test scenarios; this test
+    # simply aims to probe both code paths and
+    # check that neither path raises an unexpected AssertionError
+
+    # parallel scenario -- no warning issued yet
+    class mod:
+        pass
+
+    mod_inst = mod()
+
+    assert_warn_len_equal(mod=mod_inst,
+                          n_in_context=0)
+
+    # serial test scenario -- the __warningregistry__
+    # attribute should be present
+    class mod:
+        def __init__(self):
+            self.__warningregistry__ = {'warning1': 1,
+                                        'warning2': 2}
+
+    mod_inst = mod()
+    assert_warn_len_equal(mod=mod_inst,
+                          n_in_context=2)
+
+
+def _get_fresh_mod():
+    # Get this module, with warning registry empty
+    my_mod = sys.modules[__name__]
+    try:
+        my_mod.__warningregistry__.clear()
+    except AttributeError:
+        # will not have a __warningregistry__ unless warning has been
+        # raised in the module at some point
+        pass
+    return my_mod
+
+
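+# Added context for the tests below (hedged summary): the standard library's
+# warnings.catch_warnings saves and restores the global filter state but not
+# each module's __warningregistry__; numpy's clear_and_catch_warnings
+# additionally clears on entry, and restores on exit, the registries of the
+# modules passed via ``modules=``.  A minimal usage sketch:
+#
+#     with clear_and_catch_warnings(modules=[my_mod]):
+#         warnings.simplefilter('always')
+#         ...  # warnings raised here do not pollute my_mod's registry
+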
+def test_clear_and_catch_warnings():
+    # Initial state of module, no warnings
+    my_mod = _get_fresh_mod()
+    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
+    with clear_and_catch_warnings(modules=[my_mod]):
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_equal(my_mod.__warningregistry__, {})
+    # Without specified modules, don't clear warnings during context.
+    # catch_warnings doesn't make an entry for 'ignore'.
+    with clear_and_catch_warnings():
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+    # Manually adding two warnings to the registry:
+    my_mod.__warningregistry__ = {'warning1': 1,
+                                  'warning2': 2}
+
+    # Confirm that specifying module keeps old warning, does not add new
+    with clear_and_catch_warnings(modules=[my_mod]):
+        warnings.simplefilter('ignore')
+        warnings.warn('Another warning')
+    assert_warn_len_equal(my_mod, 2)
+
+    # Another warning with no module spec: raising it clears up the registry
+    with clear_and_catch_warnings():
+        warnings.simplefilter('ignore')
+        warnings.warn('Another warning')
+    assert_warn_len_equal(my_mod, 0)
+
+
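+# Added note on suppress_warnings (hedged summary of its documented
+# behaviour): it works both as a context manager and as a decorator;
+# ``filter()`` suppresses matching warnings, ``record()`` suppresses them and
+# returns a list collecting the matches, and filters added *inside* a ``with``
+# block are dropped again on exit.  A minimal sketch:
+#
+#     sup = suppress_warnings()
+#     sup.filter(UserWarning, message="Some warning")
+#     with sup:
+#         warnings.warn("Some warning")   # silently swallowed
+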
+def test_suppress_warnings_module():
+    # Initial state of module, no warnings
+    my_mod = _get_fresh_mod()
+    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
+
+    def warn_other_module():
+        # Apply along axis is implemented in python; stacklevel=2 means
+        # we end up inside its module, not ours.
+        def warn(arr):
+            warnings.warn("Some warning 2", stacklevel=2)
+            return arr
+        np.apply_along_axis(warn, 0, [0])
+
+    # Test module based warning suppression:
+    assert_warn_len_equal(my_mod, 0)
+    with suppress_warnings() as sup:
+        sup.record(UserWarning)
+        # Suppress the warning from the other module (its file may have a
+        # .pyc ending); this needs updating if apply_along_axis is ever moved.
+        sup.filter(module=np.lib.shape_base)
+        warnings.warn("Some warning")
+        warn_other_module()
+    # Check that the module matching worked: only this module's warning was
+    # recorded, the one from the other module was filtered out.
+    assert_equal(len(sup.log), 1)
+    assert_equal(sup.log[0].message.args[0], "Some warning")
+    assert_warn_len_equal(my_mod, 0)
+    sup = suppress_warnings()
+    # Filter warnings raised from this test module itself:
+    sup.filter(module=my_mod)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+    # And test repeat works:
+    sup.filter(module=my_mod)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+    # Without specified modules
+    with suppress_warnings():
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+
+def test_suppress_warnings_type():
+    # Initial state of module, no warnings
+    my_mod = _get_fresh_mod()
+    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
+
+    # Test category (type) based warning suppression:
+    with suppress_warnings() as sup:
+        sup.filter(UserWarning)
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+    sup = suppress_warnings()
+    sup.filter(UserWarning)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+    # And test repeat works:
+    sup.filter(module=my_mod)
+    with sup:
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+    # Without specified modules
+    with suppress_warnings():
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_warn_len_equal(my_mod, 0)
+
+
+def test_suppress_warnings_decorate_no_record():
+    sup = suppress_warnings()
+    sup.filter(UserWarning)
+
+    @sup
+    def warn(category):
+        warnings.warn('Some warning', category)
+
+    with warnings.catch_warnings(record=True) as w:
+        warnings.simplefilter("always")
+        warn(UserWarning)  # should be suppressed
+        warn(RuntimeWarning)
+        assert_equal(len(w), 1)
+
+
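+# Added note: ``record()`` both suppresses and collects matching warnings; the
+# list it returns only holds its own matches, while ``sup.log`` aggregates
+# everything recorded by any recorder on the instance.  That is why the test
+# below expects ``len(sup.log) == 2`` but ``len(log1) == len(log2) == 1``.
+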
+def test_suppress_warnings_record():
+    sup = suppress_warnings()
+    log1 = sup.record()
+
+    with sup:
+        log2 = sup.record(message='Some other warning 2')
+        sup.filter(message='Some warning')
+        warnings.warn('Some warning')
+        warnings.warn('Some other warning')
+        warnings.warn('Some other warning 2')
+
+        assert_equal(len(sup.log), 2)
+        assert_equal(len(log1), 1)
+        assert_equal(len(log2), 1)
+        assert_equal(log2[0].message.args[0], 'Some other warning 2')
+
+    # Do it again, with the same context to see if some warnings survived:
+    with sup:
+        log2 = sup.record(message='Some other warning 2')
+        sup.filter(message='Some warning')
+        warnings.warn('Some warning')
+        warnings.warn('Some other warning')
+        warnings.warn('Some other warning 2')
+
+        assert_equal(len(sup.log), 2)
+        assert_equal(len(log1), 1)
+        assert_equal(len(log2), 1)
+        assert_equal(log2[0].message.args[0], 'Some other warning 2')
+
+    # Test nested:
+    with suppress_warnings() as sup:
+        sup.record()
+        with suppress_warnings() as sup2:
+            sup2.record(message='Some warning')
+            warnings.warn('Some warning')
+            warnings.warn('Some other warning')
+            assert_equal(len(sup2.log), 1)
+        assert_equal(len(sup.log), 1)
+
+
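+# Added note (my reading of the documented forwarding rules): a nested
+# suppress_warnings forwards warnings it does not itself suppress to the
+# enclosing instance according to its rule -- "always" forwards every
+# occurrence, "location" de-duplicates per code location, "module"
+# de-duplicates per module, and "once" forwards each distinct warning only
+# once.  The ``len(sup.log) == 2`` assertions below follow from those rules.
+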
+def test_suppress_warnings_forwarding():
+    def warn_other_module():
+        # Apply along axis is implemented in python; stacklevel=2 means
+        # we end up inside its module, not ours.
+        def warn(arr):
+            warnings.warn("Some warning", stacklevel=2)
+            return arr
+        np.apply_along_axis(warn, 0, [0])
+
+    with suppress_warnings() as sup:
+        sup.record()
+        with suppress_warnings("always"):
+            for i in range(2):
+                warnings.warn("Some warning")
+
+        assert_equal(len(sup.log), 2)
+
+    with suppress_warnings() as sup:
+        sup.record()
+        with suppress_warnings("location"):
+            for i in range(2):
+                warnings.warn("Some warning")
+                warnings.warn("Some warning")
+
+        assert_equal(len(sup.log), 2)
+
+    with suppress_warnings() as sup:
+        sup.record()
+        with suppress_warnings("module"):
+            for i in range(2):
+                warnings.warn("Some warning")
+                warnings.warn("Some warning")
+                warn_other_module()
+
+        assert_equal(len(sup.log), 2)
+
+    with suppress_warnings() as sup:
+        sup.record()
+        with suppress_warnings("once"):
+            for i in range(2):
+                warnings.warn("Some warning")
+                warnings.warn("Some other warning")
+                warn_other_module()
+
+        assert_equal(len(sup.log), 2)
+
+
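+# Added note: tempdir() and temppath() are numpy.testing context managers that
+# create a temporary directory / file and remove it on exit, including when
+# the block is left via an exception -- which is exactly what the two tests
+# below assert.
+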
+def test_tempdir():
+    with tempdir() as tdir:
+        fpath = os.path.join(tdir, 'tmp')
+        with open(fpath, 'w'):
+            pass
+    assert_(not os.path.isdir(tdir))
+
+    raised = False
+    try:
+        with tempdir() as tdir:
+            raise ValueError()
+    except ValueError:
+        raised = True
+    assert_(raised)
+    assert_(not os.path.isdir(tdir))
+
+
+def test_temppath():
+    with temppath() as fpath:
+        with open(fpath, 'w'):
+            pass
+    assert_(not os.path.isfile(fpath))
+
+    raised = False
+    try:
+        with temppath() as fpath:
+            raise ValueError()
+    except ValueError:
+        raised = True
+    assert_(raised)
+    assert_(not os.path.isfile(fpath))
+
+
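+# Added note: clear_and_catch_warnings also honours the class attribute
+# ``class_modules``, so a subclass like the one below gets this test module
+# cleared and restored by default, without passing ``modules=`` explicitly.
+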
+class my_cacw(clear_and_catch_warnings):
+
+    class_modules = (sys.modules[__name__],)
+
+
+def test_clear_and_catch_warnings_inherit():
+    # Test can subclass and add default modules
+    my_mod = _get_fresh_mod()
+    with my_cacw():
+        warnings.simplefilter('ignore')
+        warnings.warn('Some warning')
+    assert_equal(my_mod.__warningregistry__, {})
+
+
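+# Added note on assert_no_gc_cycles (hedged): it can be used as a context
+# manager or called directly with a callable, and it fails if the wrapped code
+# created objects that are only reachable through a reference cycle, i.e.
+# objects that gc.collect() has to free.  For instance, something like
+# ``assert_no_gc_cycles(lambda: [1, 2, 3])`` passes, whereas wrapping a
+# function that builds a self-referencing list fails -- the two cases the
+# tests below exercise.
+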
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+class TestAssertNoGcCycles:
+    """ Test assert_no_gc_cycles """
+    def test_passes(self):
+        def no_cycle():
+            b = []
+            b.append([])
+            return b
+
+        with assert_no_gc_cycles():
+            no_cycle()
+
+        assert_no_gc_cycles(no_cycle)
+
+    def test_asserts(self):
+        def make_cycle():
+            a = []
+            a.append(a)
+            a.append(a)
+            return a
+
+        with assert_raises(AssertionError):
+            with assert_no_gc_cycles():
+                make_cycle()
+
+        with assert_raises(AssertionError):
+            assert_no_gc_cycles(make_cycle)
+
+    @pytest.mark.slow
+    def test_fails(self):
+        """
+        Test that in cases where the garbage cannot be collected, we raise an
+        error, instead of hanging forever trying to clear it.
+        """
+
+        class ReferenceCycleInDel:
+            """
+            An object that not only contains a reference cycle, but creates new
+            cycles whenever it's garbage-collected and its __del__ runs
+            """
+            make_cycle = True
+
+            def __init__(self):
+                self.cycle = self
+
+            def __del__(self):
+                # break the current cycle so that `self` can be freed
+                self.cycle = None
+
+                if ReferenceCycleInDel.make_cycle:
+                    # but create a new one so that the garbage collector has more
+                    # work to do.
+                    ReferenceCycleInDel()
+
+        try:
+            w = weakref.ref(ReferenceCycleInDel())
+            try:
+                with assert_raises(RuntimeError):
+                    # this will be unable to get a baseline empty garbage
+                    assert_no_gc_cycles(lambda: None)
+            except AssertionError:
+                # the above test is only necessary if the GC actually tried to free
+                # our object anyway, which python 2.7 does not.
+                if w() is not None:
+                    pytest.skip("GC does not call __del__ on cyclic objects")
+                    raise
+
+        finally:
+            # make sure that we stop creating reference cycles
+            ReferenceCycleInDel.make_cycle = False
diff --git a/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698/model-00004-of-00004.safetensors b/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698/model-00004-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f5fa3d5d85410d78e188077ebecbba252bffaced
--- /dev/null
+++ b/llm_tutorial/llm_recipes/models/hf-model-eval/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698/model-00004-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93a0c088c44f3ac7285148a6bacc1a0aa7f5fb2098850e3e9d5d71baa67110c4
+size 1223688320
diff --git a/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698/model-00004-of-00004.safetensors b/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698/model-00004-of-00004.safetensors
new file mode 100644
index 0000000000000000000000000000000000000000..f5fa3d5d85410d78e188077ebecbba252bffaced
--- /dev/null
+++ b/llm_tutorial/llm_recipes/models/hf-model/llm-jp-v3-3.7b_ja-en_3M-pairs/iter_0000698/model-00004-of-00004.safetensors
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:93a0c088c44f3ac7285148a6bacc1a0aa7f5fb2098850e3e9d5d71baa67110c4
+size 1223688320