koichi12 committed on
Commit c36407f · verified · 1 Parent(s): d78dfe4

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the complete change set.
Files changed (50)
  1. .venv/lib/python3.11/site-packages/numpy/_utils/__init__.py +29 -0
  2. .venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/__init__.cpython-311.pyc +0 -0
  3. .venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_convertions.cpython-311.pyc +0 -0
  4. .venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_inspect.cpython-311.pyc +0 -0
  5. .venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_pep440.cpython-311.pyc +0 -0
  6. .venv/lib/python3.11/site-packages/numpy/_utils/_convertions.py +18 -0
  7. .venv/lib/python3.11/site-packages/numpy/_utils/_inspect.py +191 -0
  8. .venv/lib/python3.11/site-packages/numpy/_utils/_pep440.py +487 -0
  9. .venv/lib/python3.11/site-packages/numpy/compat/__init__.py +19 -0
  10. .venv/lib/python3.11/site-packages/numpy/compat/__pycache__/__init__.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/numpy/compat/__pycache__/py3k.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/numpy/compat/__pycache__/setup.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/numpy/compat/py3k.py +145 -0
  14. .venv/lib/python3.11/site-packages/numpy/compat/setup.py +10 -0
  15. .venv/lib/python3.11/site-packages/numpy/compat/tests/__init__.py +0 -0
  16. .venv/lib/python3.11/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-311.pyc +0 -0
  17. .venv/lib/python3.11/site-packages/numpy/compat/tests/__pycache__/test_compat.cpython-311.pyc +0 -0
  18. .venv/lib/python3.11/site-packages/numpy/compat/tests/test_compat.py +22 -0
  19. .venv/lib/python3.11/site-packages/numpy/lib/__init__.pyi +245 -0
  20. .venv/lib/python3.11/site-packages/numpy/lib/_datasource.py +704 -0
  21. .venv/lib/python3.11/site-packages/numpy/lib/_iotools.py +897 -0
  22. .venv/lib/python3.11/site-packages/numpy/lib/_version.pyi +17 -0
  23. .venv/lib/python3.11/site-packages/numpy/lib/arraypad.pyi +85 -0
  24. .venv/lib/python3.11/site-packages/numpy/lib/arraysetops.py +981 -0
  25. .venv/lib/python3.11/site-packages/numpy/lib/arrayterator.py +219 -0
  26. .venv/lib/python3.11/site-packages/numpy/lib/arrayterator.pyi +49 -0
  27. .venv/lib/python3.11/site-packages/numpy/lib/format.py +976 -0
  28. .venv/lib/python3.11/site-packages/numpy/lib/format.pyi +22 -0
  29. .venv/lib/python3.11/site-packages/numpy/lib/function_base.pyi +697 -0
  30. .venv/lib/python3.11/site-packages/numpy/lib/histograms.py +1072 -0
  31. .venv/lib/python3.11/site-packages/numpy/lib/histograms.pyi +47 -0
  32. .venv/lib/python3.11/site-packages/numpy/lib/index_tricks.py +1046 -0
  33. .venv/lib/python3.11/site-packages/numpy/lib/index_tricks.pyi +162 -0
  34. .venv/lib/python3.11/site-packages/numpy/lib/mixins.py +177 -0
  35. .venv/lib/python3.11/site-packages/numpy/lib/mixins.pyi +74 -0
  36. .venv/lib/python3.11/site-packages/numpy/lib/npyio.py +2547 -0
  37. .venv/lib/python3.11/site-packages/numpy/lib/npyio.pyi +330 -0
  38. .venv/lib/python3.11/site-packages/numpy/lib/polynomial.pyi +303 -0
  39. .venv/lib/python3.11/site-packages/numpy/lib/recfunctions.py +1673 -0
  40. .venv/lib/python3.11/site-packages/numpy/lib/scimath.py +625 -0
  41. .venv/lib/python3.11/site-packages/numpy/lib/setup.py +12 -0
  42. .venv/lib/python3.11/site-packages/numpy/lib/shape_base.py +1274 -0
  43. .venv/lib/python3.11/site-packages/numpy/lib/shape_base.pyi +220 -0
  44. .venv/lib/python3.11/site-packages/numpy/lib/stride_tricks.py +547 -0
  45. .venv/lib/python3.11/site-packages/numpy/lib/stride_tricks.pyi +80 -0
  46. .venv/lib/python3.11/site-packages/numpy/lib/twodim_base.py +1183 -0
  47. .venv/lib/python3.11/site-packages/numpy/lib/twodim_base.pyi +239 -0
  48. .venv/lib/python3.11/site-packages/numpy/lib/type_check.py +735 -0
  49. .venv/lib/python3.11/site-packages/numpy/lib/type_check.pyi +222 -0
  50. .venv/lib/python3.11/site-packages/numpy/lib/ufunclike.py +210 -0
.venv/lib/python3.11/site-packages/numpy/_utils/__init__.py ADDED
@@ -0,0 +1,29 @@
+"""
+This is a module for defining private helpers which do not depend on the
+rest of NumPy.
+
+Everything in here must be self-contained so that it can be
+imported anywhere else without creating circular imports.
+If a utility requires the import of NumPy, it probably belongs
+in ``numpy.core``.
+"""
+
+from ._convertions import asunicode, asbytes
+
+
+def set_module(module):
+    """Private decorator for overriding __module__ on a function or class.
+
+    Example usage::
+
+        @set_module('numpy')
+        def example():
+            pass
+
+        assert example.__module__ == 'numpy'
+    """
+    def decorator(func):
+        if module is not None:
+            func.__module__ = module
+        return func
+    return decorator
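
To make the helper concrete, here is a minimal sketch of the decorator in use; the module name ``mypkg`` and the function are hypothetical::

    from numpy._utils import set_module

    @set_module('mypkg')          # hypothetical public module name
    def helper():
        pass

    # __module__ now reports the public location, not the defining file.
    assert helper.__module__ == 'mypkg'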
.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (1.15 kB).
.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_convertions.cpython-311.pyc ADDED
Binary file (935 Bytes).
.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_inspect.cpython-311.pyc ADDED
Binary file (10.4 kB).
.venv/lib/python3.11/site-packages/numpy/_utils/__pycache__/_pep440.cpython-311.pyc ADDED
Binary file (20.9 kB).
.venv/lib/python3.11/site-packages/numpy/_utils/_convertions.py ADDED
@@ -0,0 +1,18 @@
+"""
+A set of methods retained from np.compat module that
+are still used across codebase.
+"""
+
+__all__ = ["asunicode", "asbytes"]
+
+
+def asunicode(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
+
+
+def asbytes(s):
+    if isinstance(s, bytes):
+        return s
+    return str(s).encode('latin1')
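
These two helpers round-trip between str and bytes via the latin-1 codec; a small usage sketch::

    from numpy._utils._convertions import asbytes, asunicode

    assert asbytes('abc') == b'abc'       # str -> bytes via latin-1
    assert asunicode(b'abc') == 'abc'     # bytes -> str via latin-1
    assert asbytes(123) == b'123'         # non-bytes input is stringified first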
.venv/lib/python3.11/site-packages/numpy/_utils/_inspect.py ADDED
@@ -0,0 +1,191 @@
+"""Subset of inspect module from upstream python
+
+We use this instead of upstream because upstream inspect is slow to import, and
+significantly contributes to numpy import times. Importing this copy has almost
+no overhead.
+
+"""
+import types
+
+__all__ = ['getargspec', 'formatargspec']
+
+# ----------------------------------------------------------- type-checking
+def ismethod(object):
+    """Return true if the object is an instance method.
+
+    Instance method objects provide these attributes:
+        __doc__         documentation string
+        __name__        name with which this method was defined
+        im_class        class object in which this method belongs
+        im_func         function object containing implementation of method
+        im_self         instance to which this method is bound, or None
+
+    """
+    return isinstance(object, types.MethodType)
+
+def isfunction(object):
+    """Return true if the object is a user-defined function.
+
+    Function objects provide these attributes:
+        __doc__         documentation string
+        __name__        name with which this function was defined
+        func_code       code object containing compiled function bytecode
+        func_defaults   tuple of any default values for arguments
+        func_doc        (same as __doc__)
+        func_globals    global namespace in which this function was defined
+        func_name       (same as __name__)
+
+    """
+    return isinstance(object, types.FunctionType)
+
+def iscode(object):
+    """Return true if the object is a code object.
+
+    Code objects provide these attributes:
+        co_argcount     number of arguments (not including * or ** args)
+        co_code         string of raw compiled bytecode
+        co_consts       tuple of constants used in the bytecode
+        co_filename     name of file in which this code object was created
+        co_firstlineno  number of first line in Python source code
+        co_flags        bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg
+        co_lnotab       encoded mapping of line numbers to bytecode indices
+        co_name         name with which this code object was defined
+        co_names        tuple of names of local variables
+        co_nlocals      number of local variables
+        co_stacksize    virtual machine stack space required
+        co_varnames     tuple of names of arguments and local variables

+    """
+    return isinstance(object, types.CodeType)
+
+# ------------------------------------------------ argument list extraction
+# These constants are from Python's compile.h.
+CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS = 1, 2, 4, 8
+
+def getargs(co):
+    """Get information about the arguments accepted by a code object.
+
+    Three things are returned: (args, varargs, varkw), where 'args' is
+    a list of argument names (possibly containing nested lists), and
+    'varargs' and 'varkw' are the names of the * and ** arguments or None.
+
+    """
+
+    if not iscode(co):
+        raise TypeError('arg is not a code object')
+
+    nargs = co.co_argcount
+    names = co.co_varnames
+    args = list(names[:nargs])
+
+    # The following acrobatics are for anonymous (tuple) arguments.
+    # Which we do not need to support, so remove to avoid importing
+    # the dis module.
+    for i in range(nargs):
+        if args[i][:1] in ['', '.']:
+            raise TypeError("tuple function arguments are not supported")
+    varargs = None
+    if co.co_flags & CO_VARARGS:
+        varargs = co.co_varnames[nargs]
+        nargs = nargs + 1
+    varkw = None
+    if co.co_flags & CO_VARKEYWORDS:
+        varkw = co.co_varnames[nargs]
+    return args, varargs, varkw
+
+def getargspec(func):
+    """Get the names and default values of a function's arguments.
+
+    A tuple of four things is returned: (args, varargs, varkw, defaults).
+    'args' is a list of the argument names (it may contain nested lists).
+    'varargs' and 'varkw' are the names of the * and ** arguments or None.
+    'defaults' is an n-tuple of the default values of the last n arguments.
+
+    """
+
+    if ismethod(func):
+        func = func.__func__
+    if not isfunction(func):
+        raise TypeError('arg is not a Python function')
+    args, varargs, varkw = getargs(func.__code__)
+    return args, varargs, varkw, func.__defaults__
+
+def getargvalues(frame):
+    """Get information about arguments passed into a particular frame.
+
+    A tuple of four things is returned: (args, varargs, varkw, locals).
+    'args' is a list of the argument names (it may contain nested lists).
+    'varargs' and 'varkw' are the names of the * and ** arguments or None.
+    'locals' is the locals dictionary of the given frame.
+
+    """
+    args, varargs, varkw = getargs(frame.f_code)
+    return args, varargs, varkw, frame.f_locals
+
+def joinseq(seq):
+    if len(seq) == 1:
+        return '(' + seq[0] + ',)'
+    else:
+        return '(' + ', '.join(seq) + ')'
+
+def strseq(object, convert, join=joinseq):
+    """Recursively walk a sequence, stringifying each element.
+
+    """
+    if type(object) in [list, tuple]:
+        return join([strseq(_o, convert, join) for _o in object])
+    else:
+        return convert(object)
+
+def formatargspec(args, varargs=None, varkw=None, defaults=None,
+                  formatarg=str,
+                  formatvarargs=lambda name: '*' + name,
+                  formatvarkw=lambda name: '**' + name,
+                  formatvalue=lambda value: '=' + repr(value),
+                  join=joinseq):
+    """Format an argument spec from the 4 values returned by getargspec.
+
+    The first four arguments are (args, varargs, varkw, defaults). The
+    other four arguments are the corresponding optional formatting functions
+    that are called to turn names and values into strings. The ninth
+    argument is an optional function to format the sequence of arguments.
+
+    """
+    specs = []
+    if defaults:
+        firstdefault = len(args) - len(defaults)
+    for i in range(len(args)):
+        spec = strseq(args[i], formatarg, join)
+        if defaults and i >= firstdefault:
+            spec = spec + formatvalue(defaults[i - firstdefault])
+        specs.append(spec)
+    if varargs is not None:
+        specs.append(formatvarargs(varargs))
+    if varkw is not None:
+        specs.append(formatvarkw(varkw))
+    return '(' + ', '.join(specs) + ')'
+
+def formatargvalues(args, varargs, varkw, locals,
+                    formatarg=str,
+                    formatvarargs=lambda name: '*' + name,
+                    formatvarkw=lambda name: '**' + name,
+                    formatvalue=lambda value: '=' + repr(value),
+                    join=joinseq):
+    """Format an argument spec from the 4 values returned by getargvalues.
+
+    The first four arguments are (args, varargs, varkw, locals). The
+    next four arguments are the corresponding optional formatting functions
+    that are called to turn names and values into strings. The ninth
+    argument is an optional function to format the sequence of arguments.
+
+    """
+    def convert(name, locals=locals,
+                formatarg=formatarg, formatvalue=formatvalue):
+        return formatarg(name) + formatvalue(locals[name])
+    specs = [strseq(arg, convert, join) for arg in args]
+
+    if varargs:
+        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
+    if varkw:
+        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
+    return '(' + ', '.join(specs) + ')'
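
A brief sketch of the two public helpers in this vendored module; the function ``f`` is hypothetical::

    from numpy._utils._inspect import getargspec, formatargspec

    def f(a, b=1, *args, **kwargs):
        pass

    spec = getargspec(f)
    # spec == (['a', 'b'], 'args', 'kwargs', (1,))
    assert formatargspec(*spec) == '(a, b=1, *args, **kwargs)'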
.venv/lib/python3.11/site-packages/numpy/_utils/_pep440.py ADDED
@@ -0,0 +1,487 @@
+"""Utility to compare pep440 compatible version strings.
+
+The LooseVersion and StrictVersion classes that distutils provides don't
+work; they don't recognize anything like alpha/beta/rc/dev versions.
+"""
+
+# Copyright (c) Donald Stufft and individual contributors.
+# All rights reserved.
+
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+
+# 1. Redistributions of source code must retain the above copyright notice,
+#    this list of conditions and the following disclaimer.
+
+# 2. Redistributions in binary form must reproduce the above copyright
+#    notice, this list of conditions and the following disclaimer in the
+#    documentation and/or other materials provided with the distribution.
+
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+import collections
+import itertools
+import re
+
+
+__all__ = [
+    "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN",
+]
+
+
+# BEGIN packaging/_structures.py
+
+
+class Infinity:
+    def __repr__(self):
+        return "Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return False
+
+    def __le__(self, other):
+        return False
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return True
+
+    def __ge__(self, other):
+        return True
+
+    def __neg__(self):
+        return NegativeInfinity
+
+
+Infinity = Infinity()
+
+
+class NegativeInfinity:
+    def __repr__(self):
+        return "-Infinity"
+
+    def __hash__(self):
+        return hash(repr(self))
+
+    def __lt__(self, other):
+        return True
+
+    def __le__(self, other):
+        return True
+
+    def __eq__(self, other):
+        return isinstance(other, self.__class__)
+
+    def __ne__(self, other):
+        return not isinstance(other, self.__class__)
+
+    def __gt__(self, other):
+        return False
+
+    def __ge__(self, other):
+        return False
+
+    def __neg__(self):
+        return Infinity
+
+
+# BEGIN packaging/version.py
+
+
+NegativeInfinity = NegativeInfinity()
+
+_Version = collections.namedtuple(
+    "_Version",
+    ["epoch", "release", "dev", "pre", "post", "local"],
+)
+
+
+def parse(version):
+    """
+    Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object depending on if the given version is
+    a valid PEP 440 version or a legacy version.
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found, users should refer to PEP 440.
+    """
+
+
+class _BaseVersion:
+
+    def __hash__(self):
+        return hash(self._key)
+
+    def __lt__(self, other):
+        return self._compare(other, lambda s, o: s < o)
+
+    def __le__(self, other):
+        return self._compare(other, lambda s, o: s <= o)
+
+    def __eq__(self, other):
+        return self._compare(other, lambda s, o: s == o)
+
+    def __ge__(self, other):
+        return self._compare(other, lambda s, o: s >= o)
+
+    def __gt__(self, other):
+        return self._compare(other, lambda s, o: s > o)
+
+    def __ne__(self, other):
+        return self._compare(other, lambda s, o: s != o)
+
+    def _compare(self, other, method):
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return method(self._key, other._key)
+
+
+class LegacyVersion(_BaseVersion):
+
+    def __init__(self, version):
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+    def __str__(self):
+        return self._version
+
+    def __repr__(self):
+        return "<LegacyVersion({0})>".format(repr(str(self)))
+
+    @property
+    def public(self):
+        return self._version
+
+    @property
+    def base_version(self):
+        return self._version
+
+    @property
+    def local(self):
+        return None
+
+    @property
+    def is_prerelease(self):
+        return False
+
+    @property
+    def is_postrelease(self):
+        return False
+
+
+_legacy_version_component_re = re.compile(
+    r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE,
+)
+
+_legacy_version_replacement_map = {
+    "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@",
+}
+
+
+def _parse_version_parts(s):
+    for part in _legacy_version_component_re.split(s):
+        part = _legacy_version_replacement_map.get(part, part)
+
+        if not part or part == ".":
+            continue
+
+        if part[:1] in "0123456789":
+            # pad for numeric comparison
+            yield part.zfill(8)
+        else:
+            yield "*" + part
+
+    # ensure that alpha/beta/candidate are before final
+    yield "*final"
+
+
+def _legacy_cmpkey(version):
+    # We hardcode an epoch of -1 here. A PEP 440 version can only have an epoch
+    # greater than or equal to 0. This will effectively put the LegacyVersion,
+    # which uses the defacto standard originally implemented by setuptools,
+    # as before all PEP 440 versions.
+    epoch = -1
+
+    # This scheme is taken from pkg_resources.parse_version setuptools prior to
+    # its adoption of the packaging library.
+    parts = []
+    for part in _parse_version_parts(version.lower()):
+        if part.startswith("*"):
+            # remove "-" before a prerelease tag
+            if part < "*final":
+                while parts and parts[-1] == "*final-":
+                    parts.pop()
+
+            # remove trailing zeros from each series of numeric parts
+            while parts and parts[-1] == "00000000":
+                parts.pop()
+
+        parts.append(part)
+    parts = tuple(parts)
+
+    return epoch, parts
+
+
+# Deliberately not anchored to the start and end of the string, to make it
+# easier for 3rd party code to reuse
+VERSION_PATTERN = r"""
+    v?
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(
+        r"^\s*" + VERSION_PATTERN + r"\s*$",
+        re.VERBOSE | re.IGNORECASE,
+    )
+
+    def __init__(self, version):
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion("Invalid version: '{0}'".format(version))
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(
+                match.group("pre_l"),
+                match.group("pre_n"),
+            ),
+            post=_parse_letter_version(
+                match.group("post_l"),
+                match.group("post_n1") or match.group("post_n2"),
+            ),
+            dev=_parse_letter_version(
+                match.group("dev_l"),
+                match.group("dev_n"),
+            ),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self):
+        return "<Version({0})>".format(repr(str(self)))
+
+    def __str__(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        # Pre-release
+        if self._version.pre is not None:
+            parts.append("".join(str(x) for x in self._version.pre))
+
+        # Post-release
+        if self._version.post is not None:
+            parts.append(".post{0}".format(self._version.post[1]))
+
+        # Development release
+        if self._version.dev is not None:
+            parts.append(".dev{0}".format(self._version.dev[1]))
+
+        # Local version segment
+        if self._version.local is not None:
+            parts.append(
+                "+{0}".format(".".join(str(x) for x in self._version.local))
+            )
+
+        return "".join(parts)
+
+    @property
+    def public(self):
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self):
+        parts = []
+
+        # Epoch
+        if self._version.epoch != 0:
+            parts.append("{0}!".format(self._version.epoch))
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self._version.release))
+
+        return "".join(parts)
+
+    @property
+    def local(self):
+        version_string = str(self)
+        if "+" in version_string:
+            return version_string.split("+", 1)[1]
+
+    @property
+    def is_prerelease(self):
+        return bool(self._version.dev or self._version.pre)
+
+    @property
+    def is_postrelease(self):
+        return bool(self._version.post)
+
+
+def _parse_letter_version(letter, number):
+    if letter:
+        # We assume there is an implicit 0 in a pre-release if there is
+        # no numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower-case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume that if we are given a number but not given a letter,
+        # then this is using the implicit post release syntax (e.g., 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+
+_local_version_seperators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local):
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_seperators.split(local)
+        )
+
+
+def _cmpkey(epoch, release, pre, post, dev, local):
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll use a reverse the list, drop all the now
+    # leading zeros until we come to something non-zero, then take the rest,
+    # re-reverse it back into the correct order, and make it a tuple and use
+    # that for our sorting key.
+    release = tuple(
+        reversed(list(
+            itertools.dropwhile(
+                lambda x: x == 0,
+                reversed(release),
+            )
+        ))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre-segment, but we _only_ want to do this
+    # if there is no pre- or a post-segment. If we have one of those, then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        pre = -Infinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        pre = Infinity
+
+    # Versions without a post-segment should sort before those with one.
+    if post is None:
+        post = -Infinity
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        dev = Infinity
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        local = -Infinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alphanumeric segments sort before numeric segments
+        # - Alphanumeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        local = tuple(
+            (i, "") if isinstance(i, int) else (-Infinity, i)
+            for i in local
+        )
+
+    return epoch, release, pre, post, dev, local
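
The ordering rules implemented by ``_cmpkey`` can be exercised through the public ``parse``/``Version`` API; a minimal sketch::

    from numpy._utils._pep440 import parse, Version, LegacyVersion

    # dev < pre-release < rc < final < post, as PEP 440 requires
    assert parse('1.0.dev0') < parse('1.0a1') < parse('1.0rc1') < parse('1.0')
    assert Version('1.0') < Version('1.0.post1') < Version('1.1')

    # Anything that is not PEP 440 falls back to LegacyVersion
    assert isinstance(parse('not-a-version'), LegacyVersion)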
.venv/lib/python3.11/site-packages/numpy/compat/__init__.py ADDED
@@ -0,0 +1,19 @@
+"""
+Compatibility module.
+
+This module contains duplicated code from Python itself or 3rd party
+extensions, which may be included for the following reasons:
+
+  * compatibility
+  * we may only need a small subset of the copied library/module
+
+"""
+
+from .._utils import _inspect
+from .._utils._inspect import getargspec, formatargspec
+from . import py3k
+from .py3k import *
+
+__all__ = []
+__all__.extend(_inspect.__all__)
+__all__.extend(py3k.__all__)
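
Because this shim only re-exports, both import paths resolve to the same objects; a sketch (applies to numpy versions that still ship ``numpy.compat``)::

    import numpy.compat
    from numpy._utils._inspect import getargspec

    assert numpy.compat.getargspec is getargspec
    assert 'asbytes' in numpy.compat.__all__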
.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (846 Bytes).
.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/py3k.cpython-311.pyc ADDED
Binary file (6.55 kB).
.venv/lib/python3.11/site-packages/numpy/compat/__pycache__/setup.cpython-311.pyc ADDED
Binary file (751 Bytes).
.venv/lib/python3.11/site-packages/numpy/compat/py3k.py ADDED
@@ -0,0 +1,145 @@
+"""
+Python 3.X compatibility tools.
+
+While this file was originally intended for Python 2 -> 3 transition,
+it is now used to create a compatibility layer between different
+minor versions of Python 3.
+
+While the active version of numpy may not support a given version of python, we
+allow downstream libraries to continue to use these shims for forward
+compatibility with numpy while they transition their code to newer versions of
+Python.
+"""
+__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
+           'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
+           'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
+           'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
+           'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
+
+import sys
+import os
+from pathlib import Path
+import io
+try:
+    import pickle5 as pickle
+except ImportError:
+    import pickle
+
+long = int
+integer_types = (int,)
+basestring = str
+unicode = str
+bytes = bytes
+
+def asunicode(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
+
+def asbytes(s):
+    if isinstance(s, bytes):
+        return s
+    return str(s).encode('latin1')
+
+def asstr(s):
+    if isinstance(s, bytes):
+        return s.decode('latin1')
+    return str(s)
+
+def isfileobj(f):
+    if not isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter)):
+        return False
+    try:
+        # BufferedReader/Writer may raise OSError when
+        # fetching `fileno()` (e.g. when wrapping BytesIO).
+        f.fileno()
+        return True
+    except OSError:
+        return False
+
+def open_latin1(filename, mode='r'):
+    return open(filename, mode=mode, encoding='iso-8859-1')
+
+def sixu(s):
+    return s
+
+strchar = 'U'
+
+def getexception():
+    return sys.exc_info()[1]
+
+def asbytes_nested(x):
+    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+        return [asbytes_nested(y) for y in x]
+    else:
+        return asbytes(x)
+
+def asunicode_nested(x):
+    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
+        return [asunicode_nested(y) for y in x]
+    else:
+        return asunicode(x)
+
+def is_pathlib_path(obj):
+    """
+    Check whether obj is a `pathlib.Path` object.
+
+    Prefer using ``isinstance(obj, os.PathLike)`` instead of this function.
+    """
+    return isinstance(obj, Path)
+
+# from Python 3.7
+class contextlib_nullcontext:
+    """Context manager that does no additional processing.
+
+    Used as a stand-in for a normal context manager, when a particular
+    block of code is only sometimes used with a normal context manager:
+
+    cm = optional_cm if condition else nullcontext()
+    with cm:
+        # Perform operation, using optional_cm if condition is True
+
+    .. note::
+        Prefer using `contextlib.nullcontext` instead of this context manager.
+    """
+
+    def __init__(self, enter_result=None):
+        self.enter_result = enter_result
+
+    def __enter__(self):
+        return self.enter_result
+
+    def __exit__(self, *excinfo):
+        pass
+
+
+def npy_load_module(name, fn, info=None):
+    """
+    Load a module. Uses ``load_module`` which will be deprecated in python
+    3.12. An alternative that uses ``exec_module`` is in
+    numpy.distutils.misc_util.exec_mod_from_location
+
+    .. versionadded:: 1.11.2
+
+    Parameters
+    ----------
+    name : str
+        Full module name.
+    fn : str
+        Path to module file.
+    info : tuple, optional
+        Only here for backward compatibility with Python 2.*.
+
+    Returns
+    -------
+    mod : module
+
+    """
+    # Explicitly lazy import this to avoid paying the cost
+    # of importing importlib at startup
+    from importlib.machinery import SourceFileLoader
+    return SourceFileLoader(name, fn).load_module()
+
+
+os_fspath = os.fspath
+os_PathLike = os.PathLike
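
A short sketch of the distinction ``isfileobj`` draws between real on-disk files and in-memory buffers, plus the nullcontext stand-in::

    import io
    from numpy.compat import isfileobj, contextlib_nullcontext

    # A BufferedReader over BytesIO has no real fileno(), so this is False
    assert isfileobj(io.BufferedReader(io.BytesIO())) is False

    with contextlib_nullcontext(42) as value:
        assert value == 42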
.venv/lib/python3.11/site-packages/numpy/compat/setup.py ADDED
@@ -0,0 +1,10 @@
+def configuration(parent_package='',top_path=None):
+    from numpy.distutils.misc_util import Configuration
+
+    config = Configuration('compat', parent_package, top_path)
+    config.add_subpackage('tests')
+    return config
+
+if __name__ == '__main__':
+    from numpy.distutils.core import setup
+    setup(configuration=configuration)
.venv/lib/python3.11/site-packages/numpy/compat/tests/__init__.py ADDED
File without changes
.venv/lib/python3.11/site-packages/numpy/compat/tests/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (191 Bytes).
.venv/lib/python3.11/site-packages/numpy/compat/tests/__pycache__/test_compat.cpython-311.pyc ADDED
Binary file (2.01 kB).
.venv/lib/python3.11/site-packages/numpy/compat/tests/test_compat.py ADDED
@@ -0,0 +1,22 @@
+from os.path import join
+from io import BufferedReader, BytesIO
+
+from numpy.compat import isfileobj
+from numpy.testing import assert_
+from numpy.testing import tempdir
+
+
+def test_isfileobj():
+    with tempdir(prefix="numpy_test_compat_") as folder:
+        filename = join(folder, 'a.bin')
+
+        with open(filename, 'wb') as f:
+            assert_(isfileobj(f))
+
+        with open(filename, 'ab') as f:
+            assert_(isfileobj(f))
+
+        with open(filename, 'rb') as f:
+            assert_(isfileobj(f))
+
+        assert_(isfileobj(BufferedReader(BytesIO())) is False)
.venv/lib/python3.11/site-packages/numpy/lib/__init__.pyi ADDED
@@ -0,0 +1,245 @@
+import math as math
+from typing import Any
+
+from numpy._pytesttester import PytestTester
+
+from numpy import (
+    ndenumerate as ndenumerate,
+    ndindex as ndindex,
+)
+
+from numpy.version import version
+
+from numpy.lib import (
+    format as format,
+    mixins as mixins,
+    scimath as scimath,
+    stride_tricks as stride_tricks,
+)
+
+from numpy.lib._version import (
+    NumpyVersion as NumpyVersion,
+)
+
+from numpy.lib.arraypad import (
+    pad as pad,
+)
+
+from numpy.lib.arraysetops import (
+    ediff1d as ediff1d,
+    intersect1d as intersect1d,
+    setxor1d as setxor1d,
+    union1d as union1d,
+    setdiff1d as setdiff1d,
+    unique as unique,
+    in1d as in1d,
+    isin as isin,
+)
+
+from numpy.lib.arrayterator import (
+    Arrayterator as Arrayterator,
+)
+
+from numpy.lib.function_base import (
+    select as select,
+    piecewise as piecewise,
+    trim_zeros as trim_zeros,
+    copy as copy,
+    iterable as iterable,
+    percentile as percentile,
+    diff as diff,
+    gradient as gradient,
+    angle as angle,
+    unwrap as unwrap,
+    sort_complex as sort_complex,
+    disp as disp,
+    flip as flip,
+    rot90 as rot90,
+    extract as extract,
+    place as place,
+    vectorize as vectorize,
+    asarray_chkfinite as asarray_chkfinite,
+    average as average,
+    bincount as bincount,
+    digitize as digitize,
+    cov as cov,
+    corrcoef as corrcoef,
+    median as median,
+    sinc as sinc,
+    hamming as hamming,
+    hanning as hanning,
+    bartlett as bartlett,
+    blackman as blackman,
+    kaiser as kaiser,
+    trapz as trapz,
+    i0 as i0,
+    add_newdoc as add_newdoc,
+    add_docstring as add_docstring,
+    meshgrid as meshgrid,
+    delete as delete,
+    insert as insert,
+    append as append,
+    interp as interp,
+    add_newdoc_ufunc as add_newdoc_ufunc,
+    quantile as quantile,
+)
+
+from numpy.lib.histograms import (
+    histogram_bin_edges as histogram_bin_edges,
+    histogram as histogram,
+    histogramdd as histogramdd,
+)
+
+from numpy.lib.index_tricks import (
+    ravel_multi_index as ravel_multi_index,
+    unravel_index as unravel_index,
+    mgrid as mgrid,
+    ogrid as ogrid,
+    r_ as r_,
+    c_ as c_,
+    s_ as s_,
+    index_exp as index_exp,
+    ix_ as ix_,
+    fill_diagonal as fill_diagonal,
+    diag_indices as diag_indices,
+    diag_indices_from as diag_indices_from,
+)
+
+from numpy.lib.nanfunctions import (
+    nansum as nansum,
+    nanmax as nanmax,
+    nanmin as nanmin,
+    nanargmax as nanargmax,
+    nanargmin as nanargmin,
+    nanmean as nanmean,
+    nanmedian as nanmedian,
+    nanpercentile as nanpercentile,
+    nanvar as nanvar,
+    nanstd as nanstd,
+    nanprod as nanprod,
+    nancumsum as nancumsum,
+    nancumprod as nancumprod,
+    nanquantile as nanquantile,
+)
+
+from numpy.lib.npyio import (
+    savetxt as savetxt,
+    loadtxt as loadtxt,
+    genfromtxt as genfromtxt,
+    recfromtxt as recfromtxt,
+    recfromcsv as recfromcsv,
+    load as load,
+    save as save,
+    savez as savez,
+    savez_compressed as savez_compressed,
+    packbits as packbits,
+    unpackbits as unpackbits,
+    fromregex as fromregex,
+    DataSource as DataSource,
+)
+
+from numpy.lib.polynomial import (
+    poly as poly,
+    roots as roots,
+    polyint as polyint,
+    polyder as polyder,
+    polyadd as polyadd,
+    polysub as polysub,
+    polymul as polymul,
+    polydiv as polydiv,
+    polyval as polyval,
+    polyfit as polyfit,
+    RankWarning as RankWarning,
+    poly1d as poly1d,
+)
+
+from numpy.lib.shape_base import (
+    column_stack as column_stack,
+    row_stack as row_stack,
+    dstack as dstack,
+    array_split as array_split,
+    split as split,
+    hsplit as hsplit,
+    vsplit as vsplit,
+    dsplit as dsplit,
+    apply_over_axes as apply_over_axes,
+    expand_dims as expand_dims,
+    apply_along_axis as apply_along_axis,
+    kron as kron,
+    tile as tile,
+    get_array_wrap as get_array_wrap,
+    take_along_axis as take_along_axis,
+    put_along_axis as put_along_axis,
+)
+
+from numpy.lib.stride_tricks import (
+    broadcast_to as broadcast_to,
+    broadcast_arrays as broadcast_arrays,
+    broadcast_shapes as broadcast_shapes,
+)
+
+from numpy.lib.twodim_base import (
+    diag as diag,
+    diagflat as diagflat,
+    eye as eye,
+    fliplr as fliplr,
+    flipud as flipud,
+    tri as tri,
+    triu as triu,
+    tril as tril,
+    vander as vander,
+    histogram2d as histogram2d,
+    mask_indices as mask_indices,
+    tril_indices as tril_indices,
+    tril_indices_from as tril_indices_from,
+    triu_indices as triu_indices,
+    triu_indices_from as triu_indices_from,
+)
+
+from numpy.lib.type_check import (
+    mintypecode as mintypecode,
+    asfarray as asfarray,
+    real as real,
+    imag as imag,
+    iscomplex as iscomplex,
+    isreal as isreal,
+    iscomplexobj as iscomplexobj,
+    isrealobj as isrealobj,
+    nan_to_num as nan_to_num,
+    real_if_close as real_if_close,
+    typename as typename,
+    common_type as common_type,
+)
+
+from numpy.lib.ufunclike import (
+    fix as fix,
+    isposinf as isposinf,
+    isneginf as isneginf,
+)
+
+from numpy.lib.utils import (
+    issubclass_ as issubclass_,
+    issubsctype as issubsctype,
+    issubdtype as issubdtype,
+    deprecate as deprecate,
+    deprecate_with_doc as deprecate_with_doc,
+    get_include as get_include,
+    info as info,
+    source as source,
+    who as who,
+    lookfor as lookfor,
+    byte_bounds as byte_bounds,
+    safe_eval as safe_eval,
+    show_runtime as show_runtime,
+)
+
+from numpy.core.multiarray import (
+    tracemalloc_domain as tracemalloc_domain,
+)
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
+
+__version__ = version
+emath = scimath
.venv/lib/python3.11/site-packages/numpy/lib/_datasource.py ADDED
@@ -0,0 +1,704 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A file interface for handling local and remote data files.
2
+
3
+ The goal of datasource is to abstract some of the file system operations
4
+ when dealing with data files so the researcher doesn't have to know all the
5
+ low-level details. Through datasource, a researcher can obtain and use a
6
+ file with one function call, regardless of location of the file.
7
+
8
+ DataSource is meant to augment standard python libraries, not replace them.
9
+ It should work seamlessly with standard file IO operations and the os
10
+ module.
11
+
12
+ DataSource files can originate locally or remotely:
13
+
14
+ - local files : '/home/guido/src/local/data.txt'
15
+ - URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
16
+
17
+ DataSource files can also be compressed or uncompressed. Currently only
18
+ gzip, bz2 and xz are supported.
19
+
20
+ Example::
21
+
22
+ >>> # Create a DataSource, use os.curdir (default) for local storage.
23
+ >>> from numpy import DataSource
24
+ >>> ds = DataSource()
25
+ >>>
26
+ >>> # Open a remote file.
27
+ >>> # DataSource downloads the file, stores it locally in:
28
+ >>> # './www.google.com/index.html'
29
+ >>> # opens the file and returns a file object.
30
+ >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP
31
+ >>>
32
+ >>> # Use the file as you normally would
33
+ >>> fp.read() # doctest: +SKIP
34
+ >>> fp.close() # doctest: +SKIP
35
+
36
+ """
37
+ import os
38
+ import io
39
+
40
+ from .._utils import set_module
41
+
42
+
43
+ _open = open
44
+
45
+
46
+ def _check_mode(mode, encoding, newline):
47
+ """Check mode and that encoding and newline are compatible.
48
+
49
+ Parameters
50
+ ----------
51
+ mode : str
52
+ File open mode.
53
+ encoding : str
54
+ File encoding.
55
+ newline : str
56
+ Newline for text files.
57
+
58
+ """
59
+ if "t" in mode:
60
+ if "b" in mode:
61
+ raise ValueError("Invalid mode: %r" % (mode,))
62
+ else:
63
+ if encoding is not None:
64
+ raise ValueError("Argument 'encoding' not supported in binary mode")
65
+ if newline is not None:
66
+ raise ValueError("Argument 'newline' not supported in binary mode")
67
+
68
+
69
+ # Using a class instead of a module-level dictionary
70
+ # to reduce the initial 'import numpy' overhead by
71
+ # deferring the import of lzma, bz2 and gzip until needed
72
+
73
+ # TODO: .zip support, .tar support?
74
+ class _FileOpeners:
75
+ """
76
+ Container for different methods to open (un-)compressed files.
77
+
78
+ `_FileOpeners` contains a dictionary that holds one method for each
79
+ supported file format. Attribute lookup is implemented in such a way
80
+ that an instance of `_FileOpeners` itself can be indexed with the keys
81
+ of that dictionary. Currently uncompressed files as well as files
82
+ compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.
83
+
84
+ Notes
85
+ -----
86
+ `_file_openers`, an instance of `_FileOpeners`, is made available for
87
+ use in the `_datasource` module.
88
+
89
+ Examples
90
+ --------
91
+ >>> import gzip
92
+ >>> np.lib._datasource._file_openers.keys()
93
+ [None, '.bz2', '.gz', '.xz', '.lzma']
94
+ >>> np.lib._datasource._file_openers['.gz'] is gzip.open
95
+ True
96
+
97
+ """
98
+
99
+ def __init__(self):
100
+ self._loaded = False
101
+ self._file_openers = {None: io.open}
102
+
103
+ def _load(self):
104
+ if self._loaded:
105
+ return
106
+
107
+ try:
108
+ import bz2
109
+ self._file_openers[".bz2"] = bz2.open
110
+ except ImportError:
111
+ pass
112
+
113
+ try:
114
+ import gzip
115
+ self._file_openers[".gz"] = gzip.open
116
+ except ImportError:
117
+ pass
118
+
119
+ try:
120
+ import lzma
121
+ self._file_openers[".xz"] = lzma.open
122
+ self._file_openers[".lzma"] = lzma.open
123
+ except (ImportError, AttributeError):
124
+ # There are incompatible backports of lzma that do not have the
125
+ # lzma.open attribute, so catch that as well as ImportError.
126
+ pass
127
+
128
+ self._loaded = True
129
+
130
+ def keys(self):
131
+ """
132
+ Return the keys of currently supported file openers.
133
+
134
+ Parameters
135
+ ----------
136
+ None
137
+
138
+ Returns
139
+ -------
140
+ keys : list
141
+ The keys are None for uncompressed files and the file extension
142
+ strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression
143
+ methods.
144
+
145
+ """
146
+ self._load()
147
+ return list(self._file_openers.keys())
148
+
149
+ def __getitem__(self, key):
150
+ self._load()
151
+ return self._file_openers[key]
152
+
153
+ _file_openers = _FileOpeners()
154
+
155
+ def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
156
+ """
157
+ Open `path` with `mode` and return the file object.
158
+
159
+ If ``path`` is an URL, it will be downloaded, stored in the
160
+ `DataSource` `destpath` directory and opened from there.
161
+
162
+ Parameters
163
+ ----------
164
+ path : str
165
+ Local file path or URL to open.
166
+ mode : str, optional
167
+ Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
168
+ append. Available modes depend on the type of object specified by
169
+ path. Default is 'r'.
170
+ destpath : str, optional
171
+ Path to the directory where the source file gets downloaded to for
172
+ use. If `destpath` is None, a temporary directory will be created.
173
+ The default path is the current directory.
174
+ encoding : {None, str}, optional
175
+ Open text file with given encoding. The default encoding will be
176
+ what `io.open` uses.
177
+ newline : {None, str}, optional
178
+ Newline to use when reading text file.
179
+
180
+ Returns
181
+ -------
182
+ out : file object
183
+ The opened file.
184
+
185
+ Notes
186
+ -----
187
+ This is a convenience function that instantiates a `DataSource` and
188
+ returns the file object from ``DataSource.open(path)``.
189
+
190
+ """
191
+
192
+ ds = DataSource(destpath)
193
+ return ds.open(path, mode, encoding=encoding, newline=newline)
194
+
195
+
196
+ @set_module('numpy')
197
+ class DataSource:
198
+ """
199
+ DataSource(destpath='.')
200
+
201
+ A generic data source file (file, http, ftp, ...).
202
+
203
+ DataSources can be local files or remote files/URLs. The files may
204
+ also be compressed or uncompressed. DataSource hides some of the
205
+ low-level details of downloading the file, allowing you to simply pass
206
+ in a valid file path (or URL) and obtain a file object.
207
+
208
+ Parameters
209
+ ----------
210
+ destpath : str or None, optional
211
+ Path to the directory where the source file gets downloaded to for
212
+ use. If `destpath` is None, a temporary directory will be created.
213
+ The default path is the current directory.
214
+
215
+ Notes
216
+ -----
217
+ URLs require a scheme string (``http://``) to be used, without it they
218
+ will fail::
219
+
220
+ >>> repos = np.DataSource()
221
+ >>> repos.exists('www.google.com/index.html')
222
+ False
223
+ >>> repos.exists('http://www.google.com/index.html')
224
+ True
225
+
226
+ Temporary directories are deleted when the DataSource is deleted.
227
+
228
+ Examples
229
+ --------
230
+ ::
231
+
232
+ >>> ds = np.DataSource('/home/guido')
233
+ >>> urlname = 'http://www.google.com/'
234
+ >>> gfile = ds.open('http://www.google.com/')
235
+ >>> ds.abspath(urlname)
236
+ '/home/guido/www.google.com/index.html'
237
+
238
+ >>> ds = np.DataSource(None) # use with temporary file
239
+ >>> ds.open('/home/guido/foobar.txt')
240
+ <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
241
+ >>> ds.abspath('/home/guido/foobar.txt')
242
+ '/tmp/.../home/guido/foobar.txt'
243
+
244
+ """
245
+
246
+ def __init__(self, destpath=os.curdir):
247
+ """Create a DataSource with a local path at destpath."""
248
+ if destpath:
249
+ self._destpath = os.path.abspath(destpath)
250
+ self._istmpdest = False
251
+ else:
252
+ import tempfile # deferring import to improve startup time
253
+ self._destpath = tempfile.mkdtemp()
254
+ self._istmpdest = True
255
+
256
+ def __del__(self):
257
+ # Remove temp directories
258
+ if hasattr(self, '_istmpdest') and self._istmpdest:
259
+ import shutil
260
+
261
+ shutil.rmtree(self._destpath)
262
+
263
+ def _iszip(self, filename):
264
+ """Test if the filename is a zip file by looking at the file extension.
265
+
266
+ """
267
+ fname, ext = os.path.splitext(filename)
268
+ return ext in _file_openers.keys()
269
+
270
+ def _iswritemode(self, mode):
271
+ """Test if the given mode will open a file for writing."""
272
+
273
+ # Currently only used to test the bz2 files.
274
+ _writemodes = ("w", "+")
275
+ for c in mode:
276
+ if c in _writemodes:
277
+ return True
278
+ return False
279
+
280
+ def _splitzipext(self, filename):
281
+ """Split zip extension from filename and return filename.
282
+
283
+ Returns
284
+ -------
285
+ base, zip_ext : {tuple}
286
+
287
+ """
288
+
289
+        if self._iszip(filename):
+            return os.path.splitext(filename)
+        else:
+            return filename, None
+
+    def _possible_names(self, filename):
+        """Return a list of compressed filename variations."""
+        names = [filename]
+        if not self._iszip(filename):
+            for zipext in _file_openers.keys():
+                if zipext:
+                    names.append(filename+zipext)
+        return names
+
+    def _isurl(self, path):
+        """Test if path is a net location. Tests the scheme and netloc."""
+
+        # We do this here to reduce the 'import numpy' initial import time.
+        from urllib.parse import urlparse
+
+        # BUG : URLs require a scheme string ('http://') to be used.
+        #       www.google.com will fail.
+        #       Should we prepend the scheme for those that don't have it and
+        #       test that also? Similar to the way we append .gz and test
+        #       for compressed versions of files.
+
+        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+        return bool(scheme and netloc)
+
+    def _cache(self, path):
+        """Cache the file specified by path.
+
+        Creates a copy of the file in the datasource cache.
+
+        """
+        # We import these here because importing them is slow and
+        # a significant fraction of numpy's total import time.
+        import shutil
+        from urllib.request import urlopen
+
+        upath = self.abspath(path)
+
+        # ensure directory exists
+        if not os.path.exists(os.path.dirname(upath)):
+            os.makedirs(os.path.dirname(upath))
+
+        # TODO: Doesn't handle compressed files!
+        if self._isurl(path):
+            with urlopen(path) as openedurl:
+                with _open(upath, 'wb') as f:
+                    shutil.copyfileobj(openedurl, f)
+        else:
+            shutil.copyfile(path, upath)
+        return upath
+
+    def _findfile(self, path):
+        """Searches for ``path`` and returns full path if found.
+
+        If path is a URL, _findfile will cache a local copy and return the
+        path to the cached file. If path is a local file, _findfile will
+        return a path to that local file.
+
+        The search will include possible compressed versions of the file
+        and return the first occurrence found.
+
+        """
+
+        # Build list of possible local file paths
+        if not self._isurl(path):
+            # Valid local paths
+            filelist = self._possible_names(path)
+            # Paths in self._destpath
+            filelist += self._possible_names(self.abspath(path))
+        else:
+            # Cached URLs in self._destpath
+            filelist = self._possible_names(self.abspath(path))
+            # Remote URLs
+            filelist = filelist + self._possible_names(path)
+
+        for name in filelist:
+            if self.exists(name):
+                if self._isurl(name):
+                    name = self._cache(name)
+                return name
+        return None
+
+    def abspath(self, path):
+        """
+        Return absolute path of file in the DataSource directory.
+
+        If `path` is a URL, then `abspath` will return either the location
+        where the file exists locally or the location it would exist at
+        when opened using the `open` method.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL.
+
+        Returns
+        -------
+        out : str
+            Complete path, including the `DataSource` destination directory.
+
+        Notes
+        -----
+        The functionality is based on `os.path.abspath`.
+
+        """
+        # We do this here to reduce the 'import numpy' initial import time.
+        from urllib.parse import urlparse
+
+        # TODO: This should be more robust. Handles case where path includes
+        # the destpath, but not other sub-paths. Failing case:
+        # path = /home/guido/datafile.txt
+        # destpath = /home/alex/
+        # upath = self.abspath(path)
+        # upath == '/home/alex/home/guido/datafile.txt'
+
+        # handle case where path includes self._destpath
+        splitpath = path.split(self._destpath, 2)
+        if len(splitpath) > 1:
+            path = splitpath[1]
+        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+        netloc = self._sanitize_relative_path(netloc)
+        upath = self._sanitize_relative_path(upath)
+        return os.path.join(self._destpath, netloc, upath)
+
+    def _sanitize_relative_path(self, path):
+        """Return a sanitised relative path for which
+        os.path.abspath(os.path.join(base, path)).startswith(base)
+        """
+        last = None
+        path = os.path.normpath(path)
+        while path != last:
+            last = path
+            # Note: os.path.join treats '/' as os.sep on Windows
+            path = path.lstrip(os.sep).lstrip('/')
+            path = path.lstrip(os.pardir).lstrip('..')
+            drive, path = os.path.splitdrive(path)  # for Windows
+        return path
+
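The fixed-point loop in `_sanitize_relative_path` keeps stripping leading separators and `..` components until the path stops changing, so a crafted path cannot climb out of `_destpath`. A minimal standalone sketch of the same loop, assuming POSIX paths:

    import os

    def sanitize(path):
        # Same fixed-point iteration as _sanitize_relative_path above.
        last = None
        path = os.path.normpath(path)
        while path != last:
            last = path
            path = path.lstrip(os.sep).lstrip('/')
            path = path.lstrip(os.pardir).lstrip('..')
            drive, path = os.path.splitdrive(path)
        return path

    print(sanitize('../../etc/passwd'))  # 'etc/passwd'
    print(sanitize('/etc/passwd'))       # 'etc/passwd'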
+    def exists(self, path):
+        """
+        Test if path exists.
+
+        Test if `path` exists as (and in this order):
+
+        - a local file.
+        - a remote URL that has been downloaded and stored locally in the
+          `DataSource` directory.
+        - a remote URL that has not been downloaded, but is valid and
+          accessible.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL.
+
+        Returns
+        -------
+        out : bool
+            True if `path` exists.
+
+        Notes
+        -----
+        When `path` is a URL, `exists` will return True if it's either
+        stored locally in the `DataSource` directory, or is a valid remote
+        URL. `DataSource` does not discriminate between the two; the file
+        is accessible if it exists in either location.
+
+        """
+
+        # First test for local path
+        if os.path.exists(path):
+            return True
+
+        # We import this here because importing urllib is slow and
+        # a significant fraction of numpy's total import time.
+        from urllib.request import urlopen
+        from urllib.error import URLError
+
+        # Test cached url
+        upath = self.abspath(path)
+        if os.path.exists(upath):
+            return True
+
+        # Test remote url
+        if self._isurl(path):
+            try:
+                netfile = urlopen(path)
+                netfile.close()
+                del netfile
+                return True
+            except URLError:
+                return False
+        return False
+
+    def open(self, path, mode='r', encoding=None, newline=None):
+        """
+        Open and return file-like object.
+
+        If `path` is a URL, it will be downloaded, stored in the
+        `DataSource` directory and opened from there.
+
+        Parameters
+        ----------
+        path : str
+            Local file path or URL to open.
+        mode : {'r', 'w', 'a'}, optional
+            Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+            'a' to append. Available modes depend on the type of object
+            specified by `path`. Default is 'r'.
+        encoding : {None, str}, optional
+            Open text file with given encoding. The default encoding will be
+            what `io.open` uses.
+        newline : {None, str}, optional
+            Newline to use when reading text file.
+
+        Returns
+        -------
+        out : file object
+            File object.
+
+        """
+
+        # TODO: There is no support for opening a file for writing which
+        #       doesn't exist yet (creating a file). Should there be?
+
+        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
+        #       used to store URLs in self._destpath.
+
+        if self._isurl(path) and self._iswritemode(mode):
+            raise ValueError("URLs are not writeable")
+
+        # NOTE: _findfile will fail on a new file opened for writing.
+        found = self._findfile(path)
+        if found:
+            _fname, ext = self._splitzipext(found)
+            if ext == 'bz2':
+                # Assign the result; str.replace does not modify in place.
+                mode = mode.replace("+", "")
+            return _file_openers[ext](found, mode=mode,
+                                      encoding=encoding, newline=newline)
+        else:
+            raise FileNotFoundError(f"{path} not found.")
+
+
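Taken together, `exists`, `abspath` and `open` give the typical access pattern: query a path, let remote files be cached transparently, and read the (possibly decompressed) result. A short usage sketch; the URL is a placeholder and network access is assumed:

    import numpy as np

    ds = np.lib._datasource.DataSource(None)    # None -> temporary cache dir
    url = 'http://www.example.com/data.txt.gz'  # hypothetical file
    if ds.exists(url):
        with ds.open(url) as f:                 # cached, then opened via gzip
            first_line = f.readline()
    print(ds.abspath(url))                      # location of the cached copy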
+class Repository(DataSource):
+    """
+    Repository(baseurl, destpath='.')
+
+    A data repository where multiple DataSources share a base
+    URL/directory.
+
+    `Repository` extends `DataSource` by prepending a base URL (or
+    directory) to all the files it handles. Use `Repository` when you will
+    be working with multiple files from one base URL. Initialize
+    `Repository` with the base URL, then refer to each file by its filename
+    only.
+
+    Parameters
+    ----------
+    baseurl : str
+        Path to the local directory or remote location that contains the
+        data files.
+    destpath : str or None, optional
+        Path to the directory where the source file gets downloaded to for
+        use. If `destpath` is None, a temporary directory will be created.
+        The default path is the current directory.
+
+    Examples
+    --------
+    To analyze all files in the repository, do something like this
+    (note: this is not self-contained code)::
+
+        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
+        >>> for filename in filelist:
+        ...     fp = repos.open(filename)
+        ...     fp.analyze()
+        ...     fp.close()
+
+    Similarly you could use a URL for a repository::
+
+        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
+
+    """
+
+    def __init__(self, baseurl, destpath=os.curdir):
+        """Create a Repository with a shared url or directory of baseurl."""
+        DataSource.__init__(self, destpath=destpath)
+        self._baseurl = baseurl
+
+    def __del__(self):
+        DataSource.__del__(self)
+
+    def _fullpath(self, path):
+        """Return complete path for path. Prepends baseurl if necessary."""
+        splitpath = path.split(self._baseurl, 2)
+        if len(splitpath) == 1:
+            result = os.path.join(self._baseurl, path)
+        else:
+            result = path    # path contains baseurl already
+        return result
+
+    def _findfile(self, path):
+        """Extend DataSource method to prepend baseurl to ``path``."""
+        return DataSource._findfile(self, self._fullpath(path))
+
+    def abspath(self, path):
+        """
+        Return absolute path of file in the Repository directory.
+
+        If `path` is a URL, then `abspath` will return either the location
+        where the file exists locally or the location it would exist at
+        when opened using the `open` method.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL. This may, but does not
+            have to, include the `baseurl` with which the `Repository` was
+            initialized.
+
+        Returns
+        -------
+        out : str
+            Complete path, including the `DataSource` destination directory.
+
+        """
+        return DataSource.abspath(self, self._fullpath(path))
+
+    def exists(self, path):
+        """
+        Test if path exists prepending Repository base URL to path.
+
+        Test if `path` exists as (and in this order):
+
+        - a local file.
+        - a remote URL that has been downloaded and stored locally in the
+          `DataSource` directory.
+        - a remote URL that has not been downloaded, but is valid and
+          accessible.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL. This may, but does not
+            have to, include the `baseurl` with which the `Repository` was
+            initialized.
+
+        Returns
+        -------
+        out : bool
+            True if `path` exists.
+
+        Notes
+        -----
+        When `path` is a URL, `exists` will return True if it's either
+        stored locally in the `DataSource` directory, or is a valid remote
+        URL. `DataSource` does not discriminate between the two; the file
+        is accessible if it exists in either location.
+
+        """
+        return DataSource.exists(self, self._fullpath(path))
+
+    def open(self, path, mode='r', encoding=None, newline=None):
+        """
+        Open and return file-like object prepending Repository base URL.
+
+        If `path` is a URL, it will be downloaded, stored in the
+        DataSource directory and opened from there.
+
+        Parameters
+        ----------
+        path : str
+            Local file path or URL to open. This may, but does not have to,
+            include the `baseurl` with which the `Repository` was
+            initialized.
+        mode : {'r', 'w', 'a'}, optional
+            Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+            'a' to append. Available modes depend on the type of object
+            specified by `path`. Default is 'r'.
+        encoding : {None, str}, optional
+            Open text file with given encoding. The default encoding will be
+            what `io.open` uses.
+        newline : {None, str}, optional
+            Newline to use when reading text file.
+
+        Returns
+        -------
+        out : file object
+            File object.
+
+        """
+        return DataSource.open(self, self._fullpath(path), mode,
+                               encoding=encoding, newline=newline)
+
+    def listdir(self):
+        """
+        List files in the source Repository.
+
+        Returns
+        -------
+        files : list of str
+            List of file names (not containing a directory part).
+
+        Notes
+        -----
+        Does not currently work for remote repositories.
+
+        """
+        if self._isurl(self._baseurl):
+            raise NotImplementedError(
+                "Directory listing of URLs, not supported yet.")
+        else:
+            return os.listdir(self._baseurl)
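Since `Repository` only prepends `_baseurl` in `_fullpath`, every inherited `DataSource` method accepts bare filenames. A sketch, assuming a local directory `/tmp/repo` that contains `data1.txt` (names are illustrative):

    import numpy as np

    repos = np.lib._datasource.Repository('/tmp/repo', destpath=None)
    if repos.exists('data1.txt'):
        with repos.open('data1.txt') as f:
            header = f.readline()
    print(repos.listdir())   # e.g. ['data1.txt'] -- local baseurl only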
.venv/lib/python3.11/site-packages/numpy/lib/_iotools.py ADDED
@@ -0,0 +1,897 @@
+ """A collection of functions designed to help I/O with ascii files.
2
+
3
+ """
4
+ __docformat__ = "restructuredtext en"
5
+
6
+ import numpy as np
7
+ import numpy.core.numeric as nx
8
+ from numpy.compat import asbytes, asunicode
9
+
10
+
11
+ def _decode_line(line, encoding=None):
12
+ """Decode bytes from binary input streams.
13
+
14
+ Defaults to decoding from 'latin1'. That differs from the behavior of
15
+ np.compat.asunicode that decodes from 'ascii'.
16
+
17
+ Parameters
18
+ ----------
19
+ line : str or bytes
20
+ Line to be decoded.
21
+ encoding : str
22
+ Encoding used to decode `line`.
23
+
24
+ Returns
25
+ -------
26
+ decoded_line : str
27
+
28
+ """
29
+ if type(line) is bytes:
30
+ if encoding is None:
31
+ encoding = "latin1"
32
+ line = line.decode(encoding)
33
+
34
+ return line
35
+
36
+
37
+ def _is_string_like(obj):
38
+ """
39
+ Check whether obj behaves like a string.
40
+ """
41
+ try:
42
+ obj + ''
43
+ except (TypeError, ValueError):
44
+ return False
45
+ return True
46
+
47
+
48
+ def _is_bytes_like(obj):
49
+ """
50
+ Check whether obj behaves like a bytes object.
51
+ """
52
+ try:
53
+ obj + b''
54
+ except (TypeError, ValueError):
55
+ return False
56
+ return True
57
+
58
+
59
+ def has_nested_fields(ndtype):
60
+ """
61
+ Returns whether one or several fields of a dtype are nested.
62
+
63
+ Parameters
64
+ ----------
65
+ ndtype : dtype
66
+ Data-type of a structured array.
67
+
68
+ Raises
69
+ ------
70
+ AttributeError
71
+ If `ndtype` does not have a `names` attribute.
72
+
73
+ Examples
74
+ --------
75
+ >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
76
+ >>> np.lib._iotools.has_nested_fields(dt)
77
+ False
78
+
79
+ """
80
+ for name in ndtype.names or ():
81
+ if ndtype[name].names is not None:
82
+ return True
83
+ return False
84
+
85
+
86
+ def flatten_dtype(ndtype, flatten_base=False):
87
+ """
88
+ Unpack a structured data-type by collapsing nested fields and/or fields
89
+ with a shape.
90
+
91
+ Note that the field names are lost.
92
+
93
+ Parameters
94
+ ----------
95
+ ndtype : dtype
96
+ The datatype to collapse
97
+ flatten_base : bool, optional
98
+ If True, transform a field with a shape into several fields. Default is
99
+ False.
100
+
101
+ Examples
102
+ --------
103
+ >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
104
+ ... ('block', int, (2, 3))])
105
+ >>> np.lib._iotools.flatten_dtype(dt)
106
+ [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]
107
+ >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
108
+ [dtype('S4'),
109
+ dtype('float64'),
110
+ dtype('float64'),
111
+ dtype('int64'),
112
+ dtype('int64'),
113
+ dtype('int64'),
114
+ dtype('int64'),
115
+ dtype('int64'),
116
+ dtype('int64')]
117
+
118
+ """
119
+ names = ndtype.names
120
+ if names is None:
121
+ if flatten_base:
122
+ return [ndtype.base] * int(np.prod(ndtype.shape))
123
+ return [ndtype.base]
124
+ else:
125
+ types = []
126
+ for field in names:
127
+ info = ndtype.fields[field]
128
+ flat_dt = flatten_dtype(info[0], flatten_base)
129
+ types.extend(flat_dt)
130
+ return types
131
+
132
+
133
+ class LineSplitter:
134
+ """
135
+ Object to split a string at a given delimiter or at given places.
136
+
137
+ Parameters
138
+ ----------
139
+ delimiter : str, int, or sequence of ints, optional
140
+ If a string, character used to delimit consecutive fields.
141
+ If an integer or a sequence of integers, width(s) of each field.
142
+ comments : str, optional
143
+ Character used to mark the beginning of a comment. Default is '#'.
144
+ autostrip : bool, optional
145
+ Whether to strip each individual field. Default is True.
146
+
147
+ """
148
+
149
+ def autostrip(self, method):
150
+ """
151
+ Wrapper to strip each member of the output of `method`.
152
+
153
+ Parameters
154
+ ----------
155
+ method : function
156
+ Function that takes a single argument and returns a sequence of
157
+ strings.
158
+
159
+ Returns
160
+ -------
161
+ wrapped : function
162
+ The result of wrapping `method`. `wrapped` takes a single input
163
+ argument and returns a list of strings that are stripped of
164
+ white-space.
165
+
166
+ """
167
+ return lambda input: [_.strip() for _ in method(input)]
168
+
169
+ def __init__(self, delimiter=None, comments='#', autostrip=True,
170
+ encoding=None):
171
+ delimiter = _decode_line(delimiter)
172
+ comments = _decode_line(comments)
173
+
174
+ self.comments = comments
175
+
176
+ # Delimiter is a character
177
+ if (delimiter is None) or isinstance(delimiter, str):
178
+ delimiter = delimiter or None
179
+ _handyman = self._delimited_splitter
180
+ # Delimiter is a list of field widths
181
+ elif hasattr(delimiter, '__iter__'):
182
+ _handyman = self._variablewidth_splitter
183
+ idx = np.cumsum([0] + list(delimiter))
184
+ delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
185
+ # Delimiter is a single integer
186
+ elif int(delimiter):
187
+ (_handyman, delimiter) = (
188
+ self._fixedwidth_splitter, int(delimiter))
189
+ else:
190
+ (_handyman, delimiter) = (self._delimited_splitter, None)
191
+ self.delimiter = delimiter
192
+ if autostrip:
193
+ self._handyman = self.autostrip(_handyman)
194
+ else:
195
+ self._handyman = _handyman
196
+ self.encoding = encoding
197
+
198
+ def _delimited_splitter(self, line):
199
+ """Chop off comments, strip, and split at delimiter. """
200
+ if self.comments is not None:
201
+ line = line.split(self.comments)[0]
202
+ line = line.strip(" \r\n")
203
+ if not line:
204
+ return []
205
+ return line.split(self.delimiter)
206
+
207
+ def _fixedwidth_splitter(self, line):
208
+ if self.comments is not None:
209
+ line = line.split(self.comments)[0]
210
+ line = line.strip("\r\n")
211
+ if not line:
212
+ return []
213
+ fixed = self.delimiter
214
+ slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
215
+ return [line[s] for s in slices]
216
+
217
+ def _variablewidth_splitter(self, line):
218
+ if self.comments is not None:
219
+ line = line.split(self.comments)[0]
220
+ if not line:
221
+ return []
222
+ slices = self.delimiter
223
+ return [line[s] for s in slices]
224
+
225
+ def __call__(self, line):
226
+ return self._handyman(_decode_line(line, self.encoding))
227
+
228
+
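The three `_*_splitter` methods are selected purely by the type of `delimiter`, which the following short sketch makes concrete (values are illustrative):

    from numpy.lib._iotools import LineSplitter

    LineSplitter(',')("1, 2, 3  # comment")  # ['1', '2', '3']
    LineSplitter(4)("0001000200030004")      # ['0001', '0002', '0003', '0004']
    LineSplitter((2, 4))("012345")           # ['01', '2345']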
+class NameValidator:
+    """
+    Object to validate a list of strings to use as field names.
+
+    The strings are stripped of any non alphanumeric character, and spaces
+    are replaced by '_'. During instantiation, the user can define a list
+    of names to exclude, as well as a list of invalid characters. Names in
+    the exclusion list are appended a '_' character.
+
+    Once an instance has been created, it can be called with a list of
+    names, and a list of valid names will be created. The `__call__`
+    method accepts an optional keyword "default" that sets the default name
+    in case of ambiguity. By default this is 'f', so that names will
+    default to `f0`, `f1`, etc.
+
+    Parameters
+    ----------
+    excludelist : sequence, optional
+        A list of names to exclude. This list is appended to the default
+        list ['return', 'file', 'print']. Excluded names are appended an
+        underscore: for example, `file` becomes `file_` if supplied.
+    deletechars : str, optional
+        A string combining invalid characters that must be deleted from the
+        names.
+    case_sensitive : {True, False, 'upper', 'lower'}, optional
+        * If True, field names are case-sensitive.
+        * If False or 'upper', field names are converted to upper case.
+        * If 'lower', field names are converted to lower case.
+
+        The default value is True.
+    replace_space : '_', optional
+        Character(s) used in replacement of white spaces.
+
+    Notes
+    -----
+    Calling an instance of `NameValidator` is the same as calling its
+    method `validate`.
+
+    Examples
+    --------
+    >>> validator = np.lib._iotools.NameValidator()
+    >>> validator(['file', 'field2', 'with space', 'CaSe'])
+    ('file_', 'field2', 'with_space', 'CaSe')
+
+    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
+    ...                                           deletechars='q',
+    ...                                           case_sensitive=False)
+    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
+    ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')
+
+    """
+
+    defaultexcludelist = ['return', 'file', 'print']
+    defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
+
+    def __init__(self, excludelist=None, deletechars=None,
+                 case_sensitive=None, replace_space='_'):
+        # Process the exclusion list ..
+        if excludelist is None:
+            excludelist = []
+        excludelist.extend(self.defaultexcludelist)
+        self.excludelist = excludelist
+        # Process the list of characters to delete
+        if deletechars is None:
+            delete = self.defaultdeletechars
+        else:
+            delete = set(deletechars)
+        delete.add('"')
+        self.deletechars = delete
+        # Process the case option .....
+        if (case_sensitive is None) or (case_sensitive is True):
+            self.case_converter = lambda x: x
+        elif (case_sensitive is False) or case_sensitive.startswith('u'):
+            self.case_converter = lambda x: x.upper()
+        elif case_sensitive.startswith('l'):
+            self.case_converter = lambda x: x.lower()
+        else:
+            msg = 'unrecognized case_sensitive value %s.' % case_sensitive
+            raise ValueError(msg)
+
+        self.replace_space = replace_space
+
+    def validate(self, names, defaultfmt="f%i", nbfields=None):
+        """
+        Validate a list of strings as field names for a structured array.
+
+        Parameters
+        ----------
+        names : sequence of str
+            Strings to be validated.
+        defaultfmt : str, optional
+            Default format string, used if validating a given string
+            reduces its length to zero.
+        nbfields : integer, optional
+            Final number of validated names, used to expand or shrink the
+            initial list of names.
+
+        Returns
+        -------
+        validatednames : list of str
+            The list of validated field names.
+
+        Notes
+        -----
+        A `NameValidator` instance can be called directly, which is the
+        same as calling `validate`. For examples, see `NameValidator`.
+
+        """
+        # Initial checks ..............
+        if (names is None):
+            if (nbfields is None):
+                return None
+            names = []
+        if isinstance(names, str):
+            names = [names, ]
+        if nbfields is not None:
+            nbnames = len(names)
+            if (nbnames < nbfields):
+                names = list(names) + [''] * (nbfields - nbnames)
+            elif (nbnames > nbfields):
+                names = names[:nbfields]
+        # Set some shortcuts ...........
+        deletechars = self.deletechars
+        excludelist = self.excludelist
+        case_converter = self.case_converter
+        replace_space = self.replace_space
+        # Initializes some variables ...
+        validatednames = []
+        seen = dict()
+        nbempty = 0
+
+        for item in names:
+            item = case_converter(item).strip()
+            if replace_space:
+                item = item.replace(' ', replace_space)
+            item = ''.join([c for c in item if c not in deletechars])
+            if item == '':
+                item = defaultfmt % nbempty
+                while item in names:
+                    nbempty += 1
+                    item = defaultfmt % nbempty
+                nbempty += 1
+            elif item in excludelist:
+                item += '_'
+            cnt = seen.get(item, 0)
+            if cnt > 0:
+                validatednames.append(item + '_%d' % cnt)
+            else:
+                validatednames.append(item)
+            seen[item] = cnt + 1
+        return tuple(validatednames)
+
+    def __call__(self, names, defaultfmt="f%i", nbfields=None):
+        return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
+
+
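One behaviour of `validate` that is easy to miss: the `seen` dictionary makes duplicate names unique by appending a counter, and empty names fall back to `defaultfmt`. A small illustration:

    from numpy.lib._iotools import NameValidator

    NameValidator()(['a', 'a', 'a'])         # ('a', 'a_1', 'a_2')
    NameValidator()(['x', ''], nbfields=3)   # ('x', 'f0', 'f1')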
+def str2bool(value):
+    """
+    Tries to transform a string supposed to represent a boolean to a
+    boolean.
+
+    Parameters
+    ----------
+    value : str
+        The string that is transformed to a boolean.
+
+    Returns
+    -------
+    boolval : bool
+        The boolean representation of `value`.
+
+    Raises
+    ------
+    ValueError
+        If the string is not 'True' or 'False' (case independent)
+
+    Examples
+    --------
+    >>> np.lib._iotools.str2bool('TRUE')
+    True
+    >>> np.lib._iotools.str2bool('false')
+    False
+
+    """
+    value = value.upper()
+    if value == 'TRUE':
+        return True
+    elif value == 'FALSE':
+        return False
+    else:
+        raise ValueError("Invalid boolean")
+
+
+class ConverterError(Exception):
+    """
+    Exception raised when an error occurs in a converter for string values.
+
+    """
+    pass
+
+
+class ConverterLockError(ConverterError):
+    """
+    Exception raised when an attempt is made to upgrade a locked converter.
+
+    """
+    pass
+
+
+class ConversionWarning(UserWarning):
+    """
+    Warning issued when a string converter has a problem.
+
+    Notes
+    -----
+    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
+    is explicitly suppressed with the "invalid_raise" keyword.
+
+    """
+    pass
+
+
+class StringConverter:
+    """
+    Factory class for a function transforming a string into another
+    object (int, float).
+
+    After initialization, an instance can be called to transform a string
+    into another object. If the string is recognized as representing a
+    missing value, a default value is returned.
+
+    Attributes
+    ----------
+    func : function
+        Function used for the conversion.
+    default : any
+        Default value to return when the input corresponds to a missing
+        value.
+    type : type
+        Type of the output.
+    _status : int
+        Integer representing the order of the conversion.
+    _mapper : sequence of tuples
+        Sequence of tuples (dtype, function, default value) to evaluate in
+        order.
+    _locked : bool
+        Holds `locked` parameter.
+
+    Parameters
+    ----------
+    dtype_or_func : {None, dtype, function}, optional
+        If a `dtype`, specifies the input data type, used to define a basic
+        function and a default value for missing data. For example, when
+        `dtype` is float, the `func` attribute is set to `float` and the
+        default value to `np.nan`. If a function, this function is used to
+        convert a string to another object. In this case, it is recommended
+        to give an associated default value as input.
+    default : any, optional
+        Value to return by default, that is, when the string to be
+        converted is flagged as missing. If not given, `StringConverter`
+        tries to supply a reasonable default value.
+    missing_values : {None, sequence of str}, optional
+        ``None`` or sequence of strings indicating a missing value. If
+        ``None``, then missing values are indicated by empty entries. The
+        default is ``None``.
+    locked : bool, optional
+        Whether the StringConverter should be locked to prevent automatic
+        upgrade or not. Default is False.
+
+    """
+    _mapper = [(nx.bool_, str2bool, False),
+               (nx.int_, int, -1),]
+
+    # On 32-bit systems, we need to make sure that we explicitly include
+    # nx.int64 since nx.int_ is nx.int32.
+    if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
+        _mapper.append((nx.int64, int, -1))
+
+    _mapper.extend([(nx.float64, float, nx.nan),
+                    (nx.complex128, complex, nx.nan + 0j),
+                    (nx.longdouble, nx.longdouble, nx.nan),
+                    # If a non-default dtype is passed, fall back to generic
+                    # ones (should only be used for the converter)
+                    (nx.integer, int, -1),
+                    (nx.floating, float, nx.nan),
+                    (nx.complexfloating, complex, nx.nan + 0j),
+                    # Last, try with the string types (must be last, because
+                    # `_mapper[-1]` is used as default in some cases)
+                    (nx.str_, asunicode, '???'),
+                    (nx.bytes_, asbytes, '???'),
+                    ])
+
+    @classmethod
+    def _getdtype(cls, val):
+        """Returns the dtype of the input variable."""
+        return np.array(val).dtype
+
+    @classmethod
+    def _getsubdtype(cls, val):
+        """Returns the type of the dtype of the input variable."""
+        return np.array(val).dtype.type
+
+    @classmethod
+    def _dtypeortype(cls, dtype):
+        """Returns dtype for datetime64 and type of dtype otherwise."""
+
+        # This is a bit annoying. We want to return the "general" type in
+        # most cases (i.e. "string" rather than "S10"), but we want to
+        # return the specific type for datetime64 (i.e. "datetime64[us]"
+        # rather than "datetime64").
+        if dtype.type == np.datetime64:
+            return dtype
+        return dtype.type
+
+    @classmethod
+    def upgrade_mapper(cls, func, default=None):
+        """
+        Upgrade the mapper of a StringConverter by adding a new function
+        and its corresponding default.
+
+        The input function (or sequence of functions) and its associated
+        default value (if any) is inserted in penultimate position of the
+        mapper. The corresponding type is estimated from the dtype of the
+        default value.
+
+        Parameters
+        ----------
+        func : var
+            Function, or sequence of functions
+
+        Examples
+        --------
+        >>> import dateutil.parser
+        >>> import datetime
+        >>> dateparser = dateutil.parser.parse
+        >>> defaultdate = datetime.date(2000, 1, 1)
+        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
+        """
+        # Func is a single function
+        if hasattr(func, '__call__'):
+            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
+            return
+        elif hasattr(func, '__iter__'):
+            if isinstance(func[0], (tuple, list)):
+                for _ in func:
+                    cls._mapper.insert(-1, _)
+                return
+            if default is None:
+                default = [None] * len(func)
+            else:
+                default = list(default)
+                # Pad with None; extend keeps the list of defaults flat.
+                default.extend([None] * (len(func) - len(default)))
+            for fct, dft in zip(func, default):
+                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
+
+    @classmethod
+    def _find_map_entry(cls, dtype):
+        # if a converter for the specific dtype is available use that
+        for i, (deftype, func, default_def) in enumerate(cls._mapper):
+            if dtype.type == deftype:
+                return i, (deftype, func, default_def)
+
+        # otherwise find an inexact match
+        for i, (deftype, func, default_def) in enumerate(cls._mapper):
+            if np.issubdtype(dtype.type, deftype):
+                return i, (deftype, func, default_def)
+
+        raise LookupError
+
+    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
+                 locked=False):
+        # Defines a lock for upgrade
+        self._locked = bool(locked)
+        # No input dtype: minimal initialization
+        if dtype_or_func is None:
+            self.func = str2bool
+            self._status = 0
+            self.default = default or False
+            dtype = np.dtype('bool')
+        else:
+            # Is the input a np.dtype ?
+            try:
+                self.func = None
+                dtype = np.dtype(dtype_or_func)
+            except TypeError:
+                # dtype_or_func must be a function, then
+                if not hasattr(dtype_or_func, '__call__'):
+                    errmsg = ("The input argument `dtype` is neither a"
+                              " function nor a dtype (got '%s' instead)")
+                    raise TypeError(errmsg % type(dtype_or_func))
+                # Set the function
+                self.func = dtype_or_func
+                # If we don't have a default, try to guess it or set it to
+                # None
+                if default is None:
+                    try:
+                        default = self.func('0')
+                    except ValueError:
+                        default = None
+                dtype = self._getdtype(default)
+
+        # find the best match in our mapper
+        try:
+            self._status, (_, func, default_def) = self._find_map_entry(dtype)
+        except LookupError:
+            # no match
+            self.default = default
+            _, func, _ = self._mapper[-1]
+            self._status = 0
+        else:
+            # use the found default only if we did not already have one
+            if default is None:
+                self.default = default_def
+            else:
+                self.default = default
+
+        # If the input was a dtype, set the function to the last we saw
+        if self.func is None:
+            self.func = func
+
+        # If the status is 1 (int), change the function to
+        # something more robust.
+        if self.func == self._mapper[1][1]:
+            if issubclass(dtype.type, np.uint64):
+                self.func = np.uint64
+            elif issubclass(dtype.type, np.int64):
+                self.func = np.int64
+            else:
+                self.func = lambda x: int(float(x))
+        # Store the list of strings corresponding to missing values.
+        if missing_values is None:
+            self.missing_values = {''}
+        else:
+            if isinstance(missing_values, str):
+                missing_values = missing_values.split(",")
+            self.missing_values = set(list(missing_values) + [''])
+
+        self._callingfunction = self._strict_call
+        self.type = self._dtypeortype(dtype)
+        self._checked = False
+        self._initial_default = default
+
+    def _loose_call(self, value):
+        try:
+            return self.func(value)
+        except ValueError:
+            return self.default
+
+    def _strict_call(self, value):
+        try:
+
+            # We check if we can convert the value using the current
+            # function
+            new_value = self.func(value)
+
+            # In addition to having to check whether func can convert the
+            # value, we also have to make sure that we don't get overflow
+            # errors for integers.
+            if self.func is int:
+                try:
+                    np.array(value, dtype=self.type)
+                except OverflowError:
+                    raise ValueError
+
+            # We're still here so we can now return the new value
+            return new_value
+
+        except ValueError:
+            if value.strip() in self.missing_values:
+                if not self._status:
+                    self._checked = False
+                return self.default
+            raise ValueError("Cannot convert string '%s'" % value)
+
+    def __call__(self, value):
+        return self._callingfunction(value)
+
+    def _do_upgrade(self):
+        # Raise an exception if we locked the converter...
+        if self._locked:
+            errmsg = "Converter is locked and cannot be upgraded"
+            raise ConverterLockError(errmsg)
+        _statusmax = len(self._mapper)
+        # Complains if we try to upgrade past the maximum
+        _status = self._status
+        if _status == _statusmax:
+            errmsg = "Could not find a valid conversion function"
+            raise ConverterError(errmsg)
+        elif _status < _statusmax - 1:
+            _status += 1
+        self.type, self.func, default = self._mapper[_status]
+        self._status = _status
+        if self._initial_default is not None:
+            self.default = self._initial_default
+        else:
+            self.default = default
+
+    def upgrade(self, value):
+        """
+        Find the best converter for a given string, and return the result.
+
+        The supplied string `value` is converted by testing different
+        converters in order. First the `func` method of the
+        `StringConverter` instance is tried, if this fails other available
+        converters are tried. The order in which these other converters
+        are tried is determined by the `_status` attribute of the instance.
+
+        Parameters
+        ----------
+        value : str
+            The string to convert.
+
+        Returns
+        -------
+        out : any
+            The result of converting `value` with the appropriate
+            converter.
+
+        """
+        self._checked = True
+        try:
+            return self._strict_call(value)
+        except ValueError:
+            self._do_upgrade()
+            return self.upgrade(value)
+
+    def iterupgrade(self, value):
+        self._checked = True
+        if not hasattr(value, '__iter__'):
+            value = (value,)
+        _strict_call = self._strict_call
+        try:
+            for _m in value:
+                _strict_call(_m)
+        except ValueError:
+            self._do_upgrade()
+            self.iterupgrade(value)
+
+    def update(self, func, default=None, testing_value=None,
+               missing_values='', locked=False):
+        """
+        Set StringConverter attributes directly.
+
+        Parameters
+        ----------
+        func : function
+            Conversion function.
+        default : any, optional
+            Value to return by default, that is, when the string to be
+            converted is flagged as missing. If not given,
+            `StringConverter` tries to supply a reasonable default value.
+        testing_value : str, optional
+            A string representing a standard input value of the converter.
+            This string is used to help defining a reasonable default
+            value.
+        missing_values : {sequence of str, None}, optional
+            Sequence of strings indicating a missing value. If ``None``,
+            then the existing `missing_values` are cleared. The default is
+            `''`.
+        locked : bool, optional
+            Whether the StringConverter should be locked to prevent
+            automatic upgrade or not. Default is False.
+
+        Notes
+        -----
+        `update` takes the same parameters as the constructor of
+        `StringConverter`, except that `func` does not accept a `dtype`
+        whereas `dtype_or_func` in the constructor does.
+
+        """
+        self.func = func
+        self._locked = locked
+
+        # Don't reset the default to None if we can avoid it
+        if default is not None:
+            self.default = default
+            self.type = self._dtypeortype(self._getdtype(default))
+        else:
+            try:
+                tester = func(testing_value or '1')
+            except (TypeError, ValueError):
+                tester = None
+            self.type = self._dtypeortype(self._getdtype(tester))
+
+        # Add the missing values to the existing set or clear it.
+        if missing_values is None:
+            # Clear all missing values even though the ctor initializes it
+            # to set(['']) when the argument is None.
+            self.missing_values = set()
+        else:
+            if not np.iterable(missing_values):
+                missing_values = [missing_values]
+            if not all(isinstance(v, str) for v in missing_values):
+                raise TypeError("missing_values must be strings or unicode")
+            self.missing_values.update(missing_values)
+
+
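`upgrade` walks `_mapper` in order (bool, int, float, complex, string), promoting the converter as soon as the current function rejects a value. A quick sketch; the exact integer type is platform dependent:

    from numpy.lib._iotools import StringConverter

    conv = StringConverter()   # starts out as a bool converter
    conv.upgrade('1')          # '1' is not TRUE/FALSE -> promoted to int
    conv.upgrade('1.5')        # int('1.5') fails -> promoted to float
    print(conv.type)           # <class 'numpy.float64'>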
+def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
+    """
+    Convenience function to create a `np.dtype` object.
+
+    The function processes the input `dtype` and matches it with the given
+    names.
+
+    Parameters
+    ----------
+    ndtype : var
+        Definition of the dtype. Can be any string or dictionary recognized
+        by the `np.dtype` function, or a sequence of types.
+    names : str or sequence, optional
+        Sequence of strings to use as field names for a structured dtype.
+        For convenience, `names` can be a string of a comma-separated list
+        of names.
+    defaultfmt : str, optional
+        Format string used to define missing names, such as ``"f%i"``
+        (default) or ``"fields_%02i"``.
+    validationargs : optional
+        A series of optional arguments used to initialize a
+        `NameValidator`.
+
+    Examples
+    --------
+    >>> np.lib._iotools.easy_dtype(float)
+    dtype('float64')
+    >>> np.lib._iotools.easy_dtype("i4, f8")
+    dtype([('f0', '<i4'), ('f1', '<f8')])
+    >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
+    dtype([('field_000', '<i4'), ('field_001', '<f8')])
+
+    >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
+    dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
+    >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
+    dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
+
+    """
+    try:
+        ndtype = np.dtype(ndtype)
+    except TypeError:
+        validate = NameValidator(**validationargs)
+        nbfields = len(ndtype)
+        if names is None:
+            names = [''] * len(ndtype)
+        elif isinstance(names, str):
+            names = names.split(",")
+        names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
+        ndtype = np.dtype(dict(formats=ndtype, names=names))
+    else:
+        # Explicit names
+        if names is not None:
+            validate = NameValidator(**validationargs)
+            if isinstance(names, str):
+                names = names.split(",")
+            # Simple dtype: repeat to match the number of names
+            if ndtype.names is None:
+                formats = tuple([ndtype.type] * len(names))
+                names = validate(names, defaultfmt=defaultfmt)
+                ndtype = np.dtype(list(zip(names, formats)))
+            # Structured dtype: just validate the names as needed
+            else:
+                ndtype.names = validate(names, nbfields=len(ndtype.names),
+                                        defaultfmt=defaultfmt)
+        # No implicit names
+        elif ndtype.names is not None:
+            validate = NameValidator(**validationargs)
+            # Default initial names : should we change the format ?
+            numbered_names = tuple("f%i" % i for i in range(len(ndtype.names)))
+            if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
+                ndtype.names = validate([''] * len(ndtype.names),
+                                        defaultfmt=defaultfmt)
+            # Explicit initial names : just validate
+            else:
+                ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
+    return ndtype
.venv/lib/python3.11/site-packages/numpy/lib/_version.pyi ADDED
@@ -0,0 +1,17 @@
+__all__: list[str]
+
+class NumpyVersion:
+    vstring: str
+    version: str
+    major: int
+    minor: int
+    bugfix: int
+    pre_release: str
+    is_devversion: bool
+    def __init__(self, vstring: str) -> None: ...
+    def __lt__(self, other: str | NumpyVersion) -> bool: ...
+    def __le__(self, other: str | NumpyVersion) -> bool: ...
+    def __eq__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
+    def __ne__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
+    def __gt__(self, other: str | NumpyVersion) -> bool: ...
+    def __ge__(self, other: str | NumpyVersion) -> bool: ...
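The stub types `numpy.lib.NumpyVersion`, whose comparison operators accept either another `NumpyVersion` or a bare version string, which makes version gating compact:

    import numpy as np
    from numpy.lib import NumpyVersion

    if NumpyVersion(np.__version__) >= '1.24.0':
        pass  # e.g. safe to pass equal_nan= to np.unique
    NumpyVersion('1.25.0rc1') < '1.25.0'  # True: pre-releases sort first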
.venv/lib/python3.11/site-packages/numpy/lib/arraypad.pyi ADDED
@@ -0,0 +1,85 @@
+from typing import (
+    Literal as L,
+    Any,
+    overload,
+    TypeVar,
+    Protocol,
+)
+
+from numpy import generic
+
+from numpy._typing import (
+    ArrayLike,
+    NDArray,
+    _ArrayLikeInt,
+    _ArrayLike,
+)
+
+_SCT = TypeVar("_SCT", bound=generic)
+
+class _ModeFunc(Protocol):
+    def __call__(
+        self,
+        vector: NDArray[Any],
+        iaxis_pad_width: tuple[int, int],
+        iaxis: int,
+        kwargs: dict[str, Any],
+        /,
+    ) -> None: ...
+
+_ModeKind = L[
+    "constant",
+    "edge",
+    "linear_ramp",
+    "maximum",
+    "mean",
+    "median",
+    "minimum",
+    "reflect",
+    "symmetric",
+    "wrap",
+    "empty",
+]
+
+__all__: list[str]
+
+# TODO: In practice each keyword argument is exclusive to one or more
+# specific modes. Consider adding more overloads to express this in the
+# future.
+
+# Expand `**kwargs` into explicit keyword-only arguments
+@overload
+def pad(
+    array: _ArrayLike[_SCT],
+    pad_width: _ArrayLikeInt,
+    mode: _ModeKind = ...,
+    *,
+    stat_length: None | _ArrayLikeInt = ...,
+    constant_values: ArrayLike = ...,
+    end_values: ArrayLike = ...,
+    reflect_type: L["odd", "even"] = ...,
+) -> NDArray[_SCT]: ...
+@overload
+def pad(
+    array: ArrayLike,
+    pad_width: _ArrayLikeInt,
+    mode: _ModeKind = ...,
+    *,
+    stat_length: None | _ArrayLikeInt = ...,
+    constant_values: ArrayLike = ...,
+    end_values: ArrayLike = ...,
+    reflect_type: L["odd", "even"] = ...,
+) -> NDArray[Any]: ...
+@overload
+def pad(
+    array: _ArrayLike[_SCT],
+    pad_width: _ArrayLikeInt,
+    mode: _ModeFunc,
+    **kwargs: Any,
+) -> NDArray[_SCT]: ...
+@overload
+def pad(
+    array: ArrayLike,
+    pad_width: _ArrayLikeInt,
+    mode: _ModeFunc,
+    **kwargs: Any,
+) -> NDArray[Any]: ...
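The `_ModeFunc` protocol captures the callable form of `mode`: `np.pad` invokes it once per axis with a rank-1 padded view that the function must fill in place (its return value is ignored). A minimal sketch:

    import numpy as np

    def pad_with(vector, iaxis_pad_width, iaxis, kwargs):
        # Fill both padded ends with a caller-supplied value (default 10).
        value = kwargs.get('padder', 10)
        vector[:iaxis_pad_width[0]] = value
        if iaxis_pad_width[1] > 0:
            vector[-iaxis_pad_width[1]:] = value

    a = np.arange(6).reshape(2, 3)
    np.pad(a, 1, pad_with, padder=-1)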
.venv/lib/python3.11/site-packages/numpy/lib/arraysetops.py ADDED
@@ -0,0 +1,981 @@
+ """
2
+ Set operations for arrays based on sorting.
3
+
4
+ Notes
5
+ -----
6
+
7
+ For floating point arrays, inaccurate results may appear due to usual round-off
8
+ and floating point comparison issues.
9
+
10
+ Speed could be gained in some operations by an implementation of
11
+ `numpy.sort`, that can provide directly the permutation vectors, thus avoiding
12
+ calls to `numpy.argsort`.
13
+
14
+ Original author: Robert Cimrman
15
+
16
+ """
17
+ import functools
18
+
19
+ import numpy as np
20
+ from numpy.core import overrides
21
+
22
+
23
+ array_function_dispatch = functools.partial(
24
+ overrides.array_function_dispatch, module='numpy')
25
+
26
+
27
+ __all__ = [
28
+ 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
29
+ 'in1d', 'isin'
30
+ ]
31
+
32
+
33
+ def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
34
+ return (ary, to_end, to_begin)
35
+
36
+
37
+ @array_function_dispatch(_ediff1d_dispatcher)
38
+ def ediff1d(ary, to_end=None, to_begin=None):
39
+ """
40
+ The differences between consecutive elements of an array.
41
+
42
+ Parameters
43
+ ----------
44
+ ary : array_like
45
+ If necessary, will be flattened before the differences are taken.
46
+ to_end : array_like, optional
47
+ Number(s) to append at the end of the returned differences.
48
+ to_begin : array_like, optional
49
+ Number(s) to prepend at the beginning of the returned differences.
50
+
51
+ Returns
52
+ -------
53
+ ediff1d : ndarray
54
+ The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
55
+
56
+ See Also
57
+ --------
58
+ diff, gradient
59
+
60
+ Notes
61
+ -----
62
+ When applied to masked arrays, this function drops the mask information
63
+ if the `to_begin` and/or `to_end` parameters are used.
64
+
65
+ Examples
66
+ --------
67
+ >>> x = np.array([1, 2, 4, 7, 0])
68
+ >>> np.ediff1d(x)
69
+ array([ 1, 2, 3, -7])
70
+
71
+ >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
72
+ array([-99, 1, 2, ..., -7, 88, 99])
73
+
74
+ The returned array is always 1D.
75
+
76
+ >>> y = [[1, 2, 4], [1, 6, 24]]
77
+ >>> np.ediff1d(y)
78
+ array([ 1, 2, -3, 5, 18])
79
+
80
+ """
81
+ # force a 1d array
82
+ ary = np.asanyarray(ary).ravel()
83
+
84
+ # enforce that the dtype of `ary` is used for the output
85
+ dtype_req = ary.dtype
86
+
87
+ # fast track default case
88
+ if to_begin is None and to_end is None:
89
+ return ary[1:] - ary[:-1]
90
+
91
+ if to_begin is None:
92
+ l_begin = 0
93
+ else:
94
+ to_begin = np.asanyarray(to_begin)
95
+ if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
96
+ raise TypeError("dtype of `to_begin` must be compatible "
97
+ "with input `ary` under the `same_kind` rule.")
98
+
99
+ to_begin = to_begin.ravel()
100
+ l_begin = len(to_begin)
101
+
102
+ if to_end is None:
103
+ l_end = 0
104
+ else:
105
+ to_end = np.asanyarray(to_end)
106
+ if not np.can_cast(to_end, dtype_req, casting="same_kind"):
107
+ raise TypeError("dtype of `to_end` must be compatible "
108
+ "with input `ary` under the `same_kind` rule.")
109
+
110
+ to_end = to_end.ravel()
111
+ l_end = len(to_end)
112
+
113
+ # do the calculation in place and copy to_begin and to_end
114
+ l_diff = max(len(ary) - 1, 0)
115
+ result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype)
116
+ result = ary.__array_wrap__(result)
117
+ if l_begin > 0:
118
+ result[:l_begin] = to_begin
119
+ if l_end > 0:
120
+ result[l_begin + l_diff:] = to_end
121
+ np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
122
+ return result
123
+
124
+
125
+ def _unpack_tuple(x):
126
+ """ Unpacks one-element tuples for use as return values """
127
+ if len(x) == 1:
128
+ return x[0]
129
+ else:
130
+ return x
131
+
132
+
133
+ def _unique_dispatcher(ar, return_index=None, return_inverse=None,
134
+ return_counts=None, axis=None, *, equal_nan=None):
135
+ return (ar,)
136
+
137
+
138
+ @array_function_dispatch(_unique_dispatcher)
139
+ def unique(ar, return_index=False, return_inverse=False,
140
+ return_counts=False, axis=None, *, equal_nan=True):
141
+ """
142
+ Find the unique elements of an array.
143
+
144
+ Returns the sorted unique elements of an array. There are three optional
145
+ outputs in addition to the unique elements:
146
+
147
+ * the indices of the input array that give the unique values
148
+ * the indices of the unique array that reconstruct the input array
149
+ * the number of times each unique value comes up in the input array
150
+
151
+ Parameters
152
+ ----------
153
+ ar : array_like
154
+ Input array. Unless `axis` is specified, this will be flattened if it
155
+ is not already 1-D.
156
+ return_index : bool, optional
157
+ If True, also return the indices of `ar` (along the specified axis,
158
+ if provided, or in the flattened array) that result in the unique array.
159
+ return_inverse : bool, optional
160
+ If True, also return the indices of the unique array (for the specified
161
+ axis, if provided) that can be used to reconstruct `ar`.
162
+ return_counts : bool, optional
163
+ If True, also return the number of times each unique item appears
164
+ in `ar`.
165
+ axis : int or None, optional
166
+ The axis to operate on. If None, `ar` will be flattened. If an integer,
167
+ the subarrays indexed by the given axis will be flattened and treated
168
+ as the elements of a 1-D array with the dimension of the given axis,
169
+ see the notes for more details. Object arrays or structured arrays
170
+ that contain objects are not supported if the `axis` kwarg is used. The
171
+ default is None.
172
+
173
+ .. versionadded:: 1.13.0
174
+
175
+ equal_nan : bool, optional
176
+ If True, collapses multiple NaN values in the return array into one.
177
+
178
+ .. versionadded:: 1.24
179
+
180
+ Returns
181
+ -------
182
+ unique : ndarray
183
+ The sorted unique values.
184
+ unique_indices : ndarray, optional
185
+ The indices of the first occurrences of the unique values in the
186
+ original array. Only provided if `return_index` is True.
187
+ unique_inverse : ndarray, optional
188
+ The indices to reconstruct the original array from the
189
+ unique array. Only provided if `return_inverse` is True.
190
+ unique_counts : ndarray, optional
191
+ The number of times each of the unique values comes up in the
192
+ original array. Only provided if `return_counts` is True.
193
+
194
+ .. versionadded:: 1.9.0
195
+
196
+ See Also
197
+ --------
198
+ numpy.lib.arraysetops : Module with a number of other functions for
199
+ performing set operations on arrays.
200
+ repeat : Repeat elements of an array.
201
+
202
+ Notes
203
+ -----
204
+ When an axis is specified the subarrays indexed by the axis are sorted.
205
+ This is done by making the specified axis the first dimension of the array
206
+ (move the axis to the first dimension to keep the order of the other axes)
207
+ and then flattening the subarrays in C order. The flattened subarrays are
208
+ then viewed as a structured type with each element given a label, with the
209
+ effect that we end up with a 1-D array of structured types that can be
210
+ treated in the same way as any other 1-D array. The result is that the
211
+ flattened subarrays are sorted in lexicographic order starting with the
212
+ first element.
213
+
214
+ .. versionchanged: NumPy 1.21
215
+ If nan values are in the input array, a single nan is put
216
+ to the end of the sorted unique values.
217
+
218
+ Also for complex arrays all NaN values are considered equivalent
219
+ (no matter whether the NaN is in the real or imaginary part).
220
+ As the representant for the returned array the smallest one in the
221
+ lexicographical order is chosen - see np.sort for how the lexicographical
222
+ order is defined for complex arrays.
223
+
224
+ Examples
225
+ --------
226
+ >>> np.unique([1, 1, 2, 2, 3, 3])
227
+ array([1, 2, 3])
228
+ >>> a = np.array([[1, 1], [2, 3]])
229
+ >>> np.unique(a)
230
+ array([1, 2, 3])
231
+
232
+ Return the unique rows of a 2D array
233
+
234
+ >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
235
+ >>> np.unique(a, axis=0)
236
+ array([[1, 0, 0], [2, 3, 4]])
237
+
238
+ Return the indices of the original array that give the unique values:
239
+
240
+ >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
241
+ >>> u, indices = np.unique(a, return_index=True)
242
+ >>> u
243
+ array(['a', 'b', 'c'], dtype='<U1')
244
+ >>> indices
245
+ array([0, 1, 3])
246
+ >>> a[indices]
247
+ array(['a', 'b', 'c'], dtype='<U1')
248
+
249
+ Reconstruct the input array from the unique values and inverse:
250
+
251
+ >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
252
+ >>> u, indices = np.unique(a, return_inverse=True)
253
+ >>> u
254
+ array([1, 2, 3, 4, 6])
255
+ >>> indices
256
+ array([0, 1, 4, 3, 1, 2, 1])
257
+ >>> u[indices]
258
+ array([1, 2, 6, 4, 2, 3, 2])
259
+
260
+ Reconstruct the input values from the unique values and counts:
261
+
262
+ >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
263
+ >>> values, counts = np.unique(a, return_counts=True)
264
+ >>> values
265
+ array([1, 2, 3, 4, 6])
266
+ >>> counts
267
+ array([1, 3, 1, 1, 1])
268
+ >>> np.repeat(values, counts)
269
+ array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved
270
+
271
+ """
+    ar = np.asanyarray(ar)
+    if axis is None:
+        ret = _unique1d(ar, return_index, return_inverse, return_counts,
+                        equal_nan=equal_nan)
+        return _unpack_tuple(ret)
+
+    # axis was specified and not None
+    try:
+        ar = np.moveaxis(ar, axis, 0)
+    except np.AxisError:
+        # this removes the "axis1" or "axis2" prefix from the error message
+        raise np.AxisError(axis, ar.ndim) from None
+
+    # Must reshape to a contiguous 2D array for this to work...
+    orig_shape, orig_dtype = ar.shape, ar.dtype
+    ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
+    ar = np.ascontiguousarray(ar)
+    dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
+
+    # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
+    # data type with `m` fields where each field has the data type of `ar`.
+    # In the following, we create the array `consolidated`, which has
+    # shape `(n,)` with data type `dtype`.
+    try:
+        if ar.shape[1] > 0:
+            consolidated = ar.view(dtype)
+        else:
+            # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
+            # a data type with itemsize 0, and the call `ar.view(dtype)` will
+            # fail.  Instead, we'll use `np.empty` to explicitly create the
+            # array with shape `(len(ar),)`.  Because `dtype` in this case has
+            # itemsize 0, the total size of the result is still 0 bytes.
+            consolidated = np.empty(len(ar), dtype=dtype)
+    except TypeError as e:
+        # There's no good way to do this for object arrays, etc...
+        msg = 'The axis argument to unique is not supported for dtype {dt}'
+        raise TypeError(msg.format(dt=ar.dtype)) from e
+
+    def reshape_uniq(uniq):
+        n = len(uniq)
+        uniq = uniq.view(orig_dtype)
+        uniq = uniq.reshape(n, *orig_shape[1:])
+        uniq = np.moveaxis(uniq, 0, axis)
+        return uniq
+
+    output = _unique1d(consolidated, return_index,
+                       return_inverse, return_counts, equal_nan=equal_nan)
+    output = (reshape_uniq(output[0]),) + output[1:]
+    return _unpack_tuple(output)
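
The structured-dtype view above is what lets the 1-D machinery compare whole rows at once: each row of the contiguous 2-D working array is reinterpreted as a single composite element. A minimal standalone sketch of that trick, using only public NumPy calls (variable names are illustrative):

    import numpy as np

    a = np.ascontiguousarray([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
    # View each row as one structured scalar with one field per column.
    row_dtype = [(f'f{i}', a.dtype) for i in range(a.shape[1])]
    rows = a.view(row_dtype)
    uniq = np.unique(rows)
    # View back as the original scalar type and restore the row shape.
    print(uniq.view(a.dtype).reshape(-1, a.shape[1]))  # [[1 0 0] [2 3 4]]
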
+
+
+def _unique1d(ar, return_index=False, return_inverse=False,
+              return_counts=False, *, equal_nan=True):
+    """
+    Find the unique elements of an array, ignoring shape.
+    """
+    ar = np.asanyarray(ar).flatten()
+
+    optional_indices = return_index or return_inverse
+
+    if optional_indices:
+        perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
+        aux = ar[perm]
+    else:
+        ar.sort()
+        aux = ar
+    mask = np.empty(aux.shape, dtype=np.bool_)
+    mask[:1] = True
+    if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and
+            np.isnan(aux[-1])):
+        if aux.dtype.kind == "c":  # for complex all NaNs are considered equivalent
+            aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left')
+        else:
+            aux_firstnan = np.searchsorted(aux, aux[-1], side='left')
+        if aux_firstnan > 0:
+            mask[1:aux_firstnan] = (
+                aux[1:aux_firstnan] != aux[:aux_firstnan - 1])
+        mask[aux_firstnan] = True
+        mask[aux_firstnan + 1:] = False
+    else:
+        mask[1:] = aux[1:] != aux[:-1]
+
+    ret = (aux[mask],)
+    if return_index:
+        ret += (perm[mask],)
+    if return_inverse:
+        imask = np.cumsum(mask) - 1
+        inv_idx = np.empty(mask.shape, dtype=np.intp)
+        inv_idx[perm] = imask
+        ret += (inv_idx,)
+    if return_counts:
+        idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
+        ret += (np.diff(idx),)
+    return ret
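
In essence, `_unique1d` is sort-then-mask: after sorting, a boolean mask marks the first element of every run of equal values, and the counts fall out as differences between consecutive mask positions. A condensed sketch of that core, without the index/inverse/NaN handling:

    import numpy as np

    ar = np.array([2, 1, 2, 3, 1])
    aux = np.sort(ar)
    mask = np.empty(aux.shape, dtype=bool)
    mask[:1] = True
    mask[1:] = aux[1:] != aux[:-1]   # True at the start of each run
    uniques = aux[mask]              # [1 2 3]
    idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
    counts = np.diff(idx)            # [2 2 1]
    print(uniques, counts)
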
+
+
+def _intersect1d_dispatcher(
+        ar1, ar2, assume_unique=None, return_indices=None):
+    return (ar1, ar2)
+
+
+@array_function_dispatch(_intersect1d_dispatcher)
+def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
+    """
+    Find the intersection of two arrays.
+
+    Return the sorted, unique values that are in both of the input arrays.
+
+    Parameters
+    ----------
+    ar1, ar2 : array_like
+        Input arrays. Will be flattened if not already 1D.
+    assume_unique : bool
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation.  If True but ``ar1`` or ``ar2`` are not
+        unique, incorrect results and out-of-bounds indices could result.
+        Default is False.
+    return_indices : bool
+        If True, the indices which correspond to the intersection of the two
+        arrays are returned. The first instance of a value is used if there are
+        multiple. Default is False.
+
+        .. versionadded:: 1.15.0
+
+    Returns
+    -------
+    intersect1d : ndarray
+        Sorted 1D array of common and unique elements.
+    comm1 : ndarray
+        The indices of the first occurrences of the common values in `ar1`.
+        Only provided if `return_indices` is True.
+    comm2 : ndarray
+        The indices of the first occurrences of the common values in `ar2`.
+        Only provided if `return_indices` is True.
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Examples
+    --------
+    >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
+    array([1, 3])
+
+    To intersect more than two arrays, use functools.reduce:
+
+    >>> from functools import reduce
+    >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
+    array([3])
+
+    To return the indices of the values common to the input arrays
+    along with the intersected values:
+
+    >>> x = np.array([1, 1, 2, 3, 4])
+    >>> y = np.array([2, 1, 4, 6])
+    >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
+    >>> x_ind, y_ind
+    (array([0, 2, 4]), array([1, 0, 2]))
+    >>> xy, x[x_ind], y[y_ind]
+    (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
+
+    """
+    ar1 = np.asanyarray(ar1)
+    ar2 = np.asanyarray(ar2)
+
+    if not assume_unique:
+        if return_indices:
+            ar1, ind1 = unique(ar1, return_index=True)
+            ar2, ind2 = unique(ar2, return_index=True)
+        else:
+            ar1 = unique(ar1)
+            ar2 = unique(ar2)
+    else:
+        ar1 = ar1.ravel()
+        ar2 = ar2.ravel()
+
+    aux = np.concatenate((ar1, ar2))
+    if return_indices:
+        aux_sort_indices = np.argsort(aux, kind='mergesort')
+        aux = aux[aux_sort_indices]
+    else:
+        aux.sort()
+
+    mask = aux[1:] == aux[:-1]
+    int1d = aux[:-1][mask]
+
+    if return_indices:
+        ar1_indices = aux_sort_indices[:-1][mask]
+        ar2_indices = aux_sort_indices[1:][mask] - ar1.size
+        if not assume_unique:
+            ar1_indices = ind1[ar1_indices]
+            ar2_indices = ind2[ar2_indices]
+
+        return int1d, ar1_indices, ar2_indices
+    else:
+        return int1d
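
The intersection itself rests on one invariant: once each input is deduplicated, concatenating and sorting places any shared value next to its twin, so equality between neighbours flags exactly the common elements. The trick in isolation:

    import numpy as np

    ar1, ar2 = np.unique([1, 3, 4, 3]), np.unique([3, 1, 2, 1])
    aux = np.sort(np.concatenate((ar1, ar2)))
    print(aux[:-1][aux[1:] == aux[:-1]])  # [1 3]
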
+
+
+def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
+    return (ar1, ar2)
+
+
+@array_function_dispatch(_setxor1d_dispatcher)
+def setxor1d(ar1, ar2, assume_unique=False):
+    """
+    Find the set exclusive-or of two arrays.
+
+    Return the sorted, unique values that are in only one (not both) of the
+    input arrays.
+
+    Parameters
+    ----------
+    ar1, ar2 : array_like
+        Input arrays.
+    assume_unique : bool
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation.  Default is False.
+
+    Returns
+    -------
+    setxor1d : ndarray
+        Sorted 1D array of unique values that are in only one of the input
+        arrays.
+
+    Examples
+    --------
+    >>> a = np.array([1, 2, 3, 2, 4])
+    >>> b = np.array([2, 3, 5, 7, 5])
+    >>> np.setxor1d(a, b)
+    array([1, 4, 5, 7])
+
+    """
+    if not assume_unique:
+        ar1 = unique(ar1)
+        ar2 = unique(ar2)
+
+    aux = np.concatenate((ar1, ar2))
+    if aux.size == 0:
+        return aux
+
+    aux.sort()
+    flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
+    return aux[flag[1:] & flag[:-1]]
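
The closing flag trick keeps a sorted value only when it differs from both neighbours, i.e. when it occurs exactly once across the two deduplicated inputs. Illustrated on the docstring's data:

    import numpy as np

    aux = np.sort(np.concatenate((np.unique([1, 2, 3, 2, 4]),
                                  np.unique([2, 3, 5, 7, 5]))))
    flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
    print(aux[flag[1:] & flag[:-1]])  # [1 4 5 7]
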
+
+
+def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *,
+                     kind=None):
+    return (ar1, ar2)
+
+
+@array_function_dispatch(_in1d_dispatcher)
+def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
+    """
+    Test whether each element of a 1-D array is also present in a second array.
+
+    Returns a boolean array the same length as `ar1` that is True
+    where an element of `ar1` is in `ar2` and False otherwise.
+
+    We recommend using :func:`isin` instead of `in1d` for new code.
+
+    Parameters
+    ----------
+    ar1 : (M,) array_like
+        Input array.
+    ar2 : array_like
+        The values against which to test each value of `ar1`.
+    assume_unique : bool, optional
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation.  Default is False.
+    invert : bool, optional
+        If True, the values in the returned array are inverted (that is,
+        False where an element of `ar1` is in `ar2` and True otherwise).
+        Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
+        to (but is faster than) ``np.invert(in1d(a, b))``.
+    kind : {None, 'sort', 'table'}, optional
+        The algorithm to use. This will not affect the final result,
+        but will affect the speed and memory use. The default, None,
+        will select automatically based on memory considerations.
+
+        * If 'sort', will use a mergesort-based approach. This will have
+          a memory usage of roughly 6 times the sum of the sizes of
+          `ar1` and `ar2`, not accounting for size of dtypes.
+        * If 'table', will use a lookup table approach similar
+          to a counting sort. This is only available for boolean and
+          integer arrays. This will have a memory usage of the
+          size of `ar1` plus the max-min value of `ar2`. `assume_unique`
+          has no effect when the 'table' option is used.
+        * If None, will automatically choose 'table' if
+          the required memory allocation is less than or equal to
+          6 times the sum of the sizes of `ar1` and `ar2`,
+          otherwise will use 'sort'. This is done to not use
+          a large amount of memory by default, even though
+          'table' may be faster in most cases. If 'table' is chosen,
+          `assume_unique` will have no effect.
+
+        .. versionadded:: 1.8.0
+
+    Returns
+    -------
+    in1d : (M,) ndarray, bool
+        The values `ar1[in1d]` are in `ar2`.
+
+    See Also
+    --------
+    isin                  : Version of this function that preserves the
+                            shape of ar1.
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Notes
+    -----
+    `in1d` can be considered as an element-wise function version of the
+    python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
+    equivalent to ``np.array([item in b for item in a])``.
+    However, this idea fails if `ar2` is a set, or similar (non-sequence)
+    container:  As ``ar2`` is converted to an array, in those cases
+    ``asarray(ar2)`` is an object array rather than the expected array of
+    contained values.
+
+    Using ``kind='table'`` tends to be faster than `kind='sort'` if the
+    following relationship is true:
+    ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
+    but may use greater memory. The default value for `kind` will
+    be automatically selected based only on memory usage, so one may
+    manually set ``kind='table'`` if memory constraints can be relaxed.
+
+    .. versionadded:: 1.4.0
+
+    Examples
+    --------
+    >>> test = np.array([0, 1, 2, 5, 0])
+    >>> states = [0, 2]
+    >>> mask = np.in1d(test, states)
+    >>> mask
+    array([ True, False,  True, False,  True])
+    >>> test[mask]
+    array([0, 2, 0])
+    >>> mask = np.in1d(test, states, invert=True)
+    >>> mask
+    array([False,  True, False,  True, False])
+    >>> test[mask]
+    array([1, 5])
+    """
+    # Ravel both arrays, behavior for the first array could be different
+    ar1 = np.asarray(ar1).ravel()
+    ar2 = np.asarray(ar2).ravel()
+
+    # Ensure that iteration through object arrays yields size-1 arrays
+    if ar2.dtype == object:
+        ar2 = ar2.reshape(-1, 1)
+
+    if kind not in {None, 'sort', 'table'}:
+        raise ValueError(
+            f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.")
+
+    # Can use the table method if all arrays are integers or boolean:
+    is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2))
+    use_table_method = is_int_arrays and kind in {None, 'table'}
+
+    if use_table_method:
+        if ar2.size == 0:
+            if invert:
+                return np.ones_like(ar1, dtype=bool)
+            else:
+                return np.zeros_like(ar1, dtype=bool)
+
+        # Convert booleans to uint8 so we can use the fast integer algorithm
+        if ar1.dtype == bool:
+            ar1 = ar1.astype(np.uint8)
+        if ar2.dtype == bool:
+            ar2 = ar2.astype(np.uint8)
+
+        ar2_min = np.min(ar2)
+        ar2_max = np.max(ar2)
+
+        ar2_range = int(ar2_max) - int(ar2_min)
+
+        # Constraints on whether we can actually use the table method:
+        # 1. Assert memory usage is not too large
+        below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)
+        # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype
+        range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max
+        # 3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype
+        if ar1.size > 0:
+            ar1_min = np.min(ar1)
+            ar1_max = np.max(ar1)
+
+            # After masking, the range of ar1 is guaranteed to be
+            # within the range of ar2:
+            ar1_upper = min(int(ar1_max), int(ar2_max))
+            ar1_lower = max(int(ar1_min), int(ar2_min))
+
+            range_safe_from_overflow &= all((
+                ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max,
+                ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min
+            ))
+
+        # Optimal performance is for approximately
+        # log10(size) > (log10(range) - 2.27) / 0.927.
+        # However, here we set the requirement that by default
+        # the intermediate array can only be 6x
+        # the combined memory allocation of the original
+        # arrays. See discussion on
+        # https://github.com/numpy/numpy/pull/12065.
+
+        if (
+            range_safe_from_overflow and
+            (below_memory_constraint or kind == 'table')
+        ):
+
+            if invert:
+                outgoing_array = np.ones_like(ar1, dtype=bool)
+            else:
+                outgoing_array = np.zeros_like(ar1, dtype=bool)
+
+            # Make elements 1 where the integer exists in ar2
+            if invert:
+                isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)
+                isin_helper_ar[ar2 - ar2_min] = 0
+            else:
+                isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)
+                isin_helper_ar[ar2 - ar2_min] = 1
+
+            # Mask out elements we know won't work
+            basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)
+            outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] -
+                                                        ar2_min]
+
+            return outgoing_array
+        elif kind == 'table':  # not range_safe_from_overflow
+            raise RuntimeError(
+                "You have specified kind='table', "
+                "but the range of values in `ar2` or `ar1` exceed the "
+                "maximum integer of the datatype. "
+                "Please set `kind` to None or 'sort'."
+            )
+    elif kind == 'table':
+        raise ValueError(
+            "The 'table' method is only "
+            "supported for boolean or integer arrays. "
+            "Please select 'sort' or None for kind."
+        )
+
+    # Check if one of the arrays may contain arbitrary objects
+    contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject
+
+    # This code is run when
+    # a) the first condition is true, making the code significantly faster
+    # b) the second condition is true (i.e. `ar1` or `ar2` may contain
+    #    arbitrary objects), since then sorting is not guaranteed to work
+    if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
+        if invert:
+            mask = np.ones(len(ar1), dtype=bool)
+            for a in ar2:
+                mask &= (ar1 != a)
+        else:
+            mask = np.zeros(len(ar1), dtype=bool)
+            for a in ar2:
+                mask |= (ar1 == a)
+        return mask
+
+    # Otherwise use sorting
+    if not assume_unique:
+        ar1, rev_idx = np.unique(ar1, return_inverse=True)
+        ar2 = np.unique(ar2)
+
+    ar = np.concatenate((ar1, ar2))
+    # We need this to be a stable sort, so always use 'mergesort'
+    # here. The values from the first array should always come before
+    # the values from the second array.
+    order = ar.argsort(kind='mergesort')
+    sar = ar[order]
+    if invert:
+        bool_ar = (sar[1:] != sar[:-1])
+    else:
+        bool_ar = (sar[1:] == sar[:-1])
+    flag = np.concatenate((bool_ar, [invert]))
+    ret = np.empty(ar.shape, dtype=bool)
+    ret[order] = flag
+
+    if assume_unique:
+        return ret[:len(ar1)]
+    else:
+        return ret[rev_idx]
+
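The 'table' branch above is a counting-sort-style membership test: build a boolean lookup table spanning the value range of `ar2`, then index it with the in-range values of `ar1`. A standalone sketch of the idea, without the overflow and memory guards the function applies:

    import numpy as np

    ar1 = np.array([0, 1, 2, 5, 0])
    ar2 = np.array([0, 2])
    lo, hi = int(ar2.min()), int(ar2.max())
    table = np.zeros(hi - lo + 1, dtype=bool)
    table[ar2 - lo] = True                    # mark members of ar2
    in_range = (ar1 >= lo) & (ar1 <= hi)      # out-of-range values can't match
    result = np.zeros(ar1.shape, dtype=bool)
    result[in_range] = table[ar1[in_range] - lo]
    print(result)                             # [ True False  True False  True]
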
+
+
+def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None,
+                     *, kind=None):
+    return (element, test_elements)
+
+
+@array_function_dispatch(_isin_dispatcher)
+def isin(element, test_elements, assume_unique=False, invert=False, *,
+         kind=None):
+    """
+    Calculates ``element in test_elements``, broadcasting over `element` only.
+    Returns a boolean array of the same shape as `element` that is True
+    where an element of `element` is in `test_elements` and False otherwise.
+
+    Parameters
+    ----------
+    element : array_like
+        Input array.
+    test_elements : array_like
+        The values against which to test each value of `element`.
+        This argument is flattened if it is an array or array_like.
+        See notes for behavior with non-array-like parameters.
+    assume_unique : bool, optional
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation.  Default is False.
+    invert : bool, optional
+        If True, the values in the returned array are inverted, as if
+        calculating `element not in test_elements`. Default is False.
+        ``np.isin(a, b, invert=True)`` is equivalent to (but faster
+        than) ``np.invert(np.isin(a, b))``.
+    kind : {None, 'sort', 'table'}, optional
+        The algorithm to use. This will not affect the final result,
+        but will affect the speed and memory use. The default, None,
+        will select automatically based on memory considerations.
+
+        * If 'sort', will use a mergesort-based approach. This will have
+          a memory usage of roughly 6 times the sum of the sizes of
+          `ar1` and `ar2`, not accounting for size of dtypes.
+        * If 'table', will use a lookup table approach similar
+          to a counting sort. This is only available for boolean and
+          integer arrays. This will have a memory usage of the
+          size of `ar1` plus the max-min value of `ar2`. `assume_unique`
+          has no effect when the 'table' option is used.
+        * If None, will automatically choose 'table' if
+          the required memory allocation is less than or equal to
+          6 times the sum of the sizes of `ar1` and `ar2`,
+          otherwise will use 'sort'. This is done to not use
+          a large amount of memory by default, even though
+          'table' may be faster in most cases. If 'table' is chosen,
+          `assume_unique` will have no effect.
+
+    Returns
+    -------
+    isin : ndarray, bool
+        Has the same shape as `element`. The values `element[isin]`
+        are in `test_elements`.
+
+    See Also
+    --------
+    in1d                  : Flattened version of this function.
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Notes
+    -----
+    `isin` is an element-wise function version of the python keyword `in`.
+    ``isin(a, b)`` is roughly equivalent to
+    ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
+
+    `element` and `test_elements` are converted to arrays if they are not
+    already. If `test_elements` is a set (or other non-sequence collection)
+    it will be converted to an object array with one element, rather than an
+    array of the values contained in `test_elements`. This is a consequence
+    of the `array` constructor's way of handling non-sequence collections.
+    Converting the set to a list usually gives the desired behavior.
+
+    Using ``kind='table'`` tends to be faster than `kind='sort'` if the
+    following relationship is true:
+    ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
+    but may use greater memory. The default value for `kind` will
+    be automatically selected based only on memory usage, so one may
+    manually set ``kind='table'`` if memory constraints can be relaxed.
+
+    .. versionadded:: 1.13.0
+
+    Examples
+    --------
+    >>> element = 2*np.arange(4).reshape((2, 2))
+    >>> element
+    array([[0, 2],
+           [4, 6]])
+    >>> test_elements = [1, 2, 4, 8]
+    >>> mask = np.isin(element, test_elements)
+    >>> mask
+    array([[False,  True],
+           [ True, False]])
+    >>> element[mask]
+    array([2, 4])
+
+    The indices of the matched values can be obtained with `nonzero`:
+
+    >>> np.nonzero(mask)
+    (array([0, 1]), array([1, 0]))
+
+    The test can also be inverted:
+
+    >>> mask = np.isin(element, test_elements, invert=True)
+    >>> mask
+    array([[ True, False],
+           [False,  True]])
+    >>> element[mask]
+    array([0, 6])
+
+    Because of how `array` handles sets, the following does not
+    work as expected:
+
+    >>> test_set = {1, 2, 4, 8}
+    >>> np.isin(element, test_set)
+    array([[False, False],
+           [False, False]])
+
+    Casting the set to a list gives the expected result:
+
+    >>> np.isin(element, list(test_set))
+    array([[False,  True],
+           [ True, False]])
+    """
+    element = np.asarray(element)
+    return in1d(element, test_elements, assume_unique=assume_unique,
+                invert=invert, kind=kind).reshape(element.shape)
+
+
+def _union1d_dispatcher(ar1, ar2):
+    return (ar1, ar2)
+
+
+@array_function_dispatch(_union1d_dispatcher)
+def union1d(ar1, ar2):
+    """
+    Find the union of two arrays.
+
+    Return the unique, sorted array of values that are in either of the two
+    input arrays.
+
+    Parameters
+    ----------
+    ar1, ar2 : array_like
+        Input arrays. They are flattened if they are not already 1D.
+
+    Returns
+    -------
+    union1d : ndarray
+        Unique, sorted union of the input arrays.
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Examples
+    --------
+    >>> np.union1d([-1, 0, 1], [-2, 0, 2])
+    array([-2, -1,  0,  1,  2])
+
+    To find the union of more than two arrays, use functools.reduce:
+
+    >>> from functools import reduce
+    >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
+    array([1, 2, 3, 4, 6])
+    """
+    return unique(np.concatenate((ar1, ar2), axis=None))
+
+
+def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
+    return (ar1, ar2)
+
+
+@array_function_dispatch(_setdiff1d_dispatcher)
+def setdiff1d(ar1, ar2, assume_unique=False):
+    """
+    Find the set difference of two arrays.
+
+    Return the unique values in `ar1` that are not in `ar2`.
+
+    Parameters
+    ----------
+    ar1 : array_like
+        Input array.
+    ar2 : array_like
+        Input comparison array.
+    assume_unique : bool
+        If True, the input arrays are both assumed to be unique, which
+        can speed up the calculation.  Default is False.
+
+    Returns
+    -------
+    setdiff1d : ndarray
+        1D array of values in `ar1` that are not in `ar2`. The result
+        is sorted when `assume_unique=False`, but otherwise only sorted
+        if the input is sorted.
+
+    See Also
+    --------
+    numpy.lib.arraysetops : Module with a number of other functions for
+                            performing set operations on arrays.
+
+    Examples
+    --------
+    >>> a = np.array([1, 2, 3, 2, 4, 1])
+    >>> b = np.array([3, 4, 5, 6])
+    >>> np.setdiff1d(a, b)
+    array([1, 2])
+
+    """
+    if assume_unique:
+        ar1 = np.asarray(ar1).ravel()
+    else:
+        ar1 = unique(ar1)
+        ar2 = unique(ar2)
+    return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
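
Taken together, these functions are NumPy's set algebra. A short smoke test exercising each public entry point on one pair of arrays (expected outputs, per the docstrings above, in comments):

    import numpy as np

    a, b = np.array([1, 2, 3, 2, 4]), np.array([2, 3, 5, 7, 5])
    print(np.unique(a))          # [1 2 3 4]
    print(np.intersect1d(a, b))  # [2 3]
    print(np.union1d(a, b))      # [1 2 3 4 5 7]
    print(np.setdiff1d(a, b))    # [1 4]
    print(np.setxor1d(a, b))     # [1 4 5 7]
    print(np.isin(a, b))         # [False  True  True  True False]
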
.venv/lib/python3.11/site-packages/numpy/lib/arrayterator.py ADDED
@@ -0,0 +1,219 @@
+"""
+A buffered iterator for big arrays.
+
+This module solves the problem of iterating over a big file-based array
+without having to read it into memory. The `Arrayterator` class wraps
+an array object, and when iterated it will return sub-arrays with at most
+a user-specified number of elements.
+
+"""
+from operator import mul
+from functools import reduce
+
+__all__ = ['Arrayterator']
+
+
+class Arrayterator:
+    """
+    Buffered iterator for big arrays.
+
+    `Arrayterator` creates a buffered iterator for reading big arrays in small
+    contiguous blocks. The class is useful for objects stored in the
+    file system. It allows iteration over the object *without* reading
+    everything in memory; instead, small blocks are read and iterated over.
+
+    `Arrayterator` can be used with any object that supports multidimensional
+    slices. This includes NumPy arrays, but also variables from
+    Scientific.IO.NetCDF or pynetcdf for example.
+
+    Parameters
+    ----------
+    var : array_like
+        The object to iterate over.
+    buf_size : int, optional
+        The buffer size. If `buf_size` is supplied, the maximum amount of
+        data that will be read into memory is `buf_size` elements.
+        Default is None, which will read as many elements as possible
+        into memory.
+
+    Attributes
+    ----------
+    var
+    buf_size
+    start
+    stop
+    step
+    shape
+    flat
+
+    See Also
+    --------
+    ndenumerate : Multidimensional array iterator.
+    flatiter : Flat array iterator.
+    memmap : Create a memory-map to an array stored in a binary file on disk.
+
+    Notes
+    -----
+    The algorithm works by first finding a "running dimension", along which
+    the blocks will be extracted. Given an array of dimensions
+    ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
+    first dimension will be used. If, on the other hand,
+    ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
+    Blocks are extracted along this dimension, and when the last block is
+    returned the process continues from the next dimension, until all
+    elements have been read.
+
+    Examples
+    --------
+    >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+    >>> a_itor = np.lib.Arrayterator(a, 2)
+    >>> a_itor.shape
+    (3, 4, 5, 6)
+
+    Now we can iterate over ``a_itor``, and it will return arrays of size
+    two. Since `buf_size` was smaller than any dimension, the first
+    dimension will be iterated over first:
+
+    >>> for subarr in a_itor:
+    ...     if not subarr.all():
+    ...         print(subarr, subarr.shape) # doctest: +SKIP
+    >>> # [[[[0 1]]]] (1, 1, 1, 2)
+
+    """
+
+    def __init__(self, var, buf_size=None):
+        self.var = var
+        self.buf_size = buf_size
+
+        self.start = [0 for dim in var.shape]
+        self.stop = [dim for dim in var.shape]
+        self.step = [1 for dim in var.shape]
+
+    def __getattr__(self, attr):
+        return getattr(self.var, attr)
+
+    def __getitem__(self, index):
+        """
+        Return a new arrayterator.
+
+        """
+        # Fix index, handling ellipsis and incomplete slices.
+        if not isinstance(index, tuple):
+            index = (index,)
+        fixed = []
+        length, dims = len(index), self.ndim
+        for slice_ in index:
+            if slice_ is Ellipsis:
+                fixed.extend([slice(None)] * (dims-length+1))
+                length = len(fixed)
+            elif isinstance(slice_, int):
+                fixed.append(slice(slice_, slice_+1, 1))
+            else:
+                fixed.append(slice_)
+        index = tuple(fixed)
+        if len(index) < dims:
+            index += (slice(None),) * (dims-len(index))
+
+        # Return a new arrayterator object.
+        out = self.__class__(self.var, self.buf_size)
+        for i, (start, stop, step, slice_) in enumerate(
+                zip(self.start, self.stop, self.step, index)):
+            out.start[i] = start + (slice_.start or 0)
+            out.step[i] = step * (slice_.step or 1)
+            out.stop[i] = start + (slice_.stop or stop-start)
+            out.stop[i] = min(stop, out.stop[i])
+        return out
+
+    def __array__(self):
+        """
+        Return corresponding data.
+
+        """
+        slice_ = tuple(slice(*t) for t in zip(
+            self.start, self.stop, self.step))
+        return self.var[slice_]
+
+    @property
+    def flat(self):
+        """
+        A 1-D flat iterator for Arrayterator objects.
+
+        This iterator returns elements of the array to be iterated over in
+        `Arrayterator` one by one. It is similar to `flatiter`.
+
+        See Also
+        --------
+        Arrayterator
+        flatiter
+
+        Examples
+        --------
+        >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
+        >>> a_itor = np.lib.Arrayterator(a, 2)
+
+        >>> for subarr in a_itor.flat:
+        ...     if not subarr:
+        ...         print(subarr, type(subarr))
+        ...
+        0 <class 'numpy.int64'>
+
+        """
+        for block in self:
+            yield from block.flat
+
+    @property
+    def shape(self):
+        """
+        The shape of the array to be iterated over.
+
+        For an example, see `Arrayterator`.
+
+        """
+        return tuple(((stop-start-1)//step+1) for start, stop, step in
+                     zip(self.start, self.stop, self.step))
+
+    def __iter__(self):
+        # Skip arrays with degenerate dimensions
+        if [dim for dim in self.shape if dim <= 0]:
+            return
+
+        start = self.start[:]
+        stop = self.stop[:]
+        step = self.step[:]
+        ndims = self.var.ndim
+
+        while True:
+            count = self.buf_size or reduce(mul, self.shape)
+
+            # iterate over each dimension, looking for the
+            # running dimension (ie, the dimension along which
+            # the blocks will be built from)
+            rundim = 0
+            for i in range(ndims-1, -1, -1):
+                # if count is zero we ran out of elements to read
+                # along higher dimensions, so we read only a single position
+                if count == 0:
+                    stop[i] = start[i]+1
+                elif count <= self.shape[i]:
+                    # limit along this dimension
+                    stop[i] = start[i] + count*step[i]
+                    rundim = i
+                else:
+                    # read everything along this dimension
+                    stop[i] = self.stop[i]
+                stop[i] = min(self.stop[i], stop[i])
+                count = count//self.shape[i]
+
+            # yield a block
+            slice_ = tuple(slice(*t) for t in zip(start, stop, step))
+            yield self.var[slice_]
+
+            # Update start position, taking care of overflow to
+            # other dimensions
+            start[rundim] = stop[rundim]    # start where we stopped
+            for i in range(ndims-1, 0, -1):
+                if start[i] >= self.stop[i]:
+                    start[i] = self.start[i]
+                    start[i-1] += self.step[i-1]
+            if start[0] >= self.stop[0]:
+                return
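
To make the block-iteration behaviour concrete, a quick self-contained check: with ``buf_size=2`` on a ``(3, 4, 5, 6)`` array, the running dimension is the last one, every block has shape ``(1, 1, 1, 2)``, and the blocks cover the array exactly once in C order:

    import numpy as np
    from numpy.lib import Arrayterator

    a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
    blocks = list(Arrayterator(a, buf_size=2))
    print(len(blocks), blocks[0].shape)     # 180 (1, 1, 1, 2)
    # Every element is visited exactly once, in C order:
    flat = np.concatenate([b.ravel() for b in blocks])
    print(np.array_equal(flat, a.ravel()))  # True
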
.venv/lib/python3.11/site-packages/numpy/lib/arrayterator.pyi ADDED
@@ -0,0 +1,49 @@
+from collections.abc import Generator
+from typing import (
+    Any,
+    TypeVar,
+    Union,
+    overload,
+)
+
+from numpy import ndarray, dtype, generic
+from numpy._typing import DTypeLike
+
+# TODO: Set a shape bound once we've got proper shape support
+_Shape = TypeVar("_Shape", bound=Any)
+_DType = TypeVar("_DType", bound=dtype[Any])
+_ScalarType = TypeVar("_ScalarType", bound=generic)
+
+_Index = Union[
+    Union[ellipsis, int, slice],
+    tuple[Union[ellipsis, int, slice], ...],
+]
+
+__all__: list[str]
+
+# NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,
+# but its ``__getattr__`` method does wrap around the former and thus has
+# access to all its methods
+
+class Arrayterator(ndarray[_Shape, _DType]):
+    var: ndarray[_Shape, _DType]  # type: ignore[assignment]
+    buf_size: None | int
+    start: list[int]
+    stop: list[int]
+    step: list[int]
+
+    @property  # type: ignore[misc]
+    def shape(self) -> tuple[int, ...]: ...
+    @property
+    def flat(  # type: ignore[override]
+        self: ndarray[Any, dtype[_ScalarType]]
+    ) -> Generator[_ScalarType, None, None]: ...
+    def __init__(
+        self, var: ndarray[_Shape, _DType], buf_size: None | int = ...
+    ) -> None: ...
+    @overload
+    def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ...
+    @overload
+    def __array__(self, dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ...
+    def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ...
+    def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ...
.venv/lib/python3.11/site-packages/numpy/lib/format.py ADDED
@@ -0,0 +1,976 @@
+"""
+Binary serialization
+
+NPY format
+==========
+
+A simple format for saving numpy arrays to disk with the full
+information about them.
+
+The ``.npy`` format is the standard binary file format in NumPy for
+persisting a *single* arbitrary NumPy array on disk.  The format stores all
+of the shape and dtype information necessary to reconstruct the array
+correctly even on another machine with a different architecture.
+The format is designed to be as simple as possible while achieving
+its limited goals.
+
+The ``.npz`` format is the standard format for persisting *multiple* NumPy
+arrays on disk.  A ``.npz`` file is a zip file containing multiple ``.npy``
+files, one for each array.
+
+Capabilities
+------------
+
+- Can represent all NumPy arrays including nested record arrays and
+  object arrays.
+
+- Represents the data in its native binary form.
+
+- Supports Fortran-contiguous arrays directly.
+
+- Stores all of the necessary information to reconstruct the array
+  including shape and dtype on a machine of a different
+  architecture.  Both little-endian and big-endian arrays are
+  supported, and a file with little-endian numbers will yield
+  a little-endian array on any machine reading the file.  The
+  types are described in terms of their actual sizes.  For example,
+  if a machine with a 64-bit C "long int" writes out an array with
+  "long ints", a reading machine with 32-bit C "long ints" will yield
+  an array with 64-bit integers.
+
+- Is straightforward to reverse engineer.  Datasets often live longer than
+  the programs that created them.  A competent developer should be
+  able to create a solution in their preferred programming language to
+  read most ``.npy`` files that they have been given without much
+  documentation.
+
+- Allows memory-mapping of the data.  See `open_memmap`.
+
+- Can be read from a filelike stream object instead of an actual file.
+
+- Stores object arrays, i.e. arrays containing elements that are arbitrary
+  Python objects.  Files with object arrays cannot be memory-mapped, but
+  they can be read and written to disk.
+
+Limitations
+-----------
+
+- Arbitrary subclasses of numpy.ndarray are not completely preserved.
+  Subclasses will be accepted for writing, but only the array data will
+  be written out.  A regular numpy.ndarray object will be created
+  upon reading the file.
+
+.. warning::
+
+  Due to limitations in the interpretation of structured dtypes, dtypes
+  with fields with empty names will have the names replaced by 'f0', 'f1',
+  etc. Such arrays will not round-trip through the format entirely
+  accurately. The data is intact; only the field names will differ. We are
+  working on a fix for this. This fix will not require a change in the
+  file format. The arrays with such structures can still be saved and
+  restored, and the correct dtype may be restored by using the
+  ``loadedarray.view(correct_dtype)`` method.
+
+File extensions
+---------------
+
+We recommend using the ``.npy`` and ``.npz`` extensions for files saved
+in this format. This is by no means a requirement; applications may wish
+to use these file formats but use an extension specific to the
+application. In the absence of an obvious alternative, however,
+we suggest using ``.npy`` and ``.npz``.
+
+Version numbering
+-----------------
+
+The version numbering of these formats is independent of NumPy version
+numbering. If the format is upgraded, the code in `numpy.io` will still
+be able to read and write Version 1.0 files.
+
+Format Version 1.0
+------------------
+
+The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
+
+The next 1 byte is an unsigned byte: the major version number of the file
+format, e.g. ``\\x01``.
+
+The next 1 byte is an unsigned byte: the minor version number of the file
+format, e.g. ``\\x00``.  Note: the version of the file format is not tied
+to the version of the numpy package.
+
+The next 2 bytes form a little-endian unsigned short int: the length of
+the header data HEADER_LEN.
+
+The next HEADER_LEN bytes form the header data describing the array's
+format.  It is an ASCII string which contains a Python literal expression
+of a dictionary.  It is terminated by a newline (``\\n``) and padded with
+spaces (``\\x20``) to make the total of
+``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
+by 64 for alignment purposes.
+
+The dictionary contains three keys:
+
+    "descr" : dtype.descr
+      An object that can be passed as an argument to the `numpy.dtype`
+      constructor to create the array's dtype.
+    "fortran_order" : bool
+      Whether the array data is Fortran-contiguous or not.  Since
+      Fortran-contiguous arrays are a common form of non-C-contiguity,
+      we allow them to be written directly to disk for efficiency.
+    "shape" : tuple of int
+      The shape of the array.
+
+For repeatability and readability, the dictionary keys are sorted in
+alphabetic order.  This is for convenience only.  A writer SHOULD implement
+this if possible.  A reader MUST NOT depend on this.
+
+Following the header comes the array data.  If the dtype contains Python
+objects (i.e. ``dtype.hasobject is True``), then the data is a Python
+pickle of the array.  Otherwise the data is the contiguous (either C-
+or Fortran-, depending on ``fortran_order``) bytes of the array.
+Consumers can figure out the number of bytes by multiplying the number
+of elements given by the shape (noting that ``shape=()`` means there is
+1 element) by ``dtype.itemsize``.
+
+Format Version 2.0
+------------------
+
+The version 1.0 format only allowed the array header to have a total size of
+65535 bytes.  This can be exceeded by structured arrays with a large number of
+columns.  The version 2.0 format extends the header size to 4 GiB.
+`numpy.save` will automatically save in 2.0 format if the data requires it,
+else it will always use the more compatible 1.0 format.
+
+The description of the fourth element of the header therefore has become:
+"The next 4 bytes form a little-endian unsigned int: the length of the header
+data HEADER_LEN."
+
+Format Version 3.0
+------------------
+
+This version replaces the ASCII string (which in practice was latin1) with
+a utf8-encoded string, so supports structured types with any unicode field
+names.
+
+Notes
+-----
+The ``.npy`` format, including motivation for creating it and a comparison of
+alternatives, is described in the
+:doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
+evolved with time and this document is more current.
+
+"""
+ """
164
+ import numpy
165
+ import warnings
166
+ from numpy.lib.utils import safe_eval, drop_metadata
167
+ from numpy.compat import (
168
+ isfileobj, os_fspath, pickle
169
+ )
170
+
171
+
172
+ __all__ = []
173
+
174
+
175
+ EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
176
+ MAGIC_PREFIX = b'\x93NUMPY'
177
+ MAGIC_LEN = len(MAGIC_PREFIX) + 2
178
+ ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
179
+ BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
180
+ # allow growth within the address space of a 64 bit machine along one axis
181
+ GROWTH_AXIS_MAX_DIGITS = 21 # = len(str(8*2**64-1)) hypothetical int1 dtype
182
+
183
+ # difference between version 1.0 and 2.0 is a 4 byte (I) header length
184
+ # instead of 2 bytes (H) allowing storage of large structured arrays
185
+ _header_size_info = {
186
+ (1, 0): ('<H', 'latin1'),
187
+ (2, 0): ('<I', 'latin1'),
188
+ (3, 0): ('<I', 'utf8'),
189
+ }
190
+
191
+ # Python's literal_eval is not actually safe for large inputs, since parsing
192
+ # may become slow or even cause interpreter crashes.
193
+ # This is an arbitrary, low limit which should make it safe in practice.
194
+ _MAX_HEADER_SIZE = 10000
195
+
196
+ def _check_version(version):
197
+ if version not in [(1, 0), (2, 0), (3, 0), None]:
198
+ msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
199
+ raise ValueError(msg % (version,))
200
+
201
+ def magic(major, minor):
202
+ """ Return the magic string for the given file format version.
203
+
204
+ Parameters
205
+ ----------
206
+ major : int in [0, 255]
207
+ minor : int in [0, 255]
208
+
209
+ Returns
210
+ -------
211
+ magic : str
212
+
213
+ Raises
214
+ ------
215
+ ValueError if the version cannot be formatted.
216
+ """
217
+ if major < 0 or major > 255:
218
+ raise ValueError("major version must be 0 <= major < 256")
219
+ if minor < 0 or minor > 255:
220
+ raise ValueError("minor version must be 0 <= minor < 256")
221
+ return MAGIC_PREFIX + bytes([major, minor])
222
+
223
+ def read_magic(fp):
224
+ """ Read the magic string to get the version of the file format.
225
+
226
+ Parameters
227
+ ----------
228
+ fp : filelike object
229
+
230
+ Returns
231
+ -------
232
+ major : int
233
+ minor : int
234
+ """
235
+ magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
236
+ if magic_str[:-2] != MAGIC_PREFIX:
237
+ msg = "the magic string is not correct; expected %r, got %r"
238
+ raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
239
+ major, minor = magic_str[-2:]
240
+ return major, minor
241
+
242
+
243
+ def dtype_to_descr(dtype):
244
+ """
245
+ Get a serializable descriptor from the dtype.
246
+
247
+ The .descr attribute of a dtype object cannot be round-tripped through
248
+ the dtype() constructor. Simple types, like dtype('float32'), have
249
+ a descr which looks like a record array with one field with '' as
250
+ a name. The dtype() constructor interprets this as a request to give
251
+ a default name. Instead, we construct descriptor that can be passed to
252
+ dtype().
253
+
254
+ Parameters
255
+ ----------
256
+ dtype : dtype
257
+ The dtype of the array that will be written to disk.
258
+
259
+ Returns
260
+ -------
261
+ descr : object
262
+ An object that can be passed to `numpy.dtype()` in order to
263
+ replicate the input dtype.
264
+
265
+ """
266
+ # NOTE: that drop_metadata may not return the right dtype e.g. for user
267
+ # dtypes. In that case our code below would fail the same, though.
268
+ new_dtype = drop_metadata(dtype)
269
+ if new_dtype is not dtype:
270
+ warnings.warn("metadata on a dtype is not saved to an npy/npz. "
271
+ "Use another format (such as pickle) to store it.",
272
+ UserWarning, stacklevel=2)
273
+ if dtype.names is not None:
274
+ # This is a record array. The .descr is fine. XXX: parts of the
275
+ # record array with an empty name, like padding bytes, still get
276
+ # fiddled with. This needs to be fixed in the C implementation of
277
+ # dtype().
278
+ return dtype.descr
279
+ else:
280
+ return dtype.str
281
+
282
+ def descr_to_dtype(descr):
283
+ """
284
+ Returns a dtype based off the given description.
285
+
286
+ This is essentially the reverse of `dtype_to_descr()`. It will remove
287
+ the valueless padding fields created by, i.e. simple fields like
288
+ dtype('float32'), and then convert the description to its corresponding
289
+ dtype.
290
+
291
+ Parameters
292
+ ----------
293
+ descr : object
294
+ The object retrieved by dtype.descr. Can be passed to
295
+ `numpy.dtype()` in order to replicate the input dtype.
296
+
297
+ Returns
298
+ -------
299
+ dtype : dtype
300
+ The dtype constructed by the description.
301
+
302
+ """
303
+ if isinstance(descr, str):
304
+ # No padding removal needed
305
+ return numpy.dtype(descr)
306
+ elif isinstance(descr, tuple):
307
+ # subtype, will always have a shape descr[1]
308
+ dt = descr_to_dtype(descr[0])
309
+ return numpy.dtype((dt, descr[1]))
310
+
311
+ titles = []
312
+ names = []
313
+ formats = []
314
+ offsets = []
315
+ offset = 0
316
+ for field in descr:
317
+ if len(field) == 2:
318
+ name, descr_str = field
319
+ dt = descr_to_dtype(descr_str)
320
+ else:
321
+ name, descr_str, shape = field
322
+ dt = numpy.dtype((descr_to_dtype(descr_str), shape))
323
+
324
+ # Ignore padding bytes, which will be void bytes with '' as name
325
+ # Once support for blank names is removed, only "if name == ''" needed)
326
+ is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
327
+ if not is_pad:
328
+ title, name = name if isinstance(name, tuple) else (None, name)
329
+ titles.append(title)
330
+ names.append(name)
331
+ formats.append(dt)
332
+ offsets.append(offset)
333
+ offset += dt.itemsize
334
+
335
+ return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
336
+ 'offsets': offsets, 'itemsize': offset})
337
+
338
+ def header_data_from_array_1_0(array):
339
+ """ Get the dictionary of header metadata from a numpy.ndarray.
340
+
341
+ Parameters
342
+ ----------
343
+ array : numpy.ndarray
344
+
345
+ Returns
346
+ -------
347
+ d : dict
348
+ This has the appropriate entries for writing its string representation
349
+ to the header of the file.
350
+ """
351
+ d = {'shape': array.shape}
352
+ if array.flags.c_contiguous:
353
+ d['fortran_order'] = False
354
+ elif array.flags.f_contiguous:
355
+ d['fortran_order'] = True
356
+ else:
357
+ # Totally non-contiguous data. We will have to make it C-contiguous
358
+ # before writing. Note that we need to test for C_CONTIGUOUS first
359
+ # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
360
+ d['fortran_order'] = False
361
+
362
+ d['descr'] = dtype_to_descr(array.dtype)
363
+ return d
364
+
365
+
366
+ def _wrap_header(header, version):
367
+ """
368
+ Takes a stringified header, and attaches the prefix and padding to it
369
+ """
370
+ import struct
371
+ assert version is not None
372
+ fmt, encoding = _header_size_info[version]
373
+ header = header.encode(encoding)
374
+ hlen = len(header) + 1
375
+ padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
376
+ try:
377
+ header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
378
+ except struct.error:
379
+ msg = "Header length {} too big for version={}".format(hlen, version)
380
+ raise ValueError(msg) from None
381
+
382
+ # Pad the header with spaces and a final newline such that the magic
383
+ # string, the header-length short and the header are aligned on a
384
+ # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
385
+ # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
386
+ # offset must be page-aligned (i.e. the beginning of the file).
387
+ return header_prefix + header + b' '*padlen + b'\n'
388
+
389
+
390
+ def _wrap_header_guess_version(header):
391
+ """
392
+ Like `_wrap_header`, but chooses an appropriate version given the contents
393
+ """
394
+ try:
395
+ return _wrap_header(header, (1, 0))
396
+ except ValueError:
397
+ pass
398
+
399
+ try:
400
+ ret = _wrap_header(header, (2, 0))
401
+ except UnicodeEncodeError:
402
+ pass
403
+ else:
404
+ warnings.warn("Stored array in format 2.0. It can only be"
405
+ "read by NumPy >= 1.9", UserWarning, stacklevel=2)
406
+ return ret
407
+
408
+ header = _wrap_header(header, (3, 0))
409
+ warnings.warn("Stored array in format 3.0. It can only be "
410
+ "read by NumPy >= 1.17", UserWarning, stacklevel=2)
411
+ return header
412
+
413
+
414
+ def _write_array_header(fp, d, version=None):
415
+ """ Write the header for an array and returns the version used
416
+
417
+ Parameters
418
+ ----------
419
+ fp : filelike object
420
+ d : dict
421
+ This has the appropriate entries for writing its string representation
422
+ to the header of the file.
423
+ version : tuple or None
424
+ None means use oldest that works. Providing an explicit version will
425
+ raise a ValueError if the format does not allow saving this data.
426
+ Default: None
427
+ """
428
+ header = ["{"]
429
+ for key, value in sorted(d.items()):
430
+ # Need to use repr here, since we eval these when reading
431
+ header.append("'%s': %s, " % (key, repr(value)))
432
+ header.append("}")
433
+ header = "".join(header)
434
+
435
+ # Add some spare space so that the array header can be modified in-place
436
+ # when changing the array size, e.g. when growing it by appending data at
437
+ # the end.
438
+ shape = d['shape']
439
+ header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
440
+ shape[-1 if d['fortran_order'] else 0]
441
+ ))) if len(shape) > 0 else 0)
442
+
443
+ if version is None:
444
+ header = _wrap_header_guess_version(header)
445
+ else:
446
+ header = _wrap_header(header, version)
447
+ fp.write(header)
448
+
449
+ def write_array_header_1_0(fp, d):
450
+ """ Write the header for an array using the 1.0 format.
451
+
452
+ Parameters
453
+ ----------
454
+ fp : filelike object
455
+ d : dict
456
+ This has the appropriate entries for writing its string
457
+ representation to the header of the file.
458
+ """
459
+ _write_array_header(fp, d, (1, 0))
460
+
461
+
462
+ def write_array_header_2_0(fp, d):
463
+ """ Write the header for an array using the 2.0 format.
464
+ The 2.0 format allows storing very large structured arrays.
465
+
466
+ .. versionadded:: 1.9.0
467
+
468
+ Parameters
469
+ ----------
470
+ fp : filelike object
471
+ d : dict
472
+ This has the appropriate entries for writing its string
473
+ representation to the header of the file.
474
+ """
475
+ _write_array_header(fp, d, (2, 0))
476
+
477
+ def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):
478
+ """
479
+ Read an array header from a filelike object using the 1.0 file format
480
+ version.
481
+
482
+ This will leave the file object located just after the header.
483
+
484
+ Parameters
485
+ ----------
486
+ fp : filelike object
487
+ A file object or something with a `.read()` method like a file.
488
+
489
+ Returns
490
+ -------
491
+ shape : tuple of int
492
+ The shape of the array.
493
+ fortran_order : bool
494
+ The array data will be written out directly if it is either
495
+ C-contiguous or Fortran-contiguous. Otherwise, it will be made
496
+ contiguous before writing it out.
497
+ dtype : dtype
498
+ The dtype of the file's data.
499
+ max_header_size : int, optional
500
+ Maximum allowed size of the header. Large headers may not be safe
501
+ to load securely and thus require explicitly passing a larger value.
502
+ See :py:func:`ast.literal_eval()` for details.
503
+
504
+ Raises
505
+ ------
506
+ ValueError
507
+ If the data is invalid.
508
+
509
+ """
510
+ return _read_array_header(
511
+ fp, version=(1, 0), max_header_size=max_header_size)
512
+
513
+ def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):
514
+ """
515
+ Read an array header from a filelike object using the 2.0 file format
516
+ version.
517
+
518
+ This will leave the file object located just after the header.
519
+
520
+ .. versionadded:: 1.9.0
521
+
522
+ Parameters
523
+ ----------
524
+ fp : filelike object
525
+ A file object or something with a `.read()` method like a file.
526
+ max_header_size : int, optional
527
+ Maximum allowed size of the header. Large headers may not be safe
528
+ to load securely and thus require explicitly passing a larger value.
529
+ See :py:func:`ast.literal_eval()` for details.
530
+
531
+ Returns
532
+ -------
533
+ shape : tuple of int
534
+ The shape of the array.
535
+ fortran_order : bool
536
+ The array data will be written out directly if it is either
537
+ C-contiguous or Fortran-contiguous. Otherwise, it will be made
538
+ contiguous before writing it out.
539
+ dtype : dtype
540
+ The dtype of the file's data.
541
+
542
+ Raises
543
+ ------
544
+ ValueError
545
+ If the data is invalid.
546
+
547
+ """
548
+ return _read_array_header(
549
+ fp, version=(2, 0), max_header_size=max_header_size)
550
+
551
+
552
+ def _filter_header(s):
553
+ """Clean up 'L' in npz header ints.
554
+
555
+ Cleans up the 'L' in strings representing integers. Needed to allow npz
556
+ headers produced in Python2 to be read in Python3.
557
+
558
+ Parameters
559
+ ----------
560
+ s : string
561
+ Npy file header.
562
+
563
+ Returns
564
+ -------
565
+ header : str
566
+ Cleaned up header.
567
+
568
+ """
569
+ import tokenize
570
+ from io import StringIO
571
+
572
+ tokens = []
573
+ last_token_was_number = False
574
+ for token in tokenize.generate_tokens(StringIO(s).readline):
575
+ token_type = token[0]
576
+ token_string = token[1]
577
+ if (last_token_was_number and
578
+ token_type == tokenize.NAME and
579
+ token_string == "L"):
580
+ continue
581
+ else:
582
+ tokens.append(token)
583
+ last_token_was_number = (token_type == tokenize.NUMBER)
584
+ return tokenize.untokenize(tokens)
585
+
586
+
587
+ def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE):
+     """
+     see read_array_header_1_0
+     """
+     # Read an unsigned, little-endian short int which has the length of the
+     # header.
+     import struct
+     hinfo = _header_size_info.get(version)
+     if hinfo is None:
+         raise ValueError("Invalid version {!r}".format(version))
+     hlength_type, encoding = hinfo
+
+     hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
+     header_length = struct.unpack(hlength_type, hlength_str)[0]
+     header = _read_bytes(fp, header_length, "array header")
+     header = header.decode(encoding)
+     if len(header) > max_header_size:
+         raise ValueError(
+             f"Header info length ({len(header)}) is large and may not be safe "
+             "to load securely.\n"
+             "To allow loading, adjust `max_header_size` or fully trust "
+             "the `.npy` file using `allow_pickle=True`.\n"
+             "For safety against large resource use or crashes, sandboxing "
+             "may be necessary.")
+
+     # The header is a pretty-printed string representation of a literal
+     # Python dictionary with trailing newlines padded to an ARRAY_ALIGN byte
+     # boundary.  The keys are strings.
+     #   "shape" : tuple of int
+     #   "fortran_order" : bool
+     #   "descr" : dtype.descr
+     # Versions (2, 0) and (1, 0) could have been created by a Python 2
+     # implementation before header filtering was implemented.
+     #
+     # For performance reasons, we try without _filter_header first though
+     try:
+         d = safe_eval(header)
+     except SyntaxError as e:
+         if version <= (2, 0):
+             header = _filter_header(header)
+             try:
+                 d = safe_eval(header)
+             except SyntaxError as e2:
+                 msg = "Cannot parse header: {!r}"
+                 raise ValueError(msg.format(header)) from e2
+             else:
+                 warnings.warn(
+                     "Reading `.npy` or `.npz` file required additional "
+                     "header parsing as it was created on Python 2. Save the "
+                     "file again to speed up loading and avoid this warning.",
+                     UserWarning, stacklevel=4)
+         else:
+             msg = "Cannot parse header: {!r}"
+             raise ValueError(msg.format(header)) from e
+     if not isinstance(d, dict):
+         msg = "Header is not a dictionary: {!r}"
+         raise ValueError(msg.format(d))
+
+     if EXPECTED_KEYS != d.keys():
+         keys = sorted(d.keys())
+         msg = "Header does not contain the correct keys: {!r}"
+         raise ValueError(msg.format(keys))
+
+     # Sanity-check the values.
+     if (not isinstance(d['shape'], tuple) or
+             not all(isinstance(x, int) for x in d['shape'])):
+         msg = "shape is not valid: {!r}"
+         raise ValueError(msg.format(d['shape']))
+     if not isinstance(d['fortran_order'], bool):
+         msg = "fortran_order is not a valid bool: {!r}"
+         raise ValueError(msg.format(d['fortran_order']))
+     try:
+         dtype = descr_to_dtype(d['descr'])
+     except TypeError as e:
+         msg = "descr is not a valid dtype descriptor: {!r}"
+         raise ValueError(msg.format(d['descr'])) from e
+
+     return d['shape'], d['fortran_order'], dtype
+
+
+ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
+     """
+     Write an array to an NPY file, including a header.
+
+     If the array is neither C-contiguous nor Fortran-contiguous AND the
+     file_like object is not a real file object, this function will have to
+     copy data in memory.
+
+     Parameters
+     ----------
+     fp : file_like object
+         An open, writable file object, or similar object with a
+         ``.write()`` method.
+     array : ndarray
+         The array to write to disk.
+     version : (int, int) or None, optional
+         The version number of the format. None means use the oldest
+         supported version that is able to store the data. Default: None
+     allow_pickle : bool, optional
+         Whether to allow writing pickled data. Default: True
+     pickle_kwargs : dict, optional
+         Additional keyword arguments to pass to pickle.dump, excluding
+         'protocol'. These are only useful when pickling objects in object
+         arrays on Python 3 to a Python 2 compatible format.
+
+     Raises
+     ------
+     ValueError
+         If the array cannot be persisted. This includes the case of
+         allow_pickle=False and array being an object array.
+     Various other errors
+         If the array contains Python objects as part of its dtype, the
+         process of pickling them may raise various errors if the objects
+         are not picklable.
+
+     """
+     _check_version(version)
+     _write_array_header(fp, header_data_from_array_1_0(array), version)
+
+     if array.itemsize == 0:
+         buffersize = 0
+     else:
+         # Set buffer size to 16 MiB to hide the Python loop overhead.
+         buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
+
+     if array.dtype.hasobject:
+         # We contain Python objects so we cannot write out the data
+         # directly.  Instead, we will pickle it out
+         if not allow_pickle:
+             raise ValueError("Object arrays cannot be saved when "
+                              "allow_pickle=False")
+         if pickle_kwargs is None:
+             pickle_kwargs = {}
+         pickle.dump(array, fp, protocol=3, **pickle_kwargs)
+     elif array.flags.f_contiguous and not array.flags.c_contiguous:
+         if isfileobj(fp):
+             array.T.tofile(fp)
+         else:
+             for chunk in numpy.nditer(
+                     array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+                     buffersize=buffersize, order='F'):
+                 fp.write(chunk.tobytes('C'))
+     else:
+         if isfileobj(fp):
+             array.tofile(fp)
+         else:
+             for chunk in numpy.nditer(
+                     array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+                     buffersize=buffersize, order='C'):
+                 fp.write(chunk.tobytes('C'))
+
+
+ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
+                max_header_size=_MAX_HEADER_SIZE):
+     """
+     Read an array from an NPY file.
+
+     Parameters
+     ----------
+     fp : file_like object
+         If this is not a real file object, then this may take extra memory
+         and time.
+     allow_pickle : bool, optional
+         Whether to allow reading pickled data. Default: False
+
+         .. versionchanged:: 1.16.3
+             Made default False in response to CVE-2019-6446.
+
+     pickle_kwargs : dict
+         Additional keyword arguments to pass to pickle.load. These are only
+         useful when loading object arrays saved on Python 2 when using
+         Python 3.
+     max_header_size : int, optional
+         Maximum allowed size of the header.  Large headers may not be safe
+         to load securely and thus require explicitly passing a larger value.
+         See :py:func:`ast.literal_eval()` for details.
+         This option is ignored when `allow_pickle` is passed.  In that case
+         the file is by definition trusted and the limit is unnecessary.
+
+     Returns
+     -------
+     array : ndarray
+         The array from the data on disk.
+
+     Raises
+     ------
+     ValueError
+         If the data is invalid, or allow_pickle=False and the file contains
+         an object array.
+
+     """
+     if allow_pickle:
+         # Effectively ignore max_header_size, since `allow_pickle` indicates
+         # that the input is fully trusted.
+         max_header_size = 2**64
+
+     version = read_magic(fp)
+     _check_version(version)
+     shape, fortran_order, dtype = _read_array_header(
+         fp, version, max_header_size=max_header_size)
+     if len(shape) == 0:
+         count = 1
+     else:
+         count = numpy.multiply.reduce(shape, dtype=numpy.int64)
+
+     # Now read the actual data.
+     if dtype.hasobject:
+         # The array contained Python objects. We need to unpickle the data.
+         if not allow_pickle:
+             raise ValueError("Object arrays cannot be loaded when "
+                              "allow_pickle=False")
+         if pickle_kwargs is None:
+             pickle_kwargs = {}
+         try:
+             array = pickle.load(fp, **pickle_kwargs)
+         except UnicodeError as err:
+             # Friendlier error message
+             raise UnicodeError("Unpickling a python object failed: %r\n"
+                                "You may need to pass the encoding= option "
+                                "to numpy.load" % (err,)) from err
+     else:
+         if isfileobj(fp):
+             # We can use the fast fromfile() function.
+             array = numpy.fromfile(fp, dtype=dtype, count=count)
+         else:
+             # This is not a real file. We have to read it the
+             # memory-intensive way.
+             # crc32 module fails on reads greater than 2 ** 32 bytes,
+             # breaking large reads from gzip streams. Chunk reads to
+             # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
+             # of the read. In non-chunked case count < max_read_count, so
+             # only one read is performed.
+
+             # Use np.ndarray instead of np.empty since the latter does
+             # not correctly instantiate zero-width string dtypes; see
+             # https://github.com/numpy/numpy/pull/6430
+             array = numpy.ndarray(count, dtype=dtype)
+
+             if dtype.itemsize > 0:
+                 # If dtype.itemsize == 0 then there's nothing more to read
+                 max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
+
+                 for i in range(0, count, max_read_count):
+                     read_count = min(max_read_count, count - i)
+                     read_size = int(read_count * dtype.itemsize)
+                     data = _read_bytes(fp, read_size, "array data")
+                     array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
+                                                              count=read_count)
+
+         if fortran_order:
+             array.shape = shape[::-1]
+             array = array.transpose()
+         else:
+             array.shape = shape
+
+     return array
+
+
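+ # Example (illustrative): object arrays are refused on read unless the
+ # caller opts in with ``allow_pickle=True``.
+ #
+ #     >>> import io
+ #     >>> import numpy as np
+ #     >>> from numpy.lib import format as npy_format
+ #     >>> buf = io.BytesIO()
+ #     >>> npy_format.write_array(buf, np.array([{'a': 1}], dtype=object))
+ #     >>> _ = buf.seek(0)
+ #     >>> npy_format.read_array(buf)        # doctest: +IGNORE_EXCEPTION_DETAIL
+ #     Traceback (most recent call last):
+ #         ...
+ #     ValueError: Object arrays cannot be loaded when allow_pickle=False
+ #     >>> _ = buf.seek(0)
+ #     >>> npy_format.read_array(buf, allow_pickle=True)
+ #     array([{'a': 1}], dtype=object)
+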
+ def open_memmap(filename, mode='r+', dtype=None, shape=None,
+                 fortran_order=False, version=None, *,
+                 max_header_size=_MAX_HEADER_SIZE):
+     """
+     Open a .npy file as a memory-mapped array.
+
+     This may be used to read an existing file or create a new one.
+
+     Parameters
+     ----------
+     filename : str or path-like
+         The name of the file on disk.  This may *not* be a file-like
+         object.
+     mode : str, optional
+         The mode in which to open the file; the default is 'r+'.  In
+         addition to the standard file modes, 'c' is also accepted to mean
+         "copy on write."  See `memmap` for the available mode strings.
+     dtype : data-type, optional
+         The data type of the array if we are creating a new file in "write"
+         mode, if not, `dtype` is ignored.  The default value is None, which
+         results in a data-type of `float64`.
+     shape : tuple of int
+         The shape of the array if we are creating a new file in "write"
+         mode, in which case this parameter is required.  Otherwise, this
+         parameter is ignored and is thus optional.
+     fortran_order : bool, optional
+         Whether the array should be Fortran-contiguous (True) or
+         C-contiguous (False, the default) if we are creating a new file in
+         "write" mode.
+     version : tuple of int (major, minor) or None
+         If the mode is a "write" mode, then this is the version of the file
+         format used to create the file.  None means use the oldest
+         supported version that is able to store the data.  Default: None
+     max_header_size : int, optional
+         Maximum allowed size of the header.  Large headers may not be safe
+         to load securely and thus require explicitly passing a larger value.
+         See :py:func:`ast.literal_eval()` for details.
+
+     Returns
+     -------
+     marray : memmap
+         The memory-mapped array.
+
+     Raises
+     ------
+     ValueError
+         If the data or the mode is invalid.
+     OSError
+         If the file is not found or cannot be opened correctly.
+
+     See Also
+     --------
+     numpy.memmap
+
+     """
+     if isfileobj(filename):
+         raise ValueError("Filename must be a string or a path-like object."
+                          "  Memmap cannot use existing file handles.")
+
+     if 'w' in mode:
+         # We are creating the file, not reading it.
+         # Check if we ought to create the file.
+         _check_version(version)
+         # Ensure that the given dtype is an authentic dtype object rather
+         # than just something that can be interpreted as a dtype object.
+         dtype = numpy.dtype(dtype)
+         if dtype.hasobject:
+             msg = "Array can't be memory-mapped: Python objects in dtype."
+             raise ValueError(msg)
+         d = dict(
+             descr=dtype_to_descr(dtype),
+             fortran_order=fortran_order,
+             shape=shape,
+         )
+         # If we got here, then it should be safe to create the file.
+         with open(os_fspath(filename), mode+'b') as fp:
+             _write_array_header(fp, d, version)
+             offset = fp.tell()
+     else:
+         # Read the header of the file first.
+         with open(os_fspath(filename), 'rb') as fp:
+             version = read_magic(fp)
+             _check_version(version)
+
+             shape, fortran_order, dtype = _read_array_header(
+                 fp, version, max_header_size=max_header_size)
+             if dtype.hasobject:
+                 msg = "Array can't be memory-mapped: Python objects in dtype."
+                 raise ValueError(msg)
+             offset = fp.tell()
+
+     if fortran_order:
+         order = 'F'
+     else:
+         order = 'C'
+
+     # We need to change a write-only mode to a read-write mode since we've
+     # already written data to the file.
+     if mode == 'w+':
+         mode = 'r+'
+
+     marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
+                           mode=mode, offset=offset)
+
+     return marray
+
+
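+ # Example (illustrative, assuming a scratch file is acceptable): create a
+ # new memory-mapped .npy file, write through it, then reopen it read-only.
+ #
+ #     >>> import tempfile, os
+ #     >>> import numpy as np
+ #     >>> from numpy.lib.format import open_memmap
+ #     >>> path = os.path.join(tempfile.mkdtemp(), 'scratch.npy')
+ #     >>> mm = open_memmap(path, mode='w+', dtype=np.float32, shape=(2, 2))
+ #     >>> mm[:] = 7
+ #     >>> mm.flush(); del mm
+ #     >>> open_memmap(path, mode='r')[0, 0]
+ #     7.0
+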
+ def _read_bytes(fp, size, error_template="ran out of data"):
+     """
+     Read from file-like object until size bytes are read.
+     Raises ValueError if EOF is encountered before size bytes are read.
+     Non-blocking objects only supported if they derive from io objects.
+
+     Required as e.g. ZipExtFile in Python 2.6 can return less data than
+     requested.
+     """
+     data = bytes()
+     while True:
+         # io files (default in Python 3) return None or raise on
+         # would-block; a Python 2 file would truncate, and probably nothing
+         # can be done about that.  Note that regular files can't be
+         # non-blocking.
+         try:
+             r = fp.read(size - len(data))
+             data += r
+             if len(r) == 0 or len(data) == size:
+                 break
+         except BlockingIOError:
+             pass
+     if len(data) != size:
+         msg = "EOF: reading %s, expected %d bytes got %d"
+         raise ValueError(msg % (error_template, size, len(data)))
+     else:
+         return data
.venv/lib/python3.11/site-packages/numpy/lib/format.pyi ADDED
@@ -0,0 +1,22 @@
+ from typing import Any, Literal, Final
+
+ __all__: list[str]
+
+ EXPECTED_KEYS: Final[set[str]]
+ MAGIC_PREFIX: Final[bytes]
+ MAGIC_LEN: Literal[8]
+ ARRAY_ALIGN: Literal[64]
+ BUFFER_SIZE: Literal[262144]  # 2**18
+
+ def magic(major, minor): ...
+ def read_magic(fp): ...
+ def dtype_to_descr(dtype): ...
+ def descr_to_dtype(descr): ...
+ def header_data_from_array_1_0(array): ...
+ def write_array_header_1_0(fp, d): ...
+ def write_array_header_2_0(fp, d): ...
+ def read_array_header_1_0(fp): ...
+ def read_array_header_2_0(fp): ...
+ def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...
+ def read_array(fp, allow_pickle=..., pickle_kwargs=...): ...
+ def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ...
.venv/lib/python3.11/site-packages/numpy/lib/function_base.pyi ADDED
@@ -0,0 +1,697 @@
+ import sys
+ from collections.abc import Sequence, Iterator, Callable, Iterable
+ from typing import (
+     Literal as L,
+     Any,
+     TypeVar,
+     overload,
+     Protocol,
+     SupportsIndex,
+     SupportsInt,
+ )
+
+ if sys.version_info >= (3, 10):
+     from typing import TypeGuard
+ else:
+     from typing_extensions import TypeGuard
+
+ from numpy import (
+     vectorize as vectorize,
+     ufunc,
+     generic,
+     floating,
+     complexfloating,
+     intp,
+     float64,
+     complex128,
+     timedelta64,
+     datetime64,
+     object_,
+     _OrderKACF,
+ )
+
+ from numpy._typing import (
+     NDArray,
+     ArrayLike,
+     DTypeLike,
+     _ShapeLike,
+     _ScalarLike_co,
+     _DTypeLike,
+     _ArrayLike,
+     _ArrayLikeInt_co,
+     _ArrayLikeFloat_co,
+     _ArrayLikeComplex_co,
+     _ArrayLikeTD64_co,
+     _ArrayLikeDT64_co,
+     _ArrayLikeObject_co,
+     _FloatLike_co,
+     _ComplexLike_co,
+ )
+
+ from numpy.core.function_base import (
+     add_newdoc as add_newdoc,
+ )
+
+ from numpy.core.multiarray import (
+     add_docstring as add_docstring,
+     bincount as bincount,
+ )
+
+ from numpy.core.umath import _add_newdoc_ufunc
+
+ _T = TypeVar("_T")
+ _T_co = TypeVar("_T_co", covariant=True)
+ _SCT = TypeVar("_SCT", bound=generic)
+ _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+ _2Tuple = tuple[_T, _T]
+
+ class _TrimZerosSequence(Protocol[_T_co]):
+     def __len__(self) -> int: ...
+     def __getitem__(self, key: slice, /) -> _T_co: ...
+     def __iter__(self) -> Iterator[Any]: ...
+
+ class _SupportsWriteFlush(Protocol):
+     def write(self, s: str, /) -> object: ...
+     def flush(self) -> object: ...
+
+ __all__: list[str]
+
+ # NOTE: This is in reality a re-export of `np.core.umath._add_newdoc_ufunc`
+ def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None: ...
+
+ @overload
+ def rot90(
+     m: _ArrayLike[_SCT],
+     k: int = ...,
+     axes: tuple[int, int] = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def rot90(
+     m: ArrayLike,
+     k: int = ...,
+     axes: tuple[int, int] = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def flip(m: _SCT, axis: None = ...) -> _SCT: ...
+ @overload
+ def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
+ @overload
+ def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
+ @overload
+ def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ...
+
+ def iterable(y: object) -> TypeGuard[Iterable[Any]]: ...
+
+ @overload
+ def average(
+     a: _ArrayLikeFloat_co,
+     axis: None = ...,
+     weights: None | _ArrayLikeFloat_co = ...,
+     returned: L[False] = ...,
+     keepdims: L[False] = ...,
+ ) -> floating[Any]: ...
+ @overload
+ def average(
+     a: _ArrayLikeComplex_co,
+     axis: None = ...,
+     weights: None | _ArrayLikeComplex_co = ...,
+     returned: L[False] = ...,
+     keepdims: L[False] = ...,
+ ) -> complexfloating[Any, Any]: ...
+ @overload
+ def average(
+     a: _ArrayLikeObject_co,
+     axis: None = ...,
+     weights: None | Any = ...,
+     returned: L[False] = ...,
+     keepdims: L[False] = ...,
+ ) -> Any: ...
+ @overload
+ def average(
+     a: _ArrayLikeFloat_co,
+     axis: None = ...,
+     weights: None | _ArrayLikeFloat_co = ...,
+     returned: L[True] = ...,
+     keepdims: L[False] = ...,
+ ) -> _2Tuple[floating[Any]]: ...
+ @overload
+ def average(
+     a: _ArrayLikeComplex_co,
+     axis: None = ...,
+     weights: None | _ArrayLikeComplex_co = ...,
+     returned: L[True] = ...,
+     keepdims: L[False] = ...,
+ ) -> _2Tuple[complexfloating[Any, Any]]: ...
+ @overload
+ def average(
+     a: _ArrayLikeObject_co,
+     axis: None = ...,
+     weights: None | Any = ...,
+     returned: L[True] = ...,
+     keepdims: L[False] = ...,
+ ) -> _2Tuple[Any]: ...
+ @overload
+ def average(
+     a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+     axis: None | _ShapeLike = ...,
+     weights: None | Any = ...,
+     returned: L[False] = ...,
+     keepdims: bool = ...,
+ ) -> Any: ...
+ @overload
+ def average(
+     a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+     axis: None | _ShapeLike = ...,
+     weights: None | Any = ...,
+     returned: L[True] = ...,
+     keepdims: bool = ...,
+ ) -> _2Tuple[Any]: ...
+
+ @overload
+ def asarray_chkfinite(
+     a: _ArrayLike[_SCT],
+     dtype: None = ...,
+     order: _OrderKACF = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def asarray_chkfinite(
+     a: object,
+     dtype: None = ...,
+     order: _OrderKACF = ...,
+ ) -> NDArray[Any]: ...
+ @overload
+ def asarray_chkfinite(
+     a: Any,
+     dtype: _DTypeLike[_SCT],
+     order: _OrderKACF = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def asarray_chkfinite(
+     a: Any,
+     dtype: DTypeLike,
+     order: _OrderKACF = ...,
+ ) -> NDArray[Any]: ...
+
+ # TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate`
+ # xref python/mypy#8645
+ @overload
+ def piecewise(
+     x: _ArrayLike[_SCT],
+     condlist: ArrayLike,
+     funclist: Sequence[Any | Callable[..., Any]],
+     *args: Any,
+     **kw: Any,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def piecewise(
+     x: ArrayLike,
+     condlist: ArrayLike,
+     funclist: Sequence[Any | Callable[..., Any]],
+     *args: Any,
+     **kw: Any,
+ ) -> NDArray[Any]: ...
+
+ def select(
+     condlist: Sequence[ArrayLike],
+     choicelist: Sequence[ArrayLike],
+     default: ArrayLike = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def copy(
+     a: _ArrayType,
+     order: _OrderKACF,
+     subok: L[True],
+ ) -> _ArrayType: ...
+ @overload
+ def copy(
+     a: _ArrayType,
+     order: _OrderKACF = ...,
+     *,
+     subok: L[True],
+ ) -> _ArrayType: ...
+ @overload
+ def copy(
+     a: _ArrayLike[_SCT],
+     order: _OrderKACF = ...,
+     subok: L[False] = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def copy(
+     a: ArrayLike,
+     order: _OrderKACF = ...,
+     subok: L[False] = ...,
+ ) -> NDArray[Any]: ...
+
+ def gradient(
+     f: ArrayLike,
+     *varargs: ArrayLike,
+     axis: None | _ShapeLike = ...,
+     edge_order: L[1, 2] = ...,
+ ) -> Any: ...
+
+ @overload
+ def diff(
+     a: _T,
+     n: L[0],
+     axis: SupportsIndex = ...,
+     prepend: ArrayLike = ...,
+     append: ArrayLike = ...,
+ ) -> _T: ...
+ @overload
+ def diff(
+     a: ArrayLike,
+     n: int = ...,
+     axis: SupportsIndex = ...,
+     prepend: ArrayLike = ...,
+     append: ArrayLike = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def interp(
+     x: _ArrayLikeFloat_co,
+     xp: _ArrayLikeFloat_co,
+     fp: _ArrayLikeFloat_co,
+     left: None | _FloatLike_co = ...,
+     right: None | _FloatLike_co = ...,
+     period: None | _FloatLike_co = ...,
+ ) -> NDArray[float64]: ...
+ @overload
+ def interp(
+     x: _ArrayLikeFloat_co,
+     xp: _ArrayLikeFloat_co,
+     fp: _ArrayLikeComplex_co,
+     left: None | _ComplexLike_co = ...,
+     right: None | _ComplexLike_co = ...,
+     period: None | _FloatLike_co = ...,
+ ) -> NDArray[complex128]: ...
+
+ @overload
+ def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ...
+ @overload
+ def angle(z: object_, deg: bool = ...) -> Any: ...
+ @overload
+ def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating[Any]]: ...
+ @overload
+ def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ...
+
+ @overload
+ def unwrap(
+     p: _ArrayLikeFloat_co,
+     discont: None | float = ...,
+     axis: int = ...,
+     *,
+     period: float = ...,
+ ) -> NDArray[floating[Any]]: ...
+ @overload
+ def unwrap(
+     p: _ArrayLikeObject_co,
+     discont: None | float = ...,
+     axis: int = ...,
+     *,
+     period: float = ...,
+ ) -> NDArray[object_]: ...
+
+ def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ...
+
+ def trim_zeros(
+     filt: _TrimZerosSequence[_T],
+     trim: L["f", "b", "fb", "bf"] = ...,
+ ) -> _T: ...
+
+ @overload
+ def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+ @overload
+ def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...
+
+ def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...
+
+ def disp(
+     mesg: object,
+     device: None | _SupportsWriteFlush = ...,
+     linefeed: bool = ...,
+ ) -> None: ...
+
+ @overload
+ def cov(
+     m: _ArrayLikeFloat_co,
+     y: None | _ArrayLikeFloat_co = ...,
+     rowvar: bool = ...,
+     bias: bool = ...,
+     ddof: None | SupportsIndex | SupportsInt = ...,
+     fweights: None | ArrayLike = ...,
+     aweights: None | ArrayLike = ...,
+     *,
+     dtype: None = ...,
+ ) -> NDArray[floating[Any]]: ...
+ @overload
+ def cov(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     bias: bool = ...,
+     ddof: None | SupportsIndex | SupportsInt = ...,
+     fweights: None | ArrayLike = ...,
+     aweights: None | ArrayLike = ...,
+     *,
+     dtype: None = ...,
+ ) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def cov(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     bias: bool = ...,
+     ddof: None | SupportsIndex | SupportsInt = ...,
+     fweights: None | ArrayLike = ...,
+     aweights: None | ArrayLike = ...,
+     *,
+     dtype: _DTypeLike[_SCT],
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def cov(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     bias: bool = ...,
+     ddof: None | SupportsIndex | SupportsInt = ...,
+     fweights: None | ArrayLike = ...,
+     aweights: None | ArrayLike = ...,
+     *,
+     dtype: DTypeLike,
+ ) -> NDArray[Any]: ...
+
+ # NOTE `bias` and `ddof` have been deprecated
+ @overload
+ def corrcoef(
+     m: _ArrayLikeFloat_co,
+     y: None | _ArrayLikeFloat_co = ...,
+     rowvar: bool = ...,
+     *,
+     dtype: None = ...,
+ ) -> NDArray[floating[Any]]: ...
+ @overload
+ def corrcoef(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     *,
+     dtype: None = ...,
+ ) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def corrcoef(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     *,
+     dtype: _DTypeLike[_SCT],
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def corrcoef(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     *,
+     dtype: DTypeLike,
+ ) -> NDArray[Any]: ...
+
+ def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+ def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+ def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+ def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+ def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+
+ def kaiser(
+     M: _FloatLike_co,
+     beta: _FloatLike_co,
+ ) -> NDArray[floating[Any]]: ...
+
+ @overload
+ def sinc(x: _FloatLike_co) -> floating[Any]: ...
+ @overload
+ def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+ @overload
+ def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+ @overload
+ def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+ # NOTE: Deprecated
+ # def msort(a: ArrayLike) -> NDArray[Any]: ...
+
+ @overload
+ def median(
+     a: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     keepdims: L[False] = ...,
+ ) -> floating[Any]: ...
+ @overload
+ def median(
+     a: _ArrayLikeComplex_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     keepdims: L[False] = ...,
+ ) -> complexfloating[Any, Any]: ...
+ @overload
+ def median(
+     a: _ArrayLikeTD64_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     keepdims: L[False] = ...,
+ ) -> timedelta64: ...
+ @overload
+ def median(
+     a: _ArrayLikeObject_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     keepdims: L[False] = ...,
+ ) -> Any: ...
+ @overload
+ def median(
+     a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+     axis: None | _ShapeLike = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     keepdims: bool = ...,
+ ) -> Any: ...
+ @overload
+ def median(
+     a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+     axis: None | _ShapeLike = ...,
+     out: _ArrayType = ...,
+     overwrite_input: bool = ...,
+     keepdims: bool = ...,
+ ) -> _ArrayType: ...
+
+ _MethodKind = L[
+     "inverted_cdf",
+     "averaged_inverted_cdf",
+     "closest_observation",
+     "interpolated_inverted_cdf",
+     "hazen",
+     "weibull",
+     "linear",
+     "median_unbiased",
+     "normal_unbiased",
+     "lower",
+     "higher",
+     "midpoint",
+     "nearest",
+ ]
+
+ @overload
+ def percentile(
+     a: _ArrayLikeFloat_co,
+     q: _FloatLike_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> floating[Any]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeComplex_co,
+     q: _FloatLike_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> complexfloating[Any, Any]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeTD64_co,
+     q: _FloatLike_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> timedelta64: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeDT64_co,
+     q: _FloatLike_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> datetime64: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeObject_co,
+     q: _FloatLike_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> Any: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeFloat_co,
+     q: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> NDArray[floating[Any]]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeComplex_co,
+     q: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeTD64_co,
+     q: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> NDArray[timedelta64]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeDT64_co,
+     q: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> NDArray[datetime64]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeObject_co,
+     q: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> NDArray[object_]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+     q: _ArrayLikeFloat_co,
+     axis: None | _ShapeLike = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: bool = ...,
+ ) -> Any: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+     q: _ArrayLikeFloat_co,
+     axis: None | _ShapeLike = ...,
+     out: _ArrayType = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: bool = ...,
+ ) -> _ArrayType: ...
+
+ # NOTE: Not an alias, but they do have identical signatures
+ # (that we can reuse)
+ quantile = percentile
+
+ # TODO: Returns a scalar for <= 1D array-likes; returns an ndarray otherwise
+ def trapz(
+     y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+     x: None | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co = ...,
+     dx: float = ...,
+     axis: SupportsIndex = ...,
+ ) -> Any: ...
+
+ def meshgrid(
+     *xi: ArrayLike,
+     copy: bool = ...,
+     sparse: bool = ...,
+     indexing: L["xy", "ij"] = ...,
+ ) -> list[NDArray[Any]]: ...
+
+ @overload
+ def delete(
+     arr: _ArrayLike[_SCT],
+     obj: slice | _ArrayLikeInt_co,
+     axis: None | SupportsIndex = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def delete(
+     arr: ArrayLike,
+     obj: slice | _ArrayLikeInt_co,
+     axis: None | SupportsIndex = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def insert(
+     arr: _ArrayLike[_SCT],
+     obj: slice | _ArrayLikeInt_co,
+     values: ArrayLike,
+     axis: None | SupportsIndex = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def insert(
+     arr: ArrayLike,
+     obj: slice | _ArrayLikeInt_co,
+     values: ArrayLike,
+     axis: None | SupportsIndex = ...,
+ ) -> NDArray[Any]: ...
+
+ def append(
+     arr: ArrayLike,
+     values: ArrayLike,
+     axis: None | SupportsIndex = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def digitize(
+     x: _FloatLike_co,
+     bins: _ArrayLikeFloat_co,
+     right: bool = ...,
+ ) -> intp: ...
+ @overload
+ def digitize(
+     x: _ArrayLikeFloat_co,
+     bins: _ArrayLikeFloat_co,
+     right: bool = ...,
+ ) -> NDArray[intp]: ...
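+
+ # Example (illustrative): how the two `digitize` overloads above resolve at
+ # runtime; a scalar `x` yields a single `intp` index, an array yields an
+ # index array.
+ #
+ #     >>> import numpy as np
+ #     >>> bins = np.array([0.0, 1.0, 2.0])
+ #     >>> int(np.digitize(0.5, bins))
+ #     1
+ #     >>> np.digitize([0.5, 1.5], bins)
+ #     array([1, 2])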
.venv/lib/python3.11/site-packages/numpy/lib/histograms.py ADDED
@@ -0,0 +1,1072 @@
+ """
+ Histogram-related functions
+ """
+ import contextlib
+ import functools
+ import operator
+ import warnings
+
+ import numpy as np
+ from numpy.core import overrides
+
+ __all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
+
+ array_function_dispatch = functools.partial(
+     overrides.array_function_dispatch, module='numpy')
+
+ # range is a keyword argument to many functions, so save the builtin so they
+ # can use it.
+ _range = range
+
+
+ def _ptp(x):
+     """Peak-to-peak value of x.
+
+     This implementation avoids the problem of signed integer arrays having a
+     peak-to-peak value that cannot be represented with the array's data type.
+     This function returns an unsigned value for signed integer arrays.
+     """
+     return _unsigned_subtract(x.max(), x.min())
+
+
+ def _hist_bin_sqrt(x, range):
+     """
+     Square root histogram bin estimator.
+
+     Bin width is inversely proportional to the square root of the data
+     size.  Used by many programs for its simplicity.
+
+     Parameters
+     ----------
+     x : array_like
+         Input data that is to be histogrammed, trimmed to range. May not
+         be empty.
+
+     Returns
+     -------
+     h : An estimate of the optimal bin width for the given data.
+     """
+     del range  # unused
+     return _ptp(x) / np.sqrt(x.size)
+
+
+ def _hist_bin_sturges(x, range):
+     """
+     Sturges histogram bin estimator.
+
+     A very simplistic estimator based on the assumption of normality of
+     the data. This estimator has poor performance for non-normal data,
+     which becomes especially obvious for large data sets. The estimate
+     depends only on size of the data.
+
+     Parameters
+     ----------
+     x : array_like
+         Input data that is to be histogrammed, trimmed to range. May not
+         be empty.
+
+     Returns
+     -------
+     h : An estimate of the optimal bin width for the given data.
+     """
+     del range  # unused
+     return _ptp(x) / (np.log2(x.size) + 1.0)
+
+
+ def _hist_bin_rice(x, range):
+     """
+     Rice histogram bin estimator.
+
+     Another simple estimator with no normality assumption. It has better
+     performance for large data than Sturges, but tends to overestimate
+     the number of bins. The number of bins is proportional to the cube
+     root of data size (asymptotically optimal). The estimate depends
+     only on size of the data.
+
+     Parameters
+     ----------
+     x : array_like
+         Input data that is to be histogrammed, trimmed to range. May not
+         be empty.
+
+     Returns
+     -------
+     h : An estimate of the optimal bin width for the given data.
+     """
+     del range  # unused
+     return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
+
+
+ def _hist_bin_scott(x, range):
+     """
+     Scott histogram bin estimator.
+
+     The binwidth is proportional to the standard deviation of the data
+     and inversely proportional to the cube root of data size
+     (asymptotically optimal).
+
+     Parameters
+     ----------
+     x : array_like
+         Input data that is to be histogrammed, trimmed to range. May not
+         be empty.
+
+     Returns
+     -------
+     h : An estimate of the optimal bin width for the given data.
+     """
+     del range  # unused
+     return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
+
+
+ def _hist_bin_stone(x, range):
+     """
+     Histogram bin estimator based on minimizing the estimated integrated
+     squared error (ISE).
+
+     The number of bins is chosen by minimizing the estimated ISE against
+     the unknown true distribution.  The ISE is estimated using
+     cross-validation and can be regarded as a generalization of Scott's
+     rule.
+     https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
+
+     This paper by Stone appears to be the origination of this rule.
+     http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
+
+     Parameters
+     ----------
+     x : array_like
+         Input data that is to be histogrammed, trimmed to range. May not
+         be empty.
+     range : (float, float)
+         The lower and upper range of the bins.
+
+     Returns
+     -------
+     h : An estimate of the optimal bin width for the given data.
+     """
+
+     n = x.size
+     ptp_x = _ptp(x)
+     if n <= 1 or ptp_x == 0:
+         return 0
+
+     def jhat(nbins):
+         hh = ptp_x / nbins
+         p_k = np.histogram(x, bins=nbins, range=range)[0] / n
+         return (2 - (n + 1) * p_k.dot(p_k)) / hh
+
+     nbins_upper_bound = max(100, int(np.sqrt(n)))
+     nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
+     if nbins == nbins_upper_bound:
+         warnings.warn("The number of bins estimated may be suboptimal.",
+                       RuntimeWarning, stacklevel=3)
+     return ptp_x / nbins
+
+
+ def _hist_bin_doane(x, range):
+     """
+     Doane's histogram bin estimator.
+
+     Improved version of Sturges' formula which works better for
+     non-normal data. See
+     stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
+
+     Parameters
+     ----------
+     x : array_like
+         Input data that is to be histogrammed, trimmed to range. May not
+         be empty.
+
+     Returns
+     -------
+     h : An estimate of the optimal bin width for the given data.
+     """
+     del range  # unused
+     if x.size > 2:
+         sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
+         sigma = np.std(x)
+         if sigma > 0.0:
+             # These three operations add up to
+             #   g1 = np.mean(((x - np.mean(x)) / sigma)**3)
+             # but use only one temp array instead of three
+             temp = x - np.mean(x)
+             np.true_divide(temp, sigma, temp)
+             np.power(temp, 3, temp)
+             g1 = np.mean(temp)
+             return _ptp(x) / (1.0 + np.log2(x.size) +
+                               np.log2(1.0 + np.absolute(g1) / sg1))
+     return 0.0
+
+
+ def _hist_bin_fd(x, range):
+     """
+     The Freedman-Diaconis histogram bin estimator.
+
+     The Freedman-Diaconis rule uses interquartile range (IQR) to
+     estimate binwidth. It is considered a variation of the Scott rule
+     with more robustness as the IQR is less affected by outliers than
+     the standard deviation. However, the IQR depends on fewer points
+     than the standard deviation, so it is less accurate, especially for
+     long tailed distributions.
+
+     If the IQR is 0, this function returns 0 for the bin width.
+     Binwidth is inversely proportional to the cube root of data size
+     (asymptotically optimal).
+
+     Parameters
+     ----------
+     x : array_like
+         Input data that is to be histogrammed, trimmed to range. May not
+         be empty.
+
+     Returns
+     -------
+     h : An estimate of the optimal bin width for the given data.
+     """
+     del range  # unused
+     iqr = np.subtract(*np.percentile(x, [75, 25]))
+     return 2.0 * iqr * x.size ** (-1.0 / 3.0)
+
+
+ def _hist_bin_auto(x, range):
+     """
+     Histogram bin estimator that uses the minimum width of the
+     Freedman-Diaconis and Sturges estimators if the FD bin width is
+     non-zero.  If the bin width from the FD estimator is 0, the Sturges
+     estimator is used.
+
+     The FD estimator is usually the most robust method, but its width
+     estimate tends to be too large for small `x` and bad for data with
+     limited variance.  The Sturges estimator is quite good for small
+     (<1000) datasets and is the default in the R language.  This method
+     gives good off-the-shelf behaviour.
+
+     .. versionchanged:: 1.15.0
+     If there is limited variance the IQR can be 0, which results in the
+     FD bin width being 0 too. This is not a valid bin width, so
+     ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
+     If the IQR is 0, it's unlikely any variance-based estimators will be of
+     use, so we revert to the Sturges estimator, which only uses the size of the
+     dataset in its calculation.
+
+     Parameters
+     ----------
+     x : array_like
+         Input data that is to be histogrammed, trimmed to range. May not
+         be empty.
+
+     Returns
+     -------
+     h : An estimate of the optimal bin width for the given data.
+
+     See Also
+     --------
+     _hist_bin_fd, _hist_bin_sturges
+     """
+     fd_bw = _hist_bin_fd(x, range)
+     sturges_bw = _hist_bin_sturges(x, range)
+     del range  # unused
+     if fd_bw:
+         return min(fd_bw, sturges_bw)
+     else:
+         # limited variance, so we return a len dependent bw estimator
+         return sturges_bw
+
+
+ # Private dict initialized at module load time
+ _hist_bin_selectors = {'stone': _hist_bin_stone,
+                        'auto': _hist_bin_auto,
+                        'doane': _hist_bin_doane,
+                        'fd': _hist_bin_fd,
+                        'rice': _hist_bin_rice,
+                        'scott': _hist_bin_scott,
+                        'sqrt': _hist_bin_sqrt,
+                        'sturges': _hist_bin_sturges}
+
+
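+ # Example (illustrative): comparing two of the estimators above on a simple
+ # ramp; 'auto' takes the minimum of the FD and Sturges widths when FD is
+ # non-zero.
+ #
+ #     >>> import numpy as np
+ #     >>> x = np.arange(100.0)
+ #     >>> round(_hist_bin_sturges(x, None), 2)
+ #     12.95
+ #     >>> round(_hist_bin_fd(x, None), 2)
+ #     21.33
+ #     >>> round(_hist_bin_auto(x, None), 2)
+ #     12.95
+
+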
+ def _ravel_and_check_weights(a, weights):
+     """ Check a and weights have matching shapes, and ravel both """
+     a = np.asarray(a)
+
+     # Ensure that the array is a "subtractable" dtype
+     if a.dtype == np.bool_:
+         warnings.warn("Converting input from {} to {} for compatibility."
+                       .format(a.dtype, np.uint8),
+                       RuntimeWarning, stacklevel=3)
+         a = a.astype(np.uint8)
+
+     if weights is not None:
+         weights = np.asarray(weights)
+         if weights.shape != a.shape:
+             raise ValueError(
+                 'weights should have the same shape as a.')
+         weights = weights.ravel()
+     a = a.ravel()
+     return a, weights
+
+
+ def _get_outer_edges(a, range):
+     """
+     Determine the outer bin edges to use, from either the data or the range
+     argument
+     """
+     if range is not None:
+         first_edge, last_edge = range
+         if first_edge > last_edge:
+             raise ValueError(
+                 'max must be larger than min in range parameter.')
+         if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+             raise ValueError(
+                 "supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
+     elif a.size == 0:
+         # handle empty arrays. Can't determine range, so use 0-1.
+         first_edge, last_edge = 0, 1
+     else:
+         first_edge, last_edge = a.min(), a.max()
+         if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
+             raise ValueError(
+                 "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
+
+     # expand empty range to avoid divide by zero
+     if first_edge == last_edge:
+         first_edge = first_edge - 0.5
+         last_edge = last_edge + 0.5
+
+     return first_edge, last_edge
+
+
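+ # Example (illustrative): a degenerate range is widened by 0.5 on each side
+ # so that downstream bin-width computations never divide by zero.
+ #
+ #     >>> import numpy as np
+ #     >>> _get_outer_edges(np.array([3.0]), None)
+ #     (2.5, 3.5)
+
+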
+ def _unsigned_subtract(a, b):
+     """
+     Subtract two values where a >= b, and produce an unsigned result
+
+     This is needed when finding the difference between the upper and lower
+     bound of an int16 histogram
+     """
+     # coerce to a single type
+     signed_to_unsigned = {
+         np.byte: np.ubyte,
+         np.short: np.ushort,
+         np.intc: np.uintc,
+         np.int_: np.uint,
+         np.longlong: np.ulonglong
+     }
+     dt = np.result_type(a, b)
+     try:
+         dt = signed_to_unsigned[dt.type]
+     except KeyError:
+         return np.subtract(a, b, dtype=dt)
+     else:
+         # we know the inputs are integers, and we are deliberately casting
+         # signed to unsigned
+         return np.subtract(a, b, casting='unsafe', dtype=dt)
+
+
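+ # Example (illustrative): for int8 inputs a plain subtraction would
+ # overflow the signed type, while the unsigned variant yields the full
+ # peak-to-peak value.
+ #
+ #     >>> import numpy as np
+ #     >>> _unsigned_subtract(np.int8(127), np.int8(-128))
+ #     255
+
+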
+ def _get_bin_edges(a, bins, range, weights):
+     """
+     Computes the bins used internally by `histogram`.
+
+     Parameters
+     ==========
+     a : ndarray
+         Ravelled data array
+     bins, range
+         Forwarded arguments from `histogram`.
+     weights : ndarray, optional
+         Ravelled weights array, or None
+
+     Returns
+     =======
+     bin_edges : ndarray
+         Array of bin edges
+     uniform_bins : (Number, Number, int)
+         The lower bound, upper bound, and number of bins, used in the
+         optimized implementation of `histogram` that works on uniform bins.
+     """
+     # parse the overloaded bins argument
+     n_equal_bins = None
+     bin_edges = None
+
+     if isinstance(bins, str):
+         bin_name = bins
+         # if `bins` is a string for an automatic method,
+         # this will replace it with the number of bins calculated
+         if bin_name not in _hist_bin_selectors:
+             raise ValueError(
+                 "{!r} is not a valid estimator for `bins`".format(bin_name))
+         if weights is not None:
+             raise TypeError("Automated estimation of the number of "
+                             "bins is not supported for weighted data")
+
+         first_edge, last_edge = _get_outer_edges(a, range)
+
+         # truncate the range if needed
+         if range is not None:
+             keep = (a >= first_edge)
+             keep &= (a <= last_edge)
+             if not np.logical_and.reduce(keep):
+                 a = a[keep]
+
+         if a.size == 0:
+             n_equal_bins = 1
+         else:
+             # Do not call selectors on empty arrays
+             width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
+             if width:
+                 n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
+             else:
+                 # Width can be zero for some estimators, e.g. FD when
+                 # the IQR of the data is zero.
+                 n_equal_bins = 1
+
+     elif np.ndim(bins) == 0:
+         try:
+             n_equal_bins = operator.index(bins)
+         except TypeError as e:
+             raise TypeError(
+                 '`bins` must be an integer, a string, or an array') from e
+         if n_equal_bins < 1:
+             raise ValueError('`bins` must be positive, when an integer')
+
+         first_edge, last_edge = _get_outer_edges(a, range)
+
+     elif np.ndim(bins) == 1:
+         bin_edges = np.asarray(bins)
+         if np.any(bin_edges[:-1] > bin_edges[1:]):
+             raise ValueError(
+                 '`bins` must increase monotonically, when an array')
+
+     else:
+         raise ValueError('`bins` must be 1d, when an array')
+
+     if n_equal_bins is not None:
+         # gh-10322 means that type resolution rules are dependent on array
+         # shapes. To avoid this causing problems, we pick a type now and stick
+         # with it throughout.
+         bin_type = np.result_type(first_edge, last_edge, a)
+         if np.issubdtype(bin_type, np.integer):
+             bin_type = np.result_type(bin_type, float)
+
+         # bin edges must be computed
+         bin_edges = np.linspace(
+             first_edge, last_edge, n_equal_bins + 1,
+             endpoint=True, dtype=bin_type)
+         return bin_edges, (first_edge, last_edge, n_equal_bins)
+     else:
+         return bin_edges, None
+
+
+ def _search_sorted_inclusive(a, v):
+     """
+     Like `searchsorted`, but where the last item in `v` is placed on the right.
+
+     In the context of a histogram, this makes the last bin edge inclusive
+     """
+     return np.concatenate((
+         a.searchsorted(v[:-1], 'left'),
+         a.searchsorted(v[-1:], 'right')
+     ))
+
+
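+ # Example (illustrative): the rightmost edge is treated as inclusive, so a
+ # sample equal to the last edge still lands in the final bin.
+ #
+ #     >>> import numpy as np
+ #     >>> a = np.array([1, 2, 3, 4, 5])
+ #     >>> _search_sorted_inclusive(a, np.array([1, 3, 5]))
+ #     array([0, 2, 5])
+
+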
466
+ def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
467
+ return (a, bins, weights)
468
+
469
+
470
+ @array_function_dispatch(_histogram_bin_edges_dispatcher)
471
+ def histogram_bin_edges(a, bins=10, range=None, weights=None):
472
+ r"""
473
+ Function to calculate only the edges of the bins used by the `histogram`
474
+ function.
475
+
476
+ Parameters
477
+ ----------
478
+ a : array_like
479
+ Input data. The histogram is computed over the flattened array.
480
+ bins : int or sequence of scalars or str, optional
481
+ If `bins` is an int, it defines the number of equal-width
482
+ bins in the given range (10, by default). If `bins` is a
483
+ sequence, it defines the bin edges, including the rightmost
484
+ edge, allowing for non-uniform bin widths.
485
+
486
+ If `bins` is a string from the list below, `histogram_bin_edges` will use
487
+ the method chosen to calculate the optimal bin width and
488
+ consequently the number of bins (see `Notes` for more detail on
489
+ the estimators) from the data that falls within the requested
490
+ range. While the bin width will be optimal for the actual data
491
+ in the range, the number of bins will be computed to fill the
492
+ entire range, including the empty portions. For visualisation,
493
+ using the 'auto' option is suggested. Weighted data is not
494
+ supported for automated bin size selection.
495
+
496
+ 'auto'
497
+ Maximum of the 'sturges' and 'fd' estimators. Provides good
498
+ all around performance.
499
+
500
+ 'fd' (Freedman Diaconis Estimator)
501
+ Robust (resilient to outliers) estimator that takes into
502
+ account data variability and data size.
503
+
504
+ 'doane'
505
+ An improved version of Sturges' estimator that works better
506
+ with non-normal datasets.
507
+
508
+ 'scott'
509
+ Less robust estimator that takes into account data variability
510
+ and data size.
511
+
512
+ 'stone'
513
+ Estimator based on leave-one-out cross-validation estimate of
514
+ the integrated squared error. Can be regarded as a generalization
515
+ of Scott's rule.
516
+
517
+ 'rice'
518
+ Estimator does not take variability into account, only data
519
+ size. Commonly overestimates number of bins required.
520
+
521
+ 'sturges'
522
+ R's default method, only accounts for data size. Only
523
+ optimal for gaussian data and underestimates number of bins
524
+ for large non-gaussian datasets.
525
+
526
+ 'sqrt'
527
+ Square root (of data size) estimator, used by Excel and
528
+ other programs for its speed and simplicity.
529
+
530
+ range : (float, float), optional
531
+ The lower and upper range of the bins. If not provided, range
532
+ is simply ``(a.min(), a.max())``. Values outside the range are
533
+ ignored. The first element of the range must be less than or
534
+ equal to the second. `range` affects the automatic bin
535
+ computation as well. While bin width is computed to be optimal
536
+ based on the actual data within `range`, the bin count will fill
537
+ the entire range including portions containing no data.
538
+
539
+ weights : array_like, optional
540
+ An array of weights, of the same shape as `a`. Each value in
541
+ `a` only contributes its associated weight towards the bin count
542
+ (instead of 1). This is currently not used by any of the bin estimators,
543
+ but may be in the future.
544
+
545
+ Returns
546
+ -------
547
+ bin_edges : array of dtype float
548
+ The edges to pass into `histogram`
549
+
550
+ See Also
551
+ --------
552
+ histogram
553
+
554
+ Notes
555
+ -----
556
+ The methods to estimate the optimal number of bins are well founded
557
+ in literature, and are inspired by the choices R provides for
558
+ histogram visualisation. Note that having the number of bins
559
+ proportional to :math:`n^{1/3}` is asymptotically optimal, which is
560
+ why it appears in most estimators. These are simply plug-in methods
561
+ that give good starting points for number of bins. In the equations
562
+ below, :math:`h` is the binwidth and :math:`n_h` is the number of
563
+ bins. All estimators that compute bin counts are recast to bin width
564
+ using the `ptp` of the data. The final bin count is obtained from
565
+ ``np.round(np.ceil(range / h))``. The final bin width is often less
566
+ than what is returned by the estimators below.
567
+
568
+ 'auto' (maximum of the 'sturges' and 'fd' estimators)
569
+ A compromise to get a good value. For small datasets the Sturges
570
+ value will usually be chosen, while larger datasets will usually
571
+ default to FD. Avoids the overly conservative behaviour of FD
572
+ and Sturges for small and large datasets respectively.
573
+ Switchover point is usually :math:`a.size \approx 1000`.
574
+
575
+ 'fd' (Freedman Diaconis Estimator)
576
+ .. math:: h = 2 \frac{IQR}{n^{1/3}}
577
+
578
+ The binwidth is proportional to the interquartile range (IQR)
579
+ and inversely proportional to cube root of a.size. Can be too
580
+ conservative for small datasets, but is quite good for large
581
+ datasets. The IQR is very robust to outliers.
582
+
583
+ 'scott'
584
+ .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}}
585
+
586
+ The binwidth is proportional to the standard deviation of the
587
+ data and inversely proportional to cube root of ``x.size``. Can
588
+ be too conservative for small datasets, but is quite good for
589
+ large datasets. The standard deviation is not very robust to
590
+ outliers. Values are very similar to the Freedman-Diaconis
591
+ estimator in the absence of outliers.
592
+
593
+ 'rice'
594
+ .. math:: n_h = 2n^{1/3}
595
+
596
+ The number of bins is only proportional to cube root of
597
+ ``a.size``. It tends to overestimate the number of bins and it
598
+ does not take into account data variability.
599
+
600
+ 'sturges'
601
+ .. math:: n_h = \log_{2}(n) + 1
602
+
603
+ The number of bins is the base 2 log of ``a.size``. This
604
+ estimator assumes normality of data and is too conservative for
605
+ larger, non-normal datasets. This is the default method in R's
606
+ ``hist`` method.
607
+
608
+ 'doane'
609
+ .. math:: n_h = 1 + \log_{2}(n) +
610
+ \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right)
611
+
612
+ g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right]
613
+
614
+ \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
615
+
616
+ An improved version of Sturges' formula that produces better
617
+ estimates for non-normal datasets. This estimator attempts to
618
+ account for the skew of the data.
619
+
620
+ 'sqrt'
621
+ .. math:: n_h = \sqrt n
622
+
623
+ The simplest and fastest estimator. Only takes into account the
624
+ data size.
625
+
626
+ Examples
627
+ --------
628
+ >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
629
+ >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
630
+ array([0. , 0.25, 0.5 , 0.75, 1. ])
631
+ >>> np.histogram_bin_edges(arr, bins=2)
632
+ array([0. , 2.5, 5. ])
633
+
634
+ For consistency with histogram, an array of pre-computed bins is
635
+ passed through unmodified:
636
+
637
+ >>> np.histogram_bin_edges(arr, [1, 2])
638
+ array([1, 2])
639
+
640
+ This function allows one set of bins to be computed, and reused across
641
+ multiple histograms:
642
+
643
+ >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
644
+ >>> shared_bins
645
+ array([0., 1., 2., 3., 4., 5.])
646
+
647
+ >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
648
+ >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
649
+ >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
650
+
651
+ >>> hist_0; hist_1
652
+ array([1, 1, 0, 1, 0])
653
+ array([2, 0, 1, 1, 2])
654
+
655
+ Which gives more easily comparable results than using separate bins for
656
+ each histogram:
657
+
658
+ >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
659
+ >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
660
+ >>> hist_0; hist_1
661
+ array([1, 1, 1])
662
+ array([2, 1, 1, 2])
663
+ >>> bins_0; bins_1
664
+ array([0., 1., 2., 3.])
665
+ array([0. , 1.25, 2.5 , 3.75, 5. ])
666
+
667
+ """
668
+ a, weights = _ravel_and_check_weights(a, weights)
669
+ bin_edges, _ = _get_bin_edges(a, bins, range, weights)
670
+ return bin_edges
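As a quick cross-check of the 'fd' formula in the Notes above — a minimal sketch only; the internal implementation additionally handles degenerate IQRs and an explicit `range`, which this ignores:

>>> rng = np.random.RandomState(0)
>>> x = rng.normal(size=1000)
>>> iqr = np.subtract(*np.percentile(x, [75, 25]))
>>> h = 2 * iqr / x.size ** (1 / 3)      # Freedman-Diaconis width, as above
>>> edges = np.histogram_bin_edges(x, bins='fd')
>>> # sketch: up to the final ceil/round described in the Notes, the
>>> # bin count should track ptp(x) / h
>>> n_bins, n_pred = len(edges) - 1, int(np.ceil(np.ptp(x) / h))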
671
+
672
+
673
+ def _histogram_dispatcher(
674
+ a, bins=None, range=None, density=None, weights=None):
675
+ return (a, bins, weights)
676
+
677
+
678
+ @array_function_dispatch(_histogram_dispatcher)
679
+ def histogram(a, bins=10, range=None, density=None, weights=None):
680
+ r"""
681
+ Compute the histogram of a dataset.
682
+
683
+ Parameters
684
+ ----------
685
+ a : array_like
686
+ Input data. The histogram is computed over the flattened array.
687
+ bins : int or sequence of scalars or str, optional
688
+ If `bins` is an int, it defines the number of equal-width
689
+ bins in the given range (10, by default). If `bins` is a
690
+ sequence, it defines a monotonically increasing array of bin edges,
691
+ including the rightmost edge, allowing for non-uniform bin widths.
692
+
693
+ .. versionadded:: 1.11.0
694
+
695
+ If `bins` is a string, it defines the method used to calculate the
696
+ optimal bin width, as defined by `histogram_bin_edges`.
697
+
698
+ range : (float, float), optional
699
+ The lower and upper range of the bins. If not provided, range
700
+ is simply ``(a.min(), a.max())``. Values outside the range are
701
+ ignored. The first element of the range must be less than or
702
+ equal to the second. `range` affects the automatic bin
703
+ computation as well. While bin width is computed to be optimal
704
+ based on the actual data within `range`, the bin count will fill
705
+ the entire range including portions containing no data.
706
+ weights : array_like, optional
707
+ An array of weights, of the same shape as `a`. Each value in
708
+ `a` only contributes its associated weight towards the bin count
709
+ (instead of 1). If `density` is True, the weights are
710
+ normalized, so that the integral of the density over the range
711
+ remains 1.
712
+ density : bool, optional
713
+ If ``False``, the result will contain the number of samples in
714
+ each bin. If ``True``, the result is the value of the
715
+ probability *density* function at the bin, normalized such that
716
+ the *integral* over the range is 1. Note that the sum of the
717
+ histogram values will not be equal to 1 unless bins of unity
718
+ width are chosen; it is not a probability *mass* function.
719
+
720
+ Returns
721
+ -------
722
+ hist : array
723
+ The values of the histogram. See `density` and `weights` for a
724
+ description of the possible semantics.
725
+ bin_edges : array of dtype float
726
+ The bin edges, of length ``len(hist) + 1``.
727
+
728
+
729
+ See Also
730
+ --------
731
+ histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
732
+
733
+ Notes
734
+ -----
735
+ All bins but the last (righthand-most) one are half-open. In other words,
736
+ if `bins` is::
737
+
738
+ [1, 2, 3, 4]
739
+
740
+ then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
741
+ the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
742
+ *includes* 4.
743
+
744
+
745
+ Examples
746
+ --------
747
+ >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
748
+ (array([0, 2, 1]), array([0, 1, 2, 3]))
749
+ >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
750
+ (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
751
+ >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
752
+ (array([1, 4, 1]), array([0, 1, 2, 3]))
753
+
754
+ >>> a = np.arange(5)
755
+ >>> hist, bin_edges = np.histogram(a, density=True)
756
+ >>> hist
757
+ array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
758
+ >>> hist.sum()
759
+ 2.4999999999999996
760
+ >>> np.sum(hist * np.diff(bin_edges))
761
+ 1.0
762
+
763
+ .. versionadded:: 1.11.0
764
+
765
+ Automated Bin Selection Methods example, using 2-peak random data
766
+ with 2000 points:
767
+
768
+ >>> import matplotlib.pyplot as plt
769
+ >>> rng = np.random.RandomState(10) # deterministic random data
770
+ >>> a = np.hstack((rng.normal(size=1000),
771
+ ... rng.normal(loc=5, scale=2, size=1000)))
772
+ >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram
773
+ >>> plt.title("Histogram with 'auto' bins")
774
+ Text(0.5, 1.0, "Histogram with 'auto' bins")
775
+ >>> plt.show()
776
+
777
+ """
778
+ a, weights = _ravel_and_check_weights(a, weights)
779
+
780
+ bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
781
+
782
+ # Histogram is an integer or a float array depending on the weights.
783
+ if weights is None:
784
+ ntype = np.dtype(np.intp)
785
+ else:
786
+ ntype = weights.dtype
787
+
788
+ # We set a block size, as this allows us to iterate over chunks when
789
+ # computing histograms, to minimize memory usage.
790
+ BLOCK = 65536
791
+
792
+ # The fast path uses bincount, but that only works for certain types
793
+ # of weight
794
+ simple_weights = (
795
+ weights is None or
796
+ np.can_cast(weights.dtype, np.double) or
797
+ np.can_cast(weights.dtype, complex)
798
+ )
799
+
800
+ if uniform_bins is not None and simple_weights:
801
+ # Fast algorithm for equal bins
802
+ # We now convert values of a to bin indices, under the assumption of
803
+ # equal bin widths (which is valid here).
804
+ first_edge, last_edge, n_equal_bins = uniform_bins
805
+
806
+ # Initialize empty histogram
807
+ n = np.zeros(n_equal_bins, ntype)
808
+
809
+ # Pre-compute histogram scaling factor
810
+ norm_numerator = n_equal_bins
811
+ norm_denom = _unsigned_subtract(last_edge, first_edge)
812
+
813
+ # We iterate over blocks here for two reasons: the first is that for
814
+ # large arrays, it is actually faster (for example for a 10^8 array it
815
+ # is 2x as fast) and it results in a memory footprint 3x lower in the
816
+ # limit of large arrays.
817
+ for i in _range(0, len(a), BLOCK):
818
+ tmp_a = a[i:i+BLOCK]
819
+ if weights is None:
820
+ tmp_w = None
821
+ else:
822
+ tmp_w = weights[i:i + BLOCK]
823
+
824
+ # Only include values in the right range
825
+ keep = (tmp_a >= first_edge)
826
+ keep &= (tmp_a <= last_edge)
827
+ if not np.logical_and.reduce(keep):
828
+ tmp_a = tmp_a[keep]
829
+ if tmp_w is not None:
830
+ tmp_w = tmp_w[keep]
831
+
832
+ # This cast ensures no type promotions occur below, which gh-10322
833
+ # make unpredictable. Getting it wrong leads to precision errors
834
+ # like gh-8123.
835
+ tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
836
+
837
+ # Compute the bin indices, and for values that lie exactly on
838
+ # last_edge we need to subtract one
839
+ f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom)
840
+ * norm_numerator)
841
+ indices = f_indices.astype(np.intp)
842
+ indices[indices == n_equal_bins] -= 1
843
+
844
+ # The index computation is not guaranteed to give exactly
845
+ # consistent results within ~1 ULP of the bin edges.
846
+ decrement = tmp_a < bin_edges[indices]
847
+ indices[decrement] -= 1
848
+ # The last bin includes the right edge. The other bins do not.
849
+ increment = ((tmp_a >= bin_edges[indices + 1])
850
+ & (indices != n_equal_bins - 1))
851
+ indices[increment] += 1
852
+
853
+ # We now compute the histogram using bincount
854
+ if ntype.kind == 'c':
855
+ n.real += np.bincount(indices, weights=tmp_w.real,
856
+ minlength=n_equal_bins)
857
+ n.imag += np.bincount(indices, weights=tmp_w.imag,
858
+ minlength=n_equal_bins)
859
+ else:
860
+ n += np.bincount(indices, weights=tmp_w,
861
+ minlength=n_equal_bins).astype(ntype)
862
+ else:
863
+ # Compute via cumulative histogram
864
+ cum_n = np.zeros(bin_edges.shape, ntype)
865
+ if weights is None:
866
+ for i in _range(0, len(a), BLOCK):
867
+ sa = np.sort(a[i:i+BLOCK])
868
+ cum_n += _search_sorted_inclusive(sa, bin_edges)
869
+ else:
870
+ zero = np.zeros(1, dtype=ntype)
871
+ for i in _range(0, len(a), BLOCK):
872
+ tmp_a = a[i:i+BLOCK]
873
+ tmp_w = weights[i:i+BLOCK]
874
+ sorting_index = np.argsort(tmp_a)
875
+ sa = tmp_a[sorting_index]
876
+ sw = tmp_w[sorting_index]
877
+ cw = np.concatenate((zero, sw.cumsum()))
878
+ bin_index = _search_sorted_inclusive(sa, bin_edges)
879
+ cum_n += cw[bin_index]
880
+
881
+ n = np.diff(cum_n)
882
+
883
+ if density:
884
+ db = np.array(np.diff(bin_edges), float)
885
+ return n/db/n.sum(), bin_edges
886
+
887
+ return n, bin_edges
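The density branch above divides by both the per-bin widths and the total count, so the estimate integrates to one even with unequal bins. A small sanity check (a sketch; `np.isclose` is used rather than asserting exact floating-point equality):

>>> vals = np.array([0.1, 0.4, 0.4, 2.5, 3.0])
>>> hist, edges = np.histogram(vals, bins=[0, 1, 2, 4], density=True)
>>> # sketch: the Riemann sum of the density over the bins is 1
>>> bool(np.isclose(np.sum(hist * np.diff(edges)), 1.0))
True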
888
+
889
+
890
+ def _histogramdd_dispatcher(sample, bins=None, range=None, density=None,
891
+ weights=None):
892
+ if hasattr(sample, 'shape'): # same condition as used in histogramdd
893
+ yield sample
894
+ else:
895
+ yield from sample
896
+ with contextlib.suppress(TypeError):
897
+ yield from bins
898
+ yield weights
899
+
900
+
901
+ @array_function_dispatch(_histogramdd_dispatcher)
902
+ def histogramdd(sample, bins=10, range=None, density=None, weights=None):
903
+ """
904
+ Compute the multidimensional histogram of some data.
905
+
906
+ Parameters
907
+ ----------
908
+ sample : (N, D) array, or (N, D) array_like
909
+ The data to be histogrammed.
910
+
911
+ Note the unusual interpretation of sample when an array_like:
912
+
913
+ * When an array, each row is a coordinate in a D-dimensional space -
914
+ such as ``histogramdd(np.array([p1, p2, p3]))``.
915
+ * When an array_like, each element is the list of values for a single
916
+ coordinate - such as ``histogramdd((X, Y, Z))``.
917
+
918
+ The first form should be preferred.
919
+
920
+ bins : sequence or int, optional
921
+ The bin specification:
922
+
923
+ * A sequence of arrays describing the monotonically increasing bin
924
+ edges along each dimension.
925
+ * The number of bins for each dimension (nx, ny, ... = bins).
926
+ * The number of bins for all dimensions (nx=ny=...=bins).
927
+
928
+ range : sequence, optional
929
+ A sequence of length D, each an optional (lower, upper) tuple giving
930
+ the outer bin edges to be used if the edges are not given explicitly in
931
+ `bins`.
932
+ An entry of None in the sequence results in the minimum and maximum
933
+ values being used for the corresponding dimension.
934
+ The default, None, is equivalent to passing a tuple of D None values.
935
+ density : bool, optional
936
+ If False, the default, returns the number of samples in each bin.
937
+ If True, returns the probability *density* function at the bin,
938
+ ``bin_count / sample_count / bin_volume``.
939
+ weights : (N,) array_like, optional
940
+ An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
941
+ Weights are normalized to 1 if density is True. If density is False,
942
+ the values of the returned histogram are equal to the sum of the
943
+ weights belonging to the samples falling into each bin.
944
+
945
+ Returns
946
+ -------
947
+ H : ndarray
948
+ The multidimensional histogram of sample x. See density and weights
949
+ for the different possible semantics.
950
+ edges : list
951
+ A list of D arrays describing the bin edges for each dimension.
952
+
953
+ See Also
954
+ --------
955
+ histogram: 1-D histogram
956
+ histogram2d: 2-D histogram
957
+
958
+ Examples
959
+ --------
960
+ >>> r = np.random.randn(100,3)
961
+ >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
962
+ >>> H.shape, edges[0].size, edges[1].size, edges[2].size
963
+ ((5, 8, 4), 6, 9, 5)
964
+
965
+ """
966
+
967
+ try:
968
+ # Sample is an ND-array.
969
+ N, D = sample.shape
970
+ except (AttributeError, ValueError):
971
+ # Sample is a sequence of 1D arrays.
972
+ sample = np.atleast_2d(sample).T
973
+ N, D = sample.shape
974
+
975
+ nbin = np.empty(D, np.intp)
976
+ edges = D*[None]
977
+ dedges = D*[None]
978
+ if weights is not None:
979
+ weights = np.asarray(weights)
980
+
981
+ try:
982
+ M = len(bins)
983
+ if M != D:
984
+ raise ValueError(
985
+ 'The dimension of bins must be equal to the dimension of the '
986
+ 'sample x.')
987
+ except TypeError:
988
+ # bins is an integer
989
+ bins = D*[bins]
990
+
991
+ # normalize the range argument
992
+ if range is None:
993
+ range = (None,) * D
994
+ elif len(range) != D:
995
+ raise ValueError('range argument must have one entry per dimension')
996
+
997
+ # Create edge arrays
998
+ for i in _range(D):
999
+ if np.ndim(bins[i]) == 0:
1000
+ if bins[i] < 1:
1001
+ raise ValueError(
1002
+ '`bins[{}]` must be positive, when an integer'.format(i))
1003
+ smin, smax = _get_outer_edges(sample[:,i], range[i])
1004
+ try:
1005
+ n = operator.index(bins[i])
1006
+
1007
+ except TypeError as e:
1008
+ raise TypeError(
1009
+ "`bins[{}]` must be an integer, when a scalar".format(i)
1010
+ ) from e
1011
+
1012
+ edges[i] = np.linspace(smin, smax, n + 1)
1013
+ elif np.ndim(bins[i]) == 1:
1014
+ edges[i] = np.asarray(bins[i])
1015
+ if np.any(edges[i][:-1] > edges[i][1:]):
1016
+ raise ValueError(
1017
+ '`bins[{}]` must be monotonically increasing, when an array'
1018
+ .format(i))
1019
+ else:
1020
+ raise ValueError(
1021
+ '`bins[{}]` must be a scalar or 1d array'.format(i))
1022
+
1023
+ nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
1024
+ dedges[i] = np.diff(edges[i])
1025
+
1026
+ # Compute the bin number each sample falls into.
1027
+ Ncount = tuple(
1028
+ # avoid np.digitize to work around gh-11022
1029
+ np.searchsorted(edges[i], sample[:, i], side='right')
1030
+ for i in _range(D)
1031
+ )
1032
+
1033
+ # Using digitize, values that fall on an edge are put in the right bin.
1034
+ # For the rightmost bin, we want values equal to the right edge to be
1035
+ # counted in the last bin, and not as an outlier.
1036
+ for i in _range(D):
1037
+ # Find which points are on the rightmost edge.
1038
+ on_edge = (sample[:, i] == edges[i][-1])
1039
+ # Shift these points one bin to the left.
1040
+ Ncount[i][on_edge] -= 1
1041
+
1042
+ # Compute the sample indices in the flattened histogram matrix.
1043
+ # This raises an error if the array is too large.
1044
+ xy = np.ravel_multi_index(Ncount, nbin)
1045
+
1046
+ # Compute the number of repetitions in xy and assign it to the
1047
+ # flattened histmat.
1048
+ hist = np.bincount(xy, weights, minlength=nbin.prod())
1049
+
1050
+ # Shape into a proper matrix
1051
+ hist = hist.reshape(nbin)
1052
+
1053
+ # This preserves the (bad) behavior observed in gh-7845, for now.
1054
+ hist = hist.astype(float, casting='safe')
1055
+
1056
+ # Remove outliers (indices 0 and -1 for each dimension).
1057
+ core = D*(slice(1, -1),)
1058
+ hist = hist[core]
1059
+
1060
+ if density:
1061
+ # calculate the probability density function
1062
+ s = hist.sum()
1063
+ for i in _range(D):
1064
+ shape = np.ones(D, int)
1065
+ shape[i] = nbin[i] - 2
1066
+ hist = hist / dedges[i].reshape(shape)
1067
+ hist /= s
1068
+
1069
+ if (hist.shape != nbin - 2).any():
1070
+ raise RuntimeError(
1071
+ "Internal Shape Error")
1072
+ return hist, edges
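The try/except at the top of `histogramdd` is what makes the two call forms described in the docstring equivalent; a brief check (sketch, using arbitrary example coordinates):

>>> x = np.array([0.2, 0.8, 0.5, 0.9])
>>> y = np.array([0.1, 0.7, 0.6, 0.2])
>>> H_arr, _ = np.histogramdd(np.column_stack([x, y]), bins=(2, 2))
>>> H_seq, _ = np.histogramdd((x, y), bins=(2, 2))   # sequence-of-axes form
>>> np.array_equal(H_arr, H_seq)
True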
.venv/lib/python3.11/site-packages/numpy/lib/histograms.pyi ADDED
@@ -0,0 +1,47 @@
1
+ from collections.abc import Sequence
2
+ from typing import (
3
+ Literal as L,
4
+ Any,
5
+ SupportsIndex,
6
+ )
7
+
8
+ from numpy._typing import (
9
+ NDArray,
10
+ ArrayLike,
11
+ )
12
+
13
+ _BinKind = L[
14
+ "stone",
15
+ "auto",
16
+ "doane",
17
+ "fd",
18
+ "rice",
19
+ "scott",
20
+ "sqrt",
21
+ "sturges",
22
+ ]
23
+
24
+ __all__: list[str]
25
+
26
+ def histogram_bin_edges(
27
+ a: ArrayLike,
28
+ bins: _BinKind | SupportsIndex | ArrayLike = ...,
29
+ range: None | tuple[float, float] = ...,
30
+ weights: None | ArrayLike = ...,
31
+ ) -> NDArray[Any]: ...
32
+
33
+ def histogram(
34
+ a: ArrayLike,
35
+ bins: _BinKind | SupportsIndex | ArrayLike = ...,
36
+ range: None | tuple[float, float] = ...,
37
+ density: bool = ...,
38
+ weights: None | ArrayLike = ...,
39
+ ) -> tuple[NDArray[Any], NDArray[Any]]: ...
40
+
41
+ def histogramdd(
42
+ sample: ArrayLike,
43
+ bins: SupportsIndex | ArrayLike = ...,
44
+ range: Sequence[tuple[float, float]] = ...,
45
+ density: None | bool = ...,
46
+ weights: None | ArrayLike = ...,
47
+ ) -> tuple[NDArray[Any], list[NDArray[Any]]]: ...
.venv/lib/python3.11/site-packages/numpy/lib/index_tricks.py ADDED
@@ -0,0 +1,1046 @@
1
+ import functools
2
+ import sys
3
+ import math
4
+ import warnings
5
+
6
+ import numpy as np
7
+ from .._utils import set_module
8
+ import numpy.core.numeric as _nx
9
+ from numpy.core.numeric import ScalarType, array
10
+ from numpy.core.numerictypes import issubdtype
11
+
12
+ import numpy.matrixlib as matrixlib
13
+ from .function_base import diff
14
+ from numpy.core.multiarray import ravel_multi_index, unravel_index
15
+ from numpy.core import overrides, linspace
16
+ from numpy.lib.stride_tricks import as_strided
17
+
18
+
19
+ array_function_dispatch = functools.partial(
20
+ overrides.array_function_dispatch, module='numpy')
21
+
22
+
23
+ __all__ = [
24
+ 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
25
+ 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
26
+ 'diag_indices', 'diag_indices_from'
27
+ ]
28
+
29
+
30
+ def _ix__dispatcher(*args):
31
+ return args
32
+
33
+
34
+ @array_function_dispatch(_ix__dispatcher)
35
+ def ix_(*args):
36
+ """
37
+ Construct an open mesh from multiple sequences.
38
+
39
+ This function takes N 1-D sequences and returns N outputs with N
40
+ dimensions each, such that the shape is 1 in all but one dimension
41
+ and the dimension with the non-unit shape value cycles through all
42
+ N dimensions.
43
+
44
+ Using `ix_` one can quickly construct index arrays that will index
45
+ the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
46
+ ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
47
+
48
+ Parameters
49
+ ----------
50
+ args : 1-D sequences
51
+ Each sequence should be of integer or boolean type.
52
+ Boolean sequences will be interpreted as boolean masks for the
53
+ corresponding dimension (equivalent to passing in
54
+ ``np.nonzero(boolean_sequence)``).
55
+
56
+ Returns
57
+ -------
58
+ out : tuple of ndarrays
59
+ N arrays with N dimensions each, with N the number of input
60
+ sequences. Together these arrays form an open mesh.
61
+
62
+ See Also
63
+ --------
64
+ ogrid, mgrid, meshgrid
65
+
66
+ Examples
67
+ --------
68
+ >>> a = np.arange(10).reshape(2, 5)
69
+ >>> a
70
+ array([[0, 1, 2, 3, 4],
71
+ [5, 6, 7, 8, 9]])
72
+ >>> ixgrid = np.ix_([0, 1], [2, 4])
73
+ >>> ixgrid
74
+ (array([[0],
75
+ [1]]), array([[2, 4]]))
76
+ >>> ixgrid[0].shape, ixgrid[1].shape
77
+ ((2, 1), (1, 2))
78
+ >>> a[ixgrid]
79
+ array([[2, 4],
80
+ [7, 9]])
81
+
82
+ >>> ixgrid = np.ix_([True, True], [2, 4])
83
+ >>> a[ixgrid]
84
+ array([[2, 4],
85
+ [7, 9]])
86
+ >>> ixgrid = np.ix_([True, True], [False, False, True, False, True])
87
+ >>> a[ixgrid]
88
+ array([[2, 4],
89
+ [7, 9]])
90
+
91
+ """
92
+ out = []
93
+ nd = len(args)
94
+ for k, new in enumerate(args):
95
+ if not isinstance(new, _nx.ndarray):
96
+ new = np.asarray(new)
97
+ if new.size == 0:
98
+ # Explicitly type empty arrays to avoid float default
99
+ new = new.astype(_nx.intp)
100
+ if new.ndim != 1:
101
+ raise ValueError("Cross index must be 1 dimensional")
102
+ if issubdtype(new.dtype, _nx.bool_):
103
+ new, = new.nonzero()
104
+ new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
105
+ out.append(new)
106
+ return tuple(out)
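Because the arrays returned above have mutually broadcastable shapes, the same index tuple also supports in-place updates of the cross product (a small usage sketch):

>>> a = np.arange(10).reshape(2, 5)
>>> a[np.ix_([0, 1], [2, 4])] += 100   # sketch: update the 2 x 2 cross product
>>> a
array([[  0,   1, 102,   3, 104],
       [  5,   6, 107,   8, 109]])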
107
+
108
+
109
+ class nd_grid:
110
+ """
111
+ Construct a multi-dimensional "meshgrid".
112
+
113
+ ``grid = nd_grid()`` creates an instance which will return a mesh-grid
114
+ when indexed. The dimension and number of the output arrays are equal
115
+ to the number of indexing dimensions. If the step length is not a
116
+ complex number, then the stop is not inclusive.
117
+
118
+ However, if the step length is a **complex number** (e.g. 5j), then the
119
+ integer part of its magnitude is interpreted as specifying the
120
+ number of points to create between the start and stop values, where
121
+ the stop value **is inclusive**.
122
+
123
+ If instantiated with an argument of ``sparse=True``, the mesh-grid is
124
+ open (or not fleshed out) so that only one-dimension of each returned
125
+ argument is greater than 1.
126
+
127
+ Parameters
128
+ ----------
129
+ sparse : bool, optional
130
+ Whether the grid is sparse or not. Default is False.
131
+
132
+ Notes
133
+ -----
134
+ Two instances of `nd_grid` are made available in the NumPy namespace,
135
+ `mgrid` and `ogrid`, approximately defined as::
136
+
137
+ mgrid = nd_grid(sparse=False)
138
+ ogrid = nd_grid(sparse=True)
139
+
140
+ Users should use these pre-defined instances instead of using `nd_grid`
141
+ directly.
142
+ """
143
+
144
+ def __init__(self, sparse=False):
145
+ self.sparse = sparse
146
+
147
+ def __getitem__(self, key):
148
+ try:
149
+ size = []
150
+ # Mimic the behavior of `np.arange` and use a data type
151
+ # which is at least as large as `np.int_`
152
+ num_list = [0]
153
+ for k in range(len(key)):
154
+ step = key[k].step
155
+ start = key[k].start
156
+ stop = key[k].stop
157
+ if start is None:
158
+ start = 0
159
+ if step is None:
160
+ step = 1
161
+ if isinstance(step, (_nx.complexfloating, complex)):
162
+ step = abs(step)
163
+ size.append(int(step))
164
+ else:
165
+ size.append(
166
+ int(math.ceil((stop - start) / (step*1.0))))
167
+ num_list += [start, stop, step]
168
+ typ = _nx.result_type(*num_list)
169
+ if self.sparse:
170
+ nn = [_nx.arange(_x, dtype=_t)
171
+ for _x, _t in zip(size, (typ,)*len(size))]
172
+ else:
173
+ nn = _nx.indices(size, typ)
174
+ for k, kk in enumerate(key):
175
+ step = kk.step
176
+ start = kk.start
177
+ if start is None:
178
+ start = 0
179
+ if step is None:
180
+ step = 1
181
+ if isinstance(step, (_nx.complexfloating, complex)):
182
+ step = int(abs(step))
183
+ if step != 1:
184
+ step = (kk.stop - start) / float(step - 1)
185
+ nn[k] = (nn[k]*step+start)
186
+ if self.sparse:
187
+ slobj = [_nx.newaxis]*len(size)
188
+ for k in range(len(size)):
189
+ slobj[k] = slice(None, None)
190
+ nn[k] = nn[k][tuple(slobj)]
191
+ slobj[k] = _nx.newaxis
192
+ return nn
193
+ except (IndexError, TypeError):
194
+ step = key.step
195
+ stop = key.stop
196
+ start = key.start
197
+ if start is None:
198
+ start = 0
199
+ if isinstance(step, (_nx.complexfloating, complex)):
200
+ # Prevent the (potential) creation of integer arrays
201
+ step_float = abs(step)
202
+ step = length = int(step_float)
203
+ if step != 1:
204
+ step = (key.stop-start)/float(step-1)
205
+ typ = _nx.result_type(start, stop, step_float)
206
+ return _nx.arange(0, length, 1, dtype=typ)*step + start
207
+ else:
208
+ return _nx.arange(start, stop, step)
209
+
210
+
211
+ class MGridClass(nd_grid):
212
+ """
213
+ An instance which returns a dense multi-dimensional "meshgrid".
214
+
215
+ An instance which returns a dense (or fleshed out) mesh-grid
216
+ when indexed, so that each returned argument has the same shape.
217
+ The dimensions and number of the output arrays are equal to the
218
+ number of indexing dimensions. If the step length is not a complex
219
+ number, then the stop is not inclusive.
220
+
221
+ However, if the step length is a **complex number** (e.g. 5j), then
222
+ the integer part of its magnitude is interpreted as specifying the
223
+ number of points to create between the start and stop values, where
224
+ the stop value **is inclusive**.
225
+
226
+ Returns
227
+ -------
228
+ mesh-grid `ndarrays` all of the same dimensions
229
+
230
+ See Also
231
+ --------
232
+ ogrid : like `mgrid` but returns open (not fleshed out) mesh grids
233
+ meshgrid: return coordinate matrices from coordinate vectors
234
+ r_ : array concatenator
235
+ :ref:`how-to-partition`
236
+
237
+ Examples
238
+ --------
239
+ >>> np.mgrid[0:5, 0:5]
240
+ array([[[0, 0, 0, 0, 0],
241
+ [1, 1, 1, 1, 1],
242
+ [2, 2, 2, 2, 2],
243
+ [3, 3, 3, 3, 3],
244
+ [4, 4, 4, 4, 4]],
245
+ [[0, 1, 2, 3, 4],
246
+ [0, 1, 2, 3, 4],
247
+ [0, 1, 2, 3, 4],
248
+ [0, 1, 2, 3, 4],
249
+ [0, 1, 2, 3, 4]]])
250
+ >>> np.mgrid[-1:1:5j]
251
+ array([-1. , -0.5, 0. , 0.5, 1. ])
252
+
253
+ """
254
+
255
+ def __init__(self):
256
+ super().__init__(sparse=False)
257
+
258
+
259
+ mgrid = MGridClass()
260
+
261
+
262
+ class OGridClass(nd_grid):
263
+ """
264
+ An instance which returns an open multi-dimensional "meshgrid".
265
+
266
+ An instance which returns an open (i.e. not fleshed out) mesh-grid
267
+ when indexed, so that only one dimension of each returned array is
268
+ greater than 1. The dimension and number of the output arrays are
269
+ equal to the number of indexing dimensions. If the step length is
270
+ not a complex number, then the stop is not inclusive.
271
+
272
+ However, if the step length is a **complex number** (e.g. 5j), then
273
+ the integer part of its magnitude is interpreted as specifying the
274
+ number of points to create between the start and stop values, where
275
+ the stop value **is inclusive**.
276
+
277
+ Returns
278
+ -------
279
+ mesh-grid
280
+ `ndarrays` with only one dimension not equal to 1
281
+
282
+ See Also
283
+ --------
284
+ mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
285
+ meshgrid: return coordinate matrices from coordinate vectors
286
+ r_ : array concatenator
287
+ :ref:`how-to-partition`
288
+
289
+ Examples
290
+ --------
291
+ >>> from numpy import ogrid
292
+ >>> ogrid[-1:1:5j]
293
+ array([-1. , -0.5, 0. , 0.5, 1. ])
294
+ >>> ogrid[0:5,0:5]
295
+ [array([[0],
296
+ [1],
297
+ [2],
298
+ [3],
299
+ [4]]), array([[0, 1, 2, 3, 4]])]
300
+
301
+ """
302
+
303
+ def __init__(self):
304
+ super().__init__(sparse=True)
305
+
306
+
307
+ ogrid = OGridClass()
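The sparse (`ogrid`) and dense (`mgrid`) forms describe the same grid: broadcasting the open arrays reproduces the fleshed-out ones (a sketch of the relationship, not part of the module itself):

>>> xs, ys = np.ogrid[0:3, 0:4]        # open grid: shapes (3, 1) and (1, 4)
>>> gx, gy = np.mgrid[0:3, 0:4]        # dense grid: both shape (3, 4)
>>> bx, by = np.broadcast_arrays(xs, ys)
>>> np.array_equal(bx, gx) and np.array_equal(by, gy)
True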
308
+
309
+
310
+ class AxisConcatenator:
311
+ """
312
+ Translates slice objects to concatenation along an axis.
313
+
314
+ For detailed documentation on usage, see `r_`.
315
+ """
316
+ # allow ma.mr_ to override this
317
+ concatenate = staticmethod(_nx.concatenate)
318
+ makemat = staticmethod(matrixlib.matrix)
319
+
320
+ def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
321
+ self.axis = axis
322
+ self.matrix = matrix
323
+ self.trans1d = trans1d
324
+ self.ndmin = ndmin
325
+
326
+ def __getitem__(self, key):
327
+ # handle matrix builder syntax
328
+ if isinstance(key, str):
329
+ frame = sys._getframe().f_back
330
+ mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)
331
+ return mymat
332
+
333
+ if not isinstance(key, tuple):
334
+ key = (key,)
335
+
336
+ # copy attributes, since they can be overridden in the first argument
337
+ trans1d = self.trans1d
338
+ ndmin = self.ndmin
339
+ matrix = self.matrix
340
+ axis = self.axis
341
+
342
+ objs = []
343
+ # dtypes or scalars for weak scalar handling in result_type
344
+ result_type_objs = []
345
+
346
+ for k, item in enumerate(key):
347
+ scalar = False
348
+ if isinstance(item, slice):
349
+ step = item.step
350
+ start = item.start
351
+ stop = item.stop
352
+ if start is None:
353
+ start = 0
354
+ if step is None:
355
+ step = 1
356
+ if isinstance(step, (_nx.complexfloating, complex)):
357
+ size = int(abs(step))
358
+ newobj = linspace(start, stop, num=size)
359
+ else:
360
+ newobj = _nx.arange(start, stop, step)
361
+ if ndmin > 1:
362
+ newobj = array(newobj, copy=False, ndmin=ndmin)
363
+ if trans1d != -1:
364
+ newobj = newobj.swapaxes(-1, trans1d)
365
+ elif isinstance(item, str):
366
+ if k != 0:
367
+ raise ValueError("special directives must be the "
368
+ "first entry.")
369
+ if item in ('r', 'c'):
370
+ matrix = True
371
+ col = (item == 'c')
372
+ continue
373
+ if ',' in item:
374
+ vec = item.split(',')
375
+ try:
376
+ axis, ndmin = [int(x) for x in vec[:2]]
377
+ if len(vec) == 3:
378
+ trans1d = int(vec[2])
379
+ continue
380
+ except Exception as e:
381
+ raise ValueError(
382
+ "unknown special directive {!r}".format(item)
383
+ ) from e
384
+ try:
385
+ axis = int(item)
386
+ continue
387
+ except (ValueError, TypeError) as e:
388
+ raise ValueError("unknown special directive") from e
389
+ elif type(item) in ScalarType:
390
+ scalar = True
391
+ newobj = item
392
+ else:
393
+ item_ndim = np.ndim(item)
394
+ newobj = array(item, copy=False, subok=True, ndmin=ndmin)
395
+ if trans1d != -1 and item_ndim < ndmin:
396
+ k2 = ndmin - item_ndim
397
+ k1 = trans1d
398
+ if k1 < 0:
399
+ k1 += k2 + 1
400
+ defaxes = list(range(ndmin))
401
+ axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
402
+ newobj = newobj.transpose(axes)
403
+
404
+ objs.append(newobj)
405
+ if scalar:
406
+ result_type_objs.append(item)
407
+ else:
408
+ result_type_objs.append(newobj.dtype)
409
+
410
+ # Ensure that scalars won't up-cast unless warranted, for 0, drops
411
+ # through to error in concatenate.
412
+ if len(result_type_objs) != 0:
413
+ final_dtype = _nx.result_type(*result_type_objs)
414
+ # concatenate could do the cast, but that can be overridden:
415
+ objs = [array(obj, copy=False, subok=True,
416
+ ndmin=ndmin, dtype=final_dtype) for obj in objs]
417
+
418
+ res = self.concatenate(tuple(objs), axis=axis)
419
+
420
+ if matrix:
421
+ oldndim = res.ndim
422
+ res = self.makemat(res)
423
+ if oldndim == 1 and col:
424
+ res = res.T
425
+ return res
426
+
427
+ def __len__(self):
428
+ return 0
429
+
430
+ # separate classes are used here instead of just making r_ = concatenator(0),
431
+ # etc. because otherwise we couldn't get the doc string to come out right
432
+ # in help(r_)
433
+
434
+
435
+ class RClass(AxisConcatenator):
436
+ """
437
+ Translates slice objects to concatenation along the first axis.
438
+
439
+ This is a simple way to build up arrays quickly. There are two use cases.
440
+
441
+ 1. If the index expression contains comma separated arrays, then stack
442
+ them along their first axis.
443
+ 2. If the index expression contains slice notation or scalars then create
444
+ a 1-D array with a range indicated by the slice notation.
445
+
446
+ If slice notation is used, the syntax ``start:stop:step`` is equivalent
447
+ to ``np.arange(start, stop, step)`` inside of the brackets. However, if
448
+ ``step`` is an imaginary number (e.g. 100j) then its integer portion is
449
+ interpreted as a number-of-points desired and the start and stop are
450
+ inclusive. In other words ``start:stop:stepj`` is interpreted as
451
+ ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
452
+ After expansion of slice notation, all comma separated sequences are
453
+ concatenated together.
454
+
455
+ Optional character strings placed as the first element of the index
456
+ expression can be used to change the output. The strings 'r' or 'c' result
457
+ in matrix output. If the result is 1-D and 'r' is specified, a 1 x N (row)
458
+ matrix is produced. If the result is 1-D and 'c' is specified, then an N x 1
459
+ (column) matrix is produced. If the result is 2-D then both provide the
460
+ same matrix result.
461
+
462
+ A string integer specifies which axis to stack multiple comma separated
463
+ arrays along. A string of two comma-separated integers allows indication
464
+ of the minimum number of dimensions to force each entry into as the
465
+ second integer (the axis to concatenate along is still the first integer).
466
+
467
+ A string with three comma-separated integers allows specification of the
468
+ axis to concatenate along, the minimum number of dimensions to force the
469
+ entries to, and which axis should contain the start of the arrays which
470
+ are less than the specified number of dimensions. In other words the third
471
+ integer allows you to specify where the 1's should be placed in the shape
472
+ of the arrays that have their shapes upgraded. By default, they are placed
473
+ in the front of the shape tuple. The third argument allows you to specify
474
+ where the start of the array should be instead. Thus, a third argument of
475
+ '0' would place the 1's at the end of the array shape. Negative integers
476
+ specify where in the new shape tuple the last dimension of upgraded arrays
477
+ should be placed, so the default is '-1'.
478
+
479
+ Parameters
480
+ ----------
481
+ Not a function, so takes no parameters
482
+
483
+
484
+ Returns
485
+ -------
486
+ A concatenated ndarray or matrix.
487
+
488
+ See Also
489
+ --------
490
+ concatenate : Join a sequence of arrays along an existing axis.
491
+ c_ : Translates slice objects to concatenation along the second axis.
492
+
493
+ Examples
494
+ --------
495
+ >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
496
+ array([1, 2, 3, ..., 4, 5, 6])
497
+ >>> np.r_[-1:1:6j, [0]*3, 5, 6]
498
+ array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
499
+
500
+ String integers specify the axis to concatenate along or the minimum
501
+ number of dimensions to force entries into.
502
+
503
+ >>> a = np.array([[0, 1, 2], [3, 4, 5]])
504
+ >>> np.r_['-1', a, a] # concatenate along last axis
505
+ array([[0, 1, 2, 0, 1, 2],
506
+ [3, 4, 5, 3, 4, 5]])
507
+ >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
508
+ array([[1, 2, 3],
509
+ [4, 5, 6]])
510
+
511
+ >>> np.r_['0,2,0', [1,2,3], [4,5,6]]
512
+ array([[1],
513
+ [2],
514
+ [3],
515
+ [4],
516
+ [5],
517
+ [6]])
518
+ >>> np.r_['1,2,0', [1,2,3], [4,5,6]]
519
+ array([[1, 4],
520
+ [2, 5],
521
+ [3, 6]])
522
+
523
+ Using 'r' or 'c' as a first string argument creates a matrix.
524
+
525
+ >>> np.r_['r',[1,2,3], [4,5,6]]
526
+ matrix([[1, 2, 3, 4, 5, 6]])
527
+
528
+ """
529
+
530
+ def __init__(self):
531
+ AxisConcatenator.__init__(self, 0)
532
+
533
+
534
+ r_ = RClass()
535
+
536
+
537
+ class CClass(AxisConcatenator):
538
+ """
539
+ Translates slice objects to concatenation along the second axis.
540
+
541
+ This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
542
+ useful because of its common occurrence. In particular, arrays will be
543
+ stacked along their last axis after being upgraded to at least 2-D with
544
+ 1's post-pended to the shape (column vectors made out of 1-D arrays).
545
+
546
+ See Also
547
+ --------
548
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
549
+ r_ : For more detailed documentation.
550
+
551
+ Examples
552
+ --------
553
+ >>> np.c_[np.array([1,2,3]), np.array([4,5,6])]
554
+ array([[1, 4],
555
+ [2, 5],
556
+ [3, 6]])
557
+ >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
558
+ array([[1, 2, 3, ..., 4, 5, 6]])
559
+
560
+ """
561
+
562
+ def __init__(self):
563
+ AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
564
+
565
+
566
+ c_ = CClass()
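For 1-D inputs, the ``'-1,2,0'`` translation described above coincides with `column_stack` (a quick check, offered as a sketch):

>>> u, v = np.array([1, 2, 3]), np.array([4, 5, 6])
>>> # sketch: both stack the 1-D inputs as columns of a 2-D result
>>> np.array_equal(np.c_[u, v], np.column_stack((u, v)))
True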
567
+
568
+
569
+ @set_module('numpy')
570
+ class ndenumerate:
571
+ """
572
+ Multidimensional index iterator.
573
+
574
+ Return an iterator yielding pairs of array coordinates and values.
575
+
576
+ Parameters
577
+ ----------
578
+ arr : ndarray
579
+ Input array.
580
+
581
+ See Also
582
+ --------
583
+ ndindex, flatiter
584
+
585
+ Examples
586
+ --------
587
+ >>> a = np.array([[1, 2], [3, 4]])
588
+ >>> for index, x in np.ndenumerate(a):
589
+ ... print(index, x)
590
+ (0, 0) 1
591
+ (0, 1) 2
592
+ (1, 0) 3
593
+ (1, 1) 4
594
+
595
+ """
596
+
597
+ def __init__(self, arr):
598
+ self.iter = np.asarray(arr).flat
599
+
600
+ def __next__(self):
601
+ """
602
+ Standard iterator method, returns the index tuple and array value.
603
+
604
+ Returns
605
+ -------
606
+ coords : tuple of ints
607
+ The indices of the current iteration.
608
+ val : scalar
609
+ The array element of the current iteration.
610
+
611
+ """
612
+ return self.iter.coords, next(self.iter)
613
+
614
+ def __iter__(self):
615
+ return self
616
+
617
+
618
+ @set_module('numpy')
619
+ class ndindex:
620
+ """
621
+ An N-dimensional iterator object to index arrays.
622
+
623
+ Given the shape of an array, an `ndindex` instance iterates over
624
+ the N-dimensional index of the array. At each iteration a tuple
625
+ of indices is returned, the last dimension is iterated over first.
626
+
627
+ Parameters
628
+ ----------
629
+ shape : ints, or a single tuple of ints
630
+ The size of each dimension of the array can be passed as
631
+ individual parameters or as the elements of a tuple.
632
+
633
+ See Also
634
+ --------
635
+ ndenumerate, flatiter
636
+
637
+ Examples
638
+ --------
639
+ Dimensions as individual arguments
640
+
641
+ >>> for index in np.ndindex(3, 2, 1):
642
+ ... print(index)
643
+ (0, 0, 0)
644
+ (0, 1, 0)
645
+ (1, 0, 0)
646
+ (1, 1, 0)
647
+ (2, 0, 0)
648
+ (2, 1, 0)
649
+
650
+ Same dimensions - but in a tuple ``(3, 2, 1)``
651
+
652
+ >>> for index in np.ndindex((3, 2, 1)):
653
+ ... print(index)
654
+ (0, 0, 0)
655
+ (0, 1, 0)
656
+ (1, 0, 0)
657
+ (1, 1, 0)
658
+ (2, 0, 0)
659
+ (2, 1, 0)
660
+
661
+ """
662
+
663
+ def __init__(self, *shape):
664
+ if len(shape) == 1 and isinstance(shape[0], tuple):
665
+ shape = shape[0]
666
+ x = as_strided(_nx.zeros(1), shape=shape,
667
+ strides=_nx.zeros_like(shape))
668
+ self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
669
+ order='C')
670
+
671
+ def __iter__(self):
672
+ return self
673
+
674
+ def ndincr(self):
675
+ """
676
+ Increment the multi-dimensional index by one.
677
+
678
+ This method is for backward compatibility only: do not use.
679
+
680
+ .. deprecated:: 1.20.0
681
+ This method has been advised against since numpy 1.8.0, but only
682
+ started emitting DeprecationWarning as of this version.
683
+ """
684
+ # NumPy 1.20.0, 2020-09-08
685
+ warnings.warn(
686
+ "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead",
687
+ DeprecationWarning, stacklevel=2)
688
+ next(self)
689
+
690
+ def __next__(self):
691
+ """
692
+ Standard iterator method, updates the index and returns the index
693
+ tuple.
694
+
695
+ Returns
696
+ -------
697
+ val : tuple of ints
698
+ Returns a tuple containing the indices of the current
699
+ iteration.
700
+
701
+ """
702
+ next(self._it)
703
+ return self._it.multi_index
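Because the iteration above proceeds in C order, `ndindex` can drive an explicit element-wise loop over any shape (usage sketch):

>>> out = np.empty((2, 3), dtype=int)
>>> for idx in np.ndindex(out.shape):   # sketch: fill via explicit indices
...     out[idx] = 10 * idx[0] + idx[1]
...
>>> out
array([[ 0,  1,  2],
       [10, 11, 12]])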
704
+
705
+
706
+ # You can do all this with slice() plus a few special objects,
707
+ # but there's a lot to remember. This version is simpler because
708
+ # it uses the standard array indexing syntax.
709
+ #
710
+ # Written by Konrad Hinsen <[email protected]>
711
+ # last revision: 1999-7-23
712
+ #
713
+ # Cosmetic changes by T. Oliphant 2001
714
+ #
715
+ #
716
+
717
+ class IndexExpression:
718
+ """
719
+ A nicer way to build up index tuples for arrays.
720
+
721
+ .. note::
722
+ Use one of the two predefined instances `index_exp` or `s_`
723
+ rather than directly using `IndexExpression`.
724
+
725
+ For any index combination, including slicing and axis insertion,
726
+ ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
727
+ array `a`. However, ``np.index_exp[indices]`` can be used anywhere
728
+ in Python code and returns a tuple of slice objects that can be
729
+ used in the construction of complex index expressions.
730
+
731
+ Parameters
732
+ ----------
733
+ maketuple : bool
734
+ If True, always returns a tuple.
735
+
736
+ See Also
737
+ --------
738
+ index_exp : Predefined instance that always returns a tuple:
739
+ `index_exp = IndexExpression(maketuple=True)`.
740
+ s_ : Predefined instance without tuple conversion:
741
+ `s_ = IndexExpression(maketuple=False)`.
742
+
743
+ Notes
744
+ -----
745
+ You can do all this with `slice()` plus a few special objects,
746
+ but there's a lot to remember and this version is simpler because
747
+ it uses the standard array indexing syntax.
748
+
749
+ Examples
750
+ --------
751
+ >>> np.s_[2::2]
752
+ slice(2, None, 2)
753
+ >>> np.index_exp[2::2]
754
+ (slice(2, None, 2),)
755
+
756
+ >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
757
+ array([2, 4])
758
+
759
+ """
760
+
761
+ def __init__(self, maketuple):
762
+ self.maketuple = maketuple
763
+
764
+ def __getitem__(self, item):
765
+ if self.maketuple and not isinstance(item, tuple):
766
+ return (item,)
767
+ else:
768
+ return item
769
+
770
+
771
+ index_exp = IndexExpression(maketuple=True)
772
+ s_ = IndexExpression(maketuple=False)
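One practical consequence of the tuple behaviour: `index_exp` results can be concatenated with ``+`` to build multi-axis indices incrementally (a usage sketch):

>>> rows = np.index_exp[1:-1]          # a 1-tuple containing a slice
>>> cols = np.index_exp[::2]
>>> b = np.arange(12).reshape(3, 4)
>>> b[rows + cols]                     # sketch: same as b[1:-1, ::2]
array([[4, 6]])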
773
+
774
+ # End contribution from Konrad.
775
+
776
+
777
+ # The following functions complement those in twodim_base, but are
778
+ # applicable to N-dimensions.
779
+
780
+
781
+ def _fill_diagonal_dispatcher(a, val, wrap=None):
782
+ return (a,)
783
+
784
+
785
+ @array_function_dispatch(_fill_diagonal_dispatcher)
786
+ def fill_diagonal(a, val, wrap=False):
787
+ """Fill the main diagonal of the given array of any dimensionality.
788
+
789
+ For an array `a` with ``a.ndim >= 2``, the diagonal is the list of
790
+ locations with indices ``a[i, ..., i]`` all identical. This function
791
+ modifies the input array in-place, it does not return a value.
792
+
793
+ Parameters
794
+ ----------
795
+ a : array, at least 2-D.
796
+ Array whose diagonal is to be filled, it gets modified in-place.
797
+
798
+ val : scalar or array_like
799
+ Value(s) to write on the diagonal. If `val` is scalar, the value is
800
+ written along the diagonal. If array-like, the flattened `val` is
801
+ written along the diagonal, repeating if necessary to fill all
802
+ diagonal entries.
803
+
804
+ wrap : bool
805
+ For tall matrices in NumPy versions up to 1.6.2, the
806
+ diagonal "wrapped" after N columns. You can have this behavior
807
+ with this option. This affects only tall matrices.
808
+
809
+ See also
810
+ --------
811
+ diag_indices, diag_indices_from
812
+
813
+ Notes
814
+ -----
815
+ .. versionadded:: 1.4.0
816
+
817
+ This functionality can be obtained via `diag_indices`, but internally
818
+ this version uses a much faster implementation that never constructs the
819
+ indices and uses simple slicing.
820
+
821
+ Examples
822
+ --------
823
+ >>> a = np.zeros((3, 3), int)
824
+ >>> np.fill_diagonal(a, 5)
825
+ >>> a
826
+ array([[5, 0, 0],
827
+ [0, 5, 0],
828
+ [0, 0, 5]])
829
+
830
+ The same function can operate on a 4-D array:
831
+
832
+ >>> a = np.zeros((3, 3, 3, 3), int)
833
+ >>> np.fill_diagonal(a, 4)
834
+
835
+ We only show a few blocks for clarity:
836
+
837
+ >>> a[0, 0]
838
+ array([[4, 0, 0],
839
+ [0, 0, 0],
840
+ [0, 0, 0]])
841
+ >>> a[1, 1]
842
+ array([[0, 0, 0],
843
+ [0, 4, 0],
844
+ [0, 0, 0]])
845
+ >>> a[2, 2]
846
+ array([[0, 0, 0],
847
+ [0, 0, 0],
848
+ [0, 0, 4]])
849
+
850
+ The wrap option affects only tall matrices:
851
+
852
+ >>> # tall matrices no wrap
853
+ >>> a = np.zeros((5, 3), int)
854
+ >>> np.fill_diagonal(a, 4)
855
+ >>> a
856
+ array([[4, 0, 0],
857
+ [0, 4, 0],
858
+ [0, 0, 4],
859
+ [0, 0, 0],
860
+ [0, 0, 0]])
861
+
862
+ >>> # tall matrices wrap
863
+ >>> a = np.zeros((5, 3), int)
864
+ >>> np.fill_diagonal(a, 4, wrap=True)
865
+ >>> a
866
+ array([[4, 0, 0],
867
+ [0, 4, 0],
868
+ [0, 0, 4],
869
+ [0, 0, 0],
870
+ [4, 0, 0]])
871
+
872
+ >>> # wide matrices
873
+ >>> a = np.zeros((3, 5), int)
874
+ >>> np.fill_diagonal(a, 4, wrap=True)
875
+ >>> a
876
+ array([[4, 0, 0, 0, 0],
877
+ [0, 4, 0, 0, 0],
878
+ [0, 0, 4, 0, 0]])
879
+
880
+ The anti-diagonal can be filled by reversing the order of elements
881
+ using either `numpy.flipud` or `numpy.fliplr`.
882
+
883
+ >>> a = np.zeros((3, 3), int)
884
+ >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip
885
+ >>> a
886
+ array([[0, 0, 1],
887
+ [0, 2, 0],
888
+ [3, 0, 0]])
889
+ >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip
890
+ >>> a
891
+ array([[0, 0, 3],
892
+ [0, 2, 0],
893
+ [1, 0, 0]])
894
+
895
+ Note that the order in which the diagonal is filled varies depending
896
+ on the flip function.
897
+ """
898
+ if a.ndim < 2:
899
+ raise ValueError("array must be at least 2-d")
900
+ end = None
901
+ if a.ndim == 2:
902
+ # Explicit, fast formula for the common case. For 2-d arrays, we
903
+ # accept rectangular ones.
904
+ step = a.shape[1] + 1
905
+ # This is needed so that tall matrices don't have the diagonal wrap.
906
+ if not wrap:
907
+ end = a.shape[1] * a.shape[1]
908
+ else:
909
+ # For more than d=2, the strided formula is only valid for arrays with
910
+ # all dimensions equal, so we check first.
911
+ if not np.all(diff(a.shape) == 0):
912
+ raise ValueError("All dimensions of input must be of equal length")
913
+ step = 1 + (np.cumprod(a.shape[:-1])).sum()
914
+
915
+ # Write the value out into the diagonal.
916
+ a.flat[:end:step] = val
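For the N-d branch, the stride works because moving from ``a[i, ..., i]`` to ``a[i+1, ..., i+1]`` advances the flat index by ``1 + n + n**2 + ... + n**(N-1)``, which is exactly ``1 + np.cumprod(a.shape[:-1]).sum()``. A hand check (sketch):

>>> a = np.zeros((3, 3, 3), dtype=int)
>>> step = 1 + np.cumprod(a.shape[:-1]).sum()   # 1 + 3 + 9 = 13
>>> a.flat[::step] = 1        # sketch: same effect as fill_diagonal(a, 1)
>>> bool(a[0, 0, 0] and a[1, 1, 1] and a[2, 2, 2])
True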
917
+
918
+
919
+ @set_module('numpy')
920
+ def diag_indices(n, ndim=2):
921
+ """
922
+ Return the indices to access the main diagonal of an array.
923
+
924
+ This returns a tuple of indices that can be used to access the main
925
+ diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
926
+ (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
927
+ ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
928
+ for ``i = [0..n-1]``.
929
+
930
+ Parameters
931
+ ----------
932
+ n : int
933
+ The size, along each dimension, of the arrays for which the returned
934
+ indices can be used.
935
+
936
+ ndim : int, optional
937
+ The number of dimensions.
938
+
939
+ See Also
940
+ --------
941
+ diag_indices_from
942
+
943
+ Notes
944
+ -----
945
+ .. versionadded:: 1.4.0
946
+
947
+ Examples
948
+ --------
949
+ Create a set of indices to access the diagonal of a (4, 4) array:
950
+
951
+ >>> di = np.diag_indices(4)
952
+ >>> di
953
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
954
+ >>> a = np.arange(16).reshape(4, 4)
955
+ >>> a
956
+ array([[ 0, 1, 2, 3],
957
+ [ 4, 5, 6, 7],
958
+ [ 8, 9, 10, 11],
959
+ [12, 13, 14, 15]])
960
+ >>> a[di] = 100
961
+ >>> a
962
+ array([[100, 1, 2, 3],
963
+ [ 4, 100, 6, 7],
964
+ [ 8, 9, 100, 11],
965
+ [ 12, 13, 14, 100]])
966
+
967
+ Now, we create indices to manipulate a 3-D array:
968
+
969
+ >>> d3 = np.diag_indices(2, 3)
970
+ >>> d3
971
+ (array([0, 1]), array([0, 1]), array([0, 1]))
972
+
973
+ And use it to set the diagonal of an array of zeros to 1:
974
+
975
+ >>> a = np.zeros((2, 2, 2), dtype=int)
976
+ >>> a[d3] = 1
977
+ >>> a
978
+ array([[[1, 0],
979
+ [0, 0]],
980
+ [[0, 0],
981
+ [0, 1]]])
982
+
983
+ """
984
+ idx = np.arange(n)
985
+ return (idx,) * ndim
986
+
987
+
988
+ def _diag_indices_from(arr):
989
+ return (arr,)
990
+
991
+
992
+ @array_function_dispatch(_diag_indices_from)
993
+ def diag_indices_from(arr):
994
+ """
995
+ Return the indices to access the main diagonal of an n-dimensional array.
996
+
997
+ See `diag_indices` for full details.
998
+
999
+ Parameters
1000
+ ----------
1001
+ arr : array, at least 2-D
1002
+
1003
+ See Also
1004
+ --------
1005
+ diag_indices
1006
+
1007
+ Notes
1008
+ -----
1009
+ .. versionadded:: 1.4.0
1010
+
1011
+ Examples
1012
+ --------
1013
+
1014
+ Create a 4 by 4 array.
1015
+
1016
+ >>> a = np.arange(16).reshape(4, 4)
1017
+ >>> a
1018
+ array([[ 0, 1, 2, 3],
1019
+ [ 4, 5, 6, 7],
1020
+ [ 8, 9, 10, 11],
1021
+ [12, 13, 14, 15]])
1022
+
1023
+ Get the indices of the diagonal elements.
1024
+
1025
+ >>> di = np.diag_indices_from(a)
1026
+ >>> di
1027
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
1028
+
1029
+ >>> a[di]
1030
+ array([ 0, 5, 10, 15])
1031
+
1032
+ This is simply syntactic sugar for diag_indices.
1033
+
1034
+ >>> np.diag_indices(a.shape[0])
1035
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
1036
+
1037
+ """
1038
+
1039
+ if not arr.ndim >= 2:
1040
+ raise ValueError("input array must be at least 2-d")
1041
+ # For more than d=2, the strided formula is only valid for arrays with
1042
+ # all dimensions equal, so we check first.
1043
+ if not np.all(diff(arr.shape) == 0):
1044
+ raise ValueError("All dimensions of input must be of equal length")
1045
+
1046
+ return diag_indices(arr.shape[0], arr.ndim)
.venv/lib/python3.11/site-packages/numpy/lib/index_tricks.pyi ADDED
@@ -0,0 +1,162 @@
1
+ from collections.abc import Sequence
2
+ from typing import (
3
+ Any,
4
+ TypeVar,
5
+ Generic,
6
+ overload,
7
+ Literal,
8
+ SupportsIndex,
9
+ )
10
+
11
+ from numpy import (
12
+ # Circumvent a naming conflict with `AxisConcatenator.matrix`
13
+ matrix as _Matrix,
14
+ ndenumerate as ndenumerate,
15
+ ndindex as ndindex,
16
+ ndarray,
17
+ dtype,
18
+ integer,
19
+ str_,
20
+ bytes_,
21
+ bool_,
22
+ int_,
23
+ float_,
24
+ complex_,
25
+ intp,
26
+ _OrderCF,
27
+ _ModeKind,
28
+ )
29
+ from numpy._typing import (
30
+ # Arrays
31
+ ArrayLike,
32
+ _NestedSequence,
33
+     _FiniteNestedSequence,
+     NDArray,
+     _ArrayLikeInt,
+
+     # DTypes
+     DTypeLike,
+     _SupportsDType,
+
+     # Shapes
+     _ShapeLike,
+ )
+
+ from numpy.core.multiarray import (
+     unravel_index as unravel_index,
+     ravel_multi_index as ravel_multi_index,
+ )
+
+ _T = TypeVar("_T")
+ _DType = TypeVar("_DType", bound=dtype[Any])
+ _BoolType = TypeVar("_BoolType", Literal[True], Literal[False])
+ _TupType = TypeVar("_TupType", bound=tuple[Any, ...])
+ _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
+
+ __all__: list[str]
+
+ @overload
+ def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ...
+ @overload
+ def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ...
+ @overload
+ def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ...
+ @overload
+ def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[bool_], ...]: ...
+ @overload
+ def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ...
+ @overload
+ def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float_], ...]: ...
+ @overload
+ def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex_], ...]: ...
+
+ class nd_grid(Generic[_BoolType]):
+     sparse: _BoolType
+     def __init__(self, sparse: _BoolType = ...) -> None: ...
+     @overload
+     def __getitem__(
+         self: nd_grid[Literal[False]],
+         key: slice | Sequence[slice],
+     ) -> NDArray[Any]: ...
+     @overload
+     def __getitem__(
+         self: nd_grid[Literal[True]],
+         key: slice | Sequence[slice],
+     ) -> list[NDArray[Any]]: ...
+
+ class MGridClass(nd_grid[Literal[False]]):
+     def __init__(self) -> None: ...
+
+ mgrid: MGridClass
+
+ class OGridClass(nd_grid[Literal[True]]):
+     def __init__(self) -> None: ...
+
+ ogrid: OGridClass
+
+ class AxisConcatenator:
+     axis: int
+     matrix: bool
+     ndmin: int
+     trans1d: int
+     def __init__(
+         self,
+         axis: int = ...,
+         matrix: bool = ...,
+         ndmin: int = ...,
+         trans1d: int = ...,
+     ) -> None: ...
+     @staticmethod
+     @overload
+     def concatenate(  # type: ignore[misc]
+         *a: ArrayLike, axis: SupportsIndex = ..., out: None = ...
+     ) -> NDArray[Any]: ...
+     @staticmethod
+     @overload
+     def concatenate(
+         *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ...
+     ) -> _ArrayType: ...
+     @staticmethod
+     def makemat(
+         data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ...
+     ) -> _Matrix[Any, Any]: ...
+
+     # TODO: Sort out this `__getitem__` method
+     def __getitem__(self, key: Any) -> Any: ...
+
+ class RClass(AxisConcatenator):
+     axis: Literal[0]
+     matrix: Literal[False]
+     ndmin: Literal[1]
+     trans1d: Literal[-1]
+     def __init__(self) -> None: ...
+
+ r_: RClass
+
+ class CClass(AxisConcatenator):
+     axis: Literal[-1]
+     matrix: Literal[False]
+     ndmin: Literal[2]
+     trans1d: Literal[0]
+     def __init__(self) -> None: ...
+
+ c_: CClass
+
+ class IndexExpression(Generic[_BoolType]):
+     maketuple: _BoolType
+     def __init__(self, maketuple: _BoolType) -> None: ...
+     @overload
+     def __getitem__(self, item: _TupType) -> _TupType: ...  # type: ignore[misc]
+     @overload
+     def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ...
+     @overload
+     def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ...
+
+ index_exp: IndexExpression[Literal[True]]
+ s_: IndexExpression[Literal[False]]
+
+ def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None: ...
+ def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ...
+ def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ...
+
+ # NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex`
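These stubs annotate the index helpers (`ix_`, `mgrid`/`ogrid`, `r_`/`c_`, `index_exp`/`s_`). As a quick orientation, a minimal sketch of how the annotated runtime objects behave (plain NumPy usage; nothing below is part of the stub file itself):

    import numpy as np

    a = np.arange(12).reshape(3, 4)
    rows, cols = np.ix_([0, 2], [1, 3])   # open-mesh index arrays, shapes (2, 1) and (1, 2)
    sub = a[rows, cols]                   # 2x2 cross-product selection
    grid = np.mgrid[0:2, 0:2]             # nd_grid(sparse=False): dense ndarray, shape (2, 2, 2)
    og = np.ogrid[0:2, 0:2]               # nd_grid(sparse=True): list of broadcastable arrays
    row = np.r_[a[0], 99]                 # RClass: 1-D concatenation along axis 0
    col = np.c_[a[:, 0], a[:, 1]]         # CClass: stacks 1-D inputs as columns (ndmin=2)
    sl = np.s_[1:3, ::2]                  # IndexExpression: returns the slice tuple itself
    assert sub.shape == (2, 2) and a[sl].shape == (2, 2)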
.venv/lib/python3.11/site-packages/numpy/lib/mixins.py ADDED
@@ -0,0 +1,177 @@
+ """Mixin classes for custom array types that don't inherit from ndarray."""
+ from numpy.core import umath as um
+
+
+ __all__ = ['NDArrayOperatorsMixin']
+
+
+ def _disables_array_ufunc(obj):
+     """True when __array_ufunc__ is set to None."""
+     try:
+         return obj.__array_ufunc__ is None
+     except AttributeError:
+         return False
+
+
+ def _binary_method(ufunc, name):
+     """Implement a forward binary method with a ufunc, e.g., __add__."""
+     def func(self, other):
+         if _disables_array_ufunc(other):
+             return NotImplemented
+         return ufunc(self, other)
+     func.__name__ = '__{}__'.format(name)
+     return func
+
+
+ def _reflected_binary_method(ufunc, name):
+     """Implement a reflected binary method with a ufunc, e.g., __radd__."""
+     def func(self, other):
+         if _disables_array_ufunc(other):
+             return NotImplemented
+         return ufunc(other, self)
+     func.__name__ = '__r{}__'.format(name)
+     return func
+
+
+ def _inplace_binary_method(ufunc, name):
+     """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
+     def func(self, other):
+         return ufunc(self, other, out=(self,))
+     func.__name__ = '__i{}__'.format(name)
+     return func
+
+
+ def _numeric_methods(ufunc, name):
+     """Implement forward, reflected and inplace binary methods with a ufunc."""
+     return (_binary_method(ufunc, name),
+             _reflected_binary_method(ufunc, name),
+             _inplace_binary_method(ufunc, name))
+
+
+ def _unary_method(ufunc, name):
+     """Implement a unary special method with a ufunc."""
+     def func(self):
+         return ufunc(self)
+     func.__name__ = '__{}__'.format(name)
+     return func
+
+
+ class NDArrayOperatorsMixin:
+     """Mixin defining all operator special methods using __array_ufunc__.
+
+     This class implements the special methods for almost all of Python's
+     builtin operators defined in the `operator` module, including comparisons
+     (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
+     deferring to the ``__array_ufunc__`` method, which subclasses must
+     implement.
+
+     It is useful for writing classes that do not inherit from `numpy.ndarray`,
+     but that should support arithmetic and numpy universal functions like
+     arrays as described in `A Mechanism for Overriding Ufuncs
+     <https://numpy.org/neps/nep-0013-ufunc-overrides.html>`_.
+
+     As a trivial example, consider this implementation of an ``ArrayLike``
+     class that simply wraps a NumPy array and ensures that the result of any
+     arithmetic operation is also an ``ArrayLike`` object::
+
+         class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+             def __init__(self, value):
+                 self.value = np.asarray(value)
+
+             # One might also consider adding the built-in list type to this
+             # list, to support operations like np.add(array_like, list)
+             _HANDLED_TYPES = (np.ndarray, numbers.Number)
+
+             def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                 out = kwargs.get('out', ())
+                 for x in inputs + out:
+                     # Only support operations with instances of
+                     # _HANDLED_TYPES. Use ArrayLike instead of type(self)
+                     # for isinstance to allow subclasses that don't
+                     # override __array_ufunc__ to handle ArrayLike objects.
+                     if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
+                         return NotImplemented
+
+                 # Defer to the implementation of the ufunc on unwrapped values.
+                 inputs = tuple(x.value if isinstance(x, ArrayLike) else x
+                                for x in inputs)
+                 if out:
+                     kwargs['out'] = tuple(
+                         x.value if isinstance(x, ArrayLike) else x
+                         for x in out)
+                 result = getattr(ufunc, method)(*inputs, **kwargs)
+
+                 if type(result) is tuple:
+                     # multiple return values
+                     return tuple(type(self)(x) for x in result)
+                 elif method == 'at':
+                     # no return value
+                     return None
+                 else:
+                     # one return value
+                     return type(self)(result)
+
+             def __repr__(self):
+                 return '%s(%r)' % (type(self).__name__, self.value)
+
+     In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
+     the result is always another ``ArrayLike``:
+
+         >>> x = ArrayLike([1, 2, 3])
+         >>> x - 1
+         ArrayLike(array([0, 1, 2]))
+         >>> 1 - x
+         ArrayLike(array([ 0, -1, -2]))
+         >>> np.arange(3) - x
+         ArrayLike(array([-1, -1, -1]))
+         >>> x - np.arange(3)
+         ArrayLike(array([1, 1, 1]))
+
+     Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
+     with arbitrary, unrecognized types. This ensures that interactions with
+     ArrayLike preserve a well-defined casting hierarchy.
+
+     .. versionadded:: 1.13
+     """
+     __slots__ = ()
+     # Like np.ndarray, this mixin class implements "Option 1" from the ufunc
+     # overrides NEP.
+
+     # comparisons don't have reflected and in-place versions
+     __lt__ = _binary_method(um.less, 'lt')
+     __le__ = _binary_method(um.less_equal, 'le')
+     __eq__ = _binary_method(um.equal, 'eq')
+     __ne__ = _binary_method(um.not_equal, 'ne')
+     __gt__ = _binary_method(um.greater, 'gt')
+     __ge__ = _binary_method(um.greater_equal, 'ge')
+
+     # numeric methods
+     __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
+     __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
+     __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
+     __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
+         um.matmul, 'matmul')
+     # Python 3 does not use __div__, __rdiv__, or __idiv__
+     __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
+         um.true_divide, 'truediv')
+     __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
+         um.floor_divide, 'floordiv')
+     __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')
+     __divmod__ = _binary_method(um.divmod, 'divmod')
+     __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')
+     # __idivmod__ does not exist
+     # TODO: handle the optional third argument for __pow__?
+     __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow')
+     __lshift__, __rlshift__, __ilshift__ = _numeric_methods(
+         um.left_shift, 'lshift')
+     __rshift__, __rrshift__, __irshift__ = _numeric_methods(
+         um.right_shift, 'rshift')
+     __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and')
+     __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor')
+     __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or')
+
+     # unary methods
+     __neg__ = _unary_method(um.negative, 'neg')
+     __pos__ = _unary_method(um.positive, 'pos')
+     __abs__ = _unary_method(um.absolute, 'abs')
+     __invert__ = _unary_method(um.invert, 'invert')
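The mixin's forward methods return NotImplemented whenever the other operand sets ``__array_ufunc__ = None``, which is how one wrapper type can refuse to mix with another. A minimal sketch of that hand-off (the ``Wrapped`` and ``OptsOut`` names are illustrative, not part of the file above):

    import numpy as np

    class Wrapped(np.lib.mixins.NDArrayOperatorsMixin):
        def __init__(self, value):
            self.value = np.asarray(value)

        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            # Unwrap Wrapped operands, defer to the ufunc, re-wrap the result.
            inputs = tuple(x.value if isinstance(x, Wrapped) else x for x in inputs)
            return Wrapped(getattr(ufunc, method)(*inputs, **kwargs))

    class OptsOut:
        __array_ufunc__ = None  # tells the mixin: refuse binary ops with me

    w = Wrapped([1, 2, 3])
    print((w + 1).value)        # [2 3 4] -- dispatched through um.add
    try:
        w + OptsOut()           # mixin returns NotImplemented -> TypeError
    except TypeError as e:
        print("refused:", e)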
.venv/lib/python3.11/site-packages/numpy/lib/mixins.pyi ADDED
@@ -0,0 +1,74 @@
+ from abc import ABCMeta, abstractmethod
+ from typing import Literal as L, Any
+
+ from numpy import ufunc
+
+ __all__: list[str]
+
+ # NOTE: `NDArrayOperatorsMixin` is not formally an abstract baseclass,
+ # even though it's reliant on subclasses implementing `__array_ufunc__`
+
+ # NOTE: The accepted input- and output-types of the various dunders are
+ # completely dependent on how `__array_ufunc__` is implemented.
+ # As such, only little type safety can be provided here.
+
+ class NDArrayOperatorsMixin(metaclass=ABCMeta):
+     @abstractmethod
+     def __array_ufunc__(
+         self,
+         ufunc: ufunc,
+         method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
+         *inputs: Any,
+         **kwargs: Any,
+     ) -> Any: ...
+     def __lt__(self, other: Any) -> Any: ...
+     def __le__(self, other: Any) -> Any: ...
+     def __eq__(self, other: Any) -> Any: ...
+     def __ne__(self, other: Any) -> Any: ...
+     def __gt__(self, other: Any) -> Any: ...
+     def __ge__(self, other: Any) -> Any: ...
+     def __add__(self, other: Any) -> Any: ...
+     def __radd__(self, other: Any) -> Any: ...
+     def __iadd__(self, other: Any) -> Any: ...
+     def __sub__(self, other: Any) -> Any: ...
+     def __rsub__(self, other: Any) -> Any: ...
+     def __isub__(self, other: Any) -> Any: ...
+     def __mul__(self, other: Any) -> Any: ...
+     def __rmul__(self, other: Any) -> Any: ...
+     def __imul__(self, other: Any) -> Any: ...
+     def __matmul__(self, other: Any) -> Any: ...
+     def __rmatmul__(self, other: Any) -> Any: ...
+     def __imatmul__(self, other: Any) -> Any: ...
+     def __truediv__(self, other: Any) -> Any: ...
+     def __rtruediv__(self, other: Any) -> Any: ...
+     def __itruediv__(self, other: Any) -> Any: ...
+     def __floordiv__(self, other: Any) -> Any: ...
+     def __rfloordiv__(self, other: Any) -> Any: ...
+     def __ifloordiv__(self, other: Any) -> Any: ...
+     def __mod__(self, other: Any) -> Any: ...
+     def __rmod__(self, other: Any) -> Any: ...
+     def __imod__(self, other: Any) -> Any: ...
+     def __divmod__(self, other: Any) -> Any: ...
+     def __rdivmod__(self, other: Any) -> Any: ...
+     def __pow__(self, other: Any) -> Any: ...
+     def __rpow__(self, other: Any) -> Any: ...
+     def __ipow__(self, other: Any) -> Any: ...
+     def __lshift__(self, other: Any) -> Any: ...
+     def __rlshift__(self, other: Any) -> Any: ...
+     def __ilshift__(self, other: Any) -> Any: ...
+     def __rshift__(self, other: Any) -> Any: ...
+     def __rrshift__(self, other: Any) -> Any: ...
+     def __irshift__(self, other: Any) -> Any: ...
+     def __and__(self, other: Any) -> Any: ...
+     def __rand__(self, other: Any) -> Any: ...
+     def __iand__(self, other: Any) -> Any: ...
+     def __xor__(self, other: Any) -> Any: ...
+     def __rxor__(self, other: Any) -> Any: ...
+     def __ixor__(self, other: Any) -> Any: ...
+     def __or__(self, other: Any) -> Any: ...
+     def __ror__(self, other: Any) -> Any: ...
+     def __ior__(self, other: Any) -> Any: ...
+     def __neg__(self) -> Any: ...
+     def __pos__(self) -> Any: ...
+     def __abs__(self) -> Any: ...
+     def __invert__(self) -> Any: ...
.venv/lib/python3.11/site-packages/numpy/lib/npyio.py ADDED
@@ -0,0 +1,2547 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import os
+ import re
+ import functools
+ import itertools
+ import warnings
+ import weakref
+ import contextlib
+ import operator
+ from operator import itemgetter, index as opindex, methodcaller
+ from collections.abc import Mapping
+
+ import numpy as np
+ from . import format
+ from ._datasource import DataSource
+ from numpy.core import overrides
+ from numpy.core.multiarray import packbits, unpackbits
+ from numpy.core._multiarray_umath import _load_from_filelike
+ from numpy.core.overrides import set_array_function_like_doc, set_module
+ from ._iotools import (
+     LineSplitter, NameValidator, StringConverter, ConverterError,
+     ConverterLockError, ConversionWarning, _is_string_like,
+     has_nested_fields, flatten_dtype, easy_dtype, _decode_line
+     )
+
+ from numpy.compat import (
+     asbytes, asstr, asunicode, os_fspath, os_PathLike,
+     pickle
+     )
+
+
+ __all__ = [
+     'savetxt', 'loadtxt', 'genfromtxt',
+     'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
+     'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
+     ]
+
+
+ array_function_dispatch = functools.partial(
+     overrides.array_function_dispatch, module='numpy')
+
+
+ class BagObj:
+     """
+     BagObj(obj)
+
+     Convert attribute look-ups to getitems on the object passed in.
+
+     Parameters
+     ----------
+     obj : class instance
+         Object on which attribute look-up is performed.
+
+     Examples
+     --------
+     >>> from numpy.lib.npyio import BagObj as BO
+     >>> class BagDemo:
+     ...     def __getitem__(self, key):  # An instance of BagObj(BagDemo)
+     ...                                  # will call this method when any
+     ...                                  # attribute look-up is required
+     ...         result = "Doesn't matter what you want, "
+     ...         return result + "you're gonna get this"
+     ...
+     >>> demo_obj = BagDemo()
+     >>> bagobj = BO(demo_obj)
+     >>> bagobj.hello_there
+     "Doesn't matter what you want, you're gonna get this"
+     >>> bagobj.I_can_be_anything
+     "Doesn't matter what you want, you're gonna get this"
+
+     """
+
+     def __init__(self, obj):
+         # Use weakref to make NpzFile objects collectable by refcount
+         self._obj = weakref.proxy(obj)
+
+     def __getattribute__(self, key):
+         try:
+             return object.__getattribute__(self, '_obj')[key]
+         except KeyError:
+             raise AttributeError(key) from None
+
+     def __dir__(self):
+         """
+         Enables dir(bagobj) to list the files in an NpzFile.
+
+         This also enables tab-completion in an interpreter or IPython.
+         """
+         return list(object.__getattribute__(self, '_obj').keys())
+
+
+ def zipfile_factory(file, *args, **kwargs):
+     """
+     Create a ZipFile.
+
+     Allows for Zip64, and the `file` argument can accept file, str, or
+     pathlib.Path objects. `args` and `kwargs` are passed to the
+     zipfile.ZipFile constructor.
+     """
+     if not hasattr(file, 'read'):
+         file = os_fspath(file)
+     import zipfile
+     kwargs['allowZip64'] = True
+     return zipfile.ZipFile(file, *args, **kwargs)
+
+
+ class NpzFile(Mapping):
+     """
+     NpzFile(fid)
+
+     A dictionary-like object with lazy-loading of files in the zipped
+     archive provided on construction.
+
+     `NpzFile` is used to load files in the NumPy ``.npz`` data archive
+     format. It assumes that files in the archive have a ``.npy`` extension;
+     other files are ignored.
+
+     The arrays and file strings are lazily loaded on either
+     getitem access using ``obj['key']`` or attribute lookup using
+     ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
+     be obtained with ``obj.files`` and the ZipFile object itself using
+     ``obj.zip``.
+
+     Attributes
+     ----------
+     files : list of str
+         List of all files in the archive with a ``.npy`` extension.
+     zip : ZipFile instance
+         The ZipFile object initialized with the zipped archive.
+     f : BagObj instance
+         An object on which attribute look-up can be performed as an
+         alternative to getitem access on the `NpzFile` instance itself.
+     allow_pickle : bool, optional
+         Allow loading pickled data. Default: False
+
+         .. versionchanged:: 1.16.3
+             Made default False in response to CVE-2019-6446.
+
+     pickle_kwargs : dict, optional
+         Additional keyword arguments to pass on to pickle.load.
+         These are only useful when loading object arrays saved on
+         Python 2 when using Python 3.
+     max_header_size : int, optional
+         Maximum allowed size of the header. Large headers may not be safe
+         to load securely and thus require explicitly passing a larger value.
+         See :py:func:`ast.literal_eval()` for details.
+         This option is ignored when `allow_pickle` is passed. In that case
+         the file is by definition trusted and the limit is unnecessary.
+
+     Parameters
+     ----------
+     fid : file or str
+         The zipped archive to open. This is either a file-like object
+         or a string containing the path to the archive.
+     own_fid : bool, optional
+         Whether NpzFile should close the file handle.
+         Requires that `fid` is a file-like object.
+
+     Examples
+     --------
+     >>> from tempfile import TemporaryFile
+     >>> outfile = TemporaryFile()
+     >>> x = np.arange(10)
+     >>> y = np.sin(x)
+     >>> np.savez(outfile, x=x, y=y)
+     >>> _ = outfile.seek(0)
+
+     >>> npz = np.load(outfile)
+     >>> isinstance(npz, np.lib.npyio.NpzFile)
+     True
+     >>> npz
+     NpzFile 'object' with keys: x, y
+     >>> sorted(npz.files)
+     ['x', 'y']
+     >>> npz['x']  # getitem access
+     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+     >>> npz.f.x  # attribute lookup
+     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+     """
+     # Make __exit__ safe if zipfile_factory raises an exception
+     zip = None
+     fid = None
+     _MAX_REPR_ARRAY_COUNT = 5
+
+     def __init__(self, fid, own_fid=False, allow_pickle=False,
+                  pickle_kwargs=None, *,
+                  max_header_size=format._MAX_HEADER_SIZE):
+         # Import is postponed to here since zipfile depends on gzip, an
+         # optional component of the so-called standard library.
+         _zip = zipfile_factory(fid)
+         self._files = _zip.namelist()
+         self.files = []
+         self.allow_pickle = allow_pickle
+         self.max_header_size = max_header_size
+         self.pickle_kwargs = pickle_kwargs
+         for x in self._files:
+             if x.endswith('.npy'):
+                 self.files.append(x[:-4])
+             else:
+                 self.files.append(x)
+         self.zip = _zip
+         self.f = BagObj(self)
+         if own_fid:
+             self.fid = fid
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.close()
+
+     def close(self):
+         """
+         Close the file.
+
+         """
+         if self.zip is not None:
+             self.zip.close()
+             self.zip = None
+         if self.fid is not None:
+             self.fid.close()
+             self.fid = None
+         self.f = None  # break reference cycle
+
+     def __del__(self):
+         self.close()
+
+     # Implement the Mapping ABC
+     def __iter__(self):
+         return iter(self.files)
+
+     def __len__(self):
+         return len(self.files)
+
+     def __getitem__(self, key):
+         # FIXME: This seems like it will copy strings around
+         #   more than is strictly necessary. The zipfile
+         #   will read the string and then
+         #   the format.read_array will copy the string
+         #   to another place in memory.
+         #   It would be better if the zipfile could read
+         #   (or at least uncompress) the data
+         #   directly into the array memory.
+         member = False
+         if key in self._files:
+             member = True
+         elif key in self.files:
+             member = True
+             key += '.npy'
+         if member:
+             bytes = self.zip.open(key)
+             magic = bytes.read(len(format.MAGIC_PREFIX))
+             bytes.close()
+             if magic == format.MAGIC_PREFIX:
+                 bytes = self.zip.open(key)
+                 return format.read_array(bytes,
+                                          allow_pickle=self.allow_pickle,
+                                          pickle_kwargs=self.pickle_kwargs,
+                                          max_header_size=self.max_header_size)
+             else:
+                 return self.zip.read(key)
+         else:
+             raise KeyError(f"{key} is not a file in the archive")
+
+     def __contains__(self, key):
+         return (key in self._files or key in self.files)
+
+     def __repr__(self):
+         # Get filename or default to `object`
+         if isinstance(self.fid, str):
+             filename = self.fid
+         else:
+             filename = getattr(self.fid, "name", "object")
+
+         # Get the names of the arrays
+         array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])
+         if len(self.files) > self._MAX_REPR_ARRAY_COUNT:
+             array_names += "..."
+         return f"NpzFile {filename!r} with keys: {array_names}"
+
+
+ @set_module('numpy')
+ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
+          encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE):
+     """
+     Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
+
+     .. warning:: Loading files that contain object arrays uses the ``pickle``
+                  module, which is not secure against erroneous or maliciously
+                  constructed data. Consider passing ``allow_pickle=False`` to
+                  load data that is known not to contain object arrays for the
+                  safer handling of untrusted sources.
+
+     Parameters
+     ----------
+     file : file-like object, string, or pathlib.Path
+         The file to read. File-like objects must support the
+         ``seek()`` and ``read()`` methods and must always
+         be opened in binary mode. Pickled files require that the
+         file-like object support the ``readline()`` method as well.
+     mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
+         If not None, then memory-map the file, using the given mode (see
+         `numpy.memmap` for a detailed description of the modes). A
+         memory-mapped array is kept on disk. However, it can be accessed
+         and sliced like any ndarray. Memory mapping is especially useful
+         for accessing small fragments of large files without reading the
+         entire file into memory.
+     allow_pickle : bool, optional
+         Allow loading pickled object arrays stored in npy files. Reasons for
+         disallowing pickles include security, as loading pickled data can
+         execute arbitrary code. If pickles are disallowed, loading object
+         arrays will fail. Default: False
+
+         .. versionchanged:: 1.16.3
+             Made default False in response to CVE-2019-6446.
+
+     fix_imports : bool, optional
+         Only useful when loading Python 2 generated pickled files on Python 3,
+         which includes npy/npz files containing object arrays. If `fix_imports`
+         is True, pickle will try to map the old Python 2 names to the new names
+         used in Python 3.
+     encoding : str, optional
+         What encoding to use when reading Python 2 strings. Only useful when
+         loading Python 2 generated pickled files in Python 3, which includes
+         npy/npz files containing object arrays. Values other than 'latin1',
+         'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
+         data. Default: 'ASCII'
+     max_header_size : int, optional
+         Maximum allowed size of the header. Large headers may not be safe
+         to load securely and thus require explicitly passing a larger value.
+         See :py:func:`ast.literal_eval()` for details.
+         This option is ignored when `allow_pickle` is passed. In that case
+         the file is by definition trusted and the limit is unnecessary.
+
+     Returns
+     -------
+     result : array, tuple, dict, etc.
+         Data stored in the file. For ``.npz`` files, the returned instance
+         of NpzFile class must be closed to avoid leaking file descriptors.
+
+     Raises
+     ------
+     OSError
+         If the input file does not exist or cannot be read.
+     UnpicklingError
+         If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
+     ValueError
+         The file contains an object array, but ``allow_pickle=False`` was
+         given.
+     EOFError
+         When calling ``np.load`` multiple times on the same file handle,
+         if all data has already been read.
+
+     See Also
+     --------
+     save, savez, savez_compressed, loadtxt
+     memmap : Create a memory-map to an array stored in a file on disk.
+     lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
+
+     Notes
+     -----
+     - If the file contains pickle data, then whatever object is stored
+       in the pickle is returned.
+     - If the file is a ``.npy`` file, then a single array is returned.
+     - If the file is a ``.npz`` file, then a dictionary-like object is
+       returned, containing ``{filename: array}`` key-value pairs, one for
+       each file in the archive.
+     - If the file is a ``.npz`` file, the returned value supports the
+       context manager protocol in a similar fashion to the open function::
+
+         with load('foo.npz') as data:
+             a = data['a']
+
+       The underlying file descriptor is closed when exiting the 'with'
+       block.
+
+     Examples
+     --------
+     Store data to disk, and load it again:
+
+     >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
+     >>> np.load('/tmp/123.npy')
+     array([[1, 2, 3],
+            [4, 5, 6]])
+
+     Store compressed data to disk, and load it again:
+
+     >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+     >>> b = np.array([1, 2])
+     >>> np.savez('/tmp/123.npz', a=a, b=b)
+     >>> data = np.load('/tmp/123.npz')
+     >>> data['a']
+     array([[1, 2, 3],
+            [4, 5, 6]])
+     >>> data['b']
+     array([1, 2])
+     >>> data.close()
+
+     Mem-map the stored array, and then access the second row
+     directly from disk:
+
+     >>> X = np.load('/tmp/123.npy', mmap_mode='r')
+     >>> X[1, :]
+     memmap([4, 5, 6])
+
+     """
+     if encoding not in ('ASCII', 'latin1', 'bytes'):
+         # The 'encoding' value for pickle also affects what encoding
+         # the serialized binary data of NumPy arrays is loaded
+         # in. Pickle does not pass on the encoding information to
+         # NumPy. The unpickling code in numpy.core.multiarray is
+         # written to assume that unicode data appearing where binary
+         # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
+         #
+         # Other encoding values can corrupt binary data, and we
+         # purposefully disallow them. For the same reason, the errors=
+         # argument is not exposed, as values other than 'strict' can
+         # similarly silently corrupt numerical data.
+         raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
+
+     pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
+
+     with contextlib.ExitStack() as stack:
+         if hasattr(file, 'read'):
+             fid = file
+             own_fid = False
+         else:
+             fid = stack.enter_context(open(os_fspath(file), "rb"))
+             own_fid = True
+
+         # Code to distinguish between NumPy binary files and pickles.
+         _ZIP_PREFIX = b'PK\x03\x04'
+         _ZIP_SUFFIX = b'PK\x05\x06'  # empty zip files start with this
+         N = len(format.MAGIC_PREFIX)
+         magic = fid.read(N)
+         if not magic:
+             raise EOFError("No data left in file")
+         # If the file size is less than N, we need to make sure not
+         # to seek past the beginning of the file
+         fid.seek(-min(N, len(magic)), 1)  # back-up
+         if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
+             # zip-file (assume .npz)
+             # Potentially transfer file ownership to NpzFile
+             stack.pop_all()
+             ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
+                           pickle_kwargs=pickle_kwargs,
+                           max_header_size=max_header_size)
+             return ret
+         elif magic == format.MAGIC_PREFIX:
+             # .npy file
+             if mmap_mode:
+                 if allow_pickle:
+                     max_header_size = 2**64
+                 return format.open_memmap(file, mode=mmap_mode,
+                                           max_header_size=max_header_size)
+             else:
+                 return format.read_array(fid, allow_pickle=allow_pickle,
+                                          pickle_kwargs=pickle_kwargs,
+                                          max_header_size=max_header_size)
+         else:
+             # Try a pickle
+             if not allow_pickle:
+                 raise ValueError("Cannot load file containing pickled data "
+                                  "when allow_pickle=False")
+             try:
+                 return pickle.load(fid, **pickle_kwargs)
+             except Exception as e:
+                 raise pickle.UnpicklingError(
+                     f"Failed to interpret file {file!r} as a pickle") from e
+
+
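The dispatch in ``load`` above is driven entirely by the first few bytes of the file: zip magic means ``.npz``, the ``.npy`` magic prefix means a single array, and anything else falls through to pickle. A small standalone sketch of the same sniffing logic, reusing the real ``format.MAGIC_PREFIX`` constant (the ``sniff_kind`` helper is illustrative, not part of npyio):

    import io
    import numpy as np
    from numpy.lib import format

    def sniff_kind(fid):
        """Classify an open binary file the same way np.load does."""
        magic = fid.read(len(format.MAGIC_PREFIX))
        fid.seek(-min(len(format.MAGIC_PREFIX), len(magic)), 1)  # rewind
        if magic.startswith(b'PK\x03\x04') or magic.startswith(b'PK\x05\x06'):
            return 'npz'        # zip archive
        elif magic == format.MAGIC_PREFIX:
            return 'npy'        # single array
        return 'pickle'         # fallback, requires allow_pickle=True

    buf = io.BytesIO()
    np.save(buf, np.arange(3))
    buf.seek(0)
    print(sniff_kind(buf))  # npy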
+ def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
+     return (arr,)
+
+
+ @array_function_dispatch(_save_dispatcher)
+ def save(file, arr, allow_pickle=True, fix_imports=True):
+     """
+     Save an array to a binary file in NumPy ``.npy`` format.
+
+     Parameters
+     ----------
+     file : file, str, or pathlib.Path
+         File or filename to which the data is saved. If file is a file-object,
+         then the filename is unchanged. If file is a string or Path, a ``.npy``
+         extension will be appended to the filename if it does not already
+         have one.
+     arr : array_like
+         Array data to be saved.
+     allow_pickle : bool, optional
+         Allow saving object arrays using Python pickles. Reasons for
+         disallowing pickles include security (loading pickled data can execute
+         arbitrary code) and portability (pickled objects may not be loadable
+         on different Python installations, for example if the stored objects
+         require libraries that are not available, and not all pickled data is
+         compatible between Python 2 and Python 3).
+         Default: True
+     fix_imports : bool, optional
+         Only useful in forcing objects in object arrays on Python 3 to be
+         pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
+         will try to map the new Python 3 names to the old module names used in
+         Python 2, so that the pickle data stream is readable with Python 2.
+
+     See Also
+     --------
+     savez : Save several arrays into a ``.npz`` archive
+     savetxt, load
+
+     Notes
+     -----
+     For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+     Any data saved to the file is appended to the end of the file.
+
+     Examples
+     --------
+     >>> from tempfile import TemporaryFile
+     >>> outfile = TemporaryFile()
+
+     >>> x = np.arange(10)
+     >>> np.save(outfile, x)
+
+     >>> _ = outfile.seek(0)  # Only needed here to simulate closing & reopening file
+     >>> np.load(outfile)
+     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+
+     >>> with open('test.npy', 'wb') as f:
+     ...     np.save(f, np.array([1, 2]))
+     ...     np.save(f, np.array([1, 3]))
+     >>> with open('test.npy', 'rb') as f:
+     ...     a = np.load(f)
+     ...     b = np.load(f)
+     >>> print(a, b)
+     # [1 2] [1 3]
+     """
+     if hasattr(file, 'write'):
+         file_ctx = contextlib.nullcontext(file)
+     else:
+         file = os_fspath(file)
+         if not file.endswith('.npy'):
+             file = file + '.npy'
+         file_ctx = open(file, "wb")
+
+     with file_ctx as fid:
+         arr = np.asanyarray(arr)
+         format.write_array(fid, arr, allow_pickle=allow_pickle,
+                            pickle_kwargs=dict(fix_imports=fix_imports))
+
+
+ def _savez_dispatcher(file, *args, **kwds):
+     yield from args
+     yield from kwds.values()
+
+
+ @array_function_dispatch(_savez_dispatcher)
+ def savez(file, *args, **kwds):
+     """Save several arrays into a single file in uncompressed ``.npz`` format.
+
+     Provide arrays as keyword arguments to store them under the
+     corresponding name in the output file: ``savez(fn, x=x, y=y)``.
+
+     If arrays are specified as positional arguments, i.e., ``savez(fn,
+     x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+     Parameters
+     ----------
+     file : str or file
+         Either the filename (string) or an open file (file-like object)
+         where the data will be saved. If file is a string or a Path, the
+         ``.npz`` extension will be appended to the filename if it is not
+         already there.
+     args : Arguments, optional
+         Arrays to save to the file. Please use keyword arguments (see
+         `kwds` below) to assign names to arrays. Arrays specified as
+         args will be named "arr_0", "arr_1", and so on.
+     kwds : Keyword arguments, optional
+         Arrays to save to the file. Each array will be saved to the
+         output file with its corresponding keyword name.
+
+     Returns
+     -------
+     None
+
+     See Also
+     --------
+     save : Save a single array to a binary file in NumPy format.
+     savetxt : Save an array to a file as plain text.
+     savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+     Notes
+     -----
+     The ``.npz`` file format is a zipped archive of files named after the
+     variables they contain. The archive is not compressed and each file
+     in the archive contains one variable in ``.npy`` format. For a
+     description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+     When opening the saved ``.npz`` file with `load` a `NpzFile` object is
+     returned. This is a dictionary-like object which can be queried for
+     its list of arrays (with the ``.files`` attribute), and for the arrays
+     themselves.
+
+     Keys passed in `kwds` are used as filenames inside the ZIP archive.
+     Therefore, keys should be valid filenames; e.g., avoid keys that begin with
+     ``/`` or contain ``.``.
+
+     When naming variables with keyword arguments, it is not possible to name a
+     variable ``file``, as this would cause the ``file`` argument to be defined
+     twice in the call to ``savez``.
+
+     Examples
+     --------
+     >>> from tempfile import TemporaryFile
+     >>> outfile = TemporaryFile()
+     >>> x = np.arange(10)
+     >>> y = np.sin(x)
+
+     Using `savez` with \\*args, the arrays are saved with default names.
+
+     >>> np.savez(outfile, x, y)
+     >>> _ = outfile.seek(0)  # Only needed here to simulate closing & reopening file
+     >>> npzfile = np.load(outfile)
+     >>> npzfile.files
+     ['arr_0', 'arr_1']
+     >>> npzfile['arr_0']
+     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+     Using `savez` with \\**kwds, the arrays are saved with the keyword names.
+
+     >>> outfile = TemporaryFile()
+     >>> np.savez(outfile, x=x, y=y)
+     >>> _ = outfile.seek(0)
+     >>> npzfile = np.load(outfile)
+     >>> sorted(npzfile.files)
+     ['x', 'y']
+     >>> npzfile['x']
+     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+     """
+     _savez(file, args, kwds, False)
+
+
+ def _savez_compressed_dispatcher(file, *args, **kwds):
+     yield from args
+     yield from kwds.values()
+
+
+ @array_function_dispatch(_savez_compressed_dispatcher)
+ def savez_compressed(file, *args, **kwds):
+     """
+     Save several arrays into a single file in compressed ``.npz`` format.
+
+     Provide arrays as keyword arguments to store them under the
+     corresponding name in the output file: ``savez(fn, x=x, y=y)``.
+
+     If arrays are specified as positional arguments, i.e., ``savez(fn,
+     x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+     Parameters
+     ----------
+     file : str or file
+         Either the filename (string) or an open file (file-like object)
+         where the data will be saved. If file is a string or a Path, the
+         ``.npz`` extension will be appended to the filename if it is not
+         already there.
+     args : Arguments, optional
+         Arrays to save to the file. Please use keyword arguments (see
+         `kwds` below) to assign names to arrays. Arrays specified as
+         args will be named "arr_0", "arr_1", and so on.
+     kwds : Keyword arguments, optional
+         Arrays to save to the file. Each array will be saved to the
+         output file with its corresponding keyword name.
+
+     Returns
+     -------
+     None
+
+     See Also
+     --------
+     numpy.save : Save a single array to a binary file in NumPy format.
+     numpy.savetxt : Save an array to a file as plain text.
+     numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
+     numpy.load : Load the files created by savez_compressed.
+
+     Notes
+     -----
+     The ``.npz`` file format is a zipped archive of files named after the
+     variables they contain. The archive is compressed with
+     ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one
+     variable in ``.npy`` format. For a description of the ``.npy`` format,
+     see :py:mod:`numpy.lib.format`.
+
+     When opening the saved ``.npz`` file with `load` a `NpzFile` object is
+     returned. This is a dictionary-like object which can be queried for
+     its list of arrays (with the ``.files`` attribute), and for the arrays
+     themselves.
+
+     Examples
+     --------
+     >>> test_array = np.random.rand(3, 2)
+     >>> test_vector = np.random.rand(4)
+     >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
+     >>> loaded = np.load('/tmp/123.npz')
+     >>> print(np.array_equal(test_array, loaded['a']))
+     True
+     >>> print(np.array_equal(test_vector, loaded['b']))
+     True
+
+     """
+     _savez(file, args, kwds, True)
+
+
+ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
+     # Import is postponed to here since zipfile depends on gzip, an optional
+     # component of the so-called standard library.
+     import zipfile
+
+     if not hasattr(file, 'write'):
+         file = os_fspath(file)
+         if not file.endswith('.npz'):
+             file = file + '.npz'
+
+     namedict = kwds
+     for i, val in enumerate(args):
+         key = 'arr_%d' % i
+         if key in namedict.keys():
+             raise ValueError(
+                 "Cannot use un-named variables and keyword %s" % key)
+         namedict[key] = val
+
+     if compress:
+         compression = zipfile.ZIP_DEFLATED
+     else:
+         compression = zipfile.ZIP_STORED
+
+     zipf = zipfile_factory(file, mode="w", compression=compression)
+
+     for key, val in namedict.items():
+         fname = key + '.npy'
+         val = np.asanyarray(val)
+         # always force zip64, gh-10776
+         with zipf.open(fname, 'w', force_zip64=True) as fid:
+             format.write_array(fid, val,
+                                allow_pickle=allow_pickle,
+                                pickle_kwargs=pickle_kwargs)
+
+     zipf.close()
+
+
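``_savez`` shows why positional and keyword arrays can collide: positional arrays are assigned the generated names ``arr_0``, ``arr_1``, ... in the same dict as the keyword arrays. A quick demonstration of both the naming scheme and the collision error (the temporary file paths are illustrative):

    import numpy as np

    x, y = np.arange(3), np.ones(3)
    np.savez('/tmp/demo.npz', x, y, scale=2 * y)   # positional -> arr_0, arr_1
    with np.load('/tmp/demo.npz') as npz:
        print(sorted(npz.files))                   # ['arr_0', 'arr_1', 'scale']

    try:
        np.savez('/tmp/clash.npz', x, arr_0=y)     # keyword reuses a generated name
    except ValueError as e:
        print(e)  # Cannot use un-named variables and keyword arr_0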
+ def _ensure_ndmin_ndarray_check_param(ndmin):
+     """Just checks whether the param ``ndmin`` is supported by
+     _ensure_ndmin_ndarray. It is intended to be used as
+     verification before running anything expensive,
+     e.g. in loadtxt or genfromtxt.
+     """
+     # Check correctness of the values of `ndmin`
+     if ndmin not in [0, 1, 2]:
+         raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
+
+
+ def _ensure_ndmin_ndarray(a, *, ndmin: int):
+     """This is a helper function of loadtxt and genfromtxt to ensure
+     proper minimum dimension as requested
+
+     ndmin : int. Supported values 0, 1, 2
+         ^^ whenever this changes, keep in sync with
+            _ensure_ndmin_ndarray_check_param
+     """
+     # Verify that the array has at least dimensions `ndmin`.
+     # Tweak the size and shape of the arrays - remove extraneous dimensions
+     if a.ndim > ndmin:
+         a = np.squeeze(a)
+     # and ensure we have the minimum number of dimensions asked for
+     # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
+     if a.ndim < ndmin:
+         if ndmin == 1:
+             a = np.atleast_1d(a)
+         elif ndmin == 2:
+             a = np.atleast_2d(a).T
+
+     return a
+
+
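The two branches above compose as follows for single-column data: extra axes are first squeezed away, and ``ndmin=2`` then promotes a 1-D result back as a column vector rather than a row. A minimal sketch of those operations in isolation:

    import numpy as np

    a = np.arange(3).reshape(3, 1)        # e.g. a single-column file parses to (3, 1)
    squeezed = np.squeeze(a)              # squeeze path: shape (3,)
    column = np.atleast_2d(squeezed).T    # ndmin=2 path: back to (3, 1), a column
    print(squeezed.shape, column.shape)   # (3,) (3, 1)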
+ # amount of lines loadtxt reads in one chunk, can be overridden for testing
+ _loadtxt_chunksize = 50000
+
+
+ def _check_nonneg_int(value, name="argument"):
+     try:
+         operator.index(value)
+     except TypeError:
+         raise TypeError(f"{name} must be an integer") from None
+     if value < 0:
+         raise ValueError(f"{name} must be nonnegative")
+
+
+ def _preprocess_comments(iterable, comments, encoding):
+     """
+     Generator that consumes a line-iterating iterable and strips out
+     multiple (or multi-character) comments from the lines.
+     This is a pre-processing step to achieve feature parity with loadtxt
+     (we assume that this is a niche feature).
+     """
+     for line in iterable:
+         if isinstance(line, bytes):
+             # Need to handle conversion here, or the splitting would fail
+             line = line.decode(encoding)
+
+         for c in comments:
+             line = line.split(c, 1)[0]
+
+         yield line
+
+
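Because each comment marker is applied with ``split(c, 1)[0]``, the generator keeps only the text before the first occurrence of any marker on a line. A quick check of that behavior, calling the private helper directly (illustrative only):

    lines = ['1 2 3 # trailing', '4 5 // other style', '6 7 8']
    stripped = list(_preprocess_comments(lines, comments=('#', '//'), encoding='utf-8'))
    print(stripped)  # ['1 2 3 ', '4 5 ', '6 7 8']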
814
+ # The number of rows we read in one go if confronted with a parametric dtype
815
+ _loadtxt_chunksize = 50000
816
+
817
+
818
+ def _read(fname, *, delimiter=',', comment='#', quote='"',
819
+ imaginary_unit='j', usecols=None, skiplines=0,
820
+ max_rows=None, converters=None, ndmin=None, unpack=False,
821
+ dtype=np.float64, encoding="bytes"):
822
+ r"""
823
+ Read a NumPy array from a text file.
824
+
825
+ Parameters
826
+ ----------
827
+ fname : str or file object
828
+ The filename or the file to be read.
829
+ delimiter : str, optional
830
+ Field delimiter of the fields in line of the file.
831
+ Default is a comma, ','. If None any sequence of whitespace is
832
+ considered a delimiter.
833
+ comment : str or sequence of str or None, optional
834
+ Character that begins a comment. All text from the comment
835
+ character to the end of the line is ignored.
836
+ Multiple comments or multiple-character comment strings are supported,
837
+ but may be slower and `quote` must be empty if used.
838
+ Use None to disable all use of comments.
839
+ quote : str or None, optional
840
+ Character that is used to quote string fields. Default is '"'
841
+ (a double quote). Use None to disable quote support.
842
+ imaginary_unit : str, optional
843
+ Character that represent the imaginay unit `sqrt(-1)`.
844
+ Default is 'j'.
845
+ usecols : array_like, optional
846
+ A one-dimensional array of integer column numbers. These are the
847
+ columns from the file to be included in the array. If this value
848
+ is not given, all the columns are used.
849
+ skiplines : int, optional
850
+ Number of lines to skip before interpreting the data in the file.
851
+ max_rows : int, optional
852
+ Maximum number of rows of data to read. Default is to read the
853
+ entire file.
854
+ converters : dict or callable, optional
855
+ A function to parse all columns strings into the desired value, or
856
+ a dictionary mapping column number to a parser function.
857
+ E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
858
+ Converters can also be used to provide a default value for missing
859
+ data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
860
+ convert empty fields to 0.
861
+ Default: None
862
+ ndmin : int, optional
863
+ Minimum dimension of the array returned.
864
+ Allowed values are 0, 1 or 2. Default is 0.
865
+ unpack : bool, optional
866
+ If True, the returned array is transposed, so that arguments may be
867
+ unpacked using ``x, y, z = read(...)``. When used with a structured
868
+ data-type, arrays are returned for each field. Default is False.
869
+ dtype : numpy data type
870
+ A NumPy dtype instance, can be a structured dtype to map to the
871
+ columns of the file.
872
+ encoding : str, optional
873
+ Encoding used to decode the inputfile. The special value 'bytes'
874
+ (the default) enables backwards-compatible behavior for `converters`,
875
+ ensuring that inputs to the converter functions are encoded
876
+ bytes objects. The special value 'bytes' has no additional effect if
877
+ ``converters=None``. If encoding is ``'bytes'`` or ``None``, the
878
+ default system encoding is used.
879
+
880
+ Returns
881
+ -------
882
+ ndarray
883
+ NumPy array.
884
+
885
+ Examples
886
+ --------
887
+ First we create a file for the example.
888
+
889
+ >>> s1 = '1.0,2.0,3.0\n4.0,5.0,6.0\n'
890
+ >>> with open('example1.csv', 'w') as f:
891
+ ... f.write(s1)
892
+ >>> a1 = read_from_filename('example1.csv')
893
+ >>> a1
894
+ array([[1., 2., 3.],
895
+ [4., 5., 6.]])
896
+
897
+ The second example has columns with different data types, so a
898
+ one-dimensional array with a structured data type is returned.
899
+ The tab character is used as the field delimiter.
900
+
901
+ >>> s2 = '1.0\t10\talpha\n2.3\t25\tbeta\n4.5\t16\tgamma\n'
902
+ >>> with open('example2.tsv', 'w') as f:
903
+ ... f.write(s2)
904
+ >>> a2 = read_from_filename('example2.tsv', delimiter='\t')
905
+ >>> a2
906
+ array([(1. , 10, b'alpha'), (2.3, 25, b'beta'), (4.5, 16, b'gamma')],
907
+ dtype=[('f0', '<f8'), ('f1', 'u1'), ('f2', 'S5')])
908
+ """
909
+ # Handle special 'bytes' keyword for encoding
910
+ byte_converters = False
911
+ if encoding == 'bytes':
912
+ encoding = None
913
+ byte_converters = True
914
+
915
+ if dtype is None:
916
+ raise TypeError("a dtype must be provided.")
917
+ dtype = np.dtype(dtype)
918
+
919
+ read_dtype_via_object_chunks = None
920
+ if dtype.kind in 'SUM' and (
921
+ dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'):
922
+ # This is a legacy "flexible" dtype. We do not truly support
923
+ # parametric dtypes currently (no dtype discovery step in the core),
924
+ # but have to support these for backward compatibility.
925
+ read_dtype_via_object_chunks = dtype
926
+ dtype = np.dtype(object)
927
+
928
+ if usecols is not None:
929
+ # Allow usecols to be a single int or a sequence of ints, the C-code
930
+ # handles the rest
931
+ try:
932
+ usecols = list(usecols)
933
+ except TypeError:
934
+ usecols = [usecols]
935
+
936
+ _ensure_ndmin_ndarray_check_param(ndmin)
937
+
938
+ if comment is None:
939
+ comments = None
940
+ else:
941
+ # assume comments are a sequence of strings
942
+ if "" in comment:
943
+ raise ValueError(
944
+ "comments cannot be an empty string. Use comments=None to "
945
+ "disable comments."
946
+ )
947
+ comments = tuple(comment)
948
+ comment = None
949
+ if len(comments) == 0:
950
+ comments = None # No comments at all
951
+ elif len(comments) == 1:
952
+ # If there is only one comment, and that comment has one character,
953
+ # the normal parsing can deal with it just fine.
954
+ if isinstance(comments[0], str) and len(comments[0]) == 1:
955
+ comment = comments[0]
956
+ comments = None
957
+ else:
958
+ # Input validation if there are multiple comment characters
959
+ if delimiter in comments:
960
+ raise TypeError(
961
+ f"Comment characters '{comments}' cannot include the "
962
+ f"delimiter '{delimiter}'"
963
+ )
964
+
965
+ # comment is now either a 1 or 0 character string or a tuple:
966
+ if comments is not None:
967
+ # Note: An earlier version support two character comments (and could
968
+ # have been extended to multiple characters, we assume this is
969
+ # rare enough to not optimize for.
970
+ if quote is not None:
971
+ raise ValueError(
972
+ "when multiple comments or a multi-character comment is "
973
+ "given, quotes are not supported. In this case quotechar "
974
+ "must be set to None.")
975
+
976
+ if len(imaginary_unit) != 1:
977
+ raise ValueError('len(imaginary_unit) must be 1.')
978
+
979
+ _check_nonneg_int(skiplines)
980
+ if max_rows is not None:
981
+ _check_nonneg_int(max_rows)
982
+ else:
983
+ # Passing -1 to the C code means "read the entire file".
984
+ max_rows = -1
985
+
986
+ fh_closing_ctx = contextlib.nullcontext()
987
+ filelike = False
988
+ try:
989
+ if isinstance(fname, os.PathLike):
990
+ fname = os.fspath(fname)
991
+ if isinstance(fname, str):
992
+ fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
993
+ if encoding is None:
994
+ encoding = getattr(fh, 'encoding', 'latin1')
995
+
996
+ fh_closing_ctx = contextlib.closing(fh)
997
+ data = fh
998
+ filelike = True
999
+ else:
1000
+ if encoding is None:
1001
+ encoding = getattr(fname, 'encoding', 'latin1')
1002
+ data = iter(fname)
1003
+ except TypeError as e:
1004
+ raise ValueError(
1005
+ f"fname must be a string, filehandle, list of strings,\n"
1006
+ f"or generator. Got {type(fname)} instead.") from e
1007
+
1008
+ with fh_closing_ctx:
1009
+ if comments is not None:
1010
+ if filelike:
1011
+ data = iter(data)
1012
+ filelike = False
1013
+ data = _preprocess_comments(data, comments, encoding)
1014
+
1015
+ if read_dtype_via_object_chunks is None:
1016
+ arr = _load_from_filelike(
1017
+ data, delimiter=delimiter, comment=comment, quote=quote,
1018
+ imaginary_unit=imaginary_unit,
1019
+ usecols=usecols, skiplines=skiplines, max_rows=max_rows,
1020
+ converters=converters, dtype=dtype,
1021
+ encoding=encoding, filelike=filelike,
1022
+ byte_converters=byte_converters)
1023
+
1024
+ else:
1025
+ # This branch reads the file into chunks of object arrays and then
1026
+ # casts them to the desired actual dtype. This ensures correct
1027
+ # string-length and datetime-unit discovery (like `arr.astype()`).
1028
+ # Due to chunking, certain error reports are less clear, currently.
1029
+ if filelike:
1030
+ data = iter(data) # cannot chunk when reading from file
1031
+
1032
+ c_byte_converters = False
1033
+ if read_dtype_via_object_chunks == "S":
1034
+ c_byte_converters = True # Use latin1 rather than ascii
1035
+
1036
+ chunks = []
1037
+ while max_rows != 0:
1038
+ if max_rows < 0:
1039
+ chunk_size = _loadtxt_chunksize
1040
+ else:
1041
+ chunk_size = min(_loadtxt_chunksize, max_rows)
1042
+
1043
+ next_arr = _load_from_filelike(
1044
+ data, delimiter=delimiter, comment=comment, quote=quote,
1045
+ imaginary_unit=imaginary_unit,
1046
+ usecols=usecols, skiplines=skiplines, max_rows=max_rows,
1047
+ converters=converters, dtype=dtype,
1048
+ encoding=encoding, filelike=filelike,
1049
+ byte_converters=byte_converters,
1050
+ c_byte_converters=c_byte_converters)
1051
+ # Cast here already. We hope that this is better even for
1052
+ # large files because the storage is more compact. It could
1053
+ # be adapted (in principle the concatenate could cast).
1054
+ chunks.append(next_arr.astype(read_dtype_via_object_chunks))
1055
+
1056
+ skiprows = 0 # Only have to skip for first chunk
1057
+ if max_rows >= 0:
1058
+ max_rows -= chunk_size
1059
+ if len(next_arr) < chunk_size:
1060
+ # There was less data than requested, so we are done.
1061
+ break
1062
+
1063
+ # Need at least one chunk, but if empty, the last one may have
1064
+ # the wrong shape.
1065
+ if len(chunks) > 1 and len(chunks[-1]) == 0:
1066
+ del chunks[-1]
1067
+ if len(chunks) == 1:
1068
+ arr = chunks[0]
1069
+ else:
1070
+ arr = np.concatenate(chunks, axis=0)
1071
+
1072
+ # NOTE: ndmin works as advertised for structured dtypes, but normally
1073
+ # these would return a 1D result plus the structured dimension,
1074
+ # so ndmin=2 adds a third dimension even when no squeezing occurs.
1075
+ # A `squeeze=False` could be a better solution (pandas uses squeeze).
1076
+ arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)
1077
+
1078
+ if arr.shape:
1079
+ if arr.shape[0] == 0:
1080
+ warnings.warn(
1081
+ f'loadtxt: input contained no data: "{fname}"',
1082
+ category=UserWarning,
1083
+ stacklevel=3
1084
+ )
1085
+
1086
+ if unpack:
1087
+ # Unpack structured dtypes if requested:
1088
+ dt = arr.dtype
1089
+ if dt.names is not None:
1090
+ # For structured arrays, return an array for each field.
1091
+ return [arr[field] for field in dt.names]
1092
+ else:
1093
+ return arr.T
1094
+ else:
1095
+ return arr
1096
+
1097
+
1098
+ @set_array_function_like_doc
1099
+ @set_module('numpy')
1100
+ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
1101
+ converters=None, skiprows=0, usecols=None, unpack=False,
1102
+ ndmin=0, encoding='bytes', max_rows=None, *, quotechar=None,
1103
+ like=None):
1104
+ r"""
1105
+ Load data from a text file.
1106
+
1107
+ Parameters
1108
+ ----------
1109
+ fname : file, str, pathlib.Path, list of str, generator
1110
+ File, filename, list, or generator to read. If the filename
1111
+ extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
1112
+ that generators must return bytes or strings. The strings
1113
+ in a list or produced by a generator are treated as lines.
1114
+ dtype : data-type, optional
1115
+ Data-type of the resulting array; default: float. If this is a
1116
+ structured data-type, the resulting array will be 1-dimensional, and
1117
+ each row will be interpreted as an element of the array. In this
1118
+ case, the number of columns used must match the number of fields in
1119
+ the data-type.
1120
+ comments : str or sequence of str or None, optional
1121
+ The characters or list of characters used to indicate the start of a
1122
+ comment. None implies no comments. For backwards compatibility, byte
1123
+ strings will be decoded as 'latin1'. The default is '#'.
1124
+ delimiter : str, optional
1125
+ The character used to separate the values. For backwards compatibility,
1126
+ byte strings will be decoded as 'latin1'. The default is whitespace.
1127
+
1128
+ .. versionchanged:: 1.23.0
1129
+ Only single character delimiters are supported. Newline characters
1130
+ cannot be used as the delimiter.
1131
+
1132
+ converters : dict or callable, optional
1133
+ Converter functions to customize value parsing. If `converters` is
1134
+ callable, the function is applied to all columns, else it must be a
1135
+ dict that maps column number to a parser function.
1136
+ See examples for further details.
1137
+ Default: None.
1138
+
1139
+ .. versionchanged:: 1.23.0
1140
+ The ability to pass a single callable to be applied to all columns
1141
+ was added.
1142
+
1143
+ skiprows : int, optional
1144
+ Skip the first `skiprows` lines, including comments; default: 0.
1145
+ usecols : int or sequence, optional
1146
+ Which columns to read, with 0 being the first. For example,
1147
+ ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
1148
+ The default, None, results in all columns being read.
1149
+
1150
+ .. versionchanged:: 1.11.0
1151
+ When a single column has to be read it is possible to use
+ an integer instead of a tuple. E.g. ``usecols = 3`` reads the
+ fourth column the same way as ``usecols = (3,)`` would.
1154
+ unpack : bool, optional
1155
+ If True, the returned array is transposed, so that arguments may be
1156
+ unpacked using ``x, y, z = loadtxt(...)``. When used with a
1157
+ structured data-type, arrays are returned for each field.
1158
+ Default is False.
1159
+ ndmin : int, optional
1160
+ The returned array will have at least `ndmin` dimensions.
1161
+ Otherwise mono-dimensional axes will be squeezed.
1162
+ Legal values: 0 (default), 1 or 2.
1163
+
1164
+ .. versionadded:: 1.6.0
1165
+ encoding : str, optional
+ Encoding used to decode the input file. Does not apply to input streams.
+ The special value 'bytes' enables backward compatibility workarounds
+ that ensure you receive byte arrays as results if possible and pass
+ 'latin1' encoded strings to converters. Override this value to receive
+ unicode arrays and pass strings as input to converters. If set to None
+ the system default is used. The default value is 'bytes'.
1172
+
1173
+ .. versionadded:: 1.14.0
1174
+ max_rows : int, optional
1175
+ Read `max_rows` rows of content after `skiprows` lines. The default is
1176
+ to read all the rows. Note that empty rows containing no data such as
1177
+ empty lines and comment lines are not counted towards `max_rows`,
1178
+ while such lines are counted in `skiprows`.
1179
+
1180
+ .. versionadded:: 1.16.0
1181
+
1182
+ .. versionchanged:: 1.23.0
1183
+ Lines containing no data, including comment lines (e.g., lines
1184
+ starting with '#' or as specified via `comments`) are not counted
1185
+ towards `max_rows`.
1186
+ quotechar : unicode character or None, optional
1187
+ The character used to denote the start and end of a quoted item.
1188
+ Occurrences of the delimiter or comment characters are ignored within
1189
+ a quoted item. The default value is ``quotechar=None``, which means
1190
+ quoting support is disabled.
1191
+
1192
+ If two consecutive instances of `quotechar` are found within a quoted
1193
+ field, the first is treated as an escape character. See examples.
1194
+
1195
+ .. versionadded:: 1.23.0
1196
+ ${ARRAY_FUNCTION_LIKE}
1197
+
1198
+ .. versionadded:: 1.20.0
1199
+
1200
+ Returns
1201
+ -------
1202
+ out : ndarray
1203
+ Data read from the text file.
1204
+
1205
+ See Also
1206
+ --------
1207
+ load, fromstring, fromregex
1208
+ genfromtxt : Load data with missing values handled as specified.
1209
+ scipy.io.loadmat : reads MATLAB data files
1210
+
1211
+ Notes
1212
+ -----
1213
+ This function aims to be a fast reader for simply formatted files. The
1214
+ `genfromtxt` function provides more sophisticated handling of, e.g.,
1215
+ lines with missing values.
1216
+
1217
+ Each row in the input text file must have the same number of values to be
+ able to read all values. If all rows do not have the same number of values, a
+ subset of up to n columns (where n is the least number of values present
+ in all rows) can be read by specifying the columns via `usecols`.
1221
+
1222
+ .. versionadded:: 1.10.0
1223
+
1224
+ The strings produced by the Python float.hex method can be used as
1225
+ input for floats.
1226
+
1227
+ Examples
1228
+ --------
1229
+ >>> from io import StringIO # StringIO behaves like a file object
1230
+ >>> c = StringIO("0 1\n2 3")
1231
+ >>> np.loadtxt(c)
1232
+ array([[0., 1.],
1233
+ [2., 3.]])
1234
+
1235
+ >>> d = StringIO("M 21 72\nF 35 58")
1236
+ >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
1237
+ ... 'formats': ('S1', 'i4', 'f4')})
1238
+ array([(b'M', 21, 72.), (b'F', 35, 58.)],
1239
+ dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
1240
+
1241
+ >>> c = StringIO("1,0,2\n3,0,4")
1242
+ >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
1243
+ >>> x
1244
+ array([1., 3.])
1245
+ >>> y
1246
+ array([2., 4.])
1247
+
1248
+ The `converters` argument is used to specify functions to preprocess the
1249
+ text prior to parsing. `converters` can be a dictionary that maps
+ column numbers to preprocessing functions:
1251
+
1252
+ >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
1253
+ >>> conv = {
1254
+ ... 0: lambda x: np.floor(float(x)), # conversion fn for column 0
1255
+ ... 1: lambda x: np.ceil(float(x)), # conversion fn for column 1
1256
+ ... }
1257
+ >>> np.loadtxt(s, delimiter=",", converters=conv)
1258
+ array([[1., 3.],
1259
+ [3., 5.]])
1260
+
1261
+ `converters` can be a callable instead of a dictionary, in which case it
1262
+ is applied to all columns:
1263
+
1264
+ >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
1265
+ >>> import functools
1266
+ >>> conv = functools.partial(int, base=16)
1267
+ >>> np.loadtxt(s, converters=conv)
1268
+ array([[222., 173.],
1269
+ [192., 222.]])
1270
+
1271
+ This example shows how `converters` can be used to convert a field
1272
+ with a trailing minus sign into a negative number.
1273
+
1274
+ >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
1275
+ >>> def conv(fld):
1276
+ ... return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
1277
+ ...
1278
+ >>> np.loadtxt(s, converters=conv)
1279
+ array([[ 10.01, -31.25],
1280
+ [ 19.22, 64.31],
1281
+ [-17.57, 63.94]])
1282
+
1283
+ Using a callable as the converter can be particularly useful for handling
1284
+ values with different formatting, e.g. floats with underscores:
1285
+
1286
+ >>> s = StringIO("1 2.7 100_000")
1287
+ >>> np.loadtxt(s, converters=float)
1288
+ array([1.e+00, 2.7e+00, 1.e+05])
1289
+
1290
+ This idea can be extended to automatically handle values specified in
1291
+ many different formats:
1292
+
1293
+ >>> def conv(val):
1294
+ ... try:
1295
+ ... return float(val)
1296
+ ... except ValueError:
1297
+ ... return float.fromhex(val)
1298
+ >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
1299
+ >>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None)
1300
+ array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
1301
+
1302
+ Note that with the default ``encoding="bytes"``, the inputs to the
1303
+ converter function are latin-1 encoded byte strings. To deactivate the
1304
+ implicit encoding prior to conversion, use ``encoding=None``:
1305
+
1306
+ >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
1307
+ >>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x)
1308
+ >>> np.loadtxt(s, converters=conv, encoding=None)
1309
+ array([[ 10.01, -31.25],
1310
+ [ 19.22, 64.31],
1311
+ [-17.57, 63.94]])
1312
+
1313
+ Support for quoted fields is enabled with the `quotechar` parameter.
1314
+ Comment and delimiter characters are ignored when they appear within a
1315
+ quoted item delineated by `quotechar`:
1316
+
1317
+ >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
1318
+ >>> dtype = np.dtype([("label", "U12"), ("value", float)])
1319
+ >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
1320
+ array([('alpha, #42', 10.), ('beta, #64', 2.)],
1321
+ dtype=[('label', '<U12'), ('value', '<f8')])
1322
+
1323
+ Quoted fields can be separated by multiple whitespace characters:
1324
+
1325
+ >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
1326
+ >>> dtype = np.dtype([("label", "U12"), ("value", float)])
1327
+ >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
1328
+ array([('alpha, #42', 10.), ('beta, #64', 2.)],
1329
+ dtype=[('label', '<U12'), ('value', '<f8')])
1330
+
1331
+ Two consecutive quote characters within a quoted field are treated as a
1332
+ single escaped character:
1333
+
1334
+ >>> s = StringIO('"Hello, my name is ""Monty""!"')
1335
+ >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
1336
+ array('Hello, my name is "Monty"!', dtype='<U26')
1337
+
1338
+ Read subset of columns when all rows do not contain equal number of values:
1339
+
1340
+ >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
1341
+ >>> np.loadtxt(d, usecols=(0, 1))
1342
+ array([[ 1., 2.],
1343
+ [ 2., 4.],
1344
+ [ 3., 9.],
1345
+ [ 4., 16.]])
1346
+
1347
+ """
1348
+
1349
+ if like is not None:
1350
+ return _loadtxt_with_like(
1351
+ like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
1352
+ converters=converters, skiprows=skiprows, usecols=usecols,
1353
+ unpack=unpack, ndmin=ndmin, encoding=encoding,
1354
+ max_rows=max_rows
1355
+ )
1356
+
1357
+ if isinstance(delimiter, bytes):
+ # Decode early so all later checks see a str; the result of this
+ # decode was previously discarded, making the call a no-op.
+ delimiter = delimiter.decode("latin1")
1359
+
1360
+ if dtype is None:
1361
+ dtype = np.float64
1362
+
1363
+ comment = comments
1364
+ # Control character type conversions for Py3 convenience
1365
+ if comment is not None:
1366
+ if isinstance(comment, (str, bytes)):
1367
+ comment = [comment]
1368
+ comment = [
1369
+ x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
1370
+ if isinstance(delimiter, bytes):
1371
+ delimiter = delimiter.decode('latin1')
1372
+
1373
+ arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
1374
+ converters=converters, skiplines=skiprows, usecols=usecols,
1375
+ unpack=unpack, ndmin=ndmin, encoding=encoding,
1376
+ max_rows=max_rows, quote=quotechar)
1377
+
1378
+ return arr
1379
+
1380
+
1381
+ _loadtxt_with_like = array_function_dispatch()(loadtxt)
1382
+
1383
+
1384
+ def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
1385
+ header=None, footer=None, comments=None,
1386
+ encoding=None):
1387
+ return (X,)
1388
+
1389
+
1390
+ @array_function_dispatch(_savetxt_dispatcher)
1391
+ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
1392
+ footer='', comments='# ', encoding=None):
1393
+ """
1394
+ Save an array to a text file.
1395
+
1396
+ Parameters
1397
+ ----------
1398
+ fname : filename or file handle
1399
+ If the filename ends in ``.gz``, the file is automatically saved in
1400
+ compressed gzip format. `loadtxt` understands gzipped files
1401
+ transparently.
1402
+ X : 1D or 2D array_like
1403
+ Data to be saved to a text file.
1404
+ fmt : str or sequence of strs, optional
1405
+ A single format (%10.5f), a sequence of formats, or a
1406
+ multi-format string, e.g. 'Iteration %d -- %10.5f', in which
1407
+ case `delimiter` is ignored. For complex `X`, the legal options
1408
+ for `fmt` are:
1409
+
1410
+ * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
1411
+ like `' (%s+%sj)' % (fmt, fmt)`
1412
+ * a full string specifying every real and imaginary part, e.g.
1413
+ `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
1414
+ * a list of specifiers, one per column - in this case, the real
1415
+ and imaginary part must have separate specifiers,
1416
+ e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
1417
+ delimiter : str, optional
1418
+ String or character separating columns.
1419
+ newline : str, optional
1420
+ String or character separating lines.
1421
+
1422
+ .. versionadded:: 1.5.0
1423
+ header : str, optional
1424
+ String that will be written at the beginning of the file.
1425
+
1426
+ .. versionadded:: 1.7.0
1427
+ footer : str, optional
1428
+ String that will be written at the end of the file.
1429
+
1430
+ .. versionadded:: 1.7.0
1431
+ comments : str, optional
1432
+ String that will be prepended to the ``header`` and ``footer`` strings,
1433
+ to mark them as comments. Default: '# ', as expected by e.g.
1434
+ ``numpy.loadtxt``.
1435
+
1436
+ .. versionadded:: 1.7.0
1437
+ encoding : {None, str}, optional
+ Encoding used to encode the output file. Does not apply to output
+ streams. If the encoding is something other than 'bytes' or 'latin1'
+ you will not be able to load the file in NumPy versions < 1.14. Default
+ is 'latin1'.
1442
+
1443
+ .. versionadded:: 1.14.0
1444
+
1445
+
1446
+ See Also
1447
+ --------
1448
+ save : Save an array to a binary file in NumPy ``.npy`` format
1449
+ savez : Save several arrays into an uncompressed ``.npz`` archive
1450
+ savez_compressed : Save several arrays into a compressed ``.npz`` archive
1451
+
1452
+ Notes
1453
+ -----
1454
+ Further explanation of the `fmt` parameter
1455
+ (``%[flag]width[.precision]specifier``):
1456
+
1457
+ flags:
1458
+ ``-`` : left justify
1459
+
1460
+ ``+`` : Forces the result to be preceded with + or -.
+ 
+ ``0`` : Left pad the number with zeros instead of spaces (see width).
1463
+
1464
+ width:
1465
+ Minimum number of characters to be printed. The value is not truncated
1466
+ if it has more characters.
1467
+
1468
+ precision:
1469
+ - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
1470
+ digits.
1471
+ - For ``e, E`` and ``f`` specifiers, the number of digits to print
1472
+ after the decimal point.
1473
+ - For ``g`` and ``G``, the maximum number of significant digits.
1474
+ - For ``s``, the maximum number of characters.
1475
+
1476
+ specifiers:
1477
+ ``c`` : character
1478
+
1479
+ ``d`` or ``i`` : signed decimal integer
1480
+
1481
+ ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
1482
+
1483
+ ``f`` : decimal floating point
1484
+
1485
+ ``g,G`` : use the shorter of ``e,E`` or ``f``
1486
+
1487
+ ``o`` : signed octal
1488
+
1489
+ ``s`` : string of characters
1490
+
1491
+ ``u`` : unsigned decimal integer
1492
+
1493
+ ``x,X`` : unsigned hexadecimal integer
1494
+
1495
+ This explanation of ``fmt`` is not complete, for an exhaustive
1496
+ specification see [1]_.
1497
+
1498
+ References
1499
+ ----------
1500
+ .. [1] `Format Specification Mini-Language
1501
+ <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
1502
+ Python Documentation.
1503
+
1504
+ Examples
1505
+ --------
1506
+ >>> x = y = z = np.arange(0.0,5.0,1.0)
1507
+ >>> np.savetxt('test.out', x, delimiter=',') # X is an array
1508
+ >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
1509
+ >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
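+ 
+ For complex input, a single specifier is expanded to the ``(%s+%sj)``
+ form described above:
+ 
+ >>> z = np.array([1+2j, 3-4j])
+ >>> np.savetxt('test.out', z, fmt='%.2e') # one '(re+imj)' pair per row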
1510
+
1511
+ """
1512
+
1513
+ # Py3 conversions first
1514
+ if isinstance(fmt, bytes):
1515
+ fmt = asstr(fmt)
1516
+ delimiter = asstr(delimiter)
1517
+
1518
+ class WriteWrap:
1519
+ """Convert to bytes on bytestream inputs.
1520
+
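+ The first write probes the wrapped stream: if writing a ``str`` raises
+ TypeError, the stream is assumed to be binary and all subsequent
+ writes are encoded (see ``first_write`` below).
+ 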
1521
+ """
1522
+ def __init__(self, fh, encoding):
1523
+ self.fh = fh
1524
+ self.encoding = encoding
1525
+ self.do_write = self.first_write
1526
+
1527
+ def close(self):
1528
+ self.fh.close()
1529
+
1530
+ def write(self, v):
1531
+ self.do_write(v)
1532
+
1533
+ def write_bytes(self, v):
1534
+ if isinstance(v, bytes):
1535
+ self.fh.write(v)
1536
+ else:
1537
+ self.fh.write(v.encode(self.encoding))
1538
+
1539
+ def write_normal(self, v):
1540
+ self.fh.write(asunicode(v))
1541
+
1542
+ def first_write(self, v):
1543
+ try:
1544
+ self.write_normal(v)
1545
+ self.write = self.write_normal
1546
+ except TypeError:
1547
+ # input is probably a bytestream
1548
+ self.write_bytes(v)
1549
+ self.write = self.write_bytes
1550
+
1551
+ own_fh = False
1552
+ if isinstance(fname, os_PathLike):
1553
+ fname = os_fspath(fname)
1554
+ if _is_string_like(fname):
1555
+ # datasource doesn't support creating a new file ...
1556
+ open(fname, 'wt').close()
1557
+ fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
1558
+ own_fh = True
1559
+ elif hasattr(fname, 'write'):
1560
+ # wrap to handle byte output streams
1561
+ fh = WriteWrap(fname, encoding or 'latin1')
1562
+ else:
1563
+ raise ValueError('fname must be a string or file handle')
1564
+
1565
+ try:
1566
+ X = np.asarray(X)
1567
+
1568
+ # Handle 1-dimensional arrays
1569
+ if X.ndim == 0 or X.ndim > 2:
1570
+ raise ValueError(
1571
+ "Expected 1D or 2D array, got %dD array instead" % X.ndim)
1572
+ elif X.ndim == 1:
1573
+ # Common case -- 1d array of numbers
1574
+ if X.dtype.names is None:
1575
+ X = np.atleast_2d(X).T
1576
+ ncol = 1
1577
+
1578
+ # Complex dtype -- each field indicates a separate column
1579
+ else:
1580
+ ncol = len(X.dtype.names)
1581
+ else:
1582
+ ncol = X.shape[1]
1583
+
1584
+ iscomplex_X = np.iscomplexobj(X)
1585
+ # `fmt` can be a string with multiple insertion points or a
+ # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
1587
+ if type(fmt) in (list, tuple):
1588
+ if len(fmt) != ncol:
1589
+ raise AttributeError('fmt has wrong shape. %s' % str(fmt))
1590
+ format = asstr(delimiter).join(map(asstr, fmt))
1591
+ elif isinstance(fmt, str):
1592
+ n_fmt_chars = fmt.count('%')
1593
+ error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
1594
+ if n_fmt_chars == 1:
1595
+ if iscomplex_X:
1596
+ fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
1597
+ else:
1598
+ fmt = [fmt, ] * ncol
1599
+ format = delimiter.join(fmt)
1600
+ elif iscomplex_X and n_fmt_chars != (2 * ncol):
1601
+ raise error
1602
+ elif ((not iscomplex_X) and n_fmt_chars != ncol):
1603
+ raise error
1604
+ else:
1605
+ format = fmt
1606
+ else:
1607
+ raise ValueError('invalid fmt: %r' % (fmt,))
1608
+
1609
+ if len(header) > 0:
1610
+ header = header.replace('\n', '\n' + comments)
1611
+ fh.write(comments + header + newline)
1612
+ if iscomplex_X:
1613
+ for row in X:
1614
+ row2 = []
1615
+ for number in row:
1616
+ row2.append(number.real)
1617
+ row2.append(number.imag)
1618
+ s = format % tuple(row2) + newline
1619
+ fh.write(s.replace('+-', '-'))
1620
+ else:
1621
+ for row in X:
1622
+ try:
1623
+ v = format % tuple(row) + newline
1624
+ except TypeError as e:
1625
+ raise TypeError("Mismatch between array dtype ('%s') and "
1626
+ "format specifier ('%s')"
1627
+ % (str(X.dtype), format)) from e
1628
+ fh.write(v)
1629
+
1630
+ if len(footer) > 0:
1631
+ footer = footer.replace('\n', '\n' + comments)
1632
+ fh.write(comments + footer + newline)
1633
+ finally:
1634
+ if own_fh:
1635
+ fh.close()
1636
+
1637
+
1638
+ @set_module('numpy')
1639
+ def fromregex(file, regexp, dtype, encoding=None):
1640
+ r"""
1641
+ Construct an array from a text file, using regular expression parsing.
1642
+
1643
+ The returned array is always a structured array, and is constructed from
1644
+ all matches of the regular expression in the file. Groups in the regular
1645
+ expression are converted to fields of the structured array.
1646
+
1647
+ Parameters
1648
+ ----------
1649
+ file : path or file
1650
+ Filename or file object to read.
1651
+
1652
+ .. versionchanged:: 1.22.0
1653
+ Now accepts `os.PathLike` implementations.
1654
+ regexp : str or regexp
1655
+ Regular expression used to parse the file.
1656
+ Groups in the regular expression correspond to fields in the dtype.
1657
+ dtype : dtype or list of dtypes
1658
+ Dtype for the structured array; must be a structured datatype.
1659
+ encoding : str, optional
+ Encoding used to decode the input file. Does not apply to input streams.
1661
+
1662
+ .. versionadded:: 1.14.0
1663
+
1664
+ Returns
1665
+ -------
1666
+ output : ndarray
1667
+ The output array, containing the part of the content of `file` that
1668
+ was matched by `regexp`. `output` is always a structured array.
1669
+
1670
+ Raises
1671
+ ------
1672
+ TypeError
1673
+ When `dtype` is not a valid dtype for a structured array.
1674
+
1675
+ See Also
1676
+ --------
1677
+ fromstring, loadtxt
1678
+
1679
+ Notes
1680
+ -----
1681
+ Dtypes for structured arrays can be specified in several forms, but all
1682
+ forms specify at least the data type and field name. For details see
1683
+ `basics.rec`.
1684
+
1685
+ Examples
1686
+ --------
1687
+ >>> from io import StringIO
1688
+ >>> text = StringIO("1312 foo\n1534 bar\n444 qux")
1689
+
1690
+ >>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything]
1691
+ >>> output = np.fromregex(text, regexp,
1692
+ ... [('num', np.int64), ('key', 'S3')])
1693
+ >>> output
1694
+ array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
1695
+ dtype=[('num', '<i8'), ('key', 'S3')])
1696
+ >>> output['num']
1697
+ array([1312, 1534, 444])
1698
+
1699
+ """
1700
+ own_fh = False
1701
+ if not hasattr(file, "read"):
1702
+ file = os.fspath(file)
1703
+ file = np.lib._datasource.open(file, 'rt', encoding=encoding)
1704
+ own_fh = True
1705
+
1706
+ try:
1707
+ if not isinstance(dtype, np.dtype):
1708
+ dtype = np.dtype(dtype)
1709
+ if dtype.names is None:
1710
+ raise TypeError('dtype must be a structured datatype.')
1711
+
1712
+ content = file.read()
1713
+ if isinstance(content, bytes) and isinstance(regexp, str):
1714
+ regexp = asbytes(regexp)
1715
+ elif isinstance(content, str) and isinstance(regexp, bytes):
1716
+ regexp = asstr(regexp)
1717
+
1718
+ if not hasattr(regexp, 'match'):
1719
+ regexp = re.compile(regexp)
1720
+ seq = regexp.findall(content)
1721
+ if seq and not isinstance(seq[0], tuple):
1722
+ # Only one group is in the regexp.
1723
+ # Create the new array as a single data-type and then
1724
+ # re-interpret as a single-field structured array.
1725
+ newdtype = np.dtype(dtype[dtype.names[0]])
1726
+ output = np.array(seq, dtype=newdtype)
1727
+ output.dtype = dtype
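+ # e.g. findall -> ['12', '34'] gives array([12, 34]) under the field's
+ # base dtype; the in-place dtype view then yields array([(12,), (34,)]).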
1728
+ else:
1729
+ output = np.array(seq, dtype=dtype)
1730
+
1731
+ return output
1732
+ finally:
1733
+ if own_fh:
1734
+ file.close()
1735
+
1736
+
1737
+ #####--------------------------------------------------------------------------
1738
+ #---- --- ASCII functions ---
1739
+ #####--------------------------------------------------------------------------
1740
+
1741
+
1742
+ @set_array_function_like_doc
1743
+ @set_module('numpy')
1744
+ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
1745
+ skip_header=0, skip_footer=0, converters=None,
1746
+ missing_values=None, filling_values=None, usecols=None,
1747
+ names=None, excludelist=None,
1748
+ deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
1749
+ replace_space='_', autostrip=False, case_sensitive=True,
1750
+ defaultfmt="f%i", unpack=None, usemask=False, loose=True,
1751
+ invalid_raise=True, max_rows=None, encoding='bytes',
1752
+ *, ndmin=0, like=None):
1753
+ """
1754
+ Load data from a text file, with missing values handled as specified.
1755
+
1756
+ Each line past the first `skip_header` lines is split at the `delimiter`
1757
+ character, and characters following the `comments` character are discarded.
1758
+
1759
+ Parameters
1760
+ ----------
1761
+ fname : file, str, pathlib.Path, list of str, generator
1762
+ File, filename, list, or generator to read. If the filename
1763
+ extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
1764
+ that generators must return bytes or strings. The strings
1765
+ in a list or produced by a generator are treated as lines.
1766
+ dtype : dtype, optional
1767
+ Data type of the resulting array.
1768
+ If None, the dtypes will be determined by the contents of each
1769
+ column, individually.
1770
+ comments : str, optional
1771
+ The character used to indicate the start of a comment.
1772
+ All the characters occurring on a line after a comment are discarded.
1773
+ delimiter : str, int, or sequence, optional
1774
+ The string used to separate values. By default, any consecutive
1775
+ whitespaces act as delimiter. An integer or sequence of integers
1776
+ can also be provided as width(s) of each field.
1777
+ skiprows : int, optional
1778
+ `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
1779
+ skip_header : int, optional
1780
+ The number of lines to skip at the beginning of the file.
1781
+ skip_footer : int, optional
1782
+ The number of lines to skip at the end of the file.
1783
+ converters : variable, optional
1784
+ The set of functions that convert the data of a column to a value.
1785
+ The converters can also be used to provide a default value
1786
+ for missing data: ``converters = {3: lambda s: float(s or 0)}``.
1787
+ missing : variable, optional
1788
+ `missing` was removed in numpy 1.10. Please use `missing_values`
1789
+ instead.
1790
+ missing_values : variable, optional
1791
+ The set of strings corresponding to missing data.
1792
+ filling_values : variable, optional
1793
+ The set of values to be used as default when the data are missing.
1794
+ usecols : sequence, optional
1795
+ Which columns to read, with 0 being the first. For example,
1796
+ ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
1797
+ names : {None, True, str, sequence}, optional
1798
+ If `names` is True, the field names are read from the first line after
+ the first `skip_header` lines. This line can optionally be preceded
+ by a comment delimiter. If `names` is a sequence or a single string of
+ comma-separated names, the names will be used to define the field names
+ in a structured dtype. If `names` is None, the names of the dtype
+ fields will be used, if any.
1804
+ excludelist : sequence, optional
1805
+ A list of names to exclude. This list is appended to the default list
1806
+ ['return','file','print']. Excluded names are appended with an
1807
+ underscore: for example, `file` would become `file_`.
1808
+ deletechars : str, optional
1809
+ A string combining invalid characters that must be deleted from the
1810
+ names.
1811
+ defaultfmt : str, optional
1812
+ A format used to define default field names, such as "f%i" or "f_%02i".
1813
+ autostrip : bool, optional
1814
+ Whether to automatically strip white spaces from the variables.
1815
+ replace_space : char, optional
1816
+ Character(s) used in replacement of white spaces in the variable
1817
+ names. By default, use a '_'.
1818
+ case_sensitive : {True, False, 'upper', 'lower'}, optional
1819
+ If True, field names are case sensitive.
1820
+ If False or 'upper', field names are converted to upper case.
1821
+ If 'lower', field names are converted to lower case.
1822
+ unpack : bool, optional
1823
+ If True, the returned array is transposed, so that arguments may be
1824
+ unpacked using ``x, y, z = genfromtxt(...)``. When used with a
1825
+ structured data-type, arrays are returned for each field.
1826
+ Default is False.
1827
+ usemask : bool, optional
1828
+ If True, return a masked array.
1829
+ If False, return a regular array.
1830
+ loose : bool, optional
1831
+ If True, do not raise errors for invalid values.
1832
+ invalid_raise : bool, optional
1833
+ If True, an exception is raised if an inconsistency is detected in the
1834
+ number of columns.
1835
+ If False, a warning is emitted and the offending lines are skipped.
1836
+ max_rows : int, optional
1837
+ The maximum number of rows to read. Must not be used with skip_footer
1838
+ at the same time. If given, the value must be at least 1. Default is
1839
+ to read the entire file.
1840
+
1841
+ .. versionadded:: 1.10.0
1842
+ encoding : str, optional
+ Encoding used to decode the input file. Does not apply when `fname` is
+ a file object. The special value 'bytes' enables backward compatibility
+ workarounds that ensure that you receive byte arrays when possible
+ and pass latin1 encoded strings to converters. Override this value to
+ receive unicode arrays and pass strings as input to converters. If set
+ to None the system default is used. The default value is 'bytes'.
1849
+
1850
+ .. versionadded:: 1.14.0
1851
+ ndmin : int, optional
+ Same parameter as `loadtxt`.
1853
+
1854
+ .. versionadded:: 1.23.0
1855
+ ${ARRAY_FUNCTION_LIKE}
1856
+
1857
+ .. versionadded:: 1.20.0
1858
+
1859
+ Returns
1860
+ -------
1861
+ out : ndarray
1862
+ Data read from the text file. If `usemask` is True, this is a
1863
+ masked array.
1864
+
1865
+ See Also
1866
+ --------
1867
+ numpy.loadtxt : equivalent function when no data is missing.
1868
+
1869
+ Notes
1870
+ -----
1871
+ * When spaces are used as delimiters, or when no delimiter has been given
1872
+ as input, there should not be any missing data between two fields.
1873
+ * When the variables are named (either by a flexible dtype or with `names`),
1874
+ there must not be any header in the file (else a ValueError
1875
+ exception is raised).
1876
+ * Individual values are not stripped of spaces by default.
1877
+ When using a custom converter, make sure the function does remove spaces.
1878
+
1879
+ References
1880
+ ----------
1881
+ .. [1] NumPy User Guide, section `I/O with NumPy
1882
+ <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
1883
+
1884
+ Examples
1885
+ --------
1886
+ >>> from io import StringIO
1887
+ >>> import numpy as np
1888
+
1889
+ Comma delimited file with mixed dtype
1890
+
1891
+ >>> s = StringIO(u"1,1.3,abcde")
1892
+ >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
1893
+ ... ('mystring','S5')], delimiter=",")
1894
+ >>> data
1895
+ array((1, 1.3, b'abcde'),
1896
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1897
+
1898
+ Using dtype = None
1899
+
1900
+ >>> _ = s.seek(0) # needed for StringIO example only
1901
+ >>> data = np.genfromtxt(s, dtype=None,
1902
+ ... names = ['myint','myfloat','mystring'], delimiter=",")
1903
+ >>> data
1904
+ array((1, 1.3, b'abcde'),
1905
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1906
+
1907
+ Specifying dtype and names
1908
+
1909
+ >>> _ = s.seek(0)
1910
+ >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
1911
+ ... names=['myint','myfloat','mystring'], delimiter=",")
1912
+ >>> data
1913
+ array((1, 1.3, b'abcde'),
1914
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1915
+
1916
+ An example with fixed-width columns
1917
+
1918
+ >>> s = StringIO(u"11.3abcde")
1919
+ >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
1920
+ ... delimiter=[1,3,5])
1921
+ >>> data
1922
+ array((1, 1.3, b'abcde'),
1923
+ dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
1924
+
1925
+ An example to show comments
1926
+
1927
+ >>> f = StringIO('''
1928
+ ... text,# of chars
1929
+ ... hello world,11
1930
+ ... numpy,5''')
1931
+ >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
1932
+ array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
1933
+ dtype=[('f0', 'S12'), ('f1', 'S12')])
1934
+
1935
+ """
1936
+
1937
+ if like is not None:
1938
+ return _genfromtxt_with_like(
1939
+ like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
1940
+ skip_header=skip_header, skip_footer=skip_footer,
1941
+ converters=converters, missing_values=missing_values,
1942
+ filling_values=filling_values, usecols=usecols, names=names,
1943
+ excludelist=excludelist, deletechars=deletechars,
1944
+ replace_space=replace_space, autostrip=autostrip,
1945
+ case_sensitive=case_sensitive, defaultfmt=defaultfmt,
1946
+ unpack=unpack, usemask=usemask, loose=loose,
1947
+ invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
1948
+ ndmin=ndmin,
1949
+ )
1950
+
1951
+ _ensure_ndmin_ndarray_check_param(ndmin)
1952
+
1953
+ if max_rows is not None:
1954
+ if skip_footer:
1955
+ raise ValueError(
1956
+ "The keywords 'skip_footer' and 'max_rows' can not be "
1957
+ "specified at the same time.")
1958
+ if max_rows < 1:
1959
+ raise ValueError("'max_rows' must be at least 1.")
1960
+
1961
+ if usemask:
1962
+ from numpy.ma import MaskedArray, make_mask_descr
1963
+ # Check the input dictionary of converters
1964
+ user_converters = converters or {}
1965
+ if not isinstance(user_converters, dict):
+ raise TypeError(
+ "The input argument 'converters' should be a valid dictionary "
+ "(got '%s' instead)" % type(user_converters))
1969
+
1970
+ if encoding == 'bytes':
1971
+ encoding = None
1972
+ byte_converters = True
1973
+ else:
1974
+ byte_converters = False
1975
+
1976
+ # Initialize the filehandle, the LineSplitter and the NameValidator
1977
+ if isinstance(fname, os_PathLike):
1978
+ fname = os_fspath(fname)
1979
+ if isinstance(fname, str):
1980
+ fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
1981
+ fid_ctx = contextlib.closing(fid)
1982
+ else:
1983
+ fid = fname
1984
+ fid_ctx = contextlib.nullcontext(fid)
1985
+ try:
1986
+ fhd = iter(fid)
1987
+ except TypeError as e:
1988
+ raise TypeError(
1989
+ "fname must be a string, a filehandle, a sequence of strings,\n"
1990
+ f"or an iterator of strings. Got {type(fname)} instead."
1991
+ ) from e
1992
+ with fid_ctx:
1993
+ split_line = LineSplitter(delimiter=delimiter, comments=comments,
1994
+ autostrip=autostrip, encoding=encoding)
1995
+ validate_names = NameValidator(excludelist=excludelist,
1996
+ deletechars=deletechars,
1997
+ case_sensitive=case_sensitive,
1998
+ replace_space=replace_space)
1999
+
2000
+ # Skip the first `skip_header` rows
2001
+ try:
2002
+ for i in range(skip_header):
2003
+ next(fhd)
2004
+
2005
+ # Keep on until we find the first valid values
2006
+ first_values = None
2007
+
2008
+ while not first_values:
2009
+ first_line = _decode_line(next(fhd), encoding)
2010
+ if (names is True) and (comments is not None):
2011
+ if comments in first_line:
2012
+ first_line = (
2013
+ ''.join(first_line.split(comments)[1:]))
2014
+ first_values = split_line(first_line)
2015
+ except StopIteration:
2016
+ # return an empty array if the datafile is empty
2017
+ first_line = ''
2018
+ first_values = []
2019
+ warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
2020
+
2021
+ # Should we take the first values as names ?
2022
+ if names is True:
2023
+ fval = first_values[0].strip()
2024
+ if comments is not None:
2025
+ if fval in comments:
2026
+ del first_values[0]
2027
+
2028
+ # Check the columns to use: make sure `usecols` is a list
2029
+ if usecols is not None:
2030
+ try:
2031
+ usecols = [_.strip() for _ in usecols.split(",")]
2032
+ except AttributeError:
2033
+ try:
2034
+ usecols = list(usecols)
2035
+ except TypeError:
2036
+ usecols = [usecols, ]
2037
+ nbcols = len(usecols or first_values)
2038
+
2039
+ # Check the names and overwrite the dtype.names if needed
2040
+ if names is True:
2041
+ names = validate_names([str(_.strip()) for _ in first_values])
2042
+ first_line = ''
2043
+ elif _is_string_like(names):
2044
+ names = validate_names([_.strip() for _ in names.split(',')])
2045
+ elif names:
2046
+ names = validate_names(names)
2047
+ # Get the dtype
2048
+ if dtype is not None:
2049
+ dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
2050
+ excludelist=excludelist,
2051
+ deletechars=deletechars,
2052
+ case_sensitive=case_sensitive,
2053
+ replace_space=replace_space)
2054
+ # Make sure the names is a list (for 2.5)
2055
+ if names is not None:
2056
+ names = list(names)
2057
+
2058
+ if usecols:
2059
+ for (i, current) in enumerate(usecols):
2060
+ # if usecols is a list of names, convert to a list of indices
2061
+ if _is_string_like(current):
2062
+ usecols[i] = names.index(current)
2063
+ elif current < 0:
2064
+ usecols[i] = current + len(first_values)
2065
+ # If the dtype is not None, make sure we update it
2066
+ if (dtype is not None) and (len(dtype) > nbcols):
2067
+ descr = dtype.descr
2068
+ dtype = np.dtype([descr[_] for _ in usecols])
2069
+ names = list(dtype.names)
2070
+ # If `names` is not None, update the names
2071
+ elif (names is not None) and (len(names) > nbcols):
2072
+ names = [names[_] for _ in usecols]
2073
+ elif (names is not None) and (dtype is not None):
2074
+ names = list(dtype.names)
2075
+
2076
+ # Process the missing values ...............................
2077
+ # Rename missing_values for convenience
2078
+ user_missing_values = missing_values or ()
2079
+ if isinstance(user_missing_values, bytes):
2080
+ user_missing_values = user_missing_values.decode('latin1')
2081
+
2082
+ # Define the list of missing_values (one column: one list)
2083
+ missing_values = [list(['']) for _ in range(nbcols)]
2084
+
2085
+ # We have a dictionary: process it field by field
2086
+ if isinstance(user_missing_values, dict):
2087
+ # Loop on the items
2088
+ for (key, val) in user_missing_values.items():
2089
+ # Is the key a string ?
2090
+ if _is_string_like(key):
2091
+ try:
2092
+ # Transform it into an integer
2093
+ key = names.index(key)
2094
+ except ValueError:
2095
+ # We couldn't find it: the name must have been dropped
2096
+ continue
2097
+ # Redefine the key as needed if it's a column number
2098
+ if usecols:
2099
+ try:
2100
+ key = usecols.index(key)
2101
+ except ValueError:
2102
+ pass
2103
+ # Transform the value into a list of strings
2104
+ if isinstance(val, (list, tuple)):
2105
+ val = [str(_) for _ in val]
2106
+ else:
2107
+ val = [str(val), ]
2108
+ # Add the value(s) to the current list of missing
2109
+ if key is None:
2110
+ # None acts as default
2111
+ for miss in missing_values:
2112
+ miss.extend(val)
2113
+ else:
2114
+ missing_values[key].extend(val)
2115
+ # We have a sequence : each item matches a column
2116
+ elif isinstance(user_missing_values, (list, tuple)):
2117
+ for (value, entry) in zip(user_missing_values, missing_values):
2118
+ value = str(value)
2119
+ if value not in entry:
2120
+ entry.append(value)
2121
+ # We have a string : apply it to all entries
2122
+ elif isinstance(user_missing_values, str):
2123
+ user_value = user_missing_values.split(",")
2124
+ for entry in missing_values:
2125
+ entry.extend(user_value)
2126
+ # We have something else: apply it to all entries
2127
+ else:
2128
+ for entry in missing_values:
2129
+ entry.extend([str(user_missing_values)])
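+ # Illustrative result: with nbcols=3 and
+ # missing_values={0: "N/A", None: "???"}, the per-column lists become
+ # [['', 'N/A', '???'], ['', '???'], ['', '???']].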
2130
+
2131
+ # Process the filling_values ...............................
2132
+ # Rename the input for convenience
2133
+ user_filling_values = filling_values
2134
+ if user_filling_values is None:
2135
+ user_filling_values = []
2136
+ # Define the default
2137
+ filling_values = [None] * nbcols
2138
+ # We have a dictionary : update each entry individually
2139
+ if isinstance(user_filling_values, dict):
2140
+ for (key, val) in user_filling_values.items():
2141
+ if _is_string_like(key):
2142
+ try:
2143
+ # Transform it into an integer
2144
+ key = names.index(key)
2145
+ except ValueError:
2146
+ # We couldn't find it: the name must have been dropped,
2147
+ continue
2148
+ # Redefine the key if it's a column number and usecols is defined
2149
+ if usecols:
2150
+ try:
2151
+ key = usecols.index(key)
2152
+ except ValueError:
2153
+ pass
2154
+ # Add the value to the list
2155
+ filling_values[key] = val
2156
+ # We have a sequence : update on a one-to-one basis
2157
+ elif isinstance(user_filling_values, (list, tuple)):
2158
+ n = len(user_filling_values)
2159
+ if (n <= nbcols):
2160
+ filling_values[:n] = user_filling_values
2161
+ else:
2162
+ filling_values = user_filling_values[:nbcols]
2163
+ # We have something else : use it for all entries
2164
+ else:
2165
+ filling_values = [user_filling_values] * nbcols
2166
+
2167
+ # Initialize the converters ................................
2168
+ if dtype is None:
2169
+ # Note: we can't use a [...]*nbcols, as we would have 3 times the same
2170
+ # ... converter, instead of 3 different converters.
2171
+ converters = [StringConverter(None, missing_values=miss, default=fill)
2172
+ for (miss, fill) in zip(missing_values, filling_values)]
2173
+ else:
2174
+ dtype_flat = flatten_dtype(dtype, flatten_base=True)
2175
+ # Initialize the converters
2176
+ if len(dtype_flat) > 1:
2177
+ # Flexible type : get a converter from each dtype
2178
+ zipit = zip(dtype_flat, missing_values, filling_values)
2179
+ converters = [StringConverter(dt, locked=True,
2180
+ missing_values=miss, default=fill)
2181
+ for (dt, miss, fill) in zipit]
2182
+ else:
2183
+ # Set to a default converter (but w/ different missing values)
2184
+ zipit = zip(missing_values, filling_values)
2185
+ converters = [StringConverter(dtype, locked=True,
2186
+ missing_values=miss, default=fill)
2187
+ for (miss, fill) in zipit]
2188
+ # Update the converters to use the user-defined ones
2189
+ uc_update = []
2190
+ for (j, conv) in user_converters.items():
2191
+ # If the converter is specified by column names, use the index instead
2192
+ if _is_string_like(j):
2193
+ try:
2194
+ j = names.index(j)
2195
+ i = j
2196
+ except ValueError:
2197
+ continue
2198
+ elif usecols:
2199
+ try:
2200
+ i = usecols.index(j)
2201
+ except ValueError:
2202
+ # Unused converter specified
2203
+ continue
2204
+ else:
2205
+ i = j
2206
+ # Find the value to test - first_line is not filtered by usecols:
2207
+ if len(first_line):
2208
+ testing_value = first_values[j]
2209
+ else:
2210
+ testing_value = None
2211
+ if conv is bytes:
2212
+ user_conv = asbytes
2213
+ elif byte_converters:
2214
+ # converters may use decode to workaround numpy's old behaviour,
2215
+ # so encode the string again before passing to the user converter
2216
+ def tobytes_first(x, conv):
2217
+ if type(x) is bytes:
2218
+ return conv(x)
2219
+ return conv(x.encode("latin1"))
2220
+ user_conv = functools.partial(tobytes_first, conv=conv)
2221
+ else:
2222
+ user_conv = conv
2223
+ converters[i].update(user_conv, locked=True,
2224
+ testing_value=testing_value,
2225
+ default=filling_values[i],
2226
+ missing_values=missing_values[i],)
2227
+ uc_update.append((i, user_conv))
2228
+ # Make sure we have the corrected keys in user_converters...
2229
+ user_converters.update(uc_update)
2230
+
2231
+ # Fixme: possible error as following variable never used.
2232
+ # miss_chars = [_.missing_values for _ in converters]
2233
+
2234
+ # Initialize the output lists ...
2235
+ # ... rows
2236
+ rows = []
2237
+ append_to_rows = rows.append
2238
+ # ... masks
2239
+ if usemask:
2240
+ masks = []
2241
+ append_to_masks = masks.append
2242
+ # ... invalid
2243
+ invalid = []
2244
+ append_to_invalid = invalid.append
2245
+
2246
+ # Parse each line
2247
+ for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
2248
+ values = split_line(line)
2249
+ nbvalues = len(values)
2250
+ # Skip an empty line
2251
+ if nbvalues == 0:
2252
+ continue
2253
+ if usecols:
2254
+ # Select only the columns we need
2255
+ try:
2256
+ values = [values[_] for _ in usecols]
2257
+ except IndexError:
2258
+ append_to_invalid((i + skip_header + 1, nbvalues))
2259
+ continue
2260
+ elif nbvalues != nbcols:
2261
+ append_to_invalid((i + skip_header + 1, nbvalues))
2262
+ continue
2263
+ # Store the values
2264
+ append_to_rows(tuple(values))
2265
+ if usemask:
2266
+ append_to_masks(tuple([v.strip() in m
2267
+ for (v, m) in zip(values,
2268
+ missing_values)]))
2269
+ if len(rows) == max_rows:
2270
+ break
2271
+
2272
+ # Upgrade the converters (if needed)
2273
+ if dtype is None:
2274
+ for (i, converter) in enumerate(converters):
2275
+ current_column = [itemgetter(i)(_m) for _m in rows]
2276
+ try:
2277
+ converter.iterupgrade(current_column)
2278
+ except ConverterLockError:
2279
+ errmsg = "Converter #%i is locked and cannot be upgraded: " % i
2280
+ current_column = map(itemgetter(i), rows)
2281
+ for (j, value) in enumerate(current_column):
2282
+ try:
2283
+ converter.upgrade(value)
2284
+ except (ConverterError, ValueError):
2285
+ errmsg += "(occurred line #%i for value '%s')"
2286
+ errmsg %= (j + 1 + skip_header, value)
2287
+ raise ConverterError(errmsg)
2288
+
2289
+ # Check that we don't have invalid values
2290
+ nbinvalid = len(invalid)
2291
+ if nbinvalid > 0:
2292
+ nbrows = len(rows) + nbinvalid - skip_footer
2293
+ # Construct the error message
2294
+ template = " Line #%%i (got %%i columns instead of %i)" % nbcols
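+ # e.g. " Line #5 (got 2 columns instead of 3)" when row 5 is short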
2295
+ if skip_footer > 0:
2296
+ nbinvalid_skipped = len([_ for _ in invalid
2297
+ if _[0] > nbrows + skip_header])
2298
+ invalid = invalid[:nbinvalid - nbinvalid_skipped]
2299
+ skip_footer -= nbinvalid_skipped
2300
+ #
2301
+ # nbrows -= skip_footer
2302
+ # errmsg = [template % (i, nb)
2303
+ # for (i, nb) in invalid if i < nbrows]
2304
+ # else:
2305
+ errmsg = [template % (i, nb)
2306
+ for (i, nb) in invalid]
2307
+ if len(errmsg):
+ errmsg.insert(0, "Some errors were detected!")
+ errmsg = "\n".join(errmsg)
2310
+ # Raise an exception ?
2311
+ if invalid_raise:
2312
+ raise ValueError(errmsg)
2313
+ # Issue a warning ?
2314
+ else:
2315
+ warnings.warn(errmsg, ConversionWarning, stacklevel=2)
2316
+
2317
+ # Strip the last skip_footer data
2318
+ if skip_footer > 0:
2319
+ rows = rows[:-skip_footer]
2320
+ if usemask:
2321
+ masks = masks[:-skip_footer]
2322
+
2323
+ # Convert each value according to the converter:
2324
+ # We want to modify the list in place to avoid creating a new one...
2325
+ if loose:
2326
+ rows = list(
2327
+ zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
2328
+ for (i, conv) in enumerate(converters)]))
2329
+ else:
2330
+ rows = list(
2331
+ zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
2332
+ for (i, conv) in enumerate(converters)]))
2333
+
2334
+ # Reset the dtype
2335
+ data = rows
2336
+ if dtype is None:
2337
+ # Get the dtypes from the types of the converters
2338
+ column_types = [conv.type for conv in converters]
2339
+ # Find the columns with strings...
2340
+ strcolidx = [i for (i, v) in enumerate(column_types)
2341
+ if v == np.str_]
2342
+
2343
+ if byte_converters and strcolidx:
2344
+ # convert strings back to bytes for backward compatibility
2345
+ warnings.warn(
2346
+ "Reading unicode strings without specifying the encoding "
2347
+ "argument is deprecated. Set the encoding, use None for the "
2348
+ "system default.",
2349
+ np.VisibleDeprecationWarning, stacklevel=2)
2350
+ def encode_unicode_cols(row_tup):
2351
+ row = list(row_tup)
2352
+ for i in strcolidx:
2353
+ row[i] = row[i].encode('latin1')
2354
+ return tuple(row)
2355
+
2356
+ try:
2357
+ data = [encode_unicode_cols(r) for r in data]
2358
+ except UnicodeEncodeError:
2359
+ pass
2360
+ else:
2361
+ for i in strcolidx:
2362
+ column_types[i] = np.bytes_
2363
+
2364
+ # Update string types to be the right length
2365
+ sized_column_types = column_types[:]
2366
+ for i, col_type in enumerate(column_types):
2367
+ if np.issubdtype(col_type, np.character):
2368
+ n_chars = max(len(row[i]) for row in data)
2369
+ sized_column_types[i] = (col_type, n_chars)
2370
+
2371
+ if names is None:
2372
+ # If the dtype is uniform (before sizing strings)
2373
+ base = {
2374
+ c_type
2375
+ for c, c_type in zip(converters, column_types)
2376
+ if c._checked}
2377
+ if len(base) == 1:
2378
+ uniform_type, = base
2379
+ (ddtype, mdtype) = (uniform_type, bool)
2380
+ else:
2381
+ ddtype = [(defaultfmt % i, dt)
2382
+ for (i, dt) in enumerate(sized_column_types)]
2383
+ if usemask:
2384
+ mdtype = [(defaultfmt % i, bool)
2385
+ for (i, dt) in enumerate(sized_column_types)]
2386
+ else:
2387
+ ddtype = list(zip(names, sized_column_types))
2388
+ mdtype = list(zip(names, [bool] * len(sized_column_types)))
2389
+ output = np.array(data, dtype=ddtype)
2390
+ if usemask:
2391
+ outputmask = np.array(masks, dtype=mdtype)
2392
+ else:
2393
+ # Overwrite the initial dtype names if needed
2394
+ if names and dtype.names is not None:
2395
+ dtype.names = names
2396
+ # Case 1. We have a structured type
2397
+ if len(dtype_flat) > 1:
2398
+ # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
2399
+ # First, create the array using a flattened dtype:
2400
+ # [('a', int), ('b1', int), ('b2', float)]
2401
+ # Then, view the array using the specified dtype.
2402
+ if 'O' in (_.char for _ in dtype_flat):
2403
+ if has_nested_fields(dtype):
2404
+ raise NotImplementedError(
2405
+ "Nested fields involving objects are not supported...")
2406
+ else:
2407
+ output = np.array(data, dtype=dtype)
2408
+ else:
2409
+ rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
2410
+ output = rows.view(dtype)
2411
+ # Now, process the rowmasks the same way
2412
+ if usemask:
2413
+ rowmasks = np.array(
2414
+ masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
2415
+ # Construct the new dtype
2416
+ mdtype = make_mask_descr(dtype)
2417
+ outputmask = rowmasks.view(mdtype)
2418
+ # Case #2. We have a basic dtype
2419
+ else:
2420
+ # We used some user-defined converters
2421
+ if user_converters:
2422
+ ishomogeneous = True
2423
+ descr = []
2424
+ for i, ttype in enumerate([conv.type for conv in converters]):
2425
+ # Keep the dtype of the current converter
2426
+ if i in user_converters:
2427
+ ishomogeneous &= (ttype == dtype.type)
2428
+ if np.issubdtype(ttype, np.character):
2429
+ ttype = (ttype, max(len(row[i]) for row in data))
2430
+ descr.append(('', ttype))
2431
+ else:
2432
+ descr.append(('', dtype))
2433
+ # So we changed the dtype ?
2434
+ if not ishomogeneous:
2435
+ # We have more than one field
2436
+ if len(descr) > 1:
2437
+ dtype = np.dtype(descr)
2438
+ # We have only one field: drop the name if not needed.
2439
+ else:
2440
+ dtype = np.dtype(ttype)
2441
+ #
2442
+ output = np.array(data, dtype)
2443
+ if usemask:
2444
+ if dtype.names is not None:
2445
+ mdtype = [(_, bool) for _ in dtype.names]
2446
+ else:
2447
+ mdtype = bool
2448
+ outputmask = np.array(masks, dtype=mdtype)
2449
+ # Try to take care of the missing data we missed
2450
+ names = output.dtype.names
2451
+ if usemask and names:
2452
+ for (name, conv) in zip(names, converters):
2453
+ missing_values = [conv(_) for _ in conv.missing_values
2454
+ if _ != '']
2455
+ for mval in missing_values:
2456
+ outputmask[name] |= (output[name] == mval)
2457
+ # Construct the final array
2458
+ if usemask:
2459
+ output = output.view(MaskedArray)
2460
+ output._mask = outputmask
2461
+
2462
+ output = _ensure_ndmin_ndarray(output, ndmin=ndmin)
2463
+
2464
+ if unpack:
2465
+ if names is None:
2466
+ return output.T
2467
+ elif len(names) == 1:
2468
+ # squeeze single-name dtypes too
2469
+ return output[names[0]]
2470
+ else:
2471
+ # For structured arrays with multiple fields,
2472
+ # return an array for each field.
2473
+ return [output[field] for field in names]
2474
+ return output
2475
+
2476
+
2477
+ _genfromtxt_with_like = array_function_dispatch()(genfromtxt)
2478
+
2479
+
2480
+ def recfromtxt(fname, **kwargs):
2481
+ """
2482
+ Load ASCII data from a file and return it in a record array.
2483
+
2484
+ If ``usemask=False`` a standard `recarray` is returned,
2485
+ if ``usemask=True`` a MaskedRecords array is returned.
2486
+
2487
+ Parameters
2488
+ ----------
2489
+ fname, kwargs : For a description of input parameters, see `genfromtxt`.
2490
+
2491
+ See Also
2492
+ --------
2493
+ numpy.genfromtxt : generic function
2494
+
2495
+ Notes
2496
+ -----
2497
+ By default, `dtype` is None, which means that the data-type of the output
2498
+ array will be determined from the data.
2499
+
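+ Examples
+ --------
+ A minimal sketch (with ``dtype=None``, column types are inferred from
+ the data):
+ 
+ >>> r = np.recfromtxt(["1 2.5", "3 4.5"], encoding=None)
+ >>> r.f0
+ array([1, 3])
+ 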
2500
+ """
2501
+ kwargs.setdefault("dtype", None)
2502
+ usemask = kwargs.get('usemask', False)
2503
+ output = genfromtxt(fname, **kwargs)
2504
+ if usemask:
2505
+ from numpy.ma.mrecords import MaskedRecords
2506
+ output = output.view(MaskedRecords)
2507
+ else:
2508
+ output = output.view(np.recarray)
2509
+ return output
2510
+
2511
+
2512
+ def recfromcsv(fname, **kwargs):
2513
+ """
2514
+ Load ASCII data stored in a comma-separated file.
2515
+
2516
+ The returned array is a record array (if ``usemask=False``, see
2517
+ `recarray`) or a masked record array (if ``usemask=True``,
2518
+ see `ma.mrecords.MaskedRecords`).
2519
+
2520
+ Parameters
2521
+ ----------
2522
+ fname, kwargs : For a description of input parameters, see `genfromtxt`.
2523
+
2524
+ See Also
2525
+ --------
2526
+ numpy.genfromtxt : generic function to load ASCII data.
2527
+
2528
+ Notes
2529
+ -----
2530
+ By default, `dtype` is None, which means that the data-type of the output
2531
+ array will be determined from the data.
2532
+
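+ Examples
+ --------
+ A minimal sketch (header names are read and lower-cased because of the
+ ``case_sensitive="lower"`` default):
+ 
+ >>> rec = np.recfromcsv(["A,B", "1,2.5"], encoding=None)
+ >>> rec.dtype.names
+ ('a', 'b')
+ 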
2533
+ """
2534
+ # Set default kwargs for genfromtxt as relevant to csv import.
2535
+ kwargs.setdefault("case_sensitive", "lower")
2536
+ kwargs.setdefault("names", True)
2537
+ kwargs.setdefault("delimiter", ",")
2538
+ kwargs.setdefault("dtype", None)
2539
+ output = genfromtxt(fname, **kwargs)
2540
+
2541
+ usemask = kwargs.get("usemask", False)
2542
+ if usemask:
2543
+ from numpy.ma.mrecords import MaskedRecords
2544
+ output = output.view(MaskedRecords)
2545
+ else:
2546
+ output = output.view(np.recarray)
2547
+ return output
.venv/lib/python3.11/site-packages/numpy/lib/npyio.pyi ADDED
@@ -0,0 +1,330 @@
1
+ import os
2
+ import sys
3
+ import zipfile
4
+ import types
5
+ from re import Pattern
6
+ from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable
7
+ from typing import (
8
+ Literal as L,
9
+ Any,
10
+ TypeVar,
11
+ Generic,
12
+ IO,
13
+ overload,
14
+ Protocol,
15
+ )
16
+
17
+ from numpy import (
18
+ DataSource as DataSource,
19
+ ndarray,
20
+ recarray,
21
+ dtype,
22
+ generic,
23
+ float64,
24
+ void,
25
+ record,
26
+ )
27
+
28
+ from numpy.ma.mrecords import MaskedRecords
29
+ from numpy._typing import (
30
+ ArrayLike,
31
+ DTypeLike,
32
+ NDArray,
33
+ _DTypeLike,
34
+ _SupportsArrayFunc,
35
+ )
36
+
37
+ from numpy.core.multiarray import (
38
+ packbits as packbits,
39
+ unpackbits as unpackbits,
40
+ )
41
+
42
+ _T = TypeVar("_T")
43
+ _T_contra = TypeVar("_T_contra", contravariant=True)
44
+ _T_co = TypeVar("_T_co", covariant=True)
45
+ _SCT = TypeVar("_SCT", bound=generic)
46
+ _CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True)
47
+ _CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True)
48
+
49
+ class _SupportsGetItem(Protocol[_T_contra, _T_co]):
50
+ def __getitem__(self, key: _T_contra, /) -> _T_co: ...
51
+
52
+ class _SupportsRead(Protocol[_CharType_co]):
53
+ def read(self) -> _CharType_co: ...
54
+
55
+ class _SupportsReadSeek(Protocol[_CharType_co]):
56
+ def read(self, n: int, /) -> _CharType_co: ...
57
+ def seek(self, offset: int, whence: int, /) -> object: ...
58
+
59
+ class _SupportsWrite(Protocol[_CharType_contra]):
60
+ def write(self, s: _CharType_contra, /) -> object: ...
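+ 
+ # Note (illustrative): these Protocols describe the minimal file-like
+ # surface the loaders need; any object with a matching ``read``/``write``
+ # method satisfies them structurally, with no inheritance required.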
61
+
62
+ __all__: list[str]
63
+
64
+ class BagObj(Generic[_T_co]):
65
+ def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ...
66
+ def __getattribute__(self, key: str) -> _T_co: ...
67
+ def __dir__(self) -> list[str]: ...
68
+
69
+ class NpzFile(Mapping[str, NDArray[Any]]):
70
+ zip: zipfile.ZipFile
71
+ fid: None | IO[str]
72
+ files: list[str]
73
+ allow_pickle: bool
74
+ pickle_kwargs: None | Mapping[str, Any]
75
+ _MAX_REPR_ARRAY_COUNT: int
76
+ # Represent `f` as a mutable property so we can access the type of `self`
77
+ @property
78
+ def f(self: _T) -> BagObj[_T]: ...
79
+ @f.setter
80
+ def f(self: _T, value: BagObj[_T]) -> None: ...
81
+ def __init__(
82
+ self,
83
+ fid: IO[str],
84
+ own_fid: bool = ...,
85
+ allow_pickle: bool = ...,
86
+ pickle_kwargs: None | Mapping[str, Any] = ...,
87
+ ) -> None: ...
88
+ def __enter__(self: _T) -> _T: ...
89
+ def __exit__(
90
+ self,
91
+ exc_type: None | type[BaseException],
92
+ exc_value: None | BaseException,
93
+ traceback: None | types.TracebackType,
94
+ /,
95
+ ) -> None: ...
96
+ def close(self) -> None: ...
97
+ def __del__(self) -> None: ...
98
+ def __iter__(self) -> Iterator[str]: ...
99
+ def __len__(self) -> int: ...
100
+ def __getitem__(self, key: str) -> NDArray[Any]: ...
101
+ def __contains__(self, key: str) -> bool: ...
102
+ def __repr__(self) -> str: ...
103
+
104
+ # NOTE: Returns a `NpzFile` if file is a zip file;
105
+ # returns an `ndarray`/`memmap` otherwise
106
+ def load(
107
+ file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes],
108
+ mmap_mode: L[None, "r+", "r", "w+", "c"] = ...,
109
+ allow_pickle: bool = ...,
110
+ fix_imports: bool = ...,
111
+ encoding: L["ASCII", "latin1", "bytes"] = ...,
112
+ ) -> Any: ...
113
+
114
+ def save(
115
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
116
+ arr: ArrayLike,
117
+ allow_pickle: bool = ...,
118
+ fix_imports: bool = ...,
119
+ ) -> None: ...
120
+
121
+ def savez(
122
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
123
+ *args: ArrayLike,
124
+ **kwds: ArrayLike,
125
+ ) -> None: ...
126
+
127
+ def savez_compressed(
128
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
129
+ *args: ArrayLike,
130
+ **kwds: ArrayLike,
131
+ ) -> None: ...
132
+
133
+ # File-like objects only have to implement `__iter__` and,
134
+ # optionally, `encoding`
135
+ @overload
136
+ def loadtxt(
137
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
138
+ dtype: None = ...,
139
+ comments: None | str | Sequence[str] = ...,
140
+ delimiter: None | str = ...,
141
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
142
+ skiprows: int = ...,
143
+ usecols: int | Sequence[int] = ...,
144
+ unpack: bool = ...,
145
+ ndmin: L[0, 1, 2] = ...,
146
+ encoding: None | str = ...,
147
+ max_rows: None | int = ...,
148
+ *,
149
+ quotechar: None | str = ...,
150
+ like: None | _SupportsArrayFunc = ...
151
+ ) -> NDArray[float64]: ...
152
+ @overload
153
+ def loadtxt(
154
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
155
+ dtype: _DTypeLike[_SCT],
156
+ comments: None | str | Sequence[str] = ...,
157
+ delimiter: None | str = ...,
158
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
159
+ skiprows: int = ...,
160
+ usecols: int | Sequence[int] = ...,
161
+ unpack: bool = ...,
162
+ ndmin: L[0, 1, 2] = ...,
163
+ encoding: None | str = ...,
164
+ max_rows: None | int = ...,
165
+ *,
166
+ quotechar: None | str = ...,
167
+ like: None | _SupportsArrayFunc = ...
168
+ ) -> NDArray[_SCT]: ...
169
+ @overload
170
+ def loadtxt(
171
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
172
+ dtype: DTypeLike,
173
+ comments: None | str | Sequence[str] = ...,
174
+ delimiter: None | str = ...,
175
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
176
+ skiprows: int = ...,
177
+ usecols: int | Sequence[int] = ...,
178
+ unpack: bool = ...,
179
+ ndmin: L[0, 1, 2] = ...,
180
+ encoding: None | str = ...,
181
+ max_rows: None | int = ...,
182
+ *,
183
+ quotechar: None | str = ...,
184
+ like: None | _SupportsArrayFunc = ...
185
+ ) -> NDArray[Any]: ...
186
+
187
+ def savetxt(
188
+ fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes],
189
+ X: ArrayLike,
190
+ fmt: str | Sequence[str] = ...,
191
+ delimiter: str = ...,
192
+ newline: str = ...,
193
+ header: str = ...,
194
+ footer: str = ...,
195
+ comments: str = ...,
196
+ encoding: None | str = ...,
197
+ ) -> None: ...
198
+
199
+ @overload
200
+ def fromregex(
201
+ file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
202
+ regexp: str | bytes | Pattern[Any],
203
+ dtype: _DTypeLike[_SCT],
204
+ encoding: None | str = ...
205
+ ) -> NDArray[_SCT]: ...
206
+ @overload
207
+ def fromregex(
208
+ file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
209
+ regexp: str | bytes | Pattern[Any],
210
+ dtype: DTypeLike,
211
+ encoding: None | str = ...
212
+ ) -> NDArray[Any]: ...
213
+
214
+ @overload
215
+ def genfromtxt(
216
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
217
+ dtype: None = ...,
218
+ comments: str = ...,
219
+ delimiter: None | str | int | Iterable[int] = ...,
220
+ skip_header: int = ...,
221
+ skip_footer: int = ...,
222
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
223
+ missing_values: Any = ...,
224
+ filling_values: Any = ...,
225
+ usecols: None | Sequence[int] = ...,
226
+ names: L[None, True] | str | Collection[str] = ...,
227
+ excludelist: None | Sequence[str] = ...,
228
+ deletechars: str = ...,
229
+ replace_space: str = ...,
230
+ autostrip: bool = ...,
231
+ case_sensitive: bool | L['upper', 'lower'] = ...,
232
+ defaultfmt: str = ...,
233
+ unpack: None | bool = ...,
234
+ usemask: bool = ...,
235
+ loose: bool = ...,
236
+ invalid_raise: bool = ...,
237
+ max_rows: None | int = ...,
238
+ encoding: str = ...,
239
+ *,
240
+ ndmin: L[0, 1, 2] = ...,
241
+ like: None | _SupportsArrayFunc = ...,
242
+ ) -> NDArray[Any]: ...
243
+ @overload
244
+ def genfromtxt(
245
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
246
+ dtype: _DTypeLike[_SCT],
247
+ comments: str = ...,
248
+ delimiter: None | str | int | Iterable[int] = ...,
249
+ skip_header: int = ...,
250
+ skip_footer: int = ...,
251
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
252
+ missing_values: Any = ...,
253
+ filling_values: Any = ...,
254
+ usecols: None | Sequence[int] = ...,
255
+ names: L[None, True] | str | Collection[str] = ...,
256
+ excludelist: None | Sequence[str] = ...,
257
+ deletechars: str = ...,
258
+ replace_space: str = ...,
259
+ autostrip: bool = ...,
260
+ case_sensitive: bool | L['upper', 'lower'] = ...,
261
+ defaultfmt: str = ...,
262
+ unpack: None | bool = ...,
263
+ usemask: bool = ...,
264
+ loose: bool = ...,
265
+ invalid_raise: bool = ...,
266
+ max_rows: None | int = ...,
267
+ encoding: str = ...,
268
+ *,
269
+ ndmin: L[0, 1, 2] = ...,
270
+ like: None | _SupportsArrayFunc = ...,
271
+ ) -> NDArray[_SCT]: ...
272
+ @overload
273
+ def genfromtxt(
274
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
275
+ dtype: DTypeLike,
276
+ comments: str = ...,
277
+ delimiter: None | str | int | Iterable[int] = ...,
278
+ skip_header: int = ...,
279
+ skip_footer: int = ...,
280
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
281
+ missing_values: Any = ...,
282
+ filling_values: Any = ...,
283
+ usecols: None | Sequence[int] = ...,
284
+ names: L[None, True] | str | Collection[str] = ...,
285
+ excludelist: None | Sequence[str] = ...,
286
+ deletechars: str = ...,
287
+ replace_space: str = ...,
288
+ autostrip: bool = ...,
289
+ case_sensitive: bool | L['upper', 'lower'] = ...,
290
+ defaultfmt: str = ...,
291
+ unpack: None | bool = ...,
292
+ usemask: bool = ...,
293
+ loose: bool = ...,
294
+ invalid_raise: bool = ...,
295
+ max_rows: None | int = ...,
296
+ encoding: str = ...,
297
+ *,
298
+ ndmin: L[0, 1, 2] = ...,
299
+ like: None | _SupportsArrayFunc = ...,
300
+ ) -> NDArray[Any]: ...
301
+
302
+ @overload
303
+ def recfromtxt(
304
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
305
+ *,
306
+ usemask: L[False] = ...,
307
+ **kwargs: Any,
308
+ ) -> recarray[Any, dtype[record]]: ...
309
+ @overload
310
+ def recfromtxt(
311
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
312
+ *,
313
+ usemask: L[True],
314
+ **kwargs: Any,
315
+ ) -> MaskedRecords[Any, dtype[void]]: ...
316
+
317
+ @overload
318
+ def recfromcsv(
319
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
320
+ *,
321
+ usemask: L[False] = ...,
322
+ **kwargs: Any,
323
+ ) -> recarray[Any, dtype[record]]: ...
324
+ @overload
325
+ def recfromcsv(
326
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
327
+ *,
328
+ usemask: L[True],
329
+ **kwargs: Any,
330
+ ) -> MaskedRecords[Any, dtype[void]]: ...
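A short sketch of what the usemask overloads above encode for a static type checker: a literal usemask=True selects the MaskedRecords return type, anything else the recarray one (inline data is illustrative):

import io
import numpy as np

txt = io.StringIO("1 2\n3 4\n")
r1 = np.recfromtxt(txt)                # checker infers recarray[Any, dtype[record]]
txt.seek(0)
r2 = np.recfromtxt(txt, usemask=True)  # checker infers MaskedRecords[Any, dtype[void]]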
.venv/lib/python3.11/site-packages/numpy/lib/polynomial.pyi ADDED
@@ -0,0 +1,303 @@
1
+ from typing import (
2
+ Literal as L,
3
+ overload,
4
+ Any,
5
+ SupportsInt,
6
+ SupportsIndex,
7
+ TypeVar,
8
+ NoReturn,
9
+ )
10
+
11
+ from numpy import (
12
+ RankWarning as RankWarning,
13
+ poly1d as poly1d,
14
+ unsignedinteger,
15
+ signedinteger,
16
+ floating,
17
+ complexfloating,
18
+ bool_,
19
+ int32,
20
+ int64,
21
+ float64,
22
+ complex128,
23
+ object_,
24
+ )
25
+
26
+ from numpy._typing import (
27
+ NDArray,
28
+ ArrayLike,
29
+ _ArrayLikeBool_co,
30
+ _ArrayLikeUInt_co,
31
+ _ArrayLikeInt_co,
32
+ _ArrayLikeFloat_co,
33
+ _ArrayLikeComplex_co,
34
+ _ArrayLikeObject_co,
35
+ )
36
+
37
+ _T = TypeVar("_T")
38
+
39
+ _2Tup = tuple[_T, _T]
40
+ _5Tup = tuple[
41
+ _T,
42
+ NDArray[float64],
43
+ NDArray[int32],
44
+ NDArray[float64],
45
+ NDArray[float64],
46
+ ]
47
+
48
+ __all__: list[str]
49
+
50
+ def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ...
51
+
52
+ # Returns either a float or complex array depending on the input values.
53
+ # See `np.linalg.eigvals`.
54
+ def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ...
55
+
56
+ @overload
57
+ def polyint(
58
+ p: poly1d,
59
+ m: SupportsInt | SupportsIndex = ...,
60
+ k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
61
+ ) -> poly1d: ...
62
+ @overload
63
+ def polyint(
64
+ p: _ArrayLikeFloat_co,
65
+ m: SupportsInt | SupportsIndex = ...,
66
+ k: None | _ArrayLikeFloat_co = ...,
67
+ ) -> NDArray[floating[Any]]: ...
68
+ @overload
69
+ def polyint(
70
+ p: _ArrayLikeComplex_co,
71
+ m: SupportsInt | SupportsIndex = ...,
72
+ k: None | _ArrayLikeComplex_co = ...,
73
+ ) -> NDArray[complexfloating[Any, Any]]: ...
74
+ @overload
75
+ def polyint(
76
+ p: _ArrayLikeObject_co,
77
+ m: SupportsInt | SupportsIndex = ...,
78
+ k: None | _ArrayLikeObject_co = ...,
79
+ ) -> NDArray[object_]: ...
80
+
81
+ @overload
82
+ def polyder(
83
+ p: poly1d,
84
+ m: SupportsInt | SupportsIndex = ...,
85
+ ) -> poly1d: ...
86
+ @overload
87
+ def polyder(
88
+ p: _ArrayLikeFloat_co,
89
+ m: SupportsInt | SupportsIndex = ...,
90
+ ) -> NDArray[floating[Any]]: ...
91
+ @overload
92
+ def polyder(
93
+ p: _ArrayLikeComplex_co,
94
+ m: SupportsInt | SupportsIndex = ...,
95
+ ) -> NDArray[complexfloating[Any, Any]]: ...
96
+ @overload
97
+ def polyder(
98
+ p: _ArrayLikeObject_co,
99
+ m: SupportsInt | SupportsIndex = ...,
100
+ ) -> NDArray[object_]: ...
101
+
102
+ @overload
103
+ def polyfit(
104
+ x: _ArrayLikeFloat_co,
105
+ y: _ArrayLikeFloat_co,
106
+ deg: SupportsIndex | SupportsInt,
107
+ rcond: None | float = ...,
108
+ full: L[False] = ...,
109
+ w: None | _ArrayLikeFloat_co = ...,
110
+ cov: L[False] = ...,
111
+ ) -> NDArray[float64]: ...
112
+ @overload
113
+ def polyfit(
114
+ x: _ArrayLikeComplex_co,
115
+ y: _ArrayLikeComplex_co,
116
+ deg: SupportsIndex | SupportsInt,
117
+ rcond: None | float = ...,
118
+ full: L[False] = ...,
119
+ w: None | _ArrayLikeFloat_co = ...,
120
+ cov: L[False] = ...,
121
+ ) -> NDArray[complex128]: ...
122
+ @overload
123
+ def polyfit(
124
+ x: _ArrayLikeFloat_co,
125
+ y: _ArrayLikeFloat_co,
126
+ deg: SupportsIndex | SupportsInt,
127
+ rcond: None | float = ...,
128
+ full: L[False] = ...,
129
+ w: None | _ArrayLikeFloat_co = ...,
130
+ cov: L[True, "unscaled"] = ...,
131
+ ) -> _2Tup[NDArray[float64]]: ...
132
+ @overload
133
+ def polyfit(
134
+ x: _ArrayLikeComplex_co,
135
+ y: _ArrayLikeComplex_co,
136
+ deg: SupportsIndex | SupportsInt,
137
+ rcond: None | float = ...,
138
+ full: L[False] = ...,
139
+ w: None | _ArrayLikeFloat_co = ...,
140
+ cov: L[True, "unscaled"] = ...,
141
+ ) -> _2Tup[NDArray[complex128]]: ...
142
+ @overload
143
+ def polyfit(
144
+ x: _ArrayLikeFloat_co,
145
+ y: _ArrayLikeFloat_co,
146
+ deg: SupportsIndex | SupportsInt,
147
+ rcond: None | float = ...,
148
+ full: L[True] = ...,
149
+ w: None | _ArrayLikeFloat_co = ...,
150
+ cov: bool | L["unscaled"] = ...,
151
+ ) -> _5Tup[NDArray[float64]]: ...
152
+ @overload
153
+ def polyfit(
154
+ x: _ArrayLikeComplex_co,
155
+ y: _ArrayLikeComplex_co,
156
+ deg: SupportsIndex | SupportsInt,
157
+ rcond: None | float = ...,
158
+ full: L[True] = ...,
159
+ w: None | _ArrayLikeFloat_co = ...,
160
+ cov: bool | L["unscaled"] = ...,
161
+ ) -> _5Tup[NDArray[complex128]]: ...
162
+
163
+ @overload
164
+ def polyval(
165
+ p: _ArrayLikeBool_co,
166
+ x: _ArrayLikeBool_co,
167
+ ) -> NDArray[int64]: ...
168
+ @overload
169
+ def polyval(
170
+ p: _ArrayLikeUInt_co,
171
+ x: _ArrayLikeUInt_co,
172
+ ) -> NDArray[unsignedinteger[Any]]: ...
173
+ @overload
174
+ def polyval(
175
+ p: _ArrayLikeInt_co,
176
+ x: _ArrayLikeInt_co,
177
+ ) -> NDArray[signedinteger[Any]]: ...
178
+ @overload
179
+ def polyval(
180
+ p: _ArrayLikeFloat_co,
181
+ x: _ArrayLikeFloat_co,
182
+ ) -> NDArray[floating[Any]]: ...
183
+ @overload
184
+ def polyval(
185
+ p: _ArrayLikeComplex_co,
186
+ x: _ArrayLikeComplex_co,
187
+ ) -> NDArray[complexfloating[Any, Any]]: ...
188
+ @overload
189
+ def polyval(
190
+ p: _ArrayLikeObject_co,
191
+ x: _ArrayLikeObject_co,
192
+ ) -> NDArray[object_]: ...
193
+
194
+ @overload
195
+ def polyadd(
196
+ a1: poly1d,
197
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
198
+ ) -> poly1d: ...
199
+ @overload
200
+ def polyadd(
201
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
202
+ a2: poly1d,
203
+ ) -> poly1d: ...
204
+ @overload
205
+ def polyadd(
206
+ a1: _ArrayLikeBool_co,
207
+ a2: _ArrayLikeBool_co,
208
+ ) -> NDArray[bool_]: ...
209
+ @overload
210
+ def polyadd(
211
+ a1: _ArrayLikeUInt_co,
212
+ a2: _ArrayLikeUInt_co,
213
+ ) -> NDArray[unsignedinteger[Any]]: ...
214
+ @overload
215
+ def polyadd(
216
+ a1: _ArrayLikeInt_co,
217
+ a2: _ArrayLikeInt_co,
218
+ ) -> NDArray[signedinteger[Any]]: ...
219
+ @overload
220
+ def polyadd(
221
+ a1: _ArrayLikeFloat_co,
222
+ a2: _ArrayLikeFloat_co,
223
+ ) -> NDArray[floating[Any]]: ...
224
+ @overload
225
+ def polyadd(
226
+ a1: _ArrayLikeComplex_co,
227
+ a2: _ArrayLikeComplex_co,
228
+ ) -> NDArray[complexfloating[Any, Any]]: ...
229
+ @overload
230
+ def polyadd(
231
+ a1: _ArrayLikeObject_co,
232
+ a2: _ArrayLikeObject_co,
233
+ ) -> NDArray[object_]: ...
234
+
235
+ @overload
236
+ def polysub(
237
+ a1: poly1d,
238
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
239
+ ) -> poly1d: ...
240
+ @overload
241
+ def polysub(
242
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
243
+ a2: poly1d,
244
+ ) -> poly1d: ...
245
+ @overload
246
+ def polysub(
247
+ a1: _ArrayLikeBool_co,
248
+ a2: _ArrayLikeBool_co,
249
+ ) -> NoReturn: ...
250
+ @overload
251
+ def polysub(
252
+ a1: _ArrayLikeUInt_co,
253
+ a2: _ArrayLikeUInt_co,
254
+ ) -> NDArray[unsignedinteger[Any]]: ...
255
+ @overload
256
+ def polysub(
257
+ a1: _ArrayLikeInt_co,
258
+ a2: _ArrayLikeInt_co,
259
+ ) -> NDArray[signedinteger[Any]]: ...
260
+ @overload
261
+ def polysub(
262
+ a1: _ArrayLikeFloat_co,
263
+ a2: _ArrayLikeFloat_co,
264
+ ) -> NDArray[floating[Any]]: ...
265
+ @overload
266
+ def polysub(
267
+ a1: _ArrayLikeComplex_co,
268
+ a2: _ArrayLikeComplex_co,
269
+ ) -> NDArray[complexfloating[Any, Any]]: ...
270
+ @overload
271
+ def polysub(
272
+ a1: _ArrayLikeObject_co,
273
+ a2: _ArrayLikeObject_co,
274
+ ) -> NDArray[object_]: ...
275
+
276
+ # NOTE: Not an alias, but they do have the same signature (that we can reuse)
277
+ polymul = polyadd
278
+
279
+ @overload
280
+ def polydiv(
281
+ u: poly1d,
282
+ v: _ArrayLikeComplex_co | _ArrayLikeObject_co,
283
+ ) -> _2Tup[poly1d]: ...
284
+ @overload
285
+ def polydiv(
286
+ u: _ArrayLikeComplex_co | _ArrayLikeObject_co,
287
+ v: poly1d,
288
+ ) -> _2Tup[poly1d]: ...
289
+ @overload
290
+ def polydiv(
291
+ u: _ArrayLikeFloat_co,
292
+ v: _ArrayLikeFloat_co,
293
+ ) -> _2Tup[NDArray[floating[Any]]]: ...
294
+ @overload
295
+ def polydiv(
296
+ u: _ArrayLikeComplex_co,
297
+ v: _ArrayLikeComplex_co,
298
+ ) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ...
299
+ @overload
300
+ def polydiv(
301
+ u: _ArrayLikeObject_co,
302
+ v: _ArrayLikeObject_co,
303
+ ) -> _2Tup[NDArray[Any]]: ...
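The polyfit overloads above key on the full and cov flags; a small sketch of the return shapes they encode (values illustrative):

import numpy as np

x = np.arange(6.0)
y = 2.0 * x + 1.0

c = np.polyfit(x, y, 1)                # NDArray[float64]: coefficients only
c2, V = np.polyfit(x, y, 1, cov=True)  # _2Tup: coefficients and covariance matrix
res = np.polyfit(x, y, 1, full=True)   # _5Tup: coeffs, residuals, rank, singular values, rcond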
.venv/lib/python3.11/site-packages/numpy/lib/recfunctions.py ADDED
@@ -0,0 +1,1673 @@
1
+ """
2
+ Collection of utilities to manipulate structured arrays.
3
+
4
+ Most of these functions were initially implemented by John Hunter for
5
+ matplotlib. They have been rewritten and extended for convenience.
6
+
7
+ """
8
+ import itertools
9
+ import numpy as np
10
+ import numpy.ma as ma
11
+ from numpy import ndarray, recarray
12
+ from numpy.ma import MaskedArray
13
+ from numpy.ma.mrecords import MaskedRecords
14
+ from numpy.core.overrides import array_function_dispatch
15
+ from numpy.lib._iotools import _is_string_like
16
+
17
+ _check_fill_value = np.ma.core._check_fill_value
18
+
19
+
20
+ __all__ = [
21
+ 'append_fields', 'apply_along_fields', 'assign_fields_by_name',
22
+ 'drop_fields', 'find_duplicates', 'flatten_descr',
23
+ 'get_fieldstructure', 'get_names', 'get_names_flat',
24
+ 'join_by', 'merge_arrays', 'rec_append_fields',
25
+ 'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
26
+ 'rename_fields', 'repack_fields', 'require_fields',
27
+ 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
28
+ ]
29
+
30
+
31
+ def _recursive_fill_fields_dispatcher(input, output):
32
+ return (input, output)
33
+
34
+
35
+ @array_function_dispatch(_recursive_fill_fields_dispatcher)
36
+ def recursive_fill_fields(input, output):
37
+ """
38
+ Fills the fields of output with the fields from input,
39
+ with support for nested structures.
40
+
41
+ Parameters
42
+ ----------
43
+ input : ndarray
44
+ Input array.
45
+ output : ndarray
46
+ Output array.
47
+
48
+ Notes
49
+ -----
50
+ * `output` should be at least the same size as `input`
51
+
52
+ Examples
53
+ --------
54
+ >>> from numpy.lib import recfunctions as rfn
55
+ >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
56
+ >>> b = np.zeros((3,), dtype=a.dtype)
57
+ >>> rfn.recursive_fill_fields(a, b)
58
+ array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
59
+
60
+ """
61
+ newdtype = output.dtype
62
+ for field in newdtype.names:
63
+ try:
64
+ current = input[field]
65
+ except ValueError:
66
+ continue
67
+ if current.dtype.names is not None:
68
+ recursive_fill_fields(current, output[field])
69
+ else:
70
+ output[field][:len(current)] = current
71
+ return output
72
+
73
+
74
+ def _get_fieldspec(dtype):
75
+ """
76
+ Produce a list of name/dtype pairs corresponding to the dtype fields
77
+
78
+ Similar to dtype.descr, but the second item of each tuple is a dtype, not a
79
+ string. As a result, this handles subarray dtypes.
80
+
81
+ Can be passed to the dtype constructor to reconstruct the dtype, noting that
82
+ this (deliberately) discards field offsets.
83
+
84
+ Examples
85
+ --------
86
+ >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
87
+ >>> dt.descr
88
+ [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
89
+ >>> _get_fieldspec(dt)
90
+ [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
91
+
92
+ """
93
+ if dtype.names is None:
94
+ # .descr returns a nameless field, so we should too
95
+ return [('', dtype)]
96
+ else:
97
+ fields = ((name, dtype.fields[name]) for name in dtype.names)
98
+ # keep any titles, if present
99
+ return [
100
+ (name if len(f) == 2 else (f[2], name), f[0])
101
+ for name, f in fields
102
+ ]
103
+
104
+
105
+ def get_names(adtype):
106
+ """
107
+ Returns the field names of the input datatype as a tuple. Input datatype
108
+ must have fields, otherwise an error is raised.
109
+
110
+ Parameters
111
+ ----------
112
+ adtype : dtype
113
+ Input datatype
114
+
115
+ Examples
116
+ --------
117
+ >>> from numpy.lib import recfunctions as rfn
118
+ >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
119
+ ('A',)
120
+ >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)
121
+ ('A', 'B')
122
+ >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
123
+ >>> rfn.get_names(adtype)
124
+ ('a', ('b', ('ba', 'bb')))
125
+ """
126
+ listnames = []
127
+ names = adtype.names
128
+ for name in names:
129
+ current = adtype[name]
130
+ if current.names is not None:
131
+ listnames.append((name, tuple(get_names(current))))
132
+ else:
133
+ listnames.append(name)
134
+ return tuple(listnames)
135
+
136
+
137
+ def get_names_flat(adtype):
138
+ """
139
+ Returns the field names of the input datatype as a tuple. Input datatype
140
+ must have fields, otherwise an error is raised.
141
+ Nested structures are flattened beforehand.
142
+
143
+ Parameters
144
+ ----------
145
+ adtype : dtype
146
+ Input datatype
147
+
148
+ Examples
149
+ --------
150
+ >>> from numpy.lib import recfunctions as rfn
151
+ >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
152
+ False
153
+ >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
154
+ ('A', 'B')
155
+ >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
156
+ >>> rfn.get_names_flat(adtype)
157
+ ('a', 'b', 'ba', 'bb')
158
+ """
159
+ listnames = []
160
+ names = adtype.names
161
+ for name in names:
162
+ listnames.append(name)
163
+ current = adtype[name]
164
+ if current.names is not None:
165
+ listnames.extend(get_names_flat(current))
166
+ return tuple(listnames)
167
+
168
+
169
+ def flatten_descr(ndtype):
170
+ """
171
+ Flatten a structured data-type description.
172
+
173
+ Examples
174
+ --------
175
+ >>> from numpy.lib import recfunctions as rfn
176
+ >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
177
+ >>> rfn.flatten_descr(ndtype)
178
+ (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
179
+
180
+ """
181
+ names = ndtype.names
182
+ if names is None:
183
+ return (('', ndtype),)
184
+ else:
185
+ descr = []
186
+ for field in names:
187
+ (typ, _) = ndtype.fields[field]
188
+ if typ.names is not None:
189
+ descr.extend(flatten_descr(typ))
190
+ else:
191
+ descr.append((field, typ))
192
+ return tuple(descr)
193
+
194
+
195
+ def _zip_dtype(seqarrays, flatten=False):
196
+ newdtype = []
197
+ if flatten:
198
+ for a in seqarrays:
199
+ newdtype.extend(flatten_descr(a.dtype))
200
+ else:
201
+ for a in seqarrays:
202
+ current = a.dtype
203
+ if current.names is not None and len(current.names) == 1:
204
+ # special case - dtypes of 1 field are flattened
205
+ newdtype.extend(_get_fieldspec(current))
206
+ else:
207
+ newdtype.append(('', current))
208
+ return np.dtype(newdtype)
209
+
210
+
211
+ def _zip_descr(seqarrays, flatten=False):
212
+ """
213
+ Combine the dtype description of a series of arrays.
214
+
215
+ Parameters
216
+ ----------
217
+ seqarrays : sequence of arrays
218
+ Sequence of arrays
219
+ flatten : {boolean}, optional
220
+ Whether to collapse nested descriptions.
221
+ """
222
+ return _zip_dtype(seqarrays, flatten=flatten).descr
223
+
224
+
225
+ def get_fieldstructure(adtype, lastname=None, parents=None,):
226
+ """
227
+ Returns a dictionary with fields indexing lists of their parent fields.
228
+
229
+ This function is used to simplify access to fields nested in other fields.
230
+
231
+ Parameters
232
+ ----------
233
+ adtype : np.dtype
234
+ Input datatype
235
+ lastname : optional
236
+ Last processed field name (used internally during recursion).
237
+ parents : dictionary
238
+ Dictionary of parent fields (used internally during recursion).
239
+
240
+ Examples
241
+ --------
242
+ >>> from numpy.lib import recfunctions as rfn
243
+ >>> ndtype = np.dtype([('A', int),
244
+ ... ('B', [('BA', int),
245
+ ... ('BB', [('BBA', int), ('BBB', int)])])])
246
+ >>> rfn.get_fieldstructure(ndtype)
247
+ ... # XXX: possible regression, order of BBA and BBB is swapped
248
+ {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
249
+
250
+ """
251
+ if parents is None:
252
+ parents = {}
253
+ names = adtype.names
254
+ for name in names:
255
+ current = adtype[name]
256
+ if current.names is not None:
257
+ if lastname:
258
+ parents[name] = [lastname, ]
259
+ else:
260
+ parents[name] = []
261
+ parents.update(get_fieldstructure(current, name, parents))
262
+ else:
263
+ lastparent = [_ for _ in (parents.get(lastname, []) or [])]
264
+ if lastparent:
265
+ lastparent.append(lastname)
266
+ elif lastname:
267
+ lastparent = [lastname, ]
268
+ parents[name] = lastparent or []
269
+ return parents
270
+
271
+
272
+ def _izip_fields_flat(iterable):
273
+ """
274
+ Returns an iterator of concatenated fields from a sequence of arrays,
275
+ collapsing any nested structure.
276
+
277
+ """
278
+ for element in iterable:
279
+ if isinstance(element, np.void):
280
+ yield from _izip_fields_flat(tuple(element))
281
+ else:
282
+ yield element
283
+
284
+
285
+ def _izip_fields(iterable):
286
+ """
287
+ Returns an iterator of concatenated fields from a sequence of arrays.
288
+
289
+ """
290
+ for element in iterable:
291
+ if (hasattr(element, '__iter__') and
292
+ not isinstance(element, str)):
293
+ yield from _izip_fields(element)
294
+ elif isinstance(element, np.void) and len(tuple(element)) == 1:
295
+ # same recursion as the branch above, for one-field np.void scalars
296
+ yield from _izip_fields(element)
297
+ else:
298
+ yield element
299
+
300
+
301
+ def _izip_records(seqarrays, fill_value=None, flatten=True):
302
+ """
303
+ Returns an iterator of concatenated items from a sequence of arrays.
304
+
305
+ Parameters
306
+ ----------
307
+ seqarrays : sequence of arrays
308
+ Sequence of arrays.
309
+ fill_value : {None, integer}
310
+ Value used to pad shorter iterables.
311
+ flatten : {True, False},
312
+ Whether to
313
+ """
314
+
315
+ # Should we flatten the items, or just use a nested approach
316
+ if flatten:
317
+ zipfunc = _izip_fields_flat
318
+ else:
319
+ zipfunc = _izip_fields
320
+
321
+ for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
322
+ yield tuple(zipfunc(tup))
323
+
324
+
325
+ def _fix_output(output, usemask=True, asrecarray=False):
326
+ """
327
+ Private function: return a recarray, an ndarray, a MaskedArray
328
+ or a MaskedRecords depending on the input parameters.
329
+ """
330
+ if not isinstance(output, MaskedArray):
331
+ usemask = False
332
+ if usemask:
333
+ if asrecarray:
334
+ output = output.view(MaskedRecords)
335
+ else:
336
+ output = ma.filled(output)
337
+ if asrecarray:
338
+ output = output.view(recarray)
339
+ return output
340
+
341
+
342
+ def _fix_defaults(output, defaults=None):
343
+ """
344
+ Update the fill_value and masked data of `output`
345
+ from the defaults given in the dictionary `defaults`.
346
+ """
347
+ names = output.dtype.names
348
+ (data, mask, fill_value) = (output.data, output.mask, output.fill_value)
349
+ for (k, v) in (defaults or {}).items():
350
+ if k in names:
351
+ fill_value[k] = v
352
+ data[k][mask[k]] = v
353
+ return output
354
+
355
+
356
+ def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
357
+ usemask=None, asrecarray=None):
358
+ return seqarrays
359
+
360
+
361
+ @array_function_dispatch(_merge_arrays_dispatcher)
362
+ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
363
+ usemask=False, asrecarray=False):
364
+ """
365
+ Merge arrays field by field.
366
+
367
+ Parameters
368
+ ----------
369
+ seqarrays : sequence of ndarrays
370
+ Sequence of arrays
371
+ fill_value : {float}, optional
372
+ Filling value used to pad missing data on the shorter arrays.
373
+ flatten : {False, True}, optional
374
+ Whether to collapse nested fields.
375
+ usemask : {False, True}, optional
376
+ Whether to return a masked array or not.
377
+ asrecarray : {False, True}, optional
378
+ Whether to return a recarray (MaskedRecords) or not.
379
+
380
+ Examples
381
+ --------
382
+ >>> from numpy.lib import recfunctions as rfn
383
+ >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
384
+ array([( 1, 10.), ( 2, 20.), (-1, 30.)],
385
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
386
+
387
+ >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
388
+ ... np.array([10., 20., 30.])), usemask=False)
389
+ array([(1, 10.0), (2, 20.0), (-1, 30.0)],
390
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
391
+ >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
392
+ ... np.array([10., 20., 30.])),
393
+ ... usemask=False, asrecarray=True)
394
+ rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
395
+ dtype=[('a', '<i8'), ('f1', '<f8')])
396
+
397
+ Notes
398
+ -----
399
+ * Without a mask, the missing value will be filled with something,
400
+ depending on its corresponding type:
401
+
402
+ * ``-1`` for integers
403
+ * ``-1.0`` for floating point numbers
404
+ * ``'-'`` for characters
405
+ * ``'-1'`` for strings
406
+ * ``True`` for boolean values
407
+ * XXX: I just obtained these values empirically
408
+ """
409
+ # Only one item in the input sequence?
410
+ if (len(seqarrays) == 1):
411
+ seqarrays = np.asanyarray(seqarrays[0])
412
+ # Do we have a single ndarray as input?
413
+ if isinstance(seqarrays, (ndarray, np.void)):
414
+ seqdtype = seqarrays.dtype
415
+ # Make sure we have named fields
416
+ if seqdtype.names is None:
417
+ seqdtype = np.dtype([('', seqdtype)])
418
+ if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
419
+ # Minimal processing needed: just make sure everything's a-ok
420
+ seqarrays = seqarrays.ravel()
421
+ # Find what type of array we must return
422
+ if usemask:
423
+ if asrecarray:
424
+ seqtype = MaskedRecords
425
+ else:
426
+ seqtype = MaskedArray
427
+ elif asrecarray:
428
+ seqtype = recarray
429
+ else:
430
+ seqtype = ndarray
431
+ return seqarrays.view(dtype=seqdtype, type=seqtype)
432
+ else:
433
+ seqarrays = (seqarrays,)
434
+ else:
435
+ # Make sure we have arrays in the input sequence
436
+ seqarrays = [np.asanyarray(_m) for _m in seqarrays]
437
+ # Find the sizes of the inputs and their maximum
438
+ sizes = tuple(a.size for a in seqarrays)
439
+ maxlength = max(sizes)
440
+ # Get the dtype of the output (flattening if needed)
441
+ newdtype = _zip_dtype(seqarrays, flatten=flatten)
442
+ # Initialize the sequences for data and mask
443
+ seqdata = []
444
+ seqmask = []
445
+ # If we expect some kind of MaskedArray, make a special loop.
446
+ if usemask:
447
+ for (a, n) in zip(seqarrays, sizes):
448
+ nbmissing = (maxlength - n)
449
+ # Get the data and mask
450
+ data = a.ravel().__array__()
451
+ mask = ma.getmaskarray(a).ravel()
452
+ # Get the filling value (if needed)
453
+ if nbmissing:
454
+ fval = _check_fill_value(fill_value, a.dtype)
455
+ if isinstance(fval, (ndarray, np.void)):
456
+ if len(fval.dtype) == 1:
457
+ fval = fval.item()[0]
458
+ fmsk = True
459
+ else:
460
+ fval = np.array(fval, dtype=a.dtype, ndmin=1)
461
+ fmsk = np.ones((1,), dtype=mask.dtype)
462
+ else:
463
+ fval = None
464
+ fmsk = True
465
+ # Store an iterator padding the input to the expected length
466
+ seqdata.append(itertools.chain(data, [fval] * nbmissing))
467
+ seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
468
+ # Create an iterator for the data
469
+ data = tuple(_izip_records(seqdata, flatten=flatten))
470
+ output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
471
+ mask=list(_izip_records(seqmask, flatten=flatten)))
472
+ if asrecarray:
473
+ output = output.view(MaskedRecords)
474
+ else:
475
+ # Same as before, without the mask we don't need...
476
+ for (a, n) in zip(seqarrays, sizes):
477
+ nbmissing = (maxlength - n)
478
+ data = a.ravel().__array__()
479
+ if nbmissing:
480
+ fval = _check_fill_value(fill_value, a.dtype)
481
+ if isinstance(fval, (ndarray, np.void)):
482
+ if len(fval.dtype) == 1:
483
+ fval = fval.item()[0]
484
+ else:
485
+ fval = np.array(fval, dtype=a.dtype, ndmin=1)
486
+ else:
487
+ fval = None
488
+ seqdata.append(itertools.chain(data, [fval] * nbmissing))
489
+ output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),
490
+ dtype=newdtype, count=maxlength)
491
+ if asrecarray:
492
+ output = output.view(recarray)
493
+ # And we're done...
494
+ return output
495
+
496
+
497
+ def _drop_fields_dispatcher(base, drop_names, usemask=None, asrecarray=None):
498
+ return (base,)
499
+
500
+
501
+ @array_function_dispatch(_drop_fields_dispatcher)
502
+ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
503
+ """
504
+ Return a new array with fields in `drop_names` dropped.
505
+
506
+ Nested fields are supported.
507
+
508
+ .. versionchanged:: 1.18.0
509
+ `drop_fields` returns an array with 0 fields if all fields are dropped,
510
+ rather than returning ``None`` as it did previously.
511
+
512
+ Parameters
513
+ ----------
514
+ base : array
515
+ Input array
516
+ drop_names : string or sequence
517
+ String or sequence of strings corresponding to the names of the
518
+ fields to drop.
519
+ usemask : {False, True}, optional
520
+ Whether to return a masked array or not.
521
+ asrecarray : bool, optional
522
+ Whether to return a recarray or a mrecarray (`asrecarray=True`) or
523
+ a plain ndarray or masked array with flexible dtype. The default
524
+ is False.
525
+
526
+ Examples
527
+ --------
528
+ >>> from numpy.lib import recfunctions as rfn
529
+ >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
530
+ ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
531
+ >>> rfn.drop_fields(a, 'a')
532
+ array([((2., 3),), ((5., 6),)],
533
+ dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])
534
+ >>> rfn.drop_fields(a, 'ba')
535
+ array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])
536
+ >>> rfn.drop_fields(a, ['ba', 'bb'])
537
+ array([(1,), (4,)], dtype=[('a', '<i8')])
538
+ """
539
+ if _is_string_like(drop_names):
540
+ drop_names = [drop_names]
541
+ else:
542
+ drop_names = set(drop_names)
543
+
544
+ def _drop_descr(ndtype, drop_names):
545
+ names = ndtype.names
546
+ newdtype = []
547
+ for name in names:
548
+ current = ndtype[name]
549
+ if name in drop_names:
550
+ continue
551
+ if current.names is not None:
552
+ descr = _drop_descr(current, drop_names)
553
+ if descr:
554
+ newdtype.append((name, descr))
555
+ else:
556
+ newdtype.append((name, current))
557
+ return newdtype
558
+
559
+ newdtype = _drop_descr(base.dtype, drop_names)
560
+
561
+ output = np.empty(base.shape, dtype=newdtype)
562
+ output = recursive_fill_fields(base, output)
563
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
564
+
565
+
566
+ def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
567
+ """
568
+ Return a new array keeping only the fields in `keep_names`,
569
+ and preserving the order of those fields.
570
+
571
+ Parameters
572
+ ----------
573
+ base : array
574
+ Input array
575
+ keep_names : string or sequence
576
+ String or sequence of strings corresponding to the names of the
577
+ fields to keep. Order of the names will be preserved.
578
+ usemask : {False, True}, optional
579
+ Whether to return a masked array or not.
580
+ asrecarray : bool, optional
581
+ Whether to return a recarray or a mrecarray (`asrecarray=True`) or
582
+ a plain ndarray or masked array with flexible dtype. The default
583
+ is False.
584
+ """
585
+ newdtype = [(n, base.dtype[n]) for n in keep_names]
586
+ output = np.empty(base.shape, dtype=newdtype)
587
+ output = recursive_fill_fields(base, output)
588
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
589
+
590
+
591
+ def _rec_drop_fields_dispatcher(base, drop_names):
592
+ return (base,)
593
+
594
+
595
+ @array_function_dispatch(_rec_drop_fields_dispatcher)
596
+ def rec_drop_fields(base, drop_names):
597
+ """
598
+ Returns a new numpy.recarray with fields in `drop_names` dropped.
599
+ """
600
+ return drop_fields(base, drop_names, usemask=False, asrecarray=True)
601
+
602
+
603
+ def _rename_fields_dispatcher(base, namemapper):
604
+ return (base,)
605
+
606
+
607
+ @array_function_dispatch(_rename_fields_dispatcher)
608
+ def rename_fields(base, namemapper):
609
+ """
610
+ Rename the fields from a flexible-datatype ndarray or recarray.
611
+
612
+ Nested fields are supported.
613
+
614
+ Parameters
615
+ ----------
616
+ base : ndarray
617
+ Input array whose fields must be modified.
618
+ namemapper : dictionary
619
+ Dictionary mapping old field names to their new version.
620
+
621
+ Examples
622
+ --------
623
+ >>> from numpy.lib import recfunctions as rfn
624
+ >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
625
+ ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
626
+ >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
627
+ array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
628
+ dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
629
+
630
+ """
631
+ def _recursive_rename_fields(ndtype, namemapper):
632
+ newdtype = []
633
+ for name in ndtype.names:
634
+ newname = namemapper.get(name, name)
635
+ current = ndtype[name]
636
+ if current.names is not None:
637
+ newdtype.append(
638
+ (newname, _recursive_rename_fields(current, namemapper))
639
+ )
640
+ else:
641
+ newdtype.append((newname, current))
642
+ return newdtype
643
+ newdtype = _recursive_rename_fields(base.dtype, namemapper)
644
+ return base.view(newdtype)
645
+
646
+
647
+ def _append_fields_dispatcher(base, names, data, dtypes=None,
648
+ fill_value=None, usemask=None, asrecarray=None):
649
+ yield base
650
+ yield from data
651
+
652
+
653
+ @array_function_dispatch(_append_fields_dispatcher)
654
+ def append_fields(base, names, data, dtypes=None,
655
+ fill_value=-1, usemask=True, asrecarray=False):
656
+ """
657
+ Add new fields to an existing array.
658
+
659
+ The names of the fields are given with the `names` argument,
660
+ the corresponding values with the `data` argument.
661
+ If a single field is appended, `names`, `data` and `dtypes` do not have
662
+ to be lists but just values.
663
+
664
+ Parameters
665
+ ----------
666
+ base : array
667
+ Input array to extend.
668
+ names : string, sequence
669
+ String or sequence of strings corresponding to the names
670
+ of the new fields.
671
+ data : array or sequence of arrays
672
+ Array or sequence of arrays storing the fields to add to the base.
673
+ dtypes : sequence of datatypes, optional
674
+ Datatype or sequence of datatypes.
675
+ If None, the datatypes are estimated from the `data`.
676
+ fill_value : {float}, optional
677
+ Filling value used to pad missing data on the shorter arrays.
678
+ usemask : {False, True}, optional
679
+ Whether to return a masked array or not.
680
+ asrecarray : {False, True}, optional
681
+ Whether to return a recarray (MaskedRecords) or not.
682
+
683
+ """
684
+ # Check the names
685
+ if isinstance(names, (tuple, list)):
686
+ if len(names) != len(data):
687
+ msg = "The number of arrays does not match the number of names"
688
+ raise ValueError(msg)
689
+ elif isinstance(names, str):
690
+ names = [names, ]
691
+ data = [data, ]
692
+ #
693
+ if dtypes is None:
694
+ data = [np.array(a, copy=False, subok=True) for a in data]
695
+ data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
696
+ else:
697
+ if not isinstance(dtypes, (tuple, list)):
698
+ dtypes = [dtypes, ]
699
+ if len(data) != len(dtypes):
700
+ if len(dtypes) == 1:
701
+ dtypes = dtypes * len(data)
702
+ else:
703
+ msg = "The dtypes argument must be None, a dtype, or a list."
704
+ raise ValueError(msg)
705
+ data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
706
+ for (a, n, d) in zip(data, names, dtypes)]
707
+ #
708
+ base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
709
+ if len(data) > 1:
710
+ data = merge_arrays(data, flatten=True, usemask=usemask,
711
+ fill_value=fill_value)
712
+ else:
713
+ data = data.pop()
714
+ #
715
+ output = ma.masked_all(
716
+ max(len(base), len(data)),
717
+ dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
718
+ output = recursive_fill_fields(base, output)
719
+ output = recursive_fill_fields(data, output)
720
+ #
721
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
722
+
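The append_fields docstring above has no Examples section; a minimal usage sketch under illustrative data:

import numpy as np
from numpy.lib import recfunctions as rfn

a = np.array([(1, 10.0), (2, 20.0)], dtype=[('a', np.int64), ('b', np.float64)])
# Append a single field 'c'; usemask=False returns a plain ndarray.
out = rfn.append_fields(a, 'c', data=np.array([100, 200]), usemask=False)
print(out.dtype.names)  # ('a', 'b', 'c')
print(out['c'])         # [100 200]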
723
+
724
+ def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
725
+ yield base
726
+ yield from data
727
+
728
+
729
+ @array_function_dispatch(_rec_append_fields_dispatcher)
730
+ def rec_append_fields(base, names, data, dtypes=None):
731
+ """
732
+ Add new fields to an existing array.
733
+
734
+ The names of the fields are given with the `names` argument,
735
+ the corresponding values with the `data` argument.
736
+ If a single field is appended, `names`, `data` and `dtypes` do not have
737
+ to be lists but just values.
738
+
739
+ Parameters
740
+ ----------
741
+ base : array
742
+ Input array to extend.
743
+ names : string, sequence
744
+ String or sequence of strings corresponding to the names
745
+ of the new fields.
746
+ data : array or sequence of arrays
747
+ Array or sequence of arrays storing the fields to add to the base.
748
+ dtypes : sequence of datatypes, optional
749
+ Datatype or sequence of datatypes.
750
+ If None, the datatypes are estimated from the `data`.
751
+
752
+ See Also
753
+ --------
754
+ append_fields
755
+
756
+ Returns
757
+ -------
758
+ appended_array : np.recarray
759
+ """
760
+ return append_fields(base, names, data=data, dtypes=dtypes,
761
+ asrecarray=True, usemask=False)
762
+
763
+
764
+ def _repack_fields_dispatcher(a, align=None, recurse=None):
765
+ return (a,)
766
+
767
+
768
+ @array_function_dispatch(_repack_fields_dispatcher)
769
+ def repack_fields(a, align=False, recurse=False):
770
+ """
771
+ Re-pack the fields of a structured array or dtype in memory.
772
+
773
+ The memory layout of structured datatypes allows fields at arbitrary
774
+ byte offsets. This means the fields can be separated by padding bytes,
775
+ their offsets can be non-monotonically increasing, and they can overlap.
776
+
777
+ This method removes any overlaps and reorders the fields in memory so they
778
+ have increasing byte offsets, and adds or removes padding bytes depending
779
+ on the `align` option, which behaves like the `align` option to
780
+ `numpy.dtype`.
781
+
782
+ If `align=False`, this method produces a "packed" memory layout in which
783
+ each field starts at the byte where the previous field ended, and any padding
784
+ bytes are removed.
785
+
786
+ If `align=True`, this method produces an "aligned" memory layout in which
787
+ each field's offset is a multiple of its alignment, and the total itemsize
788
+ is a multiple of the largest alignment, by adding padding bytes as needed.
789
+
790
+ Parameters
791
+ ----------
792
+ a : ndarray or dtype
793
+ array or dtype for which to repack the fields.
794
+ align : boolean
795
+ If true, use an "aligned" memory layout, otherwise use a "packed" layout.
796
+ recurse : boolean
797
+ If True, also repack nested structures.
798
+
799
+ Returns
800
+ -------
801
+ repacked : ndarray or dtype
802
+ Copy of `a` with fields repacked, or `a` itself if no repacking was
803
+ needed.
804
+
805
+ Examples
806
+ --------
807
+
808
+ >>> from numpy.lib import recfunctions as rfn
809
+ >>> def print_offsets(d):
810
+ ... print("offsets:", [d.fields[name][1] for name in d.names])
811
+ ... print("itemsize:", d.itemsize)
812
+ ...
813
+ >>> dt = np.dtype('u1, <i8, <f8', align=True)
814
+ >>> dt
815
+ dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f8'], \
816
+ 'offsets': [0, 8, 16], 'itemsize': 24}, align=True)
817
+ >>> print_offsets(dt)
818
+ offsets: [0, 8, 16]
819
+ itemsize: 24
820
+ >>> packed_dt = rfn.repack_fields(dt)
821
+ >>> packed_dt
822
+ dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
823
+ >>> print_offsets(packed_dt)
824
+ offsets: [0, 1, 9]
825
+ itemsize: 17
826
+
827
+ """
828
+ if not isinstance(a, np.dtype):
829
+ dt = repack_fields(a.dtype, align=align, recurse=recurse)
830
+ return a.astype(dt, copy=False)
831
+
832
+ if a.names is None:
833
+ return a
834
+
835
+ fieldinfo = []
836
+ for name in a.names:
837
+ tup = a.fields[name]
838
+ if recurse:
839
+ fmt = repack_fields(tup[0], align=align, recurse=True)
840
+ else:
841
+ fmt = tup[0]
842
+
843
+ if len(tup) == 3:
844
+ name = (tup[2], name)
845
+
846
+ fieldinfo.append((name, fmt))
847
+
848
+ dt = np.dtype(fieldinfo, align=align)
849
+ return np.dtype((a.type, dt))
850
+
851
+ def _get_fields_and_offsets(dt, offset=0):
852
+ """
853
+ Returns a flat list of (dtype, count, offset) tuples of all the
854
+ scalar fields in the dtype "dt", including nested fields, in left
855
+ to right order.
856
+ """
857
+
858
+ # counts up elements in subarrays, including nested subarrays, and returns
859
+ # base dtype and count
860
+ def count_elem(dt):
861
+ count = 1
862
+ while dt.shape != ():
863
+ for size in dt.shape:
864
+ count *= size
865
+ dt = dt.base
866
+ return dt, count
867
+
868
+ fields = []
869
+ for name in dt.names:
870
+ field = dt.fields[name]
871
+ f_dt, f_offset = field[0], field[1]
872
+ f_dt, n = count_elem(f_dt)
873
+
874
+ if f_dt.names is None:
875
+ fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
876
+ else:
877
+ subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
878
+ size = f_dt.itemsize
879
+
880
+ for i in range(n):
881
+ if i == 0:
882
+ # optimization: avoid list comprehension if no subarray
883
+ fields.extend(subfields)
884
+ else:
885
+ fields.extend([(d, c, o + i*size) for d, c, o in subfields])
886
+ return fields
887
+
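A worked illustration of the helper above (it is module-private, so this import is for demonstration only): each scalar field contributes a (subarray dtype, count, byte offset) triple, in left-to-right order.

import numpy as np
from numpy.lib.recfunctions import _get_fields_and_offsets  # private helper

dt = np.dtype([('a', 'i4'), ('b', 'f8', 2)])
for f_dt, count, offset in _get_fields_and_offsets(dt):
    print(f_dt, count, offset)
# ('<i4', (1,)) 1 0   -- scalar field 'a' at offset 0
# ('<f8', (2,)) 2 4   -- subarray field 'b' (2 elements) at offset 4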
888
+ def _common_stride(offsets, counts, itemsize):
889
+ """
890
+ Returns the stride between the fields, or None if the stride is not
891
+ constant. The values in "counts" designate the lengths of
892
+ subarrays. Subarrays are treated as many contiguous fields with an
893
+ always positive stride.
894
+ """
895
+ if len(offsets) <= 1:
896
+ return itemsize
897
+
898
+ negative = offsets[1] < offsets[0] # negative stride
899
+ if negative:
900
+ # reverse, so offsets will be ascending
901
+ it = zip(reversed(offsets), reversed(counts))
902
+ else:
903
+ it = zip(offsets, counts)
904
+
905
+ prev_offset = None
906
+ stride = None
907
+ for offset, count in it:
908
+ if count != 1: # subarray: always c-contiguous
909
+ if negative:
910
+ return None # subarrays can never have a negative stride
911
+ if stride is None:
912
+ stride = itemsize
913
+ if stride != itemsize:
914
+ return None
915
+ end_offset = offset + (count - 1) * itemsize
916
+ else:
917
+ end_offset = offset
918
+
919
+ if prev_offset is not None:
920
+ new_stride = offset - prev_offset
921
+ if stride is None:
922
+ stride = new_stride
923
+ if stride != new_stride:
924
+ return None
925
+
926
+ prev_offset = end_offset
927
+
928
+ if negative:
929
+ return -stride
930
+ return stride
931
+
932
+
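A numeric illustration of _common_stride (again module-private, imported here only to show the computation):

from numpy.lib.recfunctions import _common_stride  # private helper

print(_common_stride((0, 8, 16), (1, 1, 1), 8))  # 8: evenly spaced fields
print(_common_stride((16, 8, 0), (1, 1, 1), 8))  # -8: descending offsets
print(_common_stride((0, 8, 20), (1, 1, 1), 8))  # None: spacing is not constant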
933
+ def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
934
+ casting=None):
935
+ return (arr,)
936
+
937
+ @array_function_dispatch(_structured_to_unstructured_dispatcher)
938
+ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
939
+ """
940
+ Converts an n-D structured array into an (n+1)-D unstructured array.
941
+
942
+ The new array will have a new last dimension equal in size to the
943
+ number of field-elements of the input array. If not supplied, the output
944
+ datatype is determined from the numpy type promotion rules applied to all
945
+ the field datatypes.
946
+
947
+ Nested fields, as well as each element of any subarray fields, all count
948
+ as a single field-element.
949
+
950
+ Parameters
951
+ ----------
952
+ arr : ndarray
953
+ Structured array or dtype to convert. Cannot contain object datatype.
954
+ dtype : dtype, optional
955
+ The dtype of the output unstructured array.
956
+ copy : bool, optional
957
+ If true, always return a copy. If false, a view is returned if
958
+ possible, such as when the `dtype` and strides of the fields are
959
+ suitable and the array subtype is one of `np.ndarray`, `np.recarray`
960
+ or `np.memmap`.
961
+
962
+ .. versionchanged:: 1.25.0
963
+ A view can now be returned if the fields are separated by a
964
+ uniform stride.
965
+
966
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
967
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
968
+ data casting may occur.
969
+
970
+ Returns
971
+ -------
972
+ unstructured : ndarray
973
+ Unstructured array with one more dimension.
974
+
975
+ Examples
976
+ --------
977
+
978
+ >>> from numpy.lib import recfunctions as rfn
979
+ >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
980
+ >>> a
981
+ array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
982
+ (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
983
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
984
+ >>> rfn.structured_to_unstructured(a)
985
+ array([[0., 0., 0., 0., 0.],
986
+ [0., 0., 0., 0., 0.],
987
+ [0., 0., 0., 0., 0.],
988
+ [0., 0., 0., 0., 0.]])
989
+
990
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
991
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
992
+ >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
993
+ array([ 3. , 5.5, 9. , 11. ])
994
+
995
+ """
996
+ if arr.dtype.names is None:
997
+ raise ValueError('arr must be a structured array')
998
+
999
+ fields = _get_fields_and_offsets(arr.dtype)
1000
+ n_fields = len(fields)
1001
+ if n_fields == 0 and dtype is None:
1002
+ raise ValueError("arr has no fields. Unable to guess dtype")
1003
+ elif n_fields == 0:
1004
+ # too many bugs elsewhere for this to work now
1005
+ raise NotImplementedError("arr with no fields is not supported")
1006
+
1007
+ dts, counts, offsets = zip(*fields)
1008
+ names = ['f{}'.format(n) for n in range(n_fields)]
1009
+
1010
+ if dtype is None:
1011
+ out_dtype = np.result_type(*[dt.base for dt in dts])
1012
+ else:
1013
+ out_dtype = np.dtype(dtype)
1014
+
1015
+ # Use a series of views and casts to convert to an unstructured array:
1016
+
1017
+ # first view using flattened fields (doesn't work for object arrays)
1018
+ # Note: dts may include a shape for subarrays
1019
+ flattened_fields = np.dtype({'names': names,
1020
+ 'formats': dts,
1021
+ 'offsets': offsets,
1022
+ 'itemsize': arr.dtype.itemsize})
1023
+ arr = arr.view(flattened_fields)
1024
+
1025
+ # we only allow a few types to be unstructured by manipulating the
1026
+ # strides, because we know it won't work with, for example, np.matrix or
1027
+ # np.ma.MaskedArray.
1028
+ can_view = type(arr) in (np.ndarray, np.recarray, np.memmap)
1029
+ if (not copy) and can_view and all(dt.base == out_dtype for dt in dts):
1030
+ # all elements have the right dtype already; if they have a common
1031
+ # stride, we can just return a view
1032
+ common_stride = _common_stride(offsets, counts, out_dtype.itemsize)
1033
+ if common_stride is not None:
1034
+ wrap = arr.__array_wrap__
1035
+
1036
+ new_shape = arr.shape + (sum(counts), out_dtype.itemsize)
1037
+ new_strides = arr.strides + (abs(common_stride), 1)
1038
+
1039
+ arr = arr[..., np.newaxis].view(np.uint8) # view as bytes
1040
+ arr = arr[..., min(offsets):] # remove the leading unused data
1041
+ arr = np.lib.stride_tricks.as_strided(arr,
1042
+ new_shape,
1043
+ new_strides,
1044
+ subok=True)
1045
+
1046
+ # cast and drop the last dimension again
1047
+ arr = arr.view(out_dtype)[..., 0]
1048
+
1049
+ if common_stride < 0:
1050
+ arr = arr[..., ::-1] # reverse, if the stride was negative
1051
+ if type(arr) is not type(wrap.__self__):
1052
+ # Some types (e.g. recarray) turn into an ndarray along the
1053
+ # way, so we have to wrap it again in order to match the
1054
+ # behavior with copy=True.
1055
+ arr = wrap(arr)
1056
+ return arr
1057
+
1058
+ # next cast to a packed format with all fields converted to new dtype
1059
+ packed_fields = np.dtype({'names': names,
1060
+ 'formats': [(out_dtype, dt.shape) for dt in dts]})
1061
+ arr = arr.astype(packed_fields, copy=copy, casting=casting)
1062
+
1063
+ # finally it is safe to view the packed fields as the unstructured type
1064
+ return arr.view((out_dtype, (sum(counts),)))
1065
+
1066
+
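A sketch of the copy=False fast path implemented above: when every field already has the output dtype and the offsets share a common stride (NumPy >= 1.25 per the versionchanged note), the result is a view, so writes propagate back (data illustrative):

import numpy as np
from numpy.lib import recfunctions as rfn

a = np.zeros(3, dtype=[('x', 'f8'), ('y', 'f8')])
u = rfn.structured_to_unstructured(a, copy=False)  # homogeneous f8, stride 8 -> view
u[:] = 1.0
print(a['x'])              # [1. 1. 1.]: the write went through the view
print(u.base is not None)  # True: u shares memory with a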
1067
+ def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
1068
+ align=None, copy=None, casting=None):
1069
+ return (arr,)
1070
+
1071
+ @array_function_dispatch(_unstructured_to_structured_dispatcher)
1072
+ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
1073
+ copy=False, casting='unsafe'):
1074
+ """
1075
+ Converts an n-D unstructured array into an (n-1)-D structured array.
1076
+
1077
+ The last dimension of the input array is converted into a structure, with
1078
+ number of field-elements equal to the size of the last dimension of the
1079
+ input array. By default all output fields have the input array's dtype, but
1080
+ an output structured dtype with an equal number of field-elements can be
1081
+ supplied instead.
1082
+
1083
+ Nested fields, as well as each element of any subarray fields, all count
1084
+ towards the number of field-elements.
1085
+
1086
+ Parameters
1087
+ ----------
1088
+ arr : ndarray
1089
+ Unstructured array or dtype to convert.
1090
+ dtype : dtype, optional
1091
+ The structured dtype of the output array
1092
+ names : list of strings, optional
1093
+ If dtype is not supplied, this specifies the field names for the output
1094
+ dtype, in order. The field dtypes will be the same as the input array.
1095
+ align : boolean, optional
1096
+ Whether to create an aligned memory layout.
1097
+ copy : bool, optional
1098
+ See copy argument to `numpy.ndarray.astype`. If true, always return a
1099
+ copy. If false, and `dtype` requirements are satisfied, a view is
1100
+ returned.
1101
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
1102
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
1103
+ data casting may occur.
1104
+
1105
+ Returns
1106
+ -------
1107
+ structured : ndarray
1108
+ Structured array with fewer dimensions.
1109
+
1110
+ Examples
1111
+ --------
1112
+
1113
+ >>> from numpy.lib import recfunctions as rfn
1114
+ >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
1115
+ >>> a = np.arange(20).reshape((4,5))
1116
+ >>> a
1117
+ array([[ 0, 1, 2, 3, 4],
1118
+ [ 5, 6, 7, 8, 9],
1119
+ [10, 11, 12, 13, 14],
1120
+ [15, 16, 17, 18, 19]])
1121
+ >>> rfn.unstructured_to_structured(a, dt)
1122
+ array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
1123
+ (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
1124
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
1125
+
1126
+ """
1127
+ if arr.shape == ():
1128
+ raise ValueError('arr must have at least one dimension')
1129
+ n_elem = arr.shape[-1]
1130
+ if n_elem == 0:
1131
+ # too many bugs elsewhere for this to work now
1132
+ raise NotImplementedError("last axis with size 0 is not supported")
1133
+
1134
+ if dtype is None:
1135
+ if names is None:
1136
+ names = ['f{}'.format(n) for n in range(n_elem)]
1137
+ out_dtype = np.dtype([(n, arr.dtype) for n in names], align=align)
1138
+ fields = _get_fields_and_offsets(out_dtype)
1139
+ dts, counts, offsets = zip(*fields)
1140
+ else:
1141
+ if names is not None:
1142
+ raise ValueError("don't supply both dtype and names")
1143
+ # if dtype is the args of np.dtype, construct it
1144
+ dtype = np.dtype(dtype)
1145
+ # sanity check of the input dtype
1146
+ fields = _get_fields_and_offsets(dtype)
1147
+ if len(fields) == 0:
1148
+ dts, counts, offsets = [], [], []
1149
+ else:
1150
+ dts, counts, offsets = zip(*fields)
1151
+
1152
+ if n_elem != sum(counts):
1153
+ raise ValueError('The length of the last dimension of arr must '
1154
+ 'be equal to the number of fields in dtype')
1155
+ out_dtype = dtype
1156
+ if align and not out_dtype.isalignedstruct:
1157
+ raise ValueError("align was True but dtype is not aligned")
1158
+
1159
+ names = ['f{}'.format(n) for n in range(len(fields))]
1160
+
1161
+ # Use a series of views and casts to convert to a structured array:
1162
+
1163
+ # first view as a packed structured array of one dtype
1164
+ packed_fields = np.dtype({'names': names,
1165
+ 'formats': [(arr.dtype, dt.shape) for dt in dts]})
1166
+ arr = np.ascontiguousarray(arr).view(packed_fields)
1167
+
1168
+ # next cast to an unpacked but flattened format with varied dtypes
1169
+ flattened_fields = np.dtype({'names': names,
1170
+ 'formats': dts,
1171
+ 'offsets': offsets,
1172
+ 'itemsize': out_dtype.itemsize})
1173
+ arr = arr.astype(flattened_fields, copy=copy, casting=casting)
1174
+
1175
+ # finally view as the final nested dtype and remove the last axis
1176
+ return arr.view(out_dtype)[..., 0]
1177
+
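A minimal round-trip sketch, assuming a simple two-field dtype (the names and values here are illustrative only): the last axis of the unstructured input supplies one value per field-element, and converting back recovers the original.

import numpy as np
from numpy.lib import recfunctions as rfn

dt = np.dtype([('x', 'f8'), ('y', 'f8')])
u = np.arange(6.0).reshape(3, 2)            # last axis: one value per field
s = rfn.unstructured_to_structured(u, dt)   # shape (3, 2) -> shape (3,)
back = rfn.structured_to_unstructured(s)
assert back.shape == (3, 2) and (back == u).all()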
1178
+ def _apply_along_fields_dispatcher(func, arr):
1179
+ return (arr,)
1180
+
1181
+ @array_function_dispatch(_apply_along_fields_dispatcher)
1182
+ def apply_along_fields(func, arr):
1183
+ """
1184
+ Apply function 'func' as a reduction across fields of a structured array.
1185
+
1186
+ This is similar to `apply_along_axis`, but treats the fields of a
1187
+ structured array as an extra axis. The fields are all first cast to a
1188
+ common type following the type-promotion rules from `numpy.result_type`
1189
+ applied to the field's dtypes.
1190
+
1191
+ Parameters
1192
+ ----------
1193
+ func : function
1194
+ Function to apply on the "field" dimension. This function must
1195
+ support an `axis` argument, like np.mean, np.sum, etc.
1196
+ arr : ndarray
1197
+ Structured array for which to apply func.
1198
+
1199
+ Returns
1200
+ -------
1201
+ out : ndarray
1202
+ Result of the reduction operation
1203
+
1204
+ Examples
1205
+ --------
1206
+
1207
+ >>> from numpy.lib import recfunctions as rfn
1208
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
1209
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
1210
+ >>> rfn.apply_along_fields(np.mean, b)
1211
+ array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
1212
+ >>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
1213
+ array([ 3. , 5.5, 9. , 11. ])
1214
+
1215
+ """
1216
+ if arr.dtype.names is None:
1217
+ raise ValueError('arr must be a structured array')
1218
+
1219
+ uarr = structured_to_unstructured(arr)
1220
+ return func(uarr, axis=-1)
1221
+ # works and avoids axis requirement, but very, very slow:
1222
+ #return np.apply_along_axis(func, -1, uarr)
1223
+
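Since the body above is just `structured_to_unstructured` followed by `func(..., axis=-1)`, the two spellings agree; a sketch reusing the docstring's example array:

import numpy as np
from numpy.lib import recfunctions as rfn

b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
             dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
manual = np.mean(rfn.structured_to_unstructured(b), axis=-1)
assert (manual == rfn.apply_along_fields(np.mean, b)).all()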
1224
+ def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
1225
+ return dst, src
1226
+
1227
+ @array_function_dispatch(_assign_fields_by_name_dispatcher)
1228
+ def assign_fields_by_name(dst, src, zero_unassigned=True):
1229
+ """
1230
+ Assigns values from one structured array to another by field name.
1231
+
1232
+ Normally in numpy >= 1.14, assignment of one structured array to another
1233
+ copies fields "by position", meaning that the first field from the src is
1234
+ copied to the first field of the dst, and so on, regardless of field name.
1235
+
1236
+ This function instead copies "by field name", such that fields in the dst
1237
+ are assigned from the identically named field in the src. This applies
1238
+ recursively for nested structures. This is how structure assignment worked
1239
+ in numpy >= 1.6 to <= 1.13.
1240
+
1241
+ Parameters
1242
+ ----------
1243
+ dst : ndarray
1244
+ src : ndarray
1245
+ The source and destination arrays during assignment.
1246
+ zero_unassigned : bool, optional
1247
+ If True, fields in the dst for which there was no matching
1248
+ field in the src are filled with the value 0 (zero). This
1249
+ was the behavior of numpy <= 1.13. If False, those fields
1250
+ are not modified.
1251
+ """
1252
+
1253
+ if dst.dtype.names is None:
1254
+ dst[...] = src
1255
+ return
1256
+
1257
+ for name in dst.dtype.names:
1258
+ if name not in src.dtype.names:
1259
+ if zero_unassigned:
1260
+ dst[name] = 0
1261
+ else:
1262
+ assign_fields_by_name(dst[name], src[name],
1263
+ zero_unassigned)
1264
+
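The docstring above carries no example, so here is a hedged sketch with hypothetical arrays: `b` is copied by name, the unmatched `a` is zeroed (the default), and the extra `c` in the source is simply ignored.

import numpy as np
from numpy.lib import recfunctions as rfn

dst = np.zeros(2, dtype=[('a', 'i4'), ('b', 'f8')])
dst['a'] = -1                           # will be zeroed: no 'a' in src
src = np.ones(2, dtype=[('b', 'f8'), ('c', 'u1')])
rfn.assign_fields_by_name(dst, src)     # zero_unassigned=True by default
assert (dst['b'] == 1.0).all() and (dst['a'] == 0).all()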
1265
+ def _require_fields_dispatcher(array, required_dtype):
1266
+ return (array,)
1267
+
1268
+ @array_function_dispatch(_require_fields_dispatcher)
1269
+ def require_fields(array, required_dtype):
1270
+ """
1271
+ Casts a structured array to a new dtype using assignment by field-name.
1272
+
1273
+ This function assigns from the old to the new array by name, so the
1274
+ value of a field in the output array is the value of the field with the
1275
+ same name in the source array. This has the effect of creating a new
1276
+ ndarray containing only the fields "required" by the required_dtype.
1277
+
1278
+ If a field name in the required_dtype does not exist in the
1279
+ input array, that field is created and set to 0 in the output array.
1280
+
1281
+ Parameters
1282
+ ----------
1283
+ array : ndarray
1284
+ array to cast
1285
+ required_dtype : dtype
1286
+ datatype for output array
1287
+
1288
+ Returns
1289
+ -------
1290
+ out : ndarray
1291
+ array with the new dtype, with field values copied from the fields in
1292
+ the input array with the same name
1293
+
1294
+ Examples
1295
+ --------
1296
+
1297
+ >>> from numpy.lib import recfunctions as rfn
1298
+ >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
1299
+ >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
1300
+ array([(1., 1), (1., 1), (1., 1), (1., 1)],
1301
+ dtype=[('b', '<f4'), ('c', 'u1')])
1302
+ >>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
1303
+ array([(1., 0), (1., 0), (1., 0), (1., 0)],
1304
+ dtype=[('b', '<f4'), ('newf', 'u1')])
1305
+
1306
+ """
1307
+ out = np.empty(array.shape, dtype=required_dtype)
1308
+ assign_fields_by_name(out, array)
1309
+ return out
1310
+
1311
+
1312
+ def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
1313
+ asrecarray=None, autoconvert=None):
1314
+ return arrays
1315
+
1316
+
1317
+ @array_function_dispatch(_stack_arrays_dispatcher)
1318
+ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
1319
+ autoconvert=False):
1320
+ """
1321
+ Superposes arrays field by field
1322
+
1323
+ Parameters
1324
+ ----------
1325
+ arrays : array or sequence
1326
+ Sequence of input arrays.
1327
+ defaults : dictionary, optional
1328
+ Dictionary mapping field names to the corresponding default values.
1329
+ usemask : {True, False}, optional
1330
+ Whether to return a MaskedArray (or MaskedRecords if
1331
+ `asrecarray==True`) or a ndarray.
1332
+ asrecarray : {False, True}, optional
1333
+ Whether to return a recarray (or MaskedRecords if `usemask==True`)
1334
+ or just a flexible-type ndarray.
1335
+ autoconvert : {False, True}, optional
1336
+ Whether to automatically cast the type of the field to the maximum.
1337
+
1338
+ Examples
1339
+ --------
1340
+ >>> from numpy.lib import recfunctions as rfn
1341
+ >>> x = np.array([1, 2,])
1342
+ >>> rfn.stack_arrays(x) is x
1343
+ True
1344
+ >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
1345
+ >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
1346
+ ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
1347
+ >>> test = rfn.stack_arrays((z,zz))
1348
+ >>> test
1349
+ masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
1350
+ (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
1351
+ mask=[(False, False, True), (False, False, True),
1352
+ (False, False, False), (False, False, False),
1353
+ (False, False, False)],
1354
+ fill_value=(b'N/A', 1.e+20, 1.e+20),
1355
+ dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
1356
+
1357
+ """
1358
+ if isinstance(arrays, ndarray):
1359
+ return arrays
1360
+ elif len(arrays) == 1:
1361
+ return arrays[0]
1362
+ seqarrays = [np.asanyarray(a).ravel() for a in arrays]
1363
+ nrecords = [len(a) for a in seqarrays]
1364
+ ndtype = [a.dtype for a in seqarrays]
1365
+ fldnames = [d.names for d in ndtype]
1366
+ #
1367
+ dtype_l = ndtype[0]
1368
+ newdescr = _get_fieldspec(dtype_l)
1369
+ names = [n for n, d in newdescr]
1370
+ for dtype_n in ndtype[1:]:
1371
+ for fname, fdtype in _get_fieldspec(dtype_n):
1372
+ if fname not in names:
1373
+ newdescr.append((fname, fdtype))
1374
+ names.append(fname)
1375
+ else:
1376
+ nameidx = names.index(fname)
1377
+ _, cdtype = newdescr[nameidx]
1378
+ if autoconvert:
1379
+ newdescr[nameidx] = (fname, max(fdtype, cdtype))
1380
+ elif fdtype != cdtype:
1381
+ raise TypeError("Incompatible type '%s' <> '%s'" %
1382
+ (cdtype, fdtype))
1383
+ # Only one field: use concatenate
1384
+ if len(newdescr) == 1:
1385
+ output = ma.concatenate(seqarrays)
1386
+ else:
1387
+ #
1388
+ output = ma.masked_all((np.sum(nrecords),), newdescr)
1389
+ offset = np.cumsum(np.r_[0, nrecords])
1390
+ seen = []
1391
+ for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
1392
+ names = a.dtype.names
1393
+ if names is None:
1394
+ output['f%i' % len(seen)][i:j] = a
1395
+ else:
1396
+ for name in n:
1397
+ output[name][i:j] = a[name]
1398
+ if name not in seen:
1399
+ seen.append(name)
1400
+ #
1401
+ return _fix_output(_fix_defaults(output, defaults),
1402
+ usemask=usemask, asrecarray=asrecarray)
1403
+
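A hedged sketch of the `autoconvert` flag (hypothetical inputs): when the same field name appears with mismatched dtypes, `autoconvert=True` promotes to the larger dtype where the default would raise a TypeError.

import numpy as np
from numpy.lib import recfunctions as rfn

z1 = np.array([(1, 10.)], dtype=[('A', 'i4'), ('B', 'f8')])
z2 = np.array([(2.5, 20.)], dtype=[('A', 'f8'), ('B', 'f8')])
stacked = rfn.stack_arrays((z1, z2), autoconvert=True, usemask=False)
assert stacked.dtype['A'] == np.dtype('f8')   # i4 promoted via max(i4, f8)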
1404
+
1405
+ def _find_duplicates_dispatcher(
1406
+ a, key=None, ignoremask=None, return_index=None):
1407
+ return (a,)
1408
+
1409
+
1410
+ @array_function_dispatch(_find_duplicates_dispatcher)
1411
+ def find_duplicates(a, key=None, ignoremask=True, return_index=False):
1412
+ """
1413
+ Find the duplicates in a structured array along a given key
1414
+
1415
+ Parameters
1416
+ ----------
1417
+ a : array-like
1418
+ Input array
1419
+ key : {string, None}, optional
1420
+ Name of the fields along which to check the duplicates.
1421
+ If None, the search is performed by records
1422
+ ignoremask : {True, False}, optional
1423
+ Whether masked data should be discarded or considered as duplicates.
1424
+ return_index : {False, True}, optional
1425
+ Whether to return the indices of the duplicated values.
1426
+
1427
+ Examples
1428
+ --------
1429
+ >>> from numpy.lib import recfunctions as rfn
1430
+ >>> ndtype = [('a', int)]
1431
+ >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
1432
+ ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
1433
+ >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
1434
+ (masked_array(data=[(1,), (1,), (2,), (2,)],
1435
+ mask=[(False,), (False,), (False,), (False,)],
1436
+ fill_value=(999999,),
1437
+ dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
1438
+ """
1439
+ a = np.asanyarray(a).ravel()
1440
+ # Get a dictionary of fields
1441
+ fields = get_fieldstructure(a.dtype)
1442
+ # Get the sorting data (by selecting the corresponding field)
1443
+ base = a
1444
+ if key:
1445
+ for f in fields[key]:
1446
+ base = base[f]
1447
+ base = base[key]
1448
+ # Get the sorting indices and the sorted data
1449
+ sortidx = base.argsort()
1450
+ sortedbase = base[sortidx]
1451
+ sorteddata = sortedbase.filled()
1452
+ # Compare the sorting data
1453
+ flag = (sorteddata[:-1] == sorteddata[1:])
1454
+ # If masked data must be ignored, set the flag to false where needed
1455
+ if ignoremask:
1456
+ sortedmask = sortedbase.recordmask
1457
+ flag[sortedmask[1:]] = False
1458
+ flag = np.concatenate(([False], flag))
1459
+ # We need to take the point on the left as well (else we're missing it)
1460
+ flag[:-1] = flag[:-1] + flag[1:]
1461
+ duplicates = a[sortidx][flag]
1462
+ if return_index:
1463
+ return (duplicates, sortidx[flag])
1464
+ else:
1465
+ return duplicates
1466
+
1467
+
1468
+ def _join_by_dispatcher(
1469
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
1470
+ defaults=None, usemask=None, asrecarray=None):
1471
+ return (r1, r2)
1472
+
1473
+
1474
+ @array_function_dispatch(_join_by_dispatcher)
1475
+ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
1476
+ defaults=None, usemask=True, asrecarray=False):
1477
+ """
1478
+ Join arrays `r1` and `r2` on key `key`.
1479
+
1480
+ The key should be either a string or a sequence of strings corresponding
1481
+ to the fields used to join the array. An exception is raised if the
1482
+ `key` field cannot be found in the two input arrays. Neither `r1` nor
1483
+ `r2` should have any duplicates along `key`: the presence of duplicates
1484
+ will make the output quite unreliable. Note that duplicates are not
1485
+ looked for by the algorithm.
1486
+
1487
+ Parameters
1488
+ ----------
1489
+ key : {string, sequence}
1490
+ A string or a sequence of strings corresponding to the fields used
1491
+ for comparison.
1492
+ r1, r2 : arrays
1493
+ Structured arrays.
1494
+ jointype : {'inner', 'outer', 'leftouter'}, optional
1495
+ If 'inner', returns the elements common to both r1 and r2.
1496
+ If 'outer', returns the common elements as well as the elements of
1497
+ r1 not in r2 and the elements of r2 not in r1.
1498
+ If 'leftouter', returns the common elements and the elements of r1
1499
+ not in r2.
1500
+ r1postfix : string, optional
1501
+ String appended to the names of the fields of r1 that are present
1502
+ in r2 but absent from the key.
1503
+ r2postfix : string, optional
1504
+ String appended to the names of the fields of r2 that are present
1505
+ in r1 but absent from the key.
1506
+ defaults : {dictionary}, optional
1507
+ Dictionary mapping field names to the corresponding default values.
1508
+ usemask : {True, False}, optional
1509
+ Whether to return a MaskedArray (or MaskedRecords if
1510
+ `asrecarray==True`) or a ndarray.
1511
+ asrecarray : {False, True}, optional
1512
+ Whether to return a recarray (or MaskedRecords if `usemask==True`)
1513
+ or just a flexible-type ndarray.
1514
+
1515
+ Notes
1516
+ -----
1517
+ * The output is sorted along the key.
1518
+ * A temporary array is formed by dropping the fields not in the key for
1519
+ the two arrays and concatenating the result. This array is then
1520
+ sorted, and the common entries selected. The output is constructed by
1521
+ filling the fields with the selected entries. Matching is not
1522
+ preserved if there are some duplicates...
1523
+
1524
+ """
1525
+ # Check jointype
1526
+ if jointype not in ('inner', 'outer', 'leftouter'):
1527
+ raise ValueError(
1528
+ "The 'jointype' argument should be in 'inner', "
1529
+ "'outer' or 'leftouter' (got '%s' instead)" % jointype
1530
+ )
1531
+ # If we have a single key, put it in a tuple
1532
+ if isinstance(key, str):
1533
+ key = (key,)
1534
+
1535
+ # Check the keys
1536
+ if len(set(key)) != len(key):
1537
+ dup = next(x for n,x in enumerate(key) if x in key[n+1:])
1538
+ raise ValueError("duplicate join key %r" % dup)
1539
+ for name in key:
1540
+ if name not in r1.dtype.names:
1541
+ raise ValueError('r1 does not have key field %r' % name)
1542
+ if name not in r2.dtype.names:
1543
+ raise ValueError('r2 does not have key field %r' % name)
1544
+
1545
+ # Make sure we work with ravelled arrays
1546
+ r1 = r1.ravel()
1547
+ r2 = r2.ravel()
1548
+ # Fixme: nb2 below is never used. Commenting out for pyflakes.
1549
+ # (nb1, nb2) = (len(r1), len(r2))
1550
+ nb1 = len(r1)
1551
+ (r1names, r2names) = (r1.dtype.names, r2.dtype.names)
1552
+
1553
+ # Check the names for collision
1554
+ collisions = (set(r1names) & set(r2names)) - set(key)
1555
+ if collisions and not (r1postfix or r2postfix):
1556
+ msg = "r1 and r2 contain common names, r1postfix and r2postfix "
1557
+ msg += "can't both be empty"
1558
+ raise ValueError(msg)
1559
+
1560
+ # Make temporary arrays of just the keys
1561
+ # (use order of keys in `r1` for back-compatibility)
1562
+ key1 = [ n for n in r1names if n in key ]
1563
+ r1k = _keep_fields(r1, key1)
1564
+ r2k = _keep_fields(r2, key1)
1565
+
1566
+ # Concatenate the two arrays for comparison
1567
+ aux = ma.concatenate((r1k, r2k))
1568
+ idx_sort = aux.argsort(order=key)
1569
+ aux = aux[idx_sort]
1570
+ #
1571
+ # Get the common keys
1572
+ flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
1573
+ flag_in[:-1] = flag_in[1:] + flag_in[:-1]
1574
+ idx_in = idx_sort[flag_in]
1575
+ idx_1 = idx_in[(idx_in < nb1)]
1576
+ idx_2 = idx_in[(idx_in >= nb1)] - nb1
1577
+ (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
1578
+ if jointype == 'inner':
1579
+ (r1spc, r2spc) = (0, 0)
1580
+ elif jointype == 'outer':
1581
+ idx_out = idx_sort[~flag_in]
1582
+ idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
1583
+ idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
1584
+ (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
1585
+ elif jointype == 'leftouter':
1586
+ idx_out = idx_sort[~flag_in]
1587
+ idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
1588
+ (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
1589
+ # Select the entries from each input
1590
+ (s1, s2) = (r1[idx_1], r2[idx_2])
1591
+ #
1592
+ # Build the new description of the output array .......
1593
+ # Start with the key fields
1594
+ ndtype = _get_fieldspec(r1k.dtype)
1595
+
1596
+ # Add the fields from r1
1597
+ for fname, fdtype in _get_fieldspec(r1.dtype):
1598
+ if fname not in key:
1599
+ ndtype.append((fname, fdtype))
1600
+
1601
+ # Add the fields from r2
1602
+ for fname, fdtype in _get_fieldspec(r2.dtype):
1603
+ # Have we seen the current name already ?
1604
+ # we need to rebuild this list every time
1605
+ names = list(name for name, dtype in ndtype)
1606
+ try:
1607
+ nameidx = names.index(fname)
1608
+ except ValueError:
1609
+ #... we haven't: just add the description to the current list
1610
+ ndtype.append((fname, fdtype))
1611
+ else:
1612
+ # collision
1613
+ _, cdtype = ndtype[nameidx]
1614
+ if fname in key:
1615
+ # The current field is part of the key: take the largest dtype
1616
+ ndtype[nameidx] = (fname, max(fdtype, cdtype))
1617
+ else:
1618
+ # The current field is not part of the key: add the suffixes,
1619
+ # and place the new field adjacent to the old one
1620
+ ndtype[nameidx:nameidx + 1] = [
1621
+ (fname + r1postfix, cdtype),
1622
+ (fname + r2postfix, fdtype)
1623
+ ]
1624
+ # Rebuild a dtype from the new fields
1625
+ ndtype = np.dtype(ndtype)
1626
+ # Find the largest nb of common fields :
1627
+ # r1cmn and r2cmn should be equal, but...
1628
+ cmn = max(r1cmn, r2cmn)
1629
+ # Construct an empty array
1630
+ output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
1631
+ names = output.dtype.names
1632
+ for f in r1names:
1633
+ selected = s1[f]
1634
+ if f not in names or (f in r2names and not r2postfix and f not in key):
1635
+ f += r1postfix
1636
+ current = output[f]
1637
+ current[:r1cmn] = selected[:r1cmn]
1638
+ if jointype in ('outer', 'leftouter'):
1639
+ current[cmn:cmn + r1spc] = selected[r1cmn:]
1640
+ for f in r2names:
1641
+ selected = s2[f]
1642
+ if f not in names or (f in r1names and not r1postfix and f not in key):
1643
+ f += r2postfix
1644
+ current = output[f]
1645
+ current[:r2cmn] = selected[:r2cmn]
1646
+ if (jointype == 'outer') and r2spc:
1647
+ current[-r2spc:] = selected[r2cmn:]
1648
+ # Sort and finalize the output
1649
+ output.sort(order=key)
1650
+ kwargs = dict(usemask=usemask, asrecarray=asrecarray)
1651
+ return _fix_output(_fix_defaults(output, defaults), **kwargs)
1652
+
1653
+
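`join_by` has no doctest above, so a hedged sketch with hypothetical inputs: an inner join keeps only the keys present in both arrays, and the non-key fields of each side appear in the output.

import numpy as np
from numpy.lib import recfunctions as rfn

r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', 'i4'), ('a', 'f8')])
r2 = np.array([(2, 200.), (3, 300.)], dtype=[('key', 'i4'), ('b', 'f8')])
inner = rfn.join_by('key', r1, r2, usemask=False)
assert inner['key'].tolist() == [2]            # only key 2 is common
assert set(inner.dtype.names) == {'key', 'a', 'b'}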
1654
+ def _rec_join_dispatcher(
1655
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
1656
+ defaults=None):
1657
+ return (r1, r2)
1658
+
1659
+
1660
+ @array_function_dispatch(_rec_join_dispatcher)
1661
+ def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
1662
+ defaults=None):
1663
+ """
1664
+ Join arrays `r1` and `r2` on keys.
1665
+ Alternative to join_by, that always returns a np.recarray.
1666
+
1667
+ See Also
1668
+ --------
1669
+ join_by : equivalent function
1670
+ """
1671
+ kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
1672
+ defaults=defaults, usemask=False, asrecarray=True)
1673
+ return join_by(key, r1, r2, **kwargs)
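And the recarray-returning variant, sketched with the same kind of hypothetical inputs: identical join semantics, but fields become attributes.

import numpy as np
from numpy.lib import recfunctions as rfn

r1 = np.array([(1, 10.)], dtype=[('key', 'i4'), ('a', 'f8')])
r2 = np.array([(1, 100.)], dtype=[('key', 'i4'), ('b', 'f8')])
joined = rfn.rec_join('key', r1, r2)
assert isinstance(joined, np.recarray)
assert joined.key.tolist() == [1]              # attribute access on recarray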
.venv/lib/python3.11/site-packages/numpy/lib/scimath.py ADDED
@@ -0,0 +1,625 @@
1
+ """
2
+ Wrapper functions for more user-friendly calling of certain math functions
3
+ whose output data type differs from the input data type in certain
4
+ domains of the input.
5
+
6
+ For example, for functions like `log` with branch cuts, the versions in this
7
+ module provide the mathematically valid answers in the complex plane::
8
+
9
+ >>> import math
10
+ >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)
11
+ True
12
+
13
+ Similarly, `sqrt`, other base logarithms, `power` and trig functions are
14
+ correctly handled. See their respective docstrings for specific examples.
15
+
16
+ Functions
17
+ ---------
18
+
19
+ .. autosummary::
20
+ :toctree: generated/
21
+
22
+ sqrt
23
+ log
24
+ log2
25
+ logn
26
+ log10
27
+ power
28
+ arccos
29
+ arcsin
30
+ arctanh
31
+
32
+ """
33
+ import numpy.core.numeric as nx
34
+ import numpy.core.numerictypes as nt
35
+ from numpy.core.numeric import asarray, any
36
+ from numpy.core.overrides import array_function_dispatch
37
+ from numpy.lib.type_check import isreal
38
+
39
+
40
+ __all__ = [
41
+ 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
42
+ 'arctanh'
43
+ ]
44
+
45
+
46
+ _ln2 = nx.log(2.0)
47
+
48
+
49
+ def _tocomplex(arr):
50
+ """Convert its input `arr` to a complex array.
51
+
52
+ The input is returned as a complex array of the smallest type that will fit
53
+ the original data: types like single, byte, short, etc. become csingle,
54
+ while others become cdouble.
55
+
56
+ A copy of the input is always made.
57
+
58
+ Parameters
59
+ ----------
60
+ arr : array
61
+
62
+ Returns
63
+ -------
64
+ array
65
+ An array with the same input data as the input but in complex form.
66
+
67
+ Examples
68
+ --------
69
+
70
+ First, consider an input of type short:
71
+
72
+ >>> a = np.array([1,2,3],np.short)
73
+
74
+ >>> ac = np.lib.scimath._tocomplex(a); ac
75
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
76
+
77
+ >>> ac.dtype
78
+ dtype('complex64')
79
+
80
+ If the input is of type double, the output is correspondingly of the
81
+ complex double type as well:
82
+
83
+ >>> b = np.array([1,2,3],np.double)
84
+
85
+ >>> bc = np.lib.scimath._tocomplex(b); bc
86
+ array([1.+0.j, 2.+0.j, 3.+0.j])
87
+
88
+ >>> bc.dtype
89
+ dtype('complex128')
90
+
91
+ Note that even if the input was complex to begin with, a copy is still
92
+ made, since the astype() method always copies:
93
+
94
+ >>> c = np.array([1,2,3],np.csingle)
95
+
96
+ >>> cc = np.lib.scimath._tocomplex(c); cc
97
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
98
+
99
+ >>> c *= 2; c
100
+ array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)
101
+
102
+ >>> cc
103
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
104
+ """
105
+ if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
106
+ nt.ushort, nt.csingle)):
107
+ return arr.astype(nt.csingle)
108
+ else:
109
+ return arr.astype(nt.cdouble)
110
+
111
+
112
+ def _fix_real_lt_zero(x):
113
+ """Convert `x` to complex if it has real, negative components.
114
+
115
+ Otherwise, output is just the array version of the input (via asarray).
116
+
117
+ Parameters
118
+ ----------
119
+ x : array_like
120
+
121
+ Returns
122
+ -------
123
+ array
124
+
125
+ Examples
126
+ --------
127
+ >>> np.lib.scimath._fix_real_lt_zero([1,2])
128
+ array([1, 2])
129
+
130
+ >>> np.lib.scimath._fix_real_lt_zero([-1,2])
131
+ array([-1.+0.j, 2.+0.j])
132
+
133
+ """
134
+ x = asarray(x)
135
+ if any(isreal(x) & (x < 0)):
136
+ x = _tocomplex(x)
137
+ return x
138
+
139
+
140
+ def _fix_int_lt_zero(x):
141
+ """Convert `x` to double if it has real, negative components.
142
+
143
+ Otherwise, output is just the array version of the input (via asarray).
144
+
145
+ Parameters
146
+ ----------
147
+ x : array_like
148
+
149
+ Returns
150
+ -------
151
+ array
152
+
153
+ Examples
154
+ --------
155
+ >>> np.lib.scimath._fix_int_lt_zero([1,2])
156
+ array([1, 2])
157
+
158
+ >>> np.lib.scimath._fix_int_lt_zero([-1,2])
159
+ array([-1., 2.])
160
+ """
161
+ x = asarray(x)
162
+ if any(isreal(x) & (x < 0)):
163
+ x = x * 1.0
164
+ return x
165
+
166
+
167
+ def _fix_real_abs_gt_1(x):
168
+ """Convert `x` to complex if it has real components x_i with abs(x_i)>1.
169
+
170
+ Otherwise, output is just the array version of the input (via asarray).
171
+
172
+ Parameters
173
+ ----------
174
+ x : array_like
175
+
176
+ Returns
177
+ -------
178
+ array
179
+
180
+ Examples
181
+ --------
182
+ >>> np.lib.scimath._fix_real_abs_gt_1([0,1])
183
+ array([0, 1])
184
+
185
+ >>> np.lib.scimath._fix_real_abs_gt_1([0,2])
186
+ array([0.+0.j, 2.+0.j])
187
+ """
188
+ x = asarray(x)
189
+ if any(isreal(x) & (abs(x) > 1)):
190
+ x = _tocomplex(x)
191
+ return x
192
+
193
+
194
+ def _unary_dispatcher(x):
195
+ return (x,)
196
+
197
+
198
+ @array_function_dispatch(_unary_dispatcher)
199
+ def sqrt(x):
200
+ """
201
+ Compute the square root of x.
202
+
203
+ For negative input elements, a complex value is returned
204
+ (unlike `numpy.sqrt` which returns NaN).
205
+
206
+ Parameters
207
+ ----------
208
+ x : array_like
209
+ The input value(s).
210
+
211
+ Returns
212
+ -------
213
+ out : ndarray or scalar
214
+ The square root of `x`. If `x` was a scalar, so is `out`,
215
+ otherwise an array is returned.
216
+
217
+ See Also
218
+ --------
219
+ numpy.sqrt
220
+
221
+ Examples
222
+ --------
223
+ For real, non-negative inputs this works just like `numpy.sqrt`:
224
+
225
+ >>> np.emath.sqrt(1)
226
+ 1.0
227
+ >>> np.emath.sqrt([1, 4])
228
+ array([1., 2.])
229
+
230
+ But it automatically handles negative inputs:
231
+
232
+ >>> np.emath.sqrt(-1)
233
+ 1j
234
+ >>> np.emath.sqrt([-1,4])
235
+ array([0.+1.j, 2.+0.j])
236
+
237
+ Different results are expected because floating point 0.0 and -0.0
238
+ are distinct.
239
+
240
+ For more control, explicitly use complex() as follows:
241
+
242
+ >>> np.emath.sqrt(complex(-4.0, 0.0))
243
+ 2j
244
+ >>> np.emath.sqrt(complex(-4.0, -0.0))
245
+ -2j
246
+ """
247
+ x = _fix_real_lt_zero(x)
248
+ return nx.sqrt(x)
249
+
250
+
251
+ @array_function_dispatch(_unary_dispatcher)
252
+ def log(x):
253
+ """
254
+ Compute the natural logarithm of `x`.
255
+
256
+ Return the "principal value" (for a description of this, see `numpy.log`)
257
+ of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
258
+ returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
259
+ complex principal value is returned.
260
+
261
+ Parameters
262
+ ----------
263
+ x : array_like
264
+ The value(s) whose log is (are) required.
265
+
266
+ Returns
267
+ -------
268
+ out : ndarray or scalar
269
+ The log of the `x` value(s). If `x` was a scalar, so is `out`,
270
+ otherwise an array is returned.
271
+
272
+ See Also
273
+ --------
274
+ numpy.log
275
+
276
+ Notes
277
+ -----
278
+ For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
279
+ (note, however, that otherwise `numpy.log` and this `log` are identical,
280
+ i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
281
+ notably, the complex principal value if ``x.imag != 0``).
282
+
283
+ Examples
284
+ --------
285
+ >>> np.emath.log(np.exp(1))
286
+ 1.0
287
+
288
+ Negative arguments are handled "correctly" (recall that
289
+ ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):
290
+
291
+ >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
292
+ True
293
+
294
+ """
295
+ x = _fix_real_lt_zero(x)
296
+ return nx.log(x)
297
+
298
+
299
+ @array_function_dispatch(_unary_dispatcher)
300
+ def log10(x):
301
+ """
302
+ Compute the logarithm base 10 of `x`.
303
+
304
+ Return the "principal value" (for a description of this, see
305
+ `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
306
+ is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
307
+ returns ``inf``). Otherwise, the complex principal value is returned.
308
+
309
+ Parameters
310
+ ----------
311
+ x : array_like or scalar
312
+ The value(s) whose log base 10 is (are) required.
313
+
314
+ Returns
315
+ -------
316
+ out : ndarray or scalar
317
+ The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
318
+ otherwise an array object is returned.
319
+
320
+ See Also
321
+ --------
322
+ numpy.log10
323
+
324
+ Notes
325
+ -----
326
+ For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
327
+ (note, however, that otherwise `numpy.log10` and this `log10` are
328
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
329
+ and, notably, the complex principal value if ``x.imag != 0``).
330
+
331
+ Examples
332
+ --------
333
+
334
+ (We set the printing precision so the example can be auto-tested)
335
+
336
+ >>> np.set_printoptions(precision=4)
337
+
338
+ >>> np.emath.log10(10**1)
339
+ 1.0
340
+
341
+ >>> np.emath.log10([-10**1, -10**2, 10**2])
342
+ array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])
343
+
344
+ """
345
+ x = _fix_real_lt_zero(x)
346
+ return nx.log10(x)
347
+
348
+
349
+ def _logn_dispatcher(n, x):
350
+ return (n, x,)
351
+
352
+
353
+ @array_function_dispatch(_logn_dispatcher)
354
+ def logn(n, x):
355
+ """
356
+ Take log base n of x.
357
+
358
+ If `x` contains negative inputs, the answer is computed and returned in the
359
+ complex domain.
360
+
361
+ Parameters
362
+ ----------
363
+ n : array_like
364
+ The integer base(s) in which the log is taken.
365
+ x : array_like
366
+ The value(s) whose log base `n` is (are) required.
367
+
368
+ Returns
369
+ -------
370
+ out : ndarray or scalar
371
+ The log base `n` of the `x` value(s). If `x` was a scalar, so is
372
+ `out`, otherwise an array is returned.
373
+
374
+ Examples
375
+ --------
376
+ >>> np.set_printoptions(precision=4)
377
+
378
+ >>> np.emath.logn(2, [4, 8])
379
+ array([2., 3.])
380
+ >>> np.emath.logn(2, [-4, -8, 8])
381
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
382
+
383
+ """
384
+ x = _fix_real_lt_zero(x)
385
+ n = _fix_real_lt_zero(n)
386
+ return nx.log(x)/nx.log(n)
387
+
388
+
389
+ @array_function_dispatch(_unary_dispatcher)
390
+ def log2(x):
391
+ """
392
+ Compute the logarithm base 2 of `x`.
393
+
394
+ Return the "principal value" (for a description of this, see
395
+ `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
396
+ a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
397
+ ``inf``). Otherwise, the complex principal value is returned.
398
+
399
+ Parameters
400
+ ----------
401
+ x : array_like
402
+ The value(s) whose log base 2 is (are) required.
403
+
404
+ Returns
405
+ -------
406
+ out : ndarray or scalar
407
+ The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
408
+ otherwise an array is returned.
409
+
410
+ See Also
411
+ --------
412
+ numpy.log2
413
+
414
+ Notes
415
+ -----
416
+ For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
417
+ (note, however, that otherwise `numpy.log2` and this `log2` are
418
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
419
+ and, notably, the complex principal value if ``x.imag != 0``).
420
+
421
+ Examples
422
+ --------
423
+ We set the printing precision so the example can be auto-tested:
424
+
425
+ >>> np.set_printoptions(precision=4)
426
+
427
+ >>> np.emath.log2(8)
428
+ 3.0
429
+ >>> np.emath.log2([-4, -8, 8])
430
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
431
+
432
+ """
433
+ x = _fix_real_lt_zero(x)
434
+ return nx.log2(x)
435
+
436
+
437
+ def _power_dispatcher(x, p):
438
+ return (x, p)
439
+
440
+
441
+ @array_function_dispatch(_power_dispatcher)
442
+ def power(x, p):
443
+ """
444
+ Return x to the power p, (x**p).
445
+
446
+ If `x` contains negative values, the output is converted to the
447
+ complex domain.
448
+
449
+ Parameters
450
+ ----------
451
+ x : array_like
452
+ The input value(s).
453
+ p : array_like of ints
454
+ The power(s) to which `x` is raised. If `x` contains multiple values,
455
+ `p` has to either be a scalar, or contain the same number of values
456
+ as `x`. In the latter case, the result is
457
+ ``x[0]**p[0], x[1]**p[1], ...``.
458
+
459
+ Returns
460
+ -------
461
+ out : ndarray or scalar
462
+ The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
463
+ otherwise an array is returned.
464
+
465
+ See Also
466
+ --------
467
+ numpy.power
468
+
469
+ Examples
470
+ --------
471
+ >>> np.set_printoptions(precision=4)
472
+
473
+ >>> np.emath.power([2, 4], 2)
474
+ array([ 4, 16])
475
+ >>> np.emath.power([2, 4], -2)
476
+ array([0.25 , 0.0625])
477
+ >>> np.emath.power([-2, 4], 2)
478
+ array([ 4.-0.j, 16.+0.j])
479
+
480
+ """
481
+ x = _fix_real_lt_zero(x)
482
+ p = _fix_int_lt_zero(p)
483
+ return nx.power(x, p)
484
+
485
+
486
+ @array_function_dispatch(_unary_dispatcher)
487
+ def arccos(x):
488
+ """
489
+ Compute the inverse cosine of x.
490
+
491
+ Return the "principal value" (for a description of this, see
492
+ `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
493
+ `abs(x) <= 1`, this is a real number in the closed interval
494
+ :math:`[0, \\pi]`. Otherwise, the complex principal value is returned.
495
+
496
+ Parameters
497
+ ----------
498
+ x : array_like or scalar
499
+ The value(s) whose arccos is (are) required.
500
+
501
+ Returns
502
+ -------
503
+ out : ndarray or scalar
504
+ The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
505
+ is `out`, otherwise an array object is returned.
506
+
507
+ See Also
508
+ --------
509
+ numpy.arccos
510
+
511
+ Notes
512
+ -----
513
+ For an arccos() that returns ``NAN`` when real `x` is not in the
514
+ interval ``[-1,1]``, use `numpy.arccos`.
515
+
516
+ Examples
517
+ --------
518
+ >>> np.set_printoptions(precision=4)
519
+
520
+ >>> np.emath.arccos(1) # a scalar is returned
521
+ 0.0
522
+
523
+ >>> np.emath.arccos([1,2])
524
+ array([0.-0.j , 0.-1.317j])
525
+
526
+ """
527
+ x = _fix_real_abs_gt_1(x)
528
+ return nx.arccos(x)
529
+
530
+
531
+ @array_function_dispatch(_unary_dispatcher)
532
+ def arcsin(x):
533
+ """
534
+ Compute the inverse sine of x.
535
+
536
+ Return the "principal value" (for a description of this, see
537
+ `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
538
+ `abs(x) <= 1`, this is a real number in the closed interval
539
+ :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principal value is
540
+ returned.
541
+
542
+ Parameters
543
+ ----------
544
+ x : array_like or scalar
545
+ The value(s) whose arcsin is (are) required.
546
+
547
+ Returns
548
+ -------
549
+ out : ndarray or scalar
550
+ The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
551
+ is `out`, otherwise an array object is returned.
552
+
553
+ See Also
554
+ --------
555
+ numpy.arcsin
556
+
557
+ Notes
558
+ -----
559
+ For an arcsin() that returns ``NAN`` when real `x` is not in the
560
+ interval ``[-1,1]``, use `numpy.arcsin`.
561
+
562
+ Examples
563
+ --------
564
+ >>> np.set_printoptions(precision=4)
565
+
566
+ >>> np.emath.arcsin(0)
567
+ 0.0
568
+
569
+ >>> np.emath.arcsin([0,1])
570
+ array([0. , 1.5708])
571
+
572
+ """
573
+ x = _fix_real_abs_gt_1(x)
574
+ return nx.arcsin(x)
575
+
576
+
577
+ @array_function_dispatch(_unary_dispatcher)
578
+ def arctanh(x):
579
+ """
580
+ Compute the inverse hyperbolic tangent of `x`.
581
+
582
+ Return the "principal value" (for a description of this, see
583
+ `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
584
+ ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
585
+ complex, the result is complex. Finally, `x = 1` returns ``inf`` and
586
+ ``x=-1`` returns ``-inf``.
587
+
588
+ Parameters
589
+ ----------
590
+ x : array_like
591
+ The value(s) whose arctanh is (are) required.
592
+
593
+ Returns
594
+ -------
595
+ out : ndarray or scalar
596
+ The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
597
+ a scalar so is `out`, otherwise an array is returned.
598
+
599
+
600
+ See Also
601
+ --------
602
+ numpy.arctanh
603
+
604
+ Notes
605
+ -----
606
+ For an arctanh() that returns ``NAN`` when real `x` is not in the
607
+ interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
608
+ return +/-inf for ``x = +/-1``).
609
+
610
+ Examples
611
+ --------
612
+ >>> np.set_printoptions(precision=4)
613
+
614
+ >>> from numpy.testing import suppress_warnings
615
+ >>> with suppress_warnings() as sup:
616
+ ... sup.filter(RuntimeWarning)
617
+ ... np.emath.arctanh(np.eye(2))
618
+ array([[inf, 0.],
619
+ [ 0., inf]])
620
+ >>> np.emath.arctanh([1j])
621
+ array([0.+0.7854j])
622
+
623
+ """
624
+ x = _fix_real_abs_gt_1(x)
625
+ return nx.arctanh(x)
.venv/lib/python3.11/site-packages/numpy/lib/setup.py ADDED
@@ -0,0 +1,12 @@
1
+ def configuration(parent_package='',top_path=None):
2
+ from numpy.distutils.misc_util import Configuration
3
+
4
+ config = Configuration('lib', parent_package, top_path)
5
+ config.add_subpackage('tests')
6
+ config.add_data_dir('tests/data')
7
+ config.add_data_files('*.pyi')
8
+ return config
9
+
10
+ if __name__ == '__main__':
11
+ from numpy.distutils.core import setup
12
+ setup(configuration=configuration)
.venv/lib/python3.11/site-packages/numpy/lib/shape_base.py ADDED
@@ -0,0 +1,1274 @@
1
+ import functools
2
+
3
+ import numpy.core.numeric as _nx
4
+ from numpy.core.numeric import asarray, zeros, array, asanyarray
5
+ from numpy.core.fromnumeric import reshape, transpose
6
+ from numpy.core.multiarray import normalize_axis_index
7
+ from numpy.core import overrides
8
+ from numpy.core import vstack, atleast_3d
9
+ from numpy.core.numeric import normalize_axis_tuple
10
+ from numpy.core.shape_base import _arrays_for_stack_dispatcher
11
+ from numpy.lib.index_tricks import ndindex
12
+ from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells
13
+
14
+
15
+ __all__ = [
16
+ 'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
17
+ 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
18
+ 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis',
19
+ 'put_along_axis'
20
+ ]
21
+
22
+
23
+ array_function_dispatch = functools.partial(
24
+ overrides.array_function_dispatch, module='numpy')
25
+
26
+
27
+ def _make_along_axis_idx(arr_shape, indices, axis):
28
+ # compute dimensions to iterate over
29
+ if not _nx.issubdtype(indices.dtype, _nx.integer):
30
+ raise IndexError('`indices` must be an integer array')
31
+ if len(arr_shape) != indices.ndim:
32
+ raise ValueError(
33
+ "`indices` and `arr` must have the same number of dimensions")
34
+ shape_ones = (1,) * indices.ndim
35
+ dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
36
+
37
+ # build a fancy index, consisting of orthogonal aranges, with the
38
+ # requested index inserted at the right location
39
+ fancy_index = []
40
+ for dim, n in zip(dest_dims, arr_shape):
41
+ if dim is None:
42
+ fancy_index.append(indices)
43
+ else:
44
+ ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
45
+ fancy_index.append(_nx.arange(n).reshape(ind_shape))
46
+
47
+ return tuple(fancy_index)
48
+
49
+
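A hedged sketch of the fancy index built above, for a hypothetical 2-D case with axis=1: every other axis contributes an orthogonal arange, and `indices` is slotted into the requested position, which is exactly what `take_along_axis` relies on below.

import numpy as np

arr = np.array([[10, 30, 20], [60, 40, 50]])
indices = np.argsort(arr, axis=1)
rows = np.arange(2).reshape(2, 1)     # orthogonal arange for axis 0
assert (arr[rows, indices] == np.take_along_axis(arr, indices, axis=1)).all()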
50
+ def _take_along_axis_dispatcher(arr, indices, axis):
51
+ return (arr, indices)
52
+
53
+
54
+ @array_function_dispatch(_take_along_axis_dispatcher)
55
+ def take_along_axis(arr, indices, axis):
56
+ """
57
+ Take values from the input array by matching 1d index and data slices.
58
+
59
+ This iterates over matching 1d slices oriented along the specified axis in
60
+ the index and data arrays, and uses the former to look up values in the
61
+ latter. These slices can be different lengths.
62
+
63
+ Functions returning an index along an axis, like `argsort` and
64
+ `argpartition`, produce suitable indices for this function.
65
+
66
+ .. versionadded:: 1.15.0
67
+
68
+ Parameters
69
+ ----------
70
+ arr : ndarray (Ni..., M, Nk...)
71
+ Source array
72
+ indices : ndarray (Ni..., J, Nk...)
73
+ Indices to take along each 1d slice of `arr`. This must match the
74
+ dimension of arr, but dimensions Ni and Nk only need to broadcast
75
+ against `arr`.
76
+ axis : int
77
+ The axis to take 1d slices along. If axis is None, the input array is
78
+ treated as if it had first been flattened to 1d, for consistency with
79
+ `sort` and `argsort`.
80
+
81
+ Returns
82
+ -------
83
+ out: ndarray (Ni..., J, Nk...)
84
+ The indexed result.
85
+
86
+ Notes
87
+ -----
88
+ This is equivalent to (but faster than) the following use of `ndindex` and
89
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
90
+
91
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
92
+ J = indices.shape[axis] # Need not equal M
93
+ out = np.empty(Ni + (J,) + Nk)
94
+
95
+ for ii in ndindex(Ni):
96
+ for kk in ndindex(Nk):
97
+ a_1d = a [ii + s_[:,] + kk]
98
+ indices_1d = indices[ii + s_[:,] + kk]
99
+ out_1d = out [ii + s_[:,] + kk]
100
+ for j in range(J):
101
+ out_1d[j] = a_1d[indices_1d[j]]
102
+
103
+ Equivalently, eliminating the inner loop, the last two lines would be::
104
+
105
+ out_1d[:] = a_1d[indices_1d]
106
+
107
+ See Also
108
+ --------
109
+ take : Take along an axis, using the same indices for every 1d slice
110
+ put_along_axis :
111
+ Put values into the destination array by matching 1d index and data slices
112
+
113
+ Examples
114
+ --------
115
+
116
+ For this sample array
117
+
118
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
119
+
120
+ We can sort either by using sort directly, or argsort and this function
121
+
122
+ >>> np.sort(a, axis=1)
123
+ array([[10, 20, 30],
124
+ [40, 50, 60]])
125
+ >>> ai = np.argsort(a, axis=1)
126
+ >>> ai
127
+ array([[0, 2, 1],
128
+ [1, 2, 0]])
129
+ >>> np.take_along_axis(a, ai, axis=1)
130
+ array([[10, 20, 30],
131
+ [40, 50, 60]])
132
+
133
+ The same works for max and min, if you maintain the trivial dimension
134
+ with ``keepdims``:
135
+
136
+ >>> np.max(a, axis=1, keepdims=True)
137
+ array([[30],
138
+ [60]])
139
+ >>> ai = np.argmax(a, axis=1, keepdims=True)
140
+ >>> ai
141
+ array([[1],
142
+ [0]])
143
+ >>> np.take_along_axis(a, ai, axis=1)
144
+ array([[30],
145
+ [60]])
146
+
147
+ If we want to get the max and min at the same time, we can stack the
148
+ indices first
149
+
150
+ >>> ai_min = np.argmin(a, axis=1, keepdims=True)
151
+ >>> ai_max = np.argmax(a, axis=1, keepdims=True)
152
+ >>> ai = np.concatenate([ai_min, ai_max], axis=1)
153
+ >>> ai
154
+ array([[0, 1],
155
+ [1, 0]])
156
+ >>> np.take_along_axis(a, ai, axis=1)
157
+ array([[10, 30],
158
+ [40, 60]])
159
+ """
160
+ # normalize inputs
161
+ if axis is None:
162
+ arr = arr.flat
163
+ arr_shape = (len(arr),) # flatiter has no .shape
164
+ axis = 0
165
+ else:
166
+ axis = normalize_axis_index(axis, arr.ndim)
167
+ arr_shape = arr.shape
168
+
169
+ # use the fancy index
170
+ return arr[_make_along_axis_idx(arr_shape, indices, axis)]
171
+
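The `axis=None` branch above is not exercised by the docstring examples; a small sketch with hypothetical data: indices are taken against the flattened array.

import numpy as np

a = np.array([[10, 30], [60, 40]])
order = np.argsort(a, axis=None)      # flat indices, here [0, 1, 3, 2]
flat_sorted = np.take_along_axis(a, order, axis=None)
assert flat_sorted.tolist() == [10, 30, 40, 60]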
172
+
173
+ def _put_along_axis_dispatcher(arr, indices, values, axis):
174
+ return (arr, indices, values)
175
+
176
+
177
+ @array_function_dispatch(_put_along_axis_dispatcher)
178
+ def put_along_axis(arr, indices, values, axis):
179
+ """
180
+ Put values into the destination array by matching 1d index and data slices.
181
+
182
+ This iterates over matching 1d slices oriented along the specified axis in
183
+ the index and data arrays, and uses the former to place values into the
184
+ latter. These slices can be different lengths.
185
+
186
+ Functions returning an index along an axis, like `argsort` and
187
+ `argpartition`, produce suitable indices for this function.
188
+
189
+ .. versionadded:: 1.15.0
190
+
191
+ Parameters
192
+ ----------
193
+ arr : ndarray (Ni..., M, Nk...)
194
+ Destination array.
195
+ indices : ndarray (Ni..., J, Nk...)
196
+ Indices to change along each 1d slice of `arr`. This must match the
197
+ dimension of arr, but dimensions in Ni and Nk may be 1 to broadcast
198
+ against `arr`.
199
+ values : array_like (Ni..., J, Nk...)
200
+ values to insert at those indices. Its shape and dimension are
201
+ broadcast to match that of `indices`.
202
+ axis : int
203
+ The axis to take 1d slices along. If axis is None, the destination
204
+ array is treated as if a flattened 1d view had been created of it.
205
+
206
+ Notes
207
+ -----
208
+ This is equivalent to (but faster than) the following use of `ndindex` and
209
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
210
+
211
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
212
+ J = indices.shape[axis] # Need not equal M
213
+
214
+ for ii in ndindex(Ni):
215
+ for kk in ndindex(Nk):
216
+ a_1d = a [ii + s_[:,] + kk]
217
+ indices_1d = indices[ii + s_[:,] + kk]
218
+ values_1d = values [ii + s_[:,] + kk]
219
+ for j in range(J):
220
+ a_1d[indices_1d[j]] = values_1d[j]
221
+
222
+ Equivalently, eliminating the inner loop, the last two lines would be::
223
+
224
+ a_1d[indices_1d] = values_1d
225
+
226
+ See Also
227
+ --------
228
+ take_along_axis :
229
+ Take values from the input array by matching 1d index and data slices
230
+
231
+ Examples
232
+ --------
233
+
234
+ For this sample array
235
+
236
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
237
+
238
+ We can replace the maximum values with:
239
+
240
+ >>> ai = np.argmax(a, axis=1, keepdims=True)
241
+ >>> ai
242
+ array([[1],
243
+ [0]])
244
+ >>> np.put_along_axis(a, ai, 99, axis=1)
245
+ >>> a
246
+ array([[10, 99, 20],
247
+ [99, 40, 50]])
248
+
249
+ """
250
+ # normalize inputs
251
+ if axis is None:
252
+ arr = arr.flat
253
+ axis = 0
254
+ arr_shape = (len(arr),) # flatiter has no .shape
255
+ else:
256
+ axis = normalize_axis_index(axis, arr.ndim)
257
+ arr_shape = arr.shape
258
+
259
+ # use the fancy index
260
+ arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
261
+
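Likewise for `put_along_axis` with `axis=None`, a hedged sketch with hypothetical data: assignment goes through a flat view of the destination.

import numpy as np

a = np.array([[10, 30], [60, 40]])
np.put_along_axis(a, np.array([0, 3]), -1, axis=None)
assert a.tolist() == [[-1, 30], [60, -1]]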
262
+
263
+ def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):
264
+ return (arr,)
265
+
266
+
267
+ @array_function_dispatch(_apply_along_axis_dispatcher)
268
+ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
269
+ """
270
+ Apply a function to 1-D slices along the given axis.
271
+
272
+ Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays
273
+ and `a` is a 1-D slice of `arr` along `axis`.
274
+
275
+ This is equivalent to (but faster than) the following use of `ndindex` and
276
+ `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::
277
+
278
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
279
+ for ii in ndindex(Ni):
280
+ for kk in ndindex(Nk):
281
+ f = func1d(arr[ii + s_[:,] + kk])
282
+ Nj = f.shape
283
+ for jj in ndindex(Nj):
284
+ out[ii + jj + kk] = f[jj]
285
+
286
+ Equivalently, eliminating the inner loop, this can be expressed as::
287
+
288
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
289
+ for ii in ndindex(Ni):
290
+ for kk in ndindex(Nk):
291
+ out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])
292
+
293
+ Parameters
294
+ ----------
295
+ func1d : function (M,) -> (Nj...)
296
+ This function should accept 1-D arrays. It is applied to 1-D
297
+ slices of `arr` along the specified axis.
298
+ axis : integer
299
+ Axis along which `arr` is sliced.
300
+ arr : ndarray (Ni..., M, Nk...)
301
+ Input array.
302
+ args : any
303
+ Additional arguments to `func1d`.
304
+ kwargs : any
305
+ Additional named arguments to `func1d`.
306
+
307
+ .. versionadded:: 1.9.0
308
+
309
+
310
+ Returns
311
+ -------
312
+ out : ndarray (Ni..., Nj..., Nk...)
313
+ The output array. The shape of `out` is identical to the shape of
314
+ `arr`, except along the `axis` dimension. This axis is removed, and
315
+ replaced with new dimensions equal to the shape of the return value
316
+ of `func1d`. So if `func1d` returns a scalar `out` will have one
317
+ fewer dimensions than `arr`.
318
+
319
+ See Also
320
+ --------
321
+ apply_over_axes : Apply a function repeatedly over multiple axes.
322
+
323
+ Examples
324
+ --------
325
+ >>> def my_func(a):
326
+ ... \"\"\"Average first and last element of a 1-D array\"\"\"
327
+ ... return (a[0] + a[-1]) * 0.5
328
+ >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
329
+ >>> np.apply_along_axis(my_func, 0, b)
330
+ array([4., 5., 6.])
331
+ >>> np.apply_along_axis(my_func, 1, b)
332
+ array([2., 5., 8.])
333
+
334
+ For a function that returns a 1D array, the number of dimensions in
335
+ `outarr` is the same as `arr`.
336
+
337
+ >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
338
+ >>> np.apply_along_axis(sorted, 1, b)
339
+ array([[1, 7, 8],
340
+ [3, 4, 9],
341
+ [2, 5, 6]])
342
+
343
+ For a function that returns a higher dimensional array, those dimensions
344
+ are inserted in place of the `axis` dimension.
345
+
346
+ >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
347
+ >>> np.apply_along_axis(np.diag, -1, b)
348
+ array([[[1, 0, 0],
349
+ [0, 2, 0],
350
+ [0, 0, 3]],
351
+ [[4, 0, 0],
352
+ [0, 5, 0],
353
+ [0, 0, 6]],
354
+ [[7, 0, 0],
355
+ [0, 8, 0],
356
+ [0, 0, 9]]])
357
+ """
358
+ # handle negative axes
359
+ arr = asanyarray(arr)
360
+ nd = arr.ndim
361
+ axis = normalize_axis_index(axis, nd)
362
+
363
+ # arr, with the iteration axis at the end
364
+ in_dims = list(range(nd))
365
+ inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis])
366
+
367
+ # compute indices for the iteration axes, and append a trailing ellipsis to
368
+ # prevent 0d arrays decaying to scalars, which fixes gh-8642
369
+ inds = ndindex(inarr_view.shape[:-1])
370
+ inds = (ind + (Ellipsis,) for ind in inds)
371
+
372
+ # invoke the function on the first item
373
+ try:
374
+ ind0 = next(inds)
375
+ except StopIteration:
376
+ raise ValueError(
377
+ 'Cannot apply_along_axis when any iteration dimensions are 0'
378
+ ) from None
379
+ res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
380
+
381
+ # build a buffer for storing evaluations of func1d.
382
+ # remove the requested axis, and add the new ones on the end.
383
+ # laid out so that each write is contiguous.
384
+ # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
385
+ buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)
386
+
387
+ # permutation of axes such that out = buff.transpose(buff_permute)
388
+ buff_dims = list(range(buff.ndim))
389
+ buff_permute = (
390
+ buff_dims[0 : axis] +
391
+ buff_dims[buff.ndim-res.ndim : buff.ndim] +
392
+ buff_dims[axis : buff.ndim-res.ndim]
393
+ )
394
+
395
+ # matrices have a nasty __array_prepare__ and __array_wrap__
396
+ if not isinstance(res, matrix):
397
+ buff = res.__array_prepare__(buff)
398
+
399
+ # save the first result, then compute and save all remaining results
400
+ buff[ind0] = res
401
+ for ind in inds:
402
+ buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))
403
+
404
+ if not isinstance(res, matrix):
405
+ # wrap the array, to preserve subclasses
406
+ buff = res.__array_wrap__(buff)
407
+
408
+ # finally, rotate the inserted axes back to where they belong
409
+ return transpose(buff, buff_permute)
410
+
411
+ else:
412
+ # matrices have to be transposed first, because they collapse dimensions!
413
+ out_arr = transpose(buff, buff_permute)
414
+ return res.__array_wrap__(out_arr)
415
+
416
+
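The docstring's `ndindex`/`s_` pseudocode above can be checked directly. A minimal sketch (illustrative, not part of the module above; `apply_along_axis_reference` is a hypothetical name, and a non-negative `axis` is assumed):

    import numpy as np
    from numpy import ndindex, s_

    def apply_along_axis_reference(func1d, axis, arr):
        # Walk every 1-D slice along `axis`, exactly as the docstring's
        # pseudocode does, and write each result into the output buffer.
        Ni, Nk = arr.shape[:axis], arr.shape[axis+1:]
        out = None
        for ii in ndindex(Ni):
            for kk in ndindex(Nk):
                f = np.asarray(func1d(arr[ii + s_[:,] + kk]))
                if out is None:
                    out = np.zeros(Ni + f.shape + Nk, f.dtype)
                out[ii + s_[...,] + kk] = f
        return out

    b = np.arange(24.).reshape(2, 3, 4)
    assert np.array_equal(apply_along_axis_reference(np.cumsum, 1, b),
                          np.apply_along_axis(np.cumsum, 1, b))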
417
+ def _apply_over_axes_dispatcher(func, a, axes):
418
+ return (a,)
419
+
420
+
421
+ @array_function_dispatch(_apply_over_axes_dispatcher)
422
+ def apply_over_axes(func, a, axes):
423
+ """
424
+ Apply a function repeatedly over multiple axes.
425
+
426
+ `func` is called as `res = func(a, axis)`, where `axis` is the first
427
+ element of `axes`. The result `res` of the function call must have
428
+ either the same dimensions as `a` or one less dimension. If `res`
429
+ has one less dimension than `a`, a dimension is inserted before
430
+ `axis`. The call to `func` is then repeated for each axis in `axes`,
431
+ with `res` as the first argument.
432
+
433
+ Parameters
434
+ ----------
435
+ func : function
436
+ This function must take two arguments, `func(a, axis)`.
437
+ a : array_like
438
+ Input array.
439
+ axes : array_like
440
+ Axes over which `func` is applied; the elements must be integers.
441
+
442
+ Returns
443
+ -------
444
+ apply_over_axis : ndarray
445
+ The output array. The number of dimensions is the same as `a`,
446
+ but the shape can be different. This depends on whether `func`
447
+ changes the shape of its output with respect to its input.
448
+
449
+ See Also
450
+ --------
451
+ apply_along_axis :
452
+ Apply a function to 1-D slices of an array along the given axis.
453
+
454
+ Notes
455
+ -----
456
+ This function is equivalent to tuple axis arguments to reorderable ufuncs
457
+ with keepdims=True. Tuple axis arguments to ufuncs have been available since
458
+ version 1.7.0.
459
+
460
+ Examples
461
+ --------
462
+ >>> a = np.arange(24).reshape(2,3,4)
463
+ >>> a
464
+ array([[[ 0, 1, 2, 3],
465
+ [ 4, 5, 6, 7],
466
+ [ 8, 9, 10, 11]],
467
+ [[12, 13, 14, 15],
468
+ [16, 17, 18, 19],
469
+ [20, 21, 22, 23]]])
470
+
471
+ Sum over axes 0 and 2. The result has same number of dimensions
472
+ as the original array:
473
+
474
+ >>> np.apply_over_axes(np.sum, a, [0,2])
475
+ array([[[ 60],
476
+ [ 92],
477
+ [124]]])
478
+
479
+ Tuple axis arguments to ufuncs are equivalent:
480
+
481
+ >>> np.sum(a, axis=(0,2), keepdims=True)
482
+ array([[[ 60],
483
+ [ 92],
484
+ [124]]])
485
+
486
+ """
487
+ val = asarray(a)
488
+ N = a.ndim
489
+ if array(axes).ndim == 0:
490
+ axes = (axes,)
491
+ for axis in axes:
492
+ if axis < 0:
493
+ axis = N + axis
494
+ args = (val, axis)
495
+ res = func(*args)
496
+ if res.ndim == val.ndim:
497
+ val = res
498
+ else:
499
+ res = expand_dims(res, axis)
500
+ if res.ndim == val.ndim:
501
+ val = res
502
+ else:
503
+ raise ValueError("function is not returning "
504
+ "an array of the correct shape")
505
+ return val
506
+
507
+
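Unrolled by hand for `np.sum`, which drops the reduced axis and so exercises the `expand_dims` branch of the loop above (a small sketch, not part of the module):

    import numpy as np

    a = np.arange(24).reshape(2, 3, 4)
    val = a
    for axis in (0, 2):
        res = np.sum(val, axis)          # ndim drops by one...
        val = np.expand_dims(res, axis)  # ...so the axis is reinserted
    assert val.shape == (1, 3, 1)
    assert np.array_equal(val, np.apply_over_axes(np.sum, a, [0, 2]))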
508
+ def _expand_dims_dispatcher(a, axis):
509
+ return (a,)
510
+
511
+
512
+ @array_function_dispatch(_expand_dims_dispatcher)
513
+ def expand_dims(a, axis):
514
+ """
515
+ Expand the shape of an array.
516
+
517
+ Insert a new axis that will appear at the `axis` position in the expanded
518
+ array shape.
519
+
520
+ Parameters
521
+ ----------
522
+ a : array_like
523
+ Input array.
524
+ axis : int or tuple of ints
525
+ Position in the expanded axes where the new axis (or axes) is placed.
526
+
527
+ .. deprecated:: 1.13.0
528
+ Passing an axis where ``axis > a.ndim`` will be treated as
529
+ ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will
530
+ be treated as ``axis == 0``. This behavior is deprecated.
531
+
532
+ .. versionchanged:: 1.18.0
533
+ A tuple of axes is now supported. Out of range axes as
534
+ described above are now forbidden and raise an `AxisError`.
535
+
536
+ Returns
537
+ -------
538
+ result : ndarray
539
+ View of `a` with the number of dimensions increased.
540
+
541
+ See Also
542
+ --------
543
+ squeeze : The inverse operation, removing singleton dimensions
544
+ reshape : Insert, remove, and combine dimensions, and resize existing ones
545
+ doc.indexing, atleast_1d, atleast_2d, atleast_3d
546
+
547
+ Examples
548
+ --------
549
+ >>> x = np.array([1, 2])
550
+ >>> x.shape
551
+ (2,)
552
+
553
+ The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``:
554
+
555
+ >>> y = np.expand_dims(x, axis=0)
556
+ >>> y
557
+ array([[1, 2]])
558
+ >>> y.shape
559
+ (1, 2)
560
+
561
+ The following is equivalent to ``x[:, np.newaxis]``:
562
+
563
+ >>> y = np.expand_dims(x, axis=1)
564
+ >>> y
565
+ array([[1],
566
+ [2]])
567
+ >>> y.shape
568
+ (2, 1)
569
+
570
+ ``axis`` may also be a tuple:
571
+
572
+ >>> y = np.expand_dims(x, axis=(0, 1))
573
+ >>> y
574
+ array([[[1, 2]]])
575
+
576
+ >>> y = np.expand_dims(x, axis=(2, 0))
577
+ >>> y
578
+ array([[[1],
579
+ [2]]])
580
+
581
+ Note that some examples may use ``None`` instead of ``np.newaxis``. These
582
+ are the same objects:
583
+
584
+ >>> np.newaxis is None
585
+ True
586
+
587
+ """
588
+ if isinstance(a, matrix):
589
+ a = asarray(a)
590
+ else:
591
+ a = asanyarray(a)
592
+
593
+ if type(axis) not in (tuple, list):
594
+ axis = (axis,)
595
+
596
+ out_ndim = len(axis) + a.ndim
597
+ axis = normalize_axis_tuple(axis, out_ndim)
598
+
599
+ shape_it = iter(a.shape)
600
+ shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
601
+
602
+ return a.reshape(shape)
603
+
604
+
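The shape computation at the end of `expand_dims` can be traced on a small case: new axes become 1s, and the remaining slots take the original dimensions in order (an illustrative sketch, reusing the module's own `normalize_axis_tuple` helper):

    import numpy as np
    from numpy.core.numeric import normalize_axis_tuple

    a = np.ones((2, 3))
    new_axes = (0, 3)
    out_ndim = len(new_axes) + a.ndim            # 4
    axis = normalize_axis_tuple(new_axes, out_ndim)
    shape_it = iter(a.shape)
    shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
    assert shape == [1, 2, 3, 1]
    assert np.expand_dims(a, new_axes).shape == (1, 2, 3, 1)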
605
+ row_stack = vstack
606
+
607
+
608
+ def _column_stack_dispatcher(tup):
609
+ return _arrays_for_stack_dispatcher(tup)
610
+
611
+
612
+ @array_function_dispatch(_column_stack_dispatcher)
613
+ def column_stack(tup):
614
+ """
615
+ Stack 1-D arrays as columns into a 2-D array.
616
+
617
+ Take a sequence of 1-D arrays and stack them as columns
618
+ to make a single 2-D array. 2-D arrays are stacked as-is,
619
+ just like with `hstack`. 1-D arrays are turned into 2-D columns
620
+ first.
621
+
622
+ Parameters
623
+ ----------
624
+ tup : sequence of 1-D or 2-D arrays.
625
+ Arrays to stack. All of them must have the same first dimension.
626
+
627
+ Returns
628
+ -------
629
+ stacked : 2-D array
630
+ The array formed by stacking the given arrays.
631
+
632
+ See Also
633
+ --------
634
+ stack, hstack, vstack, concatenate
635
+
636
+ Examples
637
+ --------
638
+ >>> a = np.array((1,2,3))
639
+ >>> b = np.array((2,3,4))
640
+ >>> np.column_stack((a,b))
641
+ array([[1, 2],
642
+ [2, 3],
643
+ [3, 4]])
644
+
645
+ """
646
+ arrays = []
647
+ for v in tup:
648
+ arr = asanyarray(v)
649
+ if arr.ndim < 2:
650
+ arr = array(arr, copy=False, subok=True, ndmin=2).T
651
+ arrays.append(arr)
652
+ return _nx.concatenate(arrays, 1)
653
+
654
+
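The 1-D branch above turns each vector into a column by way of a row vector; a sketch of that transform and the resulting concatenation (illustrative only):

    import numpy as np

    v = np.array([1, 2, 3])
    col = np.array(v, copy=False, subok=True, ndmin=2).T   # (1, 3) -> (3, 1)
    assert col.shape == (3, 1)
    assert np.array_equal(np.column_stack((v, v * 10)),
                          np.concatenate([col, col * 10], axis=1))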
655
+ def _dstack_dispatcher(tup):
656
+ return _arrays_for_stack_dispatcher(tup)
657
+
658
+
659
+ @array_function_dispatch(_dstack_dispatcher)
660
+ def dstack(tup):
661
+ """
662
+ Stack arrays in sequence depth wise (along third axis).
663
+
664
+ This is equivalent to concatenation along the third axis after 2-D arrays
665
+ of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
666
+ `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
667
+ `dsplit`.
668
+
669
+ This function makes most sense for arrays with up to 3 dimensions. For
670
+ instance, for pixel-data with a height (first axis), width (second axis),
671
+ and r/g/b channels (third axis). The functions `concatenate`, `stack` and
672
+ `block` provide more general stacking and concatenation operations.
673
+
674
+ Parameters
675
+ ----------
676
+ tup : sequence of arrays
677
+ The arrays must have the same shape along all but the third axis.
678
+ 1-D or 2-D arrays must have the same shape.
679
+
680
+ Returns
681
+ -------
682
+ stacked : ndarray
683
+ The array formed by stacking the given arrays, will be at least 3-D.
684
+
685
+ See Also
686
+ --------
687
+ concatenate : Join a sequence of arrays along an existing axis.
688
+ stack : Join a sequence of arrays along a new axis.
689
+ block : Assemble an nd-array from nested lists of blocks.
690
+ vstack : Stack arrays in sequence vertically (row wise).
691
+ hstack : Stack arrays in sequence horizontally (column wise).
692
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
693
+ dsplit : Split array along third axis.
694
+
695
+ Examples
696
+ --------
697
+ >>> a = np.array((1,2,3))
698
+ >>> b = np.array((2,3,4))
699
+ >>> np.dstack((a,b))
700
+ array([[[1, 2],
701
+ [2, 3],
702
+ [3, 4]]])
703
+
704
+ >>> a = np.array([[1],[2],[3]])
705
+ >>> b = np.array([[2],[3],[4]])
706
+ >>> np.dstack((a,b))
707
+ array([[[1, 2]],
708
+ [[2, 3]],
709
+ [[3, 4]]])
710
+
711
+ """
712
+ arrs = atleast_3d(*tup)
713
+ if not isinstance(arrs, list):
714
+ arrs = [arrs]
715
+ return _nx.concatenate(arrs, 2)
716
+
717
+
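dstack reduced to its two steps, per the docstring: promote with `atleast_3d`, then concatenate along axis 2 (a small sketch, not part of the module):

    import numpy as np

    a, b = np.array([1, 2, 3]), np.array([2, 3, 4])
    manual = np.concatenate([np.atleast_3d(a), np.atleast_3d(b)], axis=2)
    assert manual.shape == (1, 3, 2)
    assert np.array_equal(np.dstack((a, b)), manual)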
718
+ def _replace_zero_by_x_arrays(sub_arys):
719
+ for i in range(len(sub_arys)):
720
+ if _nx.ndim(sub_arys[i]) == 0:
721
+ sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
722
+ elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
723
+ sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
724
+ return sub_arys
725
+
726
+
727
+ def _array_split_dispatcher(ary, indices_or_sections, axis=None):
728
+ return (ary, indices_or_sections)
729
+
730
+
731
+ @array_function_dispatch(_array_split_dispatcher)
732
+ def array_split(ary, indices_or_sections, axis=0):
733
+ """
734
+ Split an array into multiple sub-arrays.
735
+
736
+ Please refer to the ``split`` documentation. The only difference
737
+ between these functions is that ``array_split`` allows
738
+ `indices_or_sections` to be an integer that does *not* equally
739
+ divide the axis. For an array of length l that should be split
740
+ into n sections, it returns l % n sub-arrays of size l//n + 1
741
+ and the rest of size l//n.
742
+
743
+ See Also
744
+ --------
745
+ split : Split array into multiple sub-arrays of equal size.
746
+
747
+ Examples
748
+ --------
749
+ >>> x = np.arange(8.0)
750
+ >>> np.array_split(x, 3)
751
+ [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
752
+
753
+ >>> x = np.arange(9)
754
+ >>> np.array_split(x, 4)
755
+ [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])]
756
+
757
+ """
758
+ try:
759
+ Ntotal = ary.shape[axis]
760
+ except AttributeError:
761
+ Ntotal = len(ary)
762
+ try:
763
+ # handle array case.
764
+ Nsections = len(indices_or_sections) + 1
765
+ div_points = [0] + list(indices_or_sections) + [Ntotal]
766
+ except TypeError:
767
+ # indices_or_sections is a scalar, not an array.
768
+ Nsections = int(indices_or_sections)
769
+ if Nsections <= 0:
770
+ raise ValueError('number of sections must be larger than 0.') from None
771
+ Neach_section, extras = divmod(Ntotal, Nsections)
772
+ section_sizes = ([0] +
773
+ extras * [Neach_section+1] +
774
+ (Nsections-extras) * [Neach_section])
775
+ div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()
776
+
777
+ sub_arys = []
778
+ sary = _nx.swapaxes(ary, axis, 0)
779
+ for i in range(Nsections):
780
+ st = div_points[i]
781
+ end = div_points[i + 1]
782
+ sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
783
+
784
+ return sub_arys
785
+
786
+
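The split-point arithmetic for the uneven case, worked for l = 8 and n = 3: the first l % n sections get l//n + 1 elements, the rest l//n (illustrative):

    import numpy as np

    Ntotal, Nsections = 8, 3
    Neach, extras = divmod(Ntotal, Nsections)                      # 2, 2
    sizes = [0] + extras * [Neach + 1] + (Nsections - extras) * [Neach]
    assert list(np.array(sizes).cumsum()) == [0, 3, 6, 8]          # div_points
    assert [len(p) for p in np.array_split(np.arange(8.0), 3)] == [3, 3, 2]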
787
+ def _split_dispatcher(ary, indices_or_sections, axis=None):
788
+ return (ary, indices_or_sections)
789
+
790
+
791
+ @array_function_dispatch(_split_dispatcher)
792
+ def split(ary, indices_or_sections, axis=0):
793
+ """
794
+ Split an array into multiple sub-arrays as views into `ary`.
795
+
796
+ Parameters
797
+ ----------
798
+ ary : ndarray
799
+ Array to be divided into sub-arrays.
800
+ indices_or_sections : int or 1-D array
801
+ If `indices_or_sections` is an integer, N, the array will be divided
802
+ into N equal arrays along `axis`. If such a split is not possible,
803
+ an error is raised.
804
+
805
+ If `indices_or_sections` is a 1-D array of sorted integers, the entries
806
+ indicate where along `axis` the array is split. For example,
807
+ ``[2, 3]`` would, for ``axis=0``, result in
808
+
809
+ - ary[:2]
810
+ - ary[2:3]
811
+ - ary[3:]
812
+
813
+ If an index exceeds the dimension of the array along `axis`,
814
+ an empty sub-array is returned correspondingly.
815
+ axis : int, optional
816
+ The axis along which to split, default is 0.
817
+
818
+ Returns
819
+ -------
820
+ sub-arrays : list of ndarrays
821
+ A list of sub-arrays as views into `ary`.
822
+
823
+ Raises
824
+ ------
825
+ ValueError
826
+ If `indices_or_sections` is given as an integer, but
827
+ a split does not result in equal division.
828
+
829
+ See Also
830
+ --------
831
+ array_split : Split an array into multiple sub-arrays of equal or
832
+ near-equal size. Does not raise an exception if
833
+ an equal division cannot be made.
834
+ hsplit : Split array into multiple sub-arrays horizontally (column-wise).
835
+ vsplit : Split array into multiple sub-arrays vertically (row wise).
836
+ dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
837
+ concatenate : Join a sequence of arrays along an existing axis.
838
+ stack : Join a sequence of arrays along a new axis.
839
+ hstack : Stack arrays in sequence horizontally (column wise).
840
+ vstack : Stack arrays in sequence vertically (row wise).
841
+ dstack : Stack arrays in sequence depth wise (along third dimension).
842
+
843
+ Examples
844
+ --------
845
+ >>> x = np.arange(9.0)
846
+ >>> np.split(x, 3)
847
+ [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
848
+
849
+ >>> x = np.arange(8.0)
850
+ >>> np.split(x, [3, 5, 6, 10])
851
+ [array([0., 1., 2.]),
852
+ array([3., 4.]),
853
+ array([5.]),
854
+ array([6., 7.]),
855
+ array([], dtype=float64)]
856
+
857
+ """
858
+ try:
859
+ len(indices_or_sections)
860
+ except TypeError:
861
+ sections = indices_or_sections
862
+ N = ary.shape[axis]
863
+ if N % sections:
864
+ raise ValueError(
865
+ 'array split does not result in an equal division') from None
866
+ return array_split(ary, indices_or_sections, axis)
867
+
868
+
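split only adds a divisibility check on top of array_split; a sketch of both outcomes (illustrative):

    import numpy as np

    x = np.arange(8.0)
    try:
        np.split(x, 3)                     # 8 % 3 != 0, so this must fail
    except ValueError as e:
        assert 'equal division' in str(e)
    assert [len(p) for p in np.split(x, [3, 5])] == [3, 2, 3]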
869
+ def _hvdsplit_dispatcher(ary, indices_or_sections):
870
+ return (ary, indices_or_sections)
871
+
872
+
873
+ @array_function_dispatch(_hvdsplit_dispatcher)
874
+ def hsplit(ary, indices_or_sections):
875
+ """
876
+ Split an array into multiple sub-arrays horizontally (column-wise).
877
+
878
+ Please refer to the `split` documentation. `hsplit` is equivalent
879
+ to `split` with ``axis=1``; the array is always split along the second
880
+ axis except for 1-D arrays, where it is split at ``axis=0``.
881
+
882
+ See Also
883
+ --------
884
+ split : Split an array into multiple sub-arrays of equal size.
885
+
886
+ Examples
887
+ --------
888
+ >>> x = np.arange(16.0).reshape(4, 4)
889
+ >>> x
890
+ array([[ 0., 1., 2., 3.],
891
+ [ 4., 5., 6., 7.],
892
+ [ 8., 9., 10., 11.],
893
+ [12., 13., 14., 15.]])
894
+ >>> np.hsplit(x, 2)
895
+ [array([[ 0., 1.],
896
+ [ 4., 5.],
897
+ [ 8., 9.],
898
+ [12., 13.]]),
899
+ array([[ 2., 3.],
900
+ [ 6., 7.],
901
+ [10., 11.],
902
+ [14., 15.]])]
903
+ >>> np.hsplit(x, np.array([3, 6]))
904
+ [array([[ 0., 1., 2.],
905
+ [ 4., 5., 6.],
906
+ [ 8., 9., 10.],
907
+ [12., 13., 14.]]),
908
+ array([[ 3.],
909
+ [ 7.],
910
+ [11.],
911
+ [15.]]),
912
+ array([], shape=(4, 0), dtype=float64)]
913
+
914
+ With a higher dimensional array the split is still along the second axis.
915
+
916
+ >>> x = np.arange(8.0).reshape(2, 2, 2)
917
+ >>> x
918
+ array([[[0., 1.],
919
+ [2., 3.]],
920
+ [[4., 5.],
921
+ [6., 7.]]])
922
+ >>> np.hsplit(x, 2)
923
+ [array([[[0., 1.]],
924
+ [[4., 5.]]]),
925
+ array([[[2., 3.]],
926
+ [[6., 7.]]])]
927
+
928
+ With a 1-D array, the split is along axis 0.
929
+
930
+ >>> x = np.array([0, 1, 2, 3, 4, 5])
931
+ >>> np.hsplit(x, 2)
932
+ [array([0, 1, 2]), array([3, 4, 5])]
933
+
934
+ """
935
+ if _nx.ndim(ary) == 0:
936
+ raise ValueError('hsplit only works on arrays of 1 or more dimensions')
937
+ if ary.ndim > 1:
938
+ return split(ary, indices_or_sections, 1)
939
+ else:
940
+ return split(ary, indices_or_sections, 0)
941
+
942
+
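The axis selection of hsplit, spelled out against plain split (illustrative):

    import numpy as np

    m = np.arange(16.0).reshape(4, 4)
    assert all(np.array_equal(h, s) for h, s in
               zip(np.hsplit(m, 2), np.split(m, 2, axis=1)))   # ndim > 1
    v = np.arange(6)
    assert all(np.array_equal(h, s) for h, s in
               zip(np.hsplit(v, 2), np.split(v, 2, axis=0)))   # 1-D input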
943
+ @array_function_dispatch(_hvdsplit_dispatcher)
944
+ def vsplit(ary, indices_or_sections):
945
+ """
946
+ Split an array into multiple sub-arrays vertically (row-wise).
947
+
948
+ Please refer to the ``split`` documentation. ``vsplit`` is equivalent
949
+ to ``split`` with `axis=0` (default); the array is always split along the
950
+ first axis regardless of the array dimension.
951
+
952
+ See Also
953
+ --------
954
+ split : Split an array into multiple sub-arrays of equal size.
955
+
956
+ Examples
957
+ --------
958
+ >>> x = np.arange(16.0).reshape(4, 4)
959
+ >>> x
960
+ array([[ 0., 1., 2., 3.],
961
+ [ 4., 5., 6., 7.],
962
+ [ 8., 9., 10., 11.],
963
+ [12., 13., 14., 15.]])
964
+ >>> np.vsplit(x, 2)
965
+ [array([[0., 1., 2., 3.],
966
+ [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],
967
+ [12., 13., 14., 15.]])]
968
+ >>> np.vsplit(x, np.array([3, 6]))
969
+ [array([[ 0., 1., 2., 3.],
970
+ [ 4., 5., 6., 7.],
971
+ [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)]
972
+
973
+ With a higher dimensional array the split is still along the first axis.
974
+
975
+ >>> x = np.arange(8.0).reshape(2, 2, 2)
976
+ >>> x
977
+ array([[[0., 1.],
978
+ [2., 3.]],
979
+ [[4., 5.],
980
+ [6., 7.]]])
981
+ >>> np.vsplit(x, 2)
982
+ [array([[[0., 1.],
983
+ [2., 3.]]]), array([[[4., 5.],
984
+ [6., 7.]]])]
985
+
986
+ """
987
+ if _nx.ndim(ary) < 2:
988
+ raise ValueError('vsplit only works on arrays of 2 or more dimensions')
989
+ return split(ary, indices_or_sections, 0)
990
+
991
+
992
+ @array_function_dispatch(_hvdsplit_dispatcher)
993
+ def dsplit(ary, indices_or_sections):
994
+ """
995
+ Split array into multiple sub-arrays along the 3rd axis (depth).
996
+
997
+ Please refer to the `split` documentation. `dsplit` is equivalent
998
+ to `split` with ``axis=2``; the array is always split along the third
999
+ axis provided the array dimension is greater than or equal to 3.
1000
+
1001
+ See Also
1002
+ --------
1003
+ split : Split an array into multiple sub-arrays of equal size.
1004
+
1005
+ Examples
1006
+ --------
1007
+ >>> x = np.arange(16.0).reshape(2, 2, 4)
1008
+ >>> x
1009
+ array([[[ 0., 1., 2., 3.],
1010
+ [ 4., 5., 6., 7.]],
1011
+ [[ 8., 9., 10., 11.],
1012
+ [12., 13., 14., 15.]]])
1013
+ >>> np.dsplit(x, 2)
1014
+ [array([[[ 0., 1.],
1015
+ [ 4., 5.]],
1016
+ [[ 8., 9.],
1017
+ [12., 13.]]]), array([[[ 2., 3.],
1018
+ [ 6., 7.]],
1019
+ [[10., 11.],
1020
+ [14., 15.]]])]
1021
+ >>> np.dsplit(x, np.array([3, 6]))
1022
+ [array([[[ 0., 1., 2.],
1023
+ [ 4., 5., 6.]],
1024
+ [[ 8., 9., 10.],
1025
+ [12., 13., 14.]]]),
1026
+ array([[[ 3.],
1027
+ [ 7.]],
1028
+ [[11.],
1029
+ [15.]]]),
1030
+ array([], shape=(2, 2, 0), dtype=float64)]
1031
+ """
1032
+ if _nx.ndim(ary) < 3:
1033
+ raise ValueError('dsplit only works on arrays of 3 or more dimensions')
1034
+ return split(ary, indices_or_sections, 2)
1035
+
1036
+
1037
+ def get_array_prepare(*args):
1038
+ """Find the wrapper for the array with the highest priority.
1039
+
1040
+ In case of ties, leftmost wins. If no wrapper is found, return None
1041
+ """
1042
+ wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
1043
+ x.__array_prepare__) for i, x in enumerate(args)
1044
+ if hasattr(x, '__array_prepare__'))
1045
+ if wrappers:
1046
+ return wrappers[-1][-1]
1047
+ return None
1048
+
1049
+
1050
+ def get_array_wrap(*args):
1051
+ """Find the wrapper for the array with the highest priority.
1052
+
1053
+ In case of ties, leftmost wins. If no wrapper is found, return None
1054
+ """
1055
+ wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
1056
+ x.__array_wrap__) for i, x in enumerate(args)
1057
+ if hasattr(x, '__array_wrap__'))
1058
+ if wrappers:
1059
+ return wrappers[-1][-1]
1060
+ return None
1061
+
1062
+
1063
+ def _kron_dispatcher(a, b):
1064
+ return (a, b)
1065
+
1066
+
1067
+ @array_function_dispatch(_kron_dispatcher)
1068
+ def kron(a, b):
1069
+ """
1070
+ Kronecker product of two arrays.
1071
+
1072
+ Computes the Kronecker product, a composite array made of blocks of the
1073
+ second array scaled by the first.
1074
+
1075
+ Parameters
1076
+ ----------
1077
+ a, b : array_like
1078
+
1079
+ Returns
1080
+ -------
1081
+ out : ndarray
1082
+
1083
+ See Also
1084
+ --------
1085
+ outer : The outer product
1086
+
1087
+ Notes
1088
+ -----
1089
+ The function assumes that the number of dimensions of `a` and `b`
1090
+ are the same, if necessary prepending the smallest with ones.
1091
+ If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``,
1092
+ the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
1093
+ The elements are products of elements from `a` and `b`, organized
1094
+ explicitly by::
1095
+
1096
+ kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
1097
+
1098
+ where::
1099
+
1100
+ kt = it * st + jt, t = 0,...,N
1101
+
1102
+ In the common 2-D case (N=1), the block structure can be visualized::
1103
+
1104
+ [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
1105
+ [ ... ... ],
1106
+ [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
1107
+
1108
+
1109
+ Examples
1110
+ --------
1111
+ >>> np.kron([1,10,100], [5,6,7])
1112
+ array([ 5, 6, 7, ..., 500, 600, 700])
1113
+ >>> np.kron([5,6,7], [1,10,100])
1114
+ array([ 5, 50, 500, ..., 7, 70, 700])
1115
+
1116
+ >>> np.kron(np.eye(2), np.ones((2,2)))
1117
+ array([[1., 1., 0., 0.],
1118
+ [1., 1., 0., 0.],
1119
+ [0., 0., 1., 1.],
1120
+ [0., 0., 1., 1.]])
1121
+
1122
+ >>> a = np.arange(100).reshape((2,5,2,5))
1123
+ >>> b = np.arange(24).reshape((2,3,4))
1124
+ >>> c = np.kron(a,b)
1125
+ >>> c.shape
1126
+ (2, 10, 6, 20)
1127
+ >>> I = (1,3,0,2)
1128
+ >>> J = (0,2,1)
1129
+ >>> J1 = (0,) + J # extend to ndim=4
1130
+ >>> S1 = (1,) + b.shape
1131
+ >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
1132
+ >>> c[K] == a[I]*b[J]
1133
+ True
1134
+
1135
+ """
1136
+ # Working:
1137
+ # 1. Equalise the shapes by prepending smaller array with 1s
1138
+ # 2. Expand shapes of both the arrays by adding new axes at
1139
+ # odd positions for 1st array and even positions for 2nd
1140
+ # 3. Compute the product of the modified array
1141
+ # 4. The innermost array elements now contain the rows of
1142
+ # the Kronecker product
1143
+ # 5. Reshape the result to kron's shape, which is the same as
1144
+ # the product of the shapes of the two arrays.
1145
+ b = asanyarray(b)
1146
+ a = array(a, copy=False, subok=True, ndmin=b.ndim)
1147
+ is_any_mat = isinstance(a, matrix) or isinstance(b, matrix)
1148
+ ndb, nda = b.ndim, a.ndim
1149
+ nd = max(ndb, nda)
1150
+
1151
+ if (nda == 0 or ndb == 0):
1152
+ return _nx.multiply(a, b)
1153
+
1154
+ as_ = a.shape
1155
+ bs = b.shape
1156
+ if not a.flags.contiguous:
1157
+ a = reshape(a, as_)
1158
+ if not b.flags.contiguous:
1159
+ b = reshape(b, bs)
1160
+
1161
+ # Equalise the shapes by prepending smaller one with 1s
1162
+ as_ = (1,)*max(0, ndb-nda) + as_
1163
+ bs = (1,)*max(0, nda-ndb) + bs
1164
+
1165
+ # Insert empty dimensions
1166
+ a_arr = expand_dims(a, axis=tuple(range(ndb-nda)))
1167
+ b_arr = expand_dims(b, axis=tuple(range(nda-ndb)))
1168
+
1169
+ # Compute the product
1170
+ a_arr = expand_dims(a_arr, axis=tuple(range(1, nd*2, 2)))
1171
+ b_arr = expand_dims(b_arr, axis=tuple(range(0, nd*2, 2)))
1172
+ # In case of `mat`, convert result to `array`
1173
+ result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat))
1174
+
1175
+ # Reshape back
1176
+ result = result.reshape(_nx.multiply(as_, bs))
1177
+
1178
+ return result if not is_any_mat else matrix(result, copy=False)
1179
+
1180
+
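The interleaved-axes trick the comments describe, written out for the 2-D case: a new axis after each axis of `a` and before each axis of `b`, so the product carries index pairs (i, j) that the final reshape fuses into k = i*s + j (a sketch, not part of the module):

    import numpy as np

    a, b = np.eye(2), np.ones((2, 2))
    prod = a[:, None, :, None] * b[None, :, None, :]    # prod[i0,j0,i1,j1]
    manual = prod.reshape(a.shape[0] * b.shape[0], a.shape[1] * b.shape[1])
    assert np.array_equal(manual, np.kron(a, b))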
1181
+ def _tile_dispatcher(A, reps):
1182
+ return (A, reps)
1183
+
1184
+
1185
+ @array_function_dispatch(_tile_dispatcher)
1186
+ def tile(A, reps):
1187
+ """
1188
+ Construct an array by repeating A the number of times given by reps.
1189
+
1190
+ If `reps` has length ``d``, the result will have dimension of
1191
+ ``max(d, A.ndim)``.
1192
+
1193
+ If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
1194
+ axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
1195
+ or shape (1, 1, 3) for 3-D replication. If this is not the desired
1196
+ behavior, promote `A` to d-dimensions manually before calling this
1197
+ function.
1198
+
1199
+ If ``A.ndim > d``, `reps` is promoted to `A`.ndim by prepending 1's to it.
1200
+ Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
1201
+ (1, 1, 2, 2).
1202
+
1203
+ Note : Although tile may be used for broadcasting, it is strongly
1204
+ recommended to use numpy's broadcasting operations and functions.
1205
+
1206
+ Parameters
1207
+ ----------
1208
+ A : array_like
1209
+ The input array.
1210
+ reps : array_like
1211
+ The number of repetitions of `A` along each axis.
1212
+
1213
+ Returns
1214
+ -------
1215
+ c : ndarray
1216
+ The tiled output array.
1217
+
1218
+ See Also
1219
+ --------
1220
+ repeat : Repeat elements of an array.
1221
+ broadcast_to : Broadcast an array to a new shape
1222
+
1223
+ Examples
1224
+ --------
1225
+ >>> a = np.array([0, 1, 2])
1226
+ >>> np.tile(a, 2)
1227
+ array([0, 1, 2, 0, 1, 2])
1228
+ >>> np.tile(a, (2, 2))
1229
+ array([[0, 1, 2, 0, 1, 2],
1230
+ [0, 1, 2, 0, 1, 2]])
1231
+ >>> np.tile(a, (2, 1, 2))
1232
+ array([[[0, 1, 2, 0, 1, 2]],
1233
+ [[0, 1, 2, 0, 1, 2]]])
1234
+
1235
+ >>> b = np.array([[1, 2], [3, 4]])
1236
+ >>> np.tile(b, 2)
1237
+ array([[1, 2, 1, 2],
1238
+ [3, 4, 3, 4]])
1239
+ >>> np.tile(b, (2, 1))
1240
+ array([[1, 2],
1241
+ [3, 4],
1242
+ [1, 2],
1243
+ [3, 4]])
1244
+
1245
+ >>> c = np.array([1,2,3,4])
1246
+ >>> np.tile(c,(4,1))
1247
+ array([[1, 2, 3, 4],
1248
+ [1, 2, 3, 4],
1249
+ [1, 2, 3, 4],
1250
+ [1, 2, 3, 4]])
1251
+ """
1252
+ try:
1253
+ tup = tuple(reps)
1254
+ except TypeError:
1255
+ tup = (reps,)
1256
+ d = len(tup)
1257
+ if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
1258
+ # Fixes the problem that the function does not make a copy if A is a
1259
+ # numpy array and the repetitions are 1 in all dimensions
1260
+ return _nx.array(A, copy=True, subok=True, ndmin=d)
1261
+ else:
1262
+ # Note that no copy of zero-sized arrays is made. However since they
1263
+ # have no data there is no risk of an inadvertent overwrite.
1264
+ c = _nx.array(A, copy=False, subok=True, ndmin=d)
1265
+ if (d < c.ndim):
1266
+ tup = (1,)*(c.ndim-d) + tup
1267
+ shape_out = tuple(s*t for s, t in zip(c.shape, tup))
1268
+ n = c.size
1269
+ if n > 0:
1270
+ for dim_in, nrep in zip(c.shape, tup):
1271
+ if nrep != 1:
1272
+ c = c.reshape(-1, n).repeat(nrep, 0)
1273
+ n //= dim_in
1274
+ return c.reshape(shape_out)
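The repeat loop at the end of tile duplicates whole blocks, not single elements, because each pass reshapes to rows of length n before repeating; mirrored by hand for reps that already match A.ndim (illustrative sketch):

    import numpy as np

    A, tup = np.array([[1, 2], [3, 4]]), (2, 3)
    c, n = A, A.size
    shape_out = tuple(s * t for s, t in zip(c.shape, tup))
    for dim_in, nrep in zip(c.shape, tup):
        if nrep != 1:
            c = c.reshape(-1, n).repeat(nrep, 0)   # repeat block-rows
        n //= dim_in
    assert np.array_equal(c.reshape(shape_out), np.tile(A, tup))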
.venv/lib/python3.11/site-packages/numpy/lib/shape_base.pyi ADDED
@@ -0,0 +1,220 @@
1
+ import sys
2
+ from collections.abc import Callable, Sequence
3
+ from typing import TypeVar, Any, overload, SupportsIndex, Protocol
4
+
5
+ if sys.version_info >= (3, 10):
6
+ from typing import ParamSpec, Concatenate
7
+ else:
8
+ from typing_extensions import ParamSpec, Concatenate
9
+
10
+ from numpy import (
11
+ generic,
12
+ integer,
13
+ ufunc,
14
+ bool_,
15
+ unsignedinteger,
16
+ signedinteger,
17
+ floating,
18
+ complexfloating,
19
+ object_,
20
+ )
21
+
22
+ from numpy._typing import (
23
+ ArrayLike,
24
+ NDArray,
25
+ _ShapeLike,
26
+ _ArrayLike,
27
+ _ArrayLikeBool_co,
28
+ _ArrayLikeUInt_co,
29
+ _ArrayLikeInt_co,
30
+ _ArrayLikeFloat_co,
31
+ _ArrayLikeComplex_co,
32
+ _ArrayLikeObject_co,
33
+ )
34
+
35
+ from numpy.core.shape_base import vstack
36
+
37
+ _P = ParamSpec("_P")
38
+ _SCT = TypeVar("_SCT", bound=generic)
39
+
40
+ # The signatures of `__array_wrap__` and `__array_prepare__` are the same;
41
+ # give them unique names for the sake of clarity
42
+ class _ArrayWrap(Protocol):
43
+ def __call__(
44
+ self,
45
+ array: NDArray[Any],
46
+ context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
47
+ /,
48
+ ) -> Any: ...
49
+
50
+ class _ArrayPrepare(Protocol):
51
+ def __call__(
52
+ self,
53
+ array: NDArray[Any],
54
+ context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
55
+ /,
56
+ ) -> Any: ...
57
+
58
+ class _SupportsArrayWrap(Protocol):
59
+ @property
60
+ def __array_wrap__(self) -> _ArrayWrap: ...
61
+
62
+ class _SupportsArrayPrepare(Protocol):
63
+ @property
64
+ def __array_prepare__(self) -> _ArrayPrepare: ...
65
+
66
+ __all__: list[str]
67
+
68
+ row_stack = vstack
69
+
70
+ def take_along_axis(
71
+ arr: _SCT | NDArray[_SCT],
72
+ indices: NDArray[integer[Any]],
73
+ axis: None | int,
74
+ ) -> NDArray[_SCT]: ...
75
+
76
+ def put_along_axis(
77
+ arr: NDArray[_SCT],
78
+ indices: NDArray[integer[Any]],
79
+ values: ArrayLike,
80
+ axis: None | int,
81
+ ) -> None: ...
82
+
83
+ @overload
84
+ def apply_along_axis(
85
+ func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_SCT]],
86
+ axis: SupportsIndex,
87
+ arr: ArrayLike,
88
+ *args: _P.args,
89
+ **kwargs: _P.kwargs,
90
+ ) -> NDArray[_SCT]: ...
91
+ @overload
92
+ def apply_along_axis(
93
+ func1d: Callable[Concatenate[NDArray[Any], _P], ArrayLike],
94
+ axis: SupportsIndex,
95
+ arr: ArrayLike,
96
+ *args: _P.args,
97
+ **kwargs: _P.kwargs,
98
+ ) -> NDArray[Any]: ...
99
+
100
+ def apply_over_axes(
101
+ func: Callable[[NDArray[Any], int], NDArray[_SCT]],
102
+ a: ArrayLike,
103
+ axes: int | Sequence[int],
104
+ ) -> NDArray[_SCT]: ...
105
+
106
+ @overload
107
+ def expand_dims(
108
+ a: _ArrayLike[_SCT],
109
+ axis: _ShapeLike,
110
+ ) -> NDArray[_SCT]: ...
111
+ @overload
112
+ def expand_dims(
113
+ a: ArrayLike,
114
+ axis: _ShapeLike,
115
+ ) -> NDArray[Any]: ...
116
+
117
+ @overload
118
+ def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
119
+ @overload
120
+ def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
121
+
122
+ @overload
123
+ def dstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
124
+ @overload
125
+ def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
126
+
127
+ @overload
128
+ def array_split(
129
+ ary: _ArrayLike[_SCT],
130
+ indices_or_sections: _ShapeLike,
131
+ axis: SupportsIndex = ...,
132
+ ) -> list[NDArray[_SCT]]: ...
133
+ @overload
134
+ def array_split(
135
+ ary: ArrayLike,
136
+ indices_or_sections: _ShapeLike,
137
+ axis: SupportsIndex = ...,
138
+ ) -> list[NDArray[Any]]: ...
139
+
140
+ @overload
141
+ def split(
142
+ ary: _ArrayLike[_SCT],
143
+ indices_or_sections: _ShapeLike,
144
+ axis: SupportsIndex = ...,
145
+ ) -> list[NDArray[_SCT]]: ...
146
+ @overload
147
+ def split(
148
+ ary: ArrayLike,
149
+ indices_or_sections: _ShapeLike,
150
+ axis: SupportsIndex = ...,
151
+ ) -> list[NDArray[Any]]: ...
152
+
153
+ @overload
154
+ def hsplit(
155
+ ary: _ArrayLike[_SCT],
156
+ indices_or_sections: _ShapeLike,
157
+ ) -> list[NDArray[_SCT]]: ...
158
+ @overload
159
+ def hsplit(
160
+ ary: ArrayLike,
161
+ indices_or_sections: _ShapeLike,
162
+ ) -> list[NDArray[Any]]: ...
163
+
164
+ @overload
165
+ def vsplit(
166
+ ary: _ArrayLike[_SCT],
167
+ indices_or_sections: _ShapeLike,
168
+ ) -> list[NDArray[_SCT]]: ...
169
+ @overload
170
+ def vsplit(
171
+ ary: ArrayLike,
172
+ indices_or_sections: _ShapeLike,
173
+ ) -> list[NDArray[Any]]: ...
174
+
175
+ @overload
176
+ def dsplit(
177
+ ary: _ArrayLike[_SCT],
178
+ indices_or_sections: _ShapeLike,
179
+ ) -> list[NDArray[_SCT]]: ...
180
+ @overload
181
+ def dsplit(
182
+ ary: ArrayLike,
183
+ indices_or_sections: _ShapeLike,
184
+ ) -> list[NDArray[Any]]: ...
185
+
186
+ @overload
187
+ def get_array_prepare(*args: _SupportsArrayPrepare) -> _ArrayPrepare: ...
188
+ @overload
189
+ def get_array_prepare(*args: object) -> None | _ArrayPrepare: ...
190
+
191
+ @overload
192
+ def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ...
193
+ @overload
194
+ def get_array_wrap(*args: object) -> None | _ArrayWrap: ...
195
+
196
+ @overload
197
+ def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[bool_]: ... # type: ignore[misc]
198
+ @overload
199
+ def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ... # type: ignore[misc]
200
+ @overload
201
+ def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ... # type: ignore[misc]
202
+ @overload
203
+ def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ... # type: ignore[misc]
204
+ @overload
205
+ def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
206
+ @overload
207
+ def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ...
208
+ @overload
209
+ def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ...
210
+
211
+ @overload
212
+ def tile(
213
+ A: _ArrayLike[_SCT],
214
+ reps: int | Sequence[int],
215
+ ) -> NDArray[_SCT]: ...
216
+ @overload
217
+ def tile(
218
+ A: ArrayLike,
219
+ reps: int | Sequence[int],
220
+ ) -> NDArray[Any]: ...
.venv/lib/python3.11/site-packages/numpy/lib/stride_tricks.py ADDED
@@ -0,0 +1,547 @@
1
+ """
2
+ Utilities that manipulate strides to achieve desirable effects.
3
+
4
+ An explanation of strides can be found in the "ndarray.rst" file in the
5
+ NumPy reference guide.
6
+
7
+ """
8
+ import numpy as np
9
+ from numpy.core.numeric import normalize_axis_tuple
10
+ from numpy.core.overrides import array_function_dispatch, set_module
11
+
12
+ __all__ = ['broadcast_to', 'broadcast_arrays', 'broadcast_shapes']
13
+
14
+
15
+ class DummyArray:
16
+ """Dummy object that just exists to hang __array_interface__ dictionaries
17
+ and possibly keep alive a reference to a base array.
18
+ """
19
+
20
+ def __init__(self, interface, base=None):
21
+ self.__array_interface__ = interface
22
+ self.base = base
23
+
24
+
25
+ def _maybe_view_as_subclass(original_array, new_array):
26
+ if type(original_array) is not type(new_array):
27
+ # if input was an ndarray subclass and subclasses were OK,
28
+ # then view the result as that subclass.
29
+ new_array = new_array.view(type=type(original_array))
30
+ # Since we have done something akin to a view from original_array, we
31
+ # should let the subclass finalize (if it has it implemented, i.e., is
32
+ # not None).
33
+ if new_array.__array_finalize__:
34
+ new_array.__array_finalize__(original_array)
35
+ return new_array
36
+
37
+
38
+ def as_strided(x, shape=None, strides=None, subok=False, writeable=True):
39
+ """
40
+ Create a view into the array with the given shape and strides.
41
+
42
+ .. warning:: This function has to be used with extreme care, see notes.
43
+
44
+ Parameters
45
+ ----------
46
+ x : ndarray
47
+ Array from which to create a new view.
48
+ shape : sequence of int, optional
49
+ The shape of the new array. Defaults to ``x.shape``.
50
+ strides : sequence of int, optional
51
+ The strides of the new array. Defaults to ``x.strides``.
52
+ subok : bool, optional
53
+ .. versionadded:: 1.10
54
+
55
+ If True, subclasses are preserved.
56
+ writeable : bool, optional
57
+ .. versionadded:: 1.12
58
+
59
+ If set to False, the returned array will always be readonly.
60
+ Otherwise it will be writable if the original array was. It
61
+ is advisable to set this to False if possible (see Notes).
62
+
63
+ Returns
64
+ -------
65
+ view : ndarray
66
+
67
+ See also
68
+ --------
69
+ broadcast_to : broadcast an array to a given shape.
70
+ reshape : reshape an array.
71
+ lib.stride_tricks.sliding_window_view :
72
+ user-friendly and safe function for the creation of sliding window views.
73
+
74
+ Notes
75
+ -----
76
+ ``as_strided`` creates a view into the array given the exact strides
77
+ and shape. This means it manipulates the internal data structure of
78
+ ndarray and, if done incorrectly, the array elements can point to
79
+ invalid memory and can corrupt results or crash your program.
80
+ It is advisable to always use the original ``x.strides`` when
81
+ calculating new strides to avoid reliance on a contiguous memory
82
+ layout.
83
+
84
+ Furthermore, arrays created with this function often contain self-
85
+ overlapping memory, so that two elements are identical.
86
+ Vectorized write operations on such arrays will typically be
87
+ unpredictable. They may even give different results for small, large,
88
+ or transposed arrays.
89
+
90
+ Since writing to these arrays has to be tested and done with great
91
+ care, you may want to use ``writeable=False`` to avoid accidental write
92
+ operations.
93
+
94
+ For these reasons it is advisable to avoid ``as_strided`` when
95
+ possible.
96
+ """
97
+ # first convert input to array, possibly keeping subclass
98
+ x = np.array(x, copy=False, subok=subok)
99
+ interface = dict(x.__array_interface__)
100
+ if shape is not None:
101
+ interface['shape'] = tuple(shape)
102
+ if strides is not None:
103
+ interface['strides'] = tuple(strides)
104
+
105
+ array = np.asarray(DummyArray(interface, base=x))
106
+ # The route via `__interface__` does not preserve structured
107
+ # dtypes. Since dtype should remain unchanged, we set it explicitly.
108
+ array.dtype = x.dtype
109
+
110
+ view = _maybe_view_as_subclass(x, array)
111
+
112
+ if view.flags.writeable and not writeable:
113
+ view.flags.writeable = False
114
+
115
+ return view
116
+
117
+
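A cautious use of as_strided in line with the Notes above: strides derived from `x.strides` rather than hard-coded byte counts, and the view marked read-only (illustrative sketch):

    import numpy as np
    from numpy.lib.stride_tricks import as_strided

    x = np.arange(6)
    windows = as_strided(x, shape=(4, 3),
                         strides=(x.strides[0], x.strides[0]),
                         writeable=False)            # overlapping rows
    assert np.array_equal(windows[0], [0, 1, 2])
    assert not windows.flags.writeable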
118
+ def _sliding_window_view_dispatcher(x, window_shape, axis=None, *,
119
+ subok=None, writeable=None):
120
+ return (x,)
121
+
122
+
123
+ @array_function_dispatch(_sliding_window_view_dispatcher)
124
+ def sliding_window_view(x, window_shape, axis=None, *,
125
+ subok=False, writeable=False):
126
+ """
127
+ Create a sliding window view into the array with the given window shape.
128
+
129
+ Also known as rolling or moving window, the window slides across all
130
+ dimensions of the array and extracts subsets of the array at all window
131
+ positions.
132
+
133
+ .. versionadded:: 1.20.0
134
+
135
+ Parameters
136
+ ----------
137
+ x : array_like
138
+ Array to create the sliding window view from.
139
+ window_shape : int or tuple of int
140
+ Size of window over each axis that takes part in the sliding window.
141
+ If `axis` is not present, must have same length as the number of input
142
+ array dimensions. Single integers `i` are treated as if they were the
143
+ tuple `(i,)`.
144
+ axis : int or tuple of int, optional
145
+ Axis or axes along which the sliding window is applied.
146
+ By default, the sliding window is applied to all axes and
147
+ `window_shape[i]` will refer to axis `i` of `x`.
148
+ If `axis` is given as a `tuple of int`, `window_shape[i]` will refer to
149
+ the axis `axis[i]` of `x`.
150
+ Single integers `i` are treated as if they were the tuple `(i,)`.
151
+ subok : bool, optional
152
+ If True, sub-classes will be passed-through, otherwise the returned
153
+ array will be forced to be a base-class array (default).
154
+ writeable : bool, optional
155
+ When true, allow writing to the returned view. The default is false,
156
+ as this should be used with caution: the returned view contains the
157
+ same memory location multiple times, so writing to one location will
158
+ cause others to change.
159
+
160
+ Returns
161
+ -------
162
+ view : ndarray
163
+ Sliding window view of the array. The sliding window dimensions are
164
+ inserted at the end, and the original dimensions are trimmed as
165
+ required by the size of the sliding window.
166
+ That is, ``view.shape = x_shape_trimmed + window_shape``, where
167
+ ``x_shape_trimmed`` is ``x.shape`` with every entry reduced by one less
168
+ than the corresponding window size.
169
+
170
+ See Also
171
+ --------
172
+ lib.stride_tricks.as_strided: A lower-level and less safe routine for
173
+ creating arbitrary views from custom shape and strides.
174
+ broadcast_to: broadcast an array to a given shape.
175
+
176
+ Notes
177
+ -----
178
+ For many applications using a sliding window view can be convenient, but
179
+ potentially very slow. Often specialized solutions exist, for example:
180
+
181
+ - `scipy.signal.fftconvolve`
182
+
183
+ - filtering functions in `scipy.ndimage`
184
+
185
+ - moving window functions provided by
186
+ `bottleneck <https://github.com/pydata/bottleneck>`_.
187
+
188
+ As a rough estimate, a sliding window approach with an input size of `N`
189
+ and a window size of `W` will scale as `O(N*W)` where frequently a special
190
+ algorithm can achieve `O(N)`. That means that the sliding window variant
191
+ for a window size of 100 can be 100 times slower than a more specialized
192
+ version.
193
+
194
+ Nevertheless, for small window sizes, when no custom algorithm exists, or
195
+ as a prototyping and developing tool, this function can be a good solution.
196
+
197
+ Examples
198
+ --------
199
+ >>> x = np.arange(6)
200
+ >>> x.shape
201
+ (6,)
202
+ >>> v = sliding_window_view(x, 3)
203
+ >>> v.shape
204
+ (4, 3)
205
+ >>> v
206
+ array([[0, 1, 2],
207
+ [1, 2, 3],
208
+ [2, 3, 4],
209
+ [3, 4, 5]])
210
+
211
+ This also works in more dimensions, e.g.
212
+
213
+ >>> i, j = np.ogrid[:3, :4]
214
+ >>> x = 10*i + j
215
+ >>> x.shape
216
+ (3, 4)
217
+ >>> x
218
+ array([[ 0, 1, 2, 3],
219
+ [10, 11, 12, 13],
220
+ [20, 21, 22, 23]])
221
+ >>> shape = (2,2)
222
+ >>> v = sliding_window_view(x, shape)
223
+ >>> v.shape
224
+ (2, 3, 2, 2)
225
+ >>> v
226
+ array([[[[ 0, 1],
227
+ [10, 11]],
228
+ [[ 1, 2],
229
+ [11, 12]],
230
+ [[ 2, 3],
231
+ [12, 13]]],
232
+ [[[10, 11],
233
+ [20, 21]],
234
+ [[11, 12],
235
+ [21, 22]],
236
+ [[12, 13],
237
+ [22, 23]]]])
238
+
239
+ The axis can be specified explicitly:
240
+
241
+ >>> v = sliding_window_view(x, 3, 0)
242
+ >>> v.shape
243
+ (1, 4, 3)
244
+ >>> v
245
+ array([[[ 0, 10, 20],
246
+ [ 1, 11, 21],
247
+ [ 2, 12, 22],
248
+ [ 3, 13, 23]]])
249
+
250
+ The same axis can be used several times. In that case, every use reduces
251
+ the corresponding original dimension:
252
+
253
+ >>> v = sliding_window_view(x, (2, 3), (1, 1))
254
+ >>> v.shape
255
+ (3, 1, 2, 3)
256
+ >>> v
257
+ array([[[[ 0, 1, 2],
258
+ [ 1, 2, 3]]],
259
+ [[[10, 11, 12],
260
+ [11, 12, 13]]],
261
+ [[[20, 21, 22],
262
+ [21, 22, 23]]]])
263
+
264
+ Combining with stepped slicing (`::step`), this can be used to take sliding
265
+ views which skip elements:
266
+
267
+ >>> x = np.arange(7)
268
+ >>> sliding_window_view(x, 5)[:, ::2]
269
+ array([[0, 2, 4],
270
+ [1, 3, 5],
271
+ [2, 4, 6]])
272
+
273
+ or views which move by multiple elements
274
+
275
+ >>> x = np.arange(7)
276
+ >>> sliding_window_view(x, 3)[::2, :]
277
+ array([[0, 1, 2],
278
+ [2, 3, 4],
279
+ [4, 5, 6]])
280
+
281
+ A common application of `sliding_window_view` is the calculation of running
282
+ statistics. The simplest example is the
283
+ `moving average <https://en.wikipedia.org/wiki/Moving_average>`_:
284
+
285
+ >>> x = np.arange(6)
286
+ >>> x.shape
287
+ (6,)
288
+ >>> v = sliding_window_view(x, 3)
289
+ >>> v.shape
290
+ (4, 3)
291
+ >>> v
292
+ array([[0, 1, 2],
293
+ [1, 2, 3],
294
+ [2, 3, 4],
295
+ [3, 4, 5]])
296
+ >>> moving_average = v.mean(axis=-1)
297
+ >>> moving_average
298
+ array([1., 2., 3., 4.])
299
+
300
+ Note that a sliding window approach is often **not** optimal (see Notes).
301
+ """
302
+ window_shape = (tuple(window_shape)
303
+ if np.iterable(window_shape)
304
+ else (window_shape,))
305
+ # first convert input to array, possibly keeping subclass
306
+ x = np.array(x, copy=False, subok=subok)
307
+
308
+ window_shape_array = np.array(window_shape)
309
+ if np.any(window_shape_array < 0):
310
+ raise ValueError('`window_shape` cannot contain negative values')
311
+
312
+ if axis is None:
313
+ axis = tuple(range(x.ndim))
314
+ if len(window_shape) != len(axis):
315
+ raise ValueError(f'Since axis is `None`, must provide '
316
+ f'window_shape for all dimensions of `x`; '
317
+ f'got {len(window_shape)} window_shape elements '
318
+ f'and `x.ndim` is {x.ndim}.')
319
+ else:
320
+ axis = normalize_axis_tuple(axis, x.ndim, allow_duplicate=True)
321
+ if len(window_shape) != len(axis):
322
+ raise ValueError(f'Must provide matching length window_shape and '
323
+ f'axis; got {len(window_shape)} window_shape '
324
+ f'elements and {len(axis)} axes elements.')
325
+
326
+ out_strides = x.strides + tuple(x.strides[ax] for ax in axis)
327
+
328
+ # note: same axis can be windowed repeatedly
329
+ x_shape_trimmed = list(x.shape)
330
+ for ax, dim in zip(axis, window_shape):
331
+ if x_shape_trimmed[ax] < dim:
332
+ raise ValueError(
333
+ 'window shape cannot be larger than input array shape')
334
+ x_shape_trimmed[ax] -= dim - 1
335
+ out_shape = tuple(x_shape_trimmed) + window_shape
336
+ return as_strided(x, strides=out_strides, shape=out_shape,
337
+ subok=subok, writeable=writeable)
338
+
339
+
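The shape/stride arithmetic above, checked on a 1-D input: the windowed axis is trimmed to N - W + 1 and the appended window axis reuses the same stride (illustrative):

    import numpy as np
    from numpy.lib.stride_tricks import sliding_window_view

    x, W = np.arange(6), 3
    v = sliding_window_view(x, W)
    assert v.shape == (x.shape[0] - W + 1, W)        # (4, 3)
    assert v.strides == (x.strides[0], x.strides[0])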
340
+ def _broadcast_to(array, shape, subok, readonly):
341
+ shape = tuple(shape) if np.iterable(shape) else (shape,)
342
+ array = np.array(array, copy=False, subok=subok)
343
+ if not shape and array.shape:
344
+ raise ValueError('cannot broadcast a non-scalar to a scalar array')
345
+ if any(size < 0 for size in shape):
346
+ raise ValueError('all elements of broadcast shape must be non-'
347
+ 'negative')
348
+ extras = []
349
+ it = np.nditer(
350
+ (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
351
+ op_flags=['readonly'], itershape=shape, order='C')
352
+ with it:
353
+ # never really has writebackifcopy semantics
354
+ broadcast = it.itviews[0]
355
+ result = _maybe_view_as_subclass(array, broadcast)
356
+ # In a future version this will go away
357
+ if not readonly and array.flags._writeable_no_warn:
358
+ result.flags.writeable = True
359
+ result.flags._warn_on_write = True
360
+ return result
361
+
362
+
363
+ def _broadcast_to_dispatcher(array, shape, subok=None):
364
+ return (array,)
365
+
366
+
367
+ @array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
368
+ def broadcast_to(array, shape, subok=False):
369
+ """Broadcast an array to a new shape.
370
+
371
+ Parameters
372
+ ----------
373
+ array : array_like
374
+ The array to broadcast.
375
+ shape : tuple or int
376
+ The shape of the desired array. A single integer ``i`` is interpreted
377
+ as ``(i,)``.
378
+ subok : bool, optional
379
+ If True, then sub-classes will be passed-through, otherwise
380
+ the returned array will be forced to be a base-class array (default).
381
+
382
+ Returns
383
+ -------
384
+ broadcast : array
385
+ A readonly view on the original array with the given shape. It is
386
+ typically not contiguous. Furthermore, more than one element of a
387
+ broadcasted array may refer to a single memory location.
388
+
389
+ Raises
390
+ ------
391
+ ValueError
392
+ If the array is not compatible with the new shape according to NumPy's
393
+ broadcasting rules.
394
+
395
+ See Also
396
+ --------
397
+ broadcast
398
+ broadcast_arrays
399
+ broadcast_shapes
400
+
401
+ Notes
402
+ -----
403
+ .. versionadded:: 1.10.0
404
+
405
+ Examples
406
+ --------
407
+ >>> x = np.array([1, 2, 3])
408
+ >>> np.broadcast_to(x, (3, 3))
409
+ array([[1, 2, 3],
410
+ [1, 2, 3],
411
+ [1, 2, 3]])
412
+ """
413
+ return _broadcast_to(array, shape, subok=subok, readonly=True)
414
+
415
+
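The returned view is read-only and the broadcast axis has stride 0, so all rows alias the same memory (a quick check, illustrative):

    import numpy as np

    x = np.array([1, 2, 3])
    y = np.broadcast_to(x, (3, 3))
    assert y.strides == (0, x.strides[0])
    assert not y.flags.writeable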
416
+ def _broadcast_shape(*args):
417
+ """Returns the shape of the arrays that would result from broadcasting the
418
+ supplied arrays against each other.
419
+ """
420
+ # use the old-iterator because np.nditer does not handle size 0 arrays
421
+ # consistently
422
+ b = np.broadcast(*args[:32])
423
+ # unfortunately, it cannot handle 32 or more arguments directly
424
+ for pos in range(32, len(args), 31):
425
+ # ironically, np.broadcast does not properly handle np.broadcast
426
+ # objects (it treats them as scalars)
427
+ # use broadcasting to avoid allocating the full array
428
+ b = broadcast_to(0, b.shape)
429
+ b = np.broadcast(b, *args[pos:(pos + 31)])
430
+ return b.shape
431
+
432
+
433
+ @set_module('numpy')
434
+ def broadcast_shapes(*args):
435
+ """
436
+ Broadcast the input shapes into a single shape.
437
+
438
+ :ref:`Learn more about broadcasting here <basics.broadcasting>`.
439
+
440
+ .. versionadded:: 1.20.0
441
+
442
+ Parameters
443
+ ----------
444
+ `*args` : tuples of ints, or ints
445
+ The shapes to be broadcast against each other.
446
+
447
+ Returns
448
+ -------
449
+ tuple
450
+ Broadcasted shape.
451
+
452
+ Raises
453
+ ------
454
+ ValueError
455
+ If the shapes are not compatible and cannot be broadcast according
456
+ to NumPy's broadcasting rules.
457
+
458
+ See Also
459
+ --------
460
+ broadcast
461
+ broadcast_arrays
462
+ broadcast_to
463
+
464
+ Examples
465
+ --------
466
+ >>> np.broadcast_shapes((1, 2), (3, 1), (3, 2))
467
+ (3, 2)
468
+
469
+ >>> np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7))
470
+ (5, 6, 7)
471
+ """
472
+ arrays = [np.empty(x, dtype=[]) for x in args]
473
+ return _broadcast_shape(*arrays)
474
+
475
+
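The `dtype=[]` trick above costs no element storage: the probe arrays have itemsize 0, so shapes combine without allocating real data (illustrative):

    import numpy as np

    probe = np.empty((5, 6, 1), dtype=[])
    assert probe.nbytes == 0
    assert np.broadcast_shapes((6, 7), (5, 6, 1), (7,), (5, 1, 7)) == (5, 6, 7)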
476
+ def _broadcast_arrays_dispatcher(*args, subok=None):
477
+ return args
478
+
479
+
480
+ @array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
481
+ def broadcast_arrays(*args, subok=False):
482
+ """
483
+ Broadcast any number of arrays against each other.
484
+
485
+ Parameters
486
+ ----------
487
+ `*args` : array_likes
488
+ The arrays to broadcast.
489
+
490
+ subok : bool, optional
491
+ If True, then sub-classes will be passed-through, otherwise
492
+ the returned arrays will be forced to be a base-class array (default).
493
+
494
+ Returns
495
+ -------
496
+ broadcasted : list of arrays
497
+ These arrays are views on the original arrays. They are typically
498
+ not contiguous. Furthermore, more than one element of a
499
+ broadcasted array may refer to a single memory location. If you need
500
+ to write to the arrays, make copies first. While you can set the
501
+ ``writable`` flag True, writing to a single output value may end up
502
+ changing more than one location in the output array.
503
+
504
+ .. deprecated:: 1.17
505
+ The output is currently marked so that if written to, a deprecation
506
+ warning will be emitted. A future version will set the
507
+ ``writable`` flag False so writing to it will raise an error.
508
+
509
+ See Also
510
+ --------
511
+ broadcast
512
+ broadcast_to
513
+ broadcast_shapes
514
+
515
+ Examples
516
+ --------
517
+ >>> x = np.array([[1,2,3]])
518
+ >>> y = np.array([[4],[5]])
519
+ >>> np.broadcast_arrays(x, y)
520
+ [array([[1, 2, 3],
521
+ [1, 2, 3]]), array([[4, 4, 4],
522
+ [5, 5, 5]])]
523
+
524
+ Here is a useful idiom for getting contiguous copies instead of
525
+ non-contiguous views.
526
+
527
+ >>> [np.array(a) for a in np.broadcast_arrays(x, y)]
528
+ [array([[1, 2, 3],
529
+ [1, 2, 3]]), array([[4, 4, 4],
530
+ [5, 5, 5]])]
531
+
532
+ """
533
+ # nditer is not used here to avoid the limit of 32 arrays.
534
+ # Otherwise, something like the following one-liner would suffice:
535
+ # return np.nditer(args, flags=['multi_index', 'zerosize_ok'],
536
+ # order='C').itviews
537
+
538
+ args = [np.array(_m, copy=False, subok=subok) for _m in args]
539
+
540
+ shape = _broadcast_shape(*args)
541
+
542
+ if all(array.shape == shape for array in args):
543
+ # Common case where nothing needs to be broadcasted.
544
+ return args
545
+
546
+ return [_broadcast_to(array, shape, subok=subok, readonly=False)
547
+ for array in args]
.venv/lib/python3.11/site-packages/numpy/lib/stride_tricks.pyi ADDED
@@ -0,0 +1,80 @@
1
+ from collections.abc import Iterable
2
+ from typing import Any, TypeVar, overload, SupportsIndex
3
+
4
+ from numpy import generic
5
+ from numpy._typing import (
6
+ NDArray,
7
+ ArrayLike,
8
+ _ShapeLike,
9
+ _Shape,
10
+ _ArrayLike
11
+ )
12
+
13
+ _SCT = TypeVar("_SCT", bound=generic)
14
+
15
+ __all__: list[str]
16
+
17
+ class DummyArray:
18
+ __array_interface__: dict[str, Any]
19
+ base: None | NDArray[Any]
20
+ def __init__(
21
+ self,
22
+ interface: dict[str, Any],
23
+ base: None | NDArray[Any] = ...,
24
+ ) -> None: ...
25
+
26
+ @overload
27
+ def as_strided(
28
+ x: _ArrayLike[_SCT],
29
+ shape: None | Iterable[int] = ...,
30
+ strides: None | Iterable[int] = ...,
31
+ subok: bool = ...,
32
+ writeable: bool = ...,
33
+ ) -> NDArray[_SCT]: ...
34
+ @overload
35
+ def as_strided(
36
+ x: ArrayLike,
37
+ shape: None | Iterable[int] = ...,
38
+ strides: None | Iterable[int] = ...,
39
+ subok: bool = ...,
40
+ writeable: bool = ...,
41
+ ) -> NDArray[Any]: ...
42
+
43
+ @overload
44
+ def sliding_window_view(
45
+ x: _ArrayLike[_SCT],
46
+ window_shape: int | Iterable[int],
47
+ axis: None | SupportsIndex = ...,
48
+ *,
49
+ subok: bool = ...,
50
+ writeable: bool = ...,
51
+ ) -> NDArray[_SCT]: ...
52
+ @overload
53
+ def sliding_window_view(
54
+ x: ArrayLike,
55
+ window_shape: int | Iterable[int],
56
+ axis: None | SupportsIndex = ...,
57
+ *,
58
+ subok: bool = ...,
59
+ writeable: bool = ...,
60
+ ) -> NDArray[Any]: ...
61
+
62
+ @overload
63
+ def broadcast_to(
64
+ array: _ArrayLike[_SCT],
65
+ shape: int | Iterable[int],
66
+ subok: bool = ...,
67
+ ) -> NDArray[_SCT]: ...
68
+ @overload
69
+ def broadcast_to(
70
+ array: ArrayLike,
71
+ shape: int | Iterable[int],
72
+ subok: bool = ...,
73
+ ) -> NDArray[Any]: ...
74
+
75
+ def broadcast_shapes(*args: _ShapeLike) -> _Shape: ...
76
+
77
+ def broadcast_arrays(
78
+ *args: ArrayLike,
79
+ subok: bool = ...,
80
+ ) -> list[NDArray[Any]]: ...
.venv/lib/python3.11/site-packages/numpy/lib/twodim_base.py ADDED
@@ -0,0 +1,1183 @@
1
+ """ Basic functions for manipulating 2d arrays
2
+
3
+ """
4
+ import functools
5
+ import operator
6
+
7
+ from numpy.core.numeric import (
8
+ asanyarray, arange, zeros, greater_equal, multiply, ones,
9
+ asarray, where, int8, int16, int32, int64, intp, empty, promote_types,
10
+ diagonal, nonzero, indices
11
+ )
12
+ from numpy.core.overrides import set_array_function_like_doc, set_module
13
+ from numpy.core import overrides
14
+ from numpy.core import iinfo
15
+ from numpy.lib.stride_tricks import broadcast_to
16
+
17
+
18
+ __all__ = [
19
+ 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
20
+ 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
21
+ 'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
22
+
23
+
24
+ array_function_dispatch = functools.partial(
25
+ overrides.array_function_dispatch, module='numpy')
26
+
27
+
28
+ i1 = iinfo(int8)
29
+ i2 = iinfo(int16)
30
+ i4 = iinfo(int32)
31
+
32
+
33
+ def _min_int(low, high):
34
+ """ get small int that fits the range """
35
+ if high <= i1.max and low >= i1.min:
36
+ return int8
37
+ if high <= i2.max and low >= i2.min:
38
+ return int16
39
+ if high <= i4.max and low >= i4.min:
40
+ return int32
41
+ return int64
42
+
43
+
44
+ def _flip_dispatcher(m):
45
+ return (m,)
46
+
47
+
48
+ @array_function_dispatch(_flip_dispatcher)
49
+ def fliplr(m):
50
+ """
51
+ Reverse the order of elements along axis 1 (left/right).
52
+
53
+ For a 2-D array, this flips the entries in each row in the left/right
54
+ direction. Columns are preserved, but appear in a different order than
55
+ before.
56
+
57
+ Parameters
58
+ ----------
59
+ m : array_like
60
+ Input array, must be at least 2-D.
61
+
62
+ Returns
63
+ -------
64
+ f : ndarray
65
+ A view of `m` with the columns reversed. Since a view
66
+ is returned, this operation is :math:`\\mathcal O(1)`.
67
+
68
+ See Also
69
+ --------
70
+ flipud : Flip array in the up/down direction.
71
+ flip : Flip array in one or more dimensions.
72
+ rot90 : Rotate array counterclockwise.
73
+
74
+ Notes
75
+ -----
76
+ Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``.
77
+ Requires the array to be at least 2-D.
78
+
79
+ Examples
80
+ --------
81
+ >>> A = np.diag([1.,2.,3.])
82
+ >>> A
83
+ array([[1., 0., 0.],
84
+ [0., 2., 0.],
85
+ [0., 0., 3.]])
86
+ >>> np.fliplr(A)
87
+ array([[0., 0., 1.],
88
+ [0., 2., 0.],
89
+ [3., 0., 0.]])
90
+
91
+ >>> A = np.random.randn(2,3,5)
92
+ >>> np.all(np.fliplr(A) == A[:,::-1,...])
93
+ True
94
+
95
+ """
96
+ m = asanyarray(m)
97
+ if m.ndim < 2:
98
+ raise ValueError("Input must be >= 2-d.")
99
+ return m[:, ::-1]
100
+
101
+
102
+ @array_function_dispatch(_flip_dispatcher)
103
+ def flipud(m):
104
+ """
105
+ Reverse the order of elements along axis 0 (up/down).
106
+
107
+ For a 2-D array, this flips the entries in each column in the up/down
108
+ direction. Rows are preserved, but appear in a different order than before.
109
+
110
+ Parameters
111
+ ----------
112
+ m : array_like
113
+ Input array.
114
+
115
+ Returns
116
+ -------
117
+ out : array_like
118
+ A view of `m` with the rows reversed. Since a view is
119
+ returned, this operation is :math:`\\mathcal O(1)`.
120
+
121
+ See Also
122
+ --------
123
+ fliplr : Flip array in the left/right direction.
124
+ flip : Flip array in one or more dimensions.
125
+ rot90 : Rotate array counterclockwise.
126
+
127
+ Notes
128
+ -----
129
+ Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``.
130
+ Requires the array to be at least 1-D.
131
+
132
+ Examples
133
+ --------
134
+ >>> A = np.diag([1.0, 2, 3])
135
+ >>> A
136
+ array([[1., 0., 0.],
137
+ [0., 2., 0.],
138
+ [0., 0., 3.]])
139
+ >>> np.flipud(A)
140
+ array([[0., 0., 3.],
141
+ [0., 2., 0.],
142
+ [1., 0., 0.]])
143
+
144
+ >>> A = np.random.randn(2,3,5)
145
+ >>> np.all(np.flipud(A) == A[::-1,...])
146
+ True
147
+
148
+ >>> np.flipud([1,2])
149
+ array([2, 1])
150
+
151
+ """
152
+ m = asanyarray(m)
153
+ if m.ndim < 1:
154
+ raise ValueError("Input must be >= 1-d.")
155
+ return m[::-1, ...]
156
+
157
+
158
+ @set_array_function_like_doc
159
+ @set_module('numpy')
160
+ def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
161
+ """
162
+ Return a 2-D array with ones on the diagonal and zeros elsewhere.
163
+
164
+ Parameters
165
+ ----------
166
+ N : int
167
+ Number of rows in the output.
168
+ M : int, optional
169
+ Number of columns in the output. If None, defaults to `N`.
170
+ k : int, optional
171
+ Index of the diagonal: 0 (the default) refers to the main diagonal,
172
+ a positive value refers to an upper diagonal, and a negative value
173
+ to a lower diagonal.
174
+ dtype : data-type, optional
175
+ Data-type of the returned array.
176
+ order : {'C', 'F'}, optional
177
+ Whether the output should be stored in row-major (C-style) or
178
+ column-major (Fortran-style) order in memory.
179
+
180
+ .. versionadded:: 1.14.0
181
+ ${ARRAY_FUNCTION_LIKE}
182
+
183
+ .. versionadded:: 1.20.0
184
+
185
+ Returns
186
+ -------
187
+ I : ndarray of shape (N,M)
188
+ An array where all elements are equal to zero, except for the `k`-th
189
+ diagonal, whose values are equal to one.
190
+
191
+ See Also
192
+ --------
193
+ identity : (almost) equivalent function
194
+ diag : diagonal 2-D array from a 1-D array specified by the user.
195
+
196
+ Examples
197
+ --------
198
+ >>> np.eye(2, dtype=int)
199
+ array([[1, 0],
200
+ [0, 1]])
201
+ >>> np.eye(3, k=1)
202
+ array([[0., 1., 0.],
203
+ [0., 0., 1.],
204
+ [0., 0., 0.]])
205
+
206
+ """
207
+ if like is not None:
208
+ return _eye_with_like(like, N, M=M, k=k, dtype=dtype, order=order)
209
+ if M is None:
210
+ M = N
211
+ m = zeros((N, M), dtype=dtype, order=order)
212
+ if k >= M:
213
+ return m
214
+ # Ensure M and k are integers, so we don't get any surprise casting
215
+ # results in the expressions `M-k` and `M+1` used below. This avoids
216
+ # a problem with inputs with type (for example) np.uint64.
217
+ M = operator.index(M)
218
+ k = operator.index(k)
219
+ if k >= 0:
220
+ i = k
221
+ else:
222
+ i = (-k) * M
223
+ m[:M-k].flat[i::M+1] = 1
224
+ return m
225
+
226
+
227
+ _eye_with_like = array_function_dispatch()(eye)
228
+
229
+
230
+ def _diag_dispatcher(v, k=None):
231
+ return (v,)
232
+
233
+
234
+ @array_function_dispatch(_diag_dispatcher)
235
+ def diag(v, k=0):
236
+ """
237
+ Extract a diagonal or construct a diagonal array.
238
+
239
+ See the more detailed documentation for ``numpy.diagonal`` if you use this
240
+ function to extract a diagonal and wish to write to the resulting array;
241
+ whether it returns a copy or a view depends on what version of numpy you
242
+ are using.
243
+
244
+ Parameters
245
+ ----------
246
+ v : array_like
247
+ If `v` is a 2-D array, return a copy of its `k`-th diagonal.
248
+ If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
249
+ diagonal.
250
+ k : int, optional
251
+ Diagonal in question. The default is 0. Use `k>0` for diagonals
252
+ above the main diagonal, and `k<0` for diagonals below the main
253
+ diagonal.
254
+
255
+ Returns
256
+ -------
257
+ out : ndarray
258
+ The extracted diagonal or constructed diagonal array.
259
+
260
+ See Also
261
+ --------
262
+ diagonal : Return specified diagonals.
263
+ diagflat : Create a 2-D array with the flattened input as a diagonal.
264
+ trace : Sum along diagonals.
265
+ triu : Upper triangle of an array.
266
+ tril : Lower triangle of an array.
267
+
268
+ Examples
269
+ --------
270
+ >>> x = np.arange(9).reshape((3,3))
271
+ >>> x
272
+ array([[0, 1, 2],
273
+ [3, 4, 5],
274
+ [6, 7, 8]])
275
+
276
+ >>> np.diag(x)
277
+ array([0, 4, 8])
278
+ >>> np.diag(x, k=1)
279
+ array([1, 5])
280
+ >>> np.diag(x, k=-1)
281
+ array([3, 7])
282
+
283
+ >>> np.diag(np.diag(x))
284
+ array([[0, 0, 0],
285
+ [0, 4, 0],
286
+ [0, 0, 8]])
287
+
288
+ """
289
+ v = asanyarray(v)
290
+ s = v.shape
291
+ if len(s) == 1:
292
+ n = s[0]+abs(k)
293
+ res = zeros((n, n), v.dtype)
294
+ if k >= 0:
295
+ i = k
296
+ else:
297
+ i = (-k) * n
298
+ res[:n-k].flat[i::n+1] = v
299
+ return res
300
+ elif len(s) == 2:
301
+ return diagonal(v, k)
302
+ else:
303
+ raise ValueError("Input must be 1- or 2-d.")
304
+
305
+
306
+ @array_function_dispatch(_diag_dispatcher)
307
+ def diagflat(v, k=0):
308
+ """
309
+ Create a two-dimensional array with the flattened input as a diagonal.
310
+
311
+ Parameters
312
+ ----------
313
+ v : array_like
314
+ Input data, which is flattened and set as the `k`-th
315
+ diagonal of the output.
316
+ k : int, optional
317
+ Diagonal to set; 0, the default, corresponds to the "main" diagonal,
318
+ a positive (negative) `k` giving the number of the diagonal above
319
+ (below) the main.
320
+
321
+ Returns
322
+ -------
323
+ out : ndarray
324
+ The 2-D output array.
325
+
326
+ See Also
327
+ --------
328
+ diag : MATLAB work-alike for 1-D and 2-D arrays.
329
+ diagonal : Return specified diagonals.
330
+ trace : Sum along diagonals.
331
+
332
+ Examples
333
+ --------
334
+ >>> np.diagflat([[1,2], [3,4]])
335
+ array([[1, 0, 0, 0],
336
+ [0, 2, 0, 0],
337
+ [0, 0, 3, 0],
338
+ [0, 0, 0, 4]])
339
+
340
+ >>> np.diagflat([1,2], 1)
341
+ array([[0, 1, 0],
342
+ [0, 0, 2],
343
+ [0, 0, 0]])
344
+
345
+ """
346
+ try:
347
+ wrap = v.__array_wrap__
348
+ except AttributeError:
349
+ wrap = None
350
+ v = asarray(v).ravel()
351
+ s = len(v)
352
+ n = s + abs(k)
353
+ res = zeros((n, n), v.dtype)
354
+ if (k >= 0):
355
+ i = arange(0, n-k, dtype=intp)
356
+ fi = i+k+i*n
357
+ else:
358
+ i = arange(0, n+k, dtype=intp)
359
+ fi = i+(i-k)*n
360
+ res.flat[fi] = v
361
+ if not wrap:
362
+ return res
363
+ return wrap(res)
364
+
365
+
366
+ @set_array_function_like_doc
367
+ @set_module('numpy')
368
+ def tri(N, M=None, k=0, dtype=float, *, like=None):
369
+ """
370
+ An array with ones at and below the given diagonal and zeros elsewhere.
371
+
372
+ Parameters
373
+ ----------
374
+ N : int
375
+ Number of rows in the array.
376
+ M : int, optional
377
+ Number of columns in the array.
378
+ By default, `M` is taken equal to `N`.
379
+ k : int, optional
380
+ The sub-diagonal at and below which the array is filled.
381
+ `k` = 0 is the main diagonal, while `k` < 0 is below it,
382
+ and `k` > 0 is above. The default is 0.
383
+ dtype : dtype, optional
384
+ Data type of the returned array. The default is float.
385
+ ${ARRAY_FUNCTION_LIKE}
386
+
387
+ .. versionadded:: 1.20.0
388
+
389
+ Returns
390
+ -------
391
+ tri : ndarray of shape (N, M)
392
+ Array with its lower triangle filled with ones and zero elsewhere;
393
+ in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
394
+
395
+ Examples
396
+ --------
397
+ >>> np.tri(3, 5, 2, dtype=int)
398
+ array([[1, 1, 1, 0, 0],
399
+ [1, 1, 1, 1, 0],
400
+ [1, 1, 1, 1, 1]])
401
+
402
+ >>> np.tri(3, 5, -1)
403
+ array([[0., 0., 0., 0., 0.],
404
+ [1., 0., 0., 0., 0.],
405
+ [1., 1., 0., 0., 0.]])
406
+
407
+ """
408
+ if like is not None:
409
+ return _tri_with_like(like, N, M=M, k=k, dtype=dtype)
410
+
411
+ if M is None:
412
+ M = N
413
+
414
+ m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
415
+ arange(-k, M-k, dtype=_min_int(-k, M - k)))
416
+
417
+ # Avoid making a copy if the requested type is already bool
418
+ m = m.astype(dtype, copy=False)
419
+
420
+ return m
421
+
422
+
423
+ _tri_with_like = array_function_dispatch()(tri)
424
+
425
+
426
+ def _trilu_dispatcher(m, k=None):
427
+ return (m,)
428
+
429
+
430
+ @array_function_dispatch(_trilu_dispatcher)
431
+ def tril(m, k=0):
432
+ """
433
+ Lower triangle of an array.
434
+
435
+ Return a copy of an array with elements above the `k`-th diagonal zeroed.
436
+ For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two
437
+ axes.
438
+
439
+ Parameters
440
+ ----------
441
+ m : array_like, shape (..., M, N)
442
+ Input array.
443
+ k : int, optional
444
+ Diagonal above which to zero elements. `k = 0` (the default) is the
445
+ main diagonal, `k < 0` is below it and `k > 0` is above.
446
+
447
+ Returns
448
+ -------
449
+ tril : ndarray, shape (..., M, N)
450
+ Lower triangle of `m`, of same shape and data-type as `m`.
451
+
452
+ See Also
453
+ --------
454
+ triu : same thing, only for the upper triangle
455
+
456
+ Examples
457
+ --------
458
+ >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
459
+ array([[ 0, 0, 0],
460
+ [ 4, 0, 0],
461
+ [ 7, 8, 0],
462
+ [10, 11, 12]])
463
+
464
+ >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5))
465
+ array([[[ 0, 0, 0, 0, 0],
466
+ [ 5, 6, 0, 0, 0],
467
+ [10, 11, 12, 0, 0],
468
+ [15, 16, 17, 18, 0]],
469
+ [[20, 0, 0, 0, 0],
470
+ [25, 26, 0, 0, 0],
471
+ [30, 31, 32, 0, 0],
472
+ [35, 36, 37, 38, 0]],
473
+ [[40, 0, 0, 0, 0],
474
+ [45, 46, 0, 0, 0],
475
+ [50, 51, 52, 0, 0],
476
+ [55, 56, 57, 58, 0]]])
477
+
478
+ """
479
+ m = asanyarray(m)
480
+ mask = tri(*m.shape[-2:], k=k, dtype=bool)
481
+
482
+ return where(mask, m, zeros(1, m.dtype))
483
+
484
+
485
+ @array_function_dispatch(_trilu_dispatcher)
486
+ def triu(m, k=0):
487
+ """
488
+ Upper triangle of an array.
489
+
490
+ Return a copy of an array with the elements below the `k`-th diagonal
491
+ zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the
492
+ final two axes.
493
+
494
+ Please refer to the documentation for `tril` for further details.
495
+
496
+ See Also
497
+ --------
498
+ tril : lower triangle of an array
499
+
500
+ Examples
501
+ --------
502
+ >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
503
+ array([[ 1, 2, 3],
504
+ [ 4, 5, 6],
505
+ [ 0, 8, 9],
506
+ [ 0, 0, 12]])
507
+
508
+ >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5))
509
+ array([[[ 0, 1, 2, 3, 4],
510
+ [ 0, 6, 7, 8, 9],
511
+ [ 0, 0, 12, 13, 14],
512
+ [ 0, 0, 0, 18, 19]],
513
+ [[20, 21, 22, 23, 24],
514
+ [ 0, 26, 27, 28, 29],
515
+ [ 0, 0, 32, 33, 34],
516
+ [ 0, 0, 0, 38, 39]],
517
+ [[40, 41, 42, 43, 44],
518
+ [ 0, 46, 47, 48, 49],
519
+ [ 0, 0, 52, 53, 54],
520
+ [ 0, 0, 0, 58, 59]]])
521
+
522
+ """
523
+ m = asanyarray(m)
524
+ mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
525
+
526
+ return where(mask, zeros(1, m.dtype), m)
527
+
528
+
529
+ def _vander_dispatcher(x, N=None, increasing=None):
530
+ return (x,)
531
+
532
+
533
+ # Originally borrowed from John Hunter and matplotlib
534
+ @array_function_dispatch(_vander_dispatcher)
535
+ def vander(x, N=None, increasing=False):
536
+ """
537
+ Generate a Vandermonde matrix.
538
+
539
+ The columns of the output matrix are powers of the input vector. The
540
+ order of the powers is determined by the `increasing` boolean argument.
541
+ Specifically, when `increasing` is False, the `i`-th output column is
542
+ the input vector raised element-wise to the power of ``N - i - 1``. Such
543
+ a matrix with a geometric progression in each row is named for Alexandre-
544
+ Theophile Vandermonde.
545
+
546
+ Parameters
547
+ ----------
548
+ x : array_like
549
+ 1-D input array.
550
+ N : int, optional
551
+ Number of columns in the output. If `N` is not specified, a square
552
+ array is returned (``N = len(x)``).
553
+ increasing : bool, optional
554
+ Order of the powers of the columns. If True, the powers increase
555
+ from left to right, if False (the default) they are reversed.
556
+
557
+ .. versionadded:: 1.9.0
558
+
559
+ Returns
560
+ -------
561
+ out : ndarray
562
+ Vandermonde matrix. If `increasing` is False, the first column is
563
+ ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
564
+ True, the columns are ``x^0, x^1, ..., x^(N-1)``.
565
+
566
+ See Also
567
+ --------
568
+ polynomial.polynomial.polyvander
569
+
570
+ Examples
571
+ --------
572
+ >>> x = np.array([1, 2, 3, 5])
573
+ >>> N = 3
574
+ >>> np.vander(x, N)
575
+ array([[ 1, 1, 1],
576
+ [ 4, 2, 1],
577
+ [ 9, 3, 1],
578
+ [25, 5, 1]])
579
+
580
+ >>> np.column_stack([x**(N-1-i) for i in range(N)])
581
+ array([[ 1, 1, 1],
582
+ [ 4, 2, 1],
583
+ [ 9, 3, 1],
584
+ [25, 5, 1]])
585
+
586
+ >>> x = np.array([1, 2, 3, 5])
587
+ >>> np.vander(x)
588
+ array([[ 1, 1, 1, 1],
589
+ [ 8, 4, 2, 1],
590
+ [ 27, 9, 3, 1],
591
+ [125, 25, 5, 1]])
592
+ >>> np.vander(x, increasing=True)
593
+ array([[ 1, 1, 1, 1],
594
+ [ 1, 2, 4, 8],
595
+ [ 1, 3, 9, 27],
596
+ [ 1, 5, 25, 125]])
597
+
598
+ The determinant of a square Vandermonde matrix is the product
599
+ of the differences between the values of the input vector:
600
+
601
+ >>> np.linalg.det(np.vander(x))
602
+ 48.000000000000043 # may vary
603
+ >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
604
+ 48
605
+
606
+ """
607
+ x = asarray(x)
608
+ if x.ndim != 1:
609
+ raise ValueError("x must be a one-dimensional array or sequence.")
610
+ if N is None:
611
+ N = len(x)
612
+
613
+ v = empty((len(x), N), dtype=promote_types(x.dtype, int))
614
+ tmp = v[:, ::-1] if not increasing else v
615
+
616
+ if N > 0:
617
+ tmp[:, 0] = 1
618
+ if N > 1:
619
+ tmp[:, 1:] = x[:, None]
620
+ multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
621
+
622
+ return v
623
+
624
+
625
+ def _histogram2d_dispatcher(x, y, bins=None, range=None, density=None,
626
+ weights=None):
627
+ yield x
628
+ yield y
629
+
630
+ # This terrible logic is adapted from the checks in histogram2d
631
+ try:
632
+ N = len(bins)
633
+ except TypeError:
634
+ N = 1
635
+ if N == 2:
636
+ yield from bins # bins=[x, y]
637
+ else:
638
+ yield bins
639
+
640
+ yield weights
641
+
642
+
643
+ @array_function_dispatch(_histogram2d_dispatcher)
644
+ def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
645
+ """
646
+ Compute the bi-dimensional histogram of two data samples.
647
+
648
+ Parameters
649
+ ----------
650
+ x : array_like, shape (N,)
651
+ An array containing the x coordinates of the points to be
652
+ histogrammed.
653
+ y : array_like, shape (N,)
654
+ An array containing the y coordinates of the points to be
655
+ histogrammed.
656
+ bins : int or array_like or [int, int] or [array, array], optional
657
+ The bin specification:
658
+
659
+ * If int, the number of bins for the two dimensions (nx=ny=bins).
660
+ * If array_like, the bin edges for the two dimensions
661
+ (x_edges=y_edges=bins).
662
+ * If [int, int], the number of bins in each dimension
663
+ (nx, ny = bins).
664
+ * If [array, array], the bin edges in each dimension
665
+ (x_edges, y_edges = bins).
666
+ * A combination [int, array] or [array, int], where int
667
+ is the number of bins and array is the bin edges.
668
+
669
+ range : array_like, shape(2,2), optional
670
+ The leftmost and rightmost edges of the bins along each dimension
671
+ (if not specified explicitly in the `bins` parameters):
672
+ ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
673
+ will be considered outliers and not tallied in the histogram.
674
+ density : bool, optional
675
+ If False, the default, returns the number of samples in each bin.
676
+ If True, returns the probability *density* function at the bin,
677
+ ``bin_count / sample_count / bin_area``.
678
+ weights : array_like, shape(N,), optional
679
+ An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
680
+ Weights are normalized to 1 if `density` is True. If `density` is
681
+ False, the values of the returned histogram are equal to the sum of
682
+ the weights belonging to the samples falling into each bin.
683
+
684
+ Returns
685
+ -------
686
+ H : ndarray, shape(nx, ny)
687
+ The bi-dimensional histogram of samples `x` and `y`. Values in `x`
688
+ are histogrammed along the first dimension and values in `y` are
689
+ histogrammed along the second dimension.
690
+ xedges : ndarray, shape(nx+1,)
691
+ The bin edges along the first dimension.
692
+ yedges : ndarray, shape(ny+1,)
693
+ The bin edges along the second dimension.
694
+
695
+ See Also
696
+ --------
697
+ histogram : 1D histogram
698
+ histogramdd : Multidimensional histogram
699
+
700
+ Notes
701
+ -----
702
+ When `density` is True, then the returned histogram is the sample
703
+ density, defined such that the sum over bins of the product
704
+ ``bin_value * bin_area`` is 1.
705
+
706
+ Please note that the histogram does not follow the Cartesian convention
707
+ where `x` values are on the abscissa and `y` values on the ordinate
708
+ axis. Rather, `x` is histogrammed along the first dimension of the
709
+ array (vertical), and `y` along the second dimension of the array
710
+ (horizontal). This ensures compatibility with `histogramdd`.
711
+
712
+ Examples
713
+ --------
714
+ >>> from matplotlib.image import NonUniformImage
715
+ >>> import matplotlib.pyplot as plt
716
+
717
+ Construct a 2-D histogram with variable bin width. First define the bin
718
+ edges:
719
+
720
+ >>> xedges = [0, 1, 3, 5]
721
+ >>> yedges = [0, 2, 3, 4, 6]
722
+
723
+ Next we create a histogram H with random bin content:
724
+
725
+ >>> x = np.random.normal(2, 1, 100)
726
+ >>> y = np.random.normal(1, 1, 100)
727
+ >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
728
+ >>> # Histogram does not follow Cartesian convention (see Notes),
729
+ >>> # therefore transpose H for visualization purposes.
730
+ >>> H = H.T
731
+
732
+ :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
733
+
734
+ >>> fig = plt.figure(figsize=(7, 3))
735
+ >>> ax = fig.add_subplot(131, title='imshow: square bins')
736
+ >>> plt.imshow(H, interpolation='nearest', origin='lower',
737
+ ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
738
+ <matplotlib.image.AxesImage object at 0x...>
739
+
740
+ :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
741
+
742
+ >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
743
+ ... aspect='equal')
744
+ >>> X, Y = np.meshgrid(xedges, yedges)
745
+ >>> ax.pcolormesh(X, Y, H)
746
+ <matplotlib.collections.QuadMesh object at 0x...>
747
+
748
+ :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
749
+ display actual bin edges with interpolation:
750
+
751
+ >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
752
+ ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
753
+ >>> im = NonUniformImage(ax, interpolation='bilinear')
754
+ >>> xcenters = (xedges[:-1] + xedges[1:]) / 2
755
+ >>> ycenters = (yedges[:-1] + yedges[1:]) / 2
756
+ >>> im.set_data(xcenters, ycenters, H)
757
+ >>> ax.add_image(im)
758
+ >>> plt.show()
759
+
760
+ It is also possible to construct a 2-D histogram without specifying bin
761
+ edges:
762
+
763
+ >>> # Generate non-symmetric test data
764
+ >>> n = 10000
765
+ >>> x = np.linspace(1, 100, n)
766
+ >>> y = 2*np.log(x) + np.random.rand(n) - 0.5
767
+ >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges
768
+ >>> H, yedges, xedges = np.histogram2d(y, x, bins=20)
769
+
770
+ Now we can plot the histogram using
771
+ :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a
772
+ :func:`hexbin <matplotlib.pyplot.hexbin>` for comparison.
773
+
774
+ >>> # Plot histogram using pcolormesh
775
+ >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)
776
+ >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow')
777
+ >>> ax1.plot(x, 2*np.log(x), 'k-')
778
+ >>> ax1.set_xlim(x.min(), x.max())
779
+ >>> ax1.set_ylim(y.min(), y.max())
780
+ >>> ax1.set_xlabel('x')
781
+ >>> ax1.set_ylabel('y')
782
+ >>> ax1.set_title('histogram2d')
783
+ >>> ax1.grid()
784
+
785
+ >>> # Create hexbin plot for comparison
786
+ >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow')
787
+ >>> ax2.plot(x, 2*np.log(x), 'k-')
788
+ >>> ax2.set_title('hexbin')
789
+ >>> ax2.set_xlim(x.min(), x.max())
790
+ >>> ax2.set_xlabel('x')
791
+ >>> ax2.grid()
792
+
793
+ >>> plt.show()
794
+ """
795
+ from numpy import histogramdd
796
+
797
+ if len(x) != len(y):
798
+ raise ValueError('x and y must have the same length.')
799
+
800
+ try:
801
+ N = len(bins)
802
+ except TypeError:
803
+ N = 1
804
+
805
+ if N != 1 and N != 2:
806
+ xedges = yedges = asarray(bins)
807
+ bins = [xedges, yedges]
808
+ hist, edges = histogramdd([x, y], bins, range, density, weights)
809
+ return hist, edges[0], edges[1]
810
+
811
+
812
+ @set_module('numpy')
813
+ def mask_indices(n, mask_func, k=0):
814
+ """
815
+ Return the indices to access (n, n) arrays, given a masking function.
816
+
817
+ Assume `mask_func` is a function that, for a square array a of size
818
+ ``(n, n)`` with a possible offset argument `k`, when called as
819
+ ``mask_func(a, k)`` returns a new array with zeros in certain locations
820
+ (functions like `triu` or `tril` do precisely this). Then this function
821
+ returns the indices where the non-zero values would be located.
822
+
823
+ Parameters
824
+ ----------
825
+ n : int
826
+ The returned indices will be valid to access arrays of shape (n, n).
827
+ mask_func : callable
828
+ A function whose call signature is similar to that of `triu`, `tril`.
829
+ That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
830
+ `k` is an optional argument to the function.
831
+ k : scalar
832
+ An optional argument which is passed through to `mask_func`. Functions
833
+ like `triu`, `tril` take a second argument that is interpreted as an
834
+ offset.
835
+
836
+ Returns
837
+ -------
838
+ indices : tuple of arrays.
839
+ The `n` arrays of indices corresponding to the locations where
840
+ ``mask_func(np.ones((n, n)), k)`` is True.
841
+
842
+ See Also
843
+ --------
844
+ triu, tril, triu_indices, tril_indices
845
+
846
+ Notes
847
+ -----
848
+ .. versionadded:: 1.4.0
849
+
850
+ Examples
851
+ --------
852
+ These are the indices that would allow you to access the upper triangular
853
+ part of any 3x3 array:
854
+
855
+ >>> iu = np.mask_indices(3, np.triu)
856
+
857
+ For example, if `a` is a 3x3 array:
858
+
859
+ >>> a = np.arange(9).reshape(3, 3)
860
+ >>> a
861
+ array([[0, 1, 2],
862
+ [3, 4, 5],
863
+ [6, 7, 8]])
864
+ >>> a[iu]
865
+ array([0, 1, 2, 4, 5, 8])
866
+
867
+ An offset can be passed also to the masking function. This gets us the
868
+ indices starting on the first diagonal right of the main one:
869
+
870
+ >>> iu1 = np.mask_indices(3, np.triu, 1)
871
+
872
+ with which we now extract only three elements:
873
+
874
+ >>> a[iu1]
875
+ array([1, 2, 5])
876
+
877
+ """
878
+ m = ones((n, n), int)
879
+ a = mask_func(m, k)
880
+ return nonzero(a != 0)
881
+
882
+
883
+ @set_module('numpy')
884
+ def tril_indices(n, k=0, m=None):
885
+ """
886
+ Return the indices for the lower-triangle of an (n, m) array.
887
+
888
+ Parameters
889
+ ----------
890
+ n : int
891
+ The row dimension of the arrays for which the returned
892
+ indices will be valid.
893
+ k : int, optional
894
+ Diagonal offset (see `tril` for details).
895
+ m : int, optional
896
+ .. versionadded:: 1.9.0
897
+
898
+ The column dimension of the arrays for which the returned
899
+ arrays will be valid.
900
+ By default `m` is taken equal to `n`.
901
+
902
+
903
+ Returns
904
+ -------
905
+ inds : tuple of arrays
906
+ The indices for the triangle. The returned tuple contains two arrays,
907
+ each with the indices along one dimension of the array.
908
+
909
+ See also
910
+ --------
911
+ triu_indices : similar function, for upper-triangular.
912
+ mask_indices : generic function accepting an arbitrary mask function.
913
+ tril, triu
914
+
915
+ Notes
916
+ -----
917
+ .. versionadded:: 1.4.0
918
+
919
+ Examples
920
+ --------
921
+ Compute two different sets of indices to access 4x4 arrays, one for the
922
+ lower triangular part starting at the main diagonal, and one starting two
923
+ diagonals further right:
924
+
925
+ >>> il1 = np.tril_indices(4)
926
+ >>> il2 = np.tril_indices(4, 2)
927
+
928
+ Here is how they can be used with a sample array:
929
+
930
+ >>> a = np.arange(16).reshape(4, 4)
931
+ >>> a
932
+ array([[ 0, 1, 2, 3],
933
+ [ 4, 5, 6, 7],
934
+ [ 8, 9, 10, 11],
935
+ [12, 13, 14, 15]])
936
+
937
+ Both for indexing:
938
+
939
+ >>> a[il1]
940
+ array([ 0, 4, 5, ..., 13, 14, 15])
941
+
942
+ And for assigning values:
943
+
944
+ >>> a[il1] = -1
945
+ >>> a
946
+ array([[-1, 1, 2, 3],
947
+ [-1, -1, 6, 7],
948
+ [-1, -1, -1, 11],
949
+ [-1, -1, -1, -1]])
950
+
951
+ These cover almost the whole array (two diagonals right of the main one):
952
+
953
+ >>> a[il2] = -10
954
+ >>> a
955
+ array([[-10, -10, -10, 3],
956
+ [-10, -10, -10, -10],
957
+ [-10, -10, -10, -10],
958
+ [-10, -10, -10, -10]])
959
+
960
+ """
961
+ tri_ = tri(n, m, k=k, dtype=bool)
962
+
963
+ return tuple(broadcast_to(inds, tri_.shape)[tri_]
964
+ for inds in indices(tri_.shape, sparse=True))
965
+
966
+
967
+ def _trilu_indices_form_dispatcher(arr, k=None):
968
+ return (arr,)
969
+
970
+
971
+ @array_function_dispatch(_trilu_indices_form_dispatcher)
972
+ def tril_indices_from(arr, k=0):
973
+ """
974
+ Return the indices for the lower-triangle of arr.
975
+
976
+ See `tril_indices` for full details.
977
+
978
+ Parameters
979
+ ----------
980
+ arr : array_like
981
+ The indices will be valid for square arrays whose dimensions are
982
+ the same as arr.
983
+ k : int, optional
984
+ Diagonal offset (see `tril` for details).
985
+
986
+ Examples
987
+ --------
988
+
989
+ Create a 4 by 4 array.
990
+
991
+ >>> a = np.arange(16).reshape(4, 4)
992
+ >>> a
993
+ array([[ 0, 1, 2, 3],
994
+ [ 4, 5, 6, 7],
995
+ [ 8, 9, 10, 11],
996
+ [12, 13, 14, 15]])
997
+
998
+ Pass the array to get the indices of the lower triangular elements.
999
+
1000
+ >>> trili = np.tril_indices_from(a)
1001
+ >>> trili
1002
+ (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
1003
+
1004
+ >>> a[trili]
1005
+ array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
1006
+
1007
+ This is syntactic sugar for tril_indices().
1008
+
1009
+ >>> np.tril_indices(a.shape[0])
1010
+ (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
1011
+
1012
+ Use the `k` parameter to return the indices for the lower triangular array
1013
+ up to the k-th diagonal.
1014
+
1015
+ >>> trili1 = np.tril_indices_from(a, k=1)
1016
+ >>> a[trili1]
1017
+ array([ 0, 1, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15])
1018
+
1019
+ See Also
1020
+ --------
1021
+ tril_indices, tril, triu_indices_from
1022
+
1023
+ Notes
1024
+ -----
1025
+ .. versionadded:: 1.4.0
1026
+
1027
+ """
1028
+ if arr.ndim != 2:
1029
+ raise ValueError("input array must be 2-d")
1030
+ return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
1031
+
1032
+
1033
+ @set_module('numpy')
1034
+ def triu_indices(n, k=0, m=None):
1035
+ """
1036
+ Return the indices for the upper-triangle of an (n, m) array.
1037
+
1038
+ Parameters
1039
+ ----------
1040
+ n : int
1041
+ The size of the arrays for which the returned indices will
1042
+ be valid.
1043
+ k : int, optional
1044
+ Diagonal offset (see `triu` for details).
1045
+ m : int, optional
1046
+ .. versionadded:: 1.9.0
1047
+
1048
+ The column dimension of the arrays for which the returned
1049
+ arrays will be valid.
1050
+ By default `m` is taken equal to `n`.
1051
+
1052
+
1053
+ Returns
1054
+ -------
1055
+ inds : tuple, shape(2) of ndarrays, shape(`n`)
1056
+ The indices for the triangle. The returned tuple contains two arrays,
1057
+ each with the indices along one dimension of the array. Can be used
1058
+ to slice a ndarray of shape(`n`, `n`).
1059
+
1060
+ See also
1061
+ --------
1062
+ tril_indices : similar function, for lower-triangular.
1063
+ mask_indices : generic function accepting an arbitrary mask function.
1064
+ triu, tril
1065
+
1066
+ Notes
1067
+ -----
1068
+ .. versionadded:: 1.4.0
1069
+
1070
+ Examples
1071
+ --------
1072
+ Compute two different sets of indices to access 4x4 arrays, one for the
1073
+ upper triangular part starting at the main diagonal, and one starting two
1074
+ diagonals further right:
1075
+
1076
+ >>> iu1 = np.triu_indices(4)
1077
+ >>> iu2 = np.triu_indices(4, 2)
1078
+
1079
+ Here is how they can be used with a sample array:
1080
+
1081
+ >>> a = np.arange(16).reshape(4, 4)
1082
+ >>> a
1083
+ array([[ 0, 1, 2, 3],
1084
+ [ 4, 5, 6, 7],
1085
+ [ 8, 9, 10, 11],
1086
+ [12, 13, 14, 15]])
1087
+
1088
+ Both for indexing:
1089
+
1090
+ >>> a[iu1]
1091
+ array([ 0, 1, 2, ..., 10, 11, 15])
1092
+
1093
+ And for assigning values:
1094
+
1095
+ >>> a[iu1] = -1
1096
+ >>> a
1097
+ array([[-1, -1, -1, -1],
1098
+ [ 4, -1, -1, -1],
1099
+ [ 8, 9, -1, -1],
1100
+ [12, 13, 14, -1]])
1101
+
1102
+ These cover only a small part of the whole array (two diagonals right
1103
+ of the main one):
1104
+
1105
+ >>> a[iu2] = -10
1106
+ >>> a
1107
+ array([[ -1, -1, -10, -10],
1108
+ [ 4, -1, -1, -10],
1109
+ [ 8, 9, -1, -1],
1110
+ [ 12, 13, 14, -1]])
1111
+
1112
+ """
1113
+ tri_ = ~tri(n, m, k=k - 1, dtype=bool)
1114
+
1115
+ return tuple(broadcast_to(inds, tri_.shape)[tri_]
1116
+ for inds in indices(tri_.shape, sparse=True))
1117
+
1118
+
1119
+ @array_function_dispatch(_trilu_indices_form_dispatcher)
1120
+ def triu_indices_from(arr, k=0):
1121
+ """
1122
+ Return the indices for the upper-triangle of arr.
1123
+
1124
+ See `triu_indices` for full details.
1125
+
1126
+ Parameters
1127
+ ----------
1128
+ arr : ndarray, shape(N, N)
1129
+ The indices will be valid for square arrays.
1130
+ k : int, optional
1131
+ Diagonal offset (see `triu` for details).
1132
+
1133
+ Returns
1134
+ -------
1135
+ triu_indices_from : tuple, shape(2) of ndarray, shape(N)
1136
+ Indices for the upper-triangle of `arr`.
1137
+
1138
+ Examples
1139
+ --------
1140
+
1141
+ Create a 4 by 4 array.
1142
+
1143
+ >>> a = np.arange(16).reshape(4, 4)
1144
+ >>> a
1145
+ array([[ 0, 1, 2, 3],
1146
+ [ 4, 5, 6, 7],
1147
+ [ 8, 9, 10, 11],
1148
+ [12, 13, 14, 15]])
1149
+
1150
+ Pass the array to get the indices of the upper triangular elements.
1151
+
1152
+ >>> triui = np.triu_indices_from(a)
1153
+ >>> triui
1154
+ (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
1155
+
1156
+ >>> a[triui]
1157
+ array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
1158
+
1159
+ This is syntactic sugar for triu_indices().
1160
+
1161
+ >>> np.triu_indices(a.shape[0])
1162
+ (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
1163
+
1164
+ Use the `k` parameter to return the indices for the upper triangular array
1165
+ from the k-th diagonal.
1166
+
1167
+ >>> triuim1 = np.triu_indices_from(a, k=1)
1168
+ >>> a[triuim1]
1169
+ array([ 1, 2, 3, 6, 7, 11])
1170
+
1171
+
1172
+ See Also
1173
+ --------
1174
+ triu_indices, triu, tril_indices_from
1175
+
1176
+ Notes
1177
+ -----
1178
+ .. versionadded:: 1.4.0
1179
+
1180
+ """
1181
+ if arr.ndim != 2:
1182
+ raise ValueError("input array must be 2-d")
1183
+ return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
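A brief sketch tying together the mask builders and index helpers defined above (illustrative only, assuming a standard NumPy install):

import numpy as np

m = np.arange(16).reshape(4, 4)
# tril zeros everything above the main diagonal ...
lower = np.tril(m)
# ... and tril_indices addresses exactly the entries tril keeps.
il = np.tril_indices(4)
assert (lower[il] == m[il]).all()
# vander: with increasing=False (the default) the last column is x**0.
v = np.vander(np.array([1, 2, 3]), N=3)
assert (v[:, -1] == 1).all()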
.venv/lib/python3.11/site-packages/numpy/lib/twodim_base.pyi ADDED
@@ -0,0 +1,239 @@
+ from collections.abc import Callable, Sequence
+ from typing import (
+     Any,
+     overload,
+     TypeVar,
+     Union,
+ )
+
+ from numpy import (
+     generic,
+     number,
+     bool_,
+     timedelta64,
+     datetime64,
+     int_,
+     intp,
+     float64,
+     signedinteger,
+     floating,
+     complexfloating,
+     object_,
+     _OrderCF,
+ )
+
+ from numpy._typing import (
+     DTypeLike,
+     _DTypeLike,
+     ArrayLike,
+     _ArrayLike,
+     NDArray,
+     _SupportsArrayFunc,
+     _ArrayLikeInt_co,
+     _ArrayLikeFloat_co,
+     _ArrayLikeComplex_co,
+     _ArrayLikeObject_co,
+ )
+
+ _T = TypeVar("_T")
+ _SCT = TypeVar("_SCT", bound=generic)
+
+ # The returned arrays dtype must be compatible with `np.equal`
+ _MaskFunc = Callable[
+     [NDArray[int_], _T],
+     NDArray[Union[number[Any], bool_, timedelta64, datetime64, object_]],
+ ]
+
+ __all__: list[str]
+
+ @overload
+ def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+ @overload
+ def fliplr(m: ArrayLike) -> NDArray[Any]: ...
+
+ @overload
+ def flipud(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+ @overload
+ def flipud(m: ArrayLike) -> NDArray[Any]: ...
+
+ @overload
+ def eye(
+     N: int,
+     M: None | int = ...,
+     k: int = ...,
+     dtype: None = ...,
+     order: _OrderCF = ...,
+     *,
+     like: None | _SupportsArrayFunc = ...,
+ ) -> NDArray[float64]: ...
+ @overload
+ def eye(
+     N: int,
+     M: None | int = ...,
+     k: int = ...,
+     dtype: _DTypeLike[_SCT] = ...,
+     order: _OrderCF = ...,
+     *,
+     like: None | _SupportsArrayFunc = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def eye(
+     N: int,
+     M: None | int = ...,
+     k: int = ...,
+     dtype: DTypeLike = ...,
+     order: _OrderCF = ...,
+     *,
+     like: None | _SupportsArrayFunc = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def diag(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+ @overload
+ def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+ @overload
+ def diagflat(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+ @overload
+ def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+ @overload
+ def tri(
+     N: int,
+     M: None | int = ...,
+     k: int = ...,
+     dtype: None = ...,
+     *,
+     like: None | _SupportsArrayFunc = ...
+ ) -> NDArray[float64]: ...
+ @overload
+ def tri(
+     N: int,
+     M: None | int = ...,
+     k: int = ...,
+     dtype: _DTypeLike[_SCT] = ...,
+     *,
+     like: None | _SupportsArrayFunc = ...
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def tri(
+     N: int,
+     M: None | int = ...,
+     k: int = ...,
+     dtype: DTypeLike = ...,
+     *,
+     like: None | _SupportsArrayFunc = ...
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+ @overload
+ def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+ @overload
+ def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
+ @overload
+ def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
+
+ @overload
+ def vander(  # type: ignore[misc]
+     x: _ArrayLikeInt_co,
+     N: None | int = ...,
+     increasing: bool = ...,
+ ) -> NDArray[signedinteger[Any]]: ...
+ @overload
+ def vander(  # type: ignore[misc]
+     x: _ArrayLikeFloat_co,
+     N: None | int = ...,
+     increasing: bool = ...,
+ ) -> NDArray[floating[Any]]: ...
+ @overload
+ def vander(
+     x: _ArrayLikeComplex_co,
+     N: None | int = ...,
+     increasing: bool = ...,
+ ) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def vander(
+     x: _ArrayLikeObject_co,
+     N: None | int = ...,
+     increasing: bool = ...,
+ ) -> NDArray[object_]: ...
+
+ @overload
+ def histogram2d(  # type: ignore[misc]
+     x: _ArrayLikeFloat_co,
+     y: _ArrayLikeFloat_co,
+     bins: int | Sequence[int] = ...,
+     range: None | _ArrayLikeFloat_co = ...,
+     density: None | bool = ...,
+     weights: None | _ArrayLikeFloat_co = ...,
+ ) -> tuple[
+     NDArray[float64],
+     NDArray[floating[Any]],
+     NDArray[floating[Any]],
+ ]: ...
+ @overload
+ def histogram2d(
+     x: _ArrayLikeComplex_co,
+     y: _ArrayLikeComplex_co,
+     bins: int | Sequence[int] = ...,
+     range: None | _ArrayLikeFloat_co = ...,
+     density: None | bool = ...,
+     weights: None | _ArrayLikeFloat_co = ...,
+ ) -> tuple[
+     NDArray[float64],
+     NDArray[complexfloating[Any, Any]],
+     NDArray[complexfloating[Any, Any]],
+ ]: ...
+ @overload  # TODO: Sort out `bins`
+ def histogram2d(
+     x: _ArrayLikeComplex_co,
+     y: _ArrayLikeComplex_co,
+     bins: Sequence[_ArrayLikeInt_co],
+     range: None | _ArrayLikeFloat_co = ...,
+     density: None | bool = ...,
+     weights: None | _ArrayLikeFloat_co = ...,
+ ) -> tuple[
+     NDArray[float64],
+     NDArray[Any],
+     NDArray[Any],
+ ]: ...
+
+ # NOTE: we're assuming/demanding here the `mask_func` returns
+ # an ndarray of shape `(n, n)`; otherwise there is the possibility
+ # of the output tuple having more or less than 2 elements
+ @overload
+ def mask_indices(
+     n: int,
+     mask_func: _MaskFunc[int],
+     k: int = ...,
+ ) -> tuple[NDArray[intp], NDArray[intp]]: ...
+ @overload
+ def mask_indices(
+     n: int,
+     mask_func: _MaskFunc[_T],
+     k: _T,
+ ) -> tuple[NDArray[intp], NDArray[intp]]: ...
+
+ def tril_indices(
+     n: int,
+     k: int = ...,
+     m: None | int = ...,
+ ) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+ def tril_indices_from(
+     arr: NDArray[Any],
+     k: int = ...,
+ ) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+ def triu_indices(
+     n: int,
+     k: int = ...,
+     m: None | int = ...,
+ ) -> tuple[NDArray[int_], NDArray[int_]]: ...
+
+ def triu_indices_from(
+     arr: NDArray[Any],
+     k: int = ...,
+ ) -> tuple[NDArray[int_], NDArray[int_]]: ...
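The vander overloads above mirror the runtime dtype promotion; a small check (illustrative, with dtypes chosen to be platform-independent):

import numpy as np

assert np.vander(np.array([1, 2], dtype=np.int64)).dtype == np.int64    # signedinteger branch
assert np.vander(np.array([1.0, 2.0])).dtype == np.float64              # floating branch
assert np.vander(np.array([1 + 0j, 2 + 0j])).dtype == np.complex128    # complexfloating branch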
.venv/lib/python3.11/site-packages/numpy/lib/type_check.py ADDED
@@ -0,0 +1,735 @@
+ """Automatically adapted for numpy Sep 19, 2005 by convertcode.py
+
+ """
+ import functools
+
+ __all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
+            'isreal', 'nan_to_num', 'real', 'real_if_close',
+            'typename', 'asfarray', 'mintypecode',
+            'common_type']
+
+ from .._utils import set_module
+ import numpy.core.numeric as _nx
+ from numpy.core.numeric import asarray, asanyarray, isnan, zeros
+ from numpy.core import overrides, getlimits
+ from .ufunclike import isneginf, isposinf
+
+
+ array_function_dispatch = functools.partial(
+     overrides.array_function_dispatch, module='numpy')
+
+
+ _typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
+
+
+ @set_module('numpy')
+ def mintypecode(typechars, typeset='GDFgdf', default='d'):
+     """
+     Return the character for the minimum-size type to which given types can
+     be safely cast.
+
+     The returned type character must represent the smallest size dtype such
+     that an array of the returned type can handle the data from an array of
+     all types in `typechars` (or if `typechars` is an array, then its
+     dtype.char).
+
+     Parameters
+     ----------
+     typechars : list of str or array_like
+         If a list of strings, each string should represent a dtype.
+         If array_like, the character representation of the array dtype is used.
+     typeset : str or list of str, optional
+         The set of characters that the returned character is chosen from.
+         The default set is 'GDFgdf'.
+     default : str, optional
+         The default character, this is returned if none of the characters in
+         `typechars` matches a character in `typeset`.
+
+     Returns
+     -------
+     typechar : str
+         The character representing the minimum-size type that was found.
+
+     See Also
+     --------
+     dtype, sctype2char, maximum_sctype
+
+     Examples
+     --------
+     >>> np.mintypecode(['d', 'f', 'S'])
+     'd'
+     >>> x = np.array([1.1, 2-3.j])
+     >>> np.mintypecode(x)
+     'D'
+
+     >>> np.mintypecode('abceh', default='G')
+     'G'
+
+     """
+     typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char
+                  for t in typechars)
+     intersection = set(t for t in typecodes if t in typeset)
+     if not intersection:
+         return default
+     if 'F' in intersection and 'd' in intersection:
+         return 'D'
+     return min(intersection, key=_typecodes_by_elsize.index)
+
+
+ def _asfarray_dispatcher(a, dtype=None):
+     return (a,)
+
+
+ @array_function_dispatch(_asfarray_dispatcher)
+ def asfarray(a, dtype=_nx.float_):
+     """
+     Return an array converted to a float type.
+
+     Parameters
+     ----------
+     a : array_like
+         The input array.
+     dtype : str or dtype object, optional
+         Float type code to coerce input array `a`.  If `dtype` is one of the
+         'int' dtypes, it is replaced with float64.
+
+     Returns
+     -------
+     out : ndarray
+         The input `a` as a float ndarray.
+
+     Examples
+     --------
+     >>> np.asfarray([2, 3])
+     array([2., 3.])
+     >>> np.asfarray([2, 3], dtype='float')
+     array([2., 3.])
+     >>> np.asfarray([2, 3], dtype='int8')
+     array([2., 3.])
+
+     """
+     if not _nx.issubdtype(dtype, _nx.inexact):
+         dtype = _nx.float_
+     return asarray(a, dtype=dtype)
+
+
+ def _real_dispatcher(val):
+     return (val,)
+
+
+ @array_function_dispatch(_real_dispatcher)
+ def real(val):
+     """
+     Return the real part of the complex argument.
+
+     Parameters
+     ----------
+     val : array_like
+         Input array.
+
+     Returns
+     -------
+     out : ndarray or scalar
+         The real component of the complex argument. If `val` is real, the type
+         of `val` is used for the output.  If `val` has complex elements, the
+         returned type is float.
+
+     See Also
+     --------
+     real_if_close, imag, angle
+
+     Examples
+     --------
+     >>> a = np.array([1+2j, 3+4j, 5+6j])
+     >>> a.real
+     array([1., 3., 5.])
+     >>> a.real = 9
+     >>> a
+     array([9.+2.j, 9.+4.j, 9.+6.j])
+     >>> a.real = np.array([9, 8, 7])
+     >>> a
+     array([9.+2.j, 8.+4.j, 7.+6.j])
+     >>> np.real(1 + 1j)
+     1.0
+
+     """
+     try:
+         return val.real
+     except AttributeError:
+         return asanyarray(val).real
+
+
+ def _imag_dispatcher(val):
+     return (val,)
+
+
+ @array_function_dispatch(_imag_dispatcher)
+ def imag(val):
+     """
+     Return the imaginary part of the complex argument.
+
+     Parameters
+     ----------
+     val : array_like
+         Input array.
+
+     Returns
+     -------
+     out : ndarray or scalar
+         The imaginary component of the complex argument. If `val` is real,
+         the type of `val` is used for the output.  If `val` has complex
+         elements, the returned type is float.
+
+     See Also
+     --------
+     real, angle, real_if_close
+
+     Examples
+     --------
+     >>> a = np.array([1+2j, 3+4j, 5+6j])
+     >>> a.imag
+     array([2., 4., 6.])
+     >>> a.imag = np.array([8, 10, 12])
+     >>> a
+     array([1. +8.j, 3.+10.j, 5.+12.j])
+     >>> np.imag(1 + 1j)
+     1.0
+
+     """
+     try:
+         return val.imag
+     except AttributeError:
+         return asanyarray(val).imag
+
+
+ def _is_type_dispatcher(x):
+     return (x,)
+
+
+ @array_function_dispatch(_is_type_dispatcher)
+ def iscomplex(x):
+     """
+     Returns a bool array, where True if input element is complex.
+
+     What is tested is whether the input has a non-zero imaginary part, not if
+     the input type is complex.
+
+     Parameters
+     ----------
+     x : array_like
+         Input array.
+
+     Returns
+     -------
+     out : ndarray of bools
+         Output array.
+
+     See Also
+     --------
+     isreal
+     iscomplexobj : Return True if x is a complex type or an array of complex
+                    numbers.
+
+     Examples
+     --------
+     >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
+     array([ True, False, False, False, False,  True])
+
+     """
+     ax = asanyarray(x)
+     if issubclass(ax.dtype.type, _nx.complexfloating):
+         return ax.imag != 0
+     res = zeros(ax.shape, bool)
+     return res[()]   # convert to scalar if needed
+
+
+ @array_function_dispatch(_is_type_dispatcher)
+ def isreal(x):
+     """
+     Returns a bool array, where True if input element is real.
+
+     If element has complex type with zero complex part, the return value
+     for that element is True.
+
+     Parameters
+     ----------
+     x : array_like
+         Input array.
+
+     Returns
+     -------
+     out : ndarray, bool
+         Boolean array of same shape as `x`.
+
+     Notes
+     -----
+     `isreal` may behave unexpectedly for string or object arrays (see examples)
+
+     See Also
+     --------
+     iscomplex
+     isrealobj : Return True if x is not a complex type.
+
+     Examples
+     --------
+     >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex)
+     >>> np.isreal(a)
+     array([False,  True,  True,  True,  True, False])
+
+     The function does not work on string arrays.
+
+     >>> a = np.array([2j, "a"], dtype="U")
+     >>> np.isreal(a)  # Warns about non-elementwise comparison
+     False
+
+     Returns True for all elements in input array of ``dtype=object`` even if
+     any of the elements is complex.
+
+     >>> a = np.array([1, "2", 3+4j], dtype=object)
+     >>> np.isreal(a)
+     array([ True,  True,  True])
+
+     isreal should not be used with object arrays
+
+     >>> a = np.array([1+2j, 2+1j], dtype=object)
+     >>> np.isreal(a)
+     array([ True,  True])
+
+     """
+     return imag(x) == 0
+
+
+ @array_function_dispatch(_is_type_dispatcher)
+ def iscomplexobj(x):
+     """
+     Check for a complex type or an array of complex numbers.
+
+     The type of the input is checked, not the value. Even if the input
+     has an imaginary part equal to zero, `iscomplexobj` evaluates to True.
+
+     Parameters
+     ----------
+     x : any
+         The input can be of any type and shape.
+
+     Returns
+     -------
+     iscomplexobj : bool
+         The return value, True if `x` is of a complex type or has at least
+         one complex element.
+
+     See Also
+     --------
+     isrealobj, iscomplex
+
+     Examples
+     --------
+     >>> np.iscomplexobj(1)
+     False
+     >>> np.iscomplexobj(1+0j)
+     True
+     >>> np.iscomplexobj([3, 1+0j, True])
+     True
+
+     """
+     try:
+         dtype = x.dtype
+         type_ = dtype.type
+     except AttributeError:
+         type_ = asarray(x).dtype.type
+     return issubclass(type_, _nx.complexfloating)
+
+
+ @array_function_dispatch(_is_type_dispatcher)
+ def isrealobj(x):
+     """
+     Return True if x is a not complex type or an array of complex numbers.
+
+     The type of the input is checked, not the value. So even if the input
+     has an imaginary part equal to zero, `isrealobj` evaluates to False
+     if the data type is complex.
+
+     Parameters
+     ----------
+     x : any
+         The input can be of any type and shape.
+
+     Returns
+     -------
+     y : bool
+         The return value, False if `x` is of a complex type.
+
+     See Also
+     --------
+     iscomplexobj, isreal
+
+     Notes
+     -----
+     The function is only meant for arrays with numerical values but it
+     accepts all other objects. Since it assumes array input, the return
+     value of other objects may be True.
+
+     >>> np.isrealobj('A string')
+     True
+     >>> np.isrealobj(False)
+     True
+     >>> np.isrealobj(None)
+     True
+
+     Examples
+     --------
+     >>> np.isrealobj(1)
+     True
+     >>> np.isrealobj(1+0j)
+     False
+     >>> np.isrealobj([3, 1+0j, True])
+     False
+
+     """
+     return not iscomplexobj(x)
+
+ #-----------------------------------------------------------------------------
+
+ def _getmaxmin(t):
+     from numpy.core import getlimits
+     f = getlimits.finfo(t)
+     return f.max, f.min
+
+
+ def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):
+     return (x,)
+
+
+ @array_function_dispatch(_nan_to_num_dispatcher)
+ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
+     """
+     Replace NaN with zero and infinity with large finite numbers (default
+     behaviour) or with the numbers defined by the user using the `nan`,
+     `posinf` and/or `neginf` keywords.
+
+     If `x` is inexact, NaN is replaced by zero or by the user defined value in
+     `nan` keyword, infinity is replaced by the largest finite floating point
+     values representable by ``x.dtype`` or by the user defined value in
+     `posinf` keyword and -infinity is replaced by the most negative finite
+     floating point values representable by ``x.dtype`` or by the user defined
+     value in `neginf` keyword.
+
+     For complex dtypes, the above is applied to each of the real and
+     imaginary components of `x` separately.
+
+     If `x` is not inexact, then no replacements are made.
+
+     Parameters
+     ----------
+     x : scalar or array_like
+         Input data.
+     copy : bool, optional
+         Whether to create a copy of `x` (True) or to replace values
+         in-place (False). The in-place operation only occurs if
+         casting to an array does not require a copy.
+         Default is True.
+
+         .. versionadded:: 1.13
+     nan : int, float, optional
+         Value to be used to fill NaN values. If no value is passed
+         then NaN values will be replaced with 0.0.
+
+         .. versionadded:: 1.17
+     posinf : int, float, optional
+         Value to be used to fill positive infinity values. If no value is
+         passed then positive infinity values will be replaced with a very
+         large number.
+
+         .. versionadded:: 1.17
+     neginf : int, float, optional
+         Value to be used to fill negative infinity values. If no value is
+         passed then negative infinity values will be replaced with a very
+         small (or negative) number.
+
+         .. versionadded:: 1.17
+
+
+
+     Returns
+     -------
+     out : ndarray
+         `x`, with the non-finite values replaced. If `copy` is False, this may
+         be `x` itself.
+
+     See Also
+     --------
+     isinf : Shows which elements are positive or negative infinity.
+     isneginf : Shows which elements are negative infinity.
+     isposinf : Shows which elements are positive infinity.
+     isnan : Shows which elements are Not a Number (NaN).
+     isfinite : Shows which elements are finite (not NaN, not infinity)
+
+     Notes
+     -----
+     NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+     (IEEE 754). This means that Not a Number is not equivalent to infinity.
+
+     Examples
+     --------
+     >>> np.nan_to_num(np.inf)
+     1.7976931348623157e+308
+     >>> np.nan_to_num(-np.inf)
+     -1.7976931348623157e+308
+     >>> np.nan_to_num(np.nan)
+     0.0
+     >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
+     >>> np.nan_to_num(x)
+     array([ 1.79769313e+308, -1.79769313e+308,  0.00000000e+000, # may vary
+            -1.28000000e+002,  1.28000000e+002])
+     >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
+     array([ 3.3333333e+07,  3.3333333e+07, -9.9990000e+03,
+            -1.2800000e+02,  1.2800000e+02])
+     >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
490
+ >>> np.nan_to_num(y)
491
+ array([ 1.79769313e+308 +0.00000000e+000j, # may vary
492
+ 0.00000000e+000 +0.00000000e+000j,
493
+ 0.00000000e+000 +1.79769313e+308j])
494
+ >>> np.nan_to_num(y, nan=111111, posinf=222222)
495
+ array([222222.+111111.j, 111111. +0.j, 111111.+222222.j])
496
+ """
497
+ x = _nx.array(x, subok=True, copy=copy)
498
+ xtype = x.dtype.type
499
+
500
+ isscalar = (x.ndim == 0)
501
+
502
+ if not issubclass(xtype, _nx.inexact):
503
+ return x[()] if isscalar else x
504
+
505
+ iscomplex = issubclass(xtype, _nx.complexfloating)
506
+
507
+ dest = (x.real, x.imag) if iscomplex else (x,)
508
+ maxf, minf = _getmaxmin(x.real.dtype)
509
+ if posinf is not None:
510
+ maxf = posinf
511
+ if neginf is not None:
512
+ minf = neginf
513
+ for d in dest:
514
+ idx_nan = isnan(d)
515
+ idx_posinf = isposinf(d)
516
+ idx_neginf = isneginf(d)
517
+ _nx.copyto(d, nan, where=idx_nan)
518
+ _nx.copyto(d, maxf, where=idx_posinf)
519
+ _nx.copyto(d, minf, where=idx_neginf)
520
+ return x[()] if isscalar else x
521
+
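+ # A rough standalone sketch of the replacement logic above, using plain
+ # numpy calls (illustrative only; ``nan_to_num`` itself also handles
+ # scalars, subclasses and the real/imaginary parts of complex input):
+ #
+ # >>> import numpy as np
+ # >>> a = np.array([np.nan, np.inf, -np.inf, 1.0])
+ # >>> out = a.copy()
+ # >>> np.copyto(out, 0.0, where=np.isnan(a))
+ # >>> np.copyto(out, np.finfo(a.dtype).max, where=np.isposinf(a))
+ # >>> np.copyto(out, np.finfo(a.dtype).min, where=np.isneginf(a))
+ # >>> out
+ # array([ 0.00000000e+000,  1.79769313e+308, -1.79769313e+308,  # may vary
+ #         1.00000000e+000])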
522
+ #-----------------------------------------------------------------------------
523
+
524
+ def _real_if_close_dispatcher(a, tol=None):
525
+ return (a,)
526
+
527
+
528
+ @array_function_dispatch(_real_if_close_dispatcher)
529
+ def real_if_close(a, tol=100):
530
+ """
531
+ If input is complex with all imaginary parts close to zero, return
532
+ real parts.
533
+
534
+ "Close to zero" is defined as `tol` * (machine epsilon of the type for
535
+ `a`).
536
+
537
+ Parameters
538
+ ----------
539
+ a : array_like
540
+ Input array.
541
+ tol : float
542
+ Tolerance in machine epsilons for the complex part of the elements
543
+ in the array. If the tolerance is <=1, then the absolute tolerance
544
+ is used.
545
+
546
+ Returns
547
+ -------
548
+ out : ndarray
549
+ If `a` is real, the type of `a` is used for the output. If `a`
550
+ has complex elements, the returned type is float.
551
+
552
+ See Also
553
+ --------
554
+ real, imag, angle
555
+
556
+ Notes
557
+ -----
558
+ Machine epsilon varies from machine to machine and between data types
559
+ but Python floats on most platforms have a machine epsilon equal to
560
+ 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print
561
+ out the machine epsilon for floats.
562
+
563
+ Examples
564
+ --------
565
+ >>> np.finfo(float).eps
566
+ 2.2204460492503131e-16 # may vary
567
+
568
+ >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000)
569
+ array([2.1, 5.2])
570
+ >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000)
571
+ array([2.1+4.e-13j, 5.2+3.e-15j])
572
+
573
+ """
574
+ a = asanyarray(a)
575
+ type_ = a.dtype.type
576
+ if not issubclass(type_, _nx.complexfloating):
577
+ return a
578
+ if tol > 1:
579
+ from numpy.core import getlimits
+ f = getlimits.finfo(type_)
580
+ tol = f.eps * tol
581
+ if _nx.all(_nx.absolute(a.imag) < tol):
582
+ a = a.real
583
+ return a
584
+
585
+
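+ # A short sketch of the tolerance rule implemented above: for tol > 1 the
+ # threshold is tol * eps of the matching real type, while tol <= 1 is used
+ # as an absolute bound on the imaginary parts:
+ #
+ # >>> import numpy as np
+ # >>> z = np.array([1.0 + 1e-14j])
+ # >>> np.real_if_close(z, tol=1000)   # 1000 * eps is ~2.2e-13 > 1e-14
+ # array([1.])
+ # >>> np.real_if_close(z, tol=1e-20)  # absolute bound, too tight to strip
+ # array([1.+1.e-14j])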
586
+ #-----------------------------------------------------------------------------
587
+
588
+ _namefromtype = {'S1': 'character',
589
+ '?': 'bool',
590
+ 'b': 'signed char',
591
+ 'B': 'unsigned char',
592
+ 'h': 'short',
593
+ 'H': 'unsigned short',
594
+ 'i': 'integer',
595
+ 'I': 'unsigned integer',
596
+ 'l': 'long integer',
597
+ 'L': 'unsigned long integer',
598
+ 'q': 'long long integer',
599
+ 'Q': 'unsigned long long integer',
600
+ 'f': 'single precision',
601
+ 'd': 'double precision',
602
+ 'g': 'long precision',
603
+ 'F': 'complex single precision',
604
+ 'D': 'complex double precision',
605
+ 'G': 'complex long double precision',
606
+ 'S': 'string',
607
+ 'U': 'unicode',
608
+ 'V': 'void',
609
+ 'O': 'object'
610
+ }
611
+
612
+ @set_module('numpy')
613
+ def typename(char):
614
+ """
615
+ Return a description for the given data type code.
616
+
617
+ Parameters
618
+ ----------
619
+ char : str
620
+ Data type code.
621
+
622
+ Returns
623
+ -------
624
+ out : str
625
+ Description of the input data type code.
626
+
627
+ See Also
628
+ --------
629
+ dtype, typecodes
630
+
631
+ Examples
632
+ --------
633
+ >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
634
+ ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
635
+ >>> for typechar in typechars:
636
+ ... print(typechar, ' : ', np.typename(typechar))
637
+ ...
638
+ S1 : character
639
+ ? : bool
640
+ B : unsigned char
641
+ D : complex double precision
642
+ G : complex long double precision
643
+ F : complex single precision
644
+ I : unsigned integer
645
+ H : unsigned short
646
+ L : unsigned long integer
647
+ O : object
648
+ Q : unsigned long long integer
649
+ S : string
650
+ U : unicode
651
+ V : void
652
+ b : signed char
653
+ d : double precision
654
+ g : long precision
655
+ f : single precision
656
+ i : integer
657
+ h : short
658
+ l : long integer
659
+ q : long long integer
660
+
661
+ """
662
+ return _namefromtype[char]
663
+
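+ # A tiny usage sketch: any dtype's one-character code can be fed through
+ # ``typename`` via ``dtype.char``:
+ #
+ # >>> import numpy as np
+ # >>> np.typename(np.dtype(np.float64).char)
+ # 'double precision'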
664
+ #-----------------------------------------------------------------------------
665
+
666
+ #determine the "minimum common type" for a group of arrays.
667
+ array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble],
668
+ [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]]
669
+ array_precision = {_nx.half: 0,
670
+ _nx.single: 1,
671
+ _nx.double: 2,
672
+ _nx.longdouble: 3,
673
+ _nx.csingle: 1,
674
+ _nx.cdouble: 2,
675
+ _nx.clongdouble: 3}
676
+
677
+
678
+ def _common_type_dispatcher(*arrays):
679
+ return arrays
680
+
681
+
682
+ @array_function_dispatch(_common_type_dispatcher)
683
+ def common_type(*arrays):
684
+ """
685
+ Return a scalar type which is common to the input arrays.
686
+
687
+ The return type will always be an inexact (i.e. floating point) scalar
688
+ type, even if all the arrays are integer arrays. If one of the inputs is
689
+ an integer array, the minimum precision type that is returned is a
690
+ 64-bit floating point dtype.
691
+
692
+ All input arrays except int64 and uint64 can be safely cast to the
693
+ returned dtype without loss of information.
694
+
695
+ Parameters
696
+ ----------
697
+ array1, array2, ... : ndarrays
698
+ Input arrays.
699
+
700
+ Returns
701
+ -------
702
+ out : data type code
703
+ Data type code.
704
+
705
+ See Also
706
+ --------
707
+ dtype, mintypecode
708
+
709
+ Examples
710
+ --------
711
+ >>> np.common_type(np.arange(2, dtype=np.float32))
712
+ <class 'numpy.float32'>
713
+ >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
714
+ <class 'numpy.float64'>
715
+ >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
716
+ <class 'numpy.complex128'>
717
+
718
+ """
719
+ is_complex = False
720
+ precision = 0
721
+ for a in arrays:
722
+ t = a.dtype.type
723
+ if iscomplexobj(a):
724
+ is_complex = True
725
+ if issubclass(t, _nx.integer):
726
+ p = 2 # array_precision[_nx.double]
727
+ else:
728
+ p = array_precision.get(t, None)
729
+ if p is None:
730
+ raise TypeError("can't get common type for non-numeric array")
731
+ precision = max(precision, p)
732
+ if is_complex:
733
+ return array_type[1][precision]
734
+ else:
735
+ return array_type[0][precision]
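+ # A brief sketch of the table lookup above: integer inputs pin the
+ # precision at that of double (index 2), and any complex input selects
+ # the complex row of ``array_type``:
+ #
+ # >>> import numpy as np
+ # >>> np.common_type(np.arange(3), np.array([1.0], dtype=np.float32))
+ # <class 'numpy.float64'>
+ # >>> np.common_type(np.array([1j], dtype=np.complex64))
+ # <class 'numpy.complex64'>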
.venv/lib/python3.11/site-packages/numpy/lib/type_check.pyi ADDED
@@ -0,0 +1,222 @@
1
+ from collections.abc import Container, Iterable
2
+ from typing import (
3
+ Literal as L,
4
+ Any,
5
+ overload,
6
+ TypeVar,
7
+ Protocol,
8
+ )
9
+
10
+ from numpy import (
11
+ dtype,
12
+ generic,
13
+ bool_,
14
+ floating,
15
+ float64,
16
+ complexfloating,
17
+ integer,
18
+ )
19
+
20
+ from numpy._typing import (
21
+ ArrayLike,
22
+ DTypeLike,
23
+ NBitBase,
24
+ NDArray,
25
+ _64Bit,
26
+ _SupportsDType,
27
+ _ScalarLike_co,
28
+ _ArrayLike,
29
+ _DTypeLikeComplex,
30
+ )
31
+
32
+ _T = TypeVar("_T")
33
+ _T_co = TypeVar("_T_co", covariant=True)
34
+ _SCT = TypeVar("_SCT", bound=generic)
35
+ _NBit1 = TypeVar("_NBit1", bound=NBitBase)
36
+ _NBit2 = TypeVar("_NBit2", bound=NBitBase)
37
+
38
+ class _SupportsReal(Protocol[_T_co]):
39
+ @property
40
+ def real(self) -> _T_co: ...
41
+
42
+ class _SupportsImag(Protocol[_T_co]):
43
+ @property
44
+ def imag(self) -> _T_co: ...
45
+
46
+ __all__: list[str]
47
+
48
+ def mintypecode(
49
+ typechars: Iterable[str | ArrayLike],
50
+ typeset: Container[str] = ...,
51
+ default: str = ...,
52
+ ) -> str: ...
53
+
54
+ # `asfarray` ignores dtypes if they're not inexact
55
+
56
+ @overload
57
+ def asfarray(
58
+ a: object,
59
+ dtype: None | type[float] = ...,
60
+ ) -> NDArray[float64]: ...
61
+ @overload
62
+ def asfarray( # type: ignore[misc]
63
+ a: Any,
64
+ dtype: _DTypeLikeComplex,
65
+ ) -> NDArray[complexfloating[Any, Any]]: ...
66
+ @overload
67
+ def asfarray(
68
+ a: Any,
69
+ dtype: DTypeLike,
70
+ ) -> NDArray[floating[Any]]: ...
71
+
72
+ @overload
73
+ def real(val: _SupportsReal[_T]) -> _T: ...
74
+ @overload
75
+ def real(val: ArrayLike) -> NDArray[Any]: ...
76
+
77
+ @overload
78
+ def imag(val: _SupportsImag[_T]) -> _T: ...
79
+ @overload
80
+ def imag(val: ArrayLike) -> NDArray[Any]: ...
81
+
82
+ @overload
83
+ def iscomplex(x: _ScalarLike_co) -> bool_: ... # type: ignore[misc]
84
+ @overload
85
+ def iscomplex(x: ArrayLike) -> NDArray[bool_]: ...
86
+
87
+ @overload
88
+ def isreal(x: _ScalarLike_co) -> bool_: ... # type: ignore[misc]
89
+ @overload
90
+ def isreal(x: ArrayLike) -> NDArray[bool_]: ...
91
+
92
+ def iscomplexobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
93
+
94
+ def isrealobj(x: _SupportsDType[dtype[Any]] | ArrayLike) -> bool: ...
95
+
96
+ @overload
97
+ def nan_to_num( # type: ignore[misc]
98
+ x: _SCT,
99
+ copy: bool = ...,
100
+ nan: float = ...,
101
+ posinf: None | float = ...,
102
+ neginf: None | float = ...,
103
+ ) -> _SCT: ...
104
+ @overload
105
+ def nan_to_num(
106
+ x: _ScalarLike_co,
107
+ copy: bool = ...,
108
+ nan: float = ...,
109
+ posinf: None | float = ...,
110
+ neginf: None | float = ...,
111
+ ) -> Any: ...
112
+ @overload
113
+ def nan_to_num(
114
+ x: _ArrayLike[_SCT],
115
+ copy: bool = ...,
116
+ nan: float = ...,
117
+ posinf: None | float = ...,
118
+ neginf: None | float = ...,
119
+ ) -> NDArray[_SCT]: ...
120
+ @overload
121
+ def nan_to_num(
122
+ x: ArrayLike,
123
+ copy: bool = ...,
124
+ nan: float = ...,
125
+ posinf: None | float = ...,
126
+ neginf: None | float = ...,
127
+ ) -> NDArray[Any]: ...
128
+
129
+ # If one passes a complex array to `real_if_close`, then one is reasonably
130
+ # expected to verify the output dtype (so we can return an unsafe union here)
131
+
132
+ @overload
133
+ def real_if_close( # type: ignore[misc]
134
+ a: _ArrayLike[complexfloating[_NBit1, _NBit1]],
135
+ tol: float = ...,
136
+ ) -> NDArray[floating[_NBit1]] | NDArray[complexfloating[_NBit1, _NBit1]]: ...
137
+ @overload
138
+ def real_if_close(
139
+ a: _ArrayLike[_SCT],
140
+ tol: float = ...,
141
+ ) -> NDArray[_SCT]: ...
142
+ @overload
143
+ def real_if_close(
144
+ a: ArrayLike,
145
+ tol: float = ...,
146
+ ) -> NDArray[Any]: ...
147
+
148
+ @overload
149
+ def typename(char: L['S1']) -> L['character']: ...
150
+ @overload
151
+ def typename(char: L['?']) -> L['bool']: ...
152
+ @overload
153
+ def typename(char: L['b']) -> L['signed char']: ...
154
+ @overload
155
+ def typename(char: L['B']) -> L['unsigned char']: ...
156
+ @overload
157
+ def typename(char: L['h']) -> L['short']: ...
158
+ @overload
159
+ def typename(char: L['H']) -> L['unsigned short']: ...
160
+ @overload
161
+ def typename(char: L['i']) -> L['integer']: ...
162
+ @overload
163
+ def typename(char: L['I']) -> L['unsigned integer']: ...
164
+ @overload
165
+ def typename(char: L['l']) -> L['long integer']: ...
166
+ @overload
167
+ def typename(char: L['L']) -> L['unsigned long integer']: ...
168
+ @overload
169
+ def typename(char: L['q']) -> L['long long integer']: ...
170
+ @overload
171
+ def typename(char: L['Q']) -> L['unsigned long long integer']: ...
172
+ @overload
173
+ def typename(char: L['f']) -> L['single precision']: ...
174
+ @overload
175
+ def typename(char: L['d']) -> L['double precision']: ...
176
+ @overload
177
+ def typename(char: L['g']) -> L['long precision']: ...
178
+ @overload
179
+ def typename(char: L['F']) -> L['complex single precision']: ...
180
+ @overload
181
+ def typename(char: L['D']) -> L['complex double precision']: ...
182
+ @overload
183
+ def typename(char: L['G']) -> L['complex long double precision']: ...
184
+ @overload
185
+ def typename(char: L['S']) -> L['string']: ...
186
+ @overload
187
+ def typename(char: L['U']) -> L['unicode']: ...
188
+ @overload
189
+ def typename(char: L['V']) -> L['void']: ...
190
+ @overload
191
+ def typename(char: L['O']) -> L['object']: ...
192
+
193
+ @overload
194
+ def common_type( # type: ignore[misc]
195
+ *arrays: _SupportsDType[dtype[
196
+ integer[Any]
197
+ ]]
198
+ ) -> type[floating[_64Bit]]: ...
199
+ @overload
200
+ def common_type( # type: ignore[misc]
201
+ *arrays: _SupportsDType[dtype[
202
+ floating[_NBit1]
203
+ ]]
204
+ ) -> type[floating[_NBit1]]: ...
205
+ @overload
206
+ def common_type( # type: ignore[misc]
207
+ *arrays: _SupportsDType[dtype[
208
+ integer[Any] | floating[_NBit1]
209
+ ]]
210
+ ) -> type[floating[_NBit1 | _64Bit]]: ...
211
+ @overload
212
+ def common_type( # type: ignore[misc]
213
+ *arrays: _SupportsDType[dtype[
214
+ floating[_NBit1] | complexfloating[_NBit2, _NBit2]
215
+ ]]
216
+ ) -> type[complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]]: ...
217
+ @overload
218
+ def common_type(
219
+ *arrays: _SupportsDType[dtype[
220
+ integer[Any] | floating[_NBit1] | complexfloating[_NBit2, _NBit2]
221
+ ]]
222
+ ) -> type[complexfloating[_64Bit | _NBit1 | _NBit2, _64Bit | _NBit1 | _NBit2]]: ...
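+ # A sketch (comments only, since stub files carry no runtime code) of how
+ # a type checker such as mypy would be expected to resolve these overloads;
+ # the revealed types below are assumptions read off the signatures above:
+ #
+ # reveal_type(common_type(np.arange(2)))          # type[floating[_64Bit]]
+ # reveal_type(common_type(np.array([1.0 + 1j])))  # type[complexfloating[...]]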
.venv/lib/python3.11/site-packages/numpy/lib/ufunclike.py ADDED
@@ -0,0 +1,210 @@
1
+ """
2
+ Module of functions that are like ufuncs in acting on arrays and optionally
3
+ storing results in an output array.
4
+
5
+ """
6
+ __all__ = ['fix', 'isneginf', 'isposinf']
7
+
8
+ import numpy.core.numeric as nx
9
+ from numpy.core.overrides import array_function_dispatch
10
+ import warnings
11
+ import functools
12
+
13
+
14
+ def _dispatcher(x, out=None):
15
+ return (x, out)
16
+
17
+
18
+ @array_function_dispatch(_dispatcher, verify=False, module='numpy')
19
+ def fix(x, out=None):
20
+ """
21
+ Round to nearest integer towards zero.
22
+
23
+ Round an array of floats element-wise to nearest integer towards zero.
24
+ The rounded values are returned as floats.
25
+
26
+ Parameters
27
+ ----------
28
+ x : array_like
29
+ An array of floats to be rounded
30
+ out : ndarray, optional
31
+ A location into which the result is stored. If provided, it must have
32
+ a shape that the input broadcasts to. If not provided or None, a
33
+ freshly-allocated array is returned.
34
+
35
+ Returns
36
+ -------
37
+ out : ndarray of floats
38
+ A float array with the same dimensions as the input.
39
+ If second argument is not supplied then a float array is returned
40
+ with the rounded values.
41
+
42
+ If a second argument is supplied the result is stored there.
43
+ The return value `out` is then a reference to that array.
44
+
45
+ See Also
46
+ --------
47
+ rint, trunc, floor, ceil
48
+ around : Round to given number of decimals
49
+
50
+ Examples
51
+ --------
52
+ >>> np.fix(3.14)
53
+ 3.0
54
+ >>> np.fix(3)
55
+ 3.0
56
+ >>> np.fix([2.1, 2.9, -2.1, -2.9])
57
+ array([ 2., 2., -2., -2.])
58
+
59
+ """
60
+ # promote back to an array if flattened
61
+ res = nx.asanyarray(nx.ceil(x, out=out))
62
+ res = nx.floor(x, out=res, where=nx.greater_equal(x, 0))
63
+
64
+ # when no out argument is passed and no subclasses are involved, flatten
65
+ # scalars
66
+ if out is None and type(res) is nx.ndarray:
67
+ res = res[()]
68
+ return res
69
+
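+ # A brief sketch of the trick used above: ``ceil`` rounds everything up
+ # first (correct for the negatives), then ``floor`` overwrites only the
+ # non-negative positions through its ``where=`` mask:
+ #
+ # >>> import numpy as np
+ # >>> x = np.array([2.1, 2.9, -2.1, -2.9])
+ # >>> res = np.ceil(x)                                # [ 3.,  3., -2., -2.]
+ # >>> np.floor(x, out=res, where=np.greater_equal(x, 0))
+ # array([ 2.,  2., -2., -2.])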
70
+
71
+ @array_function_dispatch(_dispatcher, verify=False, module='numpy')
72
+ def isposinf(x, out=None):
73
+ """
74
+ Test element-wise for positive infinity, return result as bool array.
75
+
76
+ Parameters
77
+ ----------
78
+ x : array_like
79
+ The input array.
80
+ out : array_like, optional
81
+ A location into which the result is stored. If provided, it must have a
82
+ shape that the input broadcasts to. If not provided or None, a
83
+ freshly-allocated boolean array is returned.
84
+
85
+ Returns
86
+ -------
87
+ out : ndarray
88
+ A boolean array with the same dimensions as the input.
89
+ If second argument is not supplied then a boolean array is returned
90
+ with values True where the corresponding element of the input is
91
+ positive infinity and values False where the element of the input is
92
+ not positive infinity.
93
+
94
+ If a second argument is supplied the result is stored there. If the
95
+ type of that array is a numeric type the result is represented as zeros
96
+ and ones, if the type is boolean then as False and True.
97
+ The return value `out` is then a reference to that array.
98
+
99
+ See Also
100
+ --------
101
+ isinf, isneginf, isfinite, isnan
102
+
103
+ Notes
104
+ -----
105
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
106
+ (IEEE 754).
107
+
108
+ Errors result if the second argument is also supplied when x is a scalar
109
+ input, if first and second arguments have different shapes, or if the
110
+ first argument has complex values.
111
+
112
+ Examples
113
+ --------
114
+ >>> np.isposinf(np.PINF)
115
+ True
116
+ >>> np.isposinf(np.inf)
117
+ True
118
+ >>> np.isposinf(np.NINF)
119
+ False
120
+ >>> np.isposinf([-np.inf, 0., np.inf])
121
+ array([False, False, True])
122
+
123
+ >>> x = np.array([-np.inf, 0., np.inf])
124
+ >>> y = np.array([2, 2, 2])
125
+ >>> np.isposinf(x, y)
126
+ array([0, 0, 1])
127
+ >>> y
128
+ array([0, 0, 1])
129
+
130
+ """
131
+ is_inf = nx.isinf(x)
132
+ try:
133
+ signbit = ~nx.signbit(x)
134
+ except TypeError as e:
135
+ dtype = nx.asanyarray(x).dtype
136
+ raise TypeError(f'This operation is not supported for {dtype} values '
137
+ 'because it would be ambiguous.') from e
138
+ else:
139
+ return nx.logical_and(is_inf, signbit, out)
140
+
141
+
142
+ @array_function_dispatch(_dispatcher, verify=False, module='numpy')
143
+ def isneginf(x, out=None):
144
+ """
145
+ Test element-wise for negative infinity, return result as bool array.
146
+
147
+ Parameters
148
+ ----------
149
+ x : array_like
150
+ The input array.
151
+ out : array_like, optional
152
+ A location into which the result is stored. If provided, it must have a
153
+ shape that the input broadcasts to. If not provided or None, a
154
+ freshly-allocated boolean array is returned.
155
+
156
+ Returns
157
+ -------
158
+ out : ndarray
159
+ A boolean array with the same dimensions as the input.
160
+ If second argument is not supplied then a numpy boolean array is
161
+ returned with values True where the corresponding element of the
162
+ input is negative infinity and values False where the element of
163
+ the input is not negative infinity.
164
+
165
+ If a second argument is supplied the result is stored there. If the
166
+ type of that array is a numeric type the result is represented as
167
+ zeros and ones, if the type is boolean then as False and True. The
168
+ return value `out` is then a reference to that array.
169
+
170
+ See Also
171
+ --------
172
+ isinf, isposinf, isnan, isfinite
173
+
174
+ Notes
175
+ -----
176
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
177
+ (IEEE 754).
178
+
179
+ Errors result if the second argument is also supplied when x is a scalar
180
+ input, if first and second arguments have different shapes, or if the
181
+ first argument has complex values.
182
+
183
+ Examples
184
+ --------
185
+ >>> np.isneginf(np.NINF)
186
+ True
187
+ >>> np.isneginf(np.inf)
188
+ False
189
+ >>> np.isneginf(np.PINF)
190
+ False
191
+ >>> np.isneginf([-np.inf, 0., np.inf])
192
+ array([ True, False, False])
193
+
194
+ >>> x = np.array([-np.inf, 0., np.inf])
195
+ >>> y = np.array([2, 2, 2])
196
+ >>> np.isneginf(x, y)
197
+ array([1, 0, 0])
198
+ >>> y
199
+ array([1, 0, 0])
200
+
201
+ """
202
+ is_inf = nx.isinf(x)
203
+ try:
204
+ signbit = nx.signbit(x)
205
+ except TypeError as e:
206
+ dtype = nx.asanyarray(x).dtype
207
+ raise TypeError(f'This operation is not supported for {dtype} values '
208
+ 'because it would be ambiguous.') from e
209
+ else:
210
+ return nx.logical_and(is_inf, signbit, out)
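+ # A closing sketch (not part of the module): both tests above are the
+ # composition of ``isinf`` with ``signbit``, which is also why complex
+ # input raises -- ``signbit`` is undefined for complex dtypes:
+ #
+ # >>> import numpy as np
+ # >>> x = np.array([-np.inf, 0.0, np.inf])
+ # >>> np.logical_and(np.isinf(x), np.signbit(x))      # isneginf
+ # array([ True, False, False])
+ # >>> np.logical_and(np.isinf(x), ~np.signbit(x))     # isposinf
+ # array([False, False,  True])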