koichi12 commited on
Commit
adc48e1
·
verified ·
1 Parent(s): c36407f

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. .venv/lib/python3.11/site-packages/numpy/lib/__init__.py +92 -0
  3. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/__init__.cpython-311.pyc +0 -0
  4. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/_datasource.cpython-311.pyc +0 -0
  5. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/_iotools.cpython-311.pyc +0 -0
  6. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/_version.cpython-311.pyc +0 -0
  7. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/arraypad.cpython-311.pyc +0 -0
  8. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/arraysetops.cpython-311.pyc +0 -0
  9. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/arrayterator.cpython-311.pyc +0 -0
  10. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/format.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/histograms.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/index_tricks.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/mixins.cpython-311.pyc +0 -0
  14. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/nanfunctions.cpython-311.pyc +0 -0
  15. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/polynomial.cpython-311.pyc +0 -0
  16. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/recfunctions.cpython-311.pyc +0 -0
  17. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/scimath.cpython-311.pyc +0 -0
  18. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/setup.cpython-311.pyc +0 -0
  19. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/shape_base.cpython-311.pyc +0 -0
  20. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-311.pyc +0 -0
  21. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/twodim_base.cpython-311.pyc +0 -0
  22. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/type_check.cpython-311.pyc +0 -0
  23. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/ufunclike.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/user_array.cpython-311.pyc +0 -0
  25. .venv/lib/python3.11/site-packages/numpy/lib/__pycache__/utils.cpython-311.pyc +0 -0
  26. .venv/lib/python3.11/site-packages/numpy/lib/_version.py +155 -0
  27. .venv/lib/python3.11/site-packages/numpy/lib/arraypad.py +882 -0
  28. .venv/lib/python3.11/site-packages/numpy/lib/arraysetops.pyi +362 -0
  29. .venv/lib/python3.11/site-packages/numpy/lib/function_base.py +0 -0
  30. .venv/lib/python3.11/site-packages/numpy/lib/nanfunctions.py +1887 -0
  31. .venv/lib/python3.11/site-packages/numpy/lib/nanfunctions.pyi +38 -0
  32. .venv/lib/python3.11/site-packages/numpy/lib/polynomial.py +1453 -0
  33. .venv/lib/python3.11/site-packages/numpy/lib/scimath.pyi +94 -0
  34. .venv/lib/python3.11/site-packages/numpy/lib/tests/__init__.py +0 -0
  35. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-311.pyc +0 -0
  36. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-311.pyc +0 -0
  37. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-311.pyc +0 -0
  38. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-311.pyc +0 -0
  39. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-311.pyc +0 -0
  40. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-311.pyc +0 -0
  41. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-311.pyc +0 -0
  42. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-311.pyc +0 -0
  43. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-311.pyc +0 -0
  44. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-311.pyc +0 -0
  45. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-311.pyc +0 -0
  46. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-311.pyc +0 -0
  47. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-311.pyc +0 -0
  48. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-311.pyc +0 -0
  49. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-311.pyc +0 -0
  50. .venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-311.pyc +0 -0
.gitattributes CHANGED
@@ -385,3 +385,6 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia/cudnn/lib/
385
  .venv/lib/python3.11/site-packages/numpy/distutils/__pycache__/misc_util.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
386
  .venv/lib/python3.11/site-packages/numpy/distutils/__pycache__/ccompiler_opt.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
387
  .venv/lib/python3.11/site-packages/numpy/distutils/__pycache__/system_info.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
 
385
  .venv/lib/python3.11/site-packages/numpy/distutils/__pycache__/misc_util.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
386
  .venv/lib/python3.11/site-packages/numpy/distutils/__pycache__/ccompiler_opt.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
387
  .venv/lib/python3.11/site-packages/numpy/distutils/__pycache__/system_info.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
388
+ .venv/lib/python3.11/site-packages/numpy/testing/_private/__pycache__/utils.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
389
+ .venv/lib/python3.11/site-packages/numpy/ma/__pycache__/core.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
390
+ .venv/lib/python3.11/site-packages/numpy/testing/tests/__pycache__/test_utils.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
.venv/lib/python3.11/site-packages/numpy/lib/__init__.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ **Note:** almost all functions in the ``numpy.lib`` namespace
3
+ are also present in the main ``numpy`` namespace. Please use the
4
+ functions as ``np.<funcname>`` where possible.
5
+
6
+ ``numpy.lib`` is mostly a space for implementing functions that don't
7
+ belong in core or in another NumPy submodule with a clear purpose
8
+ (e.g. ``random``, ``fft``, ``linalg``, ``ma``).
9
+
10
+ Most contains basic functions that are used by several submodules and are
11
+ useful to have in the main name-space.
12
+
13
+ """
14
+
15
+ # Public submodules
16
+ # Note: recfunctions and (maybe) format are public too, but not imported
17
+ from . import mixins
18
+ from . import scimath as emath
19
+
20
+ # Private submodules
21
+ # load module names. See https://github.com/networkx/networkx/issues/5838
22
+ from . import type_check
23
+ from . import index_tricks
24
+ from . import function_base
25
+ from . import nanfunctions
26
+ from . import shape_base
27
+ from . import stride_tricks
28
+ from . import twodim_base
29
+ from . import ufunclike
30
+ from . import histograms
31
+ from . import polynomial
32
+ from . import utils
33
+ from . import arraysetops
34
+ from . import npyio
35
+ from . import arrayterator
36
+ from . import arraypad
37
+ from . import _version
38
+
39
+ from .type_check import *
40
+ from .index_tricks import *
41
+ from .function_base import *
42
+ from .nanfunctions import *
43
+ from .shape_base import *
44
+ from .stride_tricks import *
45
+ from .twodim_base import *
46
+ from .ufunclike import *
47
+ from .histograms import *
48
+
49
+ from .polynomial import *
50
+ from .utils import *
51
+ from .arraysetops import *
52
+ from .npyio import *
53
+ from .arrayterator import Arrayterator
54
+ from .arraypad import *
55
+ from ._version import *
56
+ from numpy.core._multiarray_umath import tracemalloc_domain
57
+
58
+ __all__ = ['emath', 'tracemalloc_domain', 'Arrayterator']
59
+ __all__ += type_check.__all__
60
+ __all__ += index_tricks.__all__
61
+ __all__ += function_base.__all__
62
+ __all__ += shape_base.__all__
63
+ __all__ += stride_tricks.__all__
64
+ __all__ += twodim_base.__all__
65
+ __all__ += ufunclike.__all__
66
+ __all__ += arraypad.__all__
67
+ __all__ += polynomial.__all__
68
+ __all__ += utils.__all__
69
+ __all__ += arraysetops.__all__
70
+ __all__ += npyio.__all__
71
+ __all__ += nanfunctions.__all__
72
+ __all__ += histograms.__all__
73
+
74
+ from numpy._pytesttester import PytestTester
75
+ test = PytestTester(__name__)
76
+ del PytestTester
77
+
78
def __getattr__(attr):
    """
    Module-level ``__getattr__`` (PEP 562): emit a ``DeprecationWarning``
    for access to deprecated module attributes.

    Currently only ``np.lib.math`` — a deprecated alias for the standard
    library ``math`` module — is handled; any other unknown attribute
    raises ``AttributeError`` as usual.
    """
    # Warn for deprecated attributes
    import math
    import warnings

    if attr == 'math':
        warnings.warn(
            "`np.lib.math` is a deprecated alias for the standard library "
            "`math` module (Deprecated Numpy 1.25). Replace usages of "
            "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2)
        return math
    else:
        raise AttributeError("module {!r} has no attribute "
                             "{!r}".format(__name__, attr))
92
+
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (3.35 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/_datasource.cpython-311.pyc ADDED
Binary file (27 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/_iotools.cpython-311.pyc ADDED
Binary file (37.7 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/_version.cpython-311.pyc ADDED
Binary file (7.11 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/arraypad.cpython-311.pyc ADDED
Binary file (30.7 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/arraysetops.cpython-311.pyc ADDED
Binary file (37.2 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/arrayterator.cpython-311.pyc ADDED
Binary file (10.3 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/format.cpython-311.pyc ADDED
Binary file (36.8 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/histograms.cpython-311.pyc ADDED
Binary file (41.9 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/index_tricks.cpython-311.pyc ADDED
Binary file (38.2 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/mixins.cpython-311.pyc ADDED
Binary file (9.25 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/nanfunctions.cpython-311.pyc ADDED
Binary file (70.7 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/polynomial.cpython-311.pyc ADDED
Binary file (56.3 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/recfunctions.cpython-311.pyc ADDED
Binary file (70.4 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/scimath.cpython-311.pyc ADDED
Binary file (18.2 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/setup.cpython-311.pyc ADDED
Binary file (918 Bytes). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/shape_base.cpython-311.pyc ADDED
Binary file (45.6 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-311.pyc ADDED
Binary file (21 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/twodim_base.cpython-311.pyc ADDED
Binary file (39.3 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/type_check.cpython-311.pyc ADDED
Binary file (23.8 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/ufunclike.cpython-311.pyc ADDED
Binary file (7.52 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/user_array.cpython-311.pyc ADDED
Binary file (18.6 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/__pycache__/utils.cpython-311.pyc ADDED
Binary file (46.1 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/_version.py ADDED
@@ -0,0 +1,155 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Utility to compare (NumPy) version strings.
2
+
3
+ The NumpyVersion class allows properly comparing numpy version strings.
4
+ The LooseVersion and StrictVersion classes that distutils provides don't
5
+ work; they don't recognize anything like alpha/beta/rc/dev versions.
6
+
7
+ """
8
+ import re
9
+
10
+
11
+ __all__ = ['NumpyVersion']
12
+
13
+
14
class NumpyVersion:
    """Parse and compare numpy version strings.

    NumPy has the following versioning scheme (numbers given are examples; they
    can be > 9 in principle):

    - Released version: '1.8.0', '1.8.1', etc.
    - Alpha: '1.8.0a1', '1.8.0a2', etc.
    - Beta: '1.8.0b1', '1.8.0b2', etc.
    - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
    - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
    - Development versions after a1: '1.8.0a1.dev-f1234afa',
                                     '1.8.0b2.dev-f1234afa',
                                     '1.8.1rc1.dev-f1234afa', etc.
    - Development versions (no git hash available): '1.8.0.dev-Unknown'

    Instances compare against a valid version string or another
    `NumpyVersion` instance. Note that all development versions of the
    same (pre-)release compare equal.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    vstring : str
        NumPy version string (``np.__version__``).

    Examples
    --------
    >>> from numpy.lib import NumpyVersion
    >>> if NumpyVersion(np.__version__) < '1.7.0':
    ...     print('skip')
    >>> # skip

    >>> NumpyVersion('1.7')  # raises ValueError, add ".0"
    Traceback (most recent call last):
        ...
    ValueError: Not a valid numpy version string

    """

    def __init__(self, vstring):
        # Keep the raw string around for __repr__.
        self.vstring = vstring
        numeric = re.match(r'\d+\.\d+\.\d+', vstring)
        if not numeric:
            raise ValueError("Not a valid numpy version string")

        self.version = numeric.group()
        self.major, self.minor, self.bugfix = (
            int(part) for part in self.version.split('.'))

        remainder = vstring[numeric.end():]
        if not remainder:
            self.pre_release = 'final'
        else:
            # Try alpha, then beta, then release-candidate tags; the first
            # match wins (mirrors the original alpha/beta/rc precedence).
            for pattern in (r'a\d', r'b\d', r'rc\d'):
                tag = re.match(pattern, remainder)
                if tag is not None:
                    self.pre_release = tag.group()
                    break
            else:
                self.pre_release = ''

        self.is_devversion = bool(re.search(r'.dev', vstring))

    def _compare_version(self, other):
        """Compare major.minor.bugfix; return -1, 0 or 1."""
        mine = (self.major, self.minor, self.bugfix)
        theirs = (other.major, other.minor, other.bugfix)
        # Tuple comparison is lexicographic, matching the nested-if cascade.
        return (mine > theirs) - (mine < theirs)

    def _compare_pre_release(self, other):
        """Compare alpha/beta/rc/final; return -1, 0 or 1."""
        if self.pre_release == other.pre_release:
            return 0
        if self.pre_release == 'final':
            return 1
        if other.pre_release == 'final':
            return -1
        # 'aN' < 'bN' < 'rcN' holds under plain string comparison.
        return 1 if self.pre_release > other.pre_release else -1

    def _compare(self, other):
        """Full comparison: numeric part, then pre-release, then dev flag."""
        if not isinstance(other, (str, NumpyVersion)):
            raise ValueError("Invalid object to compare with NumpyVersion.")

        if isinstance(other, str):
            other = NumpyVersion(other)

        vercmp = self._compare_version(other)
        if vercmp != 0:
            return vercmp

        # Same x.y.z version, check for alpha/beta/rc
        vercmp = self._compare_pre_release(other)
        if vercmp != 0:
            return vercmp

        # Same version and same pre-release: a dev version sorts before
        # the corresponding (pre-)release.
        if self.is_devversion is other.is_devversion:
            return 0
        return -1 if self.is_devversion else 1

    def __lt__(self, other):
        return self._compare(other) < 0

    def __le__(self, other):
        return self._compare(other) <= 0

    def __eq__(self, other):
        return self._compare(other) == 0

    def __ne__(self, other):
        return self._compare(other) != 0

    def __gt__(self, other):
        return self._compare(other) > 0

    def __ge__(self, other):
        return self._compare(other) >= 0

    def __repr__(self):
        return f"NumpyVersion({self.vstring})"
.venv/lib/python3.11/site-packages/numpy/lib/arraypad.py ADDED
@@ -0,0 +1,882 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ The arraypad module contains a group of functions to pad values onto the edges
3
+ of an n-dimensional array.
4
+
5
+ """
6
+ import numpy as np
7
+ from numpy.core.overrides import array_function_dispatch
8
+ from numpy.lib.index_tricks import ndindex
9
+
10
+
11
+ __all__ = ['pad']
12
+
13
+
14
+ ###############################################################################
15
+ # Private utility functions.
16
+
17
+
18
def _round_if_needed(arr, dtype):
    """
    Round `arr` in place when the destination dtype is an integer type.

    Parameters
    ----------
    arr : ndarray
        Input array, modified in place when rounding is required.
    dtype : dtype
        The dtype of the destination array.
    """
    destination_is_integer = np.issubdtype(dtype, np.integer)
    if destination_is_integer:
        # Round in place so the later integer cast doesn't truncate.
        arr.round(out=arr)
31
+
32
+
33
def _slice_at_axis(sl, axis):
    """
    Construct tuple of slices to slice an array in the given dimension.

    Parameters
    ----------
    sl : slice
        The slice for the given dimension.
    axis : int
        The axis to which `sl` is applied. All other dimensions are left
        "unsliced".

    Returns
    -------
    sl : tuple of slices
        A tuple with slices matching `shape` in length.

    Examples
    --------
    >>> _slice_at_axis(slice(None, 3, -1), 1)
    (slice(None, None, None), slice(None, 3, -1), Ellipsis)
    """
    # `axis` leading full slices keep every preceding dimension unchanged;
    # the trailing Ellipsis stands in for all dimensions after `axis`.
    return (slice(None),) * axis + (sl,) + (...,)
56
+
57
+
58
def _view_roi(array, original_area_slice, axis):
    """
    Get a view of the current region of interest during iterative padding.

    When padding multiple dimensions iteratively, corner values would be
    overwritten multiple times. This function shrinks the working area for
    the already-padded leading dimensions so that corners are excluded.

    Parameters
    ----------
    array : ndarray
        The array with the region of interest.
    original_area_slice : tuple of slices
        Denotes the area with original values of the unpadded array.
    axis : int
        The currently padded dimension assuming that `axis` is padded before
        `axis` + 1.

    Returns
    -------
    roi : ndarray
        The region of interest of the original `array`.
    """
    boundary = axis + 1
    # Full slices up to and including `axis`, original area beyond it.
    roi_slice = (slice(None),) * boundary + original_area_slice[boundary:]
    return array[roi_slice]
84
+
85
+
86
def _pad_simple(array, pad_width, fill_value=None):
    """
    Pad array on all sides with either a single value or undefined values.

    Parameters
    ----------
    array : ndarray
        Array to grow.
    pad_width : sequence of tuple[int, int]
        Pad width on both sides for each dimension in `arr`.
    fill_value : scalar, optional
        If provided the padded area is filled with this value, otherwise
        the pad area is left undefined.

    Returns
    -------
    padded : ndarray
        The padded array with the same dtype as `array`. Its order will
        default to C-style if `array` is not F-contiguous.
    original_area_slice : tuple
        A tuple of slices pointing to the area of the original array.
    """
    # Compute the grown shape and the slice locating the original data in
    # a single pass over the per-axis pad widths.
    grown_shape = []
    original_area_slice = []
    for size, (left, right) in zip(array.shape, pad_width):
        grown_shape.append(left + size + right)
        original_area_slice.append(slice(left, left + size))
    original_area_slice = tuple(original_area_slice)

    # Preserve Fortran order only when the input is F- but not C-ordered.
    order = 'F' if array.flags.fnc else 'C'
    padded = np.empty(tuple(grown_shape), dtype=array.dtype, order=order)

    if fill_value is not None:
        padded.fill(fill_value)

    # Copy the old array into the correct space.
    padded[original_area_slice] = array

    return padded, original_area_slice
127
+
128
+
129
def _set_pad_area(padded, axis, width_pair, value_pair):
    """
    Set empty-padded area in given dimension.

    Parameters
    ----------
    padded : ndarray
        Array with the pad area which is modified inplace.
    axis : int
        Dimension with the pad area to set.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    value_pair : tuple of scalars or ndarrays
        Values inserted into the pad area on each side. It must match or be
        broadcastable to the shape of `arr`.
    """
    left_width, right_width = width_pair
    left_value, right_value = value_pair

    # Fill the leading pad area.
    padded[_slice_at_axis(slice(None, left_width), axis)] = left_value

    # Fill the trailing pad area.
    right_start = padded.shape[axis] - right_width
    padded[_slice_at_axis(slice(right_start, None), axis)] = right_value
152
+
153
+
154
def _get_edges(padded, axis, width_pair):
    """
    Retrieve edge values from empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the edges are considered.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.

    Returns
    -------
    left_edge, right_edge : ndarray
        Edge values of the valid area in `padded` in the given dimension. Its
        shape will always match `padded` except for the dimension given by
        `axis` which will have a length of 1.
    """
    left_width, right_width = width_pair

    # First valid element after the leading pad area.
    edge_pos = left_width
    left_edge = padded[_slice_at_axis(slice(edge_pos, edge_pos + 1), axis)]

    # Last valid element before the trailing pad area.
    edge_pos = padded.shape[axis] - right_width
    right_edge = padded[_slice_at_axis(slice(edge_pos - 1, edge_pos), axis)]

    return left_edge, right_edge
184
+
185
+
186
def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
    """
    Construct linear ramps for empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the ramps are constructed.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    end_value_pair : (scalar, scalar)
        End values for the linear ramps which form the edge of the fully padded
        array. These values are included in the linear ramps.

    Returns
    -------
    left_ramp, right_ramp : ndarray
        Linear ramps to set on both sides of `padded`.
    """
    edge_pair = _get_edges(padded, axis, width_pair)

    # Each ramp runs from the user-supplied end value towards the array's
    # edge value; endpoint=False excludes the edge value itself so it is
    # not duplicated when the ramp is written next to it.
    left_ramp, right_ramp = (
        np.linspace(
            start=end_value,
            stop=edge.squeeze(axis),  # Dimension is replaced by linspace
            num=width,
            endpoint=False,
            dtype=padded.dtype,
            axis=axis
        )
        for end_value, edge, width in zip(
            end_value_pair, edge_pair, width_pair
        )
    )

    # Reverse linear space in appropriate dimension so the right ramp
    # descends towards the end value at the array's outer boundary.
    right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]

    return left_ramp, right_ramp
228
+
229
+
230
def _get_stats(padded, axis, width_pair, length_pair, stat_func):
    """
    Calculate statistic for the empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the statistic is calculated.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    length_pair : 2-element sequence of None or int
        Gives the number of values in valid area from each side that is
        taken into account when calculating the statistic. If None the entire
        valid area in `padded` is considered.
    stat_func : function
        Function to compute statistic. The expected signature is
        ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.

    Returns
    -------
    left_stat, right_stat : ndarray
        Calculated statistic for both sides of `padded`.
    """
    # Calculate indices of the edges of the area with original values
    left_index = width_pair[0]
    right_index = padded.shape[axis] - width_pair[1]
    # as well as its length
    max_length = right_index - left_index

    # Limit stat_lengths to max_length (None means "use everything")
    left_length, right_length = length_pair
    if left_length is None or max_length < left_length:
        left_length = max_length
    if right_length is None or max_length < right_length:
        right_length = max_length

    if (left_length == 0 or right_length == 0) \
            and stat_func in {np.amax, np.amin}:
        # amax and amin can't operate on an empty array,
        # raise a more descriptive warning here instead of the default one
        raise ValueError("stat_length of 0 yields no value for padding")

    # Calculate statistic for the left side
    left_slice = _slice_at_axis(
        slice(left_index, left_index + left_length), axis)
    left_chunk = padded[left_slice]
    left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
    # Integer dtypes can't represent fractional statistics; round in place.
    _round_if_needed(left_stat, padded.dtype)

    if left_length == right_length == max_length:
        # return early as right_stat must be identical to left_stat
        return left_stat, left_stat

    # Calculate statistic for the right side
    right_slice = _slice_at_axis(
        slice(right_index - right_length, right_index), axis)
    right_chunk = padded[right_slice]
    right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
    _round_if_needed(right_stat, padded.dtype)

    return left_stat, right_stat
294
+
295
+
296
def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
    """
    Pad `axis` of `arr` with reflection.

    Parameters
    ----------
    padded : ndarray
        Input array of arbitrary shape.
    axis : int
        Axis along which to pad `arr`.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    method : str
        Controls method of reflection; options are 'even' or 'odd'.
    include_edge : bool
        If true, edge value is included in reflection, otherwise the edge
        value forms the symmetric axis to the reflection.

    Returns
    -------
    pad_amt : tuple of ints, length 2
        Remaining pad amounts along `axis` after this pass; when both are
        0 this dimension is fully padded, otherwise the caller is expected
        to invoke this function again with the returned widths.
    """
    left_pad, right_pad = width_pair
    old_length = padded.shape[axis] - right_pad - left_pad

    if include_edge:
        # Edge is included, we need to offset the pad amount by 1
        edge_offset = 1
    else:
        edge_offset = 0  # Edge is not included, no need to offset pad amount
        old_length -= 1  # but must be omitted from the chunk

    if left_pad > 0:
        # Pad with reflected values on left side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, left_pad)
        # Slice right to left, stop on or next to edge, start relative to stop
        stop = left_pad - edge_offset
        start = stop + chunk_length
        left_slice = _slice_at_axis(slice(start, stop, -1), axis)
        left_chunk = padded[left_slice]

        if method == "odd":
            # Negate chunk and align with edge so the reflection is
            # point-symmetric about the edge value.
            edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
            left_chunk = 2 * padded[edge_slice] - left_chunk

        # Insert chunk into padded area
        start = left_pad - chunk_length
        stop = left_pad
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = left_chunk
        # Adjust pointer to left edge for next iteration
        left_pad -= chunk_length

    if right_pad > 0:
        # Pad with reflected values on right side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, right_pad)
        # Slice right to left, start on or next to edge, stop relative to start
        start = -right_pad + edge_offset - 2
        stop = start - chunk_length
        right_slice = _slice_at_axis(slice(start, stop, -1), axis)
        right_chunk = padded[right_slice]

        if method == "odd":
            # Negate chunk and align with edge (point-symmetric reflection)
            edge_slice = _slice_at_axis(
                slice(-right_pad - 1, -right_pad), axis)
            right_chunk = 2 * padded[edge_slice] - right_chunk

        # Insert chunk into padded area
        start = padded.shape[axis] - right_pad
        stop = start + chunk_length
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = right_chunk
        # Adjust pointer to right edge for next iteration
        right_pad -= chunk_length

    return left_pad, right_pad
379
+
380
+
381
+ def _set_wrap_both(padded, axis, width_pair, original_period):
382
+ """
383
+ Pad `axis` of `arr` with wrapped values.
384
+
385
+ Parameters
386
+ ----------
387
+ padded : ndarray
388
+ Input array of arbitrary shape.
389
+ axis : int
390
+ Axis along which to pad `arr`.
391
+ width_pair : (int, int)
392
+ Pair of widths that mark the pad area on both sides in the given
393
+ dimension.
394
+ original_period : int
395
+ Original length of data on `axis` of `arr`.
396
+
397
+ Returns
398
+ -------
399
+ pad_amt : tuple of ints, length 2
400
+ New index positions of padding to do along the `axis`. If these are
401
+ both 0, padding is done in this dimension.
402
+ """
403
+ left_pad, right_pad = width_pair
404
+ period = padded.shape[axis] - right_pad - left_pad
405
+ # Avoid wrapping with only a subset of the original area by ensuring period
406
+ # can only be a multiple of the original area's length.
407
+ period = period // original_period * original_period
408
+
409
+ # If the current dimension of `arr` doesn't contain enough valid values
410
+ # (not part of the undefined pad area) we need to pad multiple times.
411
+ # Each time the pad area shrinks on both sides which is communicated with
412
+ # these variables.
413
+ new_left_pad = 0
414
+ new_right_pad = 0
415
+
416
+ if left_pad > 0:
417
+ # Pad with wrapped values on left side
418
+ # First slice chunk from left side of the non-pad area.
419
+ # Use min(period, left_pad) to ensure that chunk is not larger than
420
+ # pad area.
421
+ slice_end = left_pad + period
422
+ slice_start = slice_end - min(period, left_pad)
423
+ right_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
424
+ right_chunk = padded[right_slice]
425
+
426
+ if left_pad > period:
427
+ # Chunk is smaller than pad area
428
+ pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
429
+ new_left_pad = left_pad - period
430
+ else:
431
+ # Chunk matches pad area
432
+ pad_area = _slice_at_axis(slice(None, left_pad), axis)
433
+ padded[pad_area] = right_chunk
434
+
435
+ if right_pad > 0:
436
+ # Pad with wrapped values on right side
437
+ # First slice chunk from right side of the non-pad area.
438
+ # Use min(period, right_pad) to ensure that chunk is not larger than
439
+ # pad area.
440
+ slice_start = -right_pad - period
441
+ slice_end = slice_start + min(period, right_pad)
442
+ left_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
443
+ left_chunk = padded[left_slice]
444
+
445
+ if right_pad > period:
446
+ # Chunk is smaller than pad area
447
+ pad_area = _slice_at_axis(
448
+ slice(-right_pad, -right_pad + period), axis)
449
+ new_right_pad = right_pad - period
450
+ else:
451
+ # Chunk matches pad area
452
+ pad_area = _slice_at_axis(slice(-right_pad, None), axis)
453
+ padded[pad_area] = left_chunk
454
+
455
+ return new_left_pad, new_right_pad
456
+
457
+
458
+ def _as_pairs(x, ndim, as_index=False):
459
+ """
460
+ Broadcast `x` to an array with the shape (`ndim`, 2).
461
+
462
+ A helper function for `pad` that prepares and validates arguments like
463
+ `pad_width` for iteration in pairs.
464
+
465
+ Parameters
466
+ ----------
467
+ x : {None, scalar, array-like}
468
+ The object to broadcast to the shape (`ndim`, 2).
469
+ ndim : int
470
+ Number of pairs the broadcasted `x` will have.
471
+ as_index : bool, optional
472
+ If `x` is not None, try to round each element of `x` to an integer
473
+ (dtype `np.intp`) and ensure every element is positive.
474
+
475
+ Returns
476
+ -------
477
+ pairs : nested iterables, shape (`ndim`, 2)
478
+ The broadcasted version of `x`.
479
+
480
+ Raises
481
+ ------
482
+ ValueError
483
+ If `as_index` is True and `x` contains negative elements.
484
+ Or if `x` is not broadcastable to the shape (`ndim`, 2).
485
+ """
486
+ if x is None:
487
+ # Pass through None as a special case, otherwise np.round(x) fails
488
+ # with an AttributeError
489
+ return ((None, None),) * ndim
490
+
491
+ x = np.array(x)
492
+ if as_index:
493
+ x = np.round(x).astype(np.intp, copy=False)
494
+
495
+ if x.ndim < 3:
496
+ # Optimization: Possibly use faster paths for cases where `x` has
497
+ # only 1 or 2 elements. `np.broadcast_to` could handle these as well
498
+ # but is currently slower
499
+
500
+ if x.size == 1:
501
+ # x was supplied as a single value
502
+ x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2
503
+ if as_index and x < 0:
504
+ raise ValueError("index can't contain negative values")
505
+ return ((x[0], x[0]),) * ndim
506
+
507
+ if x.size == 2 and x.shape != (2, 1):
508
+ # x was supplied with a single value for each side
509
+ # but except case when each dimension has a single value
510
+ # which should be broadcasted to a pair,
511
+ # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
512
+ x = x.ravel() # Ensure x[0], x[1] works
513
+ if as_index and (x[0] < 0 or x[1] < 0):
514
+ raise ValueError("index can't contain negative values")
515
+ return ((x[0], x[1]),) * ndim
516
+
517
+ if as_index and x.min() < 0:
518
+ raise ValueError("index can't contain negative values")
519
+
520
+ # Converting the array with `tolist` seems to improve performance
521
+ # when iterating and indexing the result (see usage in `pad`)
522
+ return np.broadcast_to(x, (ndim, 2)).tolist()
523
+
524
+
525
+ def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
526
+ return (array,)
527
+
528
+
529
+ ###############################################################################
530
+ # Public functions
531
+
532
+
533
+ @array_function_dispatch(_pad_dispatcher, module='numpy')
534
+ def pad(array, pad_width, mode='constant', **kwargs):
535
+ """
536
+ Pad an array.
537
+
538
+ Parameters
539
+ ----------
540
+ array : array_like of rank N
541
+ The array to pad.
542
+ pad_width : {sequence, array_like, int}
543
+ Number of values padded to the edges of each axis.
544
+ ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths
545
+ for each axis.
546
+ ``(before, after)`` or ``((before, after),)`` yields same before
547
+ and after pad for each axis.
548
+ ``(pad,)`` or ``int`` is a shortcut for before = after = pad width
549
+ for all axes.
550
+ mode : str or function, optional
551
+ One of the following string values or a user supplied function.
552
+
553
+ 'constant' (default)
554
+ Pads with a constant value.
555
+ 'edge'
556
+ Pads with the edge values of array.
557
+ 'linear_ramp'
558
+ Pads with the linear ramp between end_value and the
559
+ array edge value.
560
+ 'maximum'
561
+ Pads with the maximum value of all or part of the
562
+ vector along each axis.
563
+ 'mean'
564
+ Pads with the mean value of all or part of the
565
+ vector along each axis.
566
+ 'median'
567
+ Pads with the median value of all or part of the
568
+ vector along each axis.
569
+ 'minimum'
570
+ Pads with the minimum value of all or part of the
571
+ vector along each axis.
572
+ 'reflect'
573
+ Pads with the reflection of the vector mirrored on
574
+ the first and last values of the vector along each
575
+ axis.
576
+ 'symmetric'
577
+ Pads with the reflection of the vector mirrored
578
+ along the edge of the array.
579
+ 'wrap'
580
+ Pads with the wrap of the vector along the axis.
581
+ The first values are used to pad the end and the
582
+ end values are used to pad the beginning.
583
+ 'empty'
584
+ Pads with undefined values.
585
+
586
+ .. versionadded:: 1.17
587
+
588
+ <function>
589
+ Padding function, see Notes.
590
+ stat_length : sequence or int, optional
591
+ Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
592
+ values at edge of each axis used to calculate the statistic value.
593
+
594
+ ``((before_1, after_1), ... (before_N, after_N))`` unique statistic
595
+ lengths for each axis.
596
+
597
+ ``(before, after)`` or ``((before, after),)`` yields same before
598
+ and after statistic lengths for each axis.
599
+
600
+ ``(stat_length,)`` or ``int`` is a shortcut for
601
+ ``before = after = statistic`` length for all axes.
602
+
603
+ Default is ``None``, to use the entire axis.
604
+ constant_values : sequence or scalar, optional
605
+ Used in 'constant'. The values to set the padded values for each
606
+ axis.
607
+
608
+ ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants
609
+ for each axis.
610
+
611
+ ``(before, after)`` or ``((before, after),)`` yields same before
612
+ and after constants for each axis.
613
+
614
+ ``(constant,)`` or ``constant`` is a shortcut for
615
+ ``before = after = constant`` for all axes.
616
+
617
+ Default is 0.
618
+ end_values : sequence or scalar, optional
619
+ Used in 'linear_ramp'. The values used for the ending value of the
620
+ linear_ramp and that will form the edge of the padded array.
621
+
622
+ ``((before_1, after_1), ... (before_N, after_N))`` unique end values
623
+ for each axis.
624
+
625
+ ``(before, after)`` or ``((before, after),)`` yields same before
626
+ and after end values for each axis.
627
+
628
+ ``(constant,)`` or ``constant`` is a shortcut for
629
+ ``before = after = constant`` for all axes.
630
+
631
+ Default is 0.
632
+ reflect_type : {'even', 'odd'}, optional
633
+ Used in 'reflect', and 'symmetric'. The 'even' style is the
634
+ default with an unaltered reflection around the edge value. For
635
+ the 'odd' style, the extended part of the array is created by
636
+ subtracting the reflected values from two times the edge value.
637
+
638
+ Returns
639
+ -------
640
+ pad : ndarray
641
+ Padded array of rank equal to `array` with shape increased
642
+ according to `pad_width`.
643
+
644
+ Notes
645
+ -----
646
+ .. versionadded:: 1.7.0
647
+
648
+ For an array with rank greater than 1, some of the padding of later
649
+ axes is calculated from padding of previous axes. This is easiest to
650
+ think about with a rank 2 array where the corners of the padded array
651
+ are calculated by using padded values from the first axis.
652
+
653
+ The padding function, if used, should modify a rank 1 array in-place. It
654
+ has the following signature::
655
+
656
+ padding_func(vector, iaxis_pad_width, iaxis, kwargs)
657
+
658
+ where
659
+
660
+ vector : ndarray
661
+ A rank 1 array already padded with zeros. Padded values are
662
+ vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:].
663
+ iaxis_pad_width : tuple
664
+ A 2-tuple of ints, iaxis_pad_width[0] represents the number of
665
+ values padded at the beginning of vector where
666
+ iaxis_pad_width[1] represents the number of values padded at
667
+ the end of vector.
668
+ iaxis : int
669
+ The axis currently being calculated.
670
+ kwargs : dict
671
+ Any keyword arguments the function requires.
672
+
673
+ Examples
674
+ --------
675
+ >>> a = [1, 2, 3, 4, 5]
676
+ >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6))
677
+ array([4, 4, 1, ..., 6, 6, 6])
678
+
679
+ >>> np.pad(a, (2, 3), 'edge')
680
+ array([1, 1, 1, ..., 5, 5, 5])
681
+
682
+ >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
683
+ array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])
684
+
685
+ >>> np.pad(a, (2,), 'maximum')
686
+ array([5, 5, 1, 2, 3, 4, 5, 5, 5])
687
+
688
+ >>> np.pad(a, (2,), 'mean')
689
+ array([3, 3, 1, 2, 3, 4, 5, 3, 3])
690
+
691
+ >>> np.pad(a, (2,), 'median')
692
+ array([3, 3, 1, 2, 3, 4, 5, 3, 3])
693
+
694
+ >>> a = [[1, 2], [3, 4]]
695
+ >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
696
+ array([[1, 1, 1, 2, 1, 1, 1],
697
+ [1, 1, 1, 2, 1, 1, 1],
698
+ [1, 1, 1, 2, 1, 1, 1],
699
+ [1, 1, 1, 2, 1, 1, 1],
700
+ [3, 3, 3, 4, 3, 3, 3],
701
+ [1, 1, 1, 2, 1, 1, 1],
702
+ [1, 1, 1, 2, 1, 1, 1]])
703
+
704
+ >>> a = [1, 2, 3, 4, 5]
705
+ >>> np.pad(a, (2, 3), 'reflect')
706
+ array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
707
+
708
+ >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd')
709
+ array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
710
+
711
+ >>> np.pad(a, (2, 3), 'symmetric')
712
+ array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
713
+
714
+ >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd')
715
+ array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
716
+
717
+ >>> np.pad(a, (2, 3), 'wrap')
718
+ array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
719
+
720
+ >>> def pad_with(vector, pad_width, iaxis, kwargs):
721
+ ... pad_value = kwargs.get('padder', 10)
722
+ ... vector[:pad_width[0]] = pad_value
723
+ ... vector[-pad_width[1]:] = pad_value
724
+ >>> a = np.arange(6)
725
+ >>> a = a.reshape((2, 3))
726
+ >>> np.pad(a, 2, pad_with)
727
+ array([[10, 10, 10, 10, 10, 10, 10],
728
+ [10, 10, 10, 10, 10, 10, 10],
729
+ [10, 10, 0, 1, 2, 10, 10],
730
+ [10, 10, 3, 4, 5, 10, 10],
731
+ [10, 10, 10, 10, 10, 10, 10],
732
+ [10, 10, 10, 10, 10, 10, 10]])
733
+ >>> np.pad(a, 2, pad_with, padder=100)
734
+ array([[100, 100, 100, 100, 100, 100, 100],
735
+ [100, 100, 100, 100, 100, 100, 100],
736
+ [100, 100, 0, 1, 2, 100, 100],
737
+ [100, 100, 3, 4, 5, 100, 100],
738
+ [100, 100, 100, 100, 100, 100, 100],
739
+ [100, 100, 100, 100, 100, 100, 100]])
740
+ """
741
+ array = np.asarray(array)
742
+ pad_width = np.asarray(pad_width)
743
+
744
+ if not pad_width.dtype.kind == 'i':
745
+ raise TypeError('`pad_width` must be of integral type.')
746
+
747
+ # Broadcast to shape (array.ndim, 2)
748
+ pad_width = _as_pairs(pad_width, array.ndim, as_index=True)
749
+
750
+ if callable(mode):
751
+ # Old behavior: Use user-supplied function with np.apply_along_axis
752
+ function = mode
753
+ # Create a new zero padded array
754
+ padded, _ = _pad_simple(array, pad_width, fill_value=0)
755
+ # And apply along each axis
756
+
757
+ for axis in range(padded.ndim):
758
+ # Iterate using ndindex as in apply_along_axis, but assuming that
759
+ # function operates inplace on the padded array.
760
+
761
+ # view with the iteration axis at the end
762
+ view = np.moveaxis(padded, axis, -1)
763
+
764
+ # compute indices for the iteration axes, and append a trailing
765
+ # ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
766
+ inds = ndindex(view.shape[:-1])
767
+ inds = (ind + (Ellipsis,) for ind in inds)
768
+ for ind in inds:
769
+ function(view[ind], pad_width[axis], axis, kwargs)
770
+
771
+ return padded
772
+
773
+ # Make sure that no unsupported keywords were passed for the current mode
774
+ allowed_kwargs = {
775
+ 'empty': [], 'edge': [], 'wrap': [],
776
+ 'constant': ['constant_values'],
777
+ 'linear_ramp': ['end_values'],
778
+ 'maximum': ['stat_length'],
779
+ 'mean': ['stat_length'],
780
+ 'median': ['stat_length'],
781
+ 'minimum': ['stat_length'],
782
+ 'reflect': ['reflect_type'],
783
+ 'symmetric': ['reflect_type'],
784
+ }
785
+ try:
786
+ unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
787
+ except KeyError:
788
+ raise ValueError("mode '{}' is not supported".format(mode)) from None
789
+ if unsupported_kwargs:
790
+ raise ValueError("unsupported keyword arguments for mode '{}': {}"
791
+ .format(mode, unsupported_kwargs))
792
+
793
+ stat_functions = {"maximum": np.amax, "minimum": np.amin,
794
+ "mean": np.mean, "median": np.median}
795
+
796
+ # Create array with final shape and original values
797
+ # (padded area is undefined)
798
+ padded, original_area_slice = _pad_simple(array, pad_width)
799
+ # And prepare iteration over all dimensions
800
+ # (zipping may be more readable than using enumerate)
801
+ axes = range(padded.ndim)
802
+
803
+ if mode == "constant":
804
+ values = kwargs.get("constant_values", 0)
805
+ values = _as_pairs(values, padded.ndim)
806
+ for axis, width_pair, value_pair in zip(axes, pad_width, values):
807
+ roi = _view_roi(padded, original_area_slice, axis)
808
+ _set_pad_area(roi, axis, width_pair, value_pair)
809
+
810
+ elif mode == "empty":
811
+ pass # Do nothing as _pad_simple already returned the correct result
812
+
813
+ elif array.size == 0:
814
+ # Only modes "constant" and "empty" can extend empty axes, all other
815
+ # modes depend on `array` not being empty
816
+ # -> ensure every empty axis is only "padded with 0"
817
+ for axis, width_pair in zip(axes, pad_width):
818
+ if array.shape[axis] == 0 and any(width_pair):
819
+ raise ValueError(
820
+ "can't extend empty axis {} using modes other than "
821
+ "'constant' or 'empty'".format(axis)
822
+ )
823
+ # passed, don't need to do anything more as _pad_simple already
824
+ # returned the correct result
825
+
826
+ elif mode == "edge":
827
+ for axis, width_pair in zip(axes, pad_width):
828
+ roi = _view_roi(padded, original_area_slice, axis)
829
+ edge_pair = _get_edges(roi, axis, width_pair)
830
+ _set_pad_area(roi, axis, width_pair, edge_pair)
831
+
832
+ elif mode == "linear_ramp":
833
+ end_values = kwargs.get("end_values", 0)
834
+ end_values = _as_pairs(end_values, padded.ndim)
835
+ for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
836
+ roi = _view_roi(padded, original_area_slice, axis)
837
+ ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
838
+ _set_pad_area(roi, axis, width_pair, ramp_pair)
839
+
840
+ elif mode in stat_functions:
841
+ func = stat_functions[mode]
842
+ length = kwargs.get("stat_length", None)
843
+ length = _as_pairs(length, padded.ndim, as_index=True)
844
+ for axis, width_pair, length_pair in zip(axes, pad_width, length):
845
+ roi = _view_roi(padded, original_area_slice, axis)
846
+ stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
847
+ _set_pad_area(roi, axis, width_pair, stat_pair)
848
+
849
+ elif mode in {"reflect", "symmetric"}:
850
+ method = kwargs.get("reflect_type", "even")
851
+ include_edge = True if mode == "symmetric" else False
852
+ for axis, (left_index, right_index) in zip(axes, pad_width):
853
+ if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
854
+ # Extending singleton dimension for 'reflect' is legacy
855
+ # behavior; it really should raise an error.
856
+ edge_pair = _get_edges(padded, axis, (left_index, right_index))
857
+ _set_pad_area(
858
+ padded, axis, (left_index, right_index), edge_pair)
859
+ continue
860
+
861
+ roi = _view_roi(padded, original_area_slice, axis)
862
+ while left_index > 0 or right_index > 0:
863
+ # Iteratively pad until dimension is filled with reflected
864
+ # values. This is necessary if the pad area is larger than
865
+ # the length of the original values in the current dimension.
866
+ left_index, right_index = _set_reflect_both(
867
+ roi, axis, (left_index, right_index),
868
+ method, include_edge
869
+ )
870
+
871
+ elif mode == "wrap":
872
+ for axis, (left_index, right_index) in zip(axes, pad_width):
873
+ roi = _view_roi(padded, original_area_slice, axis)
874
+ original_period = padded.shape[axis] - right_index - left_index
875
+ while left_index > 0 or right_index > 0:
876
+ # Iteratively pad until dimension is filled with wrapped
877
+ # values. This is necessary if the pad area is larger than
878
+ # the length of the original values in the current dimension.
879
+ left_index, right_index = _set_wrap_both(
880
+ roi, axis, (left_index, right_index), original_period)
881
+
882
+ return padded
.venv/lib/python3.11/site-packages/numpy/lib/arraysetops.pyi ADDED
@@ -0,0 +1,362 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import (
2
+ Literal as L,
3
+ Any,
4
+ TypeVar,
5
+ overload,
6
+ SupportsIndex,
7
+ )
8
+
9
+ from numpy import (
10
+ generic,
11
+ number,
12
+ bool_,
13
+ ushort,
14
+ ubyte,
15
+ uintc,
16
+ uint,
17
+ ulonglong,
18
+ short,
19
+ int8,
20
+ byte,
21
+ intc,
22
+ int_,
23
+ intp,
24
+ longlong,
25
+ half,
26
+ single,
27
+ double,
28
+ longdouble,
29
+ csingle,
30
+ cdouble,
31
+ clongdouble,
32
+ timedelta64,
33
+ datetime64,
34
+ object_,
35
+ str_,
36
+ bytes_,
37
+ void,
38
+ )
39
+
40
+ from numpy._typing import (
41
+ ArrayLike,
42
+ NDArray,
43
+ _ArrayLike,
44
+ _ArrayLikeBool_co,
45
+ _ArrayLikeDT64_co,
46
+ _ArrayLikeTD64_co,
47
+ _ArrayLikeObject_co,
48
+ _ArrayLikeNumber_co,
49
+ )
50
+
51
+ _SCT = TypeVar("_SCT", bound=generic)
52
+ _NumberType = TypeVar("_NumberType", bound=number[Any])
53
+
54
+ # Explicitly set all allowed values to prevent accidental castings to
55
+ # abstract dtypes (their common super-type).
56
+ #
57
+ # Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`)
58
+ # which could result in, for example, `int64` and `float64`producing a
59
+ # `number[_64Bit]` array
60
+ _SCTNoCast = TypeVar(
61
+ "_SCTNoCast",
62
+ bool_,
63
+ ushort,
64
+ ubyte,
65
+ uintc,
66
+ uint,
67
+ ulonglong,
68
+ short,
69
+ byte,
70
+ intc,
71
+ int_,
72
+ longlong,
73
+ half,
74
+ single,
75
+ double,
76
+ longdouble,
77
+ csingle,
78
+ cdouble,
79
+ clongdouble,
80
+ timedelta64,
81
+ datetime64,
82
+ object_,
83
+ str_,
84
+ bytes_,
85
+ void,
86
+ )
87
+
88
+ __all__: list[str]
89
+
90
+ @overload
91
+ def ediff1d(
92
+ ary: _ArrayLikeBool_co,
93
+ to_end: None | ArrayLike = ...,
94
+ to_begin: None | ArrayLike = ...,
95
+ ) -> NDArray[int8]: ...
96
+ @overload
97
+ def ediff1d(
98
+ ary: _ArrayLike[_NumberType],
99
+ to_end: None | ArrayLike = ...,
100
+ to_begin: None | ArrayLike = ...,
101
+ ) -> NDArray[_NumberType]: ...
102
+ @overload
103
+ def ediff1d(
104
+ ary: _ArrayLikeNumber_co,
105
+ to_end: None | ArrayLike = ...,
106
+ to_begin: None | ArrayLike = ...,
107
+ ) -> NDArray[Any]: ...
108
+ @overload
109
+ def ediff1d(
110
+ ary: _ArrayLikeDT64_co | _ArrayLikeTD64_co,
111
+ to_end: None | ArrayLike = ...,
112
+ to_begin: None | ArrayLike = ...,
113
+ ) -> NDArray[timedelta64]: ...
114
+ @overload
115
+ def ediff1d(
116
+ ary: _ArrayLikeObject_co,
117
+ to_end: None | ArrayLike = ...,
118
+ to_begin: None | ArrayLike = ...,
119
+ ) -> NDArray[object_]: ...
120
+
121
+ @overload
122
+ def unique(
123
+ ar: _ArrayLike[_SCT],
124
+ return_index: L[False] = ...,
125
+ return_inverse: L[False] = ...,
126
+ return_counts: L[False] = ...,
127
+ axis: None | SupportsIndex = ...,
128
+ *,
129
+ equal_nan: bool = ...,
130
+ ) -> NDArray[_SCT]: ...
131
+ @overload
132
+ def unique(
133
+ ar: ArrayLike,
134
+ return_index: L[False] = ...,
135
+ return_inverse: L[False] = ...,
136
+ return_counts: L[False] = ...,
137
+ axis: None | SupportsIndex = ...,
138
+ *,
139
+ equal_nan: bool = ...,
140
+ ) -> NDArray[Any]: ...
141
+ @overload
142
+ def unique(
143
+ ar: _ArrayLike[_SCT],
144
+ return_index: L[True] = ...,
145
+ return_inverse: L[False] = ...,
146
+ return_counts: L[False] = ...,
147
+ axis: None | SupportsIndex = ...,
148
+ *,
149
+ equal_nan: bool = ...,
150
+ ) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
151
+ @overload
152
+ def unique(
153
+ ar: ArrayLike,
154
+ return_index: L[True] = ...,
155
+ return_inverse: L[False] = ...,
156
+ return_counts: L[False] = ...,
157
+ axis: None | SupportsIndex = ...,
158
+ *,
159
+ equal_nan: bool = ...,
160
+ ) -> tuple[NDArray[Any], NDArray[intp]]: ...
161
+ @overload
162
+ def unique(
163
+ ar: _ArrayLike[_SCT],
164
+ return_index: L[False] = ...,
165
+ return_inverse: L[True] = ...,
166
+ return_counts: L[False] = ...,
167
+ axis: None | SupportsIndex = ...,
168
+ *,
169
+ equal_nan: bool = ...,
170
+ ) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
171
+ @overload
172
+ def unique(
173
+ ar: ArrayLike,
174
+ return_index: L[False] = ...,
175
+ return_inverse: L[True] = ...,
176
+ return_counts: L[False] = ...,
177
+ axis: None | SupportsIndex = ...,
178
+ *,
179
+ equal_nan: bool = ...,
180
+ ) -> tuple[NDArray[Any], NDArray[intp]]: ...
181
+ @overload
182
+ def unique(
183
+ ar: _ArrayLike[_SCT],
184
+ return_index: L[False] = ...,
185
+ return_inverse: L[False] = ...,
186
+ return_counts: L[True] = ...,
187
+ axis: None | SupportsIndex = ...,
188
+ *,
189
+ equal_nan: bool = ...,
190
+ ) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
191
+ @overload
192
+ def unique(
193
+ ar: ArrayLike,
194
+ return_index: L[False] = ...,
195
+ return_inverse: L[False] = ...,
196
+ return_counts: L[True] = ...,
197
+ axis: None | SupportsIndex = ...,
198
+ *,
199
+ equal_nan: bool = ...,
200
+ ) -> tuple[NDArray[Any], NDArray[intp]]: ...
201
+ @overload
202
+ def unique(
203
+ ar: _ArrayLike[_SCT],
204
+ return_index: L[True] = ...,
205
+ return_inverse: L[True] = ...,
206
+ return_counts: L[False] = ...,
207
+ axis: None | SupportsIndex = ...,
208
+ *,
209
+ equal_nan: bool = ...,
210
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
211
+ @overload
212
+ def unique(
213
+ ar: ArrayLike,
214
+ return_index: L[True] = ...,
215
+ return_inverse: L[True] = ...,
216
+ return_counts: L[False] = ...,
217
+ axis: None | SupportsIndex = ...,
218
+ *,
219
+ equal_nan: bool = ...,
220
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
221
+ @overload
222
+ def unique(
223
+ ar: _ArrayLike[_SCT],
224
+ return_index: L[True] = ...,
225
+ return_inverse: L[False] = ...,
226
+ return_counts: L[True] = ...,
227
+ axis: None | SupportsIndex = ...,
228
+ *,
229
+ equal_nan: bool = ...,
230
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
231
+ @overload
232
+ def unique(
233
+ ar: ArrayLike,
234
+ return_index: L[True] = ...,
235
+ return_inverse: L[False] = ...,
236
+ return_counts: L[True] = ...,
237
+ axis: None | SupportsIndex = ...,
238
+ *,
239
+ equal_nan: bool = ...,
240
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
241
+ @overload
242
+ def unique(
243
+ ar: _ArrayLike[_SCT],
244
+ return_index: L[False] = ...,
245
+ return_inverse: L[True] = ...,
246
+ return_counts: L[True] = ...,
247
+ axis: None | SupportsIndex = ...,
248
+ *,
249
+ equal_nan: bool = ...,
250
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
251
+ @overload
252
+ def unique(
253
+ ar: ArrayLike,
254
+ return_index: L[False] = ...,
255
+ return_inverse: L[True] = ...,
256
+ return_counts: L[True] = ...,
257
+ axis: None | SupportsIndex = ...,
258
+ *,
259
+ equal_nan: bool = ...,
260
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
261
+ @overload
262
+ def unique(
263
+ ar: _ArrayLike[_SCT],
264
+ return_index: L[True] = ...,
265
+ return_inverse: L[True] = ...,
266
+ return_counts: L[True] = ...,
267
+ axis: None | SupportsIndex = ...,
268
+ *,
269
+ equal_nan: bool = ...,
270
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ...
271
+ @overload
272
+ def unique(
273
+ ar: ArrayLike,
274
+ return_index: L[True] = ...,
275
+ return_inverse: L[True] = ...,
276
+ return_counts: L[True] = ...,
277
+ axis: None | SupportsIndex = ...,
278
+ *,
279
+ equal_nan: bool = ...,
280
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ...
281
+
282
+ @overload
283
+ def intersect1d(
284
+ ar1: _ArrayLike[_SCTNoCast],
285
+ ar2: _ArrayLike[_SCTNoCast],
286
+ assume_unique: bool = ...,
287
+ return_indices: L[False] = ...,
288
+ ) -> NDArray[_SCTNoCast]: ...
289
+ @overload
290
+ def intersect1d(
291
+ ar1: ArrayLike,
292
+ ar2: ArrayLike,
293
+ assume_unique: bool = ...,
294
+ return_indices: L[False] = ...,
295
+ ) -> NDArray[Any]: ...
296
+ @overload
297
+ def intersect1d(
298
+ ar1: _ArrayLike[_SCTNoCast],
299
+ ar2: _ArrayLike[_SCTNoCast],
300
+ assume_unique: bool = ...,
301
+ return_indices: L[True] = ...,
302
+ ) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ...
303
+ @overload
304
+ def intersect1d(
305
+ ar1: ArrayLike,
306
+ ar2: ArrayLike,
307
+ assume_unique: bool = ...,
308
+ return_indices: L[True] = ...,
309
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
310
+
311
+ @overload
312
+ def setxor1d(
313
+ ar1: _ArrayLike[_SCTNoCast],
314
+ ar2: _ArrayLike[_SCTNoCast],
315
+ assume_unique: bool = ...,
316
+ ) -> NDArray[_SCTNoCast]: ...
317
+ @overload
318
+ def setxor1d(
319
+ ar1: ArrayLike,
320
+ ar2: ArrayLike,
321
+ assume_unique: bool = ...,
322
+ ) -> NDArray[Any]: ...
323
+
324
+ def in1d(
325
+ ar1: ArrayLike,
326
+ ar2: ArrayLike,
327
+ assume_unique: bool = ...,
328
+ invert: bool = ...,
329
+ ) -> NDArray[bool_]: ...
330
+
331
+ def isin(
332
+ element: ArrayLike,
333
+ test_elements: ArrayLike,
334
+ assume_unique: bool = ...,
335
+ invert: bool = ...,
336
+ *,
337
+ kind: None | str = ...,
338
+ ) -> NDArray[bool_]: ...
339
+
340
+ @overload
341
+ def union1d(
342
+ ar1: _ArrayLike[_SCTNoCast],
343
+ ar2: _ArrayLike[_SCTNoCast],
344
+ ) -> NDArray[_SCTNoCast]: ...
345
+ @overload
346
+ def union1d(
347
+ ar1: ArrayLike,
348
+ ar2: ArrayLike,
349
+ ) -> NDArray[Any]: ...
350
+
351
+ @overload
352
+ def setdiff1d(
353
+ ar1: _ArrayLike[_SCTNoCast],
354
+ ar2: _ArrayLike[_SCTNoCast],
355
+ assume_unique: bool = ...,
356
+ ) -> NDArray[_SCTNoCast]: ...
357
+ @overload
358
+ def setdiff1d(
359
+ ar1: ArrayLike,
360
+ ar2: ArrayLike,
361
+ assume_unique: bool = ...,
362
+ ) -> NDArray[Any]: ...
.venv/lib/python3.11/site-packages/numpy/lib/function_base.py ADDED
The diff for this file is too large to render. See raw diff
 
.venv/lib/python3.11/site-packages/numpy/lib/nanfunctions.py ADDED
@@ -0,0 +1,1887 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions that ignore NaN.
3
+
4
+ Functions
5
+ ---------
6
+
7
+ - `nanmin` -- minimum non-NaN value
8
+ - `nanmax` -- maximum non-NaN value
9
+ - `nanargmin` -- index of minimum non-NaN value
10
+ - `nanargmax` -- index of maximum non-NaN value
11
+ - `nansum` -- sum of non-NaN values
12
+ - `nanprod` -- product of non-NaN values
13
+ - `nancumsum` -- cumulative sum of non-NaN values
14
+ - `nancumprod` -- cumulative product of non-NaN values
15
+ - `nanmean` -- mean of non-NaN values
16
+ - `nanvar` -- variance of non-NaN values
17
+ - `nanstd` -- standard deviation of non-NaN values
18
+ - `nanmedian` -- median of non-NaN values
19
+ - `nanquantile` -- qth quantile of non-NaN values
20
+ - `nanpercentile` -- qth percentile of non-NaN values
21
+
22
+ """
23
+ import functools
24
+ import warnings
25
+ import numpy as np
26
+ from numpy.lib import function_base
27
+ from numpy.core import overrides
28
+
29
+
30
+ array_function_dispatch = functools.partial(
31
+ overrides.array_function_dispatch, module='numpy')
32
+
33
+
34
+ __all__ = [
35
+ 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
36
+ 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
37
+ 'nancumsum', 'nancumprod', 'nanquantile'
38
+ ]
39
+
40
+
41
+ def _nan_mask(a, out=None):
42
+ """
43
+ Parameters
44
+ ----------
45
+ a : array-like
46
+ Input array with at least 1 dimension.
47
+ out : ndarray, optional
48
+ Alternate output array in which to place the result. The default
49
+ is ``None``; if provided, it must have the same shape as the
50
+ expected output and will prevent the allocation of a new array.
51
+
52
+ Returns
53
+ -------
54
+ y : bool ndarray or True
55
+ A bool array where ``np.nan`` positions are marked with ``False``
56
+ and other positions are marked with ``True``. If the type of ``a``
57
+ is such that it can't possibly contain ``np.nan``, returns ``True``.
58
+ """
59
+ # we assume that a is an array for this private function
60
+
61
+ if a.dtype.kind not in 'fc':
62
+ return True
63
+
64
+ y = np.isnan(a, out=out)
65
+ y = np.invert(y, out=y)
66
+ return y
67
+
68
+ def _replace_nan(a, val):
69
+ """
70
+ If `a` is of inexact type, make a copy of `a`, replace NaNs with
71
+ the `val` value, and return the copy together with a boolean mask
72
+ marking the locations where NaNs were present. If `a` is not of
73
+ inexact type, do nothing and return `a` together with a mask of None.
74
+
75
+ Note that scalars will end up as array scalars, which is important
76
+ for using the result as the value of the out argument in some
77
+ operations.
78
+
79
+ Parameters
80
+ ----------
81
+ a : array-like
82
+ Input array.
83
+ val : float
84
+ NaN values are set to val before doing the operation.
85
+
86
+ Returns
87
+ -------
88
+ y : ndarray
89
+ If `a` is of inexact type, return a copy of `a` with the NaNs
90
+ replaced by the fill value, otherwise return `a`.
91
+ mask: {bool, None}
92
+ If `a` is of inexact type, return a boolean mask marking locations of
93
+ NaNs, otherwise return None.
94
+
95
+ """
96
+ a = np.asanyarray(a)
97
+
98
+ if a.dtype == np.object_:
99
+ # object arrays do not support `isnan` (gh-9009), so make a guess
100
+ mask = np.not_equal(a, a, dtype=bool)
101
+ elif issubclass(a.dtype.type, np.inexact):
102
+ mask = np.isnan(a)
103
+ else:
104
+ mask = None
105
+
106
+ if mask is not None:
107
+ a = np.array(a, subok=True, copy=True)
108
+ np.copyto(a, val, where=mask)
109
+
110
+ return a, mask
111
+
112
+
113
+ def _copyto(a, val, mask):
114
+ """
115
+ Replace values in `a` with NaN where `mask` is True. This differs from
116
+ copyto in that it will deal with the case where `a` is a numpy scalar.
117
+
118
+ Parameters
119
+ ----------
120
+ a : ndarray or numpy scalar
121
+ Array or numpy scalar some of whose values are to be replaced
122
+ by val.
123
+ val : numpy scalar
124
+ Value used a replacement.
125
+ mask : ndarray, scalar
126
+ Boolean array. Where True the corresponding element of `a` is
127
+ replaced by `val`. Broadcasts.
128
+
129
+ Returns
130
+ -------
131
+ res : ndarray, scalar
132
+ Array with elements replaced or scalar `val`.
133
+
134
+ """
135
+ if isinstance(a, np.ndarray):
136
+ np.copyto(a, val, where=mask, casting='unsafe')
137
+ else:
138
+ a = a.dtype.type(val)
139
+ return a
140
+
141
+
142
+ def _remove_nan_1d(arr1d, overwrite_input=False):
143
+ """
144
+ Equivalent to arr1d[~arr1d.isnan()], but in a different order
145
+
146
+ Presumably faster as it incurs fewer copies
147
+
148
+ Parameters
149
+ ----------
150
+ arr1d : ndarray
151
+ Array to remove nans from
152
+ overwrite_input : bool
153
+ True if `arr1d` can be modified in place
154
+
155
+ Returns
156
+ -------
157
+ res : ndarray
158
+ Array with nan elements removed
159
+ overwrite_input : bool
160
+ True if `res` can be modified in place, given the constraint on the
161
+ input
162
+ """
163
+ if arr1d.dtype == object:
164
+ # object arrays do not support `isnan` (gh-9009), so make a guess
165
+ c = np.not_equal(arr1d, arr1d, dtype=bool)
166
+ else:
167
+ c = np.isnan(arr1d)
168
+
169
+ s = np.nonzero(c)[0]
170
+ if s.size == arr1d.size:
171
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
172
+ stacklevel=6)
173
+ return arr1d[:0], True
174
+ elif s.size == 0:
175
+ return arr1d, overwrite_input
176
+ else:
177
+ if not overwrite_input:
178
+ arr1d = arr1d.copy()
179
+ # select non-nans at end of array
180
+ enonan = arr1d[-s.size:][~c[-s.size:]]
181
+ # fill nans in beginning of array with non-nans of end
182
+ arr1d[s[:enonan.size]] = enonan
183
+
184
+ return arr1d[:-s.size], True
185
+
186
+
187
+ def _divide_by_count(a, b, out=None):
188
+ """
189
+ Compute a/b ignoring invalid results. If `a` is an array the division
190
+ is done in place. If `a` is a scalar, then its type is preserved in the
191
+ output. If out is None, then a is used instead so that the division
192
+ is in place. Note that this is only called with `a` an inexact type.
193
+
194
+ Parameters
195
+ ----------
196
+ a : {ndarray, numpy scalar}
197
+ Numerator. Expected to be of inexact type but not checked.
198
+ b : {ndarray, numpy scalar}
199
+ Denominator.
200
+ out : ndarray, optional
201
+ Alternate output array in which to place the result. The default
202
+ is ``None``; if provided, it must have the same shape as the
203
+ expected output, but the type will be cast if necessary.
204
+
205
+ Returns
206
+ -------
207
+ ret : {ndarray, numpy scalar}
208
+ The return value is a/b. If `a` was an ndarray the division is done
209
+ in place. If `a` is a numpy scalar, the division preserves its type.
210
+
211
+ """
212
+ with np.errstate(invalid='ignore', divide='ignore'):
213
+ if isinstance(a, np.ndarray):
214
+ if out is None:
215
+ return np.divide(a, b, out=a, casting='unsafe')
216
+ else:
217
+ return np.divide(a, b, out=out, casting='unsafe')
218
+ else:
219
+ if out is None:
220
+ # Precaution against reduced object arrays
221
+ try:
222
+ return a.dtype.type(a / b)
223
+ except AttributeError:
224
+ return a / b
225
+ else:
226
+ # This is questionable, but currently a numpy scalar can
227
+ # be output to a zero dimensional array.
228
+ return np.divide(a, b, out=out, casting='unsafe')
229
+
230
+
231
+ def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None,
232
+ initial=None, where=None):
233
+ return (a, out)
234
+
235
+
236
@array_function_dispatch(_nanmin_dispatcher)
def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
           where=np._NoValue):
    """
    Return the minimum of an array or along an axis, ignoring any NaNs.

    When an all-NaN slice is encountered a ``RuntimeWarning`` is raised
    and NaN is returned for that slice.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose minimum is desired.  Converted
        to an array if necessary.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the minimum is computed.  By default
        the flattened array is used.
    out : ndarray, optional
        Alternate output array of the expected shape; the dtype is cast
        if necessary.  See :ref:`ufuncs-output-type`.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.  Any non-default value is passed
        through to the ``min`` method of ndarray subclasses; subclasses
        that do not implement it will raise.
    initial : scalar, optional
        The maximum value of an output element.  Must be present to
        allow computation on an empty slice.  See `~numpy.ufunc.reduce`.
    where : array_like of bool, optional
        Elements to compare for the minimum.  See `~numpy.ufunc.reduce`.

    Returns
    -------
    nanmin : ndarray
        Same dtype as `a`, with the specified axis removed.  A scalar
        when `a` is 0-d or `axis` is None.

    See Also
    --------
    nanmax, amin, fmin, minimum, isnan, isfinite

    Notes
    -----
    Per IEEE 754, NaN is not equivalent to infinity: +inf is treated as
    a very large number and -inf as a very small one.  For integer
    input this function is equivalent to ``np.min``.
    """
    # Forward only the options the caller actually supplied, so
    # subclasses lacking e.g. ``keepdims`` are not broken by defaults.
    kwargs = {name: value
              for name, value in (('keepdims', keepdims),
                                  ('initial', initial),
                                  ('where', where))
              if value is not np._NoValue}

    if type(a) is np.ndarray and a.dtype != np.object_:
        # Fast path: fmin ignores NaNs, but is only trustworthy for
        # plain ndarrays of non-object dtype (gh-9009, gh-8975).
        res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
        if np.isnan(res).any():
            warnings.warn("All-NaN slice encountered", RuntimeWarning,
                          stacklevel=2)
        return res

    # Slow path, safe for subclasses: mask NaNs with +inf, reduce, then
    # restore NaN wherever an entire slice was NaN.
    a, mask = _replace_nan(a, +np.inf)
    res = np.amin(a, axis=axis, out=out, **kwargs)
    if mask is None:
        return res

    kwargs.pop("initial", None)  # np.all does not accept ``initial``
    mask = np.all(mask, axis=axis, **kwargs)
    if np.any(mask):
        res = _copyto(res, np.nan, mask)
        warnings.warn("All-NaN axis encountered", RuntimeWarning,
                      stacklevel=2)
    return res
362
+
363
+
364
+ def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None,
365
+ initial=None, where=None):
366
+ return (a, out)
367
+
368
+
369
@array_function_dispatch(_nanmax_dispatcher)
def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
           where=np._NoValue):
    """
    Return the maximum of an array or along an axis, ignoring any NaNs.

    When an all-NaN slice is encountered a ``RuntimeWarning`` is raised
    and NaN is returned for that slice.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose maximum is desired.  Converted
        to an array if necessary.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the maximum is computed.  By default
        the flattened array is used.
    out : ndarray, optional
        Alternate output array of the expected shape; the dtype is cast
        if necessary.  See :ref:`ufuncs-output-type`.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.  Any non-default value is passed
        through to the ``max`` method of ndarray subclasses; subclasses
        that do not implement it will raise.
    initial : scalar, optional
        The minimum value of an output element.  Must be present to
        allow computation on an empty slice.  See `~numpy.ufunc.reduce`.
    where : array_like of bool, optional
        Elements to compare for the maximum.  See `~numpy.ufunc.reduce`.

    Returns
    -------
    nanmax : ndarray
        Same dtype as `a`, with the specified axis removed.  A scalar
        when `a` is 0-d or `axis` is None.

    See Also
    --------
    nanmin, amax, fmax, maximum, isnan, isfinite

    Notes
    -----
    Per IEEE 754, NaN is not equivalent to infinity: +inf is treated as
    a very large number and -inf as a very small one.  For integer
    input this function is equivalent to ``np.max``.
    """
    # Forward only the options the caller actually supplied, so
    # subclasses lacking e.g. ``keepdims`` are not broken by defaults.
    kwargs = {name: value
              for name, value in (('keepdims', keepdims),
                                  ('initial', initial),
                                  ('where', where))
              if value is not np._NoValue}

    if type(a) is np.ndarray and a.dtype != np.object_:
        # Fast path: fmax ignores NaNs, but is only trustworthy for
        # plain ndarrays of non-object dtype (gh-9009, gh-8975).
        res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
        if np.isnan(res).any():
            warnings.warn("All-NaN slice encountered", RuntimeWarning,
                          stacklevel=2)
        return res

    # Slow path, safe for subclasses: mask NaNs with -inf, reduce, then
    # restore NaN wherever an entire slice was NaN.
    a, mask = _replace_nan(a, -np.inf)
    res = np.amax(a, axis=axis, out=out, **kwargs)
    if mask is None:
        return res

    kwargs.pop("initial", None)  # np.all does not accept ``initial``
    mask = np.all(mask, axis=axis, **kwargs)
    if np.any(mask):
        res = _copyto(res, np.nan, mask)
        warnings.warn("All-NaN axis encountered", RuntimeWarning,
                      stacklevel=2)
    return res
495
+
496
+
497
+ def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None):
498
+ return (a,)
499
+
500
+
501
@array_function_dispatch(_nanargmin_dispatcher)
def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue):
    """
    Indices of the minimum values along an axis, ignoring NaNs.

    For all-NaN slices a ``ValueError`` is raised.  Warning: the
    results cannot be trusted if a slice contains only NaNs and Infs.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate.  By default the flattened input is
        used.
    out : array, optional
        Output array of the appropriate shape and dtype for the result.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.

    Returns
    -------
    index_array : ndarray
        An array of indices or a single index value.

    See Also
    --------
    argmin, nanargmax
    """
    # NaNs become +inf so they can never win the argmin -- unless an
    # entire slice was NaN, which must be rejected explicitly.
    a, mask = _replace_nan(a, np.inf)
    if mask is not None and np.any(np.all(mask, axis=axis)):
        raise ValueError("All-NaN slice encountered")
    return np.argmin(a, axis=axis, out=out, keepdims=keepdims)
555
+
556
+
557
+ def _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None):
558
+ return (a,)
559
+
560
+
561
@array_function_dispatch(_nanargmax_dispatcher)
def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue):
    """
    Indices of the maximum values along an axis, ignoring NaNs.

    For all-NaN slices a ``ValueError`` is raised.  Warning: the
    results cannot be trusted if a slice contains only NaNs and -Infs.

    Parameters
    ----------
    a : array_like
        Input data.
    axis : int, optional
        Axis along which to operate.  By default the flattened input is
        used.
    out : array, optional
        Output array of the appropriate shape and dtype for the result.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.

    Returns
    -------
    index_array : ndarray
        An array of indices or a single index value.

    See Also
    --------
    argmax, nanargmin
    """
    # NaNs become -inf so they can never win the argmax -- unless an
    # entire slice was NaN, which must be rejected explicitly.
    a, mask = _replace_nan(a, -np.inf)
    if mask is not None and np.any(np.all(mask, axis=axis)):
        raise ValueError("All-NaN slice encountered")
    return np.argmax(a, axis=axis, out=out, keepdims=keepdims)
616
+
617
+
618
+ def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
619
+ initial=None, where=None):
620
+ return (a, out)
621
+
622
+
623
@array_function_dispatch(_nansum_dispatcher)
def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
           initial=np._NoValue, where=np._NoValue):
    """
    Sum of array elements over a given axis, treating NaNs as zero.

    In NumPy versions <= 1.9.0 NaN was returned for slices that are
    all-NaN or empty; in later versions zero is returned.

    Parameters
    ----------
    a : array_like
        Array containing numbers whose sum is desired.  Converted to an
        array if necessary.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the sum is computed.  By default the
        flattened array is summed.
    dtype : data-type, optional
        Dtype of the result and of the accumulator.  Defaults to the
        dtype of `a`, except that integer input narrower than the
        platform (u)intp is widened.  For inexact input, must be
        inexact.
    out : ndarray, optional
        Alternate output array of the expected shape; the dtype is cast
        if necessary.  Casting NaN to integer can yield unexpected
        results.  See :ref:`ufuncs-output-type`.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.  Any non-default value is passed
        through to the ``sum`` method of ndarray subclasses.
    initial : scalar, optional
        Starting value for the sum.  See `~numpy.ufunc.reduce`.
    where : array_like of bool, optional
        Elements to include in the sum.  See `~numpy.ufunc.reduce`.

    Returns
    -------
    nansum : ndarray
        A new array holding the result, unless `out` was specified, in
        which case `out` is returned.

    See Also
    --------
    numpy.sum : Sum across array propagating NaNs.
    isnan : Show which elements are NaN.
    isfinite : Show which elements are not NaN or +/-inf.

    Notes
    -----
    If both positive and negative infinity are present, the sum will be
    NaN.

    Examples
    --------
    >>> np.nansum([1, np.nan])
    1.0
    """
    # The NaN mask is not needed here (it was previously bound to an
    # unused local): NaNs simply contribute 0 to the sum.
    a, _ = _replace_nan(a, 0)
    return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
                  initial=initial, where=where)
725
+
726
+
727
+ def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
728
+ initial=None, where=None):
729
+ return (a, out)
730
+
731
+
732
@array_function_dispatch(_nanprod_dispatcher)
def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
            initial=np._NoValue, where=np._NoValue):
    """
    Product of array elements over a given axis, treating NaNs as ones.

    One is returned for slices that are all-NaN or empty.

    .. versionadded:: 1.10.0

    Parameters
    ----------
    a : array_like
        Array containing numbers whose product is desired.  Converted
        to an array if necessary.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the product is computed.  By default
        the flattened array is used.
    dtype : data-type, optional
        Dtype of the result and of the accumulator.  Defaults to the
        dtype of `a`, except that integer input narrower than the
        platform (u)intp is widened.  For inexact input, must be
        inexact.
    out : ndarray, optional
        Alternate output array of the expected shape; the dtype is cast
        if necessary.  Casting NaN to integer can yield unexpected
        results.  See :ref:`ufuncs-output-type`.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against the original array.
    initial : scalar, optional
        Starting value for the product.  See `~numpy.ufunc.reduce`.
    where : array_like of bool, optional
        Elements to include in the product.  See `~numpy.ufunc.reduce`.

    Returns
    -------
    nanprod : ndarray
        A new array holding the result, unless `out` was specified, in
        which case `out` is returned.

    See Also
    --------
    numpy.prod : Product across array propagating NaNs.
    isnan : Show which elements are NaN.

    Examples
    --------
    >>> np.nanprod([1, np.nan])
    1.0
    """
    # The NaN mask is not needed here (it was previously bound to an
    # unused local): NaNs simply contribute 1 to the product.
    a, _ = _replace_nan(a, 1)
    return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
                   initial=initial, where=where)
808
+
809
+
810
+ def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None):
811
+ return (a, out)
812
+
813
+
814
@array_function_dispatch(_nancumsum_dispatcher)
def nancumsum(a, axis=None, dtype=None, out=None):
    """
    Cumulative sum over a given axis, treating NaNs as zero.

    The running sum does not change when NaNs are encountered, and
    leading NaNs are replaced by zeros.  Zeros are returned for slices
    that are all-NaN or empty.

    .. versionadded:: 1.12.0

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative sum is computed.  The default
        (None) computes the cumsum over the flattened array.
    dtype : dtype, optional
        Dtype of the result and of the accumulator.  Defaults to the
        dtype of `a`, except that integer input with lower precision
        than the default platform integer is widened to it.
    out : ndarray, optional
        Alternative output array with the same shape and buffer length
        as the expected output; the dtype is cast if necessary.  See
        :ref:`ufuncs-output-type`.

    Returns
    -------
    nancumsum : ndarray
        A new array holding the result, unless `out` was specified, in
        which case `out` is returned.

    See Also
    --------
    numpy.cumsum : Cumulative sum across array propagating NaNs.
    isnan : Show which elements are NaN.

    Examples
    --------
    >>> np.nancumsum([1, np.nan])
    array([1., 1.])
    """
    # The NaN mask is not needed here (it was previously bound to an
    # unused local): NaNs simply contribute 0 to the running sum.
    a, _ = _replace_nan(a, 0)
    return np.cumsum(a, axis=axis, dtype=dtype, out=out)
878
+
879
+
880
+ def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None):
881
+ return (a, out)
882
+
883
+
884
@array_function_dispatch(_nancumprod_dispatcher)
def nancumprod(a, axis=None, dtype=None, out=None):
    """
    Cumulative product over a given axis, treating NaNs as one.

    The running product does not change when NaNs are encountered, and
    leading NaNs are replaced by ones.  Ones are returned for slices
    that are all-NaN or empty.

    .. versionadded:: 1.12.0

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int, optional
        Axis along which the cumulative product is computed.  By
        default the input is flattened.
    dtype : dtype, optional
        Dtype of the result and of the accumulator.  Defaults to the
        dtype of `a`, except that integer input with lower precision
        than the default platform integer is widened to it.
    out : ndarray, optional
        Alternative output array with the same shape and buffer length
        as the expected output; resulting values are cast if necessary.

    Returns
    -------
    nancumprod : ndarray
        A new array holding the result, unless `out` was specified, in
        which case `out` is returned.

    See Also
    --------
    numpy.cumprod : Cumulative product across array propagating NaNs.
    isnan : Show which elements are NaN.

    Examples
    --------
    >>> np.nancumprod([1, np.nan])
    array([1., 1.])
    """
    # The NaN mask is not needed here (it was previously bound to an
    # unused local): NaNs simply contribute 1 to the running product.
    a, _ = _replace_nan(a, 1)
    return np.cumprod(a, axis=axis, dtype=dtype, out=out)
945
+
946
+
947
+ def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
948
+ *, where=None):
949
+ return (a, out)
950
+
951
+
952
@array_function_dispatch(_nanmean_dispatcher)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
            *, where=np._NoValue):
    """
    Compute the arithmetic mean along the specified axis, ignoring NaNs.

    The average is taken over the flattened array by default, otherwise
    over the specified axis. `float64` intermediate and return values
    are used for integer inputs. For all-NaN slices, NaN is returned and
    a `RuntimeWarning` is raised.

    .. versionadded:: 1.8.0

    Parameters
    ----------
    a : array_like
        Array containing numbers whose mean is desired. If `a` is not an
        array, a conversion is attempted.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the means are computed. The default is
        to compute the mean of the flattened array.
    dtype : data-type, optional
        Type to use in computing the mean. For integer inputs the
        default is `float64`; for inexact inputs it is the input dtype.
    out : ndarray, optional
        Alternate output array in which to place the result; must have
        the same shape as the expected output (type is cast if needed).
    keepdims : bool, optional
        If True, the reduced axes are left in the result with size one
        so it broadcasts against `a`. Any non-default value is passed
        through to the `mean`/`sum` methods of ndarray sub-classes,
        which may raise if they do not implement `keepdims`.
    where : array_like of bool, optional
        Elements to include in the mean. See `~numpy.ufunc.reduce`.

        .. versionadded:: 1.22.0

    Returns
    -------
    m : ndarray, see dtype parameter above
        A new array of mean values, or a reference to `out` if given.
        NaN is returned for slices that contain only NaNs.

    See Also
    --------
    average : Weighted average
    mean : Arithmetic mean taken while not ignoring NaNs
    var, nanvar

    Notes
    -----
    The mean is the sum of the non-NaN elements along the axis divided
    by the number of non-NaN elements. For floating-point input the
    computation uses the input precision; pass a higher-precision
    `dtype` to reduce accumulation error (especially for `float32`).

    Examples
    --------
    >>> a = np.array([[1, np.nan], [3, 4]])
    >>> np.nanmean(a)
    2.6666666666666665
    >>> np.nanmean(a, axis=0)
    array([2., 4.])

    """
    data, nan_mask = _replace_nan(a, 0)
    if nan_mask is None:
        # No NaNs possible/present: the plain mean suffices.
        return np.mean(data, axis=axis, dtype=dtype, out=out,
                       keepdims=keepdims, where=where)

    if dtype is not None:
        dtype = np.dtype(dtype)
    if dtype is not None and not issubclass(dtype.type, np.inexact):
        raise TypeError("If a is inexact, then dtype must be inexact")
    if out is not None and not issubclass(out.dtype.type, np.inexact):
        raise TypeError("If a is inexact, then out must be inexact")

    # Count valid (non-NaN) entries and sum the array with NaNs zeroed.
    counts = np.sum(~nan_mask, axis=axis, dtype=np.intp, keepdims=keepdims,
                    where=where)
    totals = np.sum(data, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
                    where=where)
    result = _divide_by_count(totals, counts, out=out)

    if (counts == 0).any():
        # Division by a zero count already produced NaN there, so only
        # the warning is needed.
        warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
    return result
1056
+
1057
+
1058
def _nanmedian1d(arr1d, overwrite_input=False):
    """
    Private rank-1 helper: compute the median of `arr1d` ignoring NaNs.
    See nanmedian for parameter usage.
    """
    cleaned, overwrite_input = _remove_nan_1d(
        arr1d, overwrite_input=overwrite_input,
    )

    if cleaned.size == 0:
        # All entries were NaN: return a nan-esque scalar of the
        # appropriate type (and unit) — matters for `timedelta64`
        # and `complexfloating`.
        return arr1d[-1]

    return np.median(cleaned, overwrite_input=overwrite_input)
1073
+
1074
+
1075
def _nanmedian(a, axis=None, out=None, overwrite_input=False):
    """
    Private NaN-aware median without extended-axis or keepdims support;
    those are layered on via _ureduce. See nanmedian for parameter usage.
    """
    if axis is None or a.ndim == 1:
        med = _nanmedian1d(a.ravel(), overwrite_input)
        if out is None:
            return med
        out[...] = med
        return out

    # sort + indexing (_nanmedian_small) is faster than apply_along_axis
    # for short reduction axes; threshold benchmarked with shuffled
    # (50, 50, x) arrays containing a few NaN.
    if a.shape[axis] < 600:
        return _nanmedian_small(a, axis, out, overwrite_input)
    result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
    if out is not None:
        out[...] = result
    return result
1099
+
1100
+
1101
+ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
1102
+ """
1103
+ sort + indexing median, faster for small medians along multiple
1104
+ dimensions due to the high overhead of apply_along_axis
1105
+
1106
+ see nanmedian for parameter usage
1107
+ """
1108
+ a = np.ma.masked_array(a, np.isnan(a))
1109
+ m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
1110
+ for i in range(np.count_nonzero(m.mask.ravel())):
1111
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
1112
+ stacklevel=5)
1113
+
1114
+ fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan
1115
+ if out is not None:
1116
+ out[...] = m.filled(fill_value)
1117
+ return out
1118
+ return m.filled(fill_value)
1119
+
1120
+
1121
+ def _nanmedian_dispatcher(
1122
+ a, axis=None, out=None, overwrite_input=None, keepdims=None):
1123
+ return (a, out)
1124
+
1125
+
1126
@array_function_dispatch(_nanmedian_dispatcher)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
    """
    Compute the median along the specified axis, while ignoring NaNs.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the medians are computed. The default
        is to compute the median of the flattened array. A sequence of
        axes is supported since version 1.9.0.
    out : ndarray, optional
        Alternative output array in which to place the result; must
        have the same shape and buffer length as the expected output
        (output type is cast if necessary).
    overwrite_input : bool, optional
        If True, allow the memory of `a` to be used (and modified) by
        the computation, saving memory. Treat the input as undefined
        afterwards. Raises if True and `a` is not already an `ndarray`.
    keepdims : bool, optional
        If True, the reduced axes are left in the result with size one
        so it broadcasts against `a`. Any non-default value is passed
        through (for empty arrays) to the underlying array's `mean`
        method, which may raise if `keepdims` is unsupported.

    Returns
    -------
    median : ndarray
        A new array holding the result (``float64`` output for integer
        or lower-precision float input, otherwise the input dtype), or
        a reference to `out` if given.

    See Also
    --------
    mean, median, percentile

    Notes
    -----
    For a vector ``V`` of length ``N``, the median is
    ``V_sorted[(N-1)/2]`` for odd ``N`` and the mean of the two middle
    values of ``V_sorted`` for even ``N``, NaNs excluded.

    Examples
    --------
    >>> a = np.array([[10., np.nan, 4.], [3., 2., 1.]])
    >>> np.nanmedian(a)
    3.0
    >>> np.nanmedian(a, axis=0)
    array([6.5, 2. , 2.5])

    """
    arr = np.asanyarray(a)
    # apply_along_axis in _nanmedian doesn't handle empty arrays well,
    # so deal with them upfront; nanmean produces the expected
    # NaN-filled result (and warning) for empty input.
    if arr.size == 0:
        return np.nanmean(arr, axis, out=out, keepdims=keepdims)

    return function_base._ureduce(arr, func=_nanmedian, keepdims=keepdims,
                                  axis=axis, out=out,
                                  overwrite_input=overwrite_input)
1220
+
1221
+
1222
+ def _nanpercentile_dispatcher(
1223
+ a, q, axis=None, out=None, overwrite_input=None,
1224
+ method=None, keepdims=None, *, interpolation=None):
1225
+ return (a, q, out)
1226
+
1227
+
1228
@array_function_dispatch(_nanpercentile_dispatcher)
def nanpercentile(
        a,
        q,
        axis=None,
        out=None,
        overwrite_input=False,
        method="linear",
        keepdims=np._NoValue,
        *,
        interpolation=None,
):
    """
    Compute the qth percentile of the data along the specified axis,
    while ignoring nan values.

    .. versionadded:: 1.9.0

    Parameters
    ----------
    a : array_like
        Input array or object convertible to an array, containing nan
        values to be ignored.
    q : array_like of float
        Percentile or sequence of percentiles to compute, each in the
        range [0, 100].
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed. The
        default is over a flattened version of the array.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output (type cast if necessary).
    overwrite_input : bool, optional
        If True, allow `a` to be modified by intermediate calculations
        to save memory; its contents afterwards are undefined.
    method : str, optional
        Estimation method. The options sorted by their R type as
        summarized in the H&F paper [1]_ are: 'inverted_cdf',
        'averaged_inverted_cdf', 'closest_observation',
        'interpolated_inverted_cdf', 'hazen', 'weibull',
        'linear' (default), 'median_unbiased', 'normal_unbiased';
        plus the discontinuous variants 'lower', 'higher', 'midpoint'
        and 'nearest'.

        .. versionchanged:: 1.22.0
            This argument was previously called "interpolation".
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`. Non-default values are passed through
        (for empty arrays) to the underlying array's `mean` method.
    interpolation : str, optional
        Deprecated name for the method keyword argument.

        .. deprecated:: 1.22.0

    Returns
    -------
    percentile : scalar or ndarray
        Scalar for a single percentile with ``axis=None``; otherwise an
        array whose first axis corresponds to the percentiles. Output
        dtype is ``float64`` for integer/low-precision input, else the
        input dtype. If `out` is specified, that array is returned.

    See Also
    --------
    nanmean
    nanmedian : equivalent to ``nanpercentile(..., 50)``
    percentile, median, mean
    nanquantile : equivalent to nanpercentile, except q in range [0, 1].

    Notes
    -----
    For more information please see `numpy.percentile`

    Examples
    --------
    >>> a = np.array([[10., np.nan, 4.], [3., 2., 1.]])
    >>> np.nanpercentile(a, 50)
    3.0
    >>> np.nanpercentile(a, 50, axis=0)
    array([6.5, 2. , 2.5])

    References
    ----------
    .. [1] R. J. Hyndman and Y. Fan,
       "Sample quantiles in statistical packages,"
       The American Statistician, 50(4), pp. 361-365, 1996

    """
    if interpolation is not None:
        # Resolve the deprecated "interpolation" keyword (renamed to
        # "method" in 1.22.0); this also emits the deprecation warning.
        method = function_base._check_interpolation_as_method(
            method, interpolation, "nanpercentile")

    arr = np.asanyarray(a)
    if arr.dtype.kind == "c":
        raise TypeError("a must be an array of real numbers")

    # Rescale percentiles to quantiles in [0, 1].
    probs = np.true_divide(q, 100.0)
    # undo any decay that the ufunc performed (see gh-13105)
    probs = np.asanyarray(probs)
    if not function_base._quantile_is_valid(probs):
        raise ValueError("Percentiles must be in the range [0, 100]")
    return _nanquantile_unchecked(
        arr, probs, axis, out, overwrite_input, method, keepdims)
1386
+
1387
+
1388
+ def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
1389
+ method=None, keepdims=None, *, interpolation=None):
1390
+ return (a, q, out)
1391
+
1392
+
1393
@array_function_dispatch(_nanquantile_dispatcher)
def nanquantile(
        a,
        q,
        axis=None,
        out=None,
        overwrite_input=False,
        method="linear",
        keepdims=np._NoValue,
        *,
        interpolation=None,
):
    """
    Compute the qth quantile of the data along the specified axis,
    while ignoring nan values.

    .. versionadded:: 1.15.0

    Parameters
    ----------
    a : array_like
        Input array or object convertible to an array, containing nan
        values to be ignored.
    q : array_like of float
        Probability or sequence of probabilities for the quantiles to
        compute, each in the range [0, 1].
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the quantiles are computed. The
        default is over a flattened version of the array.
    out : ndarray, optional
        Alternative output array; must have the same shape and buffer
        length as the expected output (type cast if necessary).
    overwrite_input : bool, optional
        If True, allow `a` to be modified by intermediate calculations
        to save memory; its contents afterwards are undefined.
    method : str, optional
        Estimation method. The options sorted by their R type as
        summarized in the H&F paper [1]_ are: 'inverted_cdf',
        'averaged_inverted_cdf', 'closest_observation',
        'interpolated_inverted_cdf', 'hazen', 'weibull',
        'linear' (default), 'median_unbiased', 'normal_unbiased';
        plus the discontinuous variants 'lower', 'higher', 'midpoint'
        and 'nearest'.

        .. versionchanged:: 1.22.0
            This argument was previously called "interpolation".
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`. Non-default values are passed through
        (for empty arrays) to the underlying array's `mean` method.
    interpolation : str, optional
        Deprecated name for the method keyword argument.

        .. deprecated:: 1.22.0

    Returns
    -------
    quantile : scalar or ndarray
        Scalar for a single probability with ``axis=None``; otherwise
        an array whose first axis corresponds to the quantiles. Output
        dtype is ``float64`` for integer/low-precision input, else the
        input dtype. If `out` is specified, that array is returned.

    See Also
    --------
    quantile
    nanmean, nanmedian
    nanmedian : equivalent to ``nanquantile(..., 0.5)``
    nanpercentile : same as nanquantile, but with q in the range [0, 100].

    Notes
    -----
    For more information please see `numpy.quantile`

    Examples
    --------
    >>> a = np.array([[10., np.nan, 4.], [3., 2., 1.]])
    >>> np.nanquantile(a, 0.5)
    3.0
    >>> np.nanquantile(a, 0.5, axis=0)
    array([6.5, 2. , 2.5])

    References
    ----------
    .. [1] R. J. Hyndman and Y. Fan,
       "Sample quantiles in statistical packages,"
       The American Statistician, 50(4), pp. 361-365, 1996

    """

    if interpolation is not None:
        # Resolve the deprecated "interpolation" keyword (renamed to
        # "method" in 1.22.0); this also emits the deprecation warning.
        method = function_base._check_interpolation_as_method(
            method, interpolation, "nanquantile")

    arr = np.asanyarray(a)
    if arr.dtype.kind == "c":
        raise TypeError("a must be an array of real numbers")

    probs = np.asanyarray(q)
    if not function_base._quantile_is_valid(probs):
        raise ValueError("Quantiles must be in the range [0, 1]")
    return _nanquantile_unchecked(
        arr, probs, axis, out, overwrite_input, method, keepdims)
1547
+
1548
+
1549
def _nanquantile_unchecked(
        a,
        q,
        axis=None,
        out=None,
        overwrite_input=False,
        method="linear",
):
    """Assumes that q is in [0, 1], and is an ndarray"""
    # apply_along_axis in _nanpercentile doesn't handle empty arrays
    # well, so they are dealt with upfront; nanmean produces the
    # expected NaN result (and warning) for empty input.
    if a.size == 0:
        return np.nanmean(a, axis, out=out, keepdims=keepdims)
    return function_base._ureduce(a,
                                  func=_nanquantile_ureduce_func,
                                  q=q,
                                  keepdims=keepdims,
                                  axis=axis,
                                  out=out,
                                  overwrite_input=overwrite_input,
                                  method=method)
1571
+
1572
+
1573
def _nanquantile_ureduce_func(a, q, axis=None, out=None,
                              overwrite_input=False, method="linear"):
    """
    Private NaN-aware quantile without extended-axis or keepdims
    support; those are layered on via _ureduce. See nanpercentile for
    parameter usage.
    """
    if axis is None or a.ndim == 1:
        result = _nanquantile_1d(a.ravel(), q, overwrite_input, method)
    else:
        result = np.apply_along_axis(_nanquantile_1d, axis, a, q,
                                     overwrite_input, method)
        # apply_along_axis fills the collapsed axis with the per-slice
        # results; move that axis to the front to match percentile's
        # convention (only needed when q is not scalar).
        if q.ndim != 0:
            result = np.moveaxis(result, axis, 0)

    if out is not None:
        out[...] = result
    return result
1595
+
1596
+
1597
def _nanquantile_1d(arr1d, q, overwrite_input=False, method="linear"):
    """
    Private rank-1 helper: compute quantiles of `arr1d` ignoring NaNs.
    See nanpercentile for parameter usage.
    """
    arr1d, overwrite_input = _remove_nan_1d(arr1d,
                                            overwrite_input=overwrite_input)
    if arr1d.size == 0:
        # Everything was NaN: one NaN per requested quantile; the [()]
        # index converts a 0-d result to a scalar.
        return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()]

    return function_base._quantile_unchecked(
        arr1d, q, overwrite_input=overwrite_input, method=method)
1610
+
1611
+
1612
+ def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
1613
+ keepdims=None, *, where=None):
1614
+ return (a, out)
1615
+
1616
+
1617
@array_function_dispatch(_nanvar_dispatcher)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
           *, where=np._NoValue):
    """
    Compute the variance along the specified axis, while ignoring NaNs.

    The variance is computed for the flattened array by default,
    otherwise over the specified axis. For all-NaN slices or slices
    with zero degrees of freedom, NaN is returned and a
    `RuntimeWarning` is raised.

    .. versionadded:: 1.8.0

    Parameters
    ----------
    a : array_like
        Array containing numbers whose variance is desired. If `a` is
        not an array, a conversion is attempted.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the variance is computed; defaults to
        the flattened array.
    dtype : data-type, optional
        Type to use in computing the variance. Defaults to `float64`
        for integer input, else the input dtype.
    out : ndarray, optional
        Alternate output array; must have the same shape as the
        expected output (type is cast if necessary).
    ddof : int, optional
        "Delta Degrees of Freedom": the divisor used is ``N - ddof``,
        where ``N`` is the number of non-NaN elements. Default 0.
    keepdims : bool, optional
        If True, the reduced axes are kept with size one so the result
        broadcasts against `a`.
    where : array_like of bool, optional
        Elements to include in the variance. See `~numpy.ufunc.reduce`.

        .. versionadded:: 1.22.0

    Returns
    -------
    variance : ndarray, see dtype parameter above
        A new array containing the variance, or a reference to `out`.
        NaN for slices with only NaNs or with ``ddof >=`` the non-NaN
        count.

    See Also
    --------
    std : Standard deviation
    mean : Average
    var : Variance while not ignoring NaNs
    nanstd, nanmean
    :ref:`ufuncs-output-type`

    Notes
    -----
    ``var = mean(abs(x - x.mean())**2)`` over the non-NaN elements,
    with divisor ``N - ddof``. For complex numbers the absolute value
    is taken before squaring, so the result is real and nonnegative.
    Computation uses the input precision; pass a higher-precision
    ``dtype`` to reduce error for `float32`. Sub-classes of ndarray
    must define `sum` with the kwarg `keepdims` for this to work.

    Examples
    --------
    >>> a = np.array([[1, np.nan], [3, 4]])
    >>> np.nanvar(a)
    1.5555555555555554
    >>> np.nanvar(a, axis=0)
    array([1., 0.])

    """
    data, nan_mask = _replace_nan(a, 0)
    if nan_mask is None:
        # No NaNs possible/present: the plain variance suffices.
        return np.var(data, axis=axis, dtype=dtype, out=out, ddof=ddof,
                      keepdims=keepdims, where=where)

    if dtype is not None:
        dtype = np.dtype(dtype)
    if dtype is not None and not issubclass(dtype.type, np.inexact):
        raise TypeError("If a is inexact, then dtype must be inexact")
    if out is not None and not issubclass(out.dtype.type, np.inexact):
        raise TypeError("If a is inexact, then out must be inexact")

    # Compute mean. np.matrix is special-cased for reverse
    # compatibility: these sums need keepdims=True, but matrix raises
    # for that kwarg, and dropping it forces keepdims behavior anyway —
    # so the code used to work by serendipity.
    inner_keepdims = np._NoValue if type(data) is np.matrix else True
    counts = np.sum(~nan_mask, axis=axis, dtype=np.intp,
                    keepdims=inner_keepdims, where=where)
    means = np.sum(data, axis=axis, dtype=dtype, keepdims=inner_keepdims,
                   where=where)
    means = _divide_by_count(means, counts)

    # Squared deviation from the mean, computed in place; former NaN
    # positions are zeroed so they don't contribute to the sum.
    np.subtract(data, means, out=data, casting='unsafe', where=where)
    data = _copyto(data, 0, nan_mask)
    if issubclass(data.dtype.type, np.complexfloating):
        sq_dev = np.multiply(data, data.conj(), out=data, where=where).real
    else:
        sq_dev = np.multiply(data, data, out=data, where=where)

    # Variance = sum of squared deviations / (count - ddof).
    result = np.sum(sq_dev, axis=axis, dtype=dtype, out=out,
                    keepdims=keepdims, where=where)

    # Precaution against reduced object arrays.
    try:
        result_ndim = result.ndim
    except AttributeError:
        result_ndim = np.ndim(result)
    if result_ndim < counts.ndim:
        # Sub-classes of ndarray may ignore keepdims, so check here.
        counts = counts.squeeze(axis)
    dof = counts - ddof
    result = _divide_by_count(result, dof)

    bad = (dof <= 0)
    if np.any(bad):
        warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,
                      stacklevel=2)
        # NaN, inf, or negative numbers are all possible bad
        # values, so explicitly replace them with NaN.
        result = _copyto(result, np.nan, bad)
    return result
1771
+
1772
+
1773
+ def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
1774
+ keepdims=None, *, where=None):
1775
+ return (a, out)
1776
+
1777
+
1778
+ @array_function_dispatch(_nanstd_dispatcher)
1779
+ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
1780
+ *, where=np._NoValue):
1781
+ """
1782
+ Compute the standard deviation along the specified axis, while
1783
+ ignoring NaNs.
1784
+
1785
+ Returns the standard deviation, a measure of the spread of a
1786
+ distribution, of the non-NaN array elements. The standard deviation is
1787
+ computed for the flattened array by default, otherwise over the
1788
+ specified axis.
1789
+
1790
+ For all-NaN slices or slices with zero degrees of freedom, NaN is
1791
+ returned and a `RuntimeWarning` is raised.
1792
+
1793
+ .. versionadded:: 1.8.0
1794
+
1795
+ Parameters
1796
+ ----------
1797
+ a : array_like
1798
+ Calculate the standard deviation of the non-NaN values.
1799
+ axis : {int, tuple of int, None}, optional
1800
+ Axis or axes along which the standard deviation is computed. The default is
1801
+ to compute the standard deviation of the flattened array.
1802
+ dtype : dtype, optional
1803
+ Type to use in computing the standard deviation. For arrays of
1804
+ integer type the default is float64, for arrays of float types it
1805
+ is the same as the array type.
1806
+ out : ndarray, optional
1807
+ Alternative output array in which to place the result. It must have
1808
+ the same shape as the expected output but the type (of the
1809
+ calculated values) will be cast if necessary.
1810
+ ddof : int, optional
1811
+ Means Delta Degrees of Freedom. The divisor used in calculations
1812
+ is ``N - ddof``, where ``N`` represents the number of non-NaN
1813
+ elements. By default `ddof` is zero.
1814
+
1815
+ keepdims : bool, optional
1816
+ If this is set to True, the axes which are reduced are left
1817
+ in the result as dimensions with size one. With this option,
1818
+ the result will broadcast correctly against the original `a`.
1819
+
1820
+ If this value is anything but the default it is passed through
1821
+ as-is to the relevant functions of the sub-classes. If these
1822
+ functions do not have a `keepdims` kwarg, a RuntimeError will
1823
+ be raised.
1824
+ where : array_like of bool, optional
1825
+ Elements to include in the standard deviation.
1826
+ See `~numpy.ufunc.reduce` for details.
1827
+
1828
+ .. versionadded:: 1.22.0
1829
+
1830
+ Returns
1831
+ -------
1832
+ standard_deviation : ndarray, see dtype parameter above.
1833
+ If `out` is None, return a new array containing the standard
1834
+ deviation, otherwise return a reference to the output array. If
1835
+ ddof is >= the number of non-NaN elements in a slice or the slice
1836
+ contains only NaNs, then the result for that slice is NaN.
1837
+
1838
+ See Also
1839
+ --------
1840
+ var, mean, std
1841
+ nanvar, nanmean
1842
+ :ref:`ufuncs-output-type`
1843
+
1844
+ Notes
1845
+ -----
1846
+ The standard deviation is the square root of the average of the squared
1847
+ deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``.
1848
+
1849
+ The average squared deviation is normally calculated as
1850
+ ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is
1851
+ specified, the divisor ``N - ddof`` is used instead. In standard
1852
+ statistical practice, ``ddof=1`` provides an unbiased estimator of the
1853
+ variance of the infinite population. ``ddof=0`` provides a maximum
1854
+ likelihood estimate of the variance for normally distributed variables.
1855
+ The standard deviation computed in this function is the square root of
1856
+ the estimated variance, so even with ``ddof=1``, it will not be an
1857
+ unbiased estimate of the standard deviation per se.
1858
+
1859
+ Note that, for complex numbers, `std` takes the absolute value before
1860
+ squaring, so that the result is always real and nonnegative.
1861
+
1862
+ For floating-point input, the *std* is computed using the same
1863
+ precision the input has. Depending on the input data, this can cause
1864
+ the results to be inaccurate, especially for float32 (see example
1865
+ below). Specifying a higher-accuracy accumulator using the `dtype`
1866
+ keyword can alleviate this issue.
1867
+
1868
+ Examples
1869
+ --------
1870
+ >>> a = np.array([[1, np.nan], [3, 4]])
1871
+ >>> np.nanstd(a)
1872
+ 1.247219128924647
1873
+ >>> np.nanstd(a, axis=0)
1874
+ array([1., 0.])
1875
+ >>> np.nanstd(a, axis=1)
1876
+ array([0., 0.5]) # may vary
1877
+
1878
+ """
1879
+ var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
1880
+ keepdims=keepdims, where=where)
1881
+ if isinstance(var, np.ndarray):
1882
+ std = np.sqrt(var, out=var)
1883
+ elif hasattr(var, 'dtype'):
1884
+ std = var.dtype.type(np.sqrt(var))
1885
+ else:
1886
+ std = np.sqrt(var)
1887
+ return std
.venv/lib/python3.11/site-packages/numpy/lib/nanfunctions.pyi ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy.core.fromnumeric import (
2
+ amin,
3
+ amax,
4
+ argmin,
5
+ argmax,
6
+ sum,
7
+ prod,
8
+ cumsum,
9
+ cumprod,
10
+ mean,
11
+ var,
12
+ std
13
+ )
14
+
15
+ from numpy.lib.function_base import (
16
+ median,
17
+ percentile,
18
+ quantile,
19
+ )
20
+
21
+ __all__: list[str]
22
+
23
+ # NOTE: In reaility these functions are not aliases but distinct functions
24
+ # with identical signatures.
25
+ nanmin = amin
26
+ nanmax = amax
27
+ nanargmin = argmin
28
+ nanargmax = argmax
29
+ nansum = sum
30
+ nanprod = prod
31
+ nancumsum = cumsum
32
+ nancumprod = cumprod
33
+ nanmean = mean
34
+ nanvar = var
35
+ nanstd = std
36
+ nanmedian = median
37
+ nanpercentile = percentile
38
+ nanquantile = quantile
.venv/lib/python3.11/site-packages/numpy/lib/polynomial.py ADDED
@@ -0,0 +1,1453 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions to operate on polynomials.
3
+
4
+ """
5
+ __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
6
+ 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
7
+ 'polyfit', 'RankWarning']
8
+
9
+ import functools
10
+ import re
11
+ import warnings
12
+
13
+ from .._utils import set_module
14
+ import numpy.core.numeric as NX
15
+
16
+ from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
17
+ ones)
18
+ from numpy.core import overrides
19
+ from numpy.lib.twodim_base import diag, vander
20
+ from numpy.lib.function_base import trim_zeros
21
+ from numpy.lib.type_check import iscomplex, real, imag, mintypecode
22
+ from numpy.linalg import eigvals, lstsq, inv
23
+
24
+
25
+ array_function_dispatch = functools.partial(
26
+ overrides.array_function_dispatch, module='numpy')
27
+
28
+
29
+ @set_module('numpy')
30
+ class RankWarning(UserWarning):
31
+ """
32
+ Issued by `polyfit` when the Vandermonde matrix is rank deficient.
33
+
34
+ For more information, a way to suppress the warning, and an example of
35
+ `RankWarning` being issued, see `polyfit`.
36
+
37
+ """
38
+ pass
39
+
40
+
41
+ def _poly_dispatcher(seq_of_zeros):
42
+ return seq_of_zeros
43
+
44
+
45
+ @array_function_dispatch(_poly_dispatcher)
46
+ def poly(seq_of_zeros):
47
+ """
48
+ Find the coefficients of a polynomial with the given sequence of roots.
49
+
50
+ .. note::
51
+ This forms part of the old polynomial API. Since version 1.4, the
52
+ new polynomial API defined in `numpy.polynomial` is preferred.
53
+ A summary of the differences can be found in the
54
+ :doc:`transition guide </reference/routines.polynomials>`.
55
+
56
+ Returns the coefficients of the polynomial whose leading coefficient
57
+ is one for the given sequence of zeros (multiple roots must be included
58
+ in the sequence as many times as their multiplicity; see Examples).
59
+ A square matrix (or array, which will be treated as a matrix) can also
60
+ be given, in which case the coefficients of the characteristic polynomial
61
+ of the matrix are returned.
62
+
63
+ Parameters
64
+ ----------
65
+ seq_of_zeros : array_like, shape (N,) or (N, N)
66
+ A sequence of polynomial roots, or a square array or matrix object.
67
+
68
+ Returns
69
+ -------
70
+ c : ndarray
71
+ 1D array of polynomial coefficients from highest to lowest degree:
72
+
73
+ ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
74
+ where c[0] always equals 1.
75
+
76
+ Raises
77
+ ------
78
+ ValueError
79
+ If input is the wrong shape (the input must be a 1-D or square
80
+ 2-D array).
81
+
82
+ See Also
83
+ --------
84
+ polyval : Compute polynomial values.
85
+ roots : Return the roots of a polynomial.
86
+ polyfit : Least squares polynomial fit.
87
+ poly1d : A one-dimensional polynomial class.
88
+
89
+ Notes
90
+ -----
91
+ Specifying the roots of a polynomial still leaves one degree of
92
+ freedom, typically represented by an undetermined leading
93
+ coefficient. [1]_ In the case of this function, that coefficient -
94
+ the first one in the returned array - is always taken as one. (If
95
+ for some reason you have one other point, the only automatic way
96
+ presently to leverage that information is to use ``polyfit``.)
97
+
98
+ The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
99
+ matrix **A** is given by
100
+
101
+ :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
102
+
103
+ where **I** is the `n`-by-`n` identity matrix. [2]_
104
+
105
+ References
106
+ ----------
107
+ .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
108
+ Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
109
+
110
+ .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
111
+ Academic Press, pg. 182, 1980.
112
+
113
+ Examples
114
+ --------
115
+ Given a sequence of a polynomial's zeros:
116
+
117
+ >>> np.poly((0, 0, 0)) # Multiple root example
118
+ array([1., 0., 0., 0.])
119
+
120
+ The line above represents z**3 + 0*z**2 + 0*z + 0.
121
+
122
+ >>> np.poly((-1./2, 0, 1./2))
123
+ array([ 1. , 0. , -0.25, 0. ])
124
+
125
+ The line above represents z**3 - z/4
126
+
127
+ >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
128
+ array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
129
+
130
+ Given a square array object:
131
+
132
+ >>> P = np.array([[0, 1./3], [-1./2, 0]])
133
+ >>> np.poly(P)
134
+ array([1. , 0. , 0.16666667])
135
+
136
+ Note how in all cases the leading coefficient is always 1.
137
+
138
+ """
139
+ seq_of_zeros = atleast_1d(seq_of_zeros)
140
+ sh = seq_of_zeros.shape
141
+
142
+ if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
143
+ seq_of_zeros = eigvals(seq_of_zeros)
144
+ elif len(sh) == 1:
145
+ dt = seq_of_zeros.dtype
146
+ # Let object arrays slip through, e.g. for arbitrary precision
147
+ if dt != object:
148
+ seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
149
+ else:
150
+ raise ValueError("input must be 1d or non-empty square 2d array.")
151
+
152
+ if len(seq_of_zeros) == 0:
153
+ return 1.0
154
+ dt = seq_of_zeros.dtype
155
+ a = ones((1,), dtype=dt)
156
+ for zero in seq_of_zeros:
157
+ a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full')
158
+
159
+ if issubclass(a.dtype.type, NX.complexfloating):
160
+ # if complex roots are all complex conjugates, the roots are real.
161
+ roots = NX.asarray(seq_of_zeros, complex)
162
+ if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
163
+ a = a.real.copy()
164
+
165
+ return a
166
+
167
+
168
+ def _roots_dispatcher(p):
169
+ return p
170
+
171
+
172
+ @array_function_dispatch(_roots_dispatcher)
173
+ def roots(p):
174
+ """
175
+ Return the roots of a polynomial with coefficients given in p.
176
+
177
+ .. note::
178
+ This forms part of the old polynomial API. Since version 1.4, the
179
+ new polynomial API defined in `numpy.polynomial` is preferred.
180
+ A summary of the differences can be found in the
181
+ :doc:`transition guide </reference/routines.polynomials>`.
182
+
183
+ The values in the rank-1 array `p` are coefficients of a polynomial.
184
+ If the length of `p` is n+1 then the polynomial is described by::
185
+
186
+ p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
187
+
188
+ Parameters
189
+ ----------
190
+ p : array_like
191
+ Rank-1 array of polynomial coefficients.
192
+
193
+ Returns
194
+ -------
195
+ out : ndarray
196
+ An array containing the roots of the polynomial.
197
+
198
+ Raises
199
+ ------
200
+ ValueError
201
+ When `p` cannot be converted to a rank-1 array.
202
+
203
+ See also
204
+ --------
205
+ poly : Find the coefficients of a polynomial with a given sequence
206
+ of roots.
207
+ polyval : Compute polynomial values.
208
+ polyfit : Least squares polynomial fit.
209
+ poly1d : A one-dimensional polynomial class.
210
+
211
+ Notes
212
+ -----
213
+ The algorithm relies on computing the eigenvalues of the
214
+ companion matrix [1]_.
215
+
216
+ References
217
+ ----------
218
+ .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
219
+ Cambridge University Press, 1999, pp. 146-7.
220
+
221
+ Examples
222
+ --------
223
+ >>> coeff = [3.2, 2, 1]
224
+ >>> np.roots(coeff)
225
+ array([-0.3125+0.46351241j, -0.3125-0.46351241j])
226
+
227
+ """
228
+ # If input is scalar, this makes it an array
229
+ p = atleast_1d(p)
230
+ if p.ndim != 1:
231
+ raise ValueError("Input must be a rank-1 array.")
232
+
233
+ # find non-zero array entries
234
+ non_zero = NX.nonzero(NX.ravel(p))[0]
235
+
236
+ # Return an empty array if polynomial is all zeros
237
+ if len(non_zero) == 0:
238
+ return NX.array([])
239
+
240
+ # find the number of trailing zeros -- this is the number of roots at 0.
241
+ trailing_zeros = len(p) - non_zero[-1] - 1
242
+
243
+ # strip leading and trailing zeros
244
+ p = p[int(non_zero[0]):int(non_zero[-1])+1]
245
+
246
+ # casting: if incoming array isn't floating point, make it floating point.
247
+ if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
248
+ p = p.astype(float)
249
+
250
+ N = len(p)
251
+ if N > 1:
252
+ # build companion matrix and find its eigenvalues (the roots)
253
+ A = diag(NX.ones((N-2,), p.dtype), -1)
254
+ A[0,:] = -p[1:] / p[0]
255
+ roots = eigvals(A)
256
+ else:
257
+ roots = NX.array([])
258
+
259
+ # tack any zeros onto the back of the array
260
+ roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
261
+ return roots
262
+
263
+
264
+ def _polyint_dispatcher(p, m=None, k=None):
265
+ return (p,)
266
+
267
+
268
+ @array_function_dispatch(_polyint_dispatcher)
269
+ def polyint(p, m=1, k=None):
270
+ """
271
+ Return an antiderivative (indefinite integral) of a polynomial.
272
+
273
+ .. note::
274
+ This forms part of the old polynomial API. Since version 1.4, the
275
+ new polynomial API defined in `numpy.polynomial` is preferred.
276
+ A summary of the differences can be found in the
277
+ :doc:`transition guide </reference/routines.polynomials>`.
278
+
279
+ The returned order `m` antiderivative `P` of polynomial `p` satisfies
280
+ :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
281
+ integration constants `k`. The constants determine the low-order
282
+ polynomial part
283
+
284
+ .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
285
+
286
+ of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
287
+
288
+ Parameters
289
+ ----------
290
+ p : array_like or poly1d
291
+ Polynomial to integrate.
292
+ A sequence is interpreted as polynomial coefficients, see `poly1d`.
293
+ m : int, optional
294
+ Order of the antiderivative. (Default: 1)
295
+ k : list of `m` scalars or scalar, optional
296
+ Integration constants. They are given in the order of integration:
297
+ those corresponding to highest-order terms come first.
298
+
299
+ If ``None`` (default), all constants are assumed to be zero.
300
+ If `m = 1`, a single scalar can be given instead of a list.
301
+
302
+ See Also
303
+ --------
304
+ polyder : derivative of a polynomial
305
+ poly1d.integ : equivalent method
306
+
307
+ Examples
308
+ --------
309
+ The defining property of the antiderivative:
310
+
311
+ >>> p = np.poly1d([1,1,1])
312
+ >>> P = np.polyint(p)
313
+ >>> P
314
+ poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary
315
+ >>> np.polyder(P) == p
316
+ True
317
+
318
+ The integration constants default to zero, but can be specified:
319
+
320
+ >>> P = np.polyint(p, 3)
321
+ >>> P(0)
322
+ 0.0
323
+ >>> np.polyder(P)(0)
324
+ 0.0
325
+ >>> np.polyder(P, 2)(0)
326
+ 0.0
327
+ >>> P = np.polyint(p, 3, k=[6,5,3])
328
+ >>> P
329
+ poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary
330
+
331
+ Note that 3 = 6 / 2!, and that the constants are given in the order of
332
+ integrations. Constant of the highest-order polynomial term comes first:
333
+
334
+ >>> np.polyder(P, 2)(0)
335
+ 6.0
336
+ >>> np.polyder(P, 1)(0)
337
+ 5.0
338
+ >>> P(0)
339
+ 3.0
340
+
341
+ """
342
+ m = int(m)
343
+ if m < 0:
344
+ raise ValueError("Order of integral must be positive (see polyder)")
345
+ if k is None:
346
+ k = NX.zeros(m, float)
347
+ k = atleast_1d(k)
348
+ if len(k) == 1 and m > 1:
349
+ k = k[0]*NX.ones(m, float)
350
+ if len(k) < m:
351
+ raise ValueError(
352
+ "k must be a scalar or a rank-1 array of length 1 or >m.")
353
+
354
+ truepoly = isinstance(p, poly1d)
355
+ p = NX.asarray(p)
356
+ if m == 0:
357
+ if truepoly:
358
+ return poly1d(p)
359
+ return p
360
+ else:
361
+ # Note: this must work also with object and integer arrays
362
+ y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
363
+ val = polyint(y, m - 1, k=k[1:])
364
+ if truepoly:
365
+ return poly1d(val)
366
+ return val
367
+
368
+
369
+ def _polyder_dispatcher(p, m=None):
370
+ return (p,)
371
+
372
+
373
+ @array_function_dispatch(_polyder_dispatcher)
374
+ def polyder(p, m=1):
375
+ """
376
+ Return the derivative of the specified order of a polynomial.
377
+
378
+ .. note::
379
+ This forms part of the old polynomial API. Since version 1.4, the
380
+ new polynomial API defined in `numpy.polynomial` is preferred.
381
+ A summary of the differences can be found in the
382
+ :doc:`transition guide </reference/routines.polynomials>`.
383
+
384
+ Parameters
385
+ ----------
386
+ p : poly1d or sequence
387
+ Polynomial to differentiate.
388
+ A sequence is interpreted as polynomial coefficients, see `poly1d`.
389
+ m : int, optional
390
+ Order of differentiation (default: 1)
391
+
392
+ Returns
393
+ -------
394
+ der : poly1d
395
+ A new polynomial representing the derivative.
396
+
397
+ See Also
398
+ --------
399
+ polyint : Anti-derivative of a polynomial.
400
+ poly1d : Class for one-dimensional polynomials.
401
+
402
+ Examples
403
+ --------
404
+ The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
405
+
406
+ >>> p = np.poly1d([1,1,1,1])
407
+ >>> p2 = np.polyder(p)
408
+ >>> p2
409
+ poly1d([3, 2, 1])
410
+
411
+ which evaluates to:
412
+
413
+ >>> p2(2.)
414
+ 17.0
415
+
416
+ We can verify this, approximating the derivative with
417
+ ``(f(x + h) - f(x))/h``:
418
+
419
+ >>> (p(2. + 0.001) - p(2.)) / 0.001
420
+ 17.007000999997857
421
+
422
+ The fourth-order derivative of a 3rd-order polynomial is zero:
423
+
424
+ >>> np.polyder(p, 2)
425
+ poly1d([6, 2])
426
+ >>> np.polyder(p, 3)
427
+ poly1d([6])
428
+ >>> np.polyder(p, 4)
429
+ poly1d([0])
430
+
431
+ """
432
+ m = int(m)
433
+ if m < 0:
434
+ raise ValueError("Order of derivative must be positive (see polyint)")
435
+
436
+ truepoly = isinstance(p, poly1d)
437
+ p = NX.asarray(p)
438
+ n = len(p) - 1
439
+ y = p[:-1] * NX.arange(n, 0, -1)
440
+ if m == 0:
441
+ val = p
442
+ else:
443
+ val = polyder(y, m - 1)
444
+ if truepoly:
445
+ val = poly1d(val)
446
+ return val
447
+
448
+
449
+ def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
450
+ return (x, y, w)
451
+
452
+
453
+ @array_function_dispatch(_polyfit_dispatcher)
454
+ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
455
+ """
456
+ Least squares polynomial fit.
457
+
458
+ .. note::
459
+ This forms part of the old polynomial API. Since version 1.4, the
460
+ new polynomial API defined in `numpy.polynomial` is preferred.
461
+ A summary of the differences can be found in the
462
+ :doc:`transition guide </reference/routines.polynomials>`.
463
+
464
+ Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
465
+ to points `(x, y)`. Returns a vector of coefficients `p` that minimises
466
+ the squared error in the order `deg`, `deg-1`, ... `0`.
467
+
468
+ The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
469
+ method is recommended for new code as it is more stable numerically. See
470
+ the documentation of the method for more information.
471
+
472
+ Parameters
473
+ ----------
474
+ x : array_like, shape (M,)
475
+ x-coordinates of the M sample points ``(x[i], y[i])``.
476
+ y : array_like, shape (M,) or (M, K)
477
+ y-coordinates of the sample points. Several data sets of sample
478
+ points sharing the same x-coordinates can be fitted at once by
479
+ passing in a 2D-array that contains one dataset per column.
480
+ deg : int
481
+ Degree of the fitting polynomial
482
+ rcond : float, optional
483
+ Relative condition number of the fit. Singular values smaller than
484
+ this relative to the largest singular value will be ignored. The
485
+ default value is len(x)*eps, where eps is the relative precision of
486
+ the float type, about 2e-16 in most cases.
487
+ full : bool, optional
488
+ Switch determining nature of return value. When it is False (the
489
+ default) just the coefficients are returned, when True diagnostic
490
+ information from the singular value decomposition is also returned.
491
+ w : array_like, shape (M,), optional
492
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
493
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
494
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
495
+ same variance. When using inverse-variance weighting, use
496
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
497
+ cov : bool or str, optional
498
+ If given and not `False`, return not just the estimate but also its
499
+ covariance matrix. By default, the covariance are scaled by
500
+ chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed
501
+ to be unreliable except in a relative sense and everything is scaled
502
+ such that the reduced chi2 is unity. This scaling is omitted if
503
+ ``cov='unscaled'``, as is relevant for the case that the weights are
504
+ w = 1/sigma, with sigma known to be a reliable estimate of the
505
+ uncertainty.
506
+
507
+ Returns
508
+ -------
509
+ p : ndarray, shape (deg + 1,) or (deg + 1, K)
510
+ Polynomial coefficients, highest power first. If `y` was 2-D, the
511
+ coefficients for `k`-th data set are in ``p[:,k]``.
512
+
513
+ residuals, rank, singular_values, rcond
514
+ These values are only returned if ``full == True``
515
+
516
+ - residuals -- sum of squared residuals of the least squares fit
517
+ - rank -- the effective rank of the scaled Vandermonde
518
+ coefficient matrix
519
+ - singular_values -- singular values of the scaled Vandermonde
520
+ coefficient matrix
521
+ - rcond -- value of `rcond`.
522
+
523
+ For more details, see `numpy.linalg.lstsq`.
524
+
525
+ V : ndarray, shape (M,M) or (M,M,K)
526
+ Present only if ``full == False`` and ``cov == True``. The covariance
527
+ matrix of the polynomial coefficient estimates. The diagonal of
528
+ this matrix are the variance estimates for each coefficient. If y
529
+ is a 2-D array, then the covariance matrix for the `k`-th data set
530
+ are in ``V[:,:,k]``
531
+
532
+
533
+ Warns
534
+ -----
535
+ RankWarning
536
+ The rank of the coefficient matrix in the least-squares fit is
537
+ deficient. The warning is only raised if ``full == False``.
538
+
539
+ The warnings can be turned off by
540
+
541
+ >>> import warnings
542
+ >>> warnings.simplefilter('ignore', np.RankWarning)
543
+
544
+ See Also
545
+ --------
546
+ polyval : Compute polynomial values.
547
+ linalg.lstsq : Computes a least-squares fit.
548
+ scipy.interpolate.UnivariateSpline : Computes spline fits.
549
+
550
+ Notes
551
+ -----
552
+ The solution minimizes the squared error
553
+
554
+ .. math::
555
+ E = \\sum_{j=0}^k |p(x_j) - y_j|^2
556
+
557
+ in the equations::
558
+
559
+ x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
560
+ x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
561
+ ...
562
+ x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
563
+
564
+ The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
565
+
566
+ `polyfit` issues a `RankWarning` when the least-squares fit is badly
567
+ conditioned. This implies that the best fit is not well-defined due
568
+ to numerical error. The results may be improved by lowering the polynomial
569
+ degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
570
+ can also be set to a value smaller than its default, but the resulting
571
+ fit may be spurious: including contributions from the small singular
572
+ values can add numerical noise to the result.
573
+
574
+ Note that fitting polynomial coefficients is inherently badly conditioned
575
+ when the degree of the polynomial is large or the interval of sample points
576
+ is badly centered. The quality of the fit should always be checked in these
577
+ cases. When polynomial fits are not satisfactory, splines may be a good
578
+ alternative.
579
+
580
+ References
581
+ ----------
582
+ .. [1] Wikipedia, "Curve fitting",
583
+ https://en.wikipedia.org/wiki/Curve_fitting
584
+ .. [2] Wikipedia, "Polynomial interpolation",
585
+ https://en.wikipedia.org/wiki/Polynomial_interpolation
586
+
587
+ Examples
588
+ --------
589
+ >>> import warnings
590
+ >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
591
+ >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
592
+ >>> z = np.polyfit(x, y, 3)
593
+ >>> z
594
+ array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary
595
+
596
+ It is convenient to use `poly1d` objects for dealing with polynomials:
597
+
598
+ >>> p = np.poly1d(z)
599
+ >>> p(0.5)
600
+ 0.6143849206349179 # may vary
601
+ >>> p(3.5)
602
+ -0.34732142857143039 # may vary
603
+ >>> p(10)
604
+ 22.579365079365115 # may vary
605
+
606
+ High-order polynomials may oscillate wildly:
607
+
608
+ >>> with warnings.catch_warnings():
609
+ ... warnings.simplefilter('ignore', np.RankWarning)
610
+ ... p30 = np.poly1d(np.polyfit(x, y, 30))
611
+ ...
612
+ >>> p30(4)
613
+ -0.80000000000000204 # may vary
614
+ >>> p30(5)
615
+ -0.99999999999999445 # may vary
616
+ >>> p30(4.5)
617
+ -0.10547061179440398 # may vary
618
+
619
+ Illustration:
620
+
621
+ >>> import matplotlib.pyplot as plt
622
+ >>> xp = np.linspace(-2, 6, 100)
623
+ >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
624
+ >>> plt.ylim(-2,2)
625
+ (-2, 2)
626
+ >>> plt.show()
627
+
628
+ """
629
+ order = int(deg) + 1
630
+ x = NX.asarray(x) + 0.0
631
+ y = NX.asarray(y) + 0.0
632
+
633
+ # check arguments.
634
+ if deg < 0:
635
+ raise ValueError("expected deg >= 0")
636
+ if x.ndim != 1:
637
+ raise TypeError("expected 1D vector for x")
638
+ if x.size == 0:
639
+ raise TypeError("expected non-empty vector for x")
640
+ if y.ndim < 1 or y.ndim > 2:
641
+ raise TypeError("expected 1D or 2D array for y")
642
+ if x.shape[0] != y.shape[0]:
643
+ raise TypeError("expected x and y to have same length")
644
+
645
+ # set rcond
646
+ if rcond is None:
647
+ rcond = len(x)*finfo(x.dtype).eps
648
+
649
+ # set up least squares equation for powers of x
650
+ lhs = vander(x, order)
651
+ rhs = y
652
+
653
+ # apply weighting
654
+ if w is not None:
655
+ w = NX.asarray(w) + 0.0
656
+ if w.ndim != 1:
657
+ raise TypeError("expected a 1-d array for weights")
658
+ if w.shape[0] != y.shape[0]:
659
+ raise TypeError("expected w and y to have the same length")
660
+ lhs *= w[:, NX.newaxis]
661
+ if rhs.ndim == 2:
662
+ rhs *= w[:, NX.newaxis]
663
+ else:
664
+ rhs *= w
665
+
666
+ # scale lhs to improve condition number and solve
667
+ scale = NX.sqrt((lhs*lhs).sum(axis=0))
668
+ lhs /= scale
669
+ c, resids, rank, s = lstsq(lhs, rhs, rcond)
670
+ c = (c.T/scale).T # broadcast scale coefficients
671
+
672
+ # warn on rank reduction, which indicates an ill conditioned matrix
673
+ if rank != order and not full:
674
+ msg = "Polyfit may be poorly conditioned"
675
+ warnings.warn(msg, RankWarning, stacklevel=2)
676
+
677
+ if full:
678
+ return c, resids, rank, s, rcond
679
+ elif cov:
680
+ Vbase = inv(dot(lhs.T, lhs))
681
+ Vbase /= NX.outer(scale, scale)
682
+ if cov == "unscaled":
683
+ fac = 1
684
+ else:
685
+ if len(x) <= order:
686
+ raise ValueError("the number of data points must exceed order "
687
+ "to scale the covariance matrix")
688
+ # note, this used to be: fac = resids / (len(x) - order - 2.0)
689
+ # it was deciced that the "- 2" (originally justified by "Bayesian
690
+ # uncertainty analysis") is not what the user expects
691
+ # (see gh-11196 and gh-11197)
692
+ fac = resids / (len(x) - order)
693
+ if y.ndim == 1:
694
+ return c, Vbase * fac
695
+ else:
696
+ return c, Vbase[:,:, NX.newaxis] * fac
697
+ else:
698
+ return c
699
+
700
+
701
+ def _polyval_dispatcher(p, x):
702
+ return (p, x)
703
+
704
+
705
@array_function_dispatch(_polyval_dispatcher)
def polyval(p, x):
    """
    Evaluate a polynomial at specific values.

    .. note::
       This forms part of the old polynomial API. Since version 1.4, the
       new polynomial API defined in `numpy.polynomial` is preferred.
       A summary of the differences can be found in the
       :doc:`transition guide </reference/routines.polynomials>`.

    If `p` has length N, this computes
    ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``.
    When `x` is a sequence the polynomial is evaluated elementwise; when
    `x` is a poly1d, the composite polynomial ``p(x(t))`` is returned.

    Parameters
    ----------
    p : array_like or poly1d object
        1D array of polynomial coefficients (including zeros), highest
        degree first, or a poly1d instance.
    x : array_like or poly1d object
        A number, an array of numbers, or a poly1d at which to evaluate
        `p`.

    Returns
    -------
    values : ndarray or poly1d
        `p` evaluated at `x`.  The output type follows the type of `x`:
        array_like input gives array_like output, a poly1d gives a poly1d.

    See Also
    --------
    poly1d : A polynomial class.

    Notes
    -----
    Evaluation uses Horner's scheme, so results for polynomials of high
    degree may be inaccurate due to rounding error.

    If `x` is a subtype of `ndarray` the return value will be of the same
    type.

    Examples
    --------
    >>> np.polyval([3,0,1], 5)  # 3 * 5**2 + 0 * 5**1 + 1
    76
    >>> np.polyval([3,0,1], np.poly1d(5))
    poly1d([76])
    >>> np.polyval(np.poly1d([3,0,1]), 5)
    76
    >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
    poly1d([76])

    """
    p = NX.asarray(p)
    if isinstance(x, poly1d):
        # Composition: seed with the scalar zero so arithmetic below
        # produces a poly1d.
        acc = 0
    else:
        x = NX.asanyarray(x)
        acc = NX.zeros_like(x)
    # Horner's scheme: fold in coefficients from the highest power down.
    for coefficient in p:
        acc = acc * x + coefficient
    return acc
782
+
783
+
784
+ def _binary_op_dispatcher(a1, a2):
785
+ return (a1, a2)
786
+
787
+
788
@array_function_dispatch(_binary_op_dispatcher)
def polyadd(a1, a2):
    """
    Find the sum of two polynomials.

    .. note::
       This forms part of the old polynomial API. Since version 1.4, the
       new polynomial API defined in `numpy.polynomial` is preferred.
       A summary of the differences can be found in the
       :doc:`transition guide </reference/routines.polynomials>`.

    Each input must be either a poly1d object or a 1D sequence of
    polynomial coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The sum of the inputs.  If either input is a poly1d object the
        output is also a poly1d object; otherwise it is a 1D array of
        coefficients from highest to lowest degree.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval

    Examples
    --------
    >>> np.polyadd([1, 2], [9, 5, 4])
    array([9, 6, 6])

    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    # Coefficients are stored highest degree first, so the shorter vector
    # is left-padded with zeros before the elementwise addition.
    pad = len(a2) - len(a1)
    if pad > 0:
        a1 = NX.concatenate((NX.zeros(pad, a1.dtype), a1))
    elif pad < 0:
        a2 = NX.concatenate((NX.zeros(-pad, a2.dtype), a2))
    total = a1 + a2
    return poly1d(total) if wrap else total
854
+
855
+
856
@array_function_dispatch(_binary_op_dispatcher)
def polysub(a1, a2):
    """
    Difference (subtraction) of two polynomials.

    .. note::
       This forms part of the old polynomial API. Since version 1.4, the
       new polynomial API defined in `numpy.polynomial` is preferred.
       A summary of the differences can be found in the
       :doc:`transition guide </reference/routines.polynomials>`.

    Given two polynomials `a1` and `a2`, returns ``a1 - a2``.  Each input
    can be an array_like sequence of coefficients (including zeros, from
    highest to lowest degree) or a `poly1d` object.

    Parameters
    ----------
    a1, a2 : array_like or poly1d
        Minuend and subtrahend polynomials, respectively.

    Returns
    -------
    out : ndarray or poly1d
        Array or `poly1d` object of the difference polynomial's
        coefficients.

    See Also
    --------
    polyval, polydiv, polymul, polyadd

    Examples
    --------
    .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)

    >>> np.polysub([2, 10, -2], [3, 10, -4])
    array([-1,  0,  2])

    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    a1 = atleast_1d(a1)
    a2 = atleast_1d(a2)
    # Coefficients are stored highest degree first, so the shorter vector
    # is left-padded with zeros before the elementwise subtraction.
    pad = len(a2) - len(a1)
    if pad > 0:
        a1 = NX.concatenate((NX.zeros(pad, a1.dtype), a1))
    elif pad < 0:
        a2 = NX.concatenate((NX.zeros(-pad, a2.dtype), a2))
    difference = a1 - a2
    return poly1d(difference) if wrap else difference
908
+
909
+
910
@array_function_dispatch(_binary_op_dispatcher)
def polymul(a1, a2):
    """
    Find the product of two polynomials.

    .. note::
       This forms part of the old polynomial API. Since version 1.4, the
       new polynomial API defined in `numpy.polynomial` is preferred.
       A summary of the differences can be found in the
       :doc:`transition guide </reference/routines.polynomials>`.

    Each input must be either a poly1d object or a 1D sequence of
    polynomial coefficients, from highest to lowest degree.

    Parameters
    ----------
    a1, a2 : array_like or poly1d object
        Input polynomials.

    Returns
    -------
    out : ndarray or poly1d object
        The product of the inputs.  If either input is a poly1d object
        the output is also a poly1d object; otherwise it is a 1D array
        of coefficients from highest to lowest degree.

    See Also
    --------
    poly1d : A one-dimensional polynomial class.
    poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
    convolve : Array convolution. Same output as polymul, but has
        parameter for overlap mode.

    Examples
    --------
    >>> np.polymul([1, 2, 3], [9, 5, 1])
    array([ 9, 23, 38, 17,  3])

    """
    wrap = isinstance(a1, poly1d) or isinstance(a2, poly1d)
    # Normalizing through poly1d flattens the inputs and trims leading
    # zero coefficients.
    p1, p2 = poly1d(a1), poly1d(a2)
    # Polynomial multiplication is convolution of the coefficient vectors.
    product = NX.convolve(p1, p2)
    return poly1d(product) if wrap else product
971
+
972
+
973
+ def _polydiv_dispatcher(u, v):
974
+ return (u, v)
975
+
976
+
977
@array_function_dispatch(_polydiv_dispatcher)
def polydiv(u, v):
    """
    Returns the quotient and remainder of polynomial division.

    .. note::
       This forms part of the old polynomial API. Since version 1.4, the
       new polynomial API defined in `numpy.polynomial` is preferred.
       A summary of the differences can be found in the
       :doc:`transition guide </reference/routines.polynomials>`.

    The input arrays are the coefficients (including any coefficients
    equal to zero) of the "numerator" (dividend) and "denominator"
    (divisor) polynomials, respectively.

    Parameters
    ----------
    u : array_like or poly1d
        Dividend polynomial's coefficients.

    v : array_like or poly1d
        Divisor polynomial's coefficients.

    Returns
    -------
    q : ndarray
        Coefficients, including those equal to zero, of the quotient.
    r : ndarray
        Coefficients, including those equal to zero, of the remainder.

    See Also
    --------
    poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
    polyval

    Notes
    -----
    Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
    not equal `v.ndim`. In other words, all four possible combinations -
    ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
    ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.

    Examples
    --------
    .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25

    >>> x = np.array([3.0, 5.0, 2.0])
    >>> y = np.array([2.0, 1.0])
    >>> np.polydiv(x, y)
    (array([1.5 , 1.75]), array([0.25]))

    """
    truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
    # "+ 0.0" promotes integer inputs to floats before the division below.
    u = atleast_1d(u) + 0.0
    v = atleast_1d(v) + 0.0
    # w has the common type
    w = u[0] + v[0]
    m = len(u) - 1  # degree of the dividend
    n = len(v) - 1  # degree of the divisor
    scale = 1. / v[0]
    # Quotient has degree m - n (at least one coefficient even when m < n).
    q = NX.zeros((max(m - n + 1, 1),), w.dtype)
    r = u.astype(w.dtype)
    # Classical long division: eliminate the leading coefficient of the
    # running remainder r one step at a time.
    for k in range(0, m-n+1):
        d = scale * r[k]
        q[k] = d
        r[k:k+n+1] -= d*v
    # Trim leading remainder coefficients that are (numerically) zero,
    # but always keep at least one coefficient.
    while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
        r = r[1:]
    if truepoly:
        return poly1d(q), poly1d(r)
    return q, r
1048
+
1049
_poly_mat = re.compile(r"\*\*([0-9]*)")
def _raise_power(astr, wrap=70):
    """Typeset ``**N`` exponents in *astr* as superscripts.

    Rewrites a one-line polynomial string (as produced by
    ``poly1d.__str__``) into two rows: the exponents are lifted onto the
    line above their terms.  Both rows are wrapped at roughly *wrap*
    columns.
    """
    n = 0
    # line1 accumulates the exponent (upper) row, line2 the base (lower) row.
    line1 = ''
    line2 = ''
    output = ' '
    while True:
        mat = _poly_mat.search(astr, n)
        if mat is None:
            break
        span = mat.span()
        power = mat.groups()[0]
        # Text between the previous match and this '**' belongs on the
        # base row; the captured exponent digits go on the upper row.
        partstr = astr[n:span[0]]
        n = span[1]
        toadd2 = partstr + ' '*(len(power)-1)
        toadd1 = ' '*(len(partstr)-1) + power
        # Flush both rows when either one would exceed the wrap width.
        if ((len(line2) + len(toadd2) > wrap) or
                (len(line1) + len(toadd1) > wrap)):
            output += line1 + "\n" + line2 + "\n "
            line1 = toadd1
            line2 = toadd2
        else:
            line2 += partstr + ' '*(len(power)-1)
            line1 += ' '*(len(partstr)-1) + power
    output += line1 + "\n" + line2
    # Append any trailing text after the last exponent.
    return output + astr[n:]
1075
+
1076
+
1077
@set_module('numpy')
class poly1d:
    """
    A one-dimensional polynomial class.

    .. note::
       This forms part of the old polynomial API. Since version 1.4, the
       new polynomial API defined in `numpy.polynomial` is preferred.
       A summary of the differences can be found in the
       :doc:`transition guide </reference/routines.polynomials>`.

    A convenience class, used to encapsulate "natural" operations on
    polynomials so that said operations may take on their customary
    form in code (see Examples).

    Parameters
    ----------
    c_or_r : array_like
        The polynomial's coefficients, in decreasing powers, or if
        the value of the second parameter is True, the polynomial's
        roots (values where the polynomial evaluates to 0).  For example,
        ``poly1d([1, 2, 3])`` returns an object that represents
        :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
        one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
    r : bool, optional
        If True, `c_or_r` specifies the polynomial's roots; the default
        is False.
    variable : str, optional
        Changes the variable used when printing `p` from `x` to `variable`
        (see Examples).

    Examples
    --------
    Construct the polynomial :math:`x^2 + 2x + 3`:

    >>> p = np.poly1d([1, 2, 3])
    >>> print(np.poly1d(p))
       2
    1 x + 2 x + 3

    Evaluate the polynomial at :math:`x = 0.5`:

    >>> p(0.5)
    4.25

    Find the roots:

    >>> p.r
    array([-1.+1.41421356j, -1.-1.41421356j])
    >>> p(p.r)
    array([ -4.44089210e-16+0.j,  -4.44089210e-16+0.j]) # may vary

    These numbers in the previous line represent (0, 0) to machine precision

    Show the coefficients:

    >>> p.c
    array([1, 2, 3])

    Display the order (the leading zero-coefficients are removed):

    >>> p.order
    2

    Show the coefficient of the k-th power in the polynomial
    (which is equivalent to ``p.c[-(i+1)]``):

    >>> p[1]
    2

    Polynomials can be added, subtracted, multiplied, and divided
    (returns quotient and remainder):

    >>> p * p
    poly1d([ 1,  4, 10, 12,  9])

    >>> (p**3 + 4) / p
    (poly1d([ 1.,  4., 10., 12.,  9.]), poly1d([4.]))

    ``asarray(p)`` gives the coefficient array, so polynomials can be
    used in all functions that accept arrays:

    >>> p**2 # square of polynomial
    poly1d([ 1,  4, 10, 12,  9])

    >>> np.square(p) # square of individual coefficients
    array([1, 4, 9])

    The variable used in the string representation of `p` can be modified,
    using the `variable` parameter:

    >>> p = np.poly1d([1,2,3], variable='z')
    >>> print(p)
       2
    1 z + 2 z + 3

    Construct a polynomial from its roots:

    >>> np.poly1d([1, 2], True)
    poly1d([ 1., -3.,  2.])

    This is the same polynomial as obtained by:

    >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
    poly1d([ 1, -3,  2])

    """
    # Instances define __eq__, so they are explicitly marked unhashable.
    __hash__ = None

    @property
    def coeffs(self):
        """ The polynomial coefficients """
        return self._coeffs

    @coeffs.setter
    def coeffs(self, value):
        # allowing this makes p.coeffs *= 2 legal
        if value is not self._coeffs:
            raise AttributeError("Cannot set attribute")

    @property
    def variable(self):
        """ The name of the polynomial variable """
        return self._variable

    # calculated attributes
    @property
    def order(self):
        """ The order or degree of the polynomial """
        return len(self._coeffs) - 1

    @property
    def roots(self):
        """ The roots of the polynomial, where self(x) == 0 """
        return roots(self._coeffs)

    # our internal _coeffs property need to be backed by __dict__['coeffs'] for
    # scipy to work correctly.
    @property
    def _coeffs(self):
        return self.__dict__['coeffs']
    @_coeffs.setter
    def _coeffs(self, coeffs):
        self.__dict__['coeffs'] = coeffs

    # alias attributes
    r = roots
    c = coef = coefficients = coeffs
    o = order

    def __init__(self, c_or_r, r=False, variable=None):
        if isinstance(c_or_r, poly1d):
            # Copy constructor: share the coefficient array and variable.
            self._variable = c_or_r._variable
            self._coeffs = c_or_r._coeffs

            if set(c_or_r.__dict__) - set(self.__dict__):
                msg = ("In the future extra properties will not be copied "
                       "across when constructing one poly1d from another")
                warnings.warn(msg, FutureWarning, stacklevel=2)
                self.__dict__.update(c_or_r.__dict__)

            if variable is not None:
                self._variable = variable
            return
        if r:
            # c_or_r holds roots: convert to coefficients first.
            c_or_r = poly(c_or_r)
        c_or_r = atleast_1d(c_or_r)
        if c_or_r.ndim > 1:
            raise ValueError("Polynomial must be 1d only.")
        # Drop leading (front) zero coefficients; keep at least one.
        c_or_r = trim_zeros(c_or_r, trim='f')
        if len(c_or_r) == 0:
            c_or_r = NX.array([0], dtype=c_or_r.dtype)
        self._coeffs = c_or_r
        if variable is None:
            variable = 'x'
        self._variable = variable

    def __array__(self, t=None):
        # t, when given, is forwarded to asarray as the target dtype.
        if t:
            return NX.asarray(self.coeffs, t)
        else:
            return NX.asarray(self.coeffs)

    def __repr__(self):
        vals = repr(self.coeffs)
        # Strip the surrounding "array(" ... ")" from the coefficient repr.
        vals = vals[6:-1]
        return "poly1d(%s)" % vals

    def __len__(self):
        return self.order

    def __str__(self):
        thestr = "0"
        var = self.variable

        # Remove leading zeros
        coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
        N = len(coeffs)-1

        def fmt_float(q):
            # %.4g, with a redundant ".0000" tail stripped.
            s = '%.4g' % q
            if s.endswith('.0000'):
                s = s[:-5]
            return s

        for k, coeff in enumerate(coeffs):
            if not iscomplex(coeff):
                coefstr = fmt_float(real(coeff))
            elif real(coeff) == 0:
                coefstr = '%sj' % fmt_float(imag(coeff))
            else:
                coefstr = '(%s + %sj)' % (fmt_float(real(coeff)),
                                          fmt_float(imag(coeff)))

            power = (N-k)
            if power == 0:
                if coefstr != '0':
                    newstr = '%s' % (coefstr,)
                else:
                    if k == 0:
                        newstr = '0'
                    else:
                        newstr = ''
            elif power == 1:
                if coefstr == '0':
                    newstr = ''
                # NOTE(review): the 'b' comparisons here and below look
                # unreachable — fmt_float never produces the bare string
                # 'b'. Apparently legacy; confirm before removing.
                elif coefstr == 'b':
                    newstr = var
                else:
                    newstr = '%s %s' % (coefstr, var)
            else:
                if coefstr == '0':
                    newstr = ''
                elif coefstr == 'b':
                    newstr = '%s**%d' % (var, power,)
                else:
                    newstr = '%s %s**%d' % (coefstr, var, power)

            if k > 0:
                if newstr != '':
                    # Fold the sign of the term into the " + " / " - " joiner.
                    if newstr.startswith('-'):
                        thestr = "%s - %s" % (thestr, newstr[1:])
                    else:
                        thestr = "%s + %s" % (thestr, newstr)
            else:
                thestr = newstr
        # Lift the '**N' exponents onto a superscript row.
        return _raise_power(thestr)

    def __call__(self, val):
        return polyval(self.coeffs, val)

    def __neg__(self):
        return poly1d(-self.coeffs)

    def __pos__(self):
        return self

    def __mul__(self, other):
        if isscalar(other):
            return poly1d(self.coeffs * other)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))

    def __rmul__(self, other):
        if isscalar(other):
            return poly1d(other * self.coeffs)
        else:
            other = poly1d(other)
            return poly1d(polymul(self.coeffs, other.coeffs))

    def __add__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))

    def __radd__(self, other):
        other = poly1d(other)
        return poly1d(polyadd(self.coeffs, other.coeffs))

    def __pow__(self, val):
        if not isscalar(val) or int(val) != val or val < 0:
            raise ValueError("Power to non-negative integers only.")
        # Repeated multiplication; val == 0 yields the constant poly [1].
        res = [1]
        for _ in range(val):
            res = polymul(self.coeffs, res)
        return poly1d(res)

    def __sub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(self.coeffs, other.coeffs))

    def __rsub__(self, other):
        other = poly1d(other)
        return poly1d(polysub(other.coeffs, self.coeffs))

    def __div__(self, other):
        # Division returns (quotient, remainder) unless other is a scalar.
        if isscalar(other):
            return poly1d(self.coeffs/other)
        else:
            other = poly1d(other)
            return polydiv(self, other)

    __truediv__ = __div__

    def __rdiv__(self, other):
        if isscalar(other):
            return poly1d(other/self.coeffs)
        else:
            other = poly1d(other)
            return polydiv(other, self)

    __rtruediv__ = __rdiv__

    def __eq__(self, other):
        if not isinstance(other, poly1d):
            return NotImplemented
        if self.coeffs.shape != other.coeffs.shape:
            return False
        return (self.coeffs == other.coeffs).all()

    def __ne__(self, other):
        if not isinstance(other, poly1d):
            return NotImplemented
        return not self.__eq__(other)


    def __getitem__(self, val):
        # p[k] is the coefficient of x**k; coefficients are stored highest
        # power first, so index from the end.
        ind = self.order - val
        if val > self.order:
            return self.coeffs.dtype.type(0)
        if val < 0:
            return self.coeffs.dtype.type(0)
        return self.coeffs[ind]

    def __setitem__(self, key, val):
        ind = self.order - key
        if key < 0:
            raise ValueError("Does not support negative powers.")
        if key > self.order:
            # Grow the coefficient array to accommodate the new power.
            zr = NX.zeros(key-self.order, self.coeffs.dtype)
            self._coeffs = NX.concatenate((zr, self.coeffs))
            ind = 0
        self._coeffs[ind] = val
        return

    def __iter__(self):
        return iter(self.coeffs)

    def integ(self, m=1, k=0):
        """
        Return an antiderivative (indefinite integral) of this polynomial.

        Refer to `polyint` for full documentation.

        See Also
        --------
        polyint : equivalent function

        """
        return poly1d(polyint(self.coeffs, m=m, k=k))

    def deriv(self, m=1):
        """
        Return a derivative of this polynomial.

        Refer to `polyder` for full documentation.

        See Also
        --------
        polyder : equivalent function

        """
        return poly1d(polyder(self.coeffs, m=m))
1450
+
1451
# Stuff to do on module import

# Always show RankWarning (raised by polyfit on rank-deficient fits) rather
# than letting the default "once per location" filter suppress repeats.
warnings.simplefilter('always', RankWarning)
.venv/lib/python3.11/site-packages/numpy/lib/scimath.pyi ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Type stubs for numpy.lib.scimath.
#
# Every function carries four overloads: real scalar -> Any, complex
# scalar -> complexfloating, real array-like -> NDArray[Any], and
# complex array-like -> NDArray[complexfloating].
from typing import overload, Any

from numpy import complexfloating

from numpy._typing import (
    NDArray,
    _ArrayLikeFloat_co,
    _ArrayLikeComplex_co,
    _ComplexLike_co,
    _FloatLike_co,
)

__all__: list[str]

@overload
def sqrt(x: _FloatLike_co) -> Any: ...
@overload
def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def log(x: _FloatLike_co) -> Any: ...
@overload
def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def log10(x: _FloatLike_co) -> Any: ...
@overload
def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def log2(x: _FloatLike_co) -> Any: ...
@overload
def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ...
@overload
def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ...
@overload
def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def arccos(x: _FloatLike_co) -> Any: ...
@overload
def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def arcsin(x: _FloatLike_co) -> Any: ...
@overload
def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...

@overload
def arctanh(x: _FloatLike_co) -> Any: ...
@overload
def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
@overload
def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
@overload
def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
.venv/lib/python3.11/site-packages/numpy/lib/tests/__init__.py ADDED
File without changes
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test__iotools.cpython-311.pyc ADDED
Binary file (22.3 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test__version.cpython-311.pyc ADDED
Binary file (4.24 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_arraysetops.cpython-311.pyc ADDED
Binary file (56.9 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_format.cpython-311.pyc ADDED
Binary file (59.3 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_histograms.cpython-311.pyc ADDED
Binary file (56.1 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_loadtxt.cpython-311.pyc ADDED
Binary file (73.7 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_mixins.cpython-311.pyc ADDED
Binary file (14.9 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_nanfunctions.cpython-311.pyc ADDED
Binary file (92.9 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_packbits.cpython-311.pyc ADDED
Binary file (24.8 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_polynomial.cpython-311.pyc ADDED
Binary file (23.7 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_recfunctions.cpython-311.pyc ADDED
Binary file (65 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_regression.cpython-311.pyc ADDED
Binary file (17.9 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_shape_base.cpython-311.pyc ADDED
Binary file (62.9 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_twodim_base.cpython-311.pyc ADDED
Binary file (34.8 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_type_check.cpython-311.pyc ADDED
Binary file (39.5 kB). View file
 
.venv/lib/python3.11/site-packages/numpy/lib/tests/__pycache__/test_utils.cpython-311.pyc ADDED
Binary file (12.7 kB). View file