diff --git a/.circleci/config.yml b/.circleci/config.yml
index 8f9fa8c9fed0..646ee52633d3 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -9,7 +9,7 @@ _defaults: &defaults
   docker:
     # CircleCI maintains a library of pre-built images
     # documented at https://circleci.com/developer/images/image/cimg/python
-    - image: cimg/python:3.11.4
+    - image: cimg/python:3.11.8
 
   working_directory: ~/repo
 
@@ -74,7 +74,7 @@ jobs:
             . venv/bin/activate
             cd doc
             # Don't use -q, show warning summary"
-            SPHINXOPTS="-j2 -n" make -e html || echo "ignoring errors for now, see gh-13114"
+            SPHINXOPTS="-j2 -n" make -e html
             if [[ $(find build/html -type f | wc -l) -lt 1000 ]]; then
                 echo "doc build failed: build/html is empty"
                 exit -1
@@ -85,7 +85,7 @@ jobs:
           command: |
             . venv/bin/activate
             cd doc/neps
-            SPHINXOPTS="-j2 -q" make -e html
+            SPHINXOPTS="-j2 -n" make -e html || echo "ignoring errors for now"
 
       - store_artifacts:
           path: doc/build/html/
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 1e734c0134bc..b30fe3c9978a 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -585,3 +585,12 @@ class NumPyLexer(CLexer):
 breathe_projects = dict(numpy=os.path.join("..", "build", "doxygen", "xml"))
 breathe_default_project = "numpy"
 breathe_default_members = ("members", "undoc-members", "protected-members")
+
+# See https://github.com/breathe-doc/breathe/issues/696
+nitpick_ignore = [
+    ('c:identifier', 'FILE'),
+    ('c:identifier', 'size_t'),
+    ('c:identifier', 'PyHeapTypeObject'),
+]
+
+
diff --git a/doc/source/user/how-to-io.rst b/doc/source/user/how-to-io.rst
index 9b3a71fa40bb..ca4abcd13746 100644
--- a/doc/source/user/how-to-io.rst
+++ b/doc/source/user/how-to-io.rst
@@ -327,7 +327,7 @@ created with NumPy 1.26.
 Convert from a pandas DataFrame to a NumPy array
 ================================================
 
-See :meth:`pandas.Series.to_numpy`.
+See :meth:`pandas.DataFrame.to_numpy`.
 
 Save/restore using `~numpy.ndarray.tofile` and `~numpy.fromfile`
 ================================================================
diff --git a/numpy/_core/records.py b/numpy/_core/records.py
index 79755e09bb40..8bdeec15c6d2 100644
--- a/numpy/_core/records.py
+++ b/numpy/_core/records.py
@@ -583,7 +583,7 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None,
         ``arrayList[0]``.
     formats, names, titles, aligned, byteorder :
         If `dtype` is ``None``, these arguments are passed to
-        `numpy.format_parser` to construct a dtype. See that function for
+        `numpy.rec.format_parser` to construct a dtype. See that function for
         detailed documentation.
 
     Returns
diff --git a/numpy/lib/_npyio_impl.py b/numpy/lib/_npyio_impl.py
index 8fef65e7f6ab..8986b94fd500 100644
--- a/numpy/lib/_npyio_impl.py
+++ b/numpy/lib/_npyio_impl.py
@@ -278,6 +278,34 @@ def __repr__(self):
             array_names += "..."
         return f"NpzFile {filename!r} with keys: {array_names}"
 
+    # Work around problems with the docstrings in the Mapping methods
+    # They contain a `->`, which confuses the type annotation interpretations
+    # of sphinx-docs. See gh-25964
+
+    def get(self, key, default=None, /):
+        """
+        D.get(k[,d]) returns D[k] if k in D, else d. d defaults to None.
+        """
+        return Mapping.get(self, key, default)
+
+    def items(self):
+        """
+        D.items() returns a set-like object providing a view on the items
+        """
+        return Mapping.items(self)
+
+    def keys(self):
+        """
+        D.keys() returns a set-like object providing a view on the keys
+        """
+        return Mapping.keys(self)
+
+    def values(self):
+        """
+        D.values() returns a set-like object providing a view on the values
+        """
+        return Mapping.values(self)
+
 
 @set_module('numpy')
 def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
@@ -487,9 +515,9 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
     arr : array_like
         Array data to be saved.
     allow_pickle : bool, optional
-        Allow saving object arrays using Python pickles. Reasons for 
+        Allow saving object arrays using Python pickles. Reasons for
         disallowing pickles include security (loading pickled data can execute
-        arbitrary code) and portability (pickled objects may not be loadable 
+        arbitrary code) and portability (pickled objects may not be loadable
         on different Python installations, for example if the stored objects
         require libraries that are not available, and not all pickled data is
         compatible between Python 2 and Python 3).
@@ -1814,10 +1842,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
         .. versionadded:: 1.10.0
     encoding : str, optional
         Encoding used to decode the inputfile. Does not apply when `fname`
-        is a file object. The special value 'bytes' enables backward 
+        is a file object. The special value 'bytes' enables backward
         compatibility workarounds that ensure that you receive byte arrays
-        when possible and passes latin1 encoded strings to converters. 
-        Override this value to receive unicode arrays and pass strings 
+        when possible and passes latin1 encoded strings to converters.
+        Override this value to receive unicode arrays and pass strings
         as input to converters. If set to None the system default is used.
         The default value is 'bytes'.
 
@@ -1854,7 +1882,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
     * Individual values are not stripped of spaces by default.
       When using a custom converter, make sure the function does remove spaces.
    * Custom converters may receive unexpected values due to dtype
-      discovery. 
+      discovery.
 
     References
     ----------
@@ -2127,7 +2155,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
                 except ValueError:
                     # We couldn't find it: the name must have been dropped
                     continue
-            # Redefine the key if it's a column number 
+            # Redefine the key if it's a column number
             # and usecols is defined
             if usecols:
                 try:
@@ -2161,23 +2189,23 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
         if len(dtype_flat) > 1:
             # Flexible type : get a converter from each dtype
             zipit = zip(dtype_flat, missing_values, filling_values)
-            converters = [StringConverter(dt, 
+            converters = [StringConverter(dt,
                                           locked=True,
-                                          missing_values=miss, 
+                                          missing_values=miss,
                                           default=fill)
                           for (dt, miss, fill) in zipit]
         else:
             # Set to a default converter (but w/ different missing values)
             zipit = zip(missing_values, filling_values)
-            converters = [StringConverter(dtype, 
+            converters = [StringConverter(dtype,
                                           locked=True,
-                                          missing_values=miss, 
+                                          missing_values=miss,
                                           default=fill)
                           for (miss, fill) in zipit]
     # Update the converters to use the user-defined ones
     uc_update = []
     for (j, conv) in user_converters.items():
-        # If the converter is specified by column names, 
+        # If the converter is specified by column names,
         # use the index instead
         if _is_string_like(j):
             try:
@@ -2201,8 +2229,8 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
         if conv is bytes:
             user_conv = asbytes
         elif byte_converters:
-            # Converters may use decode to workaround numpy's old 
-            # behavior, so encode the string again before passing 
+            # Converters may use decode to workaround numpy's old
+            # behavior, so encode the string again before passing
             # to the user converter.
             def tobytes_first(x, conv):
                 if type(x) is bytes:
@@ -2338,7 +2366,7 @@ def tobytes_first(x, conv):
             "Reading unicode strings without specifying the encoding "
             "argument is deprecated. Set the encoding, use None for the "
             "system default.", np.exceptions.VisibleDeprecationWarning, stacklevel=2)
-    
+
         def encode_unicode_cols(row_tup):
             row = list(row_tup)
             for i in strcolidx:
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 44664c2df891..38ded1f26cda 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -2760,12 +2760,16 @@ def test_npzfile_dict():
         assert_(f in ['x', 'y'])
         assert_equal(a.shape, (3, 3))
 
+    for a in z.values():
+        assert_equal(a.shape, (3, 3))
+
     assert_(len(z.items()) == 2)
 
     for f in z:
         assert_(f in ['x', 'y'])
 
     assert_('x' in z.keys())
+    assert (z.get('x') == z['x']).all()
 
 
 @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
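The functional core of the patch is the explicit get/items/keys/values wrappers on NpzFile, which the new test at the end exercises. A minimal usage sketch of that Mapping behaviour, assuming an in-memory archive created with np.savez; the array names x and y simply mirror the test above and are illustrative only:

import io
import numpy as np

# Build a small .npz archive in memory (illustrative data).
buf = io.BytesIO()
np.savez(buf, x=np.zeros((3, 3)), y=np.ones((3, 3)))
buf.seek(0)

with np.load(buf) as z:                  # z is an NpzFile, a Mapping of names to arrays
    print(sorted(z.keys()))              # ['x', 'y']
    for name, arr in z.items():          # each array is read from the archive on access
        print(name, arr.shape)           # x (3, 3), then y (3, 3)
    print(z.get('missing'))              # None, same semantics as dict.get
    print((z.get('x') == z['x']).all())  # True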