diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 8dd07d911f5b18..fc1bb3388976d5 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -151,6 +151,8 @@ Lib/ast.py @isidentical **/*sysconfig* @FFY00 +**/*cjkcodecs* @corona10 + # macOS /Mac/ @python/macos-team **/*osx_support* @python/macos-team diff --git a/Doc/bugs.rst b/Doc/bugs.rst index 69d7c27410d56a..4f30ef19ee4d8a 100644 --- a/Doc/bugs.rst +++ b/Doc/bugs.rst @@ -19,6 +19,9 @@ If you find a bug in this documentation or would like to propose an improvement, please submit a bug report on the :ref:`tracker `. If you have a suggestion on how to fix it, include that as well. +You can also open a discussion item on our +`Documentation Discourse forum `_. + If you're short on time, you can also email documentation bug reports to docs@python.org (behavioral bugs can be sent to python-list@python.org). 'docs@' is a mailing list run by volunteers; your request will be noticed, diff --git a/Doc/c-api/code.rst b/Doc/c-api/code.rst index a6eb86f1a0b514..ae75d68901d7ba 100644 --- a/Doc/c-api/code.rst +++ b/Doc/c-api/code.rst @@ -77,6 +77,8 @@ bound into a function. Returns ``1`` if the function succeeds and 0 otherwise. + .. versionadded:: 3.11 + .. c:function:: PyObject* PyCode_GetCode(PyCodeObject *co) Equivalent to the Python code ``getattr(co, 'co_code')``. diff --git a/Doc/c-api/exceptions.rst b/Doc/c-api/exceptions.rst index 087e0a61d12d59..de9b15edd6859a 100644 --- a/Doc/c-api/exceptions.rst +++ b/Doc/c-api/exceptions.rst @@ -400,8 +400,61 @@ Querying the error indicator recursively in subtuples) are searched for a match. +.. c:function:: PyObject *PyErr_GetRaisedException(void) + + Returns the exception currently being raised, clearing the exception at + the same time. Do not confuse this with the exception currently being + handled which can be accessed with :c:func:`PyErr_GetHandledException`. + + .. note:: + + This function is normally only used by code that needs to catch exceptions or + by code that needs to save and restore the error indicator temporarily, e.g.:: + + { + PyObject *exc = PyErr_GetRaisedException(); + + /* ... code that might produce other errors ... */ + + PyErr_SetRaisedException(exc); + } + + .. versionadded:: 3.12 + + +.. c:function:: void PyErr_SetRaisedException(PyObject *exc) + + Sets the exception currently being raised ``exc``. + If the exception is already set, it is cleared first. + + ``exc`` must be a valid exception. + (Violating this rules will cause subtle problems later.) + This call consumes a reference to the ``exc`` object: you must own a + reference to that object before the call and after the call you no longer own + that reference. + (If you don't understand this, don't use this function. I warned you.) + + .. note:: + + This function is normally only used by code that needs to save and restore the + error indicator temporarily. Use :c:func:`PyErr_GetRaisedException` to save + the current exception, e.g.:: + + { + PyObject *exc = PyErr_GetRaisedException(); + + /* ... code that might produce other errors ... */ + + PyErr_SetRaisedException(exc); + } + + .. versionadded:: 3.12 + + .. c:function:: void PyErr_Fetch(PyObject **ptype, PyObject **pvalue, PyObject **ptraceback) + As of 3.12, this function is deprecated. Use :c:func:`PyErr_GetRaisedException` instead. + Retrieve the error indicator into three variables whose addresses are passed. If the error indicator is not set, set all three variables to ``NULL``. 
If it is set, it will be cleared and you own a reference to each object retrieved. The @@ -421,10 +474,14 @@ Querying the error indicator PyErr_Restore(type, value, traceback); } + .. deprecated:: 3.12 + .. c:function:: void PyErr_Restore(PyObject *type, PyObject *value, PyObject *traceback) - Set the error indicator from the three objects. If the error indicator is + As of 3.12, this function is deprecated. Use :c:func:`PyErr_SetRaisedException` instead. + + Set the error indicator from the three objects. If the error indicator is already set, it is cleared first. If the objects are ``NULL``, the error indicator is cleared. Do not pass a ``NULL`` type and non-``NULL`` value or traceback. The exception type should be a class. Do not pass an invalid @@ -440,9 +497,15 @@ Querying the error indicator error indicator temporarily. Use :c:func:`PyErr_Fetch` to save the current error indicator. + .. deprecated:: 3.12 + .. c:function:: void PyErr_NormalizeException(PyObject **exc, PyObject **val, PyObject **tb) + As of 3.12, this function is deprecated. + Use :c:func:`PyErr_GetRaisedException` instead of :c:func:`PyErr_Fetch` to avoid + any possible de-normalization. + Under certain circumstances, the values returned by :c:func:`PyErr_Fetch` below can be "unnormalized", meaning that ``*exc`` is a class object but ``*val`` is not an instance of the same class. This function can be used to instantiate @@ -459,6 +522,8 @@ Querying the error indicator PyException_SetTraceback(val, tb); } + .. deprecated:: 3.12 + .. c:function:: PyObject* PyErr_GetHandledException(void) @@ -704,6 +769,18 @@ Exception Objects :attr:`__suppress_context__` is implicitly set to ``True`` by this function. +.. c:function:: PyObject* PyException_GetArgs(PyObject *ex) + + Return args of the given exception as a new reference, + as accessible from Python through :attr:`args`. + + +.. c:function:: void PyException_SetArgs(PyObject *ex, PyObject *args) + + Set the args of the given exception, + as accessible from Python through :attr:`args`. + + .. _unicodeexceptions: Unicode Exception Objects diff --git a/Doc/c-api/typeobj.rst b/Doc/c-api/typeobj.rst index 644830b940b417..fd8f49ccb1caab 100644 --- a/Doc/c-api/typeobj.rst +++ b/Doc/c-api/typeobj.rst @@ -1313,6 +1313,16 @@ and :c:type:`PyType_Type` effectively act as defaults.) .. versionadded:: 3.10 + .. data:: Py_TPFLAGS_VALID_VERSION_TAG + + Internal. Do not set or unset this flag. + To indicate that a class has changed call :c:func:`PyType_Modified` + + .. warning:: + This flag is present in header files, but is an internal feature and should + not be used. It will be removed in a future version of CPython + + .. 
c:member:: const char* PyTypeObject.tp_doc An optional pointer to a NUL-terminated C string giving the docstring for this diff --git a/Doc/data/stable_abi.dat b/Doc/data/stable_abi.dat index 53895bbced8408..68ab0b5061f434 100644 --- a/Doc/data/stable_abi.dat +++ b/Doc/data/stable_abi.dat @@ -139,6 +139,7 @@ function,PyErr_Format,3.2,, function,PyErr_FormatV,3.5,, function,PyErr_GetExcInfo,3.7,, function,PyErr_GetHandledException,3.11,, +function,PyErr_GetRaisedException,3.12,, function,PyErr_GivenExceptionMatches,3.2,, function,PyErr_NewException,3.2,, function,PyErr_NewExceptionWithDoc,3.2,, @@ -168,6 +169,7 @@ function,PyErr_SetInterrupt,3.2,, function,PyErr_SetInterruptEx,3.10,, function,PyErr_SetNone,3.2,, function,PyErr_SetObject,3.2,, +function,PyErr_SetRaisedException,3.12,, function,PyErr_SetString,3.2,, function,PyErr_SyntaxLocation,3.2,, function,PyErr_SyntaxLocationEx,3.7,, @@ -266,9 +268,11 @@ var,PyExc_Warning,3.2,, var,PyExc_WindowsError,3.7,on Windows, var,PyExc_ZeroDivisionError,3.2,, function,PyExceptionClass_Name,3.8,, +function,PyException_GetArgs,3.12,, function,PyException_GetCause,3.2,, function,PyException_GetContext,3.2,, function,PyException_GetTraceback,3.2,, +function,PyException_SetArgs,3.12,, function,PyException_SetCause,3.2,, function,PyException_SetContext,3.2,, function,PyException_SetTraceback,3.2,, diff --git a/Doc/library/argparse.rst b/Doc/library/argparse.rst index 475cac70291e9a..dbaa5d0d9b995b 100644 --- a/Doc/library/argparse.rst +++ b/Doc/library/argparse.rst @@ -31,7 +31,7 @@ Core Functionality The :mod:`argparse` module's support for command-line interfaces is built around an instance of :class:`argparse.ArgumentParser`. It is a container for -argument specifications and has options that apply the parser as whole:: +argument specifications and has options that apply to the parser as whole:: parser = argparse.ArgumentParser( prog = 'ProgramName', diff --git a/Doc/library/array.rst b/Doc/library/array.rst index 95f1eaf401b052..75c49e0f6d1ebe 100644 --- a/Doc/library/array.rst +++ b/Doc/library/array.rst @@ -60,7 +60,7 @@ Notes: The actual representation of values is determined by the machine architecture (strictly speaking, by the C implementation). The actual size can be accessed -through the :attr:`itemsize` attribute. +through the :attr:`array.itemsize` attribute. The module defines the following item: @@ -85,161 +85,160 @@ The module defines the following type: to add initial items to the array. Otherwise, the iterable initializer is passed to the :meth:`extend` method. - .. audit-event:: array.__new__ typecode,initializer array.array + Array objects support the ordinary sequence operations of indexing, slicing, + concatenation, and multiplication. When using slice assignment, the assigned + value must be an array object with the same type code; in all other cases, + :exc:`TypeError` is raised. Array objects also implement the buffer interface, + and may be used wherever :term:`bytes-like objects ` are supported. + .. audit-event:: array.__new__ typecode,initializer array.array -Array objects support the ordinary sequence operations of indexing, slicing, -concatenation, and multiplication. When using slice assignment, the assigned -value must be an array object with the same type code; in all other cases, -:exc:`TypeError` is raised. Array objects also implement the buffer interface, -and may be used wherever :term:`bytes-like objects ` are supported. -The following data items and methods are also supported: + .. attribute:: typecode -.. 
attribute:: array.typecode + The typecode character used to create the array. - The typecode character used to create the array. + .. attribute:: itemsize -.. attribute:: array.itemsize + The length in bytes of one array item in the internal representation. - The length in bytes of one array item in the internal representation. + .. method:: append(x) -.. method:: array.append(x) + Append a new item with value *x* to the end of the array. - Append a new item with value *x* to the end of the array. + .. method:: buffer_info() -.. method:: array.buffer_info() + Return a tuple ``(address, length)`` giving the current memory address and the + length in elements of the buffer used to hold array's contents. The size of the + memory buffer in bytes can be computed as ``array.buffer_info()[1] * + array.itemsize``. This is occasionally useful when working with low-level (and + inherently unsafe) I/O interfaces that require memory addresses, such as certain + :c:func:`!ioctl` operations. The returned numbers are valid as long as the array + exists and no length-changing operations are applied to it. - Return a tuple ``(address, length)`` giving the current memory address and the - length in elements of the buffer used to hold array's contents. The size of the - memory buffer in bytes can be computed as ``array.buffer_info()[1] * - array.itemsize``. This is occasionally useful when working with low-level (and - inherently unsafe) I/O interfaces that require memory addresses, such as certain - :c:func:`ioctl` operations. The returned numbers are valid as long as the array - exists and no length-changing operations are applied to it. + .. note:: - .. note:: + When using array objects from code written in C or C++ (the only way to + effectively make use of this information), it makes more sense to use the buffer + interface supported by array objects. This method is maintained for backward + compatibility and should be avoided in new code. The buffer interface is + documented in :ref:`bufferobjects`. - When using array objects from code written in C or C++ (the only way to - effectively make use of this information), it makes more sense to use the buffer - interface supported by array objects. This method is maintained for backward - compatibility and should be avoided in new code. The buffer interface is - documented in :ref:`bufferobjects`. + .. method:: byteswap() -.. method:: array.byteswap() + "Byteswap" all items of the array. This is only supported for values which are + 1, 2, 4, or 8 bytes in size; for other types of values, :exc:`RuntimeError` is + raised. It is useful when reading data from a file written on a machine with a + different byte order. - "Byteswap" all items of the array. This is only supported for values which are - 1, 2, 4, or 8 bytes in size; for other types of values, :exc:`RuntimeError` is - raised. It is useful when reading data from a file written on a machine with a - different byte order. + .. method:: count(x) -.. method:: array.count(x) + Return the number of occurrences of *x* in the array. - Return the number of occurrences of *x* in the array. + .. method:: extend(iterable) -.. method:: array.extend(iterable) + Append items from *iterable* to the end of the array. If *iterable* is another + array, it must have *exactly* the same type code; if not, :exc:`TypeError` will + be raised. If *iterable* is not an array, it must be iterable and its elements + must be the right type to be appended to the array. - Append items from *iterable* to the end of the array. 
If *iterable* is another - array, it must have *exactly* the same type code; if not, :exc:`TypeError` will - be raised. If *iterable* is not an array, it must be iterable and its elements - must be the right type to be appended to the array. + .. method:: frombytes(s) -.. method:: array.frombytes(s) + Appends items from the string, interpreting the string as an array of machine + values (as if it had been read from a file using the :meth:`fromfile` method). - Appends items from the string, interpreting the string as an array of machine - values (as if it had been read from a file using the :meth:`fromfile` method). + .. versionadded:: 3.2 + :meth:`!fromstring` is renamed to :meth:`frombytes` for clarity. - .. versionadded:: 3.2 - :meth:`fromstring` is renamed to :meth:`frombytes` for clarity. + .. method:: fromfile(f, n) -.. method:: array.fromfile(f, n) + Read *n* items (as machine values) from the :term:`file object` *f* and append + them to the end of the array. If less than *n* items are available, + :exc:`EOFError` is raised, but the items that were available are still + inserted into the array. - Read *n* items (as machine values) from the :term:`file object` *f* and append - them to the end of the array. If less than *n* items are available, - :exc:`EOFError` is raised, but the items that were available are still - inserted into the array. + .. method:: fromlist(list) -.. method:: array.fromlist(list) + Append items from the list. This is equivalent to ``for x in list: + a.append(x)`` except that if there is a type error, the array is unchanged. - Append items from the list. This is equivalent to ``for x in list: - a.append(x)`` except that if there is a type error, the array is unchanged. + .. method:: fromunicode(s) -.. method:: array.fromunicode(s) + Extends this array with data from the given unicode string. The array must + be a type ``'u'`` array; otherwise a :exc:`ValueError` is raised. Use + ``array.frombytes(unicodestring.encode(enc))`` to append Unicode data to an + array of some other type. - Extends this array with data from the given unicode string. The array must - be a type ``'u'`` array; otherwise a :exc:`ValueError` is raised. Use - ``array.frombytes(unicodestring.encode(enc))`` to append Unicode data to an - array of some other type. + .. method:: index(x[, start[, stop]]) -.. method:: array.index(x[, start[, stop]]) + Return the smallest *i* such that *i* is the index of the first occurrence of + *x* in the array. The optional arguments *start* and *stop* can be + specified to search for *x* within a subsection of the array. Raise + :exc:`ValueError` if *x* is not found. - Return the smallest *i* such that *i* is the index of the first occurrence of - *x* in the array. The optional arguments *start* and *stop* can be - specified to search for *x* within a subsection of the array. Raise - :exc:`ValueError` if *x* is not found. + .. versionchanged:: 3.10 + Added optional *start* and *stop* parameters. - .. versionchanged:: 3.10 - Added optional *start* and *stop* parameters. -.. method:: array.insert(i, x) + .. method:: insert(i, x) - Insert a new item with value *x* in the array before position *i*. Negative - values are treated as being relative to the end of the array. + Insert a new item with value *x* in the array before position *i*. Negative + values are treated as being relative to the end of the array. -.. method:: array.pop([i]) + .. method:: pop([i]) - Removes the item with the index *i* from the array and returns it. 
The optional - argument defaults to ``-1``, so that by default the last item is removed and - returned. + Removes the item with the index *i* from the array and returns it. The optional + argument defaults to ``-1``, so that by default the last item is removed and + returned. -.. method:: array.remove(x) + .. method:: remove(x) - Remove the first occurrence of *x* from the array. + Remove the first occurrence of *x* from the array. -.. method:: array.reverse() + .. method:: reverse() - Reverse the order of the items in the array. + Reverse the order of the items in the array. -.. method:: array.tobytes() + .. method:: tobytes() - Convert the array to an array of machine values and return the bytes - representation (the same sequence of bytes that would be written to a file by - the :meth:`tofile` method.) + Convert the array to an array of machine values and return the bytes + representation (the same sequence of bytes that would be written to a file by + the :meth:`tofile` method.) - .. versionadded:: 3.2 - :meth:`tostring` is renamed to :meth:`tobytes` for clarity. + .. versionadded:: 3.2 + :meth:`!tostring` is renamed to :meth:`tobytes` for clarity. -.. method:: array.tofile(f) + .. method:: tofile(f) - Write all items (as machine values) to the :term:`file object` *f*. + Write all items (as machine values) to the :term:`file object` *f*. -.. method:: array.tolist() + .. method:: tolist() - Convert the array to an ordinary list with the same items. + Convert the array to an ordinary list with the same items. -.. method:: array.tounicode() + .. method:: tounicode() - Convert the array to a unicode string. The array must be a type ``'u'`` array; - otherwise a :exc:`ValueError` is raised. Use ``array.tobytes().decode(enc)`` to - obtain a unicode string from an array of some other type. + Convert the array to a unicode string. The array must be a type ``'u'`` array; + otherwise a :exc:`ValueError` is raised. Use ``array.tobytes().decode(enc)`` to + obtain a unicode string from an array of some other type. When an array object is printed or converted to a string, it is represented as diff --git a/Doc/library/asyncio-eventloop.rst b/Doc/library/asyncio-eventloop.rst index db63a5dd11ad6e..f86e784288029c 100644 --- a/Doc/library/asyncio-eventloop.rst +++ b/Doc/library/asyncio-eventloop.rst @@ -186,19 +186,24 @@ Running and stopping the loop .. coroutinemethod:: loop.shutdown_default_executor(timeout=None) Schedule the closure of the default executor and wait for it to join all of - the threads in the :class:`ThreadPoolExecutor`. After calling this method, a - :exc:`RuntimeError` will be raised if :meth:`loop.run_in_executor` is called - while using the default executor. + the threads in the :class:`~concurrent.futures.ThreadPoolExecutor`. + Once this method has been called, + using the default executor with :meth:`loop.run_in_executor` + will raise a :exc:`RuntimeError`. - The *timeout* parameter specifies the amount of time the executor will - be given to finish joining. The default value is ``None``, which means the - executor will be given an unlimited amount of time. + The *timeout* parameter specifies the amount of time + (in :class:`float` seconds) the executor will be given to finish joining. + With the default, ``None``, + the executor is allowed an unlimited amount of time. - If the timeout duration is reached, a warning is emitted and executor is - terminated without waiting for its threads to finish joining. 
+ If the *timeout* is reached, a :exc:`RuntimeWarning` is emitted + and the default executor is terminated + without waiting for its threads to finish joining. - Note that there is no need to call this function when - :func:`asyncio.run` is used. + .. note:: + + Do not call this method when using :func:`asyncio.run`, + as the latter handles default executor shutdown automatically. .. versionadded:: 3.9 @@ -213,22 +218,23 @@ Scheduling callbacks Schedule the *callback* :term:`callback` to be called with *args* arguments at the next iteration of the event loop. + Return an instance of :class:`asyncio.Handle`, + which can be used later to cancel the callback. + Callbacks are called in the order in which they are registered. Each callback will be called exactly once. - An optional keyword-only *context* argument allows specifying a + The optional keyword-only *context* argument specifies a custom :class:`contextvars.Context` for the *callback* to run in. - The current context is used when no *context* is provided. - - An instance of :class:`asyncio.Handle` is returned, which can be - used later to cancel the callback. + Callbacks use the current context when no *context* is provided. - This method is not thread-safe. + Unlike :meth:`call_soon_threadsafe`, this method is not thread-safe. .. method:: loop.call_soon_threadsafe(callback, *args, context=None) - A thread-safe variant of :meth:`call_soon`. Must be used to - schedule callbacks *from another thread*. + A thread-safe variant of :meth:`call_soon`. When scheduling callbacks from + another thread, this function *must* be used, since :meth:`call_soon` is not + thread-safe. Raises :exc:`RuntimeError` if called on a loop that's been closed. This can happen on a secondary thread when the main application is diff --git a/Doc/library/asyncio-stream.rst b/Doc/library/asyncio-stream.rst index 533bdec8e03993..3b3c68ab6ef625 100644 --- a/Doc/library/asyncio-stream.rst +++ b/Doc/library/asyncio-stream.rst @@ -295,7 +295,8 @@ StreamWriter The method closes the stream and the underlying socket. - The method should be used along with the ``wait_closed()`` method:: + The method should be used, though not mandatory, + along with the ``wait_closed()`` method:: stream.close() await stream.wait_closed() @@ -372,7 +373,8 @@ StreamWriter Wait until the stream is closed. Should be called after :meth:`close` to wait until the underlying - connection is closed. + connection is closed, ensuring that all data has been flushed + before e.g. exiting the program. .. 
versionadded:: 3.7 @@ -402,6 +404,7 @@ TCP echo client using the :func:`asyncio.open_connection` function:: print('Close the connection') writer.close() + await writer.wait_closed() asyncio.run(tcp_echo_client('Hello World!')) @@ -434,6 +437,7 @@ TCP echo server using the :func:`asyncio.start_server` function:: print("Close the connection") writer.close() + await writer.wait_closed() async def main(): server = await asyncio.start_server( @@ -490,6 +494,7 @@ Simple example querying HTTP headers of the URL passed on the command line:: # Ignore the body, close the socket writer.close() + await writer.wait_closed() url = sys.argv[1] asyncio.run(print_http_headers(url)) @@ -535,6 +540,7 @@ Coroutine waiting until a socket receives data using the # Got data, we are done: close the socket print("Received:", data.decode()) writer.close() + await writer.wait_closed() # Close the second socket wsock.close() diff --git a/Doc/library/asyncio-task.rst b/Doc/library/asyncio-task.rst index 39112580285cc0..9b984243282268 100644 --- a/Doc/library/asyncio-task.rst +++ b/Doc/library/asyncio-task.rst @@ -1097,7 +1097,7 @@ Task Object The *limit* argument is passed to :meth:`get_stack` directly. The *file* argument is an I/O stream to which the output - is written; by default output is written to :data:`sys.stderr`. + is written; by default output is written to :data:`sys.stdout`. .. method:: get_coro() diff --git a/Doc/library/cmath.rst b/Doc/library/cmath.rst index c575b90e6461ed..28cd96b0e12da9 100644 --- a/Doc/library/cmath.rst +++ b/Doc/library/cmath.rst @@ -89,7 +89,7 @@ Power and logarithmic functions logarithms. -.. function:: log(x, base=None) +.. function:: log(x[, base]) Returns the logarithm of *x* to the given *base*. If the *base* is not specified, returns the natural logarithm of *x*. There is one branch cut, from 0 diff --git a/Doc/library/concurrent.futures.rst b/Doc/library/concurrent.futures.rst index 8106cc235e5a3c..c543c849585b7f 100644 --- a/Doc/library/concurrent.futures.rst +++ b/Doc/library/concurrent.futures.rst @@ -250,9 +250,10 @@ to a :class:`ProcessPoolExecutor` will result in deadlock. then :exc:`ValueError` will be raised. If *max_workers* is ``None``, then the default chosen will be at most ``61``, even if more processors are available. - *mp_context* can be a multiprocessing context or None. It will be used to - launch the workers. If *mp_context* is ``None`` or not given, the default - multiprocessing context is used. + *mp_context* can be a :mod:`multiprocessing` context or ``None``. It will be + used to launch the workers. If *mp_context* is ``None`` or not given, the + default :mod:`multiprocessing` context is used. + See :ref:`multiprocessing-start-methods`. *initializer* is an optional callable that is called at the start of each worker process; *initargs* is a tuple of arguments passed to the @@ -280,11 +281,18 @@ to a :class:`ProcessPoolExecutor` will result in deadlock. Added the *initializer* and *initargs* arguments. + .. note:: + The default :mod:`multiprocessing` start method + (see :ref:`multiprocessing-start-methods`) will change away from + *fork* in Python 3.14. Code that requires *fork* be used for their + :class:`ProcessPoolExecutor` should explicitly specify that by + passing a ``mp_context=multiprocessing.get_context("fork")`` + parameter. + .. versionchanged:: 3.11 The *max_tasks_per_child* argument was added to allow users to control the lifetime of workers in the pool. - .. 
_processpoolexecutor-example: ProcessPoolExecutor Example diff --git a/Doc/library/ctypes.rst b/Doc/library/ctypes.rst index 4de5c820f2c6ac..8fd681286b812d 100644 --- a/Doc/library/ctypes.rst +++ b/Doc/library/ctypes.rst @@ -1380,6 +1380,10 @@ way is to instantiate one of the following classes: DLLs and determine which one is not found using Windows debugging and tracing tools. + .. versionchanged:: 3.12 + + The *name* parameter can now be a :term:`path-like object`. + .. seealso:: `Microsoft DUMPBIN tool `_ @@ -1398,6 +1402,10 @@ way is to instantiate one of the following classes: .. versionchanged:: 3.3 :exc:`WindowsError` used to be raised. + .. versionchanged:: 3.12 + + The *name* parameter can now be a :term:`path-like object`. + .. class:: WinDLL(name, mode=DEFAULT_MODE, handle=None, use_errno=False, use_last_error=False, winmode=None) @@ -1405,6 +1413,10 @@ way is to instantiate one of the following classes: functions in these libraries use the ``stdcall`` calling convention, and are assumed to return :c:expr:`int` by default. + .. versionchanged:: 3.12 + + The *name* parameter can now be a :term:`path-like object`. + The Python :term:`global interpreter lock` is released before calling any function exported by these libraries, and reacquired afterwards. @@ -1418,6 +1430,10 @@ function exported by these libraries, and reacquired afterwards. Thus, this is only useful to call Python C api functions directly. + .. versionchanged:: 3.12 + + The *name* parameter can now be a :term:`path-like object`. + All these classes can be instantiated by calling them with at least one argument, the pathname of the shared library. If you have an existing handle to an already loaded shared library, it can be passed as the ``handle`` named @@ -2510,6 +2526,7 @@ fields, or any other data types containing pointer type fields. An optional small integer that allows overriding the alignment of structure fields in the instance. :attr:`_pack_` must already be defined when :attr:`_fields_` is assigned, otherwise it will have no effect. + Setting this attribute to 0 is the same as not setting it at all. .. attribute:: _anonymous_ diff --git a/Doc/library/dis.rst b/Doc/library/dis.rst index 2c9d868ebd872b..e00cb1b39f89de 100644 --- a/Doc/library/dis.rst +++ b/Doc/library/dis.rst @@ -694,6 +694,13 @@ iterations of the loop. Returns with ``STACK[-1]`` to the caller of the function. +.. opcode:: RETURN_CONST (consti) + + Returns with ``co_consts[consti]`` to the caller of the function. + + .. versionadded:: 3.12 + + .. opcode:: YIELD_VALUE Yields ``STACK.pop()`` from a :term:`generator`. diff --git a/Doc/library/enum.rst b/Doc/library/enum.rst index cfe4fbeb0698e7..13591a1bdc7347 100644 --- a/Doc/library/enum.rst +++ b/Doc/library/enum.rst @@ -922,6 +922,6 @@ Notes or you can reassign the appropriate :meth:`str`, etc., in your enum:: - >>> from enum import IntEnum + >>> from enum import Enum, IntEnum >>> class MyIntEnum(IntEnum): - ... __str__ = IntEnum.__str__ + ... __str__ = Enum.__str__ diff --git a/Doc/library/faulthandler.rst b/Doc/library/faulthandler.rst index 07a7489941442b..f64dfeb5e081c7 100644 --- a/Doc/library/faulthandler.rst +++ b/Doc/library/faulthandler.rst @@ -43,6 +43,13 @@ Python is deadlocked. The :ref:`Python Development Mode ` calls :func:`faulthandler.enable` at Python startup. +.. seealso:: + + Module :mod:`pdb` + Interactive source code debugger for Python programs. + + Module :mod:`traceback` + Standard interface to extract, format and print stack traces of Python programs. 
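As a minimal illustrative sketch (not part of this patch), enabling :mod:`faulthandler` and explicitly dumping the tracebacks of all threads to a file might look like the following; the output file name is arbitrary::

    import faulthandler

    # Install the fault handlers for SIGSEGV, SIGFPE, SIGABRT, SIGBUS and SIGILL.
    faulthandler.enable()

    with open("tracebacks.txt", "w") as f:
        # Dump the tracebacks of all threads into the file.
        faulthandler.dump_traceback(file=f, all_threads=True)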
Dumping the traceback --------------------- @@ -52,6 +59,8 @@ Dumping the traceback Dump the tracebacks of all threads into *file*. If *all_threads* is ``False``, dump only the current thread. + .. seealso:: :func:`traceback.print_tb`, which can be used to print a traceback object. + .. versionchanged:: 3.5 Added support for passing file descriptor to this function. @@ -178,4 +187,3 @@ handler: File "/home/python/cpython/Lib/ctypes/__init__.py", line 486 in string_at File "", line 1 in Segmentation fault - diff --git a/Doc/library/math.rst b/Doc/library/math.rst index 9da22b6ad0562f..797f32408eac3d 100644 --- a/Doc/library/math.rst +++ b/Doc/library/math.rst @@ -393,12 +393,13 @@ Power and logarithmic functions .. versionadded:: 3.2 -.. function:: log(x, base=None) +.. function:: log(x[, base]) - Return the logarithm of *x* to the given *base*. + With one argument, return the natural logarithm of *x* (to base *e*). + + With two arguments, return the logarithm of *x* to the given *base*, + calculated as ``log(x)/log(base)``. - If the *base* is not specified, returns the natural - logarithm (base *e*) of *x*. .. function:: log1p(x) diff --git a/Doc/library/multiprocessing.rst b/Doc/library/multiprocessing.rst index b5ceeb796f8f2f..0ec47bb956a99e 100644 --- a/Doc/library/multiprocessing.rst +++ b/Doc/library/multiprocessing.rst @@ -19,7 +19,7 @@ offers both local and remote concurrency, effectively side-stepping the :term:`Global Interpreter Lock ` by using subprocesses instead of threads. Due to this, the :mod:`multiprocessing` module allows the programmer to fully -leverage multiple processors on a given machine. It runs on both Unix and +leverage multiple processors on a given machine. It runs on both POSIX and Windows. The :mod:`multiprocessing` module also introduces APIs which do not have @@ -99,11 +99,11 @@ necessary, see :ref:`multiprocessing-programming`. +.. _multiprocessing-start-methods: + Contexts and start methods ~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. _multiprocessing-start-methods: - Depending on the platform, :mod:`multiprocessing` supports three ways to start a process. These *start methods* are @@ -115,7 +115,7 @@ to start a process. These *start methods* are will not be inherited. Starting a process using this method is rather slow compared to using *fork* or *forkserver*. - Available on Unix and Windows. The default on Windows and macOS. + Available on POSIX and Windows platforms. The default on Windows and macOS. *fork* The parent process uses :func:`os.fork` to fork the Python @@ -124,32 +124,39 @@ to start a process. These *start methods* are inherited by the child process. Note that safely forking a multithreaded process is problematic. - Available on Unix only. The default on Unix. + Available on POSIX systems. Currently the default on POSIX except macOS. + + .. note:: + The default start method will change away from *fork* in Python 3.14. + Code that requires *fork* should explicitly specify that via + :func:`get_context` or :func:`set_start_method`. *forkserver* When the program starts and selects the *forkserver* start method, - a server process is started. From then on, whenever a new process + a server process is spawned. From then on, whenever a new process is needed, the parent process connects to the server and requests - that it fork a new process. The fork server process is single - threaded so it is safe for it to use :func:`os.fork`. No - unnecessary resources are inherited. + that it fork a new process. 
The fork server process is single threaded + unless system libraries or preloaded imports spawn threads as a + side-effect so it is generally safe for it to use :func:`os.fork`. + No unnecessary resources are inherited. + + Available on POSIX platforms which support passing file descriptors + over Unix pipes such as Linux. - Available on Unix platforms which support passing file descriptors - over Unix pipes. .. versionchanged:: 3.8 On macOS, the *spawn* start method is now the default. The *fork* start method should be considered unsafe as it can lead to crashes of the - subprocess. See :issue:`33725`. + subprocess as macOS system libraries may start threads. See :issue:`33725`. .. versionchanged:: 3.4 - *spawn* added on all Unix platforms, and *forkserver* added for - some Unix platforms. + *spawn* added on all POSIX platforms, and *forkserver* added for + some POSIX platforms. Child processes no longer inherit all of the parents inheritable handles on Windows. -On Unix using the *spawn* or *forkserver* start methods will also +On POSIX using the *spawn* or *forkserver* start methods will also start a *resource tracker* process which tracks the unlinked named system resources (such as named semaphores or :class:`~multiprocessing.shared_memory.SharedMemory` objects) created @@ -211,10 +218,10 @@ library user. .. warning:: - The ``'spawn'`` and ``'forkserver'`` start methods cannot currently + The ``'spawn'`` and ``'forkserver'`` start methods generally cannot be used with "frozen" executables (i.e., binaries produced by - packages like **PyInstaller** and **cx_Freeze**) on Unix. - The ``'fork'`` start method does work. + packages like **PyInstaller** and **cx_Freeze**) on POSIX systems. + The ``'fork'`` start method may work if code does not use threads. Exchanging objects between processes @@ -629,14 +636,14 @@ The :mod:`multiprocessing` package mostly replicates the API of the calling :meth:`join()` is simpler. On Windows, this is an OS handle usable with the ``WaitForSingleObject`` - and ``WaitForMultipleObjects`` family of API calls. On Unix, this is + and ``WaitForMultipleObjects`` family of API calls. On POSIX, this is a file descriptor usable with primitives from the :mod:`select` module. .. versionadded:: 3.3 .. method:: terminate() - Terminate the process. On Unix this is done using the ``SIGTERM`` signal; + Terminate the process. On POSIX this is done using the ``SIGTERM`` signal; on Windows :c:func:`TerminateProcess` is used. Note that exit handlers and finally clauses, etc., will not be executed. @@ -653,7 +660,7 @@ The :mod:`multiprocessing` package mostly replicates the API of the .. method:: kill() - Same as :meth:`terminate()` but using the ``SIGKILL`` signal on Unix. + Same as :meth:`terminate()` but using the ``SIGKILL`` signal on POSIX. .. versionadded:: 3.7 @@ -676,16 +683,17 @@ The :mod:`multiprocessing` package mostly replicates the API of the .. doctest:: >>> import multiprocessing, time, signal - >>> p = multiprocessing.Process(target=time.sleep, args=(1000,)) + >>> mp_context = multiprocessing.get_context('spawn') + >>> p = mp_context.Process(target=time.sleep, args=(1000,)) >>> print(p, p.is_alive()) - False + <...Process ... initial> False >>> p.start() >>> print(p, p.is_alive()) - True + <...Process ... started> True >>> p.terminate() >>> time.sleep(0.1) >>> print(p, p.is_alive()) - False + <...Process ... 
stopped exitcode=-SIGTERM> False >>> p.exitcode == -signal.SIGTERM True @@ -815,7 +823,7 @@ For an example of the usage of queues for interprocess communication see Return the approximate size of the queue. Because of multithreading/multiprocessing semantics, this number is not reliable. - Note that this may raise :exc:`NotImplementedError` on Unix platforms like + Note that this may raise :exc:`NotImplementedError` on platforms like macOS where ``sem_getvalue()`` is not implemented. .. method:: empty() @@ -1034,9 +1042,8 @@ Miscellaneous Returns a list of the supported start methods, the first of which is the default. The possible start methods are ``'fork'``, - ``'spawn'`` and ``'forkserver'``. On Windows only ``'spawn'`` is - available. On Unix ``'fork'`` and ``'spawn'`` are always - supported, with ``'fork'`` being the default. + ``'spawn'`` and ``'forkserver'``. Not all platforms support all + methods. See :ref:`multiprocessing-start-methods`. .. versionadded:: 3.4 @@ -1048,7 +1055,7 @@ Miscellaneous If *method* is ``None`` then the default context is returned. Otherwise *method* should be ``'fork'``, ``'spawn'``, ``'forkserver'``. :exc:`ValueError` is raised if the specified - start method is not available. + start method is not available. See :ref:`multiprocessing-start-methods`. .. versionadded:: 3.4 @@ -1062,8 +1069,7 @@ Miscellaneous is true then ``None`` is returned. The return value can be ``'fork'``, ``'spawn'``, ``'forkserver'`` - or ``None``. ``'fork'`` is the default on Unix, while ``'spawn'`` is - the default on Windows and macOS. + or ``None``. See :ref:`multiprocessing-start-methods`. .. versionchanged:: 3.8 @@ -1084,11 +1090,27 @@ Miscellaneous before they can create child processes. .. versionchanged:: 3.4 - Now supported on Unix when the ``'spawn'`` start method is used. + Now supported on POSIX when the ``'spawn'`` start method is used. .. versionchanged:: 3.11 Accepts a :term:`path-like object`. +.. function:: set_forkserver_preload(module_names) + + Set a list of module names for the forkserver main process to attempt to + import so that their already imported state is inherited by forked + processes. Any :exc:`ImportError` when doing so is silently ignored. + This can be used as a performance enhancement to avoid repeated work + in every process. + + For this to work, it must be called before the forkserver process has been + launched (before creating a :class:`Pool` or starting a :class:`Process`). + + Only meaningful when using the ``'forkserver'`` start method. + See :ref:`multiprocessing-start-methods`. + + .. versionadded:: 3.4 + .. function:: set_start_method(method, force=False) Set the method which should be used to start child processes. @@ -1102,6 +1124,8 @@ Miscellaneous protected inside the ``if __name__ == '__main__'`` clause of the main module. + See :ref:`multiprocessing-start-methods`. + .. versionadded:: 3.4 .. note:: @@ -1906,7 +1930,8 @@ their parent process exits. The manager classes are defined in the .. doctest:: - >>> manager = multiprocessing.Manager() + >>> mp_context = multiprocessing.get_context('spawn') + >>> manager = mp_context.Manager() >>> Global = manager.Namespace() >>> Global.x = 10 >>> Global.y = 'hello' @@ -2018,8 +2043,8 @@ the proxy). In this way, a proxy can be used just like its referent can: .. 
doctest:: - >>> from multiprocessing import Manager - >>> manager = Manager() + >>> mp_context = multiprocessing.get_context('spawn') + >>> manager = mp_context.Manager() >>> l = manager.list([i*i for i in range(10)]) >>> print(l) [0, 1, 4, 9, 16, 25, 36, 49, 64, 81] @@ -2520,7 +2545,7 @@ multiple connections at the same time. *timeout* is ``None`` then it will block for an unlimited period. A negative timeout is equivalent to a zero timeout. - For both Unix and Windows, an object can appear in *object_list* if + For both POSIX and Windows, an object can appear in *object_list* if it is * a readable :class:`~multiprocessing.connection.Connection` object; @@ -2531,7 +2556,7 @@ multiple connections at the same time. A connection or socket object is ready when there is data available to be read from it, or the other end has been closed. - **Unix**: ``wait(object_list, timeout)`` almost equivalent + **POSIX**: ``wait(object_list, timeout)`` almost equivalent ``select.select(object_list, [], [], timeout)``. The difference is that, if :func:`select.select` is interrupted by a signal, it can raise :exc:`OSError` with an error number of ``EINTR``, whereas @@ -2803,7 +2828,7 @@ Thread safety of proxies Joining zombie processes - On Unix when a process finishes but has not been joined it becomes a zombie. + On POSIX when a process finishes but has not been joined it becomes a zombie. There should never be very many because each time a new process starts (or :func:`~multiprocessing.active_children` is called) all completed processes which have not yet been joined will be joined. Also calling a finished @@ -2866,7 +2891,7 @@ Joining processes that use queues Explicitly pass resources to child processes - On Unix using the *fork* start method, a child process can make + On POSIX using the *fork* start method, a child process can make use of a shared resource created in a parent process using a global resource. However, it is better to pass the object as an argument to the constructor for the child process. diff --git a/Doc/library/pdb.rst b/Doc/library/pdb.rst index 383c3adcf289d5..4ae12a5d03a78d 100644 --- a/Doc/library/pdb.rst +++ b/Doc/library/pdb.rst @@ -27,6 +27,15 @@ The debugger is extensible -- it is actually defined as the class :class:`Pdb`. This is currently undocumented but easily understood by reading the source. The extension interface uses the modules :mod:`bdb` and :mod:`cmd`. +.. seealso:: + + Module :mod:`faulthandler` + Used to dump Python tracebacks explicitly, on a fault, after a timeout, + or on a user signal. + + Module :mod:`traceback` + Standard interface to extract, format and print stack traces of Python programs. + The debugger's prompt is ``(Pdb)``. Typical usage to run a program under control of the debugger is:: diff --git a/Doc/library/sqlite3.rst b/Doc/library/sqlite3.rst index c0f0f133f4a286..bbdc891c930cf4 100644 --- a/Doc/library/sqlite3.rst +++ b/Doc/library/sqlite3.rst @@ -302,10 +302,13 @@ Module functions :type isolation_level: str | None :param bool check_same_thread: - If ``True`` (default), only the creating thread may use the connection. - If ``False``, the connection may be shared across multiple threads; - if so, write operations should be serialized by the user to avoid data - corruption. + If ``True`` (default), :exc:`ProgrammingError` will be raised + if the database connection is used by a thread + other than the one that created it. 
+ If ``False``, the connection may be accessed in multiple threads; + write operations may need to be serialized by the user + to avoid data corruption. + See :attr:`threadsafety` for more information. :param Connection factory: A custom subclass of :class:`Connection` to create the connection with, diff --git a/Doc/library/subprocess.rst b/Doc/library/subprocess.rst index 785251afdf262e..d792a43eeb271f 100644 --- a/Doc/library/subprocess.rst +++ b/Doc/library/subprocess.rst @@ -111,6 +111,14 @@ underlying :class:`Popen` interface can be used directly. Added the *text* parameter, as a more understandable alias of *universal_newlines*. Added the *capture_output* parameter. + .. versionchanged:: 3.11.3 + + Changed Windows shell search order for ``shell=True``. The current + directory and ``%PATH%`` are replaced with ``%COMSPEC%`` and + ``%SystemRoot%\System32\cmd.exe``. As a result, dropping a + malicious program named ``cmd.exe`` into a current directory no + longer works. + .. class:: CompletedProcess The return value from :func:`run`, representing a process that has finished. @@ -457,7 +465,7 @@ functions. - :const:`0` means unbuffered (read and write are one system call and can return short) - :const:`1` means line buffered - (only usable if ``universal_newlines=True`` i.e., in a text mode) + (only usable if ``text=True`` or ``universal_newlines=True``) - any other positive value means use a buffer of approximately that size - negative bufsize (the default) means the system default of @@ -487,6 +495,14 @@ functions. *executable* parameter accepts a bytes and :term:`path-like object` on Windows. + .. versionchanged:: 3.11.3 + + Changed Windows shell search order for ``shell=True``. The current + directory and ``%PATH%`` are replaced with ``%COMSPEC%`` and + ``%SystemRoot%\System32\cmd.exe``. As a result, dropping a + malicious program named ``cmd.exe`` into a current directory no + longer works. + *stdin*, *stdout* and *stderr* specify the executed program's standard input, standard output and standard error file handles, respectively. Valid values are ``None``, :data:`PIPE`, :data:`DEVNULL`, an existing file descriptor (a @@ -847,7 +863,8 @@ Instances of the :class:`Popen` class have the following methods: On Windows :meth:`kill` is an alias for :meth:`terminate`. -The following attributes are also available: +The following attributes are also set by the class for you to access. +Reassigning them to new values is unsupported: .. attribute:: Popen.args @@ -860,9 +877,9 @@ The following attributes are also available: If the *stdin* argument was :data:`PIPE`, this attribute is a writeable stream object as returned by :func:`open`. If the *encoding* or *errors* - arguments were specified or the *universal_newlines* argument was ``True``, - the stream is a text stream, otherwise it is a byte stream. If the *stdin* - argument was not :data:`PIPE`, this attribute is ``None``. + arguments were specified or the *text* or *universal_newlines* argument + was ``True``, the stream is a text stream, otherwise it is a byte stream. + If the *stdin* argument was not :data:`PIPE`, this attribute is ``None``. .. attribute:: Popen.stdout @@ -870,9 +887,9 @@ The following attributes are also available: If the *stdout* argument was :data:`PIPE`, this attribute is a readable stream object as returned by :func:`open`. Reading from the stream provides output from the child process. 
If the *encoding* or *errors* arguments were - specified or the *universal_newlines* argument was ``True``, the stream is a - text stream, otherwise it is a byte stream. If the *stdout* argument was not - :data:`PIPE`, this attribute is ``None``. + specified or the *text* or *universal_newlines* argument was ``True``, the + stream is a text stream, otherwise it is a byte stream. If the *stdout* + argument was not :data:`PIPE`, this attribute is ``None``. .. attribute:: Popen.stderr @@ -880,9 +897,9 @@ The following attributes are also available: If the *stderr* argument was :data:`PIPE`, this attribute is a readable stream object as returned by :func:`open`. Reading from the stream provides error output from the child process. If the *encoding* or *errors* arguments - were specified or the *universal_newlines* argument was ``True``, the stream - is a text stream, otherwise it is a byte stream. If the *stderr* argument was - not :data:`PIPE`, this attribute is ``None``. + were specified or the *text* or *universal_newlines* argument was ``True``, the + stream is a text stream, otherwise it is a byte stream. If the *stderr* argument + was not :data:`PIPE`, this attribute is ``None``. .. warning:: @@ -1157,6 +1174,14 @@ calls these functions. .. versionchanged:: 3.3 *timeout* was added. + .. versionchanged:: 3.11.3 + + Changed Windows shell search order for ``shell=True``. The current + directory and ``%PATH%`` are replaced with ``%COMSPEC%`` and + ``%SystemRoot%\System32\cmd.exe``. As a result, dropping a + malicious program named ``cmd.exe`` into a current directory no + longer works. + .. function:: check_call(args, *, stdin=None, stdout=None, stderr=None, \ shell=False, cwd=None, timeout=None, \ **other_popen_kwargs) @@ -1189,6 +1214,14 @@ calls these functions. .. versionchanged:: 3.3 *timeout* was added. + .. versionchanged:: 3.11.3 + + Changed Windows shell search order for ``shell=True``. The current + directory and ``%PATH%`` are replaced with ``%COMSPEC%`` and + ``%SystemRoot%\System32\cmd.exe``. As a result, dropping a + malicious program named ``cmd.exe`` into a current directory no + longer works. + .. function:: check_output(args, *, stdin=None, stderr=None, shell=False, \ cwd=None, encoding=None, errors=None, \ @@ -1244,6 +1277,14 @@ calls these functions. .. versionadded:: 3.7 *text* was added as a more readable alias for *universal_newlines*. + .. versionchanged:: 3.11.3 + + Changed Windows shell search order for ``shell=True``. The current + directory and ``%PATH%`` are replaced with ``%COMSPEC%`` and + ``%SystemRoot%\System32\cmd.exe``. As a result, dropping a + malicious program named ``cmd.exe`` into a current directory no + longer works. + .. _subprocess-replacements: diff --git a/Doc/library/timeit.rst b/Doc/library/timeit.rst index 5437704cec337b..32ab565aba0c08 100644 --- a/Doc/library/timeit.rst +++ b/Doc/library/timeit.rst @@ -206,7 +206,7 @@ Command-Line Interface When called as a program from the command line, the following form is used:: - python -m timeit [-n N] [-r N] [-u U] [-s S] [-h] [statement ...] + python -m timeit [-n N] [-r N] [-u U] [-s S] [-p] [-v] [-h] [statement ...] 
Where the following options are understood: diff --git a/Doc/library/traceback.rst b/Doc/library/traceback.rst index f8c1eabadacf9f..69818baf184d7c 100644 --- a/Doc/library/traceback.rst +++ b/Doc/library/traceback.rst @@ -20,8 +20,15 @@ The module uses traceback objects --- this is the object type that is stored in the :data:`sys.last_traceback` variable and returned as the third item from :func:`sys.exc_info`. -The module defines the following functions: +.. seealso:: + + Module :mod:`faulthandler` + Used to dump Python tracebacks explicitly, on a fault, after a timeout, or on a user signal. + Module :mod:`pdb` + Interactive source code debugger for Python programs. + +The module defines the following functions: .. function:: print_tb(tb, limit=None, file=None) diff --git a/Doc/requirements.txt b/Doc/requirements.txt index 134f39d6d7b3d4..71d3cd61e53877 100644 --- a/Doc/requirements.txt +++ b/Doc/requirements.txt @@ -8,7 +8,7 @@ sphinx==4.5.0 blurb sphinx-lint==0.6.7 -sphinxext-opengraph>=0.7.1 +sphinxext-opengraph==0.7.5 # The theme used by the documentation is stored separately, so we need # to install that as well. diff --git a/Doc/tutorial/venv.rst b/Doc/tutorial/venv.rst index 1fdb370b33d5af..05f0e6bbcc1b04 100644 --- a/Doc/tutorial/venv.rst +++ b/Doc/tutorial/venv.rst @@ -98,8 +98,8 @@ Managing Packages with pip ========================== You can install, upgrade, and remove packages using a program called -:program:`pip`. By default ``pip`` will install packages from the Python -Package Index, <https://pypi.org>. You can browse the Python +:program:`pip`. By default ``pip`` will install packages from the `Python +Package Index <https://pypi.org>`_. You can browse the Python Package Index by going to it in your web browser. ``pip`` has a number of subcommands: "install", "uninstall", diff --git a/Doc/using/windows.rst b/Doc/using/windows.rst index bd09666d5e01c1..1c4e41c0e0e239 100644 --- a/Doc/using/windows.rst +++ b/Doc/using/windows.rst @@ -743,22 +743,47 @@ command:: py -2 -You should find the latest version of Python 3.x starts. - If you see the following error, you do not have the launcher installed:: 'py' is not recognized as an internal or external command, operable program or batch file. -Per-user installations of Python do not add the launcher to :envvar:`PATH` -unless the option was selected on installation. - The command:: py --list displays the currently installed version(s) of Python. +The ``-x.y`` argument is the short form of the ``-V:Company/Tag`` argument, +which allows selecting a specific Python runtime, including those that may have +come from somewhere other than python.org. Any runtime registered by following +:pep:`514` will be discoverable. The ``--list`` command lists all available +runtimes using the ``-V:`` format. + +When using the ``-V:`` argument, specifying the Company will limit selection to +runtimes from that provider, while specifying only the Tag will select from all +providers. Note that omitting the slash implies a tag:: + + # Select any '3.*' tagged runtime + py -V:3 + + # Select any 'PythonCore' released runtime + py -V:PythonCore/ + + # Select PythonCore's latest Python 3 runtime + py -V:PythonCore/3 + +The short form of the argument (``-3``) only ever selects from core Python +releases, and not other distributions. However, the longer form (``-V:3``) will +select from any. + +The Company is matched on the full string, case-insensitive. The Tag is matched +on either the full string or a prefix, provided the next character is a dot or a +hyphen.
This allows ``-V:3.1`` to match ``3.1-32``, but not ``3.10``. Tags are +sorted using numerical ordering (``3.10`` is newer than ``3.1``), but are +compared using text (``-V:3.01`` does not match ``3.1``). + + Virtual environments ^^^^^^^^^^^^^^^^^^^^ diff --git a/Doc/whatsnew/3.12.rst b/Doc/whatsnew/3.12.rst index a071159b800a34..45a5e5062d55b6 100644 --- a/Doc/whatsnew/3.12.rst +++ b/Doc/whatsnew/3.12.rst @@ -440,7 +440,6 @@ Deprecated warning at compile time. This field will be removed in Python 3.14. (Contributed by Ramvikrams and Kumar Aditya in :gh:`101193`. PEP by Ken Jin.) - Pending Removal in Python 3.13 ------------------------------ @@ -480,7 +479,7 @@ APIs: * :class:`webbrowser.MacOSX` (:gh:`86421`) Pending Removal in Python 3.14 -============================== +------------------------------ * Deprecated the following :mod:`importlib.abc` classes, scheduled for removal in Python 3.14: @@ -505,6 +504,17 @@ Pending Removal in Python 3.14 * Testing the truth value of an :class:`xml.etree.ElementTree.Element` is deprecated and will raise an exception in Python 3.14. +* The default :mod:`multiprocessing` start method will change to a safer one on + Linux, BSDs, and other non-macOS POSIX platforms where ``'fork'`` is currently + the default (:gh:`84559`). Adding a runtime warning about this was deemed too + disruptive as the majority of code is not expected to care. Use the + :func:`~multiprocessing.get_context` or + :func:`~multiprocessing.set_start_method` APIs to explicitly specify when + your code *requires* ``'fork'``. See :ref:`multiprocessing-start-methods`. + +* :mod:`pty` has two undocumented ``master_open()`` and ``slave_open()`` + functions that have been deprecated since Python 2 but only gained a + proper :exc:`DeprecationWarning` in 3.12. Remove them in 3.14. Pending Removal in Future Versions ---------------------------------- diff --git a/Include/cpython/pyerrors.h b/Include/cpython/pyerrors.h index 141341667795e8..0d9cc9922f7368 100644 --- a/Include/cpython/pyerrors.h +++ b/Include/cpython/pyerrors.h @@ -99,6 +99,7 @@ PyAPI_FUNC(void) _PyErr_GetExcInfo(PyThreadState *, PyObject **, PyObject **, Py /* Context manipulation (PEP 3134) */ PyAPI_FUNC(void) _PyErr_ChainExceptions(PyObject *, PyObject *, PyObject *); +PyAPI_FUNC(void) _PyErr_ChainExceptions1(PyObject *); /* Like PyErr_Format(), but saves current exception as __context__ and __cause__. diff --git a/Include/cpython/pystate.h b/Include/cpython/pystate.h index f5db52f76e8f4f..be1fcb61fa2244 100644 --- a/Include/cpython/pystate.h +++ b/Include/cpython/pystate.h @@ -166,9 +166,7 @@ struct _ts { PyObject *c_traceobj; /* The exception currently being raised */ - PyObject *curexc_type; - PyObject *curexc_value; - PyObject *curexc_traceback; + PyObject *current_exception; /* Pointer to the top of the exception stack for the exceptions * we may be currently handling. (See _PyErr_StackItem above.) 
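Relating to the pending :mod:`multiprocessing` default start method change described in the ``Doc/whatsnew/3.12.rst`` hunk above, a minimal sketch of selecting a start method explicitly (so code does not depend on the platform default) might look like this; the ``work`` function is a made-up placeholder::

    import multiprocessing

    def work(n):
        return n * n

    if __name__ == "__main__":
        # Request a start method explicitly; code that truly requires 'fork'
        # should ask for it here rather than relying on the default.
        ctx = multiprocessing.get_context("spawn")
        with ctx.Pool(processes=2) as pool:
            print(pool.map(work, range(4)))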
diff --git a/Include/internal/pycore_opcode.h b/Include/internal/pycore_opcode.h index 2a86c6b24d4b21..dcf26ba1536963 100644 --- a/Include/internal/pycore_opcode.h +++ b/Include/internal/pycore_opcode.h @@ -193,6 +193,7 @@ const uint8_t _PyOpcode_Deopt[256] = { [RAISE_VARARGS] = RAISE_VARARGS, [RERAISE] = RERAISE, [RESUME] = RESUME, + [RETURN_CONST] = RETURN_CONST, [RETURN_GENERATOR] = RETURN_GENERATOR, [RETURN_VALUE] = RETURN_VALUE, [SEND] = SEND, @@ -350,7 +351,7 @@ static const char *const _PyOpcode_OpName[264] = { [CONTAINS_OP] = "CONTAINS_OP", [RERAISE] = "RERAISE", [COPY] = "COPY", - [STORE_FAST__LOAD_FAST] = "STORE_FAST__LOAD_FAST", + [RETURN_CONST] = "RETURN_CONST", [BINARY_OP] = "BINARY_OP", [SEND] = "SEND", [LOAD_FAST] = "LOAD_FAST", @@ -382,20 +383,20 @@ static const char *const _PyOpcode_OpName[264] = { [YIELD_VALUE] = "YIELD_VALUE", [RESUME] = "RESUME", [MATCH_CLASS] = "MATCH_CLASS", + [STORE_FAST__LOAD_FAST] = "STORE_FAST__LOAD_FAST", [STORE_FAST__STORE_FAST] = "STORE_FAST__STORE_FAST", - [STORE_SUBSCR_DICT] = "STORE_SUBSCR_DICT", [FORMAT_VALUE] = "FORMAT_VALUE", [BUILD_CONST_KEY_MAP] = "BUILD_CONST_KEY_MAP", [BUILD_STRING] = "BUILD_STRING", + [STORE_SUBSCR_DICT] = "STORE_SUBSCR_DICT", [STORE_SUBSCR_LIST_INT] = "STORE_SUBSCR_LIST_INT", [UNPACK_SEQUENCE_LIST] = "UNPACK_SEQUENCE_LIST", [UNPACK_SEQUENCE_TUPLE] = "UNPACK_SEQUENCE_TUPLE", - [UNPACK_SEQUENCE_TWO_TUPLE] = "UNPACK_SEQUENCE_TWO_TUPLE", [LIST_EXTEND] = "LIST_EXTEND", [SET_UPDATE] = "SET_UPDATE", [DICT_MERGE] = "DICT_MERGE", [DICT_UPDATE] = "DICT_UPDATE", - [166] = "<166>", + [UNPACK_SEQUENCE_TWO_TUPLE] = "UNPACK_SEQUENCE_TWO_TUPLE", [167] = "<167>", [168] = "<168>", [169] = "<169>", @@ -497,7 +498,6 @@ static const char *const _PyOpcode_OpName[264] = { #endif #define EXTRA_CASES \ - case 166: \ case 167: \ case 168: \ case 169: \ diff --git a/Include/internal/pycore_pyerrors.h b/Include/internal/pycore_pyerrors.h index 66f37942ef916a..1bb4a9aa103898 100644 --- a/Include/internal/pycore_pyerrors.h +++ b/Include/internal/pycore_pyerrors.h @@ -20,7 +20,10 @@ extern void _PyErr_FiniTypes(PyInterpreterState *); static inline PyObject* _PyErr_Occurred(PyThreadState *tstate) { assert(tstate != NULL); - return tstate->curexc_type; + if (tstate->current_exception == NULL) { + return NULL; + } + return (PyObject *)Py_TYPE(tstate->current_exception); } static inline void _PyErr_ClearExcState(_PyErr_StackItem *exc_state) @@ -37,10 +40,16 @@ PyAPI_FUNC(void) _PyErr_Fetch( PyObject **value, PyObject **traceback); +extern PyObject * +_PyErr_GetRaisedException(PyThreadState *tstate); + PyAPI_FUNC(int) _PyErr_ExceptionMatches( PyThreadState *tstate, PyObject *exc); +void +_PyErr_SetRaisedException(PyThreadState *tstate, PyObject *exc); + PyAPI_FUNC(void) _PyErr_Restore( PyThreadState *tstate, PyObject *type, diff --git a/Include/opcode.h b/Include/opcode.h index 9e0cc18c4345ea..4253fe3368b982 100644 --- a/Include/opcode.h +++ b/Include/opcode.h @@ -76,6 +76,7 @@ extern "C" { #define CONTAINS_OP 118 #define RERAISE 119 #define COPY 120 +#define RETURN_CONST 121 #define BINARY_OP 122 #define SEND 123 #define LOAD_FAST 124 @@ -181,13 +182,13 @@ extern "C" { #define STORE_ATTR_INSTANCE_VALUE 86 #define STORE_ATTR_SLOT 87 #define STORE_ATTR_WITH_HINT 113 -#define STORE_FAST__LOAD_FAST 121 -#define STORE_FAST__STORE_FAST 153 -#define STORE_SUBSCR_DICT 154 -#define STORE_SUBSCR_LIST_INT 158 -#define UNPACK_SEQUENCE_LIST 159 -#define UNPACK_SEQUENCE_TUPLE 160 -#define UNPACK_SEQUENCE_TWO_TUPLE 161 +#define STORE_FAST__LOAD_FAST 153 +#define 
STORE_FAST__STORE_FAST 154 +#define STORE_SUBSCR_DICT 158 +#define STORE_SUBSCR_LIST_INT 159 +#define UNPACK_SEQUENCE_LIST 160 +#define UNPACK_SEQUENCE_TUPLE 161 +#define UNPACK_SEQUENCE_TWO_TUPLE 166 #define DO_TRACING 255 #define HAS_ARG(op) ((((op) >= HAVE_ARGUMENT) && (!IS_PSEUDO_OPCODE(op)))\ @@ -199,6 +200,7 @@ extern "C" { #define HAS_CONST(op) (false\ || ((op) == LOAD_CONST) \ + || ((op) == RETURN_CONST) \ || ((op) == KW_NAMES) \ ) diff --git a/Include/patchlevel.h b/Include/patchlevel.h index 3a3e40c2e09229..7957220ed7cf9f 100644 --- a/Include/patchlevel.h +++ b/Include/patchlevel.h @@ -20,10 +20,10 @@ #define PY_MINOR_VERSION 12 #define PY_MICRO_VERSION 0 #define PY_RELEASE_LEVEL PY_RELEASE_LEVEL_ALPHA -#define PY_RELEASE_SERIAL 4 +#define PY_RELEASE_SERIAL 5 /* Version as a string */ -#define PY_VERSION "3.12.0a4+" +#define PY_VERSION "3.12.0a5+" /*--end constants--*/ /* Version as a single 4-byte hex number, e.g. 0x010502B2 == 1.5.2b2. diff --git a/Include/pyerrors.h b/Include/pyerrors.h index d5ac6af5b32c6c..d089fa71779330 100644 --- a/Include/pyerrors.h +++ b/Include/pyerrors.h @@ -18,6 +18,8 @@ PyAPI_FUNC(PyObject *) PyErr_Occurred(void); PyAPI_FUNC(void) PyErr_Clear(void); PyAPI_FUNC(void) PyErr_Fetch(PyObject **, PyObject **, PyObject **); PyAPI_FUNC(void) PyErr_Restore(PyObject *, PyObject *, PyObject *); +PyAPI_FUNC(PyObject *) PyErr_GetRaisedException(void); +PyAPI_FUNC(void) PyErr_SetRaisedException(PyObject *); #if !defined(Py_LIMITED_API) || Py_LIMITED_API+0 >= 0x030b0000 PyAPI_FUNC(PyObject*) PyErr_GetHandledException(void); PyAPI_FUNC(void) PyErr_SetHandledException(PyObject *); @@ -51,6 +53,10 @@ PyAPI_FUNC(void) PyException_SetCause(PyObject *, PyObject *); PyAPI_FUNC(PyObject *) PyException_GetContext(PyObject *); PyAPI_FUNC(void) PyException_SetContext(PyObject *, PyObject *); + +PyAPI_FUNC(PyObject *) PyException_GetArgs(PyObject *); +PyAPI_FUNC(void) PyException_SetArgs(PyObject *, PyObject *); + /* */ #define PyExceptionClass_Check(x) \ diff --git a/Include/pyport.h b/Include/pyport.h index b1b2a74779691d..40092c2f81ad48 100644 --- a/Include/pyport.h +++ b/Include/pyport.h @@ -247,6 +247,10 @@ typedef Py_ssize_t Py_ssize_clean_t; #define S_ISCHR(x) (((x) & S_IFMT) == S_IFCHR) #endif +#ifndef S_ISLNK +#define S_ISLNK(x) (((x) & S_IFMT) == S_IFLNK) +#endif + #ifdef __cplusplus /* Move this down here since some C++ #include's don't like to be included inside an extern "C" */ @@ -726,4 +730,10 @@ extern char * _getpty(int *, int, mode_t, int); # endif #endif + +/* AIX has __bool__ redefined in it's system header file. */ +#if defined(_AIX) && defined(__bool__) +#undef __bool__ +#endif + #endif /* Py_PYPORT_H */ diff --git a/Lib/_aix_support.py b/Lib/_aix_support.py index 18533e769b7514..dadc75c2bf4200 100644 --- a/Lib/_aix_support.py +++ b/Lib/_aix_support.py @@ -1,10 +1,28 @@ """Shared AIX support functions.""" -import subprocess import sys import sysconfig +# Taken from _osx_support _read_output function +def _read_cmd_output(commandstring, capture_stderr=False): + """Output from successful command execution or None""" + # Similar to os.popen(commandstring, "r").read(), + # but without actually using os.popen because that + # function is not usable during python bootstrap. 
+ import os + import contextlib + fp = open("/tmp/_aix_support.%s"%( + os.getpid(),), "w+b") + + with contextlib.closing(fp) as fp: + if capture_stderr: + cmd = "%s >'%s' 2>&1" % (commandstring, fp.name) + else: + cmd = "%s 2>/dev/null >'%s'" % (commandstring, fp.name) + return fp.read() if not os.system(cmd) else None + + def _aix_tag(vrtl, bd): # type: (List[int], int) -> str # Infer the ABI bitwidth from maxsize (assuming 64 bit as the default) @@ -30,7 +48,12 @@ def _aix_bos_rte(): If no builddate is found give a value that will satisfy pep425 related queries """ # All AIX systems to have lslpp installed in this location - out = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.rte"]) + # subprocess may not be available during python bootstrap + try: + import subprocess + out = subprocess.check_output(["/usr/bin/lslpp", "-Lqc", "bos.rte"]) + except ImportError: + out = _read_cmd_output("/usr/bin/lslpp -Lqc bos.rte") out = out.decode("utf-8") out = out.strip().split(":") # type: ignore _bd = int(out[-1]) if out[-1] != '' else 9988 diff --git a/Lib/collections/__init__.py b/Lib/collections/__init__.py index b5e4d16e9dbcad..a5393aad4249c0 100644 --- a/Lib/collections/__init__.py +++ b/Lib/collections/__init__.py @@ -267,7 +267,7 @@ def __repr__(self): 'od.__repr__() <==> repr(od)' if not self: return '%s()' % (self.__class__.__name__,) - return '%s(%r)' % (self.__class__.__name__, list(self.items())) + return '%s(%r)' % (self.__class__.__name__, dict(self.items())) def __reduce__(self): 'Return state information for pickling' diff --git a/Lib/compileall.py b/Lib/compileall.py index a388931fb5a99d..d394156cedc4e7 100644 --- a/Lib/compileall.py +++ b/Lib/compileall.py @@ -97,9 +97,15 @@ def compile_dir(dir, maxlevels=None, ddir=None, force=False, files = _walk_dir(dir, quiet=quiet, maxlevels=maxlevels) success = True if workers != 1 and ProcessPoolExecutor is not None: + import multiprocessing + if multiprocessing.get_start_method() == 'fork': + mp_context = multiprocessing.get_context('forkserver') + else: + mp_context = None # If workers == 0, let ProcessPoolExecutor choose workers = workers or None - with ProcessPoolExecutor(max_workers=workers) as executor: + with ProcessPoolExecutor(max_workers=workers, + mp_context=mp_context) as executor: results = executor.map(partial(compile_file, ddir=ddir, force=force, rx=rx, quiet=quiet, diff --git a/Lib/concurrent/futures/process.py b/Lib/concurrent/futures/process.py index 7e2f5fa30e8264..bee162430a6f8e 100644 --- a/Lib/concurrent/futures/process.py +++ b/Lib/concurrent/futures/process.py @@ -616,9 +616,9 @@ def __init__(self, max_workers=None, mp_context=None, max_workers: The maximum number of processes that can be used to execute the given calls. If None or not given then as many worker processes will be created as the machine has processors. - mp_context: A multiprocessing context to launch the workers. This - object should provide SimpleQueue, Queue and Process. Useful - to allow specific multiprocessing start methods. + mp_context: A multiprocessing context to launch the workers created + using the multiprocessing.get_context('start method') API. This + object should provide SimpleQueue, Queue and Process. initializer: A callable used to initialize worker processes. initargs: A tuple of arguments to pass to the initializer. 
max_tasks_per_child: The maximum number of tasks a worker process diff --git a/Lib/ctypes/__init__.py b/Lib/ctypes/__init__.py index 2e9d4c5e7238e9..95353bab26cc71 100644 --- a/Lib/ctypes/__init__.py +++ b/Lib/ctypes/__init__.py @@ -344,6 +344,8 @@ def __init__(self, name, mode=DEFAULT_MODE, handle=None, use_errno=False, use_last_error=False, winmode=None): + if name: + name = _os.fspath(name) self._name = name flags = self._func_flags_ if use_errno: diff --git a/Lib/datetime.py b/Lib/datetime.py index 68746de1cabf85..637144637485bc 100644 --- a/Lib/datetime.py +++ b/Lib/datetime.py @@ -587,9 +587,12 @@ class timedelta: returning a timedelta, and addition or subtraction of a datetime and a timedelta giving a datetime. - Representation: (days, seconds, microseconds). Why? Because I - felt like it. + Representation: (days, seconds, microseconds). """ + # The representation of (days, seconds, microseconds) was chosen + # arbitrarily; the exact rationale originally specified in the docstring + # was "Because I felt like it." + __slots__ = '_days', '_seconds', '_microseconds', '_hashcode' def __new__(cls, days=0, seconds=0, microseconds=0, diff --git a/Lib/dis.py b/Lib/dis.py index 72ab9536a2bf6a..a6921008d9d0e5 100644 --- a/Lib/dis.py +++ b/Lib/dis.py @@ -34,6 +34,7 @@ MAKE_FUNCTION_FLAGS = ('defaults', 'kwdefaults', 'annotations', 'closure') LOAD_CONST = opmap['LOAD_CONST'] +RETURN_CONST = opmap['RETURN_CONST'] LOAD_GLOBAL = opmap['LOAD_GLOBAL'] BINARY_OP = opmap['BINARY_OP'] JUMP_BACKWARD = opmap['JUMP_BACKWARD'] @@ -363,7 +364,7 @@ def _get_const_value(op, arg, co_consts): assert op in hasconst argval = UNKNOWN - if op == LOAD_CONST: + if op == LOAD_CONST or op == RETURN_CONST: if co_consts is not None: argval = co_consts[arg] return argval diff --git a/Lib/ensurepip/__init__.py b/Lib/ensurepip/__init__.py index 1a2f57c07ba341..052b7bca28ddd3 100644 --- a/Lib/ensurepip/__init__.py +++ b/Lib/ensurepip/__init__.py @@ -11,7 +11,7 @@ __all__ = ["version", "bootstrap"] _PACKAGE_NAMES = ('setuptools', 'pip') _SETUPTOOLS_VERSION = "65.5.0" -_PIP_VERSION = "22.3.1" +_PIP_VERSION = "23.0" _PROJECTS = [ ("setuptools", _SETUPTOOLS_VERSION, "py3"), ("pip", _PIP_VERSION, "py3"), diff --git a/Lib/ensurepip/_bundled/pip-22.3.1-py3-none-any.whl b/Lib/ensurepip/_bundled/pip-23.0-py3-none-any.whl similarity index 73% rename from Lib/ensurepip/_bundled/pip-22.3.1-py3-none-any.whl rename to Lib/ensurepip/_bundled/pip-23.0-py3-none-any.whl index c5b7753e757df2..bb9aebf474cfe9 100644 Binary files a/Lib/ensurepip/_bundled/pip-22.3.1-py3-none-any.whl and b/Lib/ensurepip/_bundled/pip-23.0-py3-none-any.whl differ diff --git a/Lib/enum.py b/Lib/enum.py index adb61519abe942..d14e91a9b017d1 100644 --- a/Lib/enum.py +++ b/Lib/enum.py @@ -1429,12 +1429,11 @@ def _missing_(cls, value): % (cls.__name__, value, unknown, bin(unknown)) ) # normal Flag? 
- __new__ = getattr(cls, '__new_member__', None) - if cls._member_type_ is object and not __new__: + if cls._member_type_ is object: # construct a singleton enum pseudo-member pseudo_member = object.__new__(cls) else: - pseudo_member = (__new__ or cls._member_type_.__new__)(cls, value) + pseudo_member = cls._member_type_.__new__(cls, value) if not hasattr(pseudo_member, '_value_'): pseudo_member._value_ = value if member_value: diff --git a/Lib/genericpath.py b/Lib/genericpath.py index ce36451a3af01c..1bd5b3897c3af9 100644 --- a/Lib/genericpath.py +++ b/Lib/genericpath.py @@ -7,7 +7,7 @@ import stat __all__ = ['commonprefix', 'exists', 'getatime', 'getctime', 'getmtime', - 'getsize', 'isdir', 'isfile', 'samefile', 'sameopenfile', + 'getsize', 'isdir', 'isfile', 'islink', 'samefile', 'sameopenfile', 'samestat'] @@ -45,6 +45,18 @@ def isdir(s): return stat.S_ISDIR(st.st_mode) +# Is a path a symbolic link? +# This will always return false on systems where os.lstat doesn't exist. + +def islink(path): + """Test whether a path is a symbolic link""" + try: + st = os.lstat(path) + except (OSError, ValueError, AttributeError): + return False + return stat.S_ISLNK(st.st_mode) + + def getsize(filename): """Return the size of a file, reported by os.stat().""" return os.stat(filename).st_size diff --git a/Lib/importlib/_bootstrap_external.py b/Lib/importlib/_bootstrap_external.py index 8c79eb42d97df2..0b94a8334ae500 100644 --- a/Lib/importlib/_bootstrap_external.py +++ b/Lib/importlib/_bootstrap_external.py @@ -431,7 +431,8 @@ def _write_atomic(path, data, mode=0o666): # Python 3.12a5 3515 (Embed jump mask in COMPARE_OP oparg) # Python 3.12a5 3516 (Add COMPARE_AND_BRANCH instruction) # Python 3.12a5 3517 (Change YIELD_VALUE oparg to exception block depth) -# Python 3.12a5 3518 (Inline sync list/dict/set comprehensions in the calling function) +# Python 3.12a5 3518 (Add RETURN_CONST instruction) +# Python 3.12a5 3519 (Inline sync list/dict/set comprehensions in the calling function) # Python 3.13 will start with 3550 @@ -444,7 +445,7 @@ def _write_atomic(path, data, mode=0o666): # Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array # in PC/launcher.c must also be updated. -MAGIC_NUMBER = (3518).to_bytes(2, 'little') + b'\r\n' +MAGIC_NUMBER = (3519).to_bytes(2, 'little') + b'\r\n' _RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c diff --git a/Lib/multiprocessing/context.py b/Lib/multiprocessing/context.py index b1960ea296fe20..de8a264829dff3 100644 --- a/Lib/multiprocessing/context.py +++ b/Lib/multiprocessing/context.py @@ -258,6 +258,7 @@ def get_start_method(self, allow_none=False): return self._actual_context._name def get_all_start_methods(self): + """Returns a list of the supported start methods, default first.""" if sys.platform == 'win32': return ['spawn'] else: diff --git a/Lib/ntpath.py b/Lib/ntpath.py index f9ee8e02a576b7..e93a5e69600973 100644 --- a/Lib/ntpath.py +++ b/Lib/ntpath.py @@ -276,19 +276,6 @@ def dirname(p): """Returns the directory component of a pathname""" return split(p)[0] -# Is a path a symbolic link? -# This will always return false on systems where os.lstat doesn't exist. - -def islink(path): - """Test whether a path is a symbolic link. - This will always return false for Windows prior to 6.0. - """ - try: - st = os.lstat(path) - except (OSError, ValueError, AttributeError): - return False - return stat.S_ISLNK(st.st_mode) - # Is a path a junction? 
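The ``islink()`` helper added to ``Lib/genericpath.py`` above consolidates the copies previously duplicated in ``posixpath`` and ``ntpath``; like those, it returns ``False`` rather than raising when ``os.lstat`` is missing or fails. A small usage sketch via ``os.path`` (the temporary file names are illustrative)::

    import os
    import tempfile

    with tempfile.TemporaryDirectory() as tmp:
        target = os.path.join(tmp, 'target.txt')
        link = os.path.join(tmp, 'link')
        open(target, 'w').close()
        try:
            os.symlink(target, link)
        except OSError:
            # Creating symlinks may require privileges (e.g. on Windows).
            pass
        else:
            print(os.path.islink(link))    # True: a symbolic link
        print(os.path.islink(target))      # False: a regular file
        print(os.path.islink(link + 'x'))  # False: path does not exist
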
@@ -870,11 +857,13 @@ def commonpath(paths): try: - # The genericpath.isdir implementation uses os.stat and checks the mode - # attribute to tell whether or not the path is a directory. - # This is overkill on Windows - just pass the path to GetFileAttributes - # and check the attribute from there. - from nt import _isdir as isdir + # The isdir(), isfile(), islink() and exists() implementations in + # genericpath use os.stat(). This is overkill on Windows. Use simpler + # builtin functions if they are available. + from nt import _path_isdir as isdir + from nt import _path_isfile as isfile + from nt import _path_islink as islink + from nt import _path_exists as exists except ImportError: - # Use genericpath.isdir as imported above. + # Use genericpath.* as imported above pass diff --git a/Lib/opcode.py b/Lib/opcode.py index bc5cedef8ce197..42f4b0f49b4425 100644 --- a/Lib/opcode.py +++ b/Lib/opcode.py @@ -164,6 +164,8 @@ def pseudo_op(name, op, real_ops): def_op('CONTAINS_OP', 118) def_op('RERAISE', 119) def_op('COPY', 120) +def_op('RETURN_CONST', 121) +hasconst.append(121) def_op('BINARY_OP', 122) jrel_op('SEND', 123) # Number of bytes to skip def_op('LOAD_FAST', 124) # Local variable number, no null check diff --git a/Lib/posixpath.py b/Lib/posixpath.py index 32b5d6e105dde9..e4f155e41a3221 100644 --- a/Lib/posixpath.py +++ b/Lib/posixpath.py @@ -187,18 +187,6 @@ def dirname(p): return head -# Is a path a symbolic link? -# This will always return false on systems where os.lstat doesn't exist. - -def islink(path): - """Test whether a path is a symbolic link""" - try: - st = os.lstat(path) - except (OSError, ValueError, AttributeError): - return False - return stat.S_ISLNK(st.st_mode) - - # Is a path a junction? def isjunction(path): diff --git a/Lib/pty.py b/Lib/pty.py index 03073f07c92c05..6571050886bd1d 100644 --- a/Lib/pty.py +++ b/Lib/pty.py @@ -40,6 +40,9 @@ def master_open(): Open a pty master and return the fd, and the filename of the slave end. Deprecated, use openpty() instead.""" + import warnings + warnings.warn("Use pty.openpty() instead.", DeprecationWarning, stacklevel=2) # Remove API in 3.14 + try: master_fd, slave_fd = os.openpty() except (AttributeError, OSError): @@ -69,6 +72,9 @@ def slave_open(tty_name): opened filedescriptor. Deprecated, use openpty() instead.""" + import warnings + warnings.warn("Use pty.openpty() instead.", DeprecationWarning, stacklevel=2) # Remove API in 3.14 + result = os.open(tty_name, os.O_RDWR) try: from fcntl import ioctl, I_PUSH @@ -101,20 +107,8 @@ def fork(): master_fd, slave_fd = openpty() pid = os.fork() if pid == CHILD: - # Establish a new session. - os.setsid() os.close(master_fd) - - # Slave becomes stdin/stdout/stderr of child. - os.dup2(slave_fd, STDIN_FILENO) - os.dup2(slave_fd, STDOUT_FILENO) - os.dup2(slave_fd, STDERR_FILENO) - if slave_fd > STDERR_FILENO: - os.close(slave_fd) - - # Explicitly open the tty to make it become a controlling tty. - tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR) - os.close(tmp_fd) + os.login_tty(slave_fd) else: os.close(slave_fd) diff --git a/Lib/pydoc_data/topics.py b/Lib/pydoc_data/topics.py index 11b75037e78b46..e7f403d3ffbf12 100644 --- a/Lib/pydoc_data/topics.py +++ b/Lib/pydoc_data/topics.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Autogenerated by Sphinx on Tue Jan 10 13:08:32 2023 +# Autogenerated by Sphinx on Tue Feb 7 13:18:04 2023 topics = {'assert': 'The "assert" statement\n' '**********************\n' '\n' @@ -4647,6 +4647,18 @@ 'the source. 
The extension interface uses the modules "bdb" and ' '"cmd".\n' '\n' + 'See also:\n' + '\n' + ' Module "faulthandler"\n' + ' Used to dump Python tracebacks explicitly, on a fault, ' + 'after a\n' + ' timeout, or on a user signal.\n' + '\n' + ' Module "traceback"\n' + ' Standard interface to extract, format and print stack ' + 'traces of\n' + ' Python programs.\n' + '\n' 'The debugger’s prompt is "(Pdb)". Typical usage to run a program ' 'under\n' 'control of the debugger is:\n' diff --git a/Lib/subprocess.py b/Lib/subprocess.py index 9cadd1bf8e622c..1f203bd00d3500 100644 --- a/Lib/subprocess.py +++ b/Lib/subprocess.py @@ -1480,7 +1480,23 @@ def _execute_child(self, args, executable, preexec_fn, close_fds, if shell: startupinfo.dwFlags |= _winapi.STARTF_USESHOWWINDOW startupinfo.wShowWindow = _winapi.SW_HIDE - comspec = os.environ.get("COMSPEC", "cmd.exe") + if not executable: + # gh-101283: without a fully-qualified path, before Windows + # checks the system directories, it first looks in the + # application directory, and also the current directory if + # NeedCurrentDirectoryForExePathW(ExeName) is true, so try + # to avoid executing unqualified "cmd.exe". + comspec = os.environ.get('ComSpec') + if not comspec: + system_root = os.environ.get('SystemRoot', '') + comspec = os.path.join(system_root, 'System32', 'cmd.exe') + if not os.path.isabs(comspec): + raise FileNotFoundError('shell not found: neither %ComSpec% nor %SystemRoot% is set') + if os.path.isabs(comspec): + executable = comspec + else: + comspec = executable + args = '{} /c "{}"'.format (comspec, args) if cwd is not None: diff --git a/Lib/test/_test_multiprocessing.py b/Lib/test/_test_multiprocessing.py index 2fa75eb4d11311..9a2db24b4bd597 100644 --- a/Lib/test/_test_multiprocessing.py +++ b/Lib/test/_test_multiprocessing.py @@ -4967,11 +4967,13 @@ def run_in_grandchild(cls, conn): conn.send(tuple(sys.flags)) @classmethod - def run_in_child(cls): + def run_in_child(cls, start_method): import json - r, w = multiprocessing.Pipe(duplex=False) - p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,)) - p.start() + mp = multiprocessing.get_context(start_method) + r, w = mp.Pipe(duplex=False) + p = mp.Process(target=cls.run_in_grandchild, args=(w,)) + with warnings.catch_warnings(category=DeprecationWarning): + p.start() grandchild_flags = r.recv() p.join() r.close() @@ -4982,8 +4984,10 @@ def run_in_child(cls): def test_flags(self): import json # start child process using unusual flags - prog = ('from test._test_multiprocessing import TestFlags; ' + - 'TestFlags.run_in_child()') + prog = ( + 'from test._test_multiprocessing import TestFlags; ' + f'TestFlags.run_in_child({multiprocessing.get_start_method()!r})' + ) data = subprocess.check_output( [sys.executable, '-E', '-S', '-O', '-c', prog]) child_flags, grandchild_flags = json.loads(data.decode('ascii')) diff --git a/Lib/test/_test_venv_multiprocessing.py b/Lib/test/_test_venv_multiprocessing.py index af72e915ba52bb..044a0c6cd3f5ca 100644 --- a/Lib/test/_test_venv_multiprocessing.py +++ b/Lib/test/_test_venv_multiprocessing.py @@ -30,6 +30,7 @@ def test_func(): def main(): + multiprocessing.set_start_method('spawn') test_pool = multiprocessing.Process(target=test_func) test_pool.start() test_pool.join() diff --git a/Lib/test/datetimetester.py b/Lib/test/datetimetester.py index 6a1df174a1b972..570f803918c1ef 100644 --- a/Lib/test/datetimetester.py +++ b/Lib/test/datetimetester.py @@ -6173,7 +6173,7 @@ def test_gaps(self): self.assertEqual(ldt.fold, 0) 
@unittest.skipUnless( - hasattr(time, "tzset"), "time module has no attribute tzset" + hasattr(_time, "tzset"), "time module has no attribute tzset" ) def test_system_transitions(self): if ('Riyadh8' in self.zonename or diff --git a/Lib/test/support/interpreters.py b/Lib/test/support/interpreters.py index 2935708f9df1a5..eeff3abe0324e5 100644 --- a/Lib/test/support/interpreters.py +++ b/Lib/test/support/interpreters.py @@ -2,11 +2,12 @@ import time import _xxsubinterpreters as _interpreters +import _xxinterpchannels as _channels # aliases: -from _xxsubinterpreters import ( +from _xxsubinterpreters import is_shareable +from _xxinterpchannels import ( ChannelError, ChannelNotFoundError, ChannelEmptyError, - is_shareable, ) @@ -102,7 +103,7 @@ def create_channel(): The channel may be used to pass data safely between interpreters. """ - cid = _interpreters.channel_create() + cid = _channels.create() recv, send = RecvChannel(cid), SendChannel(cid) return recv, send @@ -110,14 +111,14 @@ def create_channel(): def list_all_channels(): """Return a list of (recv, send) for all open channels.""" return [(RecvChannel(cid), SendChannel(cid)) - for cid in _interpreters.channel_list_all()] + for cid in _channels.list_all()] class _ChannelEnd: """The base class for RecvChannel and SendChannel.""" def __init__(self, id): - if not isinstance(id, (int, _interpreters.ChannelID)): + if not isinstance(id, (int, _channels.ChannelID)): raise TypeError(f'id must be an int, got {id!r}') self._id = id @@ -152,10 +153,10 @@ def recv(self, *, _sentinel=object(), _delay=10 / 1000): # 10 milliseconds This blocks until an object has been sent, if none have been sent already. """ - obj = _interpreters.channel_recv(self._id, _sentinel) + obj = _channels.recv(self._id, _sentinel) while obj is _sentinel: time.sleep(_delay) - obj = _interpreters.channel_recv(self._id, _sentinel) + obj = _channels.recv(self._id, _sentinel) return obj def recv_nowait(self, default=_NOT_SET): @@ -166,9 +167,9 @@ def recv_nowait(self, default=_NOT_SET): is the same as recv(). """ if default is _NOT_SET: - return _interpreters.channel_recv(self._id) + return _channels.recv(self._id) else: - return _interpreters.channel_recv(self._id, default) + return _channels.recv(self._id, default) class SendChannel(_ChannelEnd): @@ -179,7 +180,7 @@ def send(self, obj): This blocks until the object is received. """ - _interpreters.channel_send(self._id, obj) + _channels.send(self._id, obj) # XXX We are missing a low-level channel_send_wait(). # See bpo-32604 and gh-19829. # Until that shows up we fake it: @@ -194,4 +195,4 @@ def send_nowait(self, obj): # XXX Note that at the moment channel_send() only ever returns # None. This should be fixed when channel_send_wait() is added. # See bpo-32604 and gh-19829. 
- return _interpreters.channel_send(self._id, obj) + return _channels.send(self._id, obj) diff --git a/Lib/test/test__xxinterpchannels.py b/Lib/test/test__xxinterpchannels.py new file mode 100644 index 00000000000000..03bb5c80b8dac9 --- /dev/null +++ b/Lib/test/test__xxinterpchannels.py @@ -0,0 +1,1541 @@ +from collections import namedtuple +import contextlib +import os +import sys +from textwrap import dedent +import threading +import time +import unittest + +from test.support import import_helper + +from test.test__xxsubinterpreters import ( + interpreters, + _run_output, + clean_up_interpreters, +) + + +channels = import_helper.import_module('_xxinterpchannels') + + +################################## +# helpers + +#@contextmanager +#def run_threaded(id, source, **shared): +# def run(): +# run_interp(id, source, **shared) +# t = threading.Thread(target=run) +# t.start() +# yield +# t.join() + + +def run_interp(id, source, **shared): + _run_interp(id, source, shared) + + +def _run_interp(id, source, shared, _mainns={}): + source = dedent(source) + main = interpreters.get_main() + if main == id: + if interpreters.get_current() != main: + raise RuntimeError + # XXX Run a func? + exec(source, _mainns) + else: + interpreters.run_string(id, source, shared) + + +class Interpreter(namedtuple('Interpreter', 'name id')): + + @classmethod + def from_raw(cls, raw): + if isinstance(raw, cls): + return raw + elif isinstance(raw, str): + return cls(raw) + else: + raise NotImplementedError + + def __new__(cls, name=None, id=None): + main = interpreters.get_main() + if id == main: + if not name: + name = 'main' + elif name != 'main': + raise ValueError( + 'name mismatch (expected "main", got "{}")'.format(name)) + id = main + elif id is not None: + if not name: + name = 'interp' + elif name == 'main': + raise ValueError('name mismatch (unexpected "main")') + if not isinstance(id, interpreters.InterpreterID): + id = interpreters.InterpreterID(id) + elif not name or name == 'main': + name = 'main' + id = main + else: + id = interpreters.create() + self = super().__new__(cls, name, id) + return self + + +# XXX expect_channel_closed() is unnecessary once we improve exc propagation. 
+ +@contextlib.contextmanager +def expect_channel_closed(): + try: + yield + except channels.ChannelClosedError: + pass + else: + assert False, 'channel not closed' + + +class ChannelAction(namedtuple('ChannelAction', 'action end interp')): + + def __new__(cls, action, end=None, interp=None): + if not end: + end = 'both' + if not interp: + interp = 'main' + self = super().__new__(cls, action, end, interp) + return self + + def __init__(self, *args, **kwargs): + if self.action == 'use': + if self.end not in ('same', 'opposite', 'send', 'recv'): + raise ValueError(self.end) + elif self.action in ('close', 'force-close'): + if self.end not in ('both', 'same', 'opposite', 'send', 'recv'): + raise ValueError(self.end) + else: + raise ValueError(self.action) + if self.interp not in ('main', 'same', 'other', 'extra'): + raise ValueError(self.interp) + + def resolve_end(self, end): + if self.end == 'same': + return end + elif self.end == 'opposite': + return 'recv' if end == 'send' else 'send' + else: + return self.end + + def resolve_interp(self, interp, other, extra): + if self.interp == 'same': + return interp + elif self.interp == 'other': + if other is None: + raise RuntimeError + return other + elif self.interp == 'extra': + if extra is None: + raise RuntimeError + return extra + elif self.interp == 'main': + if interp.name == 'main': + return interp + elif other and other.name == 'main': + return other + else: + raise RuntimeError + # Per __init__(), there aren't any others. + + +class ChannelState(namedtuple('ChannelState', 'pending closed')): + + def __new__(cls, pending=0, *, closed=False): + self = super().__new__(cls, pending, closed) + return self + + def incr(self): + return type(self)(self.pending + 1, closed=self.closed) + + def decr(self): + return type(self)(self.pending - 1, closed=self.closed) + + def close(self, *, force=True): + if self.closed: + if not force or self.pending == 0: + return self + return type(self)(0 if force else self.pending, closed=True) + + +def run_action(cid, action, end, state, *, hideclosed=True): + if state.closed: + if action == 'use' and end == 'recv' and state.pending: + expectfail = False + else: + expectfail = True + else: + expectfail = False + + try: + result = _run_action(cid, action, end, state) + except channels.ChannelClosedError: + if not hideclosed and not expectfail: + raise + result = state.close() + else: + if expectfail: + raise ... 
# XXX + return result + + +def _run_action(cid, action, end, state): + if action == 'use': + if end == 'send': + channels.send(cid, b'spam') + return state.incr() + elif end == 'recv': + if not state.pending: + try: + channels.recv(cid) + except channels.ChannelEmptyError: + return state + else: + raise Exception('expected ChannelEmptyError') + else: + channels.recv(cid) + return state.decr() + else: + raise ValueError(end) + elif action == 'close': + kwargs = {} + if end in ('recv', 'send'): + kwargs[end] = True + channels.close(cid, **kwargs) + return state.close() + elif action == 'force-close': + kwargs = { + 'force': True, + } + if end in ('recv', 'send'): + kwargs[end] = True + channels.close(cid, **kwargs) + return state.close(force=True) + else: + raise ValueError(action) + + +def clean_up_channels(): + for cid in channels.list_all(): + try: + channels.destroy(cid) + except channels.ChannelNotFoundError: + pass # already destroyed + + +class TestBase(unittest.TestCase): + + def tearDown(self): + clean_up_channels() + clean_up_interpreters() + + +################################## +# channel tests + +class ChannelIDTests(TestBase): + + def test_default_kwargs(self): + cid = channels._channel_id(10, force=True) + + self.assertEqual(int(cid), 10) + self.assertEqual(cid.end, 'both') + + def test_with_kwargs(self): + cid = channels._channel_id(10, send=True, force=True) + self.assertEqual(cid.end, 'send') + + cid = channels._channel_id(10, send=True, recv=False, force=True) + self.assertEqual(cid.end, 'send') + + cid = channels._channel_id(10, recv=True, force=True) + self.assertEqual(cid.end, 'recv') + + cid = channels._channel_id(10, recv=True, send=False, force=True) + self.assertEqual(cid.end, 'recv') + + cid = channels._channel_id(10, send=True, recv=True, force=True) + self.assertEqual(cid.end, 'both') + + def test_coerce_id(self): + class Int(str): + def __index__(self): + return 10 + + cid = channels._channel_id(Int(), force=True) + self.assertEqual(int(cid), 10) + + def test_bad_id(self): + self.assertRaises(TypeError, channels._channel_id, object()) + self.assertRaises(TypeError, channels._channel_id, 10.0) + self.assertRaises(TypeError, channels._channel_id, '10') + self.assertRaises(TypeError, channels._channel_id, b'10') + self.assertRaises(ValueError, channels._channel_id, -1) + self.assertRaises(OverflowError, channels._channel_id, 2**64) + + def test_bad_kwargs(self): + with self.assertRaises(ValueError): + channels._channel_id(10, send=False, recv=False) + + def test_does_not_exist(self): + cid = channels.create() + with self.assertRaises(channels.ChannelNotFoundError): + channels._channel_id(int(cid) + 1) # unforced + + def test_str(self): + cid = channels._channel_id(10, force=True) + self.assertEqual(str(cid), '10') + + def test_repr(self): + cid = channels._channel_id(10, force=True) + self.assertEqual(repr(cid), 'ChannelID(10)') + + cid = channels._channel_id(10, send=True, force=True) + self.assertEqual(repr(cid), 'ChannelID(10, send=True)') + + cid = channels._channel_id(10, recv=True, force=True) + self.assertEqual(repr(cid), 'ChannelID(10, recv=True)') + + cid = channels._channel_id(10, send=True, recv=True, force=True) + self.assertEqual(repr(cid), 'ChannelID(10)') + + def test_equality(self): + cid1 = channels.create() + cid2 = channels._channel_id(int(cid1)) + cid3 = channels.create() + + self.assertTrue(cid1 == cid1) + self.assertTrue(cid1 == cid2) + self.assertTrue(cid1 == int(cid1)) + self.assertTrue(int(cid1) == cid1) + self.assertTrue(cid1 == 
float(int(cid1))) + self.assertTrue(float(int(cid1)) == cid1) + self.assertFalse(cid1 == float(int(cid1)) + 0.1) + self.assertFalse(cid1 == str(int(cid1))) + self.assertFalse(cid1 == 2**1000) + self.assertFalse(cid1 == float('inf')) + self.assertFalse(cid1 == 'spam') + self.assertFalse(cid1 == cid3) + + self.assertFalse(cid1 != cid1) + self.assertFalse(cid1 != cid2) + self.assertTrue(cid1 != cid3) + + def test_shareable(self): + chan = channels.create() + + obj = channels.create() + channels.send(chan, obj) + got = channels.recv(chan) + + self.assertEqual(got, obj) + self.assertIs(type(got), type(obj)) + # XXX Check the following in the channel tests? + #self.assertIsNot(got, obj) + + +class ChannelTests(TestBase): + + def test_create_cid(self): + cid = channels.create() + self.assertIsInstance(cid, channels.ChannelID) + + def test_sequential_ids(self): + before = channels.list_all() + id1 = channels.create() + id2 = channels.create() + id3 = channels.create() + after = channels.list_all() + + self.assertEqual(id2, int(id1) + 1) + self.assertEqual(id3, int(id2) + 1) + self.assertEqual(set(after) - set(before), {id1, id2, id3}) + + def test_ids_global(self): + id1 = interpreters.create() + out = _run_output(id1, dedent(""" + import _xxinterpchannels as _channels + cid = _channels.create() + print(cid) + """)) + cid1 = int(out.strip()) + + id2 = interpreters.create() + out = _run_output(id2, dedent(""" + import _xxinterpchannels as _channels + cid = _channels.create() + print(cid) + """)) + cid2 = int(out.strip()) + + self.assertEqual(cid2, int(cid1) + 1) + + def test_channel_list_interpreters_none(self): + """Test listing interpreters for a channel with no associations.""" + # Test for channel with no associated interpreters. + cid = channels.create() + send_interps = channels.list_interpreters(cid, send=True) + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(send_interps, []) + self.assertEqual(recv_interps, []) + + def test_channel_list_interpreters_basic(self): + """Test basic listing channel interpreters.""" + interp0 = interpreters.get_main() + cid = channels.create() + channels.send(cid, "send") + # Test for a channel that has one end associated to an interpreter. + send_interps = channels.list_interpreters(cid, send=True) + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(send_interps, [interp0]) + self.assertEqual(recv_interps, []) + + interp1 = interpreters.create() + _run_output(interp1, dedent(f""" + import _xxinterpchannels as _channels + obj = _channels.recv({cid}) + """)) + # Test for channel that has both ends associated to an interpreter. 
+ send_interps = channels.list_interpreters(cid, send=True) + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(send_interps, [interp0]) + self.assertEqual(recv_interps, [interp1]) + + def test_channel_list_interpreters_multiple(self): + """Test listing interpreters for a channel with many associations.""" + interp0 = interpreters.get_main() + interp1 = interpreters.create() + interp2 = interpreters.create() + interp3 = interpreters.create() + cid = channels.create() + + channels.send(cid, "send") + _run_output(interp1, dedent(f""" + import _xxinterpchannels as _channels + _channels.send({cid}, "send") + """)) + _run_output(interp2, dedent(f""" + import _xxinterpchannels as _channels + obj = _channels.recv({cid}) + """)) + _run_output(interp3, dedent(f""" + import _xxinterpchannels as _channels + obj = _channels.recv({cid}) + """)) + send_interps = channels.list_interpreters(cid, send=True) + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(set(send_interps), {interp0, interp1}) + self.assertEqual(set(recv_interps), {interp2, interp3}) + + def test_channel_list_interpreters_destroyed(self): + """Test listing channel interpreters with a destroyed interpreter.""" + interp0 = interpreters.get_main() + interp1 = interpreters.create() + cid = channels.create() + channels.send(cid, "send") + _run_output(interp1, dedent(f""" + import _xxinterpchannels as _channels + obj = _channels.recv({cid}) + """)) + # Should be one interpreter associated with each end. + send_interps = channels.list_interpreters(cid, send=True) + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(send_interps, [interp0]) + self.assertEqual(recv_interps, [interp1]) + + interpreters.destroy(interp1) + # Destroyed interpreter should not be listed. + send_interps = channels.list_interpreters(cid, send=True) + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(send_interps, [interp0]) + self.assertEqual(recv_interps, []) + + def test_channel_list_interpreters_released(self): + """Test listing channel interpreters with a released channel.""" + # Set up one channel with main interpreter on the send end and two + # subinterpreters on the receive end. + interp0 = interpreters.get_main() + interp1 = interpreters.create() + interp2 = interpreters.create() + cid = channels.create() + channels.send(cid, "data") + _run_output(interp1, dedent(f""" + import _xxinterpchannels as _channels + obj = _channels.recv({cid}) + """)) + channels.send(cid, "data") + _run_output(interp2, dedent(f""" + import _xxinterpchannels as _channels + obj = _channels.recv({cid}) + """)) + # Check the setup. + send_interps = channels.list_interpreters(cid, send=True) + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(len(send_interps), 1) + self.assertEqual(len(recv_interps), 2) + + # Release the main interpreter from the send end. + channels.release(cid, send=True) + # Send end should have no associated interpreters. + send_interps = channels.list_interpreters(cid, send=True) + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(len(send_interps), 0) + self.assertEqual(len(recv_interps), 2) + + # Release one of the subinterpreters from the receive end. + _run_output(interp2, dedent(f""" + import _xxinterpchannels as _channels + _channels.release({cid}) + """)) + # Receive end should have the released interpreter removed. 
+ send_interps = channels.list_interpreters(cid, send=True) + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(len(send_interps), 0) + self.assertEqual(recv_interps, [interp1]) + + def test_channel_list_interpreters_closed(self): + """Test listing channel interpreters with a closed channel.""" + interp0 = interpreters.get_main() + interp1 = interpreters.create() + cid = channels.create() + # Put something in the channel so that it's not empty. + channels.send(cid, "send") + + # Check initial state. + send_interps = channels.list_interpreters(cid, send=True) + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(len(send_interps), 1) + self.assertEqual(len(recv_interps), 0) + + # Force close the channel. + channels.close(cid, force=True) + # Both ends should raise an error. + with self.assertRaises(channels.ChannelClosedError): + channels.list_interpreters(cid, send=True) + with self.assertRaises(channels.ChannelClosedError): + channels.list_interpreters(cid, send=False) + + def test_channel_list_interpreters_closed_send_end(self): + """Test listing channel interpreters with a channel's send end closed.""" + interp0 = interpreters.get_main() + interp1 = interpreters.create() + cid = channels.create() + # Put something in the channel so that it's not empty. + channels.send(cid, "send") + + # Check initial state. + send_interps = channels.list_interpreters(cid, send=True) + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(len(send_interps), 1) + self.assertEqual(len(recv_interps), 0) + + # Close the send end of the channel. + channels.close(cid, send=True) + # Send end should raise an error. + with self.assertRaises(channels.ChannelClosedError): + channels.list_interpreters(cid, send=True) + # Receive end should not be closed (since channel is not empty). + recv_interps = channels.list_interpreters(cid, send=False) + self.assertEqual(len(recv_interps), 0) + + # Close the receive end of the channel from a subinterpreter. + _run_output(interp1, dedent(f""" + import _xxinterpchannels as _channels + _channels.close({cid}, force=True) + """)) + # Both ends should raise an error. 
+ with self.assertRaises(channels.ChannelClosedError): + channels.list_interpreters(cid, send=True) + with self.assertRaises(channels.ChannelClosedError): + channels.list_interpreters(cid, send=False) + + #################### + + def test_send_recv_main(self): + cid = channels.create() + orig = b'spam' + channels.send(cid, orig) + obj = channels.recv(cid) + + self.assertEqual(obj, orig) + self.assertIsNot(obj, orig) + + def test_send_recv_same_interpreter(self): + id1 = interpreters.create() + out = _run_output(id1, dedent(""" + import _xxinterpchannels as _channels + cid = _channels.create() + orig = b'spam' + _channels.send(cid, orig) + obj = _channels.recv(cid) + assert obj is not orig + assert obj == orig + """)) + + def test_send_recv_different_interpreters(self): + cid = channels.create() + id1 = interpreters.create() + out = _run_output(id1, dedent(f""" + import _xxinterpchannels as _channels + _channels.send({cid}, b'spam') + """)) + obj = channels.recv(cid) + + self.assertEqual(obj, b'spam') + + def test_send_recv_different_threads(self): + cid = channels.create() + + def f(): + while True: + try: + obj = channels.recv(cid) + break + except channels.ChannelEmptyError: + time.sleep(0.1) + channels.send(cid, obj) + t = threading.Thread(target=f) + t.start() + + channels.send(cid, b'spam') + t.join() + obj = channels.recv(cid) + + self.assertEqual(obj, b'spam') + + def test_send_recv_different_interpreters_and_threads(self): + cid = channels.create() + id1 = interpreters.create() + out = None + + def f(): + nonlocal out + out = _run_output(id1, dedent(f""" + import time + import _xxinterpchannels as _channels + while True: + try: + obj = _channels.recv({cid}) + break + except _channels.ChannelEmptyError: + time.sleep(0.1) + assert(obj == b'spam') + _channels.send({cid}, b'eggs') + """)) + t = threading.Thread(target=f) + t.start() + + channels.send(cid, b'spam') + t.join() + obj = channels.recv(cid) + + self.assertEqual(obj, b'eggs') + + def test_send_not_found(self): + with self.assertRaises(channels.ChannelNotFoundError): + channels.send(10, b'spam') + + def test_recv_not_found(self): + with self.assertRaises(channels.ChannelNotFoundError): + channels.recv(10) + + def test_recv_empty(self): + cid = channels.create() + with self.assertRaises(channels.ChannelEmptyError): + channels.recv(cid) + + def test_recv_default(self): + default = object() + cid = channels.create() + obj1 = channels.recv(cid, default) + channels.send(cid, None) + channels.send(cid, 1) + channels.send(cid, b'spam') + channels.send(cid, b'eggs') + obj2 = channels.recv(cid, default) + obj3 = channels.recv(cid, default) + obj4 = channels.recv(cid) + obj5 = channels.recv(cid, default) + obj6 = channels.recv(cid, default) + + self.assertIs(obj1, default) + self.assertIs(obj2, None) + self.assertEqual(obj3, 1) + self.assertEqual(obj4, b'spam') + self.assertEqual(obj5, b'eggs') + self.assertIs(obj6, default) + + def test_recv_sending_interp_destroyed(self): + cid = channels.create() + interp = interpreters.create() + interpreters.run_string(interp, dedent(f""" + import _xxinterpchannels as _channels + _channels.send({cid}, b'spam') + """)) + interpreters.destroy(interp) + + with self.assertRaisesRegex(RuntimeError, + 'unrecognized interpreter ID'): + channels.recv(cid) + + def test_allowed_types(self): + cid = channels.create() + objects = [ + None, + 'spam', + b'spam', + 42, + ] + for obj in objects: + with self.subTest(obj): + channels.send(cid, obj) + got = channels.recv(cid) + + self.assertEqual(got, obj) + 
self.assertIs(type(got), type(obj)) + # XXX Check the following? + #self.assertIsNot(got, obj) + # XXX What about between interpreters? + + def test_run_string_arg_unresolved(self): + cid = channels.create() + interp = interpreters.create() + + out = _run_output(interp, dedent(""" + import _xxinterpchannels as _channels + print(cid.end) + _channels.send(cid, b'spam') + """), + dict(cid=cid.send)) + obj = channels.recv(cid) + + self.assertEqual(obj, b'spam') + self.assertEqual(out.strip(), 'send') + + # XXX For now there is no high-level channel into which the + # sent channel ID can be converted... + # Note: this test caused crashes on some buildbots (bpo-33615). + @unittest.skip('disabled until high-level channels exist') + def test_run_string_arg_resolved(self): + cid = channels.create() + cid = channels._channel_id(cid, _resolve=True) + interp = interpreters.create() + + out = _run_output(interp, dedent(""" + import _xxinterpchannels as _channels + print(chan.id.end) + _channels.send(chan.id, b'spam') + """), + dict(chan=cid.send)) + obj = channels.recv(cid) + + self.assertEqual(obj, b'spam') + self.assertEqual(out.strip(), 'send') + + # close + + def test_close_single_user(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.recv(cid) + channels.close(cid) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'eggs') + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_close_multiple_users(self): + cid = channels.create() + id1 = interpreters.create() + id2 = interpreters.create() + interpreters.run_string(id1, dedent(f""" + import _xxinterpchannels as _channels + _channels.send({cid}, b'spam') + """)) + interpreters.run_string(id2, dedent(f""" + import _xxinterpchannels as _channels + _channels.recv({cid}) + """)) + channels.close(cid) + with self.assertRaises(interpreters.RunFailedError) as cm: + interpreters.run_string(id1, dedent(f""" + _channels.send({cid}, b'spam') + """)) + self.assertIn('ChannelClosedError', str(cm.exception)) + with self.assertRaises(interpreters.RunFailedError) as cm: + interpreters.run_string(id2, dedent(f""" + _channels.send({cid}, b'spam') + """)) + self.assertIn('ChannelClosedError', str(cm.exception)) + + def test_close_multiple_times(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.recv(cid) + channels.close(cid) + + with self.assertRaises(channels.ChannelClosedError): + channels.close(cid) + + def test_close_empty(self): + tests = [ + (False, False), + (True, False), + (False, True), + (True, True), + ] + for send, recv in tests: + with self.subTest((send, recv)): + cid = channels.create() + channels.send(cid, b'spam') + channels.recv(cid) + channels.close(cid, send=send, recv=recv) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'eggs') + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_close_defaults_with_unused_items(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.send(cid, b'ham') + + with self.assertRaises(channels.ChannelNotEmptyError): + channels.close(cid) + channels.recv(cid) + channels.send(cid, b'eggs') + + def test_close_recv_with_unused_items_unforced(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.send(cid, b'ham') + + with self.assertRaises(channels.ChannelNotEmptyError): + channels.close(cid, recv=True) + channels.recv(cid) + channels.send(cid, b'eggs') + channels.recv(cid) + channels.recv(cid) + 
channels.close(cid, recv=True) + + def test_close_send_with_unused_items_unforced(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.send(cid, b'ham') + channels.close(cid, send=True) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'eggs') + channels.recv(cid) + channels.recv(cid) + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_close_both_with_unused_items_unforced(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.send(cid, b'ham') + + with self.assertRaises(channels.ChannelNotEmptyError): + channels.close(cid, recv=True, send=True) + channels.recv(cid) + channels.send(cid, b'eggs') + channels.recv(cid) + channels.recv(cid) + channels.close(cid, recv=True) + + def test_close_recv_with_unused_items_forced(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.send(cid, b'ham') + channels.close(cid, recv=True, force=True) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'eggs') + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_close_send_with_unused_items_forced(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.send(cid, b'ham') + channels.close(cid, send=True, force=True) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'eggs') + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_close_both_with_unused_items_forced(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.send(cid, b'ham') + channels.close(cid, send=True, recv=True, force=True) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'eggs') + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_close_never_used(self): + cid = channels.create() + channels.close(cid) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'spam') + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_close_by_unassociated_interp(self): + cid = channels.create() + channels.send(cid, b'spam') + interp = interpreters.create() + interpreters.run_string(interp, dedent(f""" + import _xxinterpchannels as _channels + _channels.close({cid}, force=True) + """)) + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + with self.assertRaises(channels.ChannelClosedError): + channels.close(cid) + + def test_close_used_multiple_times_by_single_user(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.send(cid, b'spam') + channels.send(cid, b'spam') + channels.recv(cid) + channels.close(cid, force=True) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'eggs') + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_channel_list_interpreters_invalid_channel(self): + cid = channels.create() + # Test for invalid channel ID. + with self.assertRaises(channels.ChannelNotFoundError): + channels.list_interpreters(1000, send=True) + + channels.close(cid) + # Test for a channel that has been closed. + with self.assertRaises(channels.ChannelClosedError): + channels.list_interpreters(cid, send=True) + + def test_channel_list_interpreters_invalid_args(self): + # Tests for invalid arguments passed to the API. 
+ cid = channels.create() + with self.assertRaises(TypeError): + channels.list_interpreters(cid) + + +class ChannelReleaseTests(TestBase): + + # XXX Add more test coverage a la the tests for close(). + + """ + - main / interp / other + - run in: current thread / new thread / other thread / different threads + - end / opposite + - force / no force + - used / not used (associated / not associated) + - empty / emptied / never emptied / partly emptied + - closed / not closed + - released / not released + - creator (interp) / other + - associated interpreter not running + - associated interpreter destroyed + """ + + """ + use + pre-release + release + after + check + """ + + """ + release in: main, interp1 + creator: same, other (incl. interp2) + + use: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all + pre-release: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all + pre-release forced: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all + + release: same + release forced: same + + use after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all + release after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all + check released: send/recv for same/other(incl. interp2) + check closed: send/recv for same/other(incl. interp2) + """ + + def test_single_user(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.recv(cid) + channels.release(cid, send=True, recv=True) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'eggs') + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_multiple_users(self): + cid = channels.create() + id1 = interpreters.create() + id2 = interpreters.create() + interpreters.run_string(id1, dedent(f""" + import _xxinterpchannels as _channels + _channels.send({cid}, b'spam') + """)) + out = _run_output(id2, dedent(f""" + import _xxinterpchannels as _channels + obj = _channels.recv({cid}) + _channels.release({cid}) + print(repr(obj)) + """)) + interpreters.run_string(id1, dedent(f""" + _channels.release({cid}) + """)) + + self.assertEqual(out.strip(), "b'spam'") + + def test_no_kwargs(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.recv(cid) + channels.release(cid) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'eggs') + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_multiple_times(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.recv(cid) + channels.release(cid, send=True, recv=True) + + with self.assertRaises(channels.ChannelClosedError): + channels.release(cid, send=True, recv=True) + + def test_with_unused_items(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.send(cid, b'ham') + channels.release(cid, send=True, recv=True) + + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_never_used(self): + cid = channels.create() + channels.release(cid) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'spam') + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_by_unassociated_interp(self): + cid = channels.create() + channels.send(cid, b'spam') + interp = interpreters.create() + interpreters.run_string(interp, dedent(f""" + import _xxinterpchannels as _channels + 
_channels.release({cid}) + """)) + obj = channels.recv(cid) + channels.release(cid) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'eggs') + self.assertEqual(obj, b'spam') + + def test_close_if_unassociated(self): + # XXX Something's not right with this test... + cid = channels.create() + interp = interpreters.create() + interpreters.run_string(interp, dedent(f""" + import _xxinterpchannels as _channels + obj = _channels.send({cid}, b'spam') + _channels.release({cid}) + """)) + + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + def test_partially(self): + # XXX Is partial close too weird/confusing? + cid = channels.create() + channels.send(cid, None) + channels.recv(cid) + channels.send(cid, b'spam') + channels.release(cid, send=True) + obj = channels.recv(cid) + + self.assertEqual(obj, b'spam') + + def test_used_multiple_times_by_single_user(self): + cid = channels.create() + channels.send(cid, b'spam') + channels.send(cid, b'spam') + channels.send(cid, b'spam') + channels.recv(cid) + channels.release(cid, send=True, recv=True) + + with self.assertRaises(channels.ChannelClosedError): + channels.send(cid, b'eggs') + with self.assertRaises(channels.ChannelClosedError): + channels.recv(cid) + + +class ChannelCloseFixture(namedtuple('ChannelCloseFixture', + 'end interp other extra creator')): + + # Set this to True to avoid creating interpreters, e.g. when + # scanning through test permutations without running them. + QUICK = False + + def __new__(cls, end, interp, other, extra, creator): + assert end in ('send', 'recv') + if cls.QUICK: + known = {} + else: + interp = Interpreter.from_raw(interp) + other = Interpreter.from_raw(other) + extra = Interpreter.from_raw(extra) + known = { + interp.name: interp, + other.name: other, + extra.name: extra, + } + if not creator: + creator = 'same' + self = super().__new__(cls, end, interp, other, extra, creator) + self._prepped = set() + self._state = ChannelState() + self._known = known + return self + + @property + def state(self): + return self._state + + @property + def cid(self): + try: + return self._cid + except AttributeError: + creator = self._get_interpreter(self.creator) + self._cid = self._new_channel(creator) + return self._cid + + def get_interpreter(self, interp): + interp = self._get_interpreter(interp) + self._prep_interpreter(interp) + return interp + + def expect_closed_error(self, end=None): + if end is None: + end = self.end + if end == 'recv' and self.state.closed == 'send': + return False + return bool(self.state.closed) + + def prep_interpreter(self, interp): + self._prep_interpreter(interp) + + def record_action(self, action, result): + self._state = result + + def clean_up(self): + clean_up_interpreters() + clean_up_channels() + + # internal methods + + def _new_channel(self, creator): + if creator.name == 'main': + return channels.create() + else: + ch = channels.create() + run_interp(creator.id, f""" + import _xxsubinterpreters + cid = _xxsubchannels.create() + # We purposefully send back an int to avoid tying the + # channel to the other interpreter. 
+ _xxsubchannels.send({ch}, int(cid)) + del _xxsubinterpreters + """) + self._cid = channels.recv(ch) + return self._cid + + def _get_interpreter(self, interp): + if interp in ('same', 'interp'): + return self.interp + elif interp == 'other': + return self.other + elif interp == 'extra': + return self.extra + else: + name = interp + try: + interp = self._known[name] + except KeyError: + interp = self._known[name] = Interpreter(name) + return interp + + def _prep_interpreter(self, interp): + if interp.id in self._prepped: + return + self._prepped.add(interp.id) + if interp.name == 'main': + return + run_interp(interp.id, f""" + import _xxinterpchannels as channels + import test.test__xxinterpchannels as helpers + ChannelState = helpers.ChannelState + try: + cid + except NameError: + cid = channels._channel_id({self.cid}) + """) + + +@unittest.skip('these tests take several hours to run') +class ExhaustiveChannelTests(TestBase): + + """ + - main / interp / other + - run in: current thread / new thread / other thread / different threads + - end / opposite + - force / no force + - used / not used (associated / not associated) + - empty / emptied / never emptied / partly emptied + - closed / not closed + - released / not released + - creator (interp) / other + - associated interpreter not running + - associated interpreter destroyed + + - close after unbound + """ + + """ + use + pre-close + close + after + check + """ + + """ + close in: main, interp1 + creator: same, other, extra + + use: None,send,recv,send/recv in None,same,other,same+other,all + pre-close: None,send,recv in None,same,other,same+other,all + pre-close forced: None,send,recv in None,same,other,same+other,all + + close: same + close forced: same + + use after: None,send,recv,send/recv in None,same,other,extra,same+other,all + close after: None,send,recv,send/recv in None,same,other,extra,same+other,all + check closed: send/recv for same/other(incl. 
interp2) + """ + + def iter_action_sets(self): + # - used / not used (associated / not associated) + # - empty / emptied / never emptied / partly emptied + # - closed / not closed + # - released / not released + + # never used + yield [] + + # only pre-closed (and possible used after) + for closeactions in self._iter_close_action_sets('same', 'other'): + yield closeactions + for postactions in self._iter_post_close_action_sets(): + yield closeactions + postactions + for closeactions in self._iter_close_action_sets('other', 'extra'): + yield closeactions + for postactions in self._iter_post_close_action_sets(): + yield closeactions + postactions + + # used + for useactions in self._iter_use_action_sets('same', 'other'): + yield useactions + for closeactions in self._iter_close_action_sets('same', 'other'): + actions = useactions + closeactions + yield actions + for postactions in self._iter_post_close_action_sets(): + yield actions + postactions + for closeactions in self._iter_close_action_sets('other', 'extra'): + actions = useactions + closeactions + yield actions + for postactions in self._iter_post_close_action_sets(): + yield actions + postactions + for useactions in self._iter_use_action_sets('other', 'extra'): + yield useactions + for closeactions in self._iter_close_action_sets('same', 'other'): + actions = useactions + closeactions + yield actions + for postactions in self._iter_post_close_action_sets(): + yield actions + postactions + for closeactions in self._iter_close_action_sets('other', 'extra'): + actions = useactions + closeactions + yield actions + for postactions in self._iter_post_close_action_sets(): + yield actions + postactions + + def _iter_use_action_sets(self, interp1, interp2): + interps = (interp1, interp2) + + # only recv end used + yield [ + ChannelAction('use', 'recv', interp1), + ] + yield [ + ChannelAction('use', 'recv', interp2), + ] + yield [ + ChannelAction('use', 'recv', interp1), + ChannelAction('use', 'recv', interp2), + ] + + # never emptied + yield [ + ChannelAction('use', 'send', interp1), + ] + yield [ + ChannelAction('use', 'send', interp2), + ] + yield [ + ChannelAction('use', 'send', interp1), + ChannelAction('use', 'send', interp2), + ] + + # partially emptied + for interp1 in interps: + for interp2 in interps: + for interp3 in interps: + yield [ + ChannelAction('use', 'send', interp1), + ChannelAction('use', 'send', interp2), + ChannelAction('use', 'recv', interp3), + ] + + # fully emptied + for interp1 in interps: + for interp2 in interps: + for interp3 in interps: + for interp4 in interps: + yield [ + ChannelAction('use', 'send', interp1), + ChannelAction('use', 'send', interp2), + ChannelAction('use', 'recv', interp3), + ChannelAction('use', 'recv', interp4), + ] + + def _iter_close_action_sets(self, interp1, interp2): + ends = ('recv', 'send') + interps = (interp1, interp2) + for force in (True, False): + op = 'force-close' if force else 'close' + for interp in interps: + for end in ends: + yield [ + ChannelAction(op, end, interp), + ] + for recvop in ('close', 'force-close'): + for sendop in ('close', 'force-close'): + for recv in interps: + for send in interps: + yield [ + ChannelAction(recvop, 'recv', recv), + ChannelAction(sendop, 'send', send), + ] + + def _iter_post_close_action_sets(self): + for interp in ('same', 'extra', 'other'): + yield [ + ChannelAction('use', 'recv', interp), + ] + yield [ + ChannelAction('use', 'send', interp), + ] + + def run_actions(self, fix, actions): + for action in actions: + self.run_action(fix, 
action) + + def run_action(self, fix, action, *, hideclosed=True): + end = action.resolve_end(fix.end) + interp = action.resolve_interp(fix.interp, fix.other, fix.extra) + fix.prep_interpreter(interp) + if interp.name == 'main': + result = run_action( + fix.cid, + action.action, + end, + fix.state, + hideclosed=hideclosed, + ) + fix.record_action(action, result) + else: + _cid = channels.create() + run_interp(interp.id, f""" + result = helpers.run_action( + {fix.cid}, + {repr(action.action)}, + {repr(end)}, + {repr(fix.state)}, + hideclosed={hideclosed}, + ) + channels.send({_cid}, result.pending.to_bytes(1, 'little')) + channels.send({_cid}, b'X' if result.closed else b'') + """) + result = ChannelState( + pending=int.from_bytes(channels.recv(_cid), 'little'), + closed=bool(channels.recv(_cid)), + ) + fix.record_action(action, result) + + def iter_fixtures(self): + # XXX threads? + interpreters = [ + ('main', 'interp', 'extra'), + ('interp', 'main', 'extra'), + ('interp1', 'interp2', 'extra'), + ('interp1', 'interp2', 'main'), + ] + for interp, other, extra in interpreters: + for creator in ('same', 'other', 'creator'): + for end in ('send', 'recv'): + yield ChannelCloseFixture(end, interp, other, extra, creator) + + def _close(self, fix, *, force): + op = 'force-close' if force else 'close' + close = ChannelAction(op, fix.end, 'same') + if not fix.expect_closed_error(): + self.run_action(fix, close, hideclosed=False) + else: + with self.assertRaises(channels.ChannelClosedError): + self.run_action(fix, close, hideclosed=False) + + def _assert_closed_in_interp(self, fix, interp=None): + if interp is None or interp.name == 'main': + with self.assertRaises(channels.ChannelClosedError): + channels.recv(fix.cid) + with self.assertRaises(channels.ChannelClosedError): + channels.send(fix.cid, b'spam') + with self.assertRaises(channels.ChannelClosedError): + channels.close(fix.cid) + with self.assertRaises(channels.ChannelClosedError): + channels.close(fix.cid, force=True) + else: + run_interp(interp.id, f""" + with helpers.expect_channel_closed(): + channels.recv(cid) + """) + run_interp(interp.id, f""" + with helpers.expect_channel_closed(): + channels.send(cid, b'spam') + """) + run_interp(interp.id, f""" + with helpers.expect_channel_closed(): + channels.close(cid) + """) + run_interp(interp.id, f""" + with helpers.expect_channel_closed(): + channels.close(cid, force=True) + """) + + def _assert_closed(self, fix): + self.assertTrue(fix.state.closed) + + for _ in range(fix.state.pending): + channels.recv(fix.cid) + self._assert_closed_in_interp(fix) + + for interp in ('same', 'other'): + interp = fix.get_interpreter(interp) + if interp.name == 'main': + continue + self._assert_closed_in_interp(fix, interp) + + interp = fix.get_interpreter('fresh') + self._assert_closed_in_interp(fix, interp) + + def _iter_close_tests(self, verbose=False): + i = 0 + for actions in self.iter_action_sets(): + print() + for fix in self.iter_fixtures(): + i += 1 + if i > 1000: + return + if verbose: + if (i - 1) % 6 == 0: + print() + print(i, fix, '({} actions)'.format(len(actions))) + else: + if (i - 1) % 6 == 0: + print(' ', end='') + print('.', end=''); sys.stdout.flush() + yield i, fix, actions + if verbose: + print('---') + print() + + # This is useful for scanning through the possible tests. 
+ def _skim_close_tests(self): + ChannelCloseFixture.QUICK = True + for i, fix, actions in self._iter_close_tests(): + pass + + def test_close(self): + for i, fix, actions in self._iter_close_tests(): + with self.subTest('{} {} {}'.format(i, fix, actions)): + fix.prep_interpreter(fix.interp) + self.run_actions(fix, actions) + + self._close(fix, force=False) + + self._assert_closed(fix) + # XXX Things slow down if we have too many interpreters. + fix.clean_up() + + def test_force_close(self): + for i, fix, actions in self._iter_close_tests(): + with self.subTest('{} {} {}'.format(i, fix, actions)): + fix.prep_interpreter(fix.interp) + self.run_actions(fix, actions) + + self._close(fix, force=True) + + self._assert_closed(fix) + # XXX Things slow down if we have too many interpreters. + fix.clean_up() + + +if __name__ == '__main__': + unittest.main() diff --git a/Lib/test/test__xxsubinterpreters.py b/Lib/test/test__xxsubinterpreters.py index 18900bb9f7162c..687fcf3b770522 100644 --- a/Lib/test/test__xxsubinterpreters.py +++ b/Lib/test/test__xxsubinterpreters.py @@ -9,6 +9,7 @@ import time import unittest +import _testcapi from test import support from test.support import import_helper from test.support import script_helper @@ -73,207 +74,6 @@ def run(): t.join() -#@contextmanager -#def run_threaded(id, source, **shared): -# def run(): -# run_interp(id, source, **shared) -# t = threading.Thread(target=run) -# t.start() -# yield -# t.join() - - -def run_interp(id, source, **shared): - _run_interp(id, source, shared) - - -def _run_interp(id, source, shared, _mainns={}): - source = dedent(source) - main = interpreters.get_main() - if main == id: - if interpreters.get_current() != main: - raise RuntimeError - # XXX Run a func? - exec(source, _mainns) - else: - interpreters.run_string(id, source, shared) - - -class Interpreter(namedtuple('Interpreter', 'name id')): - - @classmethod - def from_raw(cls, raw): - if isinstance(raw, cls): - return raw - elif isinstance(raw, str): - return cls(raw) - else: - raise NotImplementedError - - def __new__(cls, name=None, id=None): - main = interpreters.get_main() - if id == main: - if not name: - name = 'main' - elif name != 'main': - raise ValueError( - 'name mismatch (expected "main", got "{}")'.format(name)) - id = main - elif id is not None: - if not name: - name = 'interp' - elif name == 'main': - raise ValueError('name mismatch (unexpected "main")') - if not isinstance(id, interpreters.InterpreterID): - id = interpreters.InterpreterID(id) - elif not name or name == 'main': - name = 'main' - id = main - else: - id = interpreters.create() - self = super().__new__(cls, name, id) - return self - - -# XXX expect_channel_closed() is unnecessary once we improve exc propagation. 
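The helpers being removed below duplicate functionality that now lives in the new Lib/test/test__xxinterpchannels.py shown above; the channel primitives themselves moved from _xxsubinterpreters.channel_* to the separate _xxinterpchannels module under shorter names. A minimal sketch of the renamed surface, assuming a CPython build that still ships both private extension modules:

    # Hedged sketch: requires the private _xxinterpchannels module
    # (available only in CPython builds that include it).
    import _xxinterpchannels as channels

    cid = channels.create()          # was: _xxsubinterpreters.channel_create()
    channels.send(cid, b'spam')      # was: channel_send(cid, b'spam')
    obj = channels.recv(cid)         # was: channel_recv(cid)
    assert obj == b'spam'
    channels.close(cid, force=True)  # was: channel_close(cid, force=True)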
- -@contextlib.contextmanager -def expect_channel_closed(): - try: - yield - except interpreters.ChannelClosedError: - pass - else: - assert False, 'channel not closed' - - -class ChannelAction(namedtuple('ChannelAction', 'action end interp')): - - def __new__(cls, action, end=None, interp=None): - if not end: - end = 'both' - if not interp: - interp = 'main' - self = super().__new__(cls, action, end, interp) - return self - - def __init__(self, *args, **kwargs): - if self.action == 'use': - if self.end not in ('same', 'opposite', 'send', 'recv'): - raise ValueError(self.end) - elif self.action in ('close', 'force-close'): - if self.end not in ('both', 'same', 'opposite', 'send', 'recv'): - raise ValueError(self.end) - else: - raise ValueError(self.action) - if self.interp not in ('main', 'same', 'other', 'extra'): - raise ValueError(self.interp) - - def resolve_end(self, end): - if self.end == 'same': - return end - elif self.end == 'opposite': - return 'recv' if end == 'send' else 'send' - else: - return self.end - - def resolve_interp(self, interp, other, extra): - if self.interp == 'same': - return interp - elif self.interp == 'other': - if other is None: - raise RuntimeError - return other - elif self.interp == 'extra': - if extra is None: - raise RuntimeError - return extra - elif self.interp == 'main': - if interp.name == 'main': - return interp - elif other and other.name == 'main': - return other - else: - raise RuntimeError - # Per __init__(), there aren't any others. - - -class ChannelState(namedtuple('ChannelState', 'pending closed')): - - def __new__(cls, pending=0, *, closed=False): - self = super().__new__(cls, pending, closed) - return self - - def incr(self): - return type(self)(self.pending + 1, closed=self.closed) - - def decr(self): - return type(self)(self.pending - 1, closed=self.closed) - - def close(self, *, force=True): - if self.closed: - if not force or self.pending == 0: - return self - return type(self)(0 if force else self.pending, closed=True) - - -def run_action(cid, action, end, state, *, hideclosed=True): - if state.closed: - if action == 'use' and end == 'recv' and state.pending: - expectfail = False - else: - expectfail = True - else: - expectfail = False - - try: - result = _run_action(cid, action, end, state) - except interpreters.ChannelClosedError: - if not hideclosed and not expectfail: - raise - result = state.close() - else: - if expectfail: - raise ... 
# XXX - return result - - -def _run_action(cid, action, end, state): - if action == 'use': - if end == 'send': - interpreters.channel_send(cid, b'spam') - return state.incr() - elif end == 'recv': - if not state.pending: - try: - interpreters.channel_recv(cid) - except interpreters.ChannelEmptyError: - return state - else: - raise Exception('expected ChannelEmptyError') - else: - interpreters.channel_recv(cid) - return state.decr() - else: - raise ValueError(end) - elif action == 'close': - kwargs = {} - if end in ('recv', 'send'): - kwargs[end] = True - interpreters.channel_close(cid, **kwargs) - return state.close() - elif action == 'force-close': - kwargs = { - 'force': True, - } - if end in ('recv', 'send'): - kwargs[end] = True - interpreters.channel_close(cid, **kwargs) - return state.close(force=True) - else: - raise ValueError(action) - - def clean_up_interpreters(): for id in interpreters.list_all(): if id == 0: # main @@ -284,18 +84,9 @@ def clean_up_interpreters(): pass # already destroyed -def clean_up_channels(): - for cid in interpreters.channel_list_all(): - try: - interpreters.channel_destroy(cid) - except interpreters.ChannelNotFoundError: - pass # already destroyed - - class TestBase(unittest.TestCase): def tearDown(self): - clean_up_channels() clean_up_interpreters() @@ -354,30 +145,20 @@ class SubBytes(bytes): class ShareableTypeTests(unittest.TestCase): - def setUp(self): - super().setUp() - self.cid = interpreters.channel_create() - - def tearDown(self): - interpreters.channel_destroy(self.cid) - super().tearDown() - def _assert_values(self, values): for obj in values: with self.subTest(obj): - interpreters.channel_send(self.cid, obj) - got = interpreters.channel_recv(self.cid) + xid = _testcapi.get_crossinterp_data(obj) + got = _testcapi.restore_crossinterp_data(xid) self.assertEqual(got, obj) self.assertIs(type(got), type(obj)) - # XXX Check the following in the channel tests? - #self.assertIsNot(got, obj) def test_singletons(self): for obj in [None]: with self.subTest(obj): - interpreters.channel_send(self.cid, obj) - got = interpreters.channel_recv(self.cid) + xid = _testcapi.get_crossinterp_data(obj) + got = _testcapi.restore_crossinterp_data(xid) # XXX What about between interpreters? 
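The reworked ShareableTypeTests above check shareability by round-tripping objects through _testcapi rather than through a channel. A minimal sketch of that round trip, assuming a build whose _testcapi module exposes the two helpers used in these tests:

    # Hedged sketch of the cross-interpreter data round trip exercised above.
    from test.support import import_helper

    _testcapi = import_helper.import_module('_testcapi')

    for obj in (None, 'spam', b'spam', 42):
        xid = _testcapi.get_crossinterp_data(obj)      # wrap as cross-interpreter data
        got = _testcapi.restore_crossinterp_data(xid)  # unwrap in this interpreter
        assert got == obj and type(got) is type(obj)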
self.assertIs(got, obj) @@ -408,7 +189,7 @@ def test_non_shareable_int(self): for i in ints: with self.subTest(i): with self.assertRaises(OverflowError): - interpreters.channel_send(self.cid, i) + _testcapi.get_crossinterp_data(i) class ModuleTests(TestBase): @@ -555,7 +336,7 @@ def test_bad_id(self): self.assertRaises(OverflowError, interpreters.InterpreterID, 2**64) def test_does_not_exist(self): - id = interpreters.channel_create() + id = interpreters.create() with self.assertRaises(RuntimeError): interpreters.InterpreterID(int(id) + 1) # unforced @@ -864,6 +645,22 @@ def f(): self.assertEqual(out, 'it worked!') + def test_shareable_types(self): + interp = interpreters.create() + objects = [ + None, + 'spam', + b'spam', + 42, + ] + for obj in objects: + with self.subTest(obj): + interpreters.run_string( + interp, + f'assert(obj == {obj!r})', + shared=dict(obj=obj), + ) + def test_os_exec(self): expected = 'spam spam spam spam spam' subinterp = interpreters.create() @@ -1130,1285 +927,5 @@ def f(): self.assertEqual(retcode, 0) -################################## -# channel tests - -class ChannelIDTests(TestBase): - - def test_default_kwargs(self): - cid = interpreters._channel_id(10, force=True) - - self.assertEqual(int(cid), 10) - self.assertEqual(cid.end, 'both') - - def test_with_kwargs(self): - cid = interpreters._channel_id(10, send=True, force=True) - self.assertEqual(cid.end, 'send') - - cid = interpreters._channel_id(10, send=True, recv=False, force=True) - self.assertEqual(cid.end, 'send') - - cid = interpreters._channel_id(10, recv=True, force=True) - self.assertEqual(cid.end, 'recv') - - cid = interpreters._channel_id(10, recv=True, send=False, force=True) - self.assertEqual(cid.end, 'recv') - - cid = interpreters._channel_id(10, send=True, recv=True, force=True) - self.assertEqual(cid.end, 'both') - - def test_coerce_id(self): - class Int(str): - def __index__(self): - return 10 - - cid = interpreters._channel_id(Int(), force=True) - self.assertEqual(int(cid), 10) - - def test_bad_id(self): - self.assertRaises(TypeError, interpreters._channel_id, object()) - self.assertRaises(TypeError, interpreters._channel_id, 10.0) - self.assertRaises(TypeError, interpreters._channel_id, '10') - self.assertRaises(TypeError, interpreters._channel_id, b'10') - self.assertRaises(ValueError, interpreters._channel_id, -1) - self.assertRaises(OverflowError, interpreters._channel_id, 2**64) - - def test_bad_kwargs(self): - with self.assertRaises(ValueError): - interpreters._channel_id(10, send=False, recv=False) - - def test_does_not_exist(self): - cid = interpreters.channel_create() - with self.assertRaises(interpreters.ChannelNotFoundError): - interpreters._channel_id(int(cid) + 1) # unforced - - def test_str(self): - cid = interpreters._channel_id(10, force=True) - self.assertEqual(str(cid), '10') - - def test_repr(self): - cid = interpreters._channel_id(10, force=True) - self.assertEqual(repr(cid), 'ChannelID(10)') - - cid = interpreters._channel_id(10, send=True, force=True) - self.assertEqual(repr(cid), 'ChannelID(10, send=True)') - - cid = interpreters._channel_id(10, recv=True, force=True) - self.assertEqual(repr(cid), 'ChannelID(10, recv=True)') - - cid = interpreters._channel_id(10, send=True, recv=True, force=True) - self.assertEqual(repr(cid), 'ChannelID(10)') - - def test_equality(self): - cid1 = interpreters.channel_create() - cid2 = interpreters._channel_id(int(cid1)) - cid3 = interpreters.channel_create() - - self.assertTrue(cid1 == cid1) - self.assertTrue(cid1 == cid2) - 
self.assertTrue(cid1 == int(cid1)) - self.assertTrue(int(cid1) == cid1) - self.assertTrue(cid1 == float(int(cid1))) - self.assertTrue(float(int(cid1)) == cid1) - self.assertFalse(cid1 == float(int(cid1)) + 0.1) - self.assertFalse(cid1 == str(int(cid1))) - self.assertFalse(cid1 == 2**1000) - self.assertFalse(cid1 == float('inf')) - self.assertFalse(cid1 == 'spam') - self.assertFalse(cid1 == cid3) - - self.assertFalse(cid1 != cid1) - self.assertFalse(cid1 != cid2) - self.assertTrue(cid1 != cid3) - - def test_shareable(self): - chan = interpreters.channel_create() - - obj = interpreters.channel_create() - interpreters.channel_send(chan, obj) - got = interpreters.channel_recv(chan) - - self.assertEqual(got, obj) - self.assertIs(type(got), type(obj)) - # XXX Check the following in the channel tests? - #self.assertIsNot(got, obj) - - -class ChannelTests(TestBase): - - def test_create_cid(self): - cid = interpreters.channel_create() - self.assertIsInstance(cid, interpreters.ChannelID) - - def test_sequential_ids(self): - before = interpreters.channel_list_all() - id1 = interpreters.channel_create() - id2 = interpreters.channel_create() - id3 = interpreters.channel_create() - after = interpreters.channel_list_all() - - self.assertEqual(id2, int(id1) + 1) - self.assertEqual(id3, int(id2) + 1) - self.assertEqual(set(after) - set(before), {id1, id2, id3}) - - def test_ids_global(self): - id1 = interpreters.create() - out = _run_output(id1, dedent(""" - import _xxsubinterpreters as _interpreters - cid = _interpreters.channel_create() - print(cid) - """)) - cid1 = int(out.strip()) - - id2 = interpreters.create() - out = _run_output(id2, dedent(""" - import _xxsubinterpreters as _interpreters - cid = _interpreters.channel_create() - print(cid) - """)) - cid2 = int(out.strip()) - - self.assertEqual(cid2, int(cid1) + 1) - - def test_channel_list_interpreters_none(self): - """Test listing interpreters for a channel with no associations.""" - # Test for channel with no associated interpreters. - cid = interpreters.channel_create() - send_interps = interpreters.channel_list_interpreters(cid, send=True) - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(send_interps, []) - self.assertEqual(recv_interps, []) - - def test_channel_list_interpreters_basic(self): - """Test basic listing channel interpreters.""" - interp0 = interpreters.get_main() - cid = interpreters.channel_create() - interpreters.channel_send(cid, "send") - # Test for a channel that has one end associated to an interpreter. - send_interps = interpreters.channel_list_interpreters(cid, send=True) - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(send_interps, [interp0]) - self.assertEqual(recv_interps, []) - - interp1 = interpreters.create() - _run_output(interp1, dedent(f""" - import _xxsubinterpreters as _interpreters - obj = _interpreters.channel_recv({cid}) - """)) - # Test for channel that has both ends associated to an interpreter. 
- send_interps = interpreters.channel_list_interpreters(cid, send=True) - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(send_interps, [interp0]) - self.assertEqual(recv_interps, [interp1]) - - def test_channel_list_interpreters_multiple(self): - """Test listing interpreters for a channel with many associations.""" - interp0 = interpreters.get_main() - interp1 = interpreters.create() - interp2 = interpreters.create() - interp3 = interpreters.create() - cid = interpreters.channel_create() - - interpreters.channel_send(cid, "send") - _run_output(interp1, dedent(f""" - import _xxsubinterpreters as _interpreters - _interpreters.channel_send({cid}, "send") - """)) - _run_output(interp2, dedent(f""" - import _xxsubinterpreters as _interpreters - obj = _interpreters.channel_recv({cid}) - """)) - _run_output(interp3, dedent(f""" - import _xxsubinterpreters as _interpreters - obj = _interpreters.channel_recv({cid}) - """)) - send_interps = interpreters.channel_list_interpreters(cid, send=True) - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(set(send_interps), {interp0, interp1}) - self.assertEqual(set(recv_interps), {interp2, interp3}) - - def test_channel_list_interpreters_destroyed(self): - """Test listing channel interpreters with a destroyed interpreter.""" - interp0 = interpreters.get_main() - interp1 = interpreters.create() - cid = interpreters.channel_create() - interpreters.channel_send(cid, "send") - _run_output(interp1, dedent(f""" - import _xxsubinterpreters as _interpreters - obj = _interpreters.channel_recv({cid}) - """)) - # Should be one interpreter associated with each end. - send_interps = interpreters.channel_list_interpreters(cid, send=True) - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(send_interps, [interp0]) - self.assertEqual(recv_interps, [interp1]) - - interpreters.destroy(interp1) - # Destroyed interpreter should not be listed. - send_interps = interpreters.channel_list_interpreters(cid, send=True) - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(send_interps, [interp0]) - self.assertEqual(recv_interps, []) - - def test_channel_list_interpreters_released(self): - """Test listing channel interpreters with a released channel.""" - # Set up one channel with main interpreter on the send end and two - # subinterpreters on the receive end. - interp0 = interpreters.get_main() - interp1 = interpreters.create() - interp2 = interpreters.create() - cid = interpreters.channel_create() - interpreters.channel_send(cid, "data") - _run_output(interp1, dedent(f""" - import _xxsubinterpreters as _interpreters - obj = _interpreters.channel_recv({cid}) - """)) - interpreters.channel_send(cid, "data") - _run_output(interp2, dedent(f""" - import _xxsubinterpreters as _interpreters - obj = _interpreters.channel_recv({cid}) - """)) - # Check the setup. - send_interps = interpreters.channel_list_interpreters(cid, send=True) - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(len(send_interps), 1) - self.assertEqual(len(recv_interps), 2) - - # Release the main interpreter from the send end. - interpreters.channel_release(cid, send=True) - # Send end should have no associated interpreters. 
- send_interps = interpreters.channel_list_interpreters(cid, send=True) - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(len(send_interps), 0) - self.assertEqual(len(recv_interps), 2) - - # Release one of the subinterpreters from the receive end. - _run_output(interp2, dedent(f""" - import _xxsubinterpreters as _interpreters - _interpreters.channel_release({cid}) - """)) - # Receive end should have the released interpreter removed. - send_interps = interpreters.channel_list_interpreters(cid, send=True) - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(len(send_interps), 0) - self.assertEqual(recv_interps, [interp1]) - - def test_channel_list_interpreters_closed(self): - """Test listing channel interpreters with a closed channel.""" - interp0 = interpreters.get_main() - interp1 = interpreters.create() - cid = interpreters.channel_create() - # Put something in the channel so that it's not empty. - interpreters.channel_send(cid, "send") - - # Check initial state. - send_interps = interpreters.channel_list_interpreters(cid, send=True) - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(len(send_interps), 1) - self.assertEqual(len(recv_interps), 0) - - # Force close the channel. - interpreters.channel_close(cid, force=True) - # Both ends should raise an error. - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_list_interpreters(cid, send=True) - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_list_interpreters(cid, send=False) - - def test_channel_list_interpreters_closed_send_end(self): - """Test listing channel interpreters with a channel's send end closed.""" - interp0 = interpreters.get_main() - interp1 = interpreters.create() - cid = interpreters.channel_create() - # Put something in the channel so that it's not empty. - interpreters.channel_send(cid, "send") - - # Check initial state. - send_interps = interpreters.channel_list_interpreters(cid, send=True) - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(len(send_interps), 1) - self.assertEqual(len(recv_interps), 0) - - # Close the send end of the channel. - interpreters.channel_close(cid, send=True) - # Send end should raise an error. - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_list_interpreters(cid, send=True) - # Receive end should not be closed (since channel is not empty). - recv_interps = interpreters.channel_list_interpreters(cid, send=False) - self.assertEqual(len(recv_interps), 0) - - # Close the receive end of the channel from a subinterpreter. - _run_output(interp1, dedent(f""" - import _xxsubinterpreters as _interpreters - _interpreters.channel_close({cid}, force=True) - """)) - # Both ends should raise an error. 
- with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_list_interpreters(cid, send=True) - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_list_interpreters(cid, send=False) - - #################### - - def test_send_recv_main(self): - cid = interpreters.channel_create() - orig = b'spam' - interpreters.channel_send(cid, orig) - obj = interpreters.channel_recv(cid) - - self.assertEqual(obj, orig) - self.assertIsNot(obj, orig) - - def test_send_recv_same_interpreter(self): - id1 = interpreters.create() - out = _run_output(id1, dedent(""" - import _xxsubinterpreters as _interpreters - cid = _interpreters.channel_create() - orig = b'spam' - _interpreters.channel_send(cid, orig) - obj = _interpreters.channel_recv(cid) - assert obj is not orig - assert obj == orig - """)) - - def test_send_recv_different_interpreters(self): - cid = interpreters.channel_create() - id1 = interpreters.create() - out = _run_output(id1, dedent(f""" - import _xxsubinterpreters as _interpreters - _interpreters.channel_send({cid}, b'spam') - """)) - obj = interpreters.channel_recv(cid) - - self.assertEqual(obj, b'spam') - - def test_send_recv_different_threads(self): - cid = interpreters.channel_create() - - def f(): - while True: - try: - obj = interpreters.channel_recv(cid) - break - except interpreters.ChannelEmptyError: - time.sleep(0.1) - interpreters.channel_send(cid, obj) - t = threading.Thread(target=f) - t.start() - - interpreters.channel_send(cid, b'spam') - t.join() - obj = interpreters.channel_recv(cid) - - self.assertEqual(obj, b'spam') - - def test_send_recv_different_interpreters_and_threads(self): - cid = interpreters.channel_create() - id1 = interpreters.create() - out = None - - def f(): - nonlocal out - out = _run_output(id1, dedent(f""" - import time - import _xxsubinterpreters as _interpreters - while True: - try: - obj = _interpreters.channel_recv({cid}) - break - except _interpreters.ChannelEmptyError: - time.sleep(0.1) - assert(obj == b'spam') - _interpreters.channel_send({cid}, b'eggs') - """)) - t = threading.Thread(target=f) - t.start() - - interpreters.channel_send(cid, b'spam') - t.join() - obj = interpreters.channel_recv(cid) - - self.assertEqual(obj, b'eggs') - - def test_send_not_found(self): - with self.assertRaises(interpreters.ChannelNotFoundError): - interpreters.channel_send(10, b'spam') - - def test_recv_not_found(self): - with self.assertRaises(interpreters.ChannelNotFoundError): - interpreters.channel_recv(10) - - def test_recv_empty(self): - cid = interpreters.channel_create() - with self.assertRaises(interpreters.ChannelEmptyError): - interpreters.channel_recv(cid) - - def test_recv_default(self): - default = object() - cid = interpreters.channel_create() - obj1 = interpreters.channel_recv(cid, default) - interpreters.channel_send(cid, None) - interpreters.channel_send(cid, 1) - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'eggs') - obj2 = interpreters.channel_recv(cid, default) - obj3 = interpreters.channel_recv(cid, default) - obj4 = interpreters.channel_recv(cid) - obj5 = interpreters.channel_recv(cid, default) - obj6 = interpreters.channel_recv(cid, default) - - self.assertIs(obj1, default) - self.assertIs(obj2, None) - self.assertEqual(obj3, 1) - self.assertEqual(obj4, b'spam') - self.assertEqual(obj5, b'eggs') - self.assertIs(obj6, default) - - def test_recv_sending_interp_destroyed(self): - cid = interpreters.channel_create() - interp = interpreters.create() - 
interpreters.run_string(interp, dedent(f""" - import _xxsubinterpreters as _interpreters - _interpreters.channel_send({cid}, b'spam') - """)) - interpreters.destroy(interp) - - with self.assertRaisesRegex(RuntimeError, - 'unrecognized interpreter ID'): - interpreters.channel_recv(cid) - - def test_run_string_arg_unresolved(self): - cid = interpreters.channel_create() - interp = interpreters.create() - - out = _run_output(interp, dedent(""" - import _xxsubinterpreters as _interpreters - print(cid.end) - _interpreters.channel_send(cid, b'spam') - """), - dict(cid=cid.send)) - obj = interpreters.channel_recv(cid) - - self.assertEqual(obj, b'spam') - self.assertEqual(out.strip(), 'send') - - # XXX For now there is no high-level channel into which the - # sent channel ID can be converted... - # Note: this test caused crashes on some buildbots (bpo-33615). - @unittest.skip('disabled until high-level channels exist') - def test_run_string_arg_resolved(self): - cid = interpreters.channel_create() - cid = interpreters._channel_id(cid, _resolve=True) - interp = interpreters.create() - - out = _run_output(interp, dedent(""" - import _xxsubinterpreters as _interpreters - print(chan.id.end) - _interpreters.channel_send(chan.id, b'spam') - """), - dict(chan=cid.send)) - obj = interpreters.channel_recv(cid) - - self.assertEqual(obj, b'spam') - self.assertEqual(out.strip(), 'send') - - # close - - def test_close_single_user(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_recv(cid) - interpreters.channel_close(cid) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'eggs') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_close_multiple_users(self): - cid = interpreters.channel_create() - id1 = interpreters.create() - id2 = interpreters.create() - interpreters.run_string(id1, dedent(f""" - import _xxsubinterpreters as _interpreters - _interpreters.channel_send({cid}, b'spam') - """)) - interpreters.run_string(id2, dedent(f""" - import _xxsubinterpreters as _interpreters - _interpreters.channel_recv({cid}) - """)) - interpreters.channel_close(cid) - with self.assertRaises(interpreters.RunFailedError) as cm: - interpreters.run_string(id1, dedent(f""" - _interpreters.channel_send({cid}, b'spam') - """)) - self.assertIn('ChannelClosedError', str(cm.exception)) - with self.assertRaises(interpreters.RunFailedError) as cm: - interpreters.run_string(id2, dedent(f""" - _interpreters.channel_send({cid}, b'spam') - """)) - self.assertIn('ChannelClosedError', str(cm.exception)) - - def test_close_multiple_times(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_recv(cid) - interpreters.channel_close(cid) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_close(cid) - - def test_close_empty(self): - tests = [ - (False, False), - (True, False), - (False, True), - (True, True), - ] - for send, recv in tests: - with self.subTest((send, recv)): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_recv(cid) - interpreters.channel_close(cid, send=send, recv=recv) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'eggs') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_close_defaults_with_unused_items(self): - cid = 
interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'ham') - - with self.assertRaises(interpreters.ChannelNotEmptyError): - interpreters.channel_close(cid) - interpreters.channel_recv(cid) - interpreters.channel_send(cid, b'eggs') - - def test_close_recv_with_unused_items_unforced(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'ham') - - with self.assertRaises(interpreters.ChannelNotEmptyError): - interpreters.channel_close(cid, recv=True) - interpreters.channel_recv(cid) - interpreters.channel_send(cid, b'eggs') - interpreters.channel_recv(cid) - interpreters.channel_recv(cid) - interpreters.channel_close(cid, recv=True) - - def test_close_send_with_unused_items_unforced(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'ham') - interpreters.channel_close(cid, send=True) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'eggs') - interpreters.channel_recv(cid) - interpreters.channel_recv(cid) - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_close_both_with_unused_items_unforced(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'ham') - - with self.assertRaises(interpreters.ChannelNotEmptyError): - interpreters.channel_close(cid, recv=True, send=True) - interpreters.channel_recv(cid) - interpreters.channel_send(cid, b'eggs') - interpreters.channel_recv(cid) - interpreters.channel_recv(cid) - interpreters.channel_close(cid, recv=True) - - def test_close_recv_with_unused_items_forced(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'ham') - interpreters.channel_close(cid, recv=True, force=True) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'eggs') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_close_send_with_unused_items_forced(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'ham') - interpreters.channel_close(cid, send=True, force=True) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'eggs') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_close_both_with_unused_items_forced(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'ham') - interpreters.channel_close(cid, send=True, recv=True, force=True) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'eggs') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_close_never_used(self): - cid = interpreters.channel_create() - interpreters.channel_close(cid) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'spam') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_close_by_unassociated_interp(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interp = interpreters.create() - interpreters.run_string(interp, dedent(f""" - import 
_xxsubinterpreters as _interpreters - _interpreters.channel_close({cid}, force=True) - """)) - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_close(cid) - - def test_close_used_multiple_times_by_single_user(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'spam') - interpreters.channel_recv(cid) - interpreters.channel_close(cid, force=True) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'eggs') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_channel_list_interpreters_invalid_channel(self): - cid = interpreters.channel_create() - # Test for invalid channel ID. - with self.assertRaises(interpreters.ChannelNotFoundError): - interpreters.channel_list_interpreters(1000, send=True) - - interpreters.channel_close(cid) - # Test for a channel that has been closed. - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_list_interpreters(cid, send=True) - - def test_channel_list_interpreters_invalid_args(self): - # Tests for invalid arguments passed to the API. - cid = interpreters.channel_create() - with self.assertRaises(TypeError): - interpreters.channel_list_interpreters(cid) - - -class ChannelReleaseTests(TestBase): - - # XXX Add more test coverage a la the tests for close(). - - """ - - main / interp / other - - run in: current thread / new thread / other thread / different threads - - end / opposite - - force / no force - - used / not used (associated / not associated) - - empty / emptied / never emptied / partly emptied - - closed / not closed - - released / not released - - creator (interp) / other - - associated interpreter not running - - associated interpreter destroyed - """ - - """ - use - pre-release - release - after - check - """ - - """ - release in: main, interp1 - creator: same, other (incl. interp2) - - use: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all - pre-release: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all - pre-release forced: None,send,recv,both in None,same,other(incl. interp2),same+other(incl. interp2),all - - release: same - release forced: same - - use after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all - release after: None,send,recv,send/recv in None,same,other(incl. interp2),same+other(incl. interp2),all - check released: send/recv for same/other(incl. interp2) - check closed: send/recv for same/other(incl. 
interp2) - """ - - def test_single_user(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_recv(cid) - interpreters.channel_release(cid, send=True, recv=True) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'eggs') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_multiple_users(self): - cid = interpreters.channel_create() - id1 = interpreters.create() - id2 = interpreters.create() - interpreters.run_string(id1, dedent(f""" - import _xxsubinterpreters as _interpreters - _interpreters.channel_send({cid}, b'spam') - """)) - out = _run_output(id2, dedent(f""" - import _xxsubinterpreters as _interpreters - obj = _interpreters.channel_recv({cid}) - _interpreters.channel_release({cid}) - print(repr(obj)) - """)) - interpreters.run_string(id1, dedent(f""" - _interpreters.channel_release({cid}) - """)) - - self.assertEqual(out.strip(), "b'spam'") - - def test_no_kwargs(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_recv(cid) - interpreters.channel_release(cid) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'eggs') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_multiple_times(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_recv(cid) - interpreters.channel_release(cid, send=True, recv=True) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_release(cid, send=True, recv=True) - - def test_with_unused_items(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'ham') - interpreters.channel_release(cid, send=True, recv=True) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_never_used(self): - cid = interpreters.channel_create() - interpreters.channel_release(cid) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'spam') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_by_unassociated_interp(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interp = interpreters.create() - interpreters.run_string(interp, dedent(f""" - import _xxsubinterpreters as _interpreters - _interpreters.channel_release({cid}) - """)) - obj = interpreters.channel_recv(cid) - interpreters.channel_release(cid) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'eggs') - self.assertEqual(obj, b'spam') - - def test_close_if_unassociated(self): - # XXX Something's not right with this test... - cid = interpreters.channel_create() - interp = interpreters.create() - interpreters.run_string(interp, dedent(f""" - import _xxsubinterpreters as _interpreters - obj = _interpreters.channel_send({cid}, b'spam') - _interpreters.channel_release({cid}) - """)) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - def test_partially(self): - # XXX Is partial close too weird/confusing? 
- cid = interpreters.channel_create() - interpreters.channel_send(cid, None) - interpreters.channel_recv(cid) - interpreters.channel_send(cid, b'spam') - interpreters.channel_release(cid, send=True) - obj = interpreters.channel_recv(cid) - - self.assertEqual(obj, b'spam') - - def test_used_multiple_times_by_single_user(self): - cid = interpreters.channel_create() - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'spam') - interpreters.channel_send(cid, b'spam') - interpreters.channel_recv(cid) - interpreters.channel_release(cid, send=True, recv=True) - - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(cid, b'eggs') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(cid) - - -class ChannelCloseFixture(namedtuple('ChannelCloseFixture', - 'end interp other extra creator')): - - # Set this to True to avoid creating interpreters, e.g. when - # scanning through test permutations without running them. - QUICK = False - - def __new__(cls, end, interp, other, extra, creator): - assert end in ('send', 'recv') - if cls.QUICK: - known = {} - else: - interp = Interpreter.from_raw(interp) - other = Interpreter.from_raw(other) - extra = Interpreter.from_raw(extra) - known = { - interp.name: interp, - other.name: other, - extra.name: extra, - } - if not creator: - creator = 'same' - self = super().__new__(cls, end, interp, other, extra, creator) - self._prepped = set() - self._state = ChannelState() - self._known = known - return self - - @property - def state(self): - return self._state - - @property - def cid(self): - try: - return self._cid - except AttributeError: - creator = self._get_interpreter(self.creator) - self._cid = self._new_channel(creator) - return self._cid - - def get_interpreter(self, interp): - interp = self._get_interpreter(interp) - self._prep_interpreter(interp) - return interp - - def expect_closed_error(self, end=None): - if end is None: - end = self.end - if end == 'recv' and self.state.closed == 'send': - return False - return bool(self.state.closed) - - def prep_interpreter(self, interp): - self._prep_interpreter(interp) - - def record_action(self, action, result): - self._state = result - - def clean_up(self): - clean_up_interpreters() - clean_up_channels() - - # internal methods - - def _new_channel(self, creator): - if creator.name == 'main': - return interpreters.channel_create() - else: - ch = interpreters.channel_create() - run_interp(creator.id, f""" - import _xxsubinterpreters - cid = _xxsubinterpreters.channel_create() - # We purposefully send back an int to avoid tying the - # channel to the other interpreter. 
- _xxsubinterpreters.channel_send({ch}, int(cid)) - del _xxsubinterpreters - """) - self._cid = interpreters.channel_recv(ch) - return self._cid - - def _get_interpreter(self, interp): - if interp in ('same', 'interp'): - return self.interp - elif interp == 'other': - return self.other - elif interp == 'extra': - return self.extra - else: - name = interp - try: - interp = self._known[name] - except KeyError: - interp = self._known[name] = Interpreter(name) - return interp - - def _prep_interpreter(self, interp): - if interp.id in self._prepped: - return - self._prepped.add(interp.id) - if interp.name == 'main': - return - run_interp(interp.id, f""" - import _xxsubinterpreters as interpreters - import test.test__xxsubinterpreters as helpers - ChannelState = helpers.ChannelState - try: - cid - except NameError: - cid = interpreters._channel_id({self.cid}) - """) - - -@unittest.skip('these tests take several hours to run') -class ExhaustiveChannelTests(TestBase): - - """ - - main / interp / other - - run in: current thread / new thread / other thread / different threads - - end / opposite - - force / no force - - used / not used (associated / not associated) - - empty / emptied / never emptied / partly emptied - - closed / not closed - - released / not released - - creator (interp) / other - - associated interpreter not running - - associated interpreter destroyed - - - close after unbound - """ - - """ - use - pre-close - close - after - check - """ - - """ - close in: main, interp1 - creator: same, other, extra - - use: None,send,recv,send/recv in None,same,other,same+other,all - pre-close: None,send,recv in None,same,other,same+other,all - pre-close forced: None,send,recv in None,same,other,same+other,all - - close: same - close forced: same - - use after: None,send,recv,send/recv in None,same,other,extra,same+other,all - close after: None,send,recv,send/recv in None,same,other,extra,same+other,all - check closed: send/recv for same/other(incl. 
interp2) - """ - - def iter_action_sets(self): - # - used / not used (associated / not associated) - # - empty / emptied / never emptied / partly emptied - # - closed / not closed - # - released / not released - - # never used - yield [] - - # only pre-closed (and possible used after) - for closeactions in self._iter_close_action_sets('same', 'other'): - yield closeactions - for postactions in self._iter_post_close_action_sets(): - yield closeactions + postactions - for closeactions in self._iter_close_action_sets('other', 'extra'): - yield closeactions - for postactions in self._iter_post_close_action_sets(): - yield closeactions + postactions - - # used - for useactions in self._iter_use_action_sets('same', 'other'): - yield useactions - for closeactions in self._iter_close_action_sets('same', 'other'): - actions = useactions + closeactions - yield actions - for postactions in self._iter_post_close_action_sets(): - yield actions + postactions - for closeactions in self._iter_close_action_sets('other', 'extra'): - actions = useactions + closeactions - yield actions - for postactions in self._iter_post_close_action_sets(): - yield actions + postactions - for useactions in self._iter_use_action_sets('other', 'extra'): - yield useactions - for closeactions in self._iter_close_action_sets('same', 'other'): - actions = useactions + closeactions - yield actions - for postactions in self._iter_post_close_action_sets(): - yield actions + postactions - for closeactions in self._iter_close_action_sets('other', 'extra'): - actions = useactions + closeactions - yield actions - for postactions in self._iter_post_close_action_sets(): - yield actions + postactions - - def _iter_use_action_sets(self, interp1, interp2): - interps = (interp1, interp2) - - # only recv end used - yield [ - ChannelAction('use', 'recv', interp1), - ] - yield [ - ChannelAction('use', 'recv', interp2), - ] - yield [ - ChannelAction('use', 'recv', interp1), - ChannelAction('use', 'recv', interp2), - ] - - # never emptied - yield [ - ChannelAction('use', 'send', interp1), - ] - yield [ - ChannelAction('use', 'send', interp2), - ] - yield [ - ChannelAction('use', 'send', interp1), - ChannelAction('use', 'send', interp2), - ] - - # partially emptied - for interp1 in interps: - for interp2 in interps: - for interp3 in interps: - yield [ - ChannelAction('use', 'send', interp1), - ChannelAction('use', 'send', interp2), - ChannelAction('use', 'recv', interp3), - ] - - # fully emptied - for interp1 in interps: - for interp2 in interps: - for interp3 in interps: - for interp4 in interps: - yield [ - ChannelAction('use', 'send', interp1), - ChannelAction('use', 'send', interp2), - ChannelAction('use', 'recv', interp3), - ChannelAction('use', 'recv', interp4), - ] - - def _iter_close_action_sets(self, interp1, interp2): - ends = ('recv', 'send') - interps = (interp1, interp2) - for force in (True, False): - op = 'force-close' if force else 'close' - for interp in interps: - for end in ends: - yield [ - ChannelAction(op, end, interp), - ] - for recvop in ('close', 'force-close'): - for sendop in ('close', 'force-close'): - for recv in interps: - for send in interps: - yield [ - ChannelAction(recvop, 'recv', recv), - ChannelAction(sendop, 'send', send), - ] - - def _iter_post_close_action_sets(self): - for interp in ('same', 'extra', 'other'): - yield [ - ChannelAction('use', 'recv', interp), - ] - yield [ - ChannelAction('use', 'send', interp), - ] - - def run_actions(self, fix, actions): - for action in actions: - self.run_action(fix, 
action) - - def run_action(self, fix, action, *, hideclosed=True): - end = action.resolve_end(fix.end) - interp = action.resolve_interp(fix.interp, fix.other, fix.extra) - fix.prep_interpreter(interp) - if interp.name == 'main': - result = run_action( - fix.cid, - action.action, - end, - fix.state, - hideclosed=hideclosed, - ) - fix.record_action(action, result) - else: - _cid = interpreters.channel_create() - run_interp(interp.id, f""" - result = helpers.run_action( - {fix.cid}, - {repr(action.action)}, - {repr(end)}, - {repr(fix.state)}, - hideclosed={hideclosed}, - ) - interpreters.channel_send({_cid}, result.pending.to_bytes(1, 'little')) - interpreters.channel_send({_cid}, b'X' if result.closed else b'') - """) - result = ChannelState( - pending=int.from_bytes(interpreters.channel_recv(_cid), 'little'), - closed=bool(interpreters.channel_recv(_cid)), - ) - fix.record_action(action, result) - - def iter_fixtures(self): - # XXX threads? - interpreters = [ - ('main', 'interp', 'extra'), - ('interp', 'main', 'extra'), - ('interp1', 'interp2', 'extra'), - ('interp1', 'interp2', 'main'), - ] - for interp, other, extra in interpreters: - for creator in ('same', 'other', 'creator'): - for end in ('send', 'recv'): - yield ChannelCloseFixture(end, interp, other, extra, creator) - - def _close(self, fix, *, force): - op = 'force-close' if force else 'close' - close = ChannelAction(op, fix.end, 'same') - if not fix.expect_closed_error(): - self.run_action(fix, close, hideclosed=False) - else: - with self.assertRaises(interpreters.ChannelClosedError): - self.run_action(fix, close, hideclosed=False) - - def _assert_closed_in_interp(self, fix, interp=None): - if interp is None or interp.name == 'main': - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_recv(fix.cid) - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_send(fix.cid, b'spam') - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_close(fix.cid) - with self.assertRaises(interpreters.ChannelClosedError): - interpreters.channel_close(fix.cid, force=True) - else: - run_interp(interp.id, f""" - with helpers.expect_channel_closed(): - interpreters.channel_recv(cid) - """) - run_interp(interp.id, f""" - with helpers.expect_channel_closed(): - interpreters.channel_send(cid, b'spam') - """) - run_interp(interp.id, f""" - with helpers.expect_channel_closed(): - interpreters.channel_close(cid) - """) - run_interp(interp.id, f""" - with helpers.expect_channel_closed(): - interpreters.channel_close(cid, force=True) - """) - - def _assert_closed(self, fix): - self.assertTrue(fix.state.closed) - - for _ in range(fix.state.pending): - interpreters.channel_recv(fix.cid) - self._assert_closed_in_interp(fix) - - for interp in ('same', 'other'): - interp = fix.get_interpreter(interp) - if interp.name == 'main': - continue - self._assert_closed_in_interp(fix, interp) - - interp = fix.get_interpreter('fresh') - self._assert_closed_in_interp(fix, interp) - - def _iter_close_tests(self, verbose=False): - i = 0 - for actions in self.iter_action_sets(): - print() - for fix in self.iter_fixtures(): - i += 1 - if i > 1000: - return - if verbose: - if (i - 1) % 6 == 0: - print() - print(i, fix, '({} actions)'.format(len(actions))) - else: - if (i - 1) % 6 == 0: - print(' ', end='') - print('.', end=''); sys.stdout.flush() - yield i, fix, actions - if verbose: - print('---') - print() - - # This is useful for scanning through the possible tests. 
- def _skim_close_tests(self): - ChannelCloseFixture.QUICK = True - for i, fix, actions in self._iter_close_tests(): - pass - - def test_close(self): - for i, fix, actions in self._iter_close_tests(): - with self.subTest('{} {} {}'.format(i, fix, actions)): - fix.prep_interpreter(fix.interp) - self.run_actions(fix, actions) - - self._close(fix, force=False) - - self._assert_closed(fix) - # XXX Things slow down if we have too many interpreters. - fix.clean_up() - - def test_force_close(self): - for i, fix, actions in self._iter_close_tests(): - with self.subTest('{} {} {}'.format(i, fix, actions)): - fix.prep_interpreter(fix.interp) - self.run_actions(fix, actions) - - self._close(fix, force=True) - - self._assert_closed(fix) - # XXX Things slow down if we have too many interpreters. - fix.clean_up() - - if __name__ == '__main__': unittest.main() diff --git a/Lib/test/test_ast.py b/Lib/test/test_ast.py index 98d9b603bbc1cb..7c9a57c685df75 100644 --- a/Lib/test/test_ast.py +++ b/Lib/test/test_ast.py @@ -1900,7 +1900,7 @@ def get_load_const(self, tree): co = compile(tree, '', 'exec') consts = [] for instr in dis.get_instructions(co): - if instr.opname == 'LOAD_CONST': + if instr.opname == 'LOAD_CONST' or instr.opname == 'RETURN_CONST': consts.append(instr.argval) return consts diff --git a/Lib/test/test_asyncio/test_events.py b/Lib/test/test_asyncio/test_events.py index 214544b89bc558..b9069056c3a436 100644 --- a/Lib/test/test_asyncio/test_events.py +++ b/Lib/test/test_asyncio/test_events.py @@ -4,6 +4,7 @@ import concurrent.futures import functools import io +import multiprocessing import os import platform import re @@ -2762,7 +2763,13 @@ def test_get_event_loop_new_process(self): support.skip_if_broken_multiprocessing_synchronize() async def main(): - pool = concurrent.futures.ProcessPoolExecutor() + if multiprocessing.get_start_method() == 'fork': + # Avoid 'fork' DeprecationWarning. + mp_context = multiprocessing.get_context('forkserver') + else: + mp_context = None + pool = concurrent.futures.ProcessPoolExecutor( + mp_context=mp_context) result = await self.loop.run_in_executor( pool, _test_get_event_loop_new_process__sub_proc) pool.shutdown() diff --git a/Lib/test/test_capi/test_eval_code_ex.py b/Lib/test/test_capi/test_eval_code_ex.py new file mode 100644 index 00000000000000..2d28e5289eff94 --- /dev/null +++ b/Lib/test/test_capi/test_eval_code_ex.py @@ -0,0 +1,56 @@ +import unittest + +from test.support import import_helper + + +# Skip this test if the _testcapi module isn't available. 
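The new test_eval_code_ex.py below drives PyEval_EvalCodeEx through a thin _testcapi wrapper. Judging from the calls in these tests, the wrapper takes the code object followed by globals, locals, positional args, keyword args, defaults, keyword-only defaults, and a closure, with everything after globals optional; a minimal sketch under that assumption:

    # Hedged sketch; eval_code_ex exists only in CPython's private _testcapi
    # module, and the argument order here is inferred from the tests below.
    from test.support import import_helper

    _testcapi = import_helper.import_module('_testcapi')

    def f(a, b, c):
        return a

    # (code, globals, locals, args) -- remaining arguments left at their defaults
    result = _testcapi.eval_code_ex(f.__code__, {}, {}, (1, 2, 3))
    assert result == 1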
+_testcapi = import_helper.import_module('_testcapi') + + +class PyEval_EvalCodeExTests(unittest.TestCase): + + def test_simple(self): + def f(): + return a + + self.assertEqual(_testcapi.eval_code_ex(f.__code__, dict(a=1)), 1) + + # Need to force the compiler to use LOAD_NAME + # def test_custom_locals(self): + # def f(): + # return + + def test_with_args(self): + def f(a, b, c): + return a + + self.assertEqual(_testcapi.eval_code_ex(f.__code__, {}, {}, (1, 2, 3)), 1) + + def test_with_kwargs(self): + def f(a, b, c): + return a + + self.assertEqual(_testcapi.eval_code_ex(f.__code__, {}, {}, (), dict(a=1, b=2, c=3)), 1) + + def test_with_default(self): + def f(a): + return a + + self.assertEqual(_testcapi.eval_code_ex(f.__code__, {}, {}, (), {}, (1,)), 1) + + def test_with_kwarg_default(self): + def f(*, a): + return a + + self.assertEqual(_testcapi.eval_code_ex(f.__code__, {}, {}, (), {}, (), dict(a=1)), 1) + + def test_with_closure(self): + a = 1 + def f(): + return a + + self.assertEqual(_testcapi.eval_code_ex(f.__code__, {}, {}, (), {}, (), {}, f.__closure__), 1) + + +if __name__ == "__main__": + unittest.main() diff --git a/Lib/test/test_capi/test_misc.py b/Lib/test/test_capi/test_misc.py index dace37c362e569..7612cddb1f6576 100644 --- a/Lib/test/test_capi/test_misc.py +++ b/Lib/test/test_capi/test_misc.py @@ -1413,6 +1413,9 @@ def callback(): ret = assert_python_ok('-X', 'tracemalloc', '-c', code) self.assertIn(b'callback called', ret.out) + def test_gilstate_matches_current(self): + _testcapi.test_current_tstate_matches() + class Test_testcapi(unittest.TestCase): locals().update((name, getattr(_testcapi, name)) @@ -1550,5 +1553,44 @@ def func2(x=None): self.do_test(func2) +class Test_ErrSetAndRestore(unittest.TestCase): + + def test_err_set_raised(self): + with self.assertRaises(ValueError): + _testcapi.err_set_raised(ValueError()) + v = ValueError() + try: + _testcapi.err_set_raised(v) + except ValueError as ex: + self.assertIs(v, ex) + + def test_err_restore(self): + with self.assertRaises(ValueError): + _testcapi.err_restore(ValueError) + with self.assertRaises(ValueError): + _testcapi.err_restore(ValueError, 1) + with self.assertRaises(ValueError): + _testcapi.err_restore(ValueError, 1, None) + with self.assertRaises(ValueError): + _testcapi.err_restore(ValueError, ValueError()) + try: + _testcapi.err_restore(KeyError, "hi") + except KeyError as k: + self.assertEqual("hi", k.args[0]) + try: + 1/0 + except Exception as e: + tb = e.__traceback__ + with self.assertRaises(ValueError): + _testcapi.err_restore(ValueError, 1, tb) + with self.assertRaises(TypeError): + _testcapi.err_restore(ValueError, 1, 0) + try: + _testcapi.err_restore(ValueError, 1, tb) + except ValueError as v: + self.assertEqual(1, v.args[0]) + self.assertIs(tb, v.__traceback__.tb_next) + + if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_code.py b/Lib/test/test_code.py index 67ed1694205cd6..9c2ac83e1b69e3 100644 --- a/Lib/test/test_code.py +++ b/Lib/test/test_code.py @@ -723,6 +723,7 @@ def f(): pass PY_CODE_LOCATION_INFO_NO_COLUMNS = 13 f.__code__ = f.__code__.replace( + co_stacksize=1, co_firstlineno=42, co_code=bytes( [ diff --git a/Lib/test/test_compile.py b/Lib/test/test_compile.py index 573961e5653ad0..597606e89d51a7 100644 --- a/Lib/test/test_compile.py +++ b/Lib/test/test_compile.py @@ -741,7 +741,7 @@ def unused_code_at_end(): # RETURN_VALUE opcode. This does not always crash an interpreter. # When you build with the clang memory sanitizer it reliably aborts. 
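A pure-Python sketch (illustrative only, not part of the patch) of the traceback chaining that the new ``Test_ErrSetAndRestore`` cases above verify through ``_testcapi``: when an exception is re-raised with an explicit traceback, the supplied traceback stays reachable as ``tb_next`` of the frame added by the raise::

    # Capture a traceback object from a deliberately failing operation.
    try:
        1 / 0
    except ZeroDivisionError as exc:
        tb = exc.__traceback__

    # Attach it to a fresh exception and raise; the raising frame is
    # prepended to the chain and the captured traceback follows it.
    try:
        raise ValueError(1).with_traceback(tb)
    except ValueError as exc:
        assert exc.args[0] == 1
        assert exc.__traceback__.tb_next is tb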
self.assertEqual( - 'RETURN_VALUE', + 'RETURN_CONST', list(dis.get_instructions(unused_code_at_end))[-1].opname) def test_dont_merge_constants(self): @@ -822,10 +822,9 @@ def unused_block_while_else(): for func in funcs: opcodes = list(dis.get_instructions(func)) - self.assertLessEqual(len(opcodes), 4) - self.assertEqual('LOAD_CONST', opcodes[-2].opname) - self.assertEqual(None, opcodes[-2].argval) - self.assertEqual('RETURN_VALUE', opcodes[-1].opname) + self.assertLessEqual(len(opcodes), 3) + self.assertEqual('RETURN_CONST', opcodes[-1].opname) + self.assertEqual(None, opcodes[-1].argval) def test_false_while_loop(self): def break_in_while(): @@ -841,10 +840,9 @@ def continue_in_while(): # Check that we did not raise but we also don't generate bytecode for func in funcs: opcodes = list(dis.get_instructions(func)) - self.assertEqual(3, len(opcodes)) - self.assertEqual('LOAD_CONST', opcodes[1].opname) + self.assertEqual(2, len(opcodes)) + self.assertEqual('RETURN_CONST', opcodes[1].opname) self.assertEqual(None, opcodes[1].argval) - self.assertEqual('RETURN_VALUE', opcodes[2].opname) def test_consts_in_conditionals(self): def and_true(x): @@ -1311,7 +1309,7 @@ def test_multiline_generator_expression(self): line=1, end_line=2, column=1, end_column=8, occurrence=1) self.assertOpcodeSourcePositionIs(compiled_code, 'JUMP_BACKWARD', line=1, end_line=2, column=1, end_column=8, occurrence=1) - self.assertOpcodeSourcePositionIs(compiled_code, 'RETURN_VALUE', + self.assertOpcodeSourcePositionIs(compiled_code, 'RETURN_CONST', line=1, end_line=6, column=0, end_column=32, occurrence=1) def test_multiline_async_generator_expression(self): @@ -1328,7 +1326,7 @@ def test_multiline_async_generator_expression(self): self.assertIsInstance(compiled_code, types.CodeType) self.assertOpcodeSourcePositionIs(compiled_code, 'YIELD_VALUE', line=1, end_line=2, column=1, end_column=8, occurrence=2) - self.assertOpcodeSourcePositionIs(compiled_code, 'RETURN_VALUE', + self.assertOpcodeSourcePositionIs(compiled_code, 'RETURN_CONST', line=1, end_line=6, column=0, end_column=32, occurrence=1) def test_multiline_list_comprehension(self): diff --git a/Lib/test/test_ctypes/test_loading.py b/Lib/test/test_ctypes/test_loading.py index 15e365ed267d9b..f2434926a51714 100644 --- a/Lib/test/test_ctypes/test_loading.py +++ b/Lib/test/test_ctypes/test_loading.py @@ -28,10 +28,20 @@ class LoaderTest(unittest.TestCase): unknowndll = "xxrandomnamexx" def test_load(self): - if libc_name is None: - self.skipTest('could not find libc') - CDLL(libc_name) - CDLL(os.path.basename(libc_name)) + if libc_name is not None: + test_lib = libc_name + else: + if os.name == "nt": + import _ctypes_test + test_lib = _ctypes_test.__file__ + else: + self.skipTest('could not find library to load') + CDLL(test_lib) + CDLL(os.path.basename(test_lib)) + class CTypesTestPathLikeCls: + def __fspath__(self): + return test_lib + CDLL(CTypesTestPathLikeCls()) self.assertRaises(OSError, CDLL, self.unknowndll) def test_load_version(self): diff --git a/Lib/test/test_ctypes/test_pep3118.py b/Lib/test/test_ctypes/test_pep3118.py index efffc80a66fcb8..c8a70e3e335693 100644 --- a/Lib/test/test_ctypes/test_pep3118.py +++ b/Lib/test/test_ctypes/test_pep3118.py @@ -86,6 +86,20 @@ class PackedPoint(Structure): _pack_ = 2 _fields_ = [("x", c_long), ("y", c_long)] +class PointMidPad(Structure): + _fields_ = [("x", c_byte), ("y", c_uint)] + +class PackedPointMidPad(Structure): + _pack_ = 2 + _fields_ = [("x", c_byte), ("y", c_uint64)] + +class PointEndPad(Structure): + _fields_ 
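An illustrative sketch (not part of the patch) of what the updated ``test_load`` above exercises: since 3.12, ``ctypes.CDLL`` accepts any object implementing ``__fspath__`` in addition to a plain string name. ``find_library`` may return ``None`` on some platforms, so the sketch guards for that::

    import ctypes
    import ctypes.util

    libc_name = ctypes.util.find_library("c")
    if libc_name is not None:
        class _PathLike:
            """Minimal path-like wrapper, mirroring the helper class in the test."""
            def __fspath__(self):
                return libc_name

        libc = ctypes.CDLL(_PathLike())  # accepted on 3.12+; TypeError before
        print(libc)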
= [("x", c_uint), ("y", c_byte)] + +class PackedPointEndPad(Structure): + _pack_ = 2 + _fields_ = [("x", c_uint64), ("y", c_byte)] + class Point2(Structure): pass Point2._fields_ = [("x", c_long), ("y", c_long)] @@ -185,11 +199,14 @@ class Complete(Structure): ## structures and unions - (Point, "T{> END_FOR - LOAD_CONST 0 (None) - RETURN_VALUE + RETURN_CONST 0 (None) """ % (bug708901.__code__.co_firstlineno, bug708901.__code__.co_firstlineno + 1, bug708901.__code__.co_firstlineno + 2, @@ -198,8 +191,7 @@ def bug42562(): dis_bug42562 = """\ RESUME 0 - LOAD_CONST 0 (None) - RETURN_VALUE + RETURN_CONST 0 (None) """ # Extended arg followed by NOP @@ -240,8 +232,7 @@ def bug42562(): %3d LOAD_GLOBAL 0 (spam) POP_TOP - LOAD_CONST 0 (None) - RETURN_VALUE + RETURN_CONST 0 (None) """ _BIG_LINENO_FORMAT2 = """\ @@ -249,20 +240,17 @@ def bug42562(): %4d LOAD_GLOBAL 0 (spam) POP_TOP - LOAD_CONST 0 (None) - RETURN_VALUE + RETURN_CONST 0 (None) """ dis_module_expected_results = """\ Disassembly of f: 4 RESUME 0 - LOAD_CONST 0 (None) - RETURN_VALUE + RETURN_CONST 0 (None) Disassembly of g: 5 RESUME 0 - LOAD_CONST 0 (None) - RETURN_VALUE + RETURN_CONST 0 (None) """ @@ -286,8 +274,7 @@ def bug42562(): LOAD_CONST 0 (1) BINARY_OP 0 (+) STORE_NAME 0 (x) - LOAD_CONST 1 (None) - RETURN_VALUE + RETURN_CONST 1 (None) """ annot_stmt_str = """\ @@ -326,8 +313,7 @@ def bug42562(): STORE_SUBSCR LOAD_NAME 1 (int) POP_TOP - LOAD_CONST 4 (None) - RETURN_VALUE + RETURN_CONST 4 (None) """ compound_stmt_str = """\ @@ -447,12 +433,11 @@ def _with(c): %3d LOAD_CONST 2 (2) STORE_FAST 2 (y) - LOAD_CONST 0 (None) - RETURN_VALUE + RETURN_CONST 0 (None) %3d >> PUSH_EXC_INFO WITH_EXCEPT_START - POP_JUMP_IF_TRUE 1 (to 46) + POP_JUMP_IF_TRUE 1 (to 44) RERAISE 2 >> POP_TOP POP_EXCEPT @@ -461,8 +446,7 @@ def _with(c): %3d LOAD_CONST 2 (2) STORE_FAST 2 (y) - LOAD_CONST 0 (None) - RETURN_VALUE + RETURN_CONST 0 (None) >> COPY 3 POP_EXCEPT RERAISE 1 @@ -514,23 +498,22 @@ async def _asyncwith(c): %3d LOAD_CONST 2 (2) STORE_FAST 2 (y) - LOAD_CONST 0 (None) - RETURN_VALUE + RETURN_CONST 0 (None) %3d >> CLEANUP_THROW - JUMP_BACKWARD 24 (to 22) + JUMP_BACKWARD 23 (to 22) >> CLEANUP_THROW - JUMP_BACKWARD 9 (to 56) + JUMP_BACKWARD 8 (to 56) >> PUSH_EXC_INFO WITH_EXCEPT_START GET_AWAITABLE 2 LOAD_CONST 0 (None) - >> SEND 4 (to 92) + >> SEND 4 (to 90) YIELD_VALUE 3 RESUME 3 - JUMP_BACKWARD_NO_INTERRUPT 4 (to 82) + JUMP_BACKWARD_NO_INTERRUPT 4 (to 80) >> CLEANUP_THROW - >> POP_JUMP_IF_TRUE 1 (to 96) + >> POP_JUMP_IF_TRUE 1 (to 94) RERAISE 2 >> POP_TOP POP_EXCEPT @@ -539,8 +522,7 @@ async def _asyncwith(c): %3d LOAD_CONST 2 (2) STORE_FAST 2 (y) - LOAD_CONST 0 (None) - RETURN_VALUE + RETURN_CONST 0 (None) >> COPY 3 POP_EXCEPT RERAISE 1 @@ -610,8 +592,7 @@ def _tryfinallyconst(b): LOAD_FAST 0 (b) CALL 0 POP_TOP - LOAD_CONST 1 (1) - RETURN_VALUE + RETURN_CONST 1 (1) PUSH_EXC_INFO PUSH_NULL LOAD_FAST 0 (b) @@ -764,8 +745,7 @@ def loop_test(): JUMP_BACKWARD 17 (to 16) %3d >> END_FOR - LOAD_CONST 0 (None) - RETURN_VALUE + RETURN_CONST 0 (None) """ % (loop_test.__code__.co_firstlineno, loop_test.__code__.co_firstlineno + 1, loop_test.__code__.co_firstlineno + 2, @@ -782,8 +762,7 @@ def extended_arg_quick(): 6 UNPACK_EX 256 8 STORE_FAST 0 (_) 10 STORE_FAST 0 (_) - 12 LOAD_CONST 0 (None) - 14 RETURN_VALUE + 12 RETURN_CONST 0 (None) """% (extended_arg_quick.__code__.co_firstlineno, extended_arg_quick.__code__.co_firstlineno + 1,) @@ -1559,8 +1538,7 @@ def _prepare_test_cases(): Instruction(opname='LOAD_FAST', opcode=124, arg=1, argval='f', argrepr='f', 
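An illustrative way (not part of the patch) to observe the opcode change these disassembly expectations encode: on 3.12 a function returning a constant ends in a single ``RETURN_CONST`` instruction, whereas earlier versions emit ``LOAD_CONST`` followed by ``RETURN_VALUE``::

    import dis

    def f():
        return None

    last = list(dis.get_instructions(f))[-1]
    # 3.12+: RETURN_CONST None -- 3.11 and earlier: RETURN_VALUE None
    print(last.opname, last.argval)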
offset=26, starts_line=None, is_jump_target=False, positions=None), Instruction(opname='CALL', opcode=171, arg=6, argval=6, argrepr='', offset=28, starts_line=None, is_jump_target=False, positions=None), Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=38, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='LOAD_CONST', opcode=100, arg=0, argval=None, argrepr='None', offset=40, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='RETURN_VALUE', opcode=83, arg=None, argval=None, argrepr='', offset=42, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='RETURN_CONST', opcode=121, arg=0, argval=None, argrepr='None', offset=40, starts_line=None, is_jump_target=False, positions=None) ] expected_opinfo_jumpy = [ @@ -1640,52 +1618,50 @@ def _prepare_test_cases(): Instruction(opname='LOAD_CONST', opcode=100, arg=10, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=286, starts_line=None, is_jump_target=False, positions=None), Instruction(opname='CALL', opcode=171, arg=1, argval=1, argrepr='', offset=288, starts_line=None, is_jump_target=False, positions=None), Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=298, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='LOAD_CONST', opcode=100, arg=0, argval=None, argrepr='None', offset=300, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='RETURN_VALUE', opcode=83, arg=None, argval=None, argrepr='', offset=302, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='PUSH_EXC_INFO', opcode=35, arg=None, argval=None, argrepr='', offset=304, starts_line=25, is_jump_target=False, positions=None), - Instruction(opname='WITH_EXCEPT_START', opcode=49, arg=None, argval=None, argrepr='', offset=306, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='POP_JUMP_IF_TRUE', opcode=115, arg=1, argval=312, argrepr='to 312', offset=308, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='RERAISE', opcode=119, arg=2, argval=2, argrepr='', offset=310, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=312, starts_line=None, is_jump_target=True, positions=None), - Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=314, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='RETURN_CONST', opcode=121, arg=0, argval=None, argrepr='None', offset=300, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='PUSH_EXC_INFO', opcode=35, arg=None, argval=None, argrepr='', offset=302, starts_line=25, is_jump_target=False, positions=None), + Instruction(opname='WITH_EXCEPT_START', opcode=49, arg=None, argval=None, argrepr='', offset=304, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='POP_JUMP_IF_TRUE', opcode=115, arg=1, argval=310, argrepr='to 310', offset=306, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='RERAISE', opcode=119, arg=2, argval=2, argrepr='', offset=308, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=310, starts_line=None, is_jump_target=True, positions=None), + Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=312, 
starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=314, starts_line=None, is_jump_target=False, positions=None), Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=316, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=318, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='JUMP_BACKWARD', opcode=140, arg=24, argval=274, argrepr='to 274', offset=320, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='COPY', opcode=120, arg=3, argval=3, argrepr='', offset=322, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=324, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='RERAISE', opcode=119, arg=1, argval=1, argrepr='', offset=326, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='PUSH_EXC_INFO', opcode=35, arg=None, argval=None, argrepr='', offset=328, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='LOAD_GLOBAL', opcode=116, arg=4, argval='ZeroDivisionError', argrepr='ZeroDivisionError', offset=330, starts_line=22, is_jump_target=False, positions=None), - Instruction(opname='CHECK_EXC_MATCH', opcode=36, arg=None, argval=None, argrepr='', offset=342, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=16, argval=378, argrepr='to 378', offset=344, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=346, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='LOAD_GLOBAL', opcode=116, arg=3, argval='print', argrepr='NULL + print', offset=348, starts_line=23, is_jump_target=False, positions=None), - Instruction(opname='LOAD_CONST', opcode=100, arg=9, argval='Here we go, here we go, here we go...', argrepr="'Here we go, here we go, here we go...'", offset=360, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='CALL', opcode=171, arg=1, argval=1, argrepr='', offset=362, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=372, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=374, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='JUMP_BACKWARD', opcode=140, arg=52, argval=274, argrepr='to 274', offset=376, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='RERAISE', opcode=119, arg=0, argval=0, argrepr='', offset=378, starts_line=22, is_jump_target=True, positions=None), - Instruction(opname='COPY', opcode=120, arg=3, argval=3, argrepr='', offset=380, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=382, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='RERAISE', opcode=119, arg=1, argval=1, argrepr='', offset=384, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='PUSH_EXC_INFO', opcode=35, arg=None, argval=None, argrepr='', offset=386, starts_line=None, 
is_jump_target=False, positions=None), - Instruction(opname='LOAD_GLOBAL', opcode=116, arg=3, argval='print', argrepr='NULL + print', offset=388, starts_line=28, is_jump_target=False, positions=None), - Instruction(opname='LOAD_CONST', opcode=100, arg=10, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=400, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='CALL', opcode=171, arg=1, argval=1, argrepr='', offset=402, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=412, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='RERAISE', opcode=119, arg=0, argval=0, argrepr='', offset=414, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='COPY', opcode=120, arg=3, argval=3, argrepr='', offset=416, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=418, starts_line=None, is_jump_target=False, positions=None), - Instruction(opname='RERAISE', opcode=119, arg=1, argval=1, argrepr='', offset=420, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='JUMP_BACKWARD', opcode=140, arg=23, argval=274, argrepr='to 274', offset=318, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='COPY', opcode=120, arg=3, argval=3, argrepr='', offset=320, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=322, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='RERAISE', opcode=119, arg=1, argval=1, argrepr='', offset=324, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='PUSH_EXC_INFO', opcode=35, arg=None, argval=None, argrepr='', offset=326, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='LOAD_GLOBAL', opcode=116, arg=4, argval='ZeroDivisionError', argrepr='ZeroDivisionError', offset=328, starts_line=22, is_jump_target=False, positions=None), + Instruction(opname='CHECK_EXC_MATCH', opcode=36, arg=None, argval=None, argrepr='', offset=340, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='POP_JUMP_IF_FALSE', opcode=114, arg=16, argval=376, argrepr='to 376', offset=342, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=344, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='LOAD_GLOBAL', opcode=116, arg=3, argval='print', argrepr='NULL + print', offset=346, starts_line=23, is_jump_target=False, positions=None), + Instruction(opname='LOAD_CONST', opcode=100, arg=9, argval='Here we go, here we go, here we go...', argrepr="'Here we go, here we go, here we go...'", offset=358, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='CALL', opcode=171, arg=1, argval=1, argrepr='', offset=360, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=370, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=372, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='JUMP_BACKWARD', opcode=140, arg=51, argval=274, argrepr='to 274', offset=374, 
starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='RERAISE', opcode=119, arg=0, argval=0, argrepr='', offset=376, starts_line=22, is_jump_target=True, positions=None), + Instruction(opname='COPY', opcode=120, arg=3, argval=3, argrepr='', offset=378, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=380, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='RERAISE', opcode=119, arg=1, argval=1, argrepr='', offset=382, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='PUSH_EXC_INFO', opcode=35, arg=None, argval=None, argrepr='', offset=384, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='LOAD_GLOBAL', opcode=116, arg=3, argval='print', argrepr='NULL + print', offset=386, starts_line=28, is_jump_target=False, positions=None), + Instruction(opname='LOAD_CONST', opcode=100, arg=10, argval="OK, now we're done", argrepr='"OK, now we\'re done"', offset=398, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='CALL', opcode=171, arg=1, argval=1, argrepr='', offset=400, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='POP_TOP', opcode=1, arg=None, argval=None, argrepr='', offset=410, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='RERAISE', opcode=119, arg=0, argval=0, argrepr='', offset=412, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='COPY', opcode=120, arg=3, argval=3, argrepr='', offset=414, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='POP_EXCEPT', opcode=89, arg=None, argval=None, argrepr='', offset=416, starts_line=None, is_jump_target=False, positions=None), + Instruction(opname='RERAISE', opcode=119, arg=1, argval=1, argrepr='', offset=418, starts_line=None, is_jump_target=False, positions=None) ] # One last piece of inspect fodder to check the default line number handling def simple(): pass expected_opinfo_simple = [ Instruction(opname='RESUME', opcode=151, arg=0, argval=0, argrepr='', offset=0, starts_line=simple.__code__.co_firstlineno, is_jump_target=False, positions=None), - Instruction(opname='LOAD_CONST', opcode=100, arg=0, argval=None, argrepr='None', offset=2, starts_line=None, is_jump_target=False), - Instruction(opname='RETURN_VALUE', opcode=83, arg=None, argval=None, argrepr='', offset=4, starts_line=None, is_jump_target=False) + Instruction(opname='RETURN_CONST', opcode=121, arg=0, argval=None, argrepr='None', offset=2, starts_line=None, is_jump_target=False), ] @@ -1746,7 +1722,6 @@ def test_co_positions(self): (2, 2, 8, 9), (1, 3, 0, 1), (1, 3, 0, 1), - (1, 3, 0, 1), (1, 3, 0, 1) ] self.assertEqual(positions, expected) diff --git a/Lib/test/test_enum.py b/Lib/test/test_enum.py index 1e653e94f6b57a..0a2e0c14d268af 100644 --- a/Lib/test/test_enum.py +++ b/Lib/test/test_enum.py @@ -2855,6 +2855,46 @@ class NTEnum(Enum): [TTuple(id=0, a=0, blist=[]), TTuple(id=1, a=2, blist=[4]), TTuple(id=2, a=4, blist=[0, 1, 2])], ) + def test_flag_with_custom_new(self): + class FlagFromChar(IntFlag): + def __new__(cls, c): + value = 1 << c + self = int.__new__(cls, value) + self._value_ = value + return self + # + a = ord('a') + # + self.assertEqual(FlagFromChar.a, 158456325028528675187087900672) + self.assertEqual(FlagFromChar.a|1, 158456325028528675187087900673) + # + # + class FlagFromChar(Flag): + def __new__(cls, c): + value = 1 << 
c + self = object.__new__(cls) + self._value_ = value + return self + # + a = ord('a') + z = 1 + # + self.assertEqual(FlagFromChar.a.value, 158456325028528675187087900672) + self.assertEqual((FlagFromChar.a|FlagFromChar.z).value, 158456325028528675187087900674) + # + # + class FlagFromChar(int, Flag, boundary=KEEP): + def __new__(cls, c): + value = 1 << c + self = int.__new__(cls, value) + self._value_ = value + return self + # + a = ord('a') + # + self.assertEqual(FlagFromChar.a, 158456325028528675187087900672) + self.assertEqual(FlagFromChar.a|1, 158456325028528675187087900673) + class TestOrder(unittest.TestCase): "test usage of the `_order_` attribute" diff --git a/Lib/test/test_exceptions.py b/Lib/test/test_exceptions.py index f629321458d8ae..4ae71e431c56dc 100644 --- a/Lib/test/test_exceptions.py +++ b/Lib/test/test_exceptions.py @@ -347,6 +347,7 @@ def test_capi2(): _testcapi.raise_exception(BadException, 0) except RuntimeError as err: exc, err, tb = sys.exc_info() + tb = tb.tb_next co = tb.tb_frame.f_code self.assertEqual(co.co_name, "__init__") self.assertTrue(co.co_filename.endswith('test_exceptions.py')) @@ -1415,8 +1416,8 @@ def gen(): @cpython_only def test_recursion_normalizing_infinite_exception(self): # Issue #30697. Test that a RecursionError is raised when - # PyErr_NormalizeException() maximum recursion depth has been - # exceeded. + # maximum recursion depth has been exceeded when creating + # an exception code = """if 1: import _testcapi try: @@ -1426,8 +1427,7 @@ def test_recursion_normalizing_infinite_exception(self): """ rc, out, err = script_helper.assert_python_failure("-c", code) self.assertEqual(rc, 1) - self.assertIn(b'RecursionError: maximum recursion depth exceeded ' - b'while normalizing an exception', err) + self.assertIn(b'RecursionError: maximum recursion depth exceeded', err) self.assertIn(b'Done.', out) diff --git a/Lib/test/test_fcntl.py b/Lib/test/test_fcntl.py index fc8c39365f12b7..113c7802821dd4 100644 --- a/Lib/test/test_fcntl.py +++ b/Lib/test/test_fcntl.py @@ -1,11 +1,11 @@ """Test program for the fcntl C module. 
""" +import multiprocessing import platform import os import struct import sys import unittest -from multiprocessing import Process from test.support import verbose, cpython_only from test.support.import_helper import import_module from test.support.os_helper import TESTFN, unlink @@ -160,7 +160,8 @@ def test_lockf_exclusive(self): self.f = open(TESTFN, 'wb+') cmd = fcntl.LOCK_EX | fcntl.LOCK_NB fcntl.lockf(self.f, cmd) - p = Process(target=try_lockf_on_other_process_fail, args=(TESTFN, cmd)) + mp = multiprocessing.get_context('spawn') + p = mp.Process(target=try_lockf_on_other_process_fail, args=(TESTFN, cmd)) p.start() p.join() fcntl.lockf(self.f, fcntl.LOCK_UN) @@ -171,7 +172,8 @@ def test_lockf_share(self): self.f = open(TESTFN, 'wb+') cmd = fcntl.LOCK_SH | fcntl.LOCK_NB fcntl.lockf(self.f, cmd) - p = Process(target=try_lockf_on_other_process, args=(TESTFN, cmd)) + mp = multiprocessing.get_context('spawn') + p = mp.Process(target=try_lockf_on_other_process, args=(TESTFN, cmd)) p.start() p.join() fcntl.lockf(self.f, fcntl.LOCK_UN) diff --git a/Lib/test/test_fileio.py b/Lib/test/test_fileio.py index 2263604ed1f97d..ebfcffd1829174 100644 --- a/Lib/test/test_fileio.py +++ b/Lib/test/test_fileio.py @@ -12,7 +12,9 @@ from test.support import ( cpython_only, swap_attr, gc_collect, is_emscripten, is_wasi ) -from test.support.os_helper import (TESTFN, TESTFN_UNICODE, make_bad_fd) +from test.support.os_helper import ( + TESTFN, TESTFN_ASCII, TESTFN_UNICODE, make_bad_fd, + ) from test.support.warnings_helper import check_warnings from collections import UserList @@ -431,18 +433,15 @@ def testUnicodeOpen(self): def testBytesOpen(self): # Opening a bytes filename - try: - fn = TESTFN.encode("ascii") - except UnicodeEncodeError: - self.skipTest('could not encode %r to ascii' % TESTFN) + fn = TESTFN_ASCII.encode("ascii") f = self.FileIO(fn, "w") try: f.write(b"abc") f.close() - with open(TESTFN, "rb") as f: + with open(TESTFN_ASCII, "rb") as f: self.assertEqual(f.read(), b"abc") finally: - os.unlink(TESTFN) + os.unlink(TESTFN_ASCII) @unittest.skipIf(sys.getfilesystemencoding() != 'utf-8', "test only works for utf-8 filesystems") diff --git a/Lib/test/test_hashlib.py b/Lib/test/test_hashlib.py index 450dc4933f47f7..9c92b4e9c280dc 100644 --- a/Lib/test/test_hashlib.py +++ b/Lib/test/test_hashlib.py @@ -22,6 +22,7 @@ from test.support import _4G, bigmemtest from test.support.import_helper import import_fresh_module from test.support import os_helper +from test.support import requires_resource from test.support import threading_helper from test.support import warnings_helper from http.client import HTTPException @@ -354,6 +355,15 @@ def test_large_update(self): self.assertEqual(m1.digest(*args), m4_copy.digest(*args)) self.assertEqual(m4.digest(*args), m4_digest) + @requires_resource('cpu') + def test_sha256_update_over_4gb(self): + zero_1mb = b"\0" * 1024 * 1024 + h = hashlib.sha256() + for i in range(0, 4096): + h.update(zero_1mb) + h.update(b"hello world") + self.assertEqual(h.hexdigest(), "a5364f7a52ebe2e25f1838a4ca715a893b6fd7a23f2a0d9e9762120da8b1bf53") + def check(self, name, data, hexdigest, shake=False, **kwargs): length = len(hexdigest)//2 hexdigest = hexdigest.lower() diff --git a/Lib/test/test_interpreters.py b/Lib/test/test_interpreters.py index b969ddf33d81bc..d1bebe47158322 100644 --- a/Lib/test/test_interpreters.py +++ b/Lib/test/test_interpreters.py @@ -8,6 +8,7 @@ from test import support from test.support import import_helper _interpreters = 
import_helper.import_module('_xxsubinterpreters') +_channels = import_helper.import_module('_xxinterpchannels') from test.support import interpreters @@ -533,7 +534,7 @@ class TestRecvChannelAttrs(TestBase): def test_id_type(self): rch, _ = interpreters.create_channel() - self.assertIsInstance(rch.id, _interpreters.ChannelID) + self.assertIsInstance(rch.id, _channels.ChannelID) def test_custom_id(self): rch = interpreters.RecvChannel(1) @@ -558,7 +559,7 @@ class TestSendChannelAttrs(TestBase): def test_id_type(self): _, sch = interpreters.create_channel() - self.assertIsInstance(sch.id, _interpreters.ChannelID) + self.assertIsInstance(sch.id, _channels.ChannelID) def test_custom_id(self): sch = interpreters.SendChannel(1) diff --git a/Lib/test/test_launcher.py b/Lib/test/test_launcher.py index 351a638c1dd340..2f35eaf08a2dc9 100644 --- a/Lib/test/test_launcher.py +++ b/Lib/test/test_launcher.py @@ -56,7 +56,17 @@ None: sys.prefix, } }, - } + }, + "PythonTestSuite1": { + "DisplayName": "Python Test Suite Single", + "3.100": { + "DisplayName": "Single Interpreter", + "InstallPath": { + None: sys.prefix, + "ExecutablePath": sys.executable, + } + } + }, } @@ -206,6 +216,7 @@ def run_py(self, args, env=None, allow_fail=False, expect_returncode=0, argv=Non **{k.upper(): v for k, v in os.environ.items() if k.upper() not in ignore}, "PYLAUNCHER_DEBUG": "1", "PYLAUNCHER_DRYRUN": "1", + "PYLAUNCHER_LIMIT_TO_COMPANY": "", **{k.upper(): v for k, v in (env or {}).items()}, } if not argv: @@ -388,23 +399,33 @@ def test_filter_to_tag(self): self.assertEqual(company, data["env.company"]) self.assertEqual("3.100", data["env.tag"]) - data = self.run_py([f"-V:3.100-3"]) + data = self.run_py([f"-V:3.100-32"]) self.assertEqual("X.Y-32.exe", data["LaunchCommand"]) self.assertEqual(company, data["env.company"]) self.assertEqual("3.100-32", data["env.tag"]) - data = self.run_py([f"-V:3.100-a"]) + data = self.run_py([f"-V:3.100-arm64"]) self.assertEqual("X.Y-arm64.exe -X fake_arg_for_test", data["LaunchCommand"]) self.assertEqual(company, data["env.company"]) self.assertEqual("3.100-arm64", data["env.tag"]) def test_filter_to_company_and_tag(self): company = "PythonTestSuite" - data = self.run_py([f"-V:{company}/3.1"]) + data = self.run_py([f"-V:{company}/3.1"], expect_returncode=103) + + data = self.run_py([f"-V:{company}/3.100"]) self.assertEqual("X.Y.exe", data["LaunchCommand"]) self.assertEqual(company, data["env.company"]) self.assertEqual("3.100", data["env.tag"]) + def test_filter_with_single_install(self): + company = "PythonTestSuite1" + data = self.run_py( + [f"-V:Nonexistent"], + env={"PYLAUNCHER_LIMIT_TO_COMPANY": company}, + expect_returncode=103, + ) + def test_search_major_3(self): try: data = self.run_py(["-3"], allow_fail=True) diff --git a/Lib/test/test_long.py b/Lib/test/test_long.py index 569ab15820e302..d299c34cec076d 100644 --- a/Lib/test/test_long.py +++ b/Lib/test/test_long.py @@ -1601,5 +1601,44 @@ def test_square(self): self.assertEqual(n**2, (1 << (2 * bitlen)) - (1 << (bitlen + 1)) + 1) + def test___sizeof__(self): + self.assertEqual(int.__itemsize__, sys.int_info.sizeof_digit) + + # Pairs (test_value, number of allocated digits) + test_values = [ + # We always allocate space for at least one digit, even for + # a value of zero; sys.getsizeof should reflect that. 
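A small standalone sketch (not part of the patch) of the incremental-update pattern that the ``test_sha256_update_over_4gb`` case added above relies on: feeding ``hashlib.sha256`` data in chunks yields the same digest as hashing the concatenated bytes in one call (only a few MiB here to keep it cheap)::

    import hashlib

    chunk = b"\0" * 1024 * 1024  # 1 MiB of zero bytes
    h = hashlib.sha256()
    for _ in range(4):
        h.update(chunk)
    h.update(b"hello world")

    one_shot = hashlib.sha256(chunk * 4 + b"hello world")
    assert h.hexdigest() == one_shot.hexdigest()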
+ (0, 1), + (1, 1), + (-1, 1), + (BASE-1, 1), + (1-BASE, 1), + (BASE, 2), + (-BASE, 2), + (BASE*BASE - 1, 2), + (BASE*BASE, 3), + ] + + for value, ndigits in test_values: + with self.subTest(value): + self.assertEqual( + value.__sizeof__(), + int.__basicsize__ + int.__itemsize__ * ndigits + ) + + # Same test for a subclass of int. + class MyInt(int): + pass + + self.assertEqual(MyInt.__itemsize__, sys.int_info.sizeof_digit) + + for value, ndigits in test_values: + with self.subTest(value): + self.assertEqual( + MyInt(value).__sizeof__(), + MyInt.__basicsize__ + MyInt.__itemsize__ * ndigits + ) + + if __name__ == "__main__": unittest.main() diff --git a/Lib/test/test_math.py b/Lib/test/test_math.py index 2c84e55ab45c57..433161c2dd4145 100644 --- a/Lib/test/test_math.py +++ b/Lib/test/test_math.py @@ -1146,7 +1146,6 @@ def testLog(self): self.ftest('log(1/e)', math.log(1/math.e), -1) self.ftest('log(1)', math.log(1), 0) self.ftest('log(e)', math.log(math.e), 1) - self.ftest('log(e, None)', math.log(math.e, None), 1) self.ftest('log(32,2)', math.log(32,2), 5) self.ftest('log(10**40, 10)', math.log(10**40, 10), 40) self.ftest('log(10**40, 10**20)', math.log(10**40, 10**20), 2) @@ -1450,6 +1449,11 @@ def Trial(dotfunc, c, n): n = 20 # Length of vectors c = 1e30 # Target condition number + # If the following test fails, it means that the C math library + # implementation of fma() is not compliant with the C99 standard + # and is inaccurate. To solve this problem, make a new build + # with the symbol UNRELIABLE_FMA defined. That will enable a + # slower but accurate code path that avoids the fma() call. relative_err = median(Trial(math.sumprod, c, n) for i in range(times)) self.assertLess(relative_err, 1e-16) diff --git a/Lib/test/test_ntpath.py b/Lib/test/test_ntpath.py index bce38a534a6a98..b32900697874b1 100644 --- a/Lib/test/test_ntpath.py +++ b/Lib/test/test_ntpath.py @@ -1,9 +1,10 @@ +import inspect import ntpath import os import sys import unittest import warnings -from test.support import os_helper +from test.support import cpython_only, os_helper from test.support import TestFailed, is_emscripten from test.support.os_helper import FakePath from test import test_genericpath @@ -938,6 +939,35 @@ def test_isjunction(self): self.assertFalse(ntpath.isjunction('tmpdir')) self.assertPathEqual(ntpath.realpath('testjunc'), ntpath.realpath('tmpdir')) + @unittest.skipIf(sys.platform != 'win32', "drive letters are a windows concept") + def test_isfile_driveletter(self): + drive = os.environ.get('SystemDrive') + if drive is None or len(drive) != 2 or drive[1] != ':': + raise unittest.SkipTest('SystemDrive is not defined or malformed') + self.assertFalse(os.path.isfile('\\\\.\\' + drive)) + + @unittest.skipIf(sys.platform != 'win32', "windows only") + def test_con_device(self): + self.assertFalse(os.path.isfile(r"\\.\CON")) + self.assertFalse(os.path.isdir(r"\\.\CON")) + self.assertFalse(os.path.islink(r"\\.\CON")) + self.assertTrue(os.path.exists(r"\\.\CON")) + + @unittest.skipIf(sys.platform != 'win32', "Fast paths are only for win32") + @cpython_only + def test_fast_paths_in_use(self): + # There are fast paths of these functions implemented in posixmodule.c. + # Confirm that they are being used, and not the Python fallbacks in + # genericpath.py. 
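An illustrative check (not part of the patch) of the relationship the new ``test___sizeof__`` above asserts for 3.12: an ``int`` reports ``__basicsize__`` plus one ``__itemsize__`` per allocated digit, with ``BASE`` standing in for the test module's ``2 ** sys.int_info.bits_per_digit``::

    import sys

    BASE = 2 ** sys.int_info.bits_per_digit

    def expected_size(ndigits):
        return int.__basicsize__ + int.__itemsize__ * ndigits

    # Zero still gets one digit allocated; BASE needs a second digit
    # (3.12 behaviour, matching the pairs listed in the test above).
    assert (0).__sizeof__() == expected_size(1)
    assert (BASE - 1).__sizeof__() == expected_size(1)
    assert BASE.__sizeof__() == expected_size(2)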
+ self.assertTrue(os.path.isdir is nt._path_isdir) + self.assertFalse(inspect.isfunction(os.path.isdir)) + self.assertTrue(os.path.isfile is nt._path_isfile) + self.assertFalse(inspect.isfunction(os.path.isfile)) + self.assertTrue(os.path.islink is nt._path_islink) + self.assertFalse(inspect.isfunction(os.path.islink)) + self.assertTrue(os.path.exists is nt._path_exists) + self.assertFalse(inspect.isfunction(os.path.exists)) + class NtCommonTest(test_genericpath.CommonTest, unittest.TestCase): pathmodule = ntpath diff --git a/Lib/test/test_ordered_dict.py b/Lib/test/test_ordered_dict.py index 37447fd249b8c0..decbcc2419c9fc 100644 --- a/Lib/test/test_ordered_dict.py +++ b/Lib/test/test_ordered_dict.py @@ -362,7 +362,7 @@ def test_repr(self): OrderedDict = self.OrderedDict od = OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)]) self.assertEqual(repr(od), - "OrderedDict([('c', 1), ('b', 2), ('a', 3), ('d', 4), ('e', 5), ('f', 6)])") + "OrderedDict({'c': 1, 'b': 2, 'a': 3, 'd': 4, 'e': 5, 'f': 6})") self.assertEqual(eval(repr(od)), od) self.assertEqual(repr(OrderedDict()), "OrderedDict()") @@ -372,7 +372,7 @@ def test_repr_recursive(self): od = OrderedDict.fromkeys('abc') od['x'] = od self.assertEqual(repr(od), - "OrderedDict([('a', None), ('b', None), ('c', None), ('x', ...)])") + "OrderedDict({'a': None, 'b': None, 'c': None, 'x': ...})") def test_repr_recursive_values(self): OrderedDict = self.OrderedDict diff --git a/Lib/test/test_os.py b/Lib/test/test_os.py index 58e04dd1348fd1..387d2581c06fc6 100644 --- a/Lib/test/test_os.py +++ b/Lib/test/test_os.py @@ -742,6 +742,7 @@ def test_access_denied(self): ) result = os.stat(fname) self.assertNotEqual(result.st_size, 0) + self.assertTrue(os.path.isfile(fname)) @unittest.skipUnless(sys.platform == "win32", "Win32 specific tests") def test_stat_block_device(self): @@ -2860,6 +2861,7 @@ def test_appexeclink(self): self.assertEqual(st, os.stat(alias)) self.assertFalse(stat.S_ISLNK(st.st_mode)) self.assertEqual(st.st_reparse_tag, stat.IO_REPARSE_TAG_APPEXECLINK) + self.assertTrue(os.path.isfile(alias)) # testing the first one we see is sufficient break else: diff --git a/Lib/test/test_peepholer.py b/Lib/test/test_peepholer.py index 239c9d03fd9d1f..707ff821b31a8a 100644 --- a/Lib/test/test_peepholer.py +++ b/Lib/test/test_peepholer.py @@ -117,7 +117,7 @@ def f(): return None self.assertNotInBytecode(f, 'LOAD_GLOBAL') - self.assertInBytecode(f, 'LOAD_CONST', None) + self.assertInBytecode(f, 'RETURN_CONST', None) self.check_lnotab(f) def test_while_one(self): @@ -134,7 +134,7 @@ def f(): def test_pack_unpack(self): for line, elem in ( - ('a, = a,', 'LOAD_CONST',), + ('a, = a,', 'RETURN_CONST',), ('a, b = a, b', 'SWAP',), ('a, b, c = a, b, c', 'SWAP',), ): @@ -165,7 +165,7 @@ def test_folding_of_tuples_of_constants(self): # One LOAD_CONST for the tuple, one for the None return value load_consts = [instr for instr in dis.get_instructions(code) if instr.opname == 'LOAD_CONST'] - self.assertEqual(len(load_consts), 2) + self.assertEqual(len(load_consts), 1) self.check_lnotab(code) # Bug 1053819: Tuple of constants misidentified when presented with: diff --git a/Lib/test/test_pickle.py b/Lib/test/test_pickle.py index 44fdca7a6b1688..80e7a4d23a4ba8 100644 --- a/Lib/test/test_pickle.py +++ b/Lib/test/test_pickle.py @@ -533,6 +533,8 @@ def test_exceptions(self): def test_multiprocessing_exceptions(self): module = import_helper.import_module('multiprocessing.context') for name, exc in get_exceptions(module): + if issubclass(exc, Warning): 
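The ``OrderedDict`` repr change covered above, shown standalone (illustrative, not part of the patch): the repr now uses a dict-literal style and still round-trips through ``eval``::

    from collections import OrderedDict

    od = OrderedDict([('c', 1), ('b', 2), ('a', 3)])
    # 3.12: OrderedDict({'c': 1, 'b': 2, 'a': 3})
    # 3.11 and earlier: OrderedDict([('c', 1), ('b', 2), ('a', 3)])
    print(repr(od))
    assert eval(repr(od)) == od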
+ continue with self.subTest(name): self.assertEqual(reverse_mapping('multiprocessing.context', name), ('multiprocessing', name)) diff --git a/Lib/test/test_stable_abi_ctypes.py b/Lib/test/test_stable_abi_ctypes.py index 67c653428a6dee..e77c1c8409880d 100644 --- a/Lib/test/test_stable_abi_ctypes.py +++ b/Lib/test/test_stable_abi_ctypes.py @@ -172,6 +172,7 @@ def test_windows_feature_macros(self): "PyErr_FormatV", "PyErr_GetExcInfo", "PyErr_GetHandledException", + "PyErr_GetRaisedException", "PyErr_GivenExceptionMatches", "PyErr_NewException", "PyErr_NewExceptionWithDoc", @@ -195,6 +196,7 @@ def test_windows_feature_macros(self): "PyErr_SetInterruptEx", "PyErr_SetNone", "PyErr_SetObject", + "PyErr_SetRaisedException", "PyErr_SetString", "PyErr_SyntaxLocation", "PyErr_SyntaxLocationEx", @@ -292,9 +294,11 @@ def test_windows_feature_macros(self): "PyExc_Warning", "PyExc_ZeroDivisionError", "PyExceptionClass_Name", + "PyException_GetArgs", "PyException_GetCause", "PyException_GetContext", "PyException_GetTraceback", + "PyException_SetArgs", "PyException_SetCause", "PyException_SetContext", "PyException_SetTraceback", diff --git a/Lib/test/test_tarfile.py b/Lib/test/test_tarfile.py index 213932069201b9..f15a800976681c 100644 --- a/Lib/test/test_tarfile.py +++ b/Lib/test/test_tarfile.py @@ -225,6 +225,11 @@ def test_add_dir_getmember(self): self.add_dir_and_getmember('bar') self.add_dir_and_getmember('a'*101) + @unittest.skipIf( + (hasattr(os, 'getuid') and os.getuid() > 0o777_7777) or + (hasattr(os, 'getgid') and os.getgid() > 0o777_7777), + "uid or gid too high for USTAR format." + ) def add_dir_and_getmember(self, name): with os_helper.temp_cwd(): with tarfile.open(tmpname, 'w') as tar: diff --git a/Lib/test/test_typing.py b/Lib/test/test_typing.py index 5aa49bb0e2456d..7a460d94469fe7 100644 --- a/Lib/test/test_typing.py +++ b/Lib/test/test_typing.py @@ -4892,6 +4892,18 @@ class NontotalMovie(TypedDict, total=False): title: Required[str] year: int +class ParentNontotalMovie(TypedDict, total=False): + title: Required[str] + +class ChildTotalMovie(ParentNontotalMovie): + year: NotRequired[int] + +class ParentDeeplyAnnotatedMovie(TypedDict): + title: Annotated[Annotated[Required[str], "foobar"], "another level"] + +class ChildDeeplyAnnotatedMovie(ParentDeeplyAnnotatedMovie): + year: NotRequired[Annotated[int, 2000]] + class AnnotatedMovie(TypedDict): title: Annotated[Required[str], "foobar"] year: NotRequired[Annotated[int, 2000]] @@ -5221,6 +5233,17 @@ def test_get_type_hints_typeddict(self): 'a': Annotated[Required[int], "a", "b", "c"] }) + self.assertEqual(get_type_hints(ChildTotalMovie), {"title": str, "year": int}) + self.assertEqual(get_type_hints(ChildTotalMovie, include_extras=True), { + "title": Required[str], "year": NotRequired[int] + }) + + self.assertEqual(get_type_hints(ChildDeeplyAnnotatedMovie), {"title": str, "year": int}) + self.assertEqual(get_type_hints(ChildDeeplyAnnotatedMovie, include_extras=True), { + "title": Annotated[Required[str], "foobar", "another level"], + "year": NotRequired[Annotated[int, 2000]] + }) + def test_get_type_hints_collections_abc_callable(self): # https://github.com/python/cpython/issues/91621 P = ParamSpec('P') @@ -6381,6 +6404,16 @@ def test_required_notrequired_keys(self): self.assertEqual(WeirdlyQuotedMovie.__optional_keys__, frozenset({"year"})) + self.assertEqual(ChildTotalMovie.__required_keys__, + frozenset({"title"})) + self.assertEqual(ChildTotalMovie.__optional_keys__, + frozenset({"year"})) + + 
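A standalone sketch (not part of the patch; class names shortened from the test's) of the inheritance cases the new ``TypedDict`` classes above cover: ``Required``/``NotRequired`` qualifiers declared on a parent (available in ``typing`` since 3.11) are reflected in the child's key sets, as the added assertions expect::

    from typing import TypedDict, Required, NotRequired

    class ParentNontotal(TypedDict, total=False):
        title: Required[str]

    class ChildTotal(ParentNontotal):
        year: NotRequired[int]

    assert ChildTotal.__required_keys__ == frozenset({'title'})
    assert ChildTotal.__optional_keys__ == frozenset({'year'})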
self.assertEqual(ChildDeeplyAnnotatedMovie.__required_keys__, + frozenset({"title"})) + self.assertEqual(ChildDeeplyAnnotatedMovie.__optional_keys__, + frozenset({"year"})) + def test_multiple_inheritance(self): class One(TypedDict): one: int diff --git a/Lib/test/test_xml_etree.py b/Lib/test/test_xml_etree.py index ca5bb562996b52..11efee00582e01 100644 --- a/Lib/test/test_xml_etree.py +++ b/Lib/test/test_xml_etree.py @@ -203,25 +203,6 @@ def serialize_check(self, elem, expected): def test_interface(self): # Test element tree interface. - def check_string(string): - len(string) - for char in string: - self.assertEqual(len(char), 1, - msg="expected one-character string, got %r" % char) - new_string = string + "" - new_string = string + " " - string[:0] - - def check_mapping(mapping): - len(mapping) - keys = mapping.keys() - items = mapping.items() - for key in keys: - item = mapping[key] - mapping["key"] = "value" - self.assertEqual(mapping["key"], "value", - msg="expected value string, got %r" % mapping["key"]) - def check_element(element): self.assertTrue(ET.iselement(element), msg="not an element") direlem = dir(element) @@ -231,12 +212,12 @@ def check_element(element): self.assertIn(attr, direlem, msg='no %s visible by dir' % attr) - check_string(element.tag) - check_mapping(element.attrib) + self.assertIsInstance(element.tag, str) + self.assertIsInstance(element.attrib, dict) if element.text is not None: - check_string(element.text) + self.assertIsInstance(element.text, str) if element.tail is not None: - check_string(element.tail) + self.assertIsInstance(element.tail, str) for elem in element: check_element(elem) diff --git a/Lib/test/test_zlib.py b/Lib/test/test_zlib.py index ae54f6c46891c6..3dac70eb12852c 100644 --- a/Lib/test/test_zlib.py +++ b/Lib/test/test_zlib.py @@ -944,13 +944,18 @@ def choose_lines(source, number, seed=None, generator=random): """ -class ZlibDecompressorTest(): +class ZlibDecompressorTest(unittest.TestCase): # Test adopted from test_bz2.py TEXT = HAMLET_SCENE DATA = zlib.compress(HAMLET_SCENE) BAD_DATA = b"Not a valid deflate block" + BIG_TEXT = DATA * ((128 * 1024 // len(DATA)) + 1) + BIG_DATA = zlib.compress(BIG_TEXT) + def test_Constructor(self): - self.assertRaises(TypeError, zlib._ZlibDecompressor, 42) + self.assertRaises(TypeError, zlib._ZlibDecompressor, "ASDA") + self.assertRaises(TypeError, zlib._ZlibDecompressor, -15, "notbytes") + self.assertRaises(TypeError, zlib._ZlibDecompressor, -15, b"bytes", 5) def testDecompress(self): zlibd = zlib._ZlibDecompressor() diff --git a/Makefile.pre.in b/Makefile.pre.in index 618bb7b5f7307d..7a84b953d97962 100644 --- a/Makefile.pre.in +++ b/Makefile.pre.in @@ -647,7 +647,7 @@ bolt-opt: @PREBOLT_RULE@ @LLVM_BOLT@ ./$(BUILDPYTHON) -instrument -instrumentation-file-append-pid -instrumentation-file=$(abspath $(BUILDPYTHON).bolt) -o $(BUILDPYTHON).bolt_inst ./$(BUILDPYTHON).bolt_inst $(PROFILE_TASK) || true @MERGE_FDATA@ $(BUILDPYTHON).*.fdata > $(BUILDPYTHON).fdata - @LLVM_BOLT@ ./$(BUILDPYTHON) -o $(BUILDPYTHON).bolt -data=$(BUILDPYTHON).fdata -update-debug-sections -reorder-blocks=ext-tsp -reorder-functions=hfsort+ -split-functions=3 -icf=1 -inline-all -split-eh -reorder-functions-use-hot-size -peepholes=all -jump-tables=aggressive -inline-ap -indirect-call-promotion=all -dyno-stats -use-gnu-stack -frame-opt=hot + @LLVM_BOLT@ ./$(BUILDPYTHON) -o $(BUILDPYTHON).bolt -data=$(BUILDPYTHON).fdata -update-debug-sections -reorder-blocks=ext-tsp -reorder-functions=hfsort+ -split-functions -icf=1 -inline-all -split-eh 
-reorder-functions-use-hot-size -peepholes=all -jump-tables=aggressive -inline-ap -indirect-call-promotion=all -dyno-stats -use-gnu-stack -frame-opt=hot rm -f *.fdata rm -f $(BUILDPYTHON).bolt_inst mv $(BUILDPYTHON).bolt $(BUILDPYTHON) @@ -1445,24 +1445,21 @@ regen-opcode-targets: .PHONY: regen-cases regen-cases: - # Regenerate Python/generated_cases.c.h from Python/bytecodes.c + # Regenerate Python/generated_cases.c.h + # and Python/opcode_metadata.h + # from Python/bytecodes.c # using Tools/cases_generator/generate_cases.py PYTHONPATH=$(srcdir)/Tools/cases_generator \ $(PYTHON_FOR_REGEN) \ $(srcdir)/Tools/cases_generator/generate_cases.py \ -i $(srcdir)/Python/bytecodes.c \ - -o $(srcdir)/Python/generated_cases.c.h.new + -o $(srcdir)/Python/generated_cases.c.h.new \ + -m $(srcdir)/Python/opcode_metadata.h.new $(UPDATE_FILE) $(srcdir)/Python/generated_cases.c.h $(srcdir)/Python/generated_cases.c.h.new - # Regenerate Python/opcode_metadata.h from Python/bytecodes.c - # using Tools/cases_generator/generate_cases.py --metadata - PYTHONPATH=$(srcdir)/Tools/cases_generator \ - $(PYTHON_FOR_REGEN) \ - $(srcdir)/Tools/cases_generator/generate_cases.py \ - --metadata \ - -i $(srcdir)/Python/bytecodes.c \ - -o $(srcdir)/Python/opcode_metadata.h.new $(UPDATE_FILE) $(srcdir)/Python/opcode_metadata.h $(srcdir)/Python/opcode_metadata.h.new +Python/compile.o: $(srcdir)/Python/opcode_metadata.h + Python/ceval.o: \ $(srcdir)/Python/ceval_macros.h \ $(srcdir)/Python/condvar.h \ @@ -1816,7 +1813,15 @@ commoninstall: check-clean-src @FRAMEWORKALTINSTALLFIRST@ \ # Install shared libraries enabled by Setup DESTDIRS= $(exec_prefix) $(LIBDIR) $(BINLIBDEST) $(DESTSHARED) -sharedinstall: $(DESTSHARED) all +sharedinstall: all + @for i in $(DESTDIRS); \ + do \ + if test ! -d $(DESTDIR)$$i; then \ + echo "Creating directory $$i"; \ + $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \ + else true; \ + fi; \ + done @for i in X $(SHAREDMODS); do \ if test $$i != X; then \ echo $(INSTALL_SHARED) $$i $(DESTSHARED)/`basename $$i`; \ @@ -1828,17 +1833,6 @@ sharedinstall: $(DESTSHARED) all fi; \ done - -$(DESTSHARED): - @for i in $(DESTDIRS); \ - do \ - if test ! 
-d $(DESTDIR)$$i; then \ - echo "Creating directory $$i"; \ - $(INSTALL) -d -m $(DIRMODE) $(DESTDIR)$$i; \ - else true; \ - fi; \ - done - # Install the interpreter with $(VERSION) affixed # This goes into $(exec_prefix) altbininstall: $(BUILDPYTHON) @FRAMEWORKPYTHONW@ @@ -2612,7 +2606,7 @@ MODULE__HASHLIB_DEPS=$(srcdir)/Modules/hashlib.h MODULE__IO_DEPS=$(srcdir)/Modules/_io/_iomodule.h MODULE__MD5_DEPS=$(srcdir)/Modules/hashlib.h MODULE__SHA1_DEPS=$(srcdir)/Modules/hashlib.h -MODULE__SHA256_DEPS=$(srcdir)/Modules/hashlib.h +MODULE__SHA256_DEPS=$(srcdir)/Modules/hashlib.h $(srcdir)/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h $(srcdir)/Modules/_hacl/include/krml/lowstar_endianness.h $(srcdir)/Modules/_hacl/include/krml/internal/target.h $(srcdir)/Modules/_hacl/Hacl_Streaming_SHA2.h MODULE__SHA3_DEPS=$(srcdir)/Modules/_sha3/sha3.c $(srcdir)/Modules/_sha3/sha3.h $(srcdir)/Modules/hashlib.h MODULE__SHA512_DEPS=$(srcdir)/Modules/hashlib.h MODULE__SOCKET_DEPS=$(srcdir)/Modules/socketmodule.h $(srcdir)/Modules/addrinfo.h $(srcdir)/Modules/getaddrinfo.c $(srcdir)/Modules/getnameinfo.c diff --git a/Misc/ACKS b/Misc/ACKS index 74abcebe21ea60..e12cbea0ebd6ed 100644 --- a/Misc/ACKS +++ b/Misc/ACKS @@ -754,6 +754,7 @@ Tim Hochberg Benjamin Hodgson Joerg-Cyril Hoehle Douwe Hoekstra +Robert Hoelzl Gregor Hoffleit Chris Hoffman Tim Hoffmann @@ -1413,6 +1414,7 @@ Jean-François Piéronne Oleg Plakhotnyuk Anatoliy Platonov Marcel Plch +Kirill Podoprigora Remi Pointel Jon Poler Ariel Poliak diff --git a/Misc/NEWS.d/3.12.0a5.rst b/Misc/NEWS.d/3.12.0a5.rst new file mode 100644 index 00000000000000..f6f8de46cf70d9 --- /dev/null +++ b/Misc/NEWS.d/3.12.0a5.rst @@ -0,0 +1,664 @@ +.. date: 2022-11-08-12-06-52 +.. gh-issue: 99108 +.. nonce: 4Wrsuh +.. release date: 2023-02-07 +.. section: Security + +Replace the builtin :mod:`hashlib` implementations of SHA2-224 and SHA2-256 +originally from LibTomCrypt with formally verified, side-channel resistant +code from the `HACL* `_ project. +The builtins remain a fallback only used when OpenSSL does not provide them. + +.. + +.. date: 2023-02-06-20-13-36 +.. gh-issue: 92173 +.. nonce: RQE0mk +.. section: Core and Builtins + +Fix the ``defs`` and ``kwdefs`` arguments to :c:func:`PyEval_EvalCodeEx` and +a reference leak in that function. + +.. + +.. date: 2023-01-30-11-56-09 +.. gh-issue: 59956 +.. nonce: 7xqnC_ +.. section: Core and Builtins + +The GILState API is now partially compatible with subinterpreters. +Previously, ``PyThreadState_GET()`` and ``PyGILState_GetThisThreadState()`` +would get out of sync, causing inconsistent behavior and crashes. + +.. + +.. date: 2023-01-30-08-59-47 +.. gh-issue: 101400 +.. nonce: Di_ZFm +.. section: Core and Builtins + +Fix wrong lineno in exception message on :keyword:`continue` or +:keyword:`break` which are not in a loop. Patch by Dong-hee Na. + +.. + +.. date: 2023-01-28-20-31-42 +.. gh-issue: 101372 +.. nonce: 8BcpCC +.. section: Core and Builtins + +Fix :func:`~unicodedata.is_normalized` to properly handle the UCD 3.2.0 +cases. Patch by Dong-hee Na. + +.. + +.. date: 2023-01-28-13-11-52 +.. gh-issue: 101266 +.. nonce: AxV3OF +.. section: Core and Builtins + +Fix :func:`sys.getsizeof` reporting for :class:`int` subclasses. + +.. + +.. date: 2023-01-24-17-13-32 +.. gh-issue: 101291 +.. nonce: Yr6u_c +.. section: Core and Builtins + +Refactor the ``PyLongObject`` struct into a normal Python object header and +a ``PyLongValue`` struct. + +.. + +.. date: 2023-01-15-03-26-04 +.. gh-issue: 101046 +.. nonce: g2CM4S +.. 
section: Core and Builtins + +Fix a possible memory leak in the parser when raising :exc:`MemoryError`. +Patch by Pablo Galindo + +.. + +.. date: 2023-01-14-17-03-08 +.. gh-issue: 101037 +.. nonce: 9ATNuf +.. section: Core and Builtins + +Fix potential memory underallocation issue for instances of :class:`int` +subclasses with value zero. + +.. + +.. date: 2023-01-13-12-56-20 +.. gh-issue: 100762 +.. nonce: YvHaQJ +.. section: Core and Builtins + +Record the (virtual) exception block depth in the oparg of +:opcode:`YIELD_VALUE`. Use this to avoid the expensive ``throw()`` when +closing generators (and coroutines) that can be closed trivially. + +.. + +.. date: 2023-01-12-13-46-49 +.. gh-issue: 100982 +.. nonce: mJ234s +.. section: Core and Builtins + +Adds a new :opcode:`COMPARE_AND_BRANCH` instruction. This is a bit more +efficient when performing a comparison immediately followed by a branch, and +restores the design intent of PEP 659 that specializations are local to a +single instruction. + +.. + +.. date: 2023-01-11-22-52-19 +.. gh-issue: 100942 +.. nonce: ontOy_ +.. section: Core and Builtins + +Fixed segfault in property.getter/setter/deleter that occurred when a +property subclass overrode the ``__new__`` method to return a non-property +instance. + +.. + +.. date: 2023-01-10-16-59-33 +.. gh-issue: 100923 +.. nonce: ypJAX- +.. section: Core and Builtins + +Remove the ``mask`` cache entry for the :opcode:`COMPARE_OP` instruction and +embed the mask into the oparg. + +.. + +.. date: 2023-01-10-14-11-17 +.. gh-issue: 100892 +.. nonce: qfBVYI +.. section: Core and Builtins + +Fix race while iterating over thread states in clearing +:class:`threading.local`. Patch by Kumar Aditya. + +.. + +.. date: 2023-01-06-09-22-21 +.. gh-issue: 91351 +.. nonce: iq2vZ_ +.. section: Core and Builtins + +Fix a case where re-entrant imports could corrupt the import deadlock +detection code and cause a :exc:`KeyError` to be raised out of +:mod:`importlib/_bootstrap`. In addition to the straightforward cases, this +could also happen when garbage collection leads to a warning being emitted +-- as happens when it collects an open socket or file) + +.. + +.. date: 2023-01-03-20-59-20 +.. gh-issue: 100726 +.. nonce: W9huFl +.. section: Core and Builtins + +Optimize construction of ``range`` object for medium size integers. + +.. + +.. date: 2023-01-03-14-33-23 +.. gh-issue: 100712 +.. nonce: po6xyB +.. section: Core and Builtins + +Added option to build cpython with specialization disabled, by setting +``ENABLE_SPECIALIZATION=False`` in :mod:`opcode`, followed by ``make +regen-all``. + +.. + +.. bpo: 32780 +.. date: 2018-02-05-21-54-46 +.. nonce: Dtiz8z +.. section: Core and Builtins + +Inter-field padding is now inserted into the PEP3118 format strings obtained +from :class:`ctypes.Structure` objects, reflecting their true representation +in memory. + +.. + +.. date: 2023-02-05-14-39-49 +.. gh-issue: 101541 +.. nonce: Mo3ppp +.. section: Library + +[Enum] - fix psuedo-flag creation + +.. + +.. date: 2023-02-04-21-01-49 +.. gh-issue: 101570 +.. nonce: lbtUsD +.. section: Library + +Upgrade pip wheel bundled with ensurepip (pip 23.0) + +.. + +.. date: 2023-01-26-06-44-35 +.. gh-issue: 101323 +.. nonce: h8Hk11 +.. section: Library + +Fix a bug where errors where not thrown by zlib._ZlibDecompressor if +encountered during decompressing. + +.. + +.. date: 2023-01-26-01-25-56 +.. gh-issue: 101317 +.. nonce: vWaS1x +.. 
section: Library + +Add *ssl_shutdown_timeout* parameter for +:meth:`asyncio.StreamWriter.start_tls`. + +.. + +.. date: 2023-01-25-18-07-20 +.. gh-issue: 101326 +.. nonce: KL4SFv +.. section: Library + +Fix regression when passing ``None`` as second or third argument to +``FutureIter.throw``. + +.. + +.. date: 2023-01-24-12-53-59 +.. gh-issue: 92123 +.. nonce: jf6TO5 +.. section: Library + +Adapt the ``_elementtree`` extension module to multi-phase init +(:pep:`489`). Patches by Erlend E. Aasland. + +.. + +.. date: 2023-01-21-16-50-22 +.. gh-issue: 100795 +.. nonce: NPMZf7 +.. section: Library + +Avoid potential unexpected ``freeaddrinfo`` call (double free) in +:mod:`socket` when when a libc ``getaddrinfo()`` implementation leaves +garbage in an output pointer when returning an error. Original patch by +Sergey G. Brester. + +.. + +.. date: 2023-01-20-10-46-59 +.. gh-issue: 101143 +.. nonce: hJo8hu +.. section: Library + +Remove unused references to :class:`~asyncio.TimerHandle` in +``asyncio.base_events.BaseEventLoop._add_callback``. + +.. + +.. date: 2023-01-18-17-58-50 +.. gh-issue: 101144 +.. nonce: FHd8Un +.. section: Library + +Make :func:`zipfile.Path.open` and :func:`zipfile.Path.read_text` also +accept ``encoding`` as a positional argument. This was the behavior in +Python 3.9 and earlier. 3.10 introduced a regression where supplying it as +a positional argument would lead to a :exc:`TypeError`. + +.. + +.. date: 2023-01-15-09-11-30 +.. gh-issue: 94518 +.. nonce: jvxtxm +.. section: Library + +Group-related variables of ``_posixsubprocess`` module are renamed to stress +that supplimentary group affinity is added to a fork, not replace the +inherited ones. Patch by Oleg Iarygin. + +.. + +.. date: 2023-01-14-12-58-21 +.. gh-issue: 101015 +.. nonce: stWFid +.. section: Library + +Fix :func:`typing.get_type_hints` on ``'*tuple[...]'`` and ``*tuple[...]``. +It must not drop the ``Unpack`` part. + +.. + +.. date: 2023-01-12-21-22-20 +.. gh-issue: 101000 +.. nonce: wz4Xgc +.. section: Library + +Add :func:`os.path.splitroot()`, which splits a path into a 3-item tuple +``(drive, root, tail)``. This new function is used by :mod:`pathlib` to +improve the performance of path construction by up to a third. + +.. + +.. date: 2023-01-12-01-18-13 +.. gh-issue: 100573 +.. nonce: KDskqo +.. section: Library + +Fix a Windows :mod:`asyncio` bug with named pipes where a client doing +``os.stat()`` on the pipe would cause an error in the server that disabled +serving future requests. + +.. + +.. date: 2023-01-08-00-12-44 +.. gh-issue: 39615 +.. nonce: gn4PhB +.. section: Library + +:func:`warnings.warn` now has the ability to skip stack frames based on code +filename prefix rather than only a numeric ``stacklevel`` via the new +``skip_file_prefixes`` keyword argument. + +.. + +.. date: 2023-01-04-14-42-59 +.. gh-issue: 100750 +.. nonce: iFJs5Y +.. section: Library + +pass encoding kwarg to subprocess in platform + +.. + +.. date: 2022-12-21-17-49-50 +.. gh-issue: 100160 +.. nonce: N0NHRj +.. section: Library + +Emit a deprecation warning in +:meth:`asyncio.DefaultEventLoopPolicy.get_event_loop` if there is no current +event loop set and it decides to create one. + +.. + +.. date: 2022-12-19-23-19-26 +.. gh-issue: 96290 +.. nonce: qFjsi6 +.. section: Library + +Fix handling of partial and invalid UNC drives in ``ntpath.splitdrive()``, +and in ``ntpath.normpath()`` on non-Windows systems. 
Paths such as +'\\server' and '\\' are now considered by ``splitdrive()`` to contain only a +drive, and consequently are not modified by ``normpath()`` on non-Windows +systems. The behaviour of ``normpath()`` on Windows systems is unaffected, +as native OS APIs are used. Patch by Eryk Sun, with contributions by Barney +Gale. + +.. + +.. date: 2022-12-11-14-38-59 +.. gh-issue: 99952 +.. nonce: IYGLzr +.. section: Library + +Fix a reference undercounting issue in :class:`ctypes.Structure` with +``from_param()`` results larger than a C pointer. + +.. + +.. date: 2022-12-10-15-30-17 +.. gh-issue: 67790 +.. nonce: P9YUZM +.. section: Library + +Add float-style formatting support for :class:`fractions.Fraction` +instances. + +.. + +.. date: 2022-11-24-21-52-31 +.. gh-issue: 99266 +.. nonce: 88GcV9 +.. section: Library + +Preserve more detailed error messages in :mod:`ctypes`. + +.. + +.. date: 2022-11-15-23-30-39 +.. gh-issue: 86682 +.. nonce: gK9i1N +.. section: Library + +Ensure runtime-created collections have the correct module name using the +newly added (internal) :func:`sys._getframemodulename`. + +.. + +.. date: 2022-11-14-03-06-03 +.. gh-issue: 88597 +.. nonce: EYJA-Q +.. section: Library + +:mod:`uuid` now has a command line interface. Try ``python -m uuid -h``. + +.. + +.. date: 2022-09-26-21-18-47 +.. gh-issue: 60580 +.. nonce: 0hBgde +.. section: Library + +:data:`ctypes.wintypes.BYTE` definition changed from :data:`~ctypes.c_byte` +to :data:`~ctypes.c_ubyte` to match Windows SDK. Patch by Anatoly Techtonik +and Oleg Iarygin. + +.. + +.. date: 2022-07-22-13-38-37 +.. gh-issue: 94518 +.. nonce: _ZP0cz +.. section: Library + +``_posixsubprocess`` now initializes all UID and GID variables using a +reserved ``-1`` value instead of a separate flag. Patch by Oleg Iarygin. + +.. + +.. bpo: 38941 +.. date: 2022-02-05-12-01-58 +.. nonce: 8IhvyG +.. section: Library + +The :mod:`xml.etree.ElementTree` module now emits :exc:`DeprecationWarning` +when testing the truth value of an :class:`xml.etree.ElementTree.Element`. +Before, the Python implementation emitted :exc:`FutureWarning`, and the C +implementation emitted nothing. + +.. + +.. bpo: 40077 +.. date: 2020-11-20-21-06-08 +.. nonce: M-iZq3 +.. section: Library + +Convert :mod:`elementtree` types to heap types. Patch by Erlend E. Aasland. + +.. + +.. bpo: 29847 +.. date: 2020-04-18-17-45-03 +.. nonce: Uxtbq0 +.. section: Library + +Fix a bug where :class:`pathlib.Path` accepted and ignored keyword +arguments. Patch provided by Yurii Karabas. + +.. + +.. date: 2018-05-21-17-18-00 +.. gh-issue: 77772 +.. nonce: Fhg84L +.. section: Library + +:class:`ctypes.CDLL`, :class:`ctypes.OleDLL`, :class:`ctypes.WinDLL`, and +:class:`ctypes.PyDLL` now accept :term:`path-like objects ` as their ``name`` argument. Patch by Robert Hoelzl. + +.. + +.. date: 2022-06-19-22-04-47 +.. gh-issue: 88324 +.. nonce: GHhSQ1 +.. section: Documentation + +Reword :mod:`subprocess` to emphasize default behavior of *stdin*, *stdout*, +and *stderr* arguments. Remove inaccurate statement about child file handle +inheritance. + +.. + +.. date: 2023-02-04-17-24-33 +.. gh-issue: 101334 +.. nonce: _yOqwg +.. section: Tests + +``test_tarfile`` has been updated to pass when run as a high UID. + +.. + +.. date: 2023-02-04-06-59-07 +.. gh-issue: 101282 +.. nonce: 7sQz5l +.. section: Build + +Update BOLT configration not to use depreacted usage of ``--split +functions``. Patch by Dong-hee Na. + +.. + +.. date: 2023-02-02-23-43-46 +.. gh-issue: 101522 +.. nonce: lnUDta +.. 
section: Build + +Allow overriding Windows dependencies versions and paths using MSBuild +properties. + +.. + +.. date: 2023-01-26-19-02-11 +.. gh-issue: 77532 +.. nonce: cXD8bg +.. section: Build + +Minor fixes to allow building with ``PlatformToolset=ClangCL`` on Windows. + +.. + +.. date: 2023-01-21-10-31-35 +.. gh-issue: 101152 +.. nonce: xvM8pL +.. section: Build + +In accordance with :PEP:`699`, the ``ma_version_tag`` field in +:c:type:`PyDictObject` is deprecated for extension modules. Accessing this +field will generate a compiler warning at compile time. This field will be +removed in Python 3.14. + +.. + +.. date: 2023-01-17-21-32-51 +.. gh-issue: 100340 +.. nonce: i9zRGM +.. section: Build + +Allows -Wno-int-conversion for wasm-sdk 17 and onwards, thus enables +building WASI builds once against the latest sdk. + +.. + +.. date: 2023-01-15-11-22-15 +.. gh-issue: 101060 +.. nonce: 0mYk9E +.. section: Build + +Conditionally add ``-fno-reorder-blocks-and-partition`` in configure. +Effectively fixes ``--enable-bolt`` when using Clang, as this appears to be +a GCC-only flag. + +.. + +.. date: 2022-10-27-09-57-12 +.. gh-issue: 98705 +.. nonce: H11XmR +.. section: Build + +``__bool__`` is defined in AIX system header files which breaks the build in +AIX, so undefine it. + +.. + +.. date: 2022-10-25-11-53-55 +.. gh-issue: 98636 +.. nonce: e0RPAr +.. section: Build + +Fix a regression in detecting ``gdbm_compat`` library for the ``_gdbm`` +module build. + +.. + +.. date: 2022-08-30-10-16-31 +.. gh-issue: 96305 +.. nonce: 274i8B +.. section: Build + +``_aix_support`` now uses a simple code to get platform details rather than +the now non-existent ``_bootsubprocess`` during bootstrap. + +.. + +.. date: 2023-02-03-17-53-06 +.. gh-issue: 101543 +.. nonce: cORAT4 +.. section: Windows + +Ensure the install path in the registry is only used when the standard +library hasn't been located in any other way. + +.. + +.. date: 2023-01-31-16-50-07 +.. gh-issue: 101467 +.. nonce: ye9t-L +.. section: Windows + +The ``py.exe`` launcher now correctly filters when only a single runtime is +installed. It also correctly handles prefix matches on tags so that ``-3.1`` +does not match ``3.11``, but would still match ``3.1-32``. + +.. + +.. date: 2023-01-25-00-23-31 +.. gh-issue: 99834 +.. nonce: WN41lc +.. section: Windows + +Updates bundled copy of Tcl/Tk to 8.6.13.0 + +.. + +.. date: 2023-01-18-18-25-18 +.. gh-issue: 101135 +.. nonce: HF9VlG +.. section: Windows + +Restore ability to launch older 32-bit versions from the :file:`py.exe` +launcher when both 32-bit and 64-bit installs of the same version are +available. + +.. + +.. date: 2023-01-17-18-17-58 +.. gh-issue: 82052 +.. nonce: mWyysT +.. section: Windows + +Fixed an issue where writing more than 32K of Unicode output to the console +screen in one go can result in mojibake. + +.. + +.. date: 2023-01-11-16-28-09 +.. gh-issue: 100320 +.. nonce: 2DU2it +.. section: Windows + +Ensures the ``PythonPath`` registry key from an install is used when +launching from a different copy of Python that relies on an existing install +to provide a copy of its modules and standard library. + +.. + +.. date: 2023-01-11-14-42-11 +.. gh-issue: 100247 +.. nonce: YfEmSz +.. section: Windows + +Restores support for the :file:`py.exe` launcher finding shebang commands in +its configuration file using the full command name. 
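As a quick illustration of the ``os.path.splitroot()`` entry in the Library section above, a minimal sketch of the ``(drive, root, tail)`` split; the exact tuples shown are inferred from the entry's description and the platform-specific implementations, not copied from test output::

    import ntpath
    import posixpath

    # POSIX paths have no drive; the root is the leading slash.
    print(posixpath.splitroot("/home/user/docs"))
    # expected: ('', '/', 'home/user/docs')

    # Windows paths split into drive letter, root separator and tail.
    print(ntpath.splitroot("C:\\Users\\guido"))
    # expected: ('C:', '\\', 'Users\\guido')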
diff --git a/Misc/NEWS.d/next/Build/2022-10-25-11-53-55.gh-issue-98636.e0RPAr.rst b/Misc/NEWS.d/next/Build/2022-10-25-11-53-55.gh-issue-98636.e0RPAr.rst deleted file mode 100644 index 26a7cc8acaf243..00000000000000 --- a/Misc/NEWS.d/next/Build/2022-10-25-11-53-55.gh-issue-98636.e0RPAr.rst +++ /dev/null @@ -1,2 +0,0 @@ -Fix a regression in detecting ``gdbm_compat`` library for the ``_gdbm`` -module build. diff --git a/Misc/NEWS.d/next/Build/2022-12-18-08-33-28.gh-issue-100221.K94Ct3.rst b/Misc/NEWS.d/next/Build/2022-12-18-08-33-28.gh-issue-100221.K94Ct3.rst new file mode 100644 index 00000000000000..27c948330cfc17 --- /dev/null +++ b/Misc/NEWS.d/next/Build/2022-12-18-08-33-28.gh-issue-100221.K94Ct3.rst @@ -0,0 +1,2 @@ +Fix creating install directories in ``make sharedinstall`` if they exist +outside ``DESTDIR`` already. diff --git a/Misc/NEWS.d/next/Build/2023-01-15-11-22-15.gh-issue-101060.0mYk9E.rst b/Misc/NEWS.d/next/Build/2023-01-15-11-22-15.gh-issue-101060.0mYk9E.rst deleted file mode 100644 index bebbf8c898d547..00000000000000 --- a/Misc/NEWS.d/next/Build/2023-01-15-11-22-15.gh-issue-101060.0mYk9E.rst +++ /dev/null @@ -1,3 +0,0 @@ -Conditionally add ``-fno-reorder-blocks-and-partition`` in configure. -Effectively fixes ``--enable-bolt`` when using Clang, as this appears to be -a GCC-only flag. diff --git a/Misc/NEWS.d/next/Build/2023-01-17-21-32-51.gh-issue-100340.i9zRGM.rst b/Misc/NEWS.d/next/Build/2023-01-17-21-32-51.gh-issue-100340.i9zRGM.rst deleted file mode 100644 index 3a37f798dc6c6d..00000000000000 --- a/Misc/NEWS.d/next/Build/2023-01-17-21-32-51.gh-issue-100340.i9zRGM.rst +++ /dev/null @@ -1,2 +0,0 @@ -Allows -Wno-int-conversion for wasm-sdk 17 and onwards, thus enables -building WASI builds once against the latest sdk. diff --git a/Misc/NEWS.d/next/Build/2023-01-21-10-31-35.gh-issue-101152.xvM8pL.rst b/Misc/NEWS.d/next/Build/2023-01-21-10-31-35.gh-issue-101152.xvM8pL.rst deleted file mode 100644 index e35b6178aa4cf9..00000000000000 --- a/Misc/NEWS.d/next/Build/2023-01-21-10-31-35.gh-issue-101152.xvM8pL.rst +++ /dev/null @@ -1,3 +0,0 @@ -In accordance with :PEP:`699`, the ``ma_version_tag`` field in :c:type:`PyDictObject` -is deprecated for extension modules. Accessing this field will generate a compiler -warning at compile time. This field will be removed in Python 3.14. diff --git a/Misc/NEWS.d/next/Build/2023-01-26-19-02-11.gh-issue-77532.cXD8bg.rst b/Misc/NEWS.d/next/Build/2023-01-26-19-02-11.gh-issue-77532.cXD8bg.rst deleted file mode 100644 index 5a746dca2e7d8d..00000000000000 --- a/Misc/NEWS.d/next/Build/2023-01-26-19-02-11.gh-issue-77532.cXD8bg.rst +++ /dev/null @@ -1 +0,0 @@ -Minor fixes to allow building with ``PlatformToolset=ClangCL`` on Windows. diff --git a/Misc/NEWS.d/next/C API/2023-02-06-16-14-30.gh-issue-101578.PW5fA9.rst b/Misc/NEWS.d/next/C API/2023-02-06-16-14-30.gh-issue-101578.PW5fA9.rst new file mode 100644 index 00000000000000..fc694f6e051b53 --- /dev/null +++ b/Misc/NEWS.d/next/C API/2023-02-06-16-14-30.gh-issue-101578.PW5fA9.rst @@ -0,0 +1,13 @@ +Add new C-API functions for saving and restoring the current exception: +``PyErr_GetRaisedException`` and ``PyErr_SetRaisedException``. +These functions take and return a single exception rather than +the triple of ``PyErr_Fetch`` and ``PyErr_Restore``. +This is less error prone and a bit more efficient. + +The three arguments forms of saving and restoring the +current exception: ``PyErr_Fetch`` and ``PyErr_Restore`` +are deprecated. 
+ +Also add ``PyException_GetArgs`` and ``PyException_SetArgs`` +as convenience functions to help dealing with +exceptions in the C API. diff --git a/Misc/NEWS.d/next/C API/2023-02-09-10-38-20.gh-issue-99293.mFqfpp.rst b/Misc/NEWS.d/next/C API/2023-02-09-10-38-20.gh-issue-99293.mFqfpp.rst new file mode 100644 index 00000000000000..8c0f05543747dc --- /dev/null +++ b/Misc/NEWS.d/next/C API/2023-02-09-10-38-20.gh-issue-99293.mFqfpp.rst @@ -0,0 +1,2 @@ +Document that the Py_TPFLAGS_VALID_VERSION_TAG is an internal feature, +should not be used, and will be removed. diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-03-14-33-23.gh-issue-100712.po6xyB.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-03-14-33-23.gh-issue-100712.po6xyB.rst deleted file mode 100644 index 3ebee0dd2aa48f..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-03-14-33-23.gh-issue-100712.po6xyB.rst +++ /dev/null @@ -1 +0,0 @@ -Added option to build cpython with specialization disabled, by setting ``ENABLE_SPECIALIZATION=False`` in :mod:`opcode`, followed by ``make regen-all``. diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-03-20-59-20.gh-issue-100726.W9huFl.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-03-20-59-20.gh-issue-100726.W9huFl.rst deleted file mode 100644 index 2c93098b347a7f..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-03-20-59-20.gh-issue-100726.W9huFl.rst +++ /dev/null @@ -1 +0,0 @@ -Optimize construction of ``range`` object for medium size integers. diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-06-09-22-21.gh-issue-91351.iq2vZ_.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-06-09-22-21.gh-issue-91351.iq2vZ_.rst deleted file mode 100644 index 19de1f8d0fb31e..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-06-09-22-21.gh-issue-91351.iq2vZ_.rst +++ /dev/null @@ -1,5 +0,0 @@ -Fix a case where re-entrant imports could corrupt the import deadlock -detection code and cause a :exc:`KeyError` to be raised out of -:mod:`importlib/_bootstrap`. In addition to the straightforward cases, this -could also happen when garbage collection leads to a warning being emitted -- -as happens when it collects an open socket or file) diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-10-14-11-17.gh-issue-100892.qfBVYI.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-10-14-11-17.gh-issue-100892.qfBVYI.rst deleted file mode 100644 index f2576becc2fcfc..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-10-14-11-17.gh-issue-100892.qfBVYI.rst +++ /dev/null @@ -1 +0,0 @@ -Fix race while iterating over thread states in clearing :class:`threading.local`. Patch by Kumar Aditya. diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-10-16-59-33.gh-issue-100923.ypJAX-.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-10-16-59-33.gh-issue-100923.ypJAX-.rst deleted file mode 100644 index b6b3f1d0c58f8e..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-10-16-59-33.gh-issue-100923.ypJAX-.rst +++ /dev/null @@ -1,2 +0,0 @@ -Remove the ``mask`` cache entry for the :opcode:`COMPARE_OP` instruction and -embed the mask into the oparg. 
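The ``PyException_GetArgs`` and ``PyException_SetArgs`` additions noted above work on the same value that Python code reads and writes as the exception's ``args`` attribute. A purely illustrative Python-level sketch of that attribute (the C calls themselves are not shown)::

    try:
        raise ValueError("bad value", 42)
    except ValueError as exc:
        print(exc.args)            # ('bad value', 42)
        exc.args = ("rewritten",)  # the Python-level counterpart of setting args from C
        print(exc)                 # rewritten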
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-11-22-52-19.gh-issue-100942.ontOy_.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-11-22-52-19.gh-issue-100942.ontOy_.rst deleted file mode 100644 index daccea255b1626..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-11-22-52-19.gh-issue-100942.ontOy_.rst +++ /dev/null @@ -1,2 +0,0 @@ -Fixed segfault in property.getter/setter/deleter that occurred when a property -subclass overrode the ``__new__`` method to return a non-property instance. diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-12-13-46-49.gh-issue-100982.mJ234s.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-12-13-46-49.gh-issue-100982.mJ234s.rst deleted file mode 100644 index 4f43e783cd6a19..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-12-13-46-49.gh-issue-100982.mJ234s.rst +++ /dev/null @@ -1,4 +0,0 @@ -Adds a new :opcode:`COMPARE_AND_BRANCH` instruction. This is a bit more -efficient when performing a comparison immediately followed by a branch, and -restores the design intent of PEP 659 that specializations are local to a -single instruction. diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-13-12-56-20.gh-issue-100762.YvHaQJ.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-13-12-56-20.gh-issue-100762.YvHaQJ.rst deleted file mode 100644 index 2f6b121439a985..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-13-12-56-20.gh-issue-100762.YvHaQJ.rst +++ /dev/null @@ -1,3 +0,0 @@ -Record the (virtual) exception block depth in the oparg of -:opcode:`YIELD_VALUE`. Use this to avoid the expensive ``throw()`` when -closing generators (and coroutines) that can be closed trivially. diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-14-17-03-08.gh-issue-101037.9ATNuf.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-14-17-03-08.gh-issue-101037.9ATNuf.rst deleted file mode 100644 index a48756657a29d3..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-14-17-03-08.gh-issue-101037.9ATNuf.rst +++ /dev/null @@ -1,2 +0,0 @@ -Fix potential memory underallocation issue for instances of :class:`int` -subclasses with value zero. diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-15-03-26-04.gh-issue-101046.g2CM4S.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-15-03-26-04.gh-issue-101046.g2CM4S.rst deleted file mode 100644 index f600473620f109..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-15-03-26-04.gh-issue-101046.g2CM4S.rst +++ /dev/null @@ -1,2 +0,0 @@ -Fix a possible memory leak in the parser when raising :exc:`MemoryError`. -Patch by Pablo Galindo diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-24-17-13-32.gh-issue-101291.Yr6u_c.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-24-17-13-32.gh-issue-101291.Yr6u_c.rst deleted file mode 100644 index b585ff5a817edf..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-24-17-13-32.gh-issue-101291.Yr6u_c.rst +++ /dev/null @@ -1,2 +0,0 @@ -Refactor the ``PyLongObject`` struct into a normal Python object header and -a ``PyLongValue`` struct. diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-01-30-08-59-47.gh-issue-101400.Di_ZFm.rst b/Misc/NEWS.d/next/Core and Builtins/2023-01-30-08-59-47.gh-issue-101400.Di_ZFm.rst deleted file mode 100644 index f3dd783c01e7c0..00000000000000 --- a/Misc/NEWS.d/next/Core and Builtins/2023-01-30-08-59-47.gh-issue-101400.Di_ZFm.rst +++ /dev/null @@ -1,2 +0,0 @@ -Fix wrong lineno in exception message on :keyword:`continue` or -:keyword:`break` which are not in a loop. Patch by Dong-hee Na. 
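For the ``continue``/``break`` line-number fix just above, a minimal check; the message text may vary between releases, only the reported ``lineno`` matters here::

    src = "x = 1\ncontinue\n"
    try:
        compile(src, "<example>", "exec")
    except SyntaxError as err:
        # With the fix, the error points at line 2, where the stray
        # 'continue' actually is.
        print(err.lineno, err.msg)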
diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-02-07-14-56-43.gh-issue-101632.Fd1yxk.rst b/Misc/NEWS.d/next/Core and Builtins/2023-02-07-14-56-43.gh-issue-101632.Fd1yxk.rst new file mode 100644 index 00000000000000..136909ca699903 --- /dev/null +++ b/Misc/NEWS.d/next/Core and Builtins/2023-02-07-14-56-43.gh-issue-101632.Fd1yxk.rst @@ -0,0 +1 @@ +Adds a new :opcode:`RETURN_CONST` instruction. diff --git a/Misc/NEWS.d/next/Core and Builtins/2023-02-08-17-13-31.gh-issue-101696.seJhTt.rst b/Misc/NEWS.d/next/Core and Builtins/2023-02-08-17-13-31.gh-issue-101696.seJhTt.rst new file mode 100644 index 00000000000000..ff2bbb4b564252 --- /dev/null +++ b/Misc/NEWS.d/next/Core and Builtins/2023-02-08-17-13-31.gh-issue-101696.seJhTt.rst @@ -0,0 +1 @@ +Invalidate type version tag in ``_PyStaticType_Dealloc`` for static types, avoiding bug where a false cache hit could crash the interpreter. Patch by Kumar Aditya. diff --git a/Misc/NEWS.d/next/Documentation/2022-06-19-22-04-47.gh-issue-88324.GHhSQ1.rst b/Misc/NEWS.d/next/Documentation/2022-06-19-22-04-47.gh-issue-88324.GHhSQ1.rst deleted file mode 100644 index 6c8d192daa7955..00000000000000 --- a/Misc/NEWS.d/next/Documentation/2022-06-19-22-04-47.gh-issue-88324.GHhSQ1.rst +++ /dev/null @@ -1,3 +0,0 @@ -Reword :mod:`subprocess` to emphasize default behavior of *stdin*, *stdout*, -and *stderr* arguments. Remove inaccurate statement about child file handle -inheritance. diff --git a/Misc/NEWS.d/next/Documentation/2023-02-07-21-43-24.gh-issue-97725.cuY7Cd.rst b/Misc/NEWS.d/next/Documentation/2023-02-07-21-43-24.gh-issue-97725.cuY7Cd.rst new file mode 100644 index 00000000000000..fd9ea049c23968 --- /dev/null +++ b/Misc/NEWS.d/next/Documentation/2023-02-07-21-43-24.gh-issue-97725.cuY7Cd.rst @@ -0,0 +1,2 @@ +Fix :meth:`asyncio.Task.print_stack` description for ``file=None``. +Patch by Oleg Iarygin. diff --git a/Misc/NEWS.d/next/Library/2020-04-18-17-45-03.bpo-29847.Uxtbq0.rst b/Misc/NEWS.d/next/Library/2020-04-18-17-45-03.bpo-29847.Uxtbq0.rst deleted file mode 100644 index 010d775a0d98ee..00000000000000 --- a/Misc/NEWS.d/next/Library/2020-04-18-17-45-03.bpo-29847.Uxtbq0.rst +++ /dev/null @@ -1 +0,0 @@ -Fix a bug where :class:`pathlib.Path` accepted and ignored keyword arguments. Patch provided by Yurii Karabas. diff --git a/Misc/NEWS.d/next/Library/2020-11-20-21-06-08.bpo-40077.M-iZq3.rst b/Misc/NEWS.d/next/Library/2020-11-20-21-06-08.bpo-40077.M-iZq3.rst deleted file mode 100644 index 8a74477a4b359d..00000000000000 --- a/Misc/NEWS.d/next/Library/2020-11-20-21-06-08.bpo-40077.M-iZq3.rst +++ /dev/null @@ -1 +0,0 @@ -Convert :mod:`elementtree` types to heap types. Patch by Erlend E. Aasland. diff --git a/Misc/NEWS.d/next/Library/2022-02-05-12-01-58.bpo-38941.8IhvyG.rst b/Misc/NEWS.d/next/Library/2022-02-05-12-01-58.bpo-38941.8IhvyG.rst deleted file mode 100644 index 5f996042260d09..00000000000000 --- a/Misc/NEWS.d/next/Library/2022-02-05-12-01-58.bpo-38941.8IhvyG.rst +++ /dev/null @@ -1,4 +0,0 @@ -The :mod:`xml.etree.ElementTree` module now emits :exc:`DeprecationWarning` -when testing the truth value of an :class:`xml.etree.ElementTree.Element`. -Before, the Python implementation emitted :exc:`FutureWarning`, and the C -implementation emitted nothing. 
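To observe the :exc:`DeprecationWarning` described in the ``xml.etree.ElementTree`` entry just above, a short sketch (the recorded message text is indicative only)::

    import warnings
    import xml.etree.ElementTree as ET

    elem = ET.fromstring("<root/>")
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        bool(elem)  # truth-testing an Element now warns in both implementations
    print([str(w.message) for w in caught])
    # expected: one DeprecationWarning suggesting an explicit len()/None test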
diff --git a/Misc/NEWS.d/next/Library/2022-07-22-13-38-37.gh-issue-94518._ZP0cz.rst b/Misc/NEWS.d/next/Library/2022-07-22-13-38-37.gh-issue-94518._ZP0cz.rst deleted file mode 100644 index a9d6d69f7effac..00000000000000 --- a/Misc/NEWS.d/next/Library/2022-07-22-13-38-37.gh-issue-94518._ZP0cz.rst +++ /dev/null @@ -1,2 +0,0 @@ -``_posixsubprocess`` now initializes all UID and GID variables using a -reserved ``-1`` value instead of a separate flag. Patch by Oleg Iarygin. diff --git a/Misc/NEWS.d/next/Library/2022-09-26-21-18-47.gh-issue-60580.0hBgde.rst b/Misc/NEWS.d/next/Library/2022-09-26-21-18-47.gh-issue-60580.0hBgde.rst deleted file mode 100644 index 630e56cd2f7b87..00000000000000 --- a/Misc/NEWS.d/next/Library/2022-09-26-21-18-47.gh-issue-60580.0hBgde.rst +++ /dev/null @@ -1,3 +0,0 @@ -:data:`ctypes.wintypes.BYTE` definition changed from -:data:`~ctypes.c_byte` to :data:`~ctypes.c_ubyte` to match Windows -SDK. Patch by Anatoly Techtonik and Oleg Iarygin. diff --git a/Misc/NEWS.d/next/Library/2022-11-14-03-06-03.gh-issue-88597.EYJA-Q.rst b/Misc/NEWS.d/next/Library/2022-11-14-03-06-03.gh-issue-88597.EYJA-Q.rst deleted file mode 100644 index a98e1ab4d15734..00000000000000 --- a/Misc/NEWS.d/next/Library/2022-11-14-03-06-03.gh-issue-88597.EYJA-Q.rst +++ /dev/null @@ -1 +0,0 @@ -:mod:`uuid` now has a command line interface. Try ``python -m uuid -h``. diff --git a/Misc/NEWS.d/next/Library/2022-11-15-23-30-39.gh-issue-86682.gK9i1N.rst b/Misc/NEWS.d/next/Library/2022-11-15-23-30-39.gh-issue-86682.gK9i1N.rst deleted file mode 100644 index 64ef42a9a1c0b2..00000000000000 --- a/Misc/NEWS.d/next/Library/2022-11-15-23-30-39.gh-issue-86682.gK9i1N.rst +++ /dev/null @@ -1,2 +0,0 @@ -Ensure runtime-created collections have the correct module name using -the newly added (internal) :func:`sys._getframemodulename`. diff --git a/Misc/NEWS.d/next/Library/2022-11-24-21-52-31.gh-issue-99266.88GcV9.rst b/Misc/NEWS.d/next/Library/2022-11-24-21-52-31.gh-issue-99266.88GcV9.rst deleted file mode 100644 index 97e9569e40a9bf..00000000000000 --- a/Misc/NEWS.d/next/Library/2022-11-24-21-52-31.gh-issue-99266.88GcV9.rst +++ /dev/null @@ -1 +0,0 @@ -Preserve more detailed error messages in :mod:`ctypes`. diff --git a/Misc/NEWS.d/next/Library/2022-12-10-15-30-17.gh-issue-67790.P9YUZM.rst b/Misc/NEWS.d/next/Library/2022-12-10-15-30-17.gh-issue-67790.P9YUZM.rst deleted file mode 100644 index ba0db774f8b318..00000000000000 --- a/Misc/NEWS.d/next/Library/2022-12-10-15-30-17.gh-issue-67790.P9YUZM.rst +++ /dev/null @@ -1,2 +0,0 @@ -Add float-style formatting support for :class:`fractions.Fraction` -instances. diff --git a/Misc/NEWS.d/next/Library/2022-12-11-14-38-59.gh-issue-99952.IYGLzr.rst b/Misc/NEWS.d/next/Library/2022-12-11-14-38-59.gh-issue-99952.IYGLzr.rst deleted file mode 100644 index 09ec961249534f..00000000000000 --- a/Misc/NEWS.d/next/Library/2022-12-11-14-38-59.gh-issue-99952.IYGLzr.rst +++ /dev/null @@ -1,2 +0,0 @@ -Fix a reference undercounting issue in :class:`ctypes.Structure` with ``from_param()`` -results larger than a C pointer. diff --git a/Misc/NEWS.d/next/Library/2022-12-19-23-19-26.gh-issue-96290.qFjsi6.rst b/Misc/NEWS.d/next/Library/2022-12-19-23-19-26.gh-issue-96290.qFjsi6.rst deleted file mode 100644 index 33f98602bd1b71..00000000000000 --- a/Misc/NEWS.d/next/Library/2022-12-19-23-19-26.gh-issue-96290.qFjsi6.rst +++ /dev/null @@ -1,5 +0,0 @@ -Fix handling of partial and invalid UNC drives in ``ntpath.splitdrive()``, and in -``ntpath.normpath()`` on non-Windows systems. 
Paths such as '\\server' and '\\' are now considered -by ``splitdrive()`` to contain only a drive, and consequently are not modified by ``normpath()`` on -non-Windows systems. The behaviour of ``normpath()`` on Windows systems is unaffected, as native -OS APIs are used. Patch by Eryk Sun, with contributions by Barney Gale. diff --git a/Misc/NEWS.d/next/Library/2022-12-21-17-49-50.gh-issue-100160.N0NHRj.rst b/Misc/NEWS.d/next/Library/2022-12-21-17-49-50.gh-issue-100160.N0NHRj.rst deleted file mode 100644 index d5cc785722d7fd..00000000000000 --- a/Misc/NEWS.d/next/Library/2022-12-21-17-49-50.gh-issue-100160.N0NHRj.rst +++ /dev/null @@ -1,3 +0,0 @@ -Emit a deprecation warning in -:meth:`asyncio.DefaultEventLoopPolicy.get_event_loop` if there is no current -event loop set and it decides to create one. diff --git a/Misc/NEWS.d/next/Library/2023-01-04-14-42-59.gh-issue-100750.iFJs5Y.rst b/Misc/NEWS.d/next/Library/2023-01-04-14-42-59.gh-issue-100750.iFJs5Y.rst deleted file mode 100644 index be351532822c4b..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-04-14-42-59.gh-issue-100750.iFJs5Y.rst +++ /dev/null @@ -1 +0,0 @@ -pass encoding kwarg to subprocess in platform diff --git a/Misc/NEWS.d/next/Library/2023-01-08-00-12-44.gh-issue-39615.gn4PhB.rst b/Misc/NEWS.d/next/Library/2023-01-08-00-12-44.gh-issue-39615.gn4PhB.rst deleted file mode 100644 index 1d04cc2cd54b1e..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-08-00-12-44.gh-issue-39615.gn4PhB.rst +++ /dev/null @@ -1,3 +0,0 @@ -:func:`warnings.warn` now has the ability to skip stack frames based on code -filename prefix rather than only a numeric ``stacklevel`` via the new -``skip_file_prefixes`` keyword argument. diff --git a/Misc/NEWS.d/next/Library/2023-01-12-01-18-13.gh-issue-100573.KDskqo.rst b/Misc/NEWS.d/next/Library/2023-01-12-01-18-13.gh-issue-100573.KDskqo.rst deleted file mode 100644 index 97b95d18d1e426..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-12-01-18-13.gh-issue-100573.KDskqo.rst +++ /dev/null @@ -1 +0,0 @@ -Fix a Windows :mod:`asyncio` bug with named pipes where a client doing ``os.stat()`` on the pipe would cause an error in the server that disabled serving future requests. diff --git a/Misc/NEWS.d/next/Library/2023-01-12-21-22-20.gh-issue-101000.wz4Xgc.rst b/Misc/NEWS.d/next/Library/2023-01-12-21-22-20.gh-issue-101000.wz4Xgc.rst deleted file mode 100644 index 2082361c41d697..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-12-21-22-20.gh-issue-101000.wz4Xgc.rst +++ /dev/null @@ -1,3 +0,0 @@ -Add :func:`os.path.splitroot()`, which splits a path into a 3-item tuple -``(drive, root, tail)``. This new function is used by :mod:`pathlib` to -improve the performance of path construction by up to a third. diff --git a/Misc/NEWS.d/next/Library/2023-01-14-12-58-21.gh-issue-101015.stWFid.rst b/Misc/NEWS.d/next/Library/2023-01-14-12-58-21.gh-issue-101015.stWFid.rst deleted file mode 100644 index b9d73ff9855236..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-14-12-58-21.gh-issue-101015.stWFid.rst +++ /dev/null @@ -1,2 +0,0 @@ -Fix :func:`typing.get_type_hints` on ``'*tuple[...]'`` and ``*tuple[...]``. -It must not drop the ``Unpack`` part. 
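The ``skip_file_prefixes`` argument to :func:`warnings.warn` mentioned a few entries above matches caller filenames instead of counting stack frames. A hedged sketch of how a package helper might use it; the ``_helpers`` module name and prefix value are invented for the example::

    # _helpers.py inside a hypothetical package
    import os
    import warnings

    _PKG_DIR = os.path.dirname(__file__)

    def old_api():
        # Attribute the warning to the first caller whose file lies outside
        # this package, instead of guessing a numeric stacklevel.
        warnings.warn("old_api() is deprecated",
                      DeprecationWarning,
                      skip_file_prefixes=(_PKG_DIR,))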
diff --git a/Misc/NEWS.d/next/Library/2023-01-15-09-11-30.gh-issue-94518.jvxtxm.rst b/Misc/NEWS.d/next/Library/2023-01-15-09-11-30.gh-issue-94518.jvxtxm.rst deleted file mode 100644 index 77563090464dbc..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-15-09-11-30.gh-issue-94518.jvxtxm.rst +++ /dev/null @@ -1,3 +0,0 @@ -Group-related variables of ``_posixsubprocess`` module are renamed to -stress that supplimentary group affinity is added to a fork, not -replace the inherited ones. Patch by Oleg Iarygin. diff --git a/Misc/NEWS.d/next/Library/2023-01-16-10-42-58.gh-issue-89381.lM2WL0.rst b/Misc/NEWS.d/next/Library/2023-01-16-10-42-58.gh-issue-89381.lM2WL0.rst deleted file mode 100644 index 7bffe7d226e38a..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-16-10-42-58.gh-issue-89381.lM2WL0.rst +++ /dev/null @@ -1 +0,0 @@ -:func:`~math.log` and :func:`~cmath.log` support default base=None values. diff --git a/Misc/NEWS.d/next/Library/2023-01-18-17-58-50.gh-issue-101144.FHd8Un.rst b/Misc/NEWS.d/next/Library/2023-01-18-17-58-50.gh-issue-101144.FHd8Un.rst deleted file mode 100644 index 297652259949fc..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-18-17-58-50.gh-issue-101144.FHd8Un.rst +++ /dev/null @@ -1,4 +0,0 @@ -Make :func:`zipfile.Path.open` and :func:`zipfile.Path.read_text` also accept -``encoding`` as a positional argument. This was the behavior in Python 3.9 and -earlier. 3.10 introduced a regression where supplying it as a positional -argument would lead to a :exc:`TypeError`. diff --git a/Misc/NEWS.d/next/Library/2023-01-20-10-46-59.gh-issue-101143.hJo8hu.rst b/Misc/NEWS.d/next/Library/2023-01-20-10-46-59.gh-issue-101143.hJo8hu.rst deleted file mode 100644 index d14b9e25a691fc..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-20-10-46-59.gh-issue-101143.hJo8hu.rst +++ /dev/null @@ -1,2 +0,0 @@ -Remove unused references to :class:`~asyncio.TimerHandle` in -``asyncio.base_events.BaseEventLoop._add_callback``. diff --git a/Misc/NEWS.d/next/Library/2023-01-21-16-50-22.gh-issue-100795.NPMZf7.rst b/Misc/NEWS.d/next/Library/2023-01-21-16-50-22.gh-issue-100795.NPMZf7.rst deleted file mode 100644 index 4cb56ea0f0e58d..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-21-16-50-22.gh-issue-100795.NPMZf7.rst +++ /dev/null @@ -1,3 +0,0 @@ -Avoid potential unexpected ``freeaddrinfo`` call (double free) in :mod:`socket` -when when a libc ``getaddrinfo()`` implementation leaves garbage in an output -pointer when returning an error. Original patch by Sergey G. Brester. diff --git a/Misc/NEWS.d/next/Library/2023-01-24-12-53-59.gh-issue-92123.jf6TO5.rst b/Misc/NEWS.d/next/Library/2023-01-24-12-53-59.gh-issue-92123.jf6TO5.rst deleted file mode 100644 index 4b4443a55fdb1a..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-24-12-53-59.gh-issue-92123.jf6TO5.rst +++ /dev/null @@ -1,2 +0,0 @@ -Adapt the ``_elementtree`` extension module to multi-phase init (:pep:`489`). -Patches by Erlend E. Aasland. diff --git a/Misc/NEWS.d/next/Library/2023-01-25-18-07-20.gh-issue-101326.KL4SFv.rst b/Misc/NEWS.d/next/Library/2023-01-25-18-07-20.gh-issue-101326.KL4SFv.rst deleted file mode 100644 index 54b69b9430910d..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-25-18-07-20.gh-issue-101326.KL4SFv.rst +++ /dev/null @@ -1 +0,0 @@ -Fix regression when passing ``None`` as second or third argument to ``FutureIter.throw``. 
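The zipfile entry above restores passing ``encoding`` positionally to :meth:`zipfile.Path.open` and :meth:`zipfile.Path.read_text`; a small self-contained sketch, with the expected output noted as a comment::

    import io
    import zipfile

    buf = io.BytesIO()
    with zipfile.ZipFile(buf, "w") as zf:
        zf.writestr("hello.txt", "hi")

    root = zipfile.Path(zipfile.ZipFile(buf))
    # ``encoding`` as a positional argument works again, as on 3.9.
    print((root / "hello.txt").read_text("utf-8"))  # expected: hi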
diff --git a/Misc/NEWS.d/next/Library/2023-01-26-01-25-56.gh-issue-101317.vWaS1x.rst b/Misc/NEWS.d/next/Library/2023-01-26-01-25-56.gh-issue-101317.vWaS1x.rst deleted file mode 100644 index f1ce0e0a527661..00000000000000 --- a/Misc/NEWS.d/next/Library/2023-01-26-01-25-56.gh-issue-101317.vWaS1x.rst +++ /dev/null @@ -1,2 +0,0 @@ -Add *ssl_shutdown_timeout* parameter for :meth:`asyncio.StreamWriter.start_tls`. - diff --git a/Misc/NEWS.d/next/Library/2023-02-05-21-40-15.gh-issue-85984.Kfzbb2.rst b/Misc/NEWS.d/next/Library/2023-02-05-21-40-15.gh-issue-85984.Kfzbb2.rst new file mode 100644 index 00000000000000..c91829f2c739af --- /dev/null +++ b/Misc/NEWS.d/next/Library/2023-02-05-21-40-15.gh-issue-85984.Kfzbb2.rst @@ -0,0 +1,4 @@ +Refactored the implementation of :func:`pty.fork` to use :func:`os.login_tty`. + +A :exc:`DeprecationWarning` is now raised by ``pty.master_open()`` and ``pty.slave_open()``. They were +undocumented and deprecated long long ago in the docstring in favor of :func:`pty.openpty`. diff --git a/Misc/NEWS.d/next/Library/2023-02-07-22-21-46.gh-issue-101446.-c0FdK.rst b/Misc/NEWS.d/next/Library/2023-02-07-22-21-46.gh-issue-101446.-c0FdK.rst new file mode 100644 index 00000000000000..ddf897b71bb1d1 --- /dev/null +++ b/Misc/NEWS.d/next/Library/2023-02-07-22-21-46.gh-issue-101446.-c0FdK.rst @@ -0,0 +1,2 @@ +Change repr of :class:`collections.OrderedDict` to use regular dictionary +formating instead of pairs of keys and values. diff --git a/Misc/NEWS.d/next/Security/2023-01-24-16-12-00.gh-issue-101283.9tqu39.rst b/Misc/NEWS.d/next/Security/2023-01-24-16-12-00.gh-issue-101283.9tqu39.rst new file mode 100644 index 00000000000000..0efdfa10234185 --- /dev/null +++ b/Misc/NEWS.d/next/Security/2023-01-24-16-12-00.gh-issue-101283.9tqu39.rst @@ -0,0 +1,3 @@ +:class:`subprocess.Popen` now uses a safer approach to find +``cmd.exe`` when launching with ``shell=True``. Patch by Eryk Sun, +based on a patch by Oleg Iarygin. diff --git a/Misc/NEWS.d/next/Windows/2023-01-11-14-42-11.gh-issue-100247.YfEmSz.rst b/Misc/NEWS.d/next/Windows/2023-01-11-14-42-11.gh-issue-100247.YfEmSz.rst deleted file mode 100644 index 7bfcbd7ddecf5f..00000000000000 --- a/Misc/NEWS.d/next/Windows/2023-01-11-14-42-11.gh-issue-100247.YfEmSz.rst +++ /dev/null @@ -1,2 +0,0 @@ -Restores support for the :file:`py.exe` launcher finding shebang commands in -its configuration file using the full command name. diff --git a/Misc/NEWS.d/next/Windows/2023-01-11-16-28-09.gh-issue-100320.2DU2it.rst b/Misc/NEWS.d/next/Windows/2023-01-11-16-28-09.gh-issue-100320.2DU2it.rst deleted file mode 100644 index c206fc8520a5d9..00000000000000 --- a/Misc/NEWS.d/next/Windows/2023-01-11-16-28-09.gh-issue-100320.2DU2it.rst +++ /dev/null @@ -1,3 +0,0 @@ -Ensures the ``PythonPath`` registry key from an install is used when -launching from a different copy of Python that relies on an existing install -to provide a copy of its modules and standard library. diff --git a/Misc/NEWS.d/next/Windows/2023-01-17-18-17-58.gh-issue-82052.mWyysT.rst b/Misc/NEWS.d/next/Windows/2023-01-17-18-17-58.gh-issue-82052.mWyysT.rst deleted file mode 100644 index 4f7ab200b85cba..00000000000000 --- a/Misc/NEWS.d/next/Windows/2023-01-17-18-17-58.gh-issue-82052.mWyysT.rst +++ /dev/null @@ -1 +0,0 @@ -Fixed an issue where writing more than 32K of Unicode output to the console screen in one go can result in mojibake. 
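The :class:`collections.OrderedDict` repr change above swaps the old list-of-pairs form for dict-style formatting; an indicative before/after::

    from collections import OrderedDict

    d = OrderedDict(apple=1, banana=2)
    print(repr(d))
    # previously: OrderedDict([('apple', 1), ('banana', 2)])
    # with this change: OrderedDict({'apple': 1, 'banana': 2})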
diff --git a/Misc/NEWS.d/next/Windows/2023-01-18-18-25-18.gh-issue-101135.HF9VlG.rst b/Misc/NEWS.d/next/Windows/2023-01-18-18-25-18.gh-issue-101135.HF9VlG.rst deleted file mode 100644 index 2e6d6371340d89..00000000000000 --- a/Misc/NEWS.d/next/Windows/2023-01-18-18-25-18.gh-issue-101135.HF9VlG.rst +++ /dev/null @@ -1,3 +0,0 @@ -Restore ability to launch older 32-bit versions from the :file:`py.exe` -launcher when both 32-bit and 64-bit installs of the same version are -available. diff --git a/Misc/NEWS.d/next/Windows/2023-01-25-00-23-31.gh-issue-99834.WN41lc.rst b/Misc/NEWS.d/next/Windows/2023-01-25-00-23-31.gh-issue-99834.WN41lc.rst deleted file mode 100644 index d3894fa4ea3012..00000000000000 --- a/Misc/NEWS.d/next/Windows/2023-01-25-00-23-31.gh-issue-99834.WN41lc.rst +++ /dev/null @@ -1 +0,0 @@ -Updates bundled copy of Tcl/Tk to 8.6.13.0 diff --git a/Misc/NEWS.d/next/Windows/2023-01-25-11-33-54.gh-issue-101196.wAX_2g.rst b/Misc/NEWS.d/next/Windows/2023-01-25-11-33-54.gh-issue-101196.wAX_2g.rst new file mode 100644 index 00000000000000..c61e9b90fb5373 --- /dev/null +++ b/Misc/NEWS.d/next/Windows/2023-01-25-11-33-54.gh-issue-101196.wAX_2g.rst @@ -0,0 +1,3 @@ +The functions ``os.path.isdir``, ``os.path.isfile``, ``os.path.islink`` and +``os.path.exists`` are now 13% to 28% faster on Windows, by making fewer Win32 +API calls. diff --git a/Misc/NEWS.d/next/Windows/2023-02-07-18-22-54.gh-issue-101614.NjVP0n.rst b/Misc/NEWS.d/next/Windows/2023-02-07-18-22-54.gh-issue-101614.NjVP0n.rst new file mode 100644 index 00000000000000..8ed0995d78925b --- /dev/null +++ b/Misc/NEWS.d/next/Windows/2023-02-07-18-22-54.gh-issue-101614.NjVP0n.rst @@ -0,0 +1 @@ +Correctly handle extensions built against debug binaries that reference ``python3_d.dll``. diff --git a/Misc/stable_abi.toml b/Misc/stable_abi.toml index c716f403d638ac..21ff9616133445 100644 --- a/Misc/stable_abi.toml +++ b/Misc/stable_abi.toml @@ -2333,6 +2333,15 @@ added = '3.12' [function.PyVectorcall_Call] added = '3.12' +[function.PyErr_GetRaisedException] + added = '3.12' +[function.PyErr_SetRaisedException] + added = '3.12' +[function.PyException_GetArgs] + added = '3.12' +[function.PyException_SetArgs] + added = '3.12' + [typedef.vectorcallfunc] added = '3.12' [function.PyObject_Vectorcall] diff --git a/Modules/Setup b/Modules/Setup index e3a82975b5ff5e..428be0a1bf8fa1 100644 --- a/Modules/Setup +++ b/Modules/Setup @@ -280,6 +280,7 @@ PYTHONPATH=$(COREPYTHONPATH) # Testing #_xxsubinterpreters _xxsubinterpretersmodule.c +#_xxinterpchannels _xxinterpchannelsmodule.c #_xxtestfuzz _xxtestfuzz/_xxtestfuzz.c _xxtestfuzz/fuzzer.c #_testbuffer _testbuffer.c #_testinternalcapi _testinternalcapi.c diff --git a/Modules/Setup.stdlib.in b/Modules/Setup.stdlib.in index d64752e8ca9609..f72783810f9415 100644 --- a/Modules/Setup.stdlib.in +++ b/Modules/Setup.stdlib.in @@ -43,6 +43,7 @@ @MODULE__STRUCT_TRUE@_struct _struct.c @MODULE__TYPING_TRUE@_typing _typingmodule.c @MODULE__XXSUBINTERPRETERS_TRUE@_xxsubinterpreters _xxsubinterpretersmodule.c +@MODULE__XXINTERPCHANNELS_TRUE@_xxinterpchannels _xxinterpchannelsmodule.c @MODULE__ZONEINFO_TRUE@_zoneinfo _zoneinfo.c # needs libm @@ -78,7 +79,7 @@ # hashing builtins, can be disabled with --without-builtin-hashlib-hashes @MODULE__MD5_TRUE@_md5 md5module.c @MODULE__SHA1_TRUE@_sha1 sha1module.c -@MODULE__SHA256_TRUE@_sha256 sha256module.c +@MODULE__SHA256_TRUE@_sha256 sha256module.c _hacl/Hacl_Streaming_SHA2.c @MODULE__SHA512_TRUE@_sha512 sha512module.c @MODULE__SHA3_TRUE@_sha3 _sha3/sha3module.c 
@MODULE__BLAKE2_TRUE@_blake2 _blake2/blake2module.c _blake2/blake2b_impl.c _blake2/blake2s_impl.c diff --git a/Modules/_ctypes/stgdict.c b/Modules/_ctypes/stgdict.c index 9a4041fb25280e..83a52757d60979 100644 --- a/Modules/_ctypes/stgdict.c +++ b/Modules/_ctypes/stgdict.c @@ -337,6 +337,29 @@ MakeAnonFields(PyObject *type) return 0; } +/* + Allocate a memory block for a pep3118 format string, copy prefix (if + non-null) into it and append `{padding}x` to the end. + Returns NULL on failure, with the error indicator set. +*/ +char * +_ctypes_alloc_format_padding(const char *prefix, Py_ssize_t padding) +{ + /* int64 decimal characters + x + null */ + char buf[19 + 1 + 1]; + + assert(padding > 0); + + if (padding == 1) { + /* Use x instead of 1x, for brevity */ + return _ctypes_alloc_format_string(prefix, "x"); + } + + int ret = PyOS_snprintf(buf, sizeof(buf), "%zdx", padding); (void)ret; + assert(0 <= ret && ret < (Py_ssize_t)sizeof(buf)); + return _ctypes_alloc_format_string(prefix, buf); +} + /* Retrieve the (optional) _pack_ attribute from a type, the _fields_ attribute, and create an StgDictObject. Used for Structure and Union subclasses. @@ -346,11 +369,10 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct { StgDictObject *stgdict, *basedict; Py_ssize_t len, offset, size, align, i; - Py_ssize_t union_size, total_align; + Py_ssize_t union_size, total_align, aligned_size; Py_ssize_t field_size = 0; int bitofs; PyObject *tmp; - int isPacked; int pack; Py_ssize_t ffi_ofs; int big_endian; @@ -374,7 +396,6 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct return -1; } if (tmp) { - isPacked = 1; pack = _PyLong_AsInt(tmp); Py_DECREF(tmp); if (pack < 0) { @@ -389,7 +410,7 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct } } else { - isPacked = 0; + /* Setting `_pack_ = 0` amounts to using the default alignment */ pack = 0; } @@ -470,12 +491,10 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct } assert(stgdict->format == NULL); - if (isStruct && !isPacked) { + if (isStruct) { stgdict->format = _ctypes_alloc_format_string(NULL, "T{"); } else { - /* PEP3118 doesn't support union, or packed structures (well, - only standard packing, but we don't support the pep for - that). Use 'B' for bytes. */ + /* PEP3118 doesn't support union. Use 'B' for bytes. */ stgdict->format = _ctypes_alloc_format_string(NULL, "B"); } if (stgdict->format == NULL) @@ -543,12 +562,14 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct } else bitsize = 0; - if (isStruct && !isPacked) { + if (isStruct) { const char *fieldfmt = dict->format ? 
dict->format : "B"; const char *fieldname = PyUnicode_AsUTF8(name); char *ptr; Py_ssize_t len; char *buf; + Py_ssize_t last_size = size; + Py_ssize_t padding; if (fieldname == NULL) { @@ -556,11 +577,38 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct return -1; } + /* construct the field now, as `prop->offset` is `offset` with + corrected alignment */ + prop = PyCField_FromDesc(desc, i, + &field_size, bitsize, &bitofs, + &size, &offset, &align, + pack, big_endian); + if (prop == NULL) { + Py_DECREF(pair); + return -1; + } + + /* number of bytes between the end of the last field and the start + of this one */ + padding = ((CFieldObject *)prop)->offset - last_size; + + if (padding > 0) { + ptr = stgdict->format; + stgdict->format = _ctypes_alloc_format_padding(ptr, padding); + PyMem_Free(ptr); + if (stgdict->format == NULL) { + Py_DECREF(pair); + Py_DECREF(prop); + return -1; + } + } + len = strlen(fieldname) + strlen(fieldfmt); buf = PyMem_Malloc(len + 2 + 1); if (buf == NULL) { Py_DECREF(pair); + Py_DECREF(prop); PyErr_NoMemory(); return -1; } @@ -578,15 +626,9 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct if (stgdict->format == NULL) { Py_DECREF(pair); + Py_DECREF(prop); return -1; } - } - - if (isStruct) { - prop = PyCField_FromDesc(desc, i, - &field_size, bitsize, &bitofs, - &size, &offset, &align, - pack, big_endian); } else /* union */ { size = 0; offset = 0; @@ -595,14 +637,14 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct &field_size, bitsize, &bitofs, &size, &offset, &align, pack, big_endian); + if (prop == NULL) { + Py_DECREF(pair); + return -1; + } union_size = max(size, union_size); } total_align = max(align, total_align); - if (!prop) { - Py_DECREF(pair); - return -1; - } if (-1 == PyObject_SetAttr(type, name, prop)) { Py_DECREF(prop); Py_DECREF(pair); @@ -612,26 +654,41 @@ PyCStructUnionType_update_stgdict(PyObject *type, PyObject *fields, int isStruct Py_DECREF(prop); } - if (isStruct && !isPacked) { - char *ptr = stgdict->format; + if (!isStruct) { + size = union_size; + } + + /* Adjust the size according to the alignment requirements */ + aligned_size = ((size + total_align - 1) / total_align) * total_align; + + if (isStruct) { + char *ptr; + Py_ssize_t padding; + + /* Pad up to the full size of the struct */ + padding = aligned_size - size; + if (padding > 0) { + ptr = stgdict->format; + stgdict->format = _ctypes_alloc_format_padding(ptr, padding); + PyMem_Free(ptr); + if (stgdict->format == NULL) { + return -1; + } + } + + ptr = stgdict->format; stgdict->format = _ctypes_alloc_format_string(stgdict->format, "}"); PyMem_Free(ptr); if (stgdict->format == NULL) return -1; } - if (!isStruct) - size = union_size; - - /* Adjust the size according to the alignment requirements */ - size = ((size + total_align - 1) / total_align) * total_align; - stgdict->ffi_type_pointer.alignment = Py_SAFE_DOWNCAST(total_align, Py_ssize_t, unsigned short); - stgdict->ffi_type_pointer.size = size; + stgdict->ffi_type_pointer.size = aligned_size; - stgdict->size = size; + stgdict->size = aligned_size; stgdict->align = total_align; stgdict->length = len; /* ADD ffi_ofs? 
*/ diff --git a/Modules/_hacl/Hacl_Streaming_SHA2.c b/Modules/_hacl/Hacl_Streaming_SHA2.c new file mode 100644 index 00000000000000..84566571792a3c --- /dev/null +++ b/Modules/_hacl/Hacl_Streaming_SHA2.c @@ -0,0 +1,682 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#include "Hacl_Streaming_SHA2.h" + +#include "internal/Hacl_SHA2_Generic.h" + + +static inline void sha256_init(uint32_t *hash) +{ + KRML_MAYBE_FOR8(i, + (uint32_t)0U, + (uint32_t)8U, + (uint32_t)1U, + uint32_t *os = hash; + uint32_t x = Hacl_Impl_SHA2_Generic_h256[i]; + os[i] = x;); +} + +static inline void sha256_update0(uint8_t *b, uint32_t *hash) +{ + uint32_t hash_old[8U] = { 0U }; + uint32_t ws[16U] = { 0U }; + memcpy(hash_old, hash, (uint32_t)8U * sizeof (uint32_t)); + uint8_t *b10 = b; + uint32_t u = load32_be(b10); + ws[0U] = u; + uint32_t u0 = load32_be(b10 + (uint32_t)4U); + ws[1U] = u0; + uint32_t u1 = load32_be(b10 + (uint32_t)8U); + ws[2U] = u1; + uint32_t u2 = load32_be(b10 + (uint32_t)12U); + ws[3U] = u2; + uint32_t u3 = load32_be(b10 + (uint32_t)16U); + ws[4U] = u3; + uint32_t u4 = load32_be(b10 + (uint32_t)20U); + ws[5U] = u4; + uint32_t u5 = load32_be(b10 + (uint32_t)24U); + ws[6U] = u5; + uint32_t u6 = load32_be(b10 + (uint32_t)28U); + ws[7U] = u6; + uint32_t u7 = load32_be(b10 + (uint32_t)32U); + ws[8U] = u7; + uint32_t u8 = load32_be(b10 + (uint32_t)36U); + ws[9U] = u8; + uint32_t u9 = load32_be(b10 + (uint32_t)40U); + ws[10U] = u9; + uint32_t u10 = load32_be(b10 + (uint32_t)44U); + ws[11U] = u10; + uint32_t u11 = load32_be(b10 + (uint32_t)48U); + ws[12U] = u11; + uint32_t u12 = load32_be(b10 + (uint32_t)52U); + ws[13U] = u12; + uint32_t u13 = load32_be(b10 + (uint32_t)56U); + ws[14U] = u13; + uint32_t u14 = load32_be(b10 + (uint32_t)60U); + ws[15U] = u14; + KRML_MAYBE_FOR4(i0, + (uint32_t)0U, + (uint32_t)4U, + (uint32_t)1U, + KRML_MAYBE_FOR16(i, + (uint32_t)0U, + (uint32_t)16U, + (uint32_t)1U, + uint32_t k_t = Hacl_Impl_SHA2_Generic_k224_256[(uint32_t)16U * i0 + i]; + uint32_t ws_t = ws[i]; + uint32_t a0 = hash[0U]; + uint32_t b0 = hash[1U]; + uint32_t c0 = hash[2U]; + uint32_t d0 = hash[3U]; + uint32_t e0 = hash[4U]; + uint32_t f0 = hash[5U]; + uint32_t g0 = hash[6U]; + uint32_t h02 = hash[7U]; + uint32_t k_e_t = k_t; + uint32_t + t1 = + h02 + + + ((e0 << (uint32_t)26U | e0 >> (uint32_t)6U) + ^ + ((e0 << (uint32_t)21U | e0 >> 
(uint32_t)11U) + ^ (e0 << (uint32_t)7U | e0 >> (uint32_t)25U))) + + ((e0 & f0) ^ (~e0 & g0)) + + k_e_t + + ws_t; + uint32_t + t2 = + ((a0 << (uint32_t)30U | a0 >> (uint32_t)2U) + ^ + ((a0 << (uint32_t)19U | a0 >> (uint32_t)13U) + ^ (a0 << (uint32_t)10U | a0 >> (uint32_t)22U))) + + ((a0 & b0) ^ ((a0 & c0) ^ (b0 & c0))); + uint32_t a1 = t1 + t2; + uint32_t b1 = a0; + uint32_t c1 = b0; + uint32_t d1 = c0; + uint32_t e1 = d0 + t1; + uint32_t f1 = e0; + uint32_t g1 = f0; + uint32_t h12 = g0; + hash[0U] = a1; + hash[1U] = b1; + hash[2U] = c1; + hash[3U] = d1; + hash[4U] = e1; + hash[5U] = f1; + hash[6U] = g1; + hash[7U] = h12;); + if (i0 < (uint32_t)3U) + { + KRML_MAYBE_FOR16(i, + (uint32_t)0U, + (uint32_t)16U, + (uint32_t)1U, + uint32_t t16 = ws[i]; + uint32_t t15 = ws[(i + (uint32_t)1U) % (uint32_t)16U]; + uint32_t t7 = ws[(i + (uint32_t)9U) % (uint32_t)16U]; + uint32_t t2 = ws[(i + (uint32_t)14U) % (uint32_t)16U]; + uint32_t + s1 = + (t2 << (uint32_t)15U | t2 >> (uint32_t)17U) + ^ ((t2 << (uint32_t)13U | t2 >> (uint32_t)19U) ^ t2 >> (uint32_t)10U); + uint32_t + s0 = + (t15 << (uint32_t)25U | t15 >> (uint32_t)7U) + ^ ((t15 << (uint32_t)14U | t15 >> (uint32_t)18U) ^ t15 >> (uint32_t)3U); + ws[i] = s1 + t7 + s0 + t16;); + }); + KRML_MAYBE_FOR8(i, + (uint32_t)0U, + (uint32_t)8U, + (uint32_t)1U, + uint32_t *os = hash; + uint32_t x = hash[i] + hash_old[i]; + os[i] = x;); +} + +static inline void sha256_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st) +{ + uint32_t blocks = len / (uint32_t)64U; + for (uint32_t i = (uint32_t)0U; i < blocks; i++) + { + uint8_t *b0 = b; + uint8_t *mb = b0 + i * (uint32_t)64U; + sha256_update0(mb, st); + } +} + +static inline void +sha256_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *hash) +{ + uint32_t blocks; + if (len + (uint32_t)8U + (uint32_t)1U <= (uint32_t)64U) + { + blocks = (uint32_t)1U; + } + else + { + blocks = (uint32_t)2U; + } + uint32_t fin = blocks * (uint32_t)64U; + uint8_t last[128U] = { 0U }; + uint8_t totlen_buf[8U] = { 0U }; + uint64_t total_len_bits = totlen << (uint32_t)3U; + store64_be(totlen_buf, total_len_bits); + uint8_t *b0 = b; + memcpy(last, b0, len * sizeof (uint8_t)); + last[len] = (uint8_t)0x80U; + memcpy(last + fin - (uint32_t)8U, totlen_buf, (uint32_t)8U * sizeof (uint8_t)); + uint8_t *last00 = last; + uint8_t *last10 = last + (uint32_t)64U; + uint8_t *l0 = last00; + uint8_t *l1 = last10; + uint8_t *lb0 = l0; + uint8_t *lb1 = l1; + uint8_t *last0 = lb0; + uint8_t *last1 = lb1; + sha256_update0(last0, hash); + if (blocks > (uint32_t)1U) + { + sha256_update0(last1, hash); + return; + } +} + +static inline void sha256_finish(uint32_t *st, uint8_t *h) +{ + uint8_t hbuf[32U] = { 0U }; + KRML_MAYBE_FOR8(i, + (uint32_t)0U, + (uint32_t)8U, + (uint32_t)1U, + store32_be(hbuf + i * (uint32_t)4U, st[i]);); + memcpy(h, hbuf, (uint32_t)32U * sizeof (uint8_t)); +} + +static inline void sha224_init(uint32_t *hash) +{ + KRML_MAYBE_FOR8(i, + (uint32_t)0U, + (uint32_t)8U, + (uint32_t)1U, + uint32_t *os = hash; + uint32_t x = Hacl_Impl_SHA2_Generic_h224[i]; + os[i] = x;); +} + +static inline void sha224_update_nblocks(uint32_t len, uint8_t *b, uint32_t *st) +{ + sha256_update_nblocks(len, b, st); +} + +static void sha224_update_last(uint64_t totlen, uint32_t len, uint8_t *b, uint32_t *st) +{ + sha256_update_last(totlen, len, b, st); +} + +static inline void sha224_finish(uint32_t *st, uint8_t *h) +{ + uint8_t hbuf[32U] = { 0U }; + KRML_MAYBE_FOR8(i, + (uint32_t)0U, + (uint32_t)8U, + (uint32_t)1U, + store32_be(hbuf + i * 
(uint32_t)4U, st[i]);); + memcpy(h, hbuf, (uint32_t)28U * sizeof (uint8_t)); +} + +/** +Allocate initial state for the SHA2_256 hash. The state is to be freed by +calling `free_256`. +*/ +Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_256(void) +{ + uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); + uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t)); + Hacl_Streaming_SHA2_state_sha2_224 + s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; + Hacl_Streaming_SHA2_state_sha2_224 + *p = + (Hacl_Streaming_SHA2_state_sha2_224 *)KRML_HOST_MALLOC(sizeof ( + Hacl_Streaming_SHA2_state_sha2_224 + )); + p[0U] = s; + sha256_init(block_state); + return p; +} + +/** +Copies the state passed as argument into a newly allocated state (deep copy). +The state is to be freed by calling `free_256`. Cloning the state this way is +useful, for instance, if your control-flow diverges and you need to feed +more (different) data into the hash in each branch. +*/ +Hacl_Streaming_SHA2_state_sha2_224 +*Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_SHA2_state_sha2_224 *s0) +{ + Hacl_Streaming_SHA2_state_sha2_224 scrut = *s0; + uint32_t *block_state0 = scrut.block_state; + uint8_t *buf0 = scrut.buf; + uint64_t total_len0 = scrut.total_len; + uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); + memcpy(buf, buf0, (uint32_t)64U * sizeof (uint8_t)); + uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t)); + memcpy(block_state, block_state0, (uint32_t)8U * sizeof (uint32_t)); + Hacl_Streaming_SHA2_state_sha2_224 + s = { .block_state = block_state, .buf = buf, .total_len = total_len0 }; + Hacl_Streaming_SHA2_state_sha2_224 + *p = + (Hacl_Streaming_SHA2_state_sha2_224 *)KRML_HOST_MALLOC(sizeof ( + Hacl_Streaming_SHA2_state_sha2_224 + )); + p[0U] = s; + return p; +} + +/** +Reset an existing state to the initial hash state with empty data. 
+*/ +void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_SHA2_state_sha2_224 *s) +{ + Hacl_Streaming_SHA2_state_sha2_224 scrut = *s; + uint8_t *buf = scrut.buf; + uint32_t *block_state = scrut.block_state; + sha256_init(block_state); + Hacl_Streaming_SHA2_state_sha2_224 + tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; + s[0U] = tmp; +} + +static inline uint32_t +update_224_256(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *data, uint32_t len) +{ + Hacl_Streaming_SHA2_state_sha2_224 s = *p; + uint64_t total_len = s.total_len; + if ((uint64_t)len > (uint64_t)2305843009213693951U - total_len) + { + return (uint32_t)1U; + } + uint32_t sz; + if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) + { + sz = (uint32_t)64U; + } + else + { + sz = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); + } + if (len <= (uint32_t)64U - sz) + { + Hacl_Streaming_SHA2_state_sha2_224 s1 = *p; + uint32_t *block_state1 = s1.block_state; + uint8_t *buf = s1.buf; + uint64_t total_len1 = s1.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) + { + sz1 = (uint32_t)64U; + } + else + { + sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); + } + uint8_t *buf2 = buf + sz1; + memcpy(buf2, data, len * sizeof (uint8_t)); + uint64_t total_len2 = total_len1 + (uint64_t)len; + *p + = + ( + (Hacl_Streaming_SHA2_state_sha2_224){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len2 + } + ); + } + else if (sz == (uint32_t)0U) + { + Hacl_Streaming_SHA2_state_sha2_224 s1 = *p; + uint32_t *block_state1 = s1.block_state; + uint8_t *buf = s1.buf; + uint64_t total_len1 = s1.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) + { + sz1 = (uint32_t)64U; + } + else + { + sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); + } + if (!(sz1 == (uint32_t)0U)) + { + sha256_update_nblocks((uint32_t)64U, buf, block_state1); + } + uint32_t ite; + if ((uint64_t)len % (uint64_t)(uint32_t)64U == (uint64_t)0U && (uint64_t)len > (uint64_t)0U) + { + ite = (uint32_t)64U; + } + else + { + ite = (uint32_t)((uint64_t)len % (uint64_t)(uint32_t)64U); + } + uint32_t n_blocks = (len - ite) / (uint32_t)64U; + uint32_t data1_len = n_blocks * (uint32_t)64U; + uint32_t data2_len = len - data1_len; + uint8_t *data1 = data; + uint8_t *data2 = data + data1_len; + sha256_update_nblocks(data1_len, data1, block_state1); + uint8_t *dst = buf; + memcpy(dst, data2, data2_len * sizeof (uint8_t)); + *p + = + ( + (Hacl_Streaming_SHA2_state_sha2_224){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len1 + (uint64_t)len + } + ); + } + else + { + uint32_t diff = (uint32_t)64U - sz; + uint8_t *data1 = data; + uint8_t *data2 = data + diff; + Hacl_Streaming_SHA2_state_sha2_224 s1 = *p; + uint32_t *block_state10 = s1.block_state; + uint8_t *buf0 = s1.buf; + uint64_t total_len10 = s1.total_len; + uint32_t sz10; + if (total_len10 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len10 > (uint64_t)0U) + { + sz10 = (uint32_t)64U; + } + else + { + sz10 = (uint32_t)(total_len10 % (uint64_t)(uint32_t)64U); + } + uint8_t *buf2 = buf0 + sz10; + memcpy(buf2, data1, diff * sizeof (uint8_t)); + uint64_t total_len2 = total_len10 + (uint64_t)diff; + *p + = + ( + (Hacl_Streaming_SHA2_state_sha2_224){ + .block_state = block_state10, + .buf = buf0, + .total_len = total_len2 + } + ); + Hacl_Streaming_SHA2_state_sha2_224 s10 = *p; + uint32_t *block_state1 = 
s10.block_state; + uint8_t *buf = s10.buf; + uint64_t total_len1 = s10.total_len; + uint32_t sz1; + if (total_len1 % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len1 > (uint64_t)0U) + { + sz1 = (uint32_t)64U; + } + else + { + sz1 = (uint32_t)(total_len1 % (uint64_t)(uint32_t)64U); + } + if (!(sz1 == (uint32_t)0U)) + { + sha256_update_nblocks((uint32_t)64U, buf, block_state1); + } + uint32_t ite; + if + ( + (uint64_t)(len - diff) + % (uint64_t)(uint32_t)64U + == (uint64_t)0U + && (uint64_t)(len - diff) > (uint64_t)0U + ) + { + ite = (uint32_t)64U; + } + else + { + ite = (uint32_t)((uint64_t)(len - diff) % (uint64_t)(uint32_t)64U); + } + uint32_t n_blocks = (len - diff - ite) / (uint32_t)64U; + uint32_t data1_len = n_blocks * (uint32_t)64U; + uint32_t data2_len = len - diff - data1_len; + uint8_t *data11 = data2; + uint8_t *data21 = data2 + data1_len; + sha256_update_nblocks(data1_len, data11, block_state1); + uint8_t *dst = buf; + memcpy(dst, data21, data2_len * sizeof (uint8_t)); + *p + = + ( + (Hacl_Streaming_SHA2_state_sha2_224){ + .block_state = block_state1, + .buf = buf, + .total_len = total_len1 + (uint64_t)(len - diff) + } + ); + } + return (uint32_t)0U; +} + +/** +Feed an arbitrary amount of data into the hash. This function returns 0 for +success, or 1 if the combined length of all of the data passed to `update_256` +(since the last call to `init_256`) exceeds 2^61-1 bytes. + +This function is identical to the update function for SHA2_224. +*/ +uint32_t +Hacl_Streaming_SHA2_update_256( + Hacl_Streaming_SHA2_state_sha2_224 *p, + uint8_t *input, + uint32_t input_len +) +{ + return update_224_256(p, input, input_len); +} + +/** +Write the resulting hash into `dst`, an array of 32 bytes. The state remains +valid after a call to `finish_256`, meaning the user may feed more data into +the hash via `update_256`. (The finish_256 function operates on an internal copy of +the state and therefore does not invalidate the client-held state `p`.) +*/ +void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst) +{ + Hacl_Streaming_SHA2_state_sha2_224 scrut = *p; + uint32_t *block_state = scrut.block_state; + uint8_t *buf_ = scrut.buf; + uint64_t total_len = scrut.total_len; + uint32_t r; + if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) + { + r = (uint32_t)64U; + } + else + { + r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); + } + uint8_t *buf_1 = buf_; + uint32_t tmp_block_state[8U] = { 0U }; + memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t)); + uint32_t ite; + if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) + { + ite = (uint32_t)64U; + } + else + { + ite = r % (uint32_t)64U; + } + uint8_t *buf_last = buf_1 + r - ite; + uint8_t *buf_multi = buf_1; + sha256_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state); + uint64_t prev_len_last = total_len - (uint64_t)r; + sha256_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state); + sha256_finish(tmp_block_state, dst); +} + +/** +Free a state allocated with `create_in_256`. + +This function is identical to the free function for SHA2_224. +*/ +void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_SHA2_state_sha2_224 *s) +{ + Hacl_Streaming_SHA2_state_sha2_224 scrut = *s; + uint8_t *buf = scrut.buf; + uint32_t *block_state = scrut.block_state; + KRML_HOST_FREE(block_state); + KRML_HOST_FREE(buf); + KRML_HOST_FREE(s); +} + +/** +Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes. 
+*/ +void Hacl_Streaming_SHA2_sha256(uint8_t *input, uint32_t input_len, uint8_t *dst) +{ + uint8_t *ib = input; + uint8_t *rb = dst; + uint32_t st[8U] = { 0U }; + sha256_init(st); + uint32_t rem = input_len % (uint32_t)64U; + uint64_t len_ = (uint64_t)input_len; + sha256_update_nblocks(input_len, ib, st); + uint32_t rem1 = input_len % (uint32_t)64U; + uint8_t *b0 = ib; + uint8_t *lb = b0 + input_len - rem1; + sha256_update_last(len_, rem, lb, st); + sha256_finish(st, rb); +} + +Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_224(void) +{ + uint8_t *buf = (uint8_t *)KRML_HOST_CALLOC((uint32_t)64U, sizeof (uint8_t)); + uint32_t *block_state = (uint32_t *)KRML_HOST_CALLOC((uint32_t)8U, sizeof (uint32_t)); + Hacl_Streaming_SHA2_state_sha2_224 + s = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; + Hacl_Streaming_SHA2_state_sha2_224 + *p = + (Hacl_Streaming_SHA2_state_sha2_224 *)KRML_HOST_MALLOC(sizeof ( + Hacl_Streaming_SHA2_state_sha2_224 + )); + p[0U] = s; + sha224_init(block_state); + return p; +} + +void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_SHA2_state_sha2_224 *s) +{ + Hacl_Streaming_SHA2_state_sha2_224 scrut = *s; + uint8_t *buf = scrut.buf; + uint32_t *block_state = scrut.block_state; + sha224_init(block_state); + Hacl_Streaming_SHA2_state_sha2_224 + tmp = { .block_state = block_state, .buf = buf, .total_len = (uint64_t)(uint32_t)0U }; + s[0U] = tmp; +} + +uint32_t +Hacl_Streaming_SHA2_update_224( + Hacl_Streaming_SHA2_state_sha2_224 *p, + uint8_t *input, + uint32_t input_len +) +{ + return update_224_256(p, input, input_len); +} + +/** +Write the resulting hash into `dst`, an array of 28 bytes. The state remains +valid after a call to `finish_224`, meaning the user may feed more data into +the hash via `update_224`. +*/ +void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst) +{ + Hacl_Streaming_SHA2_state_sha2_224 scrut = *p; + uint32_t *block_state = scrut.block_state; + uint8_t *buf_ = scrut.buf; + uint64_t total_len = scrut.total_len; + uint32_t r; + if (total_len % (uint64_t)(uint32_t)64U == (uint64_t)0U && total_len > (uint64_t)0U) + { + r = (uint32_t)64U; + } + else + { + r = (uint32_t)(total_len % (uint64_t)(uint32_t)64U); + } + uint8_t *buf_1 = buf_; + uint32_t tmp_block_state[8U] = { 0U }; + memcpy(tmp_block_state, block_state, (uint32_t)8U * sizeof (uint32_t)); + uint32_t ite; + if (r % (uint32_t)64U == (uint32_t)0U && r > (uint32_t)0U) + { + ite = (uint32_t)64U; + } + else + { + ite = r % (uint32_t)64U; + } + uint8_t *buf_last = buf_1 + r - ite; + uint8_t *buf_multi = buf_1; + sha224_update_nblocks((uint32_t)0U, buf_multi, tmp_block_state); + uint64_t prev_len_last = total_len - (uint64_t)r; + sha224_update_last(prev_len_last + (uint64_t)r, r, buf_last, tmp_block_state); + sha224_finish(tmp_block_state, dst); +} + +void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_SHA2_state_sha2_224 *p) +{ + Hacl_Streaming_SHA2_free_256(p); +} + +/** +Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes. 
+*/ +void Hacl_Streaming_SHA2_sha224(uint8_t *input, uint32_t input_len, uint8_t *dst) +{ + uint8_t *ib = input; + uint8_t *rb = dst; + uint32_t st[8U] = { 0U }; + sha224_init(st); + uint32_t rem = input_len % (uint32_t)64U; + uint64_t len_ = (uint64_t)input_len; + sha224_update_nblocks(input_len, ib, st); + uint32_t rem1 = input_len % (uint32_t)64U; + uint8_t *b0 = ib; + uint8_t *lb = b0 + input_len - rem1; + sha224_update_last(len_, rem, lb, st); + sha224_finish(st, rb); +} + diff --git a/Modules/_hacl/Hacl_Streaming_SHA2.h b/Modules/_hacl/Hacl_Streaming_SHA2.h new file mode 100644 index 00000000000000..c83a835afe70fd --- /dev/null +++ b/Modules/_hacl/Hacl_Streaming_SHA2.h @@ -0,0 +1,136 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#ifndef __Hacl_Streaming_SHA2_H +#define __Hacl_Streaming_SHA2_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include +#include "python_hacl_namespaces.h" +#include "krml/FStar_UInt_8_16_32_64.h" +#include "krml/lowstar_endianness.h" +#include "krml/internal/target.h" + + + + +typedef struct Hacl_Streaming_SHA2_state_sha2_224_s +{ + uint32_t *block_state; + uint8_t *buf; + uint64_t total_len; +} +Hacl_Streaming_SHA2_state_sha2_224; + +typedef Hacl_Streaming_SHA2_state_sha2_224 Hacl_Streaming_SHA2_state_sha2_256; + +/** +Allocate initial state for the SHA2_256 hash. The state is to be freed by +calling `free_256`. +*/ +Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_256(void); + +/** +Copies the state passed as argument into a newly allocated state (deep copy). +The state is to be freed by calling `free_256`. Cloning the state this way is +useful, for instance, if your control-flow diverges and you need to feed +more (different) data into the hash in each branch. +*/ +Hacl_Streaming_SHA2_state_sha2_224 +*Hacl_Streaming_SHA2_copy_256(Hacl_Streaming_SHA2_state_sha2_224 *s0); + +/** +Reset an existing state to the initial hash state with empty data. +*/ +void Hacl_Streaming_SHA2_init_256(Hacl_Streaming_SHA2_state_sha2_224 *s); + +/** +Feed an arbitrary amount of data into the hash. This function returns 0 for +success, or 1 if the combined length of all of the data passed to `update_256` +(since the last call to `init_256`) exceeds 2^61-1 bytes. + +This function is identical to the update function for SHA2_224. 
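+
+For illustration, a typical streaming sequence over this API is sketched
+below; `chunk` and `digest` are hypothetical buffers and error handling is
+elided:
+
+  Hacl_Streaming_SHA2_state_sha2_224 *st = Hacl_Streaming_SHA2_create_in_256();
+  uint8_t chunk[64U] = { 0U };
+  uint8_t digest[32U];
+  (void)Hacl_Streaming_SHA2_update_256(st, chunk, (uint32_t)64U);
+  Hacl_Streaming_SHA2_finish_256(st, digest);
+  Hacl_Streaming_SHA2_free_256(st);
+
+The state remains valid after `finish_256`, so more `update_256` calls could
+follow before `free_256`.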
+*/ +uint32_t +Hacl_Streaming_SHA2_update_256( + Hacl_Streaming_SHA2_state_sha2_224 *p, + uint8_t *input, + uint32_t input_len +); + +/** +Write the resulting hash into `dst`, an array of 32 bytes. The state remains +valid after a call to `finish_256`, meaning the user may feed more data into +the hash via `update_256`. (The finish_256 function operates on an internal copy of +the state and therefore does not invalidate the client-held state `p`.) +*/ +void Hacl_Streaming_SHA2_finish_256(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst); + +/** +Free a state allocated with `create_in_256`. + +This function is identical to the free function for SHA2_224. +*/ +void Hacl_Streaming_SHA2_free_256(Hacl_Streaming_SHA2_state_sha2_224 *s); + +/** +Hash `input`, of len `input_len`, into `dst`, an array of 32 bytes. +*/ +void Hacl_Streaming_SHA2_sha256(uint8_t *input, uint32_t input_len, uint8_t *dst); + +Hacl_Streaming_SHA2_state_sha2_224 *Hacl_Streaming_SHA2_create_in_224(void); + +void Hacl_Streaming_SHA2_init_224(Hacl_Streaming_SHA2_state_sha2_224 *s); + +uint32_t +Hacl_Streaming_SHA2_update_224( + Hacl_Streaming_SHA2_state_sha2_224 *p, + uint8_t *input, + uint32_t input_len +); + +/** +Write the resulting hash into `dst`, an array of 28 bytes. The state remains +valid after a call to `finish_224`, meaning the user may feed more data into +the hash via `update_224`. +*/ +void Hacl_Streaming_SHA2_finish_224(Hacl_Streaming_SHA2_state_sha2_224 *p, uint8_t *dst); + +void Hacl_Streaming_SHA2_free_224(Hacl_Streaming_SHA2_state_sha2_224 *p); + +/** +Hash `input`, of len `input_len`, into `dst`, an array of 28 bytes. +*/ +void Hacl_Streaming_SHA2_sha224(uint8_t *input, uint32_t input_len, uint8_t *dst); + +#if defined(__cplusplus) +} +#endif + +#define __Hacl_Streaming_SHA2_H_DEFINED +#endif diff --git a/Modules/_hacl/README.md b/Modules/_hacl/README.md new file mode 100644 index 00000000000000..e6a156a54b3cee --- /dev/null +++ b/Modules/_hacl/README.md @@ -0,0 +1,29 @@ +# Algorithm implementations used by the `hashlib` module. + +This code comes from the +[HACL\*](https://github.com/hacl-star/hacl-star/) project. + +HACL\* is a cryptographic library that has been formally verified for memory +safety, functional correctness, and secret independence. + +## Updating HACL* + +Use the `refresh.sh` script in this directory to pull in a new upstream code +version. The upstream git hash used for the most recent code pull is recorded +in the script. Modify the script as needed to bring in more if changes are +needed based on upstream code refactoring. + +Never manually edit HACL\* files. Always add transformation shell code to the +`refresh.sh` script to perform any necessary edits. If there are serious code +changes needed, work with the upstream repository. + +## Local files + +1. `./include/python_hacl_namespaces.h` +1. `./README.md` +1. `./refresh.sh` + +## ACKS + +* Jonathan Protzenko aka [@msprotz on Github](https://github.com/msprotz) +contributed our HACL\* based builtin code. diff --git a/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h b/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h new file mode 100644 index 00000000000000..3e2e4b32b22f96 --- /dev/null +++ b/Modules/_hacl/include/krml/FStar_UInt_8_16_32_64.h @@ -0,0 +1,109 @@ +/* + Copyright (c) INRIA and Microsoft Corporation. All rights reserved. + Licensed under the Apache 2.0 License. 
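+
+The helpers defined in this header compute branchless comparison masks:
+`FStar_UInt32_eq_mask(a, b)` evaluates to all-ones when `a == b` and to zero
+otherwise, with no data-dependent branch, and the 64-, 16- and 8-bit variants
+follow the same pattern.  A constant-time select built on top of it can be
+sketched as follows, where `a`, `b`, `x` and `y` are hypothetical inputs and
+`r` ends up equal to `x` when `a == b` and to `y` otherwise:
+
+  uint32_t mask = FStar_UInt32_eq_mask(a, b);
+  uint32_t r = (mask & x) | (~mask & y);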
+*/ + + +#ifndef __FStar_UInt_8_16_32_64_H +#define __FStar_UInt_8_16_32_64_H + + + + +#include +#include + +#include "krml/lowstar_endianness.h" +#include "krml/FStar_UInt_8_16_32_64.h" +#include "krml/internal/target.h" +static inline uint64_t FStar_UInt64_eq_mask(uint64_t a, uint64_t b) +{ + uint64_t x = a ^ b; + uint64_t minus_x = ~x + (uint64_t)1U; + uint64_t x_or_minus_x = x | minus_x; + uint64_t xnx = x_or_minus_x >> (uint32_t)63U; + return xnx - (uint64_t)1U; +} + +static inline uint64_t FStar_UInt64_gte_mask(uint64_t a, uint64_t b) +{ + uint64_t x = a; + uint64_t y = b; + uint64_t x_xor_y = x ^ y; + uint64_t x_sub_y = x - y; + uint64_t x_sub_y_xor_y = x_sub_y ^ y; + uint64_t q = x_xor_y | x_sub_y_xor_y; + uint64_t x_xor_q = x ^ q; + uint64_t x_xor_q_ = x_xor_q >> (uint32_t)63U; + return x_xor_q_ - (uint64_t)1U; +} + +static inline uint32_t FStar_UInt32_eq_mask(uint32_t a, uint32_t b) +{ + uint32_t x = a ^ b; + uint32_t minus_x = ~x + (uint32_t)1U; + uint32_t x_or_minus_x = x | minus_x; + uint32_t xnx = x_or_minus_x >> (uint32_t)31U; + return xnx - (uint32_t)1U; +} + +static inline uint32_t FStar_UInt32_gte_mask(uint32_t a, uint32_t b) +{ + uint32_t x = a; + uint32_t y = b; + uint32_t x_xor_y = x ^ y; + uint32_t x_sub_y = x - y; + uint32_t x_sub_y_xor_y = x_sub_y ^ y; + uint32_t q = x_xor_y | x_sub_y_xor_y; + uint32_t x_xor_q = x ^ q; + uint32_t x_xor_q_ = x_xor_q >> (uint32_t)31U; + return x_xor_q_ - (uint32_t)1U; +} + +static inline uint16_t FStar_UInt16_eq_mask(uint16_t a, uint16_t b) +{ + uint16_t x = a ^ b; + uint16_t minus_x = ~x + (uint16_t)1U; + uint16_t x_or_minus_x = x | minus_x; + uint16_t xnx = x_or_minus_x >> (uint32_t)15U; + return xnx - (uint16_t)1U; +} + +static inline uint16_t FStar_UInt16_gte_mask(uint16_t a, uint16_t b) +{ + uint16_t x = a; + uint16_t y = b; + uint16_t x_xor_y = x ^ y; + uint16_t x_sub_y = x - y; + uint16_t x_sub_y_xor_y = x_sub_y ^ y; + uint16_t q = x_xor_y | x_sub_y_xor_y; + uint16_t x_xor_q = x ^ q; + uint16_t x_xor_q_ = x_xor_q >> (uint32_t)15U; + return x_xor_q_ - (uint16_t)1U; +} + +static inline uint8_t FStar_UInt8_eq_mask(uint8_t a, uint8_t b) +{ + uint8_t x = a ^ b; + uint8_t minus_x = ~x + (uint8_t)1U; + uint8_t x_or_minus_x = x | minus_x; + uint8_t xnx = x_or_minus_x >> (uint32_t)7U; + return xnx - (uint8_t)1U; +} + +static inline uint8_t FStar_UInt8_gte_mask(uint8_t a, uint8_t b) +{ + uint8_t x = a; + uint8_t y = b; + uint8_t x_xor_y = x ^ y; + uint8_t x_sub_y = x - y; + uint8_t x_sub_y_xor_y = x_sub_y ^ y; + uint8_t q = x_xor_y | x_sub_y_xor_y; + uint8_t x_xor_q = x ^ q; + uint8_t x_xor_q_ = x_xor_q >> (uint32_t)7U; + return x_xor_q_ - (uint8_t)1U; +} + + +#define __FStar_UInt_8_16_32_64_H_DEFINED +#endif diff --git a/Modules/_hacl/include/krml/internal/target.h b/Modules/_hacl/include/krml/internal/target.h new file mode 100644 index 00000000000000..9ef59859a554b5 --- /dev/null +++ b/Modules/_hacl/include/krml/internal/target.h @@ -0,0 +1,218 @@ +/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. + Licensed under the Apache 2.0 License. */ + +#ifndef __KRML_TARGET_H +#define __KRML_TARGET_H + +#include +#include +#include +#include +#include +#include +#include + +/* Since KaRaMeL emits the inline keyword unconditionally, we follow the + * guidelines at https://gcc.gnu.org/onlinedocs/gcc/Inline.html and make this + * __inline__ to ensure the code compiles with -std=c90 and earlier. 
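+ *
+ * Further down, this header also defines the KRML_LOOPn / KRML_MAYBE_FORn
+ * loop helpers.  For illustration (with hypothetical `acc` and `table`
+ * variables),
+ *
+ *   KRML_MAYBE_FOR4(i, (uint32_t)0U, (uint32_t)4U, (uint32_t)1U,
+ *     acc ^= table[i];)
+ *
+ * expands, whenever KRML_UNROLL_MAX is at least 4 (the default here is 16),
+ * into four copies of the body with i = 0, 1, 2, 3; with a smaller
+ * KRML_UNROLL_MAX it falls back to an ordinary for loop over the same range,
+ * which is the only case where the loop-bound argument is used.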
*/ +#ifdef __GNUC__ +# define inline __inline__ +#endif + +#ifndef KRML_HOST_MALLOC +# define KRML_HOST_MALLOC malloc +#endif + +#ifndef KRML_HOST_CALLOC +# define KRML_HOST_CALLOC calloc +#endif + +#ifndef KRML_HOST_FREE +# define KRML_HOST_FREE free +#endif + +/* Macros for prettier unrolling of loops */ +#define KRML_LOOP1(i, n, x) { \ + x \ + i += n; \ +} + +#define KRML_LOOP2(i, n, x) \ + KRML_LOOP1(i, n, x) \ + KRML_LOOP1(i, n, x) + +#define KRML_LOOP3(i, n, x) \ + KRML_LOOP2(i, n, x) \ + KRML_LOOP1(i, n, x) + +#define KRML_LOOP4(i, n, x) \ + KRML_LOOP2(i, n, x) \ + KRML_LOOP2(i, n, x) + +#define KRML_LOOP5(i, n, x) \ + KRML_LOOP4(i, n, x) \ + KRML_LOOP1(i, n, x) + +#define KRML_LOOP6(i, n, x) \ + KRML_LOOP4(i, n, x) \ + KRML_LOOP2(i, n, x) + +#define KRML_LOOP7(i, n, x) \ + KRML_LOOP4(i, n, x) \ + KRML_LOOP3(i, n, x) + +#define KRML_LOOP8(i, n, x) \ + KRML_LOOP4(i, n, x) \ + KRML_LOOP4(i, n, x) + +#define KRML_LOOP9(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP1(i, n, x) + +#define KRML_LOOP10(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP2(i, n, x) + +#define KRML_LOOP11(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP3(i, n, x) + +#define KRML_LOOP12(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP4(i, n, x) + +#define KRML_LOOP13(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP5(i, n, x) + +#define KRML_LOOP14(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP6(i, n, x) + +#define KRML_LOOP15(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP7(i, n, x) + +#define KRML_LOOP16(i, n, x) \ + KRML_LOOP8(i, n, x) \ + KRML_LOOP8(i, n, x) + +#define KRML_UNROLL_FOR(i, z, n, k, x) do { \ + uint32_t i = z; \ + KRML_LOOP##n(i, k, x) \ +} while (0) + +#define KRML_ACTUAL_FOR(i, z, n, k, x) \ + do { \ + for (uint32_t i = z; i < n; i += k) { \ + x \ + } \ + } while (0) + +#ifndef KRML_UNROLL_MAX +#define KRML_UNROLL_MAX 16 +#endif + +/* 1 is the number of loop iterations, i.e. 
(n - z)/k as evaluated by krml */ +#if 0 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR0(i, z, n, k, x) +#else +#define KRML_MAYBE_FOR0(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 1 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 1, k, x) +#else +#define KRML_MAYBE_FOR1(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 2 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 2, k, x) +#else +#define KRML_MAYBE_FOR2(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 3 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 3, k, x) +#else +#define KRML_MAYBE_FOR3(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 4 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 4, k, x) +#else +#define KRML_MAYBE_FOR4(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 5 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 5, k, x) +#else +#define KRML_MAYBE_FOR5(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 6 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 6, k, x) +#else +#define KRML_MAYBE_FOR6(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 7 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 7, k, x) +#else +#define KRML_MAYBE_FOR7(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 8 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 8, k, x) +#else +#define KRML_MAYBE_FOR8(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 9 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 9, k, x) +#else +#define KRML_MAYBE_FOR9(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 10 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 10, k, x) +#else +#define KRML_MAYBE_FOR10(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 11 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 11, k, x) +#else +#define KRML_MAYBE_FOR11(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 12 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 12, k, x) +#else +#define KRML_MAYBE_FOR12(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 13 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 13, k, x) +#else +#define KRML_MAYBE_FOR13(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 14 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 14, k, x) +#else +#define KRML_MAYBE_FOR14(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 15 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 15, k, x) +#else +#define KRML_MAYBE_FOR15(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif + +#if 16 <= KRML_UNROLL_MAX +#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_UNROLL_FOR(i, z, 16, k, x) +#else +#define KRML_MAYBE_FOR16(i, z, n, k, x) KRML_ACTUAL_FOR(i, z, n, k, x) +#endif +#endif diff --git a/Modules/_hacl/include/krml/lowstar_endianness.h b/Modules/_hacl/include/krml/lowstar_endianness.h new file mode 100644 index 00000000000000..32a7391e817ebb --- /dev/null +++ b/Modules/_hacl/include/krml/lowstar_endianness.h @@ -0,0 +1,230 @@ +/* Copyright (c) INRIA and Microsoft Corporation. All rights reserved. + Licensed under the Apache 2.0 License. 
*/ + +#ifndef __LOWSTAR_ENDIANNESS_H +#define __LOWSTAR_ENDIANNESS_H + +#include +#include + +/******************************************************************************/ +/* Implementing C.fst (part 2: endian-ness macros) */ +/******************************************************************************/ + +/* ... for Linux */ +#if defined(__linux__) || defined(__CYGWIN__) || defined (__USE_SYSTEM_ENDIAN_H__) || defined(__GLIBC__) +# include + +/* ... for OSX */ +#elif defined(__APPLE__) +# include +# define htole64(x) OSSwapHostToLittleInt64(x) +# define le64toh(x) OSSwapLittleToHostInt64(x) +# define htobe64(x) OSSwapHostToBigInt64(x) +# define be64toh(x) OSSwapBigToHostInt64(x) + +# define htole16(x) OSSwapHostToLittleInt16(x) +# define le16toh(x) OSSwapLittleToHostInt16(x) +# define htobe16(x) OSSwapHostToBigInt16(x) +# define be16toh(x) OSSwapBigToHostInt16(x) + +# define htole32(x) OSSwapHostToLittleInt32(x) +# define le32toh(x) OSSwapLittleToHostInt32(x) +# define htobe32(x) OSSwapHostToBigInt32(x) +# define be32toh(x) OSSwapBigToHostInt32(x) + +/* ... for Solaris */ +#elif defined(__sun__) +# include +# define htole64(x) LE_64(x) +# define le64toh(x) LE_64(x) +# define htobe64(x) BE_64(x) +# define be64toh(x) BE_64(x) + +# define htole16(x) LE_16(x) +# define le16toh(x) LE_16(x) +# define htobe16(x) BE_16(x) +# define be16toh(x) BE_16(x) + +# define htole32(x) LE_32(x) +# define le32toh(x) LE_32(x) +# define htobe32(x) BE_32(x) +# define be32toh(x) BE_32(x) + +/* ... for the BSDs */ +#elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) +# include +#elif defined(__OpenBSD__) +# include + +/* ... for Windows (MSVC)... not targeting XBOX 360! */ +#elif defined(_MSC_VER) + +# include +# define htobe16(x) _byteswap_ushort(x) +# define htole16(x) (x) +# define be16toh(x) _byteswap_ushort(x) +# define le16toh(x) (x) + +# define htobe32(x) _byteswap_ulong(x) +# define htole32(x) (x) +# define be32toh(x) _byteswap_ulong(x) +# define le32toh(x) (x) + +# define htobe64(x) _byteswap_uint64(x) +# define htole64(x) (x) +# define be64toh(x) _byteswap_uint64(x) +# define le64toh(x) (x) + +/* ... for Windows (GCC-like, e.g. mingw or clang) */ +#elif (defined(_WIN32) || defined(_WIN64)) && \ + (defined(__GNUC__) || defined(__clang__)) + +# define htobe16(x) __builtin_bswap16(x) +# define htole16(x) (x) +# define be16toh(x) __builtin_bswap16(x) +# define le16toh(x) (x) + +# define htobe32(x) __builtin_bswap32(x) +# define htole32(x) (x) +# define be32toh(x) __builtin_bswap32(x) +# define le32toh(x) (x) + +# define htobe64(x) __builtin_bswap64(x) +# define htole64(x) (x) +# define be64toh(x) __builtin_bswap64(x) +# define le64toh(x) (x) + +/* ... 
generic big-endian fallback code */ +#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + +/* byte swapping code inspired by: + * https://github.com/rweather/arduinolibs/blob/master/libraries/Crypto/utility/EndianUtil.h + * */ + +# define htobe32(x) (x) +# define be32toh(x) (x) +# define htole32(x) \ + (__extension__({ \ + uint32_t _temp = (x); \ + ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \ + ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \ + })) +# define le32toh(x) (htole32((x))) + +# define htobe64(x) (x) +# define be64toh(x) (x) +# define htole64(x) \ + (__extension__({ \ + uint64_t __temp = (x); \ + uint32_t __low = htobe32((uint32_t)__temp); \ + uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \ + (((uint64_t)__low) << 32) | __high; \ + })) +# define le64toh(x) (htole64((x))) + +/* ... generic little-endian fallback code */ +#elif defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ + +# define htole32(x) (x) +# define le32toh(x) (x) +# define htobe32(x) \ + (__extension__({ \ + uint32_t _temp = (x); \ + ((_temp >> 24) & 0x000000FF) | ((_temp >> 8) & 0x0000FF00) | \ + ((_temp << 8) & 0x00FF0000) | ((_temp << 24) & 0xFF000000); \ + })) +# define be32toh(x) (htobe32((x))) + +# define htole64(x) (x) +# define le64toh(x) (x) +# define htobe64(x) \ + (__extension__({ \ + uint64_t __temp = (x); \ + uint32_t __low = htobe32((uint32_t)__temp); \ + uint32_t __high = htobe32((uint32_t)(__temp >> 32)); \ + (((uint64_t)__low) << 32) | __high; \ + })) +# define be64toh(x) (htobe64((x))) + +/* ... couldn't determine endian-ness of the target platform */ +#else +# error "Please define __BYTE_ORDER__!" + +#endif /* defined(__linux__) || ... */ + +/* Loads and stores. These avoid undefined behavior due to unaligned memory + * accesses, via memcpy. */ + +inline static uint16_t load16(uint8_t *b) { + uint16_t x; + memcpy(&x, b, 2); + return x; +} + +inline static uint32_t load32(uint8_t *b) { + uint32_t x; + memcpy(&x, b, 4); + return x; +} + +inline static uint64_t load64(uint8_t *b) { + uint64_t x; + memcpy(&x, b, 8); + return x; +} + +inline static void store16(uint8_t *b, uint16_t i) { + memcpy(b, &i, 2); +} + +inline static void store32(uint8_t *b, uint32_t i) { + memcpy(b, &i, 4); +} + +inline static void store64(uint8_t *b, uint64_t i) { + memcpy(b, &i, 8); +} + +/* Legacy accessors so that this header can serve as an implementation of + * C.Endianness */ +#define load16_le(b) (le16toh(load16(b))) +#define store16_le(b, i) (store16(b, htole16(i))) +#define load16_be(b) (be16toh(load16(b))) +#define store16_be(b, i) (store16(b, htobe16(i))) + +#define load32_le(b) (le32toh(load32(b))) +#define store32_le(b, i) (store32(b, htole32(i))) +#define load32_be(b) (be32toh(load32(b))) +#define store32_be(b, i) (store32(b, htobe32(i))) + +#define load64_le(b) (le64toh(load64(b))) +#define store64_le(b, i) (store64(b, htole64(i))) +#define load64_be(b) (be64toh(load64(b))) +#define store64_be(b, i) (store64(b, htobe64(i))) + +/* Co-existence of LowStar.Endianness and FStar.Endianness generates name + * conflicts, because of course both insist on having no prefixes. Until a + * prefix is added, or until we truly retire FStar.Endianness, solve this issue + * in an elegant way. 
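+ *
+ * For illustration, the accessors defined above round-trip identically on any
+ * host: with a hypothetical 4-byte buffer `buf`, store32_be(buf, 0x01020304U)
+ * writes the bytes 01 02 03 04 in that order, and load32_be(buf) then yields
+ * 0x01020304U again regardless of host endianness, e.g.:
+ *
+ *   uint8_t buf[4];
+ *   store32_be(buf, 0x01020304U);
+ *   uint32_t v = load32_be(buf);
+ *
+ * The memcpy-based load32/store32 underneath keep these accesses free of
+ * unaligned-access undefined behaviour.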
*/ +#define load16_le0 load16_le +#define store16_le0 store16_le +#define load16_be0 load16_be +#define store16_be0 store16_be + +#define load32_le0 load32_le +#define store32_le0 store32_le +#define load32_be0 load32_be +#define store32_be0 store32_be + +#define load64_le0 load64_le +#define store64_le0 store64_le +#define load64_be0 load64_be +#define store64_be0 store64_be + +#define load128_le0 load128_le +#define store128_le0 store128_le +#define load128_be0 load128_be +#define store128_be0 store128_be + +#endif diff --git a/Modules/_hacl/include/python_hacl_namespaces.h b/Modules/_hacl/include/python_hacl_namespaces.h new file mode 100644 index 00000000000000..af390459311fe8 --- /dev/null +++ b/Modules/_hacl/include/python_hacl_namespaces.h @@ -0,0 +1,28 @@ +#ifndef _PYTHON_HACL_NAMESPACES_H +#define _PYTHON_HACL_NAMESPACES_H + +/* + * C's excuse for namespaces: Use globally unique names to avoid linkage + * conflicts with builds linking or dynamically loading other code potentially + * using HACL* libraries. + */ + +#define Hacl_Streaming_SHA2_state_sha2_224_s python_hashlib_Hacl_Streaming_SHA2_state_sha2_224_s +#define Hacl_Streaming_SHA2_state_sha2_224 python_hashlib_Hacl_Streaming_SHA2_state_sha2_224 +#define Hacl_Streaming_SHA2_state_sha2_256 python_hashlib_Hacl_Streaming_SHA2_state_sha2_256 +#define Hacl_Streaming_SHA2_create_in_256 python_hashlib_Hacl_Streaming_SHA2_create_in_256 +#define Hacl_Streaming_SHA2_create_in_224 python_hashlib_Hacl_Streaming_SHA2_create_in_224 +#define Hacl_Streaming_SHA2_copy_256 python_hashlib_Hacl_Streaming_SHA2_copy_256 +#define Hacl_Streaming_SHA2_copy_224 python_hashlib_Hacl_Streaming_SHA2_copy_224 +#define Hacl_Streaming_SHA2_init_256 python_hashlib_Hacl_Streaming_SHA2_init_256 +#define Hacl_Streaming_SHA2_init_224 python_hashlib_Hacl_Streaming_SHA2_init_224 +#define Hacl_Streaming_SHA2_update_256 python_hashlib_Hacl_Streaming_SHA2_update_256 +#define Hacl_Streaming_SHA2_update_224 python_hashlib_Hacl_Streaming_SHA2_update_224 +#define Hacl_Streaming_SHA2_finish_256 python_hashlib_Hacl_Streaming_SHA2_finish_256 +#define Hacl_Streaming_SHA2_finish_224 python_hashlib_Hacl_Streaming_SHA2_finish_224 +#define Hacl_Streaming_SHA2_free_256 python_hashlib_Hacl_Streaming_SHA2_free_256 +#define Hacl_Streaming_SHA2_free_224 python_hashlib_Hacl_Streaming_SHA2_free_224 +#define Hacl_Streaming_SHA2_sha256 python_hashlib_Hacl_Streaming_SHA2_sha256 +#define Hacl_Streaming_SHA2_sha224 python_hashlib_Hacl_Streaming_SHA2_sha224 + +#endif // _PYTHON_HACL_NAMESPACES_H diff --git a/Modules/_hacl/internal/Hacl_SHA2_Generic.h b/Modules/_hacl/internal/Hacl_SHA2_Generic.h new file mode 100644 index 00000000000000..23f7cea1eb3884 --- /dev/null +++ b/Modules/_hacl/internal/Hacl_SHA2_Generic.h @@ -0,0 +1,135 @@ +/* MIT License + * + * Copyright (c) 2016-2022 INRIA, CMU and Microsoft Corporation + * Copyright (c) 2022-2023 HACL* Contributors + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in all + * copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + + +#ifndef __internal_Hacl_SHA2_Generic_H +#define __internal_Hacl_SHA2_Generic_H + +#if defined(__cplusplus) +extern "C" { +#endif + +#include +#include "krml/FStar_UInt_8_16_32_64.h" +#include "krml/lowstar_endianness.h" +#include "krml/internal/target.h" + + + + +static const +uint32_t +Hacl_Impl_SHA2_Generic_h224[8U] = + { + (uint32_t)0xc1059ed8U, (uint32_t)0x367cd507U, (uint32_t)0x3070dd17U, (uint32_t)0xf70e5939U, + (uint32_t)0xffc00b31U, (uint32_t)0x68581511U, (uint32_t)0x64f98fa7U, (uint32_t)0xbefa4fa4U + }; + +static const +uint32_t +Hacl_Impl_SHA2_Generic_h256[8U] = + { + (uint32_t)0x6a09e667U, (uint32_t)0xbb67ae85U, (uint32_t)0x3c6ef372U, (uint32_t)0xa54ff53aU, + (uint32_t)0x510e527fU, (uint32_t)0x9b05688cU, (uint32_t)0x1f83d9abU, (uint32_t)0x5be0cd19U + }; + +static const +uint64_t +Hacl_Impl_SHA2_Generic_h384[8U] = + { + (uint64_t)0xcbbb9d5dc1059ed8U, (uint64_t)0x629a292a367cd507U, (uint64_t)0x9159015a3070dd17U, + (uint64_t)0x152fecd8f70e5939U, (uint64_t)0x67332667ffc00b31U, (uint64_t)0x8eb44a8768581511U, + (uint64_t)0xdb0c2e0d64f98fa7U, (uint64_t)0x47b5481dbefa4fa4U + }; + +static const +uint64_t +Hacl_Impl_SHA2_Generic_h512[8U] = + { + (uint64_t)0x6a09e667f3bcc908U, (uint64_t)0xbb67ae8584caa73bU, (uint64_t)0x3c6ef372fe94f82bU, + (uint64_t)0xa54ff53a5f1d36f1U, (uint64_t)0x510e527fade682d1U, (uint64_t)0x9b05688c2b3e6c1fU, + (uint64_t)0x1f83d9abfb41bd6bU, (uint64_t)0x5be0cd19137e2179U + }; + +static const +uint32_t +Hacl_Impl_SHA2_Generic_k224_256[64U] = + { + (uint32_t)0x428a2f98U, (uint32_t)0x71374491U, (uint32_t)0xb5c0fbcfU, (uint32_t)0xe9b5dba5U, + (uint32_t)0x3956c25bU, (uint32_t)0x59f111f1U, (uint32_t)0x923f82a4U, (uint32_t)0xab1c5ed5U, + (uint32_t)0xd807aa98U, (uint32_t)0x12835b01U, (uint32_t)0x243185beU, (uint32_t)0x550c7dc3U, + (uint32_t)0x72be5d74U, (uint32_t)0x80deb1feU, (uint32_t)0x9bdc06a7U, (uint32_t)0xc19bf174U, + (uint32_t)0xe49b69c1U, (uint32_t)0xefbe4786U, (uint32_t)0x0fc19dc6U, (uint32_t)0x240ca1ccU, + (uint32_t)0x2de92c6fU, (uint32_t)0x4a7484aaU, (uint32_t)0x5cb0a9dcU, (uint32_t)0x76f988daU, + (uint32_t)0x983e5152U, (uint32_t)0xa831c66dU, (uint32_t)0xb00327c8U, (uint32_t)0xbf597fc7U, + (uint32_t)0xc6e00bf3U, (uint32_t)0xd5a79147U, (uint32_t)0x06ca6351U, (uint32_t)0x14292967U, + (uint32_t)0x27b70a85U, (uint32_t)0x2e1b2138U, (uint32_t)0x4d2c6dfcU, (uint32_t)0x53380d13U, + (uint32_t)0x650a7354U, (uint32_t)0x766a0abbU, (uint32_t)0x81c2c92eU, (uint32_t)0x92722c85U, + (uint32_t)0xa2bfe8a1U, (uint32_t)0xa81a664bU, (uint32_t)0xc24b8b70U, (uint32_t)0xc76c51a3U, + (uint32_t)0xd192e819U, (uint32_t)0xd6990624U, (uint32_t)0xf40e3585U, (uint32_t)0x106aa070U, + (uint32_t)0x19a4c116U, (uint32_t)0x1e376c08U, (uint32_t)0x2748774cU, (uint32_t)0x34b0bcb5U, + (uint32_t)0x391c0cb3U, (uint32_t)0x4ed8aa4aU, (uint32_t)0x5b9cca4fU, (uint32_t)0x682e6ff3U, + (uint32_t)0x748f82eeU, (uint32_t)0x78a5636fU, (uint32_t)0x84c87814U, (uint32_t)0x8cc70208U, + (uint32_t)0x90befffaU, (uint32_t)0xa4506cebU, (uint32_t)0xbef9a3f7U, (uint32_t)0xc67178f2U + }; + +static const +uint64_t 
+Hacl_Impl_SHA2_Generic_k384_512[80U] = + { + (uint64_t)0x428a2f98d728ae22U, (uint64_t)0x7137449123ef65cdU, (uint64_t)0xb5c0fbcfec4d3b2fU, + (uint64_t)0xe9b5dba58189dbbcU, (uint64_t)0x3956c25bf348b538U, (uint64_t)0x59f111f1b605d019U, + (uint64_t)0x923f82a4af194f9bU, (uint64_t)0xab1c5ed5da6d8118U, (uint64_t)0xd807aa98a3030242U, + (uint64_t)0x12835b0145706fbeU, (uint64_t)0x243185be4ee4b28cU, (uint64_t)0x550c7dc3d5ffb4e2U, + (uint64_t)0x72be5d74f27b896fU, (uint64_t)0x80deb1fe3b1696b1U, (uint64_t)0x9bdc06a725c71235U, + (uint64_t)0xc19bf174cf692694U, (uint64_t)0xe49b69c19ef14ad2U, (uint64_t)0xefbe4786384f25e3U, + (uint64_t)0x0fc19dc68b8cd5b5U, (uint64_t)0x240ca1cc77ac9c65U, (uint64_t)0x2de92c6f592b0275U, + (uint64_t)0x4a7484aa6ea6e483U, (uint64_t)0x5cb0a9dcbd41fbd4U, (uint64_t)0x76f988da831153b5U, + (uint64_t)0x983e5152ee66dfabU, (uint64_t)0xa831c66d2db43210U, (uint64_t)0xb00327c898fb213fU, + (uint64_t)0xbf597fc7beef0ee4U, (uint64_t)0xc6e00bf33da88fc2U, (uint64_t)0xd5a79147930aa725U, + (uint64_t)0x06ca6351e003826fU, (uint64_t)0x142929670a0e6e70U, (uint64_t)0x27b70a8546d22ffcU, + (uint64_t)0x2e1b21385c26c926U, (uint64_t)0x4d2c6dfc5ac42aedU, (uint64_t)0x53380d139d95b3dfU, + (uint64_t)0x650a73548baf63deU, (uint64_t)0x766a0abb3c77b2a8U, (uint64_t)0x81c2c92e47edaee6U, + (uint64_t)0x92722c851482353bU, (uint64_t)0xa2bfe8a14cf10364U, (uint64_t)0xa81a664bbc423001U, + (uint64_t)0xc24b8b70d0f89791U, (uint64_t)0xc76c51a30654be30U, (uint64_t)0xd192e819d6ef5218U, + (uint64_t)0xd69906245565a910U, (uint64_t)0xf40e35855771202aU, (uint64_t)0x106aa07032bbd1b8U, + (uint64_t)0x19a4c116b8d2d0c8U, (uint64_t)0x1e376c085141ab53U, (uint64_t)0x2748774cdf8eeb99U, + (uint64_t)0x34b0bcb5e19b48a8U, (uint64_t)0x391c0cb3c5c95a63U, (uint64_t)0x4ed8aa4ae3418acbU, + (uint64_t)0x5b9cca4f7763e373U, (uint64_t)0x682e6ff3d6b2b8a3U, (uint64_t)0x748f82ee5defb2fcU, + (uint64_t)0x78a5636f43172f60U, (uint64_t)0x84c87814a1f0ab72U, (uint64_t)0x8cc702081a6439ecU, + (uint64_t)0x90befffa23631e28U, (uint64_t)0xa4506cebde82bde9U, (uint64_t)0xbef9a3f7b2c67915U, + (uint64_t)0xc67178f2e372532bU, (uint64_t)0xca273eceea26619cU, (uint64_t)0xd186b8c721c0c207U, + (uint64_t)0xeada7dd6cde0eb1eU, (uint64_t)0xf57d4f7fee6ed178U, (uint64_t)0x06f067aa72176fbaU, + (uint64_t)0x0a637dc5a2c898a6U, (uint64_t)0x113f9804bef90daeU, (uint64_t)0x1b710b35131c471bU, + (uint64_t)0x28db77f523047d84U, (uint64_t)0x32caab7b40c72493U, (uint64_t)0x3c9ebe0a15c9bebcU, + (uint64_t)0x431d67c49c100d4cU, (uint64_t)0x4cc5d4becb3e42b6U, (uint64_t)0x597f299cfc657e2aU, + (uint64_t)0x5fcb6fab3ad6faecU, (uint64_t)0x6c44198c4a475817U + }; + +#if defined(__cplusplus) +} +#endif + +#define __internal_Hacl_SHA2_Generic_H_DEFINED +#endif diff --git a/Modules/_hacl/refresh.sh b/Modules/_hacl/refresh.sh new file mode 100755 index 00000000000000..594873862a2db0 --- /dev/null +++ b/Modules/_hacl/refresh.sh @@ -0,0 +1,132 @@ +#!/usr/bin/env bash +# +# Use this script to update the HACL generated hash algorithm implementation +# code from a local checkout of the upstream hacl-star repository. +# + +set -e +set -o pipefail + +if [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then + echo "A bash version >= 4 required. Got: $BASH_VERSION" >&2 + exit 1 +fi + +if [[ $1 == "" ]]; then + echo "Usage: $0 path-to-hacl-directory" + echo "" + echo " path-to-hacl-directory should be a local git checkout of a" + echo " https://github.com/hacl-star/hacl-star/ repo." + exit 1 +fi + +# Update this when updating to a new version after verifying that the changes +# the update brings in are good. 
+expected_hacl_star_rev=94aabbb4cf71347d3779a8db486c761403c6d036 + +hacl_dir="$(realpath "$1")" +cd "$(dirname "$0")" +actual_rev=$(cd "$hacl_dir" && git rev-parse HEAD) + +if [[ "$actual_rev" != "$expected_hacl_star_rev" ]]; then + echo "WARNING: HACL* in '$hacl_dir' is at revision:" >&2 + echo " $actual_rev" >&2 + echo "but expected revision:" >&2 + echo " $expected_hacl_star_rev" >&2 + echo "Edit the expected rev if the changes pulled in are what you want." +fi + +# Step 1: copy files + +declare -a dist_files +dist_files=( + Hacl_Streaming_SHA2.h + internal/Hacl_SHA2_Generic.h + Hacl_Streaming_SHA2.c +) + +declare -a include_files +include_files=( + include/krml/lowstar_endianness.h + include/krml/internal/target.h +) + +declare -a lib_files +lib_files=( + krmllib/dist/minimal/FStar_UInt_8_16_32_64.h +) + +# C files for the algorithms themselves: current directory +(cd "$hacl_dir/dist/gcc-compatible" && tar cf - "${dist_files[@]}") | tar xf - + +# Support header files (e.g. endianness macros): stays in include/ +(cd "$hacl_dir/dist/karamel" && tar cf - "${include_files[@]}") | tar xf - + +# Special treatment: we don't bother with an extra directory and move krmllib +# files to the same include directory +for f in "${lib_files[@]}"; do + cp "$hacl_dir/dist/karamel/$f" include/krml/ +done + +# Step 2: some in-place modifications to keep things simple and minimal + +# This is basic, but refreshes of the vendored HACL code are infrequent, so +# let's not over-engineer this. +if [[ $(uname) == "Darwin" ]]; then + # You're already running with homebrew or macports to satisfy the + # bash>=4 requirement, so requiring GNU sed is entirely reasonable. + sed=gsed +else + sed=sed +fi + +readarray -t all_files < <(find . -name '*.h' -or -name '*.c') + +# types.h is a simple wrapper that defines the uint128 type then proceeds to +# include FStar_UInt_8_16_32_64.h; we jump the types.h step since our current +# selection of algorithms does not necessitate the use of uint128 +$sed -i 's!#include.*types.h"!#include "krml/FStar_UInt_8_16_32_64.h"!g' "${all_files[@]}" +$sed -i 's!#include.*compat.h"!!g' "${all_files[@]}" + +# FStar_UInt_8_16_32_64 contains definitions useful in the general case, but not +# for us; trim! +$sed -i -z 's!\(extern\|typedef\)[^;]*;\n\n!!g' include/krml/FStar_UInt_8_16_32_64.h + +# This contains static inline prototypes that are defined in +# FStar_UInt_8_16_32_64; they are by default repeated for safety of separate +# compilation, but this is not necessary. +$sed -i 's!#include.*Hacl_Krmllib.h"!!g' "${all_files[@]}" + +# This header is useful for *other* algorithms that refer to SHA2, e.g. Ed25519 +# which needs to compute a digest of a message before signing it. Here, since no +# other algorithm builds upon SHA2, this internal header is useless (and is not +# included in $dist_files). +$sed -i 's!#include.*internal/Hacl_Streaming_SHA2.h"!#include "Hacl_Streaming_SHA2.h"!g' "${all_files[@]}" + +# The SHA2 file contains all variants of SHA2. We strip 384 and 512 for the time +# being, to be included later. +# This regexp matches a separator (two new lines), followed by: +# +# * +# ... 384 or 512 ... { +# * +# } +# +# The first non-empty lines are the comment block. The second ... may spill over +# the next following lines if the arguments are printed in one-per-line mode. 
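+#
+# As an illustration only (the exact upstream text may differ), the 384/512
+# material stripped here has the shape of a doc comment followed by a
+# definition or prototype such as this hypothetical one:
+#
+#   void Hacl_Streaming_SHA2_sha384(uint8_t *input, uint32_t input_len, uint8_t *dst);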
+$sed -i -z 's/\n\n\([^\n]\+\n\)*[^\n]*\(384\|512\)[^{]*{\n\?\( [^\n]*\n\)*}//g' Hacl_Streaming_SHA2.c + +# Same thing with function prototypes +$sed -i -z 's/\n\n\([^\n]\+\n\)*[^\n]*\(384\|512\)[^;]*;//g' Hacl_Streaming_SHA2.h + +# Use globally unique names for the Hacl_ C APIs to avoid linkage conflicts. +$sed -i -z 's!#include \n!#include \n#include "python_hacl_namespaces.h"\n!' Hacl_Streaming_SHA2.h + +# Finally, we remove a bunch of ifdefs from target.h that are, again, useful in +# the general case, but not exercised by the subset of HACL* that we vendor. +$sed -z -i 's!#ifndef KRML_\(HOST_PRINTF\|HOST_EXIT\|PRE_ALIGN\|POST_ALIGN\|ALIGNED_MALLOC\|ALIGNED_FREE\|HOST_TIME\)\n\(\n\|# [^\n]*\n\|[^#][^\n]*\n\)*#endif\n\n!!g' include/krml/internal/target.h +$sed -z -i 's!\n\n\([^#][^\n]*\n\)*#define KRML_\(EABORT\|EXIT\|CHECK_SIZE\)[^\n]*\(\n [^\n]*\)*!!g' include/krml/internal/target.h +$sed -z -i 's!\n\n\([^#][^\n]*\n\)*#if [^\n]*\n\( [^\n]*\n\)*#define KRML_\(EABORT\|EXIT\|CHECK_SIZE\)[^\n]*\(\n [^\n]*\)*!!g' include/krml/internal/target.h +$sed -z -i 's!\n\n\([^#][^\n]*\n\)*#if [^\n]*\n\( [^\n]*\n\)*# define _\?KRML_\(DEPRECATED\|CHECK_SIZE_PRAGMA\|HOST_EPRINTF\|HOST_SNPRINTF\)[^\n]*\n\([^#][^\n]*\n\|#el[^\n]*\n\|# [^\n]*\n\)*#endif!!g' include/krml/internal/target.h + +echo "Updated; verify all is okay using git diff and git status." diff --git a/Modules/_math.h b/Modules/_math.h index 4a6bc223ef5fb5..2285b64747c0bd 100644 --- a/Modules/_math.h +++ b/Modules/_math.h @@ -7,8 +7,9 @@ static double _Py_log1p(double x) { - /* Some platforms supply a log1p function but don't respect the sign of - zero: log1p(-0.0) gives 0.0 instead of the correct result of -0.0. + /* Some platforms (e.g. MacOS X 10.8, see gh-59682) supply a log1p function + but don't respect the sign of zero: log1p(-0.0) gives 0.0 instead of + the correct result of -0.0. To save fiddling with configure tests and platform checks, we handle the special case of zero input directly on all platforms. 
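The special-casing described in that comment can be sketched as follows; this
is an illustration only, not the exact body of _Py_log1p, and the
_Py_log1p_zero_sketch name is hypothetical:

    #include <math.h>

    static double
    _Py_log1p_zero_sketch(double x)
    {
        if (x == 0.0) {
            /* True for both +0.0 and -0.0; returning x itself preserves the
               sign of zero regardless of the platform's log1p() behaviour. */
            return x;
        }
        return log1p(x);
    }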
diff --git a/Modules/_testcapi/heaptype.c b/Modules/_testcapi/heaptype.c index bf80fd64d80b35..39639f7ed048f2 100644 --- a/Modules/_testcapi/heaptype.c +++ b/Modules/_testcapi/heaptype.c @@ -116,10 +116,10 @@ test_from_spec_invalid_metatype_inheritance(PyObject *self, PyObject *Py_UNUSED( PyObject *bases = NULL; PyObject *new = NULL; PyObject *meta_error_string = NULL; - PyObject *exc_type = NULL; - PyObject *exc_value = NULL; - PyObject *exc_traceback = NULL; + PyObject *exc = NULL; PyObject *result = NULL; + PyObject *message = NULL; + PyObject *args = NULL; metaclass_a = PyType_FromSpecWithBases(&MinimalMetaclass_spec, (PyObject*)&PyType_Type); if (metaclass_a == NULL) { @@ -156,13 +156,19 @@ test_from_spec_invalid_metatype_inheritance(PyObject *self, PyObject *Py_UNUSED( // Assert that the correct exception was raised if (PyErr_ExceptionMatches(PyExc_TypeError)) { - PyErr_Fetch(&exc_type, &exc_value, &exc_traceback); - + exc = PyErr_GetRaisedException(); + args = PyException_GetArgs(exc); + if (!PyTuple_Check(args) || PyTuple_Size(args) != 1) { + PyErr_SetString(PyExc_AssertionError, + "TypeError args are not a one-tuple"); + goto finally; + } + message = Py_NewRef(PyTuple_GET_ITEM(args, 0)); meta_error_string = PyUnicode_FromString("metaclass conflict:"); if (meta_error_string == NULL) { goto finally; } - int res = PyUnicode_Contains(exc_value, meta_error_string); + int res = PyUnicode_Contains(message, meta_error_string); if (res < 0) { goto finally; } @@ -179,11 +185,11 @@ test_from_spec_invalid_metatype_inheritance(PyObject *self, PyObject *Py_UNUSED( Py_XDECREF(bases); Py_XDECREF(new); Py_XDECREF(meta_error_string); - Py_XDECREF(exc_type); - Py_XDECREF(exc_value); - Py_XDECREF(exc_traceback); + Py_XDECREF(exc); + Py_XDECREF(message); Py_XDECREF(class_a); Py_XDECREF(class_b); + Py_XDECREF(args); return result; } diff --git a/Modules/_testcapimodule.c b/Modules/_testcapimodule.c index 46c025bf53404a..3c411fa0d76358 100644 --- a/Modules/_testcapimodule.c +++ b/Modules/_testcapimodule.c @@ -1534,6 +1534,42 @@ crash_no_current_thread(PyObject *self, PyObject *Py_UNUSED(ignored)) return NULL; } +/* Test that the GILState thread and the "current" thread match. */ +static PyObject * +test_current_tstate_matches(PyObject *self, PyObject *Py_UNUSED(ignored)) +{ + PyThreadState *orig_tstate = PyThreadState_Get(); + + if (orig_tstate != PyGILState_GetThisThreadState()) { + PyErr_SetString(PyExc_RuntimeError, + "current thread state doesn't match GILState"); + return NULL; + } + + const char *err = NULL; + PyThreadState_Swap(NULL); + PyThreadState *substate = Py_NewInterpreter(); + + if (substate != PyThreadState_Get()) { + err = "subinterpreter thread state not current"; + goto finally; + } + if (substate != PyGILState_GetThisThreadState()) { + err = "subinterpreter thread state doesn't match GILState"; + goto finally; + } + +finally: + Py_EndInterpreter(substate); + PyThreadState_Swap(orig_tstate); + + if (err != NULL) { + PyErr_SetString(PyExc_RuntimeError, err); + return NULL; + } + Py_RETURN_NONE; +} + /* To run some code in a sub-interpreter. 
*/ static PyObject * run_in_subinterp(PyObject *self, PyObject *args) @@ -1639,6 +1675,58 @@ run_in_subinterp_with_config(PyObject *self, PyObject *args, PyObject *kwargs) return PyLong_FromLong(r); } +static void +_xid_capsule_destructor(PyObject *capsule) +{ + _PyCrossInterpreterData *data = \ + (_PyCrossInterpreterData *)PyCapsule_GetPointer(capsule, NULL); + if (data != NULL) { + assert(_PyCrossInterpreterData_Release(data) == 0); + PyMem_Free(data); + } +} + +static PyObject * +get_crossinterp_data(PyObject *self, PyObject *args) +{ + PyObject *obj = NULL; + if (!PyArg_ParseTuple(args, "O:get_crossinterp_data", &obj)) { + return NULL; + } + + _PyCrossInterpreterData *data = PyMem_NEW(_PyCrossInterpreterData, 1); + if (data == NULL) { + PyErr_NoMemory(); + return NULL; + } + if (_PyObject_GetCrossInterpreterData(obj, data) != 0) { + PyMem_Free(data); + return NULL; + } + PyObject *capsule = PyCapsule_New(data, NULL, _xid_capsule_destructor); + if (capsule == NULL) { + assert(_PyCrossInterpreterData_Release(data) == 0); + PyMem_Free(data); + } + return capsule; +} + +static PyObject * +restore_crossinterp_data(PyObject *self, PyObject *args) +{ + PyObject *capsule = NULL; + if (!PyArg_ParseTuple(args, "O:restore_crossinterp_data", &capsule)) { + return NULL; + } + + _PyCrossInterpreterData *data = \ + (_PyCrossInterpreterData *)PyCapsule_GetPointer(capsule, NULL); + if (data == NULL) { + return NULL; + } + return _PyCrossInterpreterData_NewObject(data); +} + static void slot_tp_del(PyObject *self) { @@ -2149,7 +2237,7 @@ dict_get_version(PyObject *self, PyObject *args) return NULL; _Py_COMP_DIAG_PUSH - _Py_COMP_DIAG_IGNORE_DEPR_DECLS + _Py_COMP_DIAG_IGNORE_DEPR_DECLS version = dict->ma_version_tag; _Py_COMP_DIAG_POP @@ -2976,6 +3064,144 @@ eval_get_func_desc(PyObject *self, PyObject *func) return PyUnicode_FromString(PyEval_GetFuncDesc(func)); } +static PyObject * +eval_eval_code_ex(PyObject *mod, PyObject *pos_args) +{ + PyObject *result = NULL; + PyObject *code; + PyObject *globals; + PyObject *locals = NULL; + PyObject *args = NULL; + PyObject *kwargs = NULL; + PyObject *defaults = NULL; + PyObject *kw_defaults = NULL; + PyObject *closure = NULL; + + PyObject **c_kwargs = NULL; + + if (!PyArg_UnpackTuple(pos_args, + "eval_code_ex", + 2, + 8, + &code, + &globals, + &locals, + &args, + &kwargs, + &defaults, + &kw_defaults, + &closure)) + { + goto exit; + } + + if (!PyCode_Check(code)) { + PyErr_SetString(PyExc_TypeError, + "code must be a Python code object"); + goto exit; + } + + if (!PyDict_Check(globals)) { + PyErr_SetString(PyExc_TypeError, "globals must be a dict"); + goto exit; + } + + if (locals && !PyMapping_Check(locals)) { + PyErr_SetString(PyExc_TypeError, "locals must be a mapping"); + goto exit; + } + if (locals == Py_None) { + locals = NULL; + } + + PyObject **c_args = NULL; + Py_ssize_t c_args_len = 0; + + if (args) + { + if (!PyTuple_Check(args)) { + PyErr_SetString(PyExc_TypeError, "args must be a tuple"); + goto exit; + } else { + c_args = &PyTuple_GET_ITEM(args, 0); + c_args_len = PyTuple_Size(args); + } + } + + Py_ssize_t c_kwargs_len = 0; + + if (kwargs) + { + if (!PyDict_Check(kwargs)) { + PyErr_SetString(PyExc_TypeError, "keywords must be a dict"); + goto exit; + } else { + c_kwargs_len = PyDict_Size(kwargs); + if (c_kwargs_len > 0) { + c_kwargs = PyMem_NEW(PyObject*, 2 * c_kwargs_len); + if (!c_kwargs) { + PyErr_NoMemory(); + goto exit; + } + + Py_ssize_t i = 0; + Py_ssize_t pos = 0; + + while (PyDict_Next(kwargs, + &pos, + &c_kwargs[i], + &c_kwargs[i + 1])) + { 
+ i += 2; + } + c_kwargs_len = i / 2; + /* XXX This is broken if the caller deletes dict items! */ + } + } + } + + + PyObject **c_defaults = NULL; + Py_ssize_t c_defaults_len = 0; + + if (defaults && PyTuple_Check(defaults)) { + c_defaults = &PyTuple_GET_ITEM(defaults, 0); + c_defaults_len = PyTuple_Size(defaults); + } + + if (kw_defaults && !PyDict_Check(kw_defaults)) { + PyErr_SetString(PyExc_TypeError, "kw_defaults must be a dict"); + goto exit; + } + + if (closure && !PyTuple_Check(closure)) { + PyErr_SetString(PyExc_TypeError, "closure must be a tuple of cells"); + goto exit; + } + + + result = PyEval_EvalCodeEx( + code, + globals, + locals, + c_args, + (int)c_args_len, + c_kwargs, + (int)c_kwargs_len, + c_defaults, + (int)c_defaults_len, + kw_defaults, + closure + ); + +exit: + if (c_kwargs) { + PyMem_DEL(c_kwargs); + } + + return result; +} + static PyObject * get_feature_macros(PyObject *self, PyObject *Py_UNUSED(args)) { @@ -3244,6 +3470,41 @@ function_set_kw_defaults(PyObject *self, PyObject *args) Py_RETURN_NONE; } +static PyObject * +err_set_raised(PyObject *self, PyObject *exc) +{ + Py_INCREF(exc); + PyErr_SetRaisedException(exc); + assert(PyErr_Occurred()); + return NULL; +} + +static PyObject * +err_restore(PyObject *self, PyObject *args) { + PyObject *type = NULL, *value = NULL, *traceback = NULL; + switch(PyTuple_Size(args)) { + case 3: + traceback = PyTuple_GetItem(args, 2); + Py_INCREF(traceback); + /* fall through */ + case 2: + value = PyTuple_GetItem(args, 1); + Py_INCREF(value); + /* fall through */ + case 1: + type = PyTuple_GetItem(args, 0); + Py_INCREF(type); + break; + default: + PyErr_SetString(PyExc_TypeError, + "wrong number of arguments"); + return NULL; + } + PyErr_Restore(type, value, traceback); + assert(PyErr_Occurred()); + return NULL; +} + static PyObject *test_buildvalue_issue38913(PyObject *, PyObject *); static PyMethodDef TestMethods[] = { @@ -3297,15 +3558,19 @@ static PyMethodDef TestMethods[] = { {"set_exc_info", test_set_exc_info, METH_VARARGS}, {"argparsing", argparsing, METH_VARARGS}, {"code_newempty", code_newempty, METH_VARARGS}, + {"eval_code_ex", eval_eval_code_ex, METH_VARARGS}, {"make_exception_with_doc", _PyCFunction_CAST(make_exception_with_doc), METH_VARARGS | METH_KEYWORDS}, {"make_memoryview_from_NULL_pointer", make_memoryview_from_NULL_pointer, METH_NOARGS}, {"crash_no_current_thread", crash_no_current_thread, METH_NOARGS}, + {"test_current_tstate_matches", test_current_tstate_matches, METH_NOARGS}, {"run_in_subinterp", run_in_subinterp, METH_VARARGS}, {"run_in_subinterp_with_config", _PyCFunction_CAST(run_in_subinterp_with_config), METH_VARARGS | METH_KEYWORDS}, + {"get_crossinterp_data", get_crossinterp_data, METH_VARARGS}, + {"restore_crossinterp_data", restore_crossinterp_data, METH_VARARGS}, {"with_tp_del", with_tp_del, METH_VARARGS}, {"create_cfunction", create_cfunction, METH_NOARGS}, {"call_in_temporary_c_thread", call_in_temporary_c_thread, METH_VARARGS, @@ -3392,6 +3657,8 @@ static PyMethodDef TestMethods[] = { {"function_set_defaults", function_set_defaults, METH_VARARGS, NULL}, {"function_get_kw_defaults", function_get_kw_defaults, METH_O, NULL}, {"function_set_kw_defaults", function_set_kw_defaults, METH_VARARGS, NULL}, + {"err_set_raised", err_set_raised, METH_O, NULL}, + {"err_restore", err_restore, METH_VARARGS, NULL}, {NULL, NULL} /* sentinel */ }; diff --git a/Modules/_xxinterpchannelsmodule.c b/Modules/_xxinterpchannelsmodule.c new file mode 100644 index 00000000000000..60538c31874864 --- /dev/null +++ 
b/Modules/_xxinterpchannelsmodule.c @@ -0,0 +1,2326 @@ + +/* interpreters module */ +/* low-level access to interpreter primitives */ +#ifndef Py_BUILD_CORE_BUILTIN +# define Py_BUILD_CORE_MODULE 1 +#endif + +#include "Python.h" +#include "pycore_pystate.h" // _PyThreadState_GET() +#include "pycore_interpreteridobject.h" + + +#define MODULE_NAME "_xxinterpchannels" + + +static PyInterpreterState * +_get_current_interp(void) +{ + // PyInterpreterState_Get() aborts if lookup fails, so don't need + // to check the result for NULL. + return PyInterpreterState_Get(); +} + +static PyObject * +_get_current_module(void) +{ + PyObject *name = PyUnicode_FromString(MODULE_NAME); + if (name == NULL) { + return NULL; + } + PyObject *mod = PyImport_GetModule(name); + Py_DECREF(name); + if (mod == NULL) { + return NULL; + } + assert(mod != Py_None); + return mod; +} + +static PyObject * +get_module_from_owned_type(PyTypeObject *cls) +{ + assert(cls != NULL); + return _get_current_module(); + // XXX Use the more efficient API now that we use heap types: + //return PyType_GetModule(cls); +} + +static struct PyModuleDef moduledef; + +static PyObject * +get_module_from_type(PyTypeObject *cls) +{ + assert(cls != NULL); + return _get_current_module(); + // XXX Use the more efficient API now that we use heap types: + //return PyType_GetModuleByDef(cls, &moduledef); +} + +static PyObject * +add_new_exception(PyObject *mod, const char *name, PyObject *base) +{ + assert(!PyObject_HasAttrString(mod, name)); + PyObject *exctype = PyErr_NewException(name, base, NULL); + if (exctype == NULL) { + return NULL; + } + int res = PyModule_AddType(mod, (PyTypeObject *)exctype); + if (res < 0) { + Py_DECREF(exctype); + return NULL; + } + return exctype; +} + +#define ADD_NEW_EXCEPTION(MOD, NAME, BASE) \ + add_new_exception(MOD, MODULE_NAME "." Py_STRINGIFY(NAME), BASE) + +static PyTypeObject * +add_new_type(PyObject *mod, PyType_Spec *spec, crossinterpdatafunc shared) +{ + PyTypeObject *cls = (PyTypeObject *)PyType_FromMetaclass( + NULL, mod, spec, NULL); + if (cls == NULL) { + return NULL; + } + if (PyModule_AddType(mod, cls) < 0) { + Py_DECREF(cls); + return NULL; + } + if (shared != NULL) { + if (_PyCrossInterpreterData_RegisterClass(cls, shared)) { + Py_DECREF(cls); + return NULL; + } + } + return cls; +} + +static int +_release_xid_data(_PyCrossInterpreterData *data, int ignoreexc) +{ + PyObject *exctype, *excval, *exctb; + if (ignoreexc) { + PyErr_Fetch(&exctype, &excval, &exctb); + } + int res = _PyCrossInterpreterData_Release(data); + if (res < 0) { + // XXX Fix this! + /* The owning interpreter is already destroyed. + * Ideally, this shouldn't ever happen. When an interpreter is + * about to be destroyed, we should clear out all of its objects + * from every channel associated with that interpreter. + * For now we hack around that to resolve refleaks, by decref'ing + * the released object here, even if its the wrong interpreter. + * The owning interpreter has already been destroyed + * so we should be okay, especially since the currently + * shareable types are all very basic, with no GC. + * That said, it becomes much messier once interpreters + * no longer share a GIL, so this needs to be fixed before then. */ + _PyCrossInterpreterData_Clear(NULL, data); + if (ignoreexc) { + // XXX Emit a warning? 
+ PyErr_Clear(); + } + } + if (ignoreexc) { + PyErr_Restore(exctype, excval, exctb); + } + return res; +} + + +/* module state *************************************************************/ + +typedef struct { + /* heap types */ + PyTypeObject *ChannelIDType; + + /* exceptions */ + PyObject *ChannelError; + PyObject *ChannelNotFoundError; + PyObject *ChannelClosedError; + PyObject *ChannelEmptyError; + PyObject *ChannelNotEmptyError; +} module_state; + +static inline module_state * +get_module_state(PyObject *mod) +{ + assert(mod != NULL); + module_state *state = PyModule_GetState(mod); + assert(state != NULL); + return state; +} + +static int +traverse_module_state(module_state *state, visitproc visit, void *arg) +{ + /* heap types */ + Py_VISIT(state->ChannelIDType); + + /* exceptions */ + Py_VISIT(state->ChannelError); + Py_VISIT(state->ChannelNotFoundError); + Py_VISIT(state->ChannelClosedError); + Py_VISIT(state->ChannelEmptyError); + Py_VISIT(state->ChannelNotEmptyError); + + return 0; +} + +static int +clear_module_state(module_state *state) +{ + /* heap types */ + if (state->ChannelIDType != NULL) { + (void)_PyCrossInterpreterData_UnregisterClass(state->ChannelIDType); + } + Py_CLEAR(state->ChannelIDType); + + /* exceptions */ + Py_CLEAR(state->ChannelError); + Py_CLEAR(state->ChannelNotFoundError); + Py_CLEAR(state->ChannelClosedError); + Py_CLEAR(state->ChannelEmptyError); + Py_CLEAR(state->ChannelNotEmptyError); + + return 0; +} + + +/* channel-specific code ****************************************************/ + +#define CHANNEL_SEND 1 +#define CHANNEL_BOTH 0 +#define CHANNEL_RECV -1 + +/* channel errors */ + +#define ERR_CHANNEL_NOT_FOUND -2 +#define ERR_CHANNEL_CLOSED -3 +#define ERR_CHANNEL_INTERP_CLOSED -4 +#define ERR_CHANNEL_EMPTY -5 +#define ERR_CHANNEL_NOT_EMPTY -6 +#define ERR_CHANNEL_MUTEX_INIT -7 +#define ERR_CHANNELS_MUTEX_INIT -8 +#define ERR_NO_NEXT_CHANNEL_ID -9 + +static int +exceptions_init(PyObject *mod) +{ + module_state *state = get_module_state(mod); + if (state == NULL) { + return -1; + } + +#define ADD(NAME, BASE) \ + do { \ + assert(state->NAME == NULL); \ + state->NAME = ADD_NEW_EXCEPTION(mod, NAME, BASE); \ + if (state->NAME == NULL) { \ + return -1; \ + } \ + } while (0) + + // A channel-related operation failed. + ADD(ChannelError, PyExc_RuntimeError); + // An operation tried to use a channel that doesn't exist. + ADD(ChannelNotFoundError, state->ChannelError); + // An operation tried to use a closed channel. + ADD(ChannelClosedError, state->ChannelError); + // An operation tried to pop from an empty channel. + ADD(ChannelEmptyError, state->ChannelError); + // An operation tried to close a non-empty channel. 
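+    // (Each ADD(NAME, BASE) in this block goes through ADD_NEW_EXCEPTION, so
+    // the exception type is created under the qualified name
+    // "_xxinterpchannels.<NAME>", added to the module, and cached on the
+    // module state; ChannelError is the base of the other four.)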
+ ADD(ChannelNotEmptyError, state->ChannelError); +#undef ADD + + return 0; +} + +static int +handle_channel_error(int err, PyObject *mod, int64_t cid) +{ + if (err == 0) { + assert(!PyErr_Occurred()); + return 0; + } + assert(err < 0); + module_state *state = get_module_state(mod); + assert(state != NULL); + if (err == ERR_CHANNEL_NOT_FOUND) { + PyErr_Format(state->ChannelNotFoundError, + "channel %" PRId64 " not found", cid); + } + else if (err == ERR_CHANNEL_CLOSED) { + PyErr_Format(state->ChannelClosedError, + "channel %" PRId64 " is closed", cid); + } + else if (err == ERR_CHANNEL_INTERP_CLOSED) { + PyErr_Format(state->ChannelClosedError, + "channel %" PRId64 " is already closed", cid); + } + else if (err == ERR_CHANNEL_EMPTY) { + PyErr_Format(state->ChannelEmptyError, + "channel %" PRId64 " is empty", cid); + } + else if (err == ERR_CHANNEL_NOT_EMPTY) { + PyErr_Format(state->ChannelNotEmptyError, + "channel %" PRId64 " may not be closed " + "if not empty (try force=True)", + cid); + } + else if (err == ERR_CHANNEL_MUTEX_INIT) { + PyErr_SetString(state->ChannelError, + "can't initialize mutex for new channel"); + } + else if (err == ERR_CHANNELS_MUTEX_INIT) { + PyErr_SetString(state->ChannelError, + "can't initialize mutex for channel management"); + } + else if (err == ERR_NO_NEXT_CHANNEL_ID) { + PyErr_SetString(state->ChannelError, + "failed to get a channel ID"); + } + else { + assert(PyErr_Occurred()); + } + return 1; +} + +/* the channel queue */ + +struct _channelitem; + +typedef struct _channelitem { + _PyCrossInterpreterData *data; + struct _channelitem *next; +} _channelitem; + +static _channelitem * +_channelitem_new(void) +{ + _channelitem *item = PyMem_NEW(_channelitem, 1); + if (item == NULL) { + PyErr_NoMemory(); + return NULL; + } + item->data = NULL; + item->next = NULL; + return item; +} + +static void +_channelitem_clear(_channelitem *item) +{ + if (item->data != NULL) { + (void)_release_xid_data(item->data, 1); + PyMem_Free(item->data); + item->data = NULL; + } + item->next = NULL; +} + +static void +_channelitem_free(_channelitem *item) +{ + _channelitem_clear(item); + PyMem_Free(item); +} + +static void +_channelitem_free_all(_channelitem *item) +{ + while (item != NULL) { + _channelitem *last = item; + item = item->next; + _channelitem_free(last); + } +} + +static _PyCrossInterpreterData * +_channelitem_popped(_channelitem *item) +{ + _PyCrossInterpreterData *data = item->data; + item->data = NULL; + _channelitem_free(item); + return data; +} + +typedef struct _channelqueue { + int64_t count; + _channelitem *first; + _channelitem *last; +} _channelqueue; + +static _channelqueue * +_channelqueue_new(void) +{ + _channelqueue *queue = PyMem_NEW(_channelqueue, 1); + if (queue == NULL) { + PyErr_NoMemory(); + return NULL; + } + queue->count = 0; + queue->first = NULL; + queue->last = NULL; + return queue; +} + +static void +_channelqueue_clear(_channelqueue *queue) +{ + _channelitem_free_all(queue->first); + queue->count = 0; + queue->first = NULL; + queue->last = NULL; +} + +static void +_channelqueue_free(_channelqueue *queue) +{ + _channelqueue_clear(queue); + PyMem_Free(queue); +} + +static int +_channelqueue_put(_channelqueue *queue, _PyCrossInterpreterData *data) +{ + _channelitem *item = _channelitem_new(); + if (item == NULL) { + return -1; + } + item->data = data; + + queue->count += 1; + if (queue->first == NULL) { + queue->first = item; + } + else { + queue->last->next = item; + } + queue->last = item; + return 0; +} + +static _PyCrossInterpreterData * 
+_channelqueue_get(_channelqueue *queue) +{ + _channelitem *item = queue->first; + if (item == NULL) { + return NULL; + } + queue->first = item->next; + if (queue->last == item) { + queue->last = NULL; + } + queue->count -= 1; + + return _channelitem_popped(item); +} + +/* channel-interpreter associations */ + +struct _channelend; + +typedef struct _channelend { + struct _channelend *next; + int64_t interp; + int open; +} _channelend; + +static _channelend * +_channelend_new(int64_t interp) +{ + _channelend *end = PyMem_NEW(_channelend, 1); + if (end == NULL) { + PyErr_NoMemory(); + return NULL; + } + end->next = NULL; + end->interp = interp; + end->open = 1; + return end; +} + +static void +_channelend_free(_channelend *end) +{ + PyMem_Free(end); +} + +static void +_channelend_free_all(_channelend *end) +{ + while (end != NULL) { + _channelend *last = end; + end = end->next; + _channelend_free(last); + } +} + +static _channelend * +_channelend_find(_channelend *first, int64_t interp, _channelend **pprev) +{ + _channelend *prev = NULL; + _channelend *end = first; + while (end != NULL) { + if (end->interp == interp) { + break; + } + prev = end; + end = end->next; + } + if (pprev != NULL) { + *pprev = prev; + } + return end; +} + +typedef struct _channelassociations { + // Note that the list entries are never removed for interpreter + // for which the channel is closed. This should not be a problem in + // practice. Also, a channel isn't automatically closed when an + // interpreter is destroyed. + int64_t numsendopen; + int64_t numrecvopen; + _channelend *send; + _channelend *recv; +} _channelends; + +static _channelends * +_channelends_new(void) +{ + _channelends *ends = PyMem_NEW(_channelends, 1); + if (ends== NULL) { + return NULL; + } + ends->numsendopen = 0; + ends->numrecvopen = 0; + ends->send = NULL; + ends->recv = NULL; + return ends; +} + +static void +_channelends_clear(_channelends *ends) +{ + _channelend_free_all(ends->send); + ends->send = NULL; + ends->numsendopen = 0; + + _channelend_free_all(ends->recv); + ends->recv = NULL; + ends->numrecvopen = 0; +} + +static void +_channelends_free(_channelends *ends) +{ + _channelends_clear(ends); + PyMem_Free(ends); +} + +static _channelend * +_channelends_add(_channelends *ends, _channelend *prev, int64_t interp, + int send) +{ + _channelend *end = _channelend_new(interp); + if (end == NULL) { + return NULL; + } + + if (prev == NULL) { + if (send) { + ends->send = end; + } + else { + ends->recv = end; + } + } + else { + prev->next = end; + } + if (send) { + ends->numsendopen += 1; + } + else { + ends->numrecvopen += 1; + } + return end; +} + +static int +_channelends_associate(_channelends *ends, int64_t interp, int send) +{ + _channelend *prev; + _channelend *end = _channelend_find(send ? 
ends->send : ends->recv, + interp, &prev); + if (end != NULL) { + if (!end->open) { + return ERR_CHANNEL_CLOSED; + } + // already associated + return 0; + } + if (_channelends_add(ends, prev, interp, send) == NULL) { + return -1; + } + return 0; +} + +static int +_channelends_is_open(_channelends *ends) +{ + if (ends->numsendopen != 0 || ends->numrecvopen != 0) { + return 1; + } + if (ends->send == NULL && ends->recv == NULL) { + return 1; + } + return 0; +} + +static void +_channelends_close_end(_channelends *ends, _channelend *end, int send) +{ + end->open = 0; + if (send) { + ends->numsendopen -= 1; + } + else { + ends->numrecvopen -= 1; + } +} + +static int +_channelends_close_interpreter(_channelends *ends, int64_t interp, int which) +{ + _channelend *prev; + _channelend *end; + if (which >= 0) { // send/both + end = _channelend_find(ends->send, interp, &prev); + if (end == NULL) { + // never associated so add it + end = _channelends_add(ends, prev, interp, 1); + if (end == NULL) { + return -1; + } + } + _channelends_close_end(ends, end, 1); + } + if (which <= 0) { // recv/both + end = _channelend_find(ends->recv, interp, &prev); + if (end == NULL) { + // never associated so add it + end = _channelends_add(ends, prev, interp, 0); + if (end == NULL) { + return -1; + } + } + _channelends_close_end(ends, end, 0); + } + return 0; +} + +static void +_channelends_close_all(_channelends *ends, int which, int force) +{ + // XXX Handle the ends. + // XXX Handle force is True. + + // Ensure all the "send"-associated interpreters are closed. + _channelend *end; + for (end = ends->send; end != NULL; end = end->next) { + _channelends_close_end(ends, end, 1); + } + + // Ensure all the "recv"-associated interpreters are closed. + for (end = ends->recv; end != NULL; end = end->next) { + _channelends_close_end(ends, end, 0); + } +} + +/* channels */ + +struct _channel; +struct _channel_closing; +static void _channel_clear_closing(struct _channel *); +static void _channel_finish_closing(struct _channel *); + +typedef struct _channel { + PyThread_type_lock mutex; + _channelqueue *queue; + _channelends *ends; + int open; + struct _channel_closing *closing; +} _PyChannelState; + +static _PyChannelState * +_channel_new(PyThread_type_lock mutex) +{ + _PyChannelState *chan = PyMem_NEW(_PyChannelState, 1); + if (chan == NULL) { + return NULL; + } + chan->mutex = mutex; + chan->queue = _channelqueue_new(); + if (chan->queue == NULL) { + PyMem_Free(chan); + return NULL; + } + chan->ends = _channelends_new(); + if (chan->ends == NULL) { + _channelqueue_free(chan->queue); + PyMem_Free(chan); + return NULL; + } + chan->open = 1; + chan->closing = NULL; + return chan; +} + +static void +_channel_free(_PyChannelState *chan) +{ + _channel_clear_closing(chan); + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + _channelqueue_free(chan->queue); + _channelends_free(chan->ends); + PyThread_release_lock(chan->mutex); + + PyThread_free_lock(chan->mutex); + PyMem_Free(chan); +} + +static int +_channel_add(_PyChannelState *chan, int64_t interp, + _PyCrossInterpreterData *data) +{ + int res = -1; + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + + if (!chan->open) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + if (_channelends_associate(chan->ends, interp, 1) != 0) { + res = ERR_CHANNEL_INTERP_CLOSED; + goto done; + } + + if (_channelqueue_put(chan->queue, data) != 0) { + goto done; + } + + res = 0; +done: + PyThread_release_lock(chan->mutex); + return res; +} + +static int +_channel_next(_PyChannelState *chan, int64_t 
interp, + _PyCrossInterpreterData **res) +{ + int err = 0; + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + + if (!chan->open) { + err = ERR_CHANNEL_CLOSED; + goto done; + } + if (_channelends_associate(chan->ends, interp, 0) != 0) { + err = ERR_CHANNEL_INTERP_CLOSED; + goto done; + } + + _PyCrossInterpreterData *data = _channelqueue_get(chan->queue); + if (data == NULL && !PyErr_Occurred() && chan->closing != NULL) { + chan->open = 0; + } + *res = data; + +done: + PyThread_release_lock(chan->mutex); + if (chan->queue->count == 0) { + _channel_finish_closing(chan); + } + return err; +} + +static int +_channel_close_interpreter(_PyChannelState *chan, int64_t interp, int end) +{ + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + + int res = -1; + if (!chan->open) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + + if (_channelends_close_interpreter(chan->ends, interp, end) != 0) { + goto done; + } + chan->open = _channelends_is_open(chan->ends); + + res = 0; +done: + PyThread_release_lock(chan->mutex); + return res; +} + +static int +_channel_close_all(_PyChannelState *chan, int end, int force) +{ + int res = -1; + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + + if (!chan->open) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + + if (!force && chan->queue->count > 0) { + res = ERR_CHANNEL_NOT_EMPTY; + goto done; + } + + chan->open = 0; + + // We *could* also just leave these in place, since we've marked + // the channel as closed already. + _channelends_close_all(chan->ends, end, force); + + res = 0; +done: + PyThread_release_lock(chan->mutex); + return res; +} + +/* the set of channels */ + +struct _channelref; + +typedef struct _channelref { + int64_t id; + _PyChannelState *chan; + struct _channelref *next; + Py_ssize_t objcount; +} _channelref; + +static _channelref * +_channelref_new(int64_t id, _PyChannelState *chan) +{ + _channelref *ref = PyMem_NEW(_channelref, 1); + if (ref == NULL) { + return NULL; + } + ref->id = id; + ref->chan = chan; + ref->next = NULL; + ref->objcount = 0; + return ref; +} + +//static void +//_channelref_clear(_channelref *ref) +//{ +// ref->id = -1; +// ref->chan = NULL; +// ref->next = NULL; +// ref->objcount = 0; +//} + +static void +_channelref_free(_channelref *ref) +{ + if (ref->chan != NULL) { + _channel_clear_closing(ref->chan); + } + //_channelref_clear(ref); + PyMem_Free(ref); +} + +static _channelref * +_channelref_find(_channelref *first, int64_t id, _channelref **pprev) +{ + _channelref *prev = NULL; + _channelref *ref = first; + while (ref != NULL) { + if (ref->id == id) { + break; + } + prev = ref; + ref = ref->next; + } + if (pprev != NULL) { + *pprev = prev; + } + return ref; +} + +typedef struct _channels { + PyThread_type_lock mutex; + _channelref *head; + int64_t numopen; + int64_t next_id; +} _channels; + +static void +_channels_init(_channels *channels, PyThread_type_lock mutex) +{ + channels->mutex = mutex; + channels->head = NULL; + channels->numopen = 0; + channels->next_id = 0; +} + +static void +_channels_fini(_channels *channels) +{ + assert(channels->numopen == 0); + assert(channels->head == NULL); + if (channels->mutex != NULL) { + PyThread_free_lock(channels->mutex); + channels->mutex = NULL; + } +} + +static int64_t +_channels_next_id(_channels *channels) // needs lock +{ + int64_t id = channels->next_id; + if (id < 0) { + /* overflow */ + return -1; + } + channels->next_id += 1; + return id; +} + +static int +_channels_lookup(_channels *channels, int64_t id, PyThread_type_lock *pmutex, + _PyChannelState **res) +{ + int err = 
-1; + _PyChannelState *chan = NULL; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + if (pmutex != NULL) { + *pmutex = NULL; + } + + _channelref *ref = _channelref_find(channels->head, id, NULL); + if (ref == NULL) { + err = ERR_CHANNEL_NOT_FOUND; + goto done; + } + if (ref->chan == NULL || !ref->chan->open) { + err = ERR_CHANNEL_CLOSED; + goto done; + } + + if (pmutex != NULL) { + // The mutex will be closed by the caller. + *pmutex = channels->mutex; + } + + chan = ref->chan; + err = 0; + +done: + if (pmutex == NULL || *pmutex == NULL) { + PyThread_release_lock(channels->mutex); + } + *res = chan; + return err; +} + +static int64_t +_channels_add(_channels *channels, _PyChannelState *chan) +{ + int64_t cid = -1; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + + // Create a new ref. + int64_t id = _channels_next_id(channels); + if (id < 0) { + cid = ERR_NO_NEXT_CHANNEL_ID; + goto done; + } + _channelref *ref = _channelref_new(id, chan); + if (ref == NULL) { + goto done; + } + + // Add it to the list. + // We assume that the channel is a new one (not already in the list). + ref->next = channels->head; + channels->head = ref; + channels->numopen += 1; + + cid = id; +done: + PyThread_release_lock(channels->mutex); + return cid; +} + +/* forward */ +static int _channel_set_closing(struct _channelref *, PyThread_type_lock); + +static int +_channels_close(_channels *channels, int64_t cid, _PyChannelState **pchan, + int end, int force) +{ + int res = -1; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + if (pchan != NULL) { + *pchan = NULL; + } + + _channelref *ref = _channelref_find(channels->head, cid, NULL); + if (ref == NULL) { + res = ERR_CHANNEL_NOT_FOUND; + goto done; + } + + if (ref->chan == NULL) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + else if (!force && end == CHANNEL_SEND && ref->chan->closing != NULL) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + else { + int err = _channel_close_all(ref->chan, end, force); + if (err != 0) { + if (end == CHANNEL_SEND && err == ERR_CHANNEL_NOT_EMPTY) { + if (ref->chan->closing != NULL) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + // Mark the channel as closing and return. The channel + // will be cleaned up in _channel_next(). 
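/* Deferred close: the channel still has queued items and force=False,
 * so instead of failing outright the channel is only flagged as
 * "closing" here.  _channel_next() keeps serving the queued items and,
 * once the queue is empty, _channel_finish_closing() detaches the
 * channel from its _channelref and frees it. */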
+ PyErr_Clear(); + int err = _channel_set_closing(ref, channels->mutex); + if (err != 0) { + res = err; + goto done; + } + if (pchan != NULL) { + *pchan = ref->chan; + } + res = 0; + } + else { + res = err; + } + goto done; + } + if (pchan != NULL) { + *pchan = ref->chan; + } + else { + _channel_free(ref->chan); + } + ref->chan = NULL; + } + + res = 0; +done: + PyThread_release_lock(channels->mutex); + return res; +} + +static void +_channels_remove_ref(_channels *channels, _channelref *ref, _channelref *prev, + _PyChannelState **pchan) +{ + if (ref == channels->head) { + channels->head = ref->next; + } + else { + prev->next = ref->next; + } + channels->numopen -= 1; + + if (pchan != NULL) { + *pchan = ref->chan; + } + _channelref_free(ref); +} + +static int +_channels_remove(_channels *channels, int64_t id, _PyChannelState **pchan) +{ + int res = -1; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + + if (pchan != NULL) { + *pchan = NULL; + } + + _channelref *prev = NULL; + _channelref *ref = _channelref_find(channels->head, id, &prev); + if (ref == NULL) { + res = ERR_CHANNEL_NOT_FOUND; + goto done; + } + + _channels_remove_ref(channels, ref, prev, pchan); + + res = 0; +done: + PyThread_release_lock(channels->mutex); + return res; +} + +static int +_channels_add_id_object(_channels *channels, int64_t id) +{ + int res = -1; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + + _channelref *ref = _channelref_find(channels->head, id, NULL); + if (ref == NULL) { + res = ERR_CHANNEL_NOT_FOUND; + goto done; + } + ref->objcount += 1; + + res = 0; +done: + PyThread_release_lock(channels->mutex); + return res; +} + +static void +_channels_drop_id_object(_channels *channels, int64_t id) +{ + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + + _channelref *prev = NULL; + _channelref *ref = _channelref_find(channels->head, id, &prev); + if (ref == NULL) { + // Already destroyed. + goto done; + } + ref->objcount -= 1; + + // Destroy if no longer used. 
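/* ref->objcount tracks how many ChannelID objects, across all
 * interpreters, currently refer to this channel: newchannelid() bumps
 * it via _channels_add_id_object() and channelid_dealloc() ends up
 * here.  Once the last ID object is gone the channel itself is removed
 * from the registry and freed. */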
+ if (ref->objcount == 0) { + _PyChannelState *chan = NULL; + _channels_remove_ref(channels, ref, prev, &chan); + if (chan != NULL) { + _channel_free(chan); + } + } + +done: + PyThread_release_lock(channels->mutex); +} + +static int64_t * +_channels_list_all(_channels *channels, int64_t *count) +{ + int64_t *cids = NULL; + PyThread_acquire_lock(channels->mutex, WAIT_LOCK); + int64_t *ids = PyMem_NEW(int64_t, (Py_ssize_t)(channels->numopen)); + if (ids == NULL) { + goto done; + } + _channelref *ref = channels->head; + for (int64_t i=0; ref != NULL; ref = ref->next, i++) { + ids[i] = ref->id; + } + *count = channels->numopen; + + cids = ids; +done: + PyThread_release_lock(channels->mutex); + return cids; +} + +/* support for closing non-empty channels */ + +struct _channel_closing { + struct _channelref *ref; +}; + +static int +_channel_set_closing(struct _channelref *ref, PyThread_type_lock mutex) { + struct _channel *chan = ref->chan; + if (chan == NULL) { + // already closed + return 0; + } + int res = -1; + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + if (chan->closing != NULL) { + res = ERR_CHANNEL_CLOSED; + goto done; + } + chan->closing = PyMem_NEW(struct _channel_closing, 1); + if (chan->closing == NULL) { + goto done; + } + chan->closing->ref = ref; + + res = 0; +done: + PyThread_release_lock(chan->mutex); + return res; +} + +static void +_channel_clear_closing(struct _channel *chan) { + PyThread_acquire_lock(chan->mutex, WAIT_LOCK); + if (chan->closing != NULL) { + PyMem_Free(chan->closing); + chan->closing = NULL; + } + PyThread_release_lock(chan->mutex); +} + +static void +_channel_finish_closing(struct _channel *chan) { + struct _channel_closing *closing = chan->closing; + if (closing == NULL) { + return; + } + _channelref *ref = closing->ref; + _channel_clear_closing(chan); + // Do the things that would have been done in _channels_close(). + ref->chan = NULL; + _channel_free(chan); +} + +/* "high"-level channel-related functions */ + +static int64_t +_channel_create(_channels *channels) +{ + PyThread_type_lock mutex = PyThread_allocate_lock(); + if (mutex == NULL) { + return ERR_CHANNEL_MUTEX_INIT; + } + _PyChannelState *chan = _channel_new(mutex); + if (chan == NULL) { + PyThread_free_lock(mutex); + return -1; + } + int64_t id = _channels_add(channels, chan); + if (id < 0) { + _channel_free(chan); + } + return id; +} + +static int +_channel_destroy(_channels *channels, int64_t id) +{ + _PyChannelState *chan = NULL; + int err = _channels_remove(channels, id, &chan); + if (err != 0) { + return err; + } + if (chan != NULL) { + _channel_free(chan); + } + return 0; +} + +static int +_channel_send(_channels *channels, int64_t id, PyObject *obj) +{ + PyInterpreterState *interp = _get_current_interp(); + if (interp == NULL) { + return -1; + } + + // Look up the channel. + PyThread_type_lock mutex = NULL; + _PyChannelState *chan = NULL; + int err = _channels_lookup(channels, id, &mutex, &chan); + if (err != 0) { + return err; + } + assert(chan != NULL); + // Past this point we are responsible for releasing the mutex. + + if (chan->closing != NULL) { + PyThread_release_lock(mutex); + return ERR_CHANNEL_CLOSED; + } + + // Convert the object to cross-interpreter data. + _PyCrossInterpreterData *data = PyMem_NEW(_PyCrossInterpreterData, 1); + if (data == NULL) { + PyThread_release_lock(mutex); + return -1; + } + if (_PyObject_GetCrossInterpreterData(obj, data) != 0) { + PyThread_release_lock(mutex); + PyMem_Free(data); + return -1; + } + + // Add the data to the channel. 
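/* The object itself never crosses the channel; only the
 * _PyCrossInterpreterData built above is queued.  It is heap-allocated
 * so it can sit in the FIFO queue until some interpreter receives it,
 * and it is released again just below if queueing fails. */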
+ int res = _channel_add(chan, PyInterpreterState_GetID(interp), data); + PyThread_release_lock(mutex); + if (res != 0) { + // We may chain an exception here: + (void)_release_xid_data(data, 0); + PyMem_Free(data); + return res; + } + + return 0; +} + +static int +_channel_recv(_channels *channels, int64_t id, PyObject **res) +{ + int err; + *res = NULL; + + PyInterpreterState *interp = _get_current_interp(); + if (interp == NULL) { + // XXX Is this always an error? + if (PyErr_Occurred()) { + return -1; + } + return 0; + } + + // Look up the channel. + PyThread_type_lock mutex = NULL; + _PyChannelState *chan = NULL; + err = _channels_lookup(channels, id, &mutex, &chan); + if (err != 0) { + return err; + } + assert(chan != NULL); + // Past this point we are responsible for releasing the mutex. + + // Pop off the next item from the channel. + _PyCrossInterpreterData *data = NULL; + err = _channel_next(chan, PyInterpreterState_GetID(interp), &data); + PyThread_release_lock(mutex); + if (err != 0) { + return err; + } + else if (data == NULL) { + assert(!PyErr_Occurred()); + return 0; + } + + // Convert the data back to an object. + PyObject *obj = _PyCrossInterpreterData_NewObject(data); + if (obj == NULL) { + assert(PyErr_Occurred()); + (void)_release_xid_data(data, 1); + PyMem_Free(data); + return -1; + } + int release_res = _release_xid_data(data, 0); + PyMem_Free(data); + if (release_res < 0) { + // The source interpreter has been destroyed already. + assert(PyErr_Occurred()); + Py_DECREF(obj); + return -1; + } + + *res = obj; + return 0; +} + +static int +_channel_drop(_channels *channels, int64_t id, int send, int recv) +{ + PyInterpreterState *interp = _get_current_interp(); + if (interp == NULL) { + return -1; + } + + // Look up the channel. + PyThread_type_lock mutex = NULL; + _PyChannelState *chan = NULL; + int err = _channels_lookup(channels, id, &mutex, &chan); + if (err != 0) { + return err; + } + // Past this point we are responsible for releasing the mutex. + + // Close one or both of the two ends. + int res = _channel_close_interpreter(chan, PyInterpreterState_GetID(interp), send-recv); + PyThread_release_lock(mutex); + return res; +} + +static int +_channel_close(_channels *channels, int64_t id, int end, int force) +{ + return _channels_close(channels, id, NULL, end, force); +} + +static int +_channel_is_associated(_channels *channels, int64_t cid, int64_t interp, + int send) +{ + _PyChannelState *chan = NULL; + int err = _channels_lookup(channels, cid, NULL, &chan); + if (err != 0) { + return err; + } + else if (send && chan->closing != NULL) { + return ERR_CHANNEL_CLOSED; + } + + _channelend *end = _channelend_find(send ? 
chan->ends->send : chan->ends->recv, + interp, NULL); + + return (end != NULL && end->open); +} + +/* ChannelID class */ + +typedef struct channelid { + PyObject_HEAD + int64_t id; + int end; + int resolve; + _channels *channels; +} channelid; + +struct channel_id_converter_data { + PyObject *module; + int64_t cid; +}; + +static int +channel_id_converter(PyObject *arg, void *ptr) +{ + int64_t cid; + struct channel_id_converter_data *data = ptr; + module_state *state = get_module_state(data->module); + assert(state != NULL); + if (PyObject_TypeCheck(arg, state->ChannelIDType)) { + cid = ((channelid *)arg)->id; + } + else if (PyIndex_Check(arg)) { + cid = PyLong_AsLongLong(arg); + if (cid == -1 && PyErr_Occurred()) { + return 0; + } + if (cid < 0) { + PyErr_Format(PyExc_ValueError, + "channel ID must be a non-negative int, got %R", arg); + return 0; + } + } + else { + PyErr_Format(PyExc_TypeError, + "channel ID must be an int, got %.100s", + Py_TYPE(arg)->tp_name); + return 0; + } + data->cid = cid; + return 1; +} + +static int +newchannelid(PyTypeObject *cls, int64_t cid, int end, _channels *channels, + int force, int resolve, channelid **res) +{ + *res = NULL; + + channelid *self = PyObject_New(channelid, cls); + if (self == NULL) { + return -1; + } + self->id = cid; + self->end = end; + self->resolve = resolve; + self->channels = channels; + + int err = _channels_add_id_object(channels, cid); + if (err != 0) { + if (force && err == ERR_CHANNEL_NOT_FOUND) { + assert(!PyErr_Occurred()); + } + else { + Py_DECREF((PyObject *)self); + return err; + } + } + + *res = self; + return 0; +} + +static _channels * _global_channels(void); + +static PyObject * +_channelid_new(PyObject *mod, PyTypeObject *cls, + PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"id", "send", "recv", "force", "_resolve", NULL}; + int64_t cid; + struct channel_id_converter_data cid_data = { + .module = mod, + }; + int send = -1; + int recv = -1; + int force = 0; + int resolve = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&|$pppp:ChannelID.__new__", kwlist, + channel_id_converter, &cid_data, + &send, &recv, &force, &resolve)) { + return NULL; + } + cid = cid_data.cid; + + // Handle "send" and "recv". + if (send == 0 && recv == 0) { + PyErr_SetString(PyExc_ValueError, + "'send' and 'recv' cannot both be False"); + return NULL; + } + + int end = 0; + if (send == 1) { + if (recv == 0 || recv == -1) { + end = CHANNEL_SEND; + } + } + else if (recv == 1) { + end = CHANNEL_RECV; + } + + PyObject *id = NULL; + int err = newchannelid(cls, cid, end, _global_channels(), + force, resolve, + (channelid **)&id); + if (handle_channel_error(err, mod, cid)) { + assert(id == NULL); + return NULL; + } + assert(id != NULL); + return id; +} + +static void +channelid_dealloc(PyObject *self) +{ + int64_t cid = ((channelid *)self)->id; + _channels *channels = ((channelid *)self)->channels; + + PyTypeObject *tp = Py_TYPE(self); + tp->tp_free(self); + /* "Instances of heap-allocated types hold a reference to their type." + * See: https://docs.python.org/3.11/howto/isolating-extensions.html#garbage-collection-protocol + * See: https://docs.python.org/3.11/c-api/typeobj.html#c.PyTypeObject.tp_traverse + */ + // XXX Why don't we implement Py_TPFLAGS_HAVE_GC, e.g. Py_tp_traverse, + // like we do for _abc._abc_data? 
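/* `cid` and `channels` were copied into locals before tp_free() above,
 * so the _channels_drop_id_object() call below does not touch the
 * already-freed instance; the only thing left to drop here is the
 * reference the instance held on its heap type. */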
+ Py_DECREF(tp); + + _channels_drop_id_object(channels, cid); +} + +static PyObject * +channelid_repr(PyObject *self) +{ + PyTypeObject *type = Py_TYPE(self); + const char *name = _PyType_Name(type); + + channelid *cid = (channelid *)self; + const char *fmt; + if (cid->end == CHANNEL_SEND) { + fmt = "%s(%" PRId64 ", send=True)"; + } + else if (cid->end == CHANNEL_RECV) { + fmt = "%s(%" PRId64 ", recv=True)"; + } + else { + fmt = "%s(%" PRId64 ")"; + } + return PyUnicode_FromFormat(fmt, name, cid->id); +} + +static PyObject * +channelid_str(PyObject *self) +{ + channelid *cid = (channelid *)self; + return PyUnicode_FromFormat("%" PRId64 "", cid->id); +} + +static PyObject * +channelid_int(PyObject *self) +{ + channelid *cid = (channelid *)self; + return PyLong_FromLongLong(cid->id); +} + +static Py_hash_t +channelid_hash(PyObject *self) +{ + channelid *cid = (channelid *)self; + PyObject *id = PyLong_FromLongLong(cid->id); + if (id == NULL) { + return -1; + } + Py_hash_t hash = PyObject_Hash(id); + Py_DECREF(id); + return hash; +} + +static PyObject * +channelid_richcompare(PyObject *self, PyObject *other, int op) +{ + PyObject *res = NULL; + if (op != Py_EQ && op != Py_NE) { + Py_RETURN_NOTIMPLEMENTED; + } + + PyObject *mod = get_module_from_type(Py_TYPE(self)); + if (mod == NULL) { + return NULL; + } + module_state *state = get_module_state(mod); + if (state == NULL) { + goto done; + } + + if (!PyObject_TypeCheck(self, state->ChannelIDType)) { + res = Py_NewRef(Py_NotImplemented); + goto done; + } + + channelid *cid = (channelid *)self; + int equal; + if (PyObject_TypeCheck(other, state->ChannelIDType)) { + channelid *othercid = (channelid *)other; + equal = (cid->end == othercid->end) && (cid->id == othercid->id); + } + else if (PyLong_Check(other)) { + /* Fast path */ + int overflow; + long long othercid = PyLong_AsLongLongAndOverflow(other, &overflow); + if (othercid == -1 && PyErr_Occurred()) { + goto done; + } + equal = !overflow && (othercid >= 0) && (cid->id == othercid); + } + else if (PyNumber_Check(other)) { + PyObject *pyid = PyLong_FromLongLong(cid->id); + if (pyid == NULL) { + goto done; + } + res = PyObject_RichCompare(pyid, other, op); + Py_DECREF(pyid); + goto done; + } + else { + res = Py_NewRef(Py_NotImplemented); + goto done; + } + + if ((op == Py_EQ && equal) || (op == Py_NE && !equal)) { + res = Py_NewRef(Py_True); + } + else { + res = Py_NewRef(Py_False); + } + +done: + Py_DECREF(mod); + return res; +} + +static PyObject * +_channel_from_cid(PyObject *cid, int end) +{ + PyObject *highlevel = PyImport_ImportModule("interpreters"); + if (highlevel == NULL) { + PyErr_Clear(); + highlevel = PyImport_ImportModule("test.support.interpreters"); + if (highlevel == NULL) { + return NULL; + } + } + const char *clsname = (end == CHANNEL_RECV) ? "RecvChannel" : + "SendChannel"; + PyObject *cls = PyObject_GetAttrString(highlevel, clsname); + Py_DECREF(highlevel); + if (cls == NULL) { + return NULL; + } + PyObject *chan = PyObject_CallFunctionObjArgs(cls, cid, NULL); + Py_DECREF(cls); + if (chan == NULL) { + return NULL; + } + return chan; +} + +struct _channelid_xid { + int64_t id; + int end; + int resolve; +}; + +static PyObject * +_channelid_from_xid(_PyCrossInterpreterData *data) +{ + struct _channelid_xid *xid = (struct _channelid_xid *)data->data; + + // It might not be imported yet, so we can't use _get_current_module(). 
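/* This callback runs in whichever interpreter is unpacking the
 * cross-interpreter data (it was registered as the new_object hook in
 * _channelid_shared()), so the module is imported by name here; that
 * interpreter may not have imported _xxinterpchannels yet. */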
+ PyObject *mod = PyImport_ImportModule(MODULE_NAME); + if (mod == NULL) { + return NULL; + } + assert(mod != Py_None); + module_state *state = get_module_state(mod); + if (state == NULL) { + return NULL; + } + + // Note that we do not preserve the "resolve" flag. + PyObject *cid = NULL; + int err = newchannelid(state->ChannelIDType, xid->id, xid->end, + _global_channels(), 0, 0, + (channelid **)&cid); + if (err != 0) { + assert(cid == NULL); + (void)handle_channel_error(err, mod, xid->id); + goto done; + } + assert(cid != NULL); + if (xid->end == 0) { + goto done; + } + if (!xid->resolve) { + goto done; + } + + /* Try returning a high-level channel end but fall back to the ID. */ + PyObject *chan = _channel_from_cid(cid, xid->end); + if (chan == NULL) { + PyErr_Clear(); + goto done; + } + Py_DECREF(cid); + cid = chan; + +done: + Py_DECREF(mod); + return cid; +} + +static int +_channelid_shared(PyThreadState *tstate, PyObject *obj, + _PyCrossInterpreterData *data) +{ + if (_PyCrossInterpreterData_InitWithSize( + data, tstate->interp, sizeof(struct _channelid_xid), obj, + _channelid_from_xid + ) < 0) + { + return -1; + } + struct _channelid_xid *xid = (struct _channelid_xid *)data->data; + xid->id = ((channelid *)obj)->id; + xid->end = ((channelid *)obj)->end; + xid->resolve = ((channelid *)obj)->resolve; + return 0; +} + +static PyObject * +channelid_end(PyObject *self, void *end) +{ + int force = 1; + channelid *cid = (channelid *)self; + if (end != NULL) { + PyObject *id = NULL; + int err = newchannelid(Py_TYPE(self), cid->id, *(int *)end, + cid->channels, force, cid->resolve, + (channelid **)&id); + if (err != 0) { + assert(id == NULL); + PyObject *mod = get_module_from_type(Py_TYPE(self)); + if (mod == NULL) { + return NULL; + } + (void)handle_channel_error(err, mod, cid->id); + Py_DECREF(mod); + return NULL; + } + assert(id != NULL); + return id; + } + + if (cid->end == CHANNEL_SEND) { + return PyUnicode_InternFromString("send"); + } + if (cid->end == CHANNEL_RECV) { + return PyUnicode_InternFromString("recv"); + } + return PyUnicode_InternFromString("both"); +} + +static int _channelid_end_send = CHANNEL_SEND; +static int _channelid_end_recv = CHANNEL_RECV; + +static PyGetSetDef channelid_getsets[] = { + {"end", (getter)channelid_end, NULL, + PyDoc_STR("'send', 'recv', or 'both'")}, + {"send", (getter)channelid_end, NULL, + PyDoc_STR("the 'send' end of the channel"), &_channelid_end_send}, + {"recv", (getter)channelid_end, NULL, + PyDoc_STR("the 'recv' end of the channel"), &_channelid_end_recv}, + {NULL} +}; + +PyDoc_STRVAR(channelid_doc, +"A channel ID identifies a channel and may be used as an int."); + +static PyType_Slot ChannelIDType_slots[] = { + {Py_tp_dealloc, (destructor)channelid_dealloc}, + {Py_tp_doc, (void *)channelid_doc}, + {Py_tp_repr, (reprfunc)channelid_repr}, + {Py_tp_str, (reprfunc)channelid_str}, + {Py_tp_hash, channelid_hash}, + {Py_tp_richcompare, channelid_richcompare}, + {Py_tp_getset, channelid_getsets}, + // number slots + {Py_nb_int, (unaryfunc)channelid_int}, + {Py_nb_index, (unaryfunc)channelid_int}, + {0, NULL}, +}; + +static PyType_Spec ChannelIDType_spec = { + .name = "_xxsubinterpreters.ChannelID", + .basicsize = sizeof(channelid), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_IMMUTABLETYPE), + .slots = ChannelIDType_slots, +}; + + +/* module level code ********************************************************/ + +/* globals is the process-global state for the module. 
It holds all + the data that we need to share between interpreters, so it cannot + hold PyObject values. */ +static struct globals { + int module_count; + _channels channels; +} _globals = {0}; + +static int +_globals_init(void) +{ + // XXX This isn't thread-safe. + _globals.module_count++; + if (_globals.module_count > 1) { + // Already initialized. + return 0; + } + + assert(_globals.channels.mutex == NULL); + PyThread_type_lock mutex = PyThread_allocate_lock(); + if (mutex == NULL) { + return ERR_CHANNELS_MUTEX_INIT; + } + _channels_init(&_globals.channels, mutex); + return 0; +} + +static void +_globals_fini(void) +{ + // XXX This isn't thread-safe. + _globals.module_count--; + if (_globals.module_count > 0) { + return; + } + + _channels_fini(&_globals.channels); +} + +static _channels * +_global_channels(void) { + return &_globals.channels; +} + + +static PyObject * +channel_create(PyObject *self, PyObject *Py_UNUSED(ignored)) +{ + int64_t cid = _channel_create(&_globals.channels); + if (cid < 0) { + (void)handle_channel_error(-1, self, cid); + return NULL; + } + module_state *state = get_module_state(self); + if (state == NULL) { + return NULL; + } + PyObject *id = NULL; + int err = newchannelid(state->ChannelIDType, cid, 0, + &_globals.channels, 0, 0, + (channelid **)&id); + if (handle_channel_error(err, self, cid)) { + assert(id == NULL); + err = _channel_destroy(&_globals.channels, cid); + if (handle_channel_error(err, self, cid)) { + // XXX issue a warning? + } + return NULL; + } + assert(id != NULL); + assert(((channelid *)id)->channels != NULL); + return id; +} + +PyDoc_STRVAR(channel_create_doc, +"channel_create() -> cid\n\ +\n\ +Create a new cross-interpreter channel and return a unique generated ID."); + +static PyObject * +channel_destroy(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", NULL}; + int64_t cid; + struct channel_id_converter_data cid_data = { + .module = self, + }; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:channel_destroy", kwlist, + channel_id_converter, &cid_data)) { + return NULL; + } + cid = cid_data.cid; + + int err = _channel_destroy(&_globals.channels, cid); + if (handle_channel_error(err, self, cid)) { + return NULL; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(channel_destroy_doc, +"channel_destroy(cid)\n\ +\n\ +Close and finalize the channel. 
Afterward attempts to use the channel\n\ +will behave as though it never existed."); + +static PyObject * +channel_list_all(PyObject *self, PyObject *Py_UNUSED(ignored)) +{ + int64_t count = 0; + int64_t *cids = _channels_list_all(&_globals.channels, &count); + if (cids == NULL) { + if (count == 0) { + return PyList_New(0); + } + return NULL; + } + PyObject *ids = PyList_New((Py_ssize_t)count); + if (ids == NULL) { + goto finally; + } + module_state *state = get_module_state(self); + if (state == NULL) { + Py_DECREF(ids); + ids = NULL; + goto finally; + } + int64_t *cur = cids; + for (int64_t i=0; i < count; cur++, i++) { + PyObject *id = NULL; + int err = newchannelid(state->ChannelIDType, *cur, 0, + &_globals.channels, 0, 0, + (channelid **)&id); + if (handle_channel_error(err, self, *cur)) { + assert(id == NULL); + Py_SETREF(ids, NULL); + break; + } + assert(id != NULL); + PyList_SET_ITEM(ids, (Py_ssize_t)i, id); + } + +finally: + PyMem_Free(cids); + return ids; +} + +PyDoc_STRVAR(channel_list_all_doc, +"channel_list_all() -> [cid]\n\ +\n\ +Return the list of all IDs for active channels."); + +static PyObject * +channel_list_interpreters(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", "send", NULL}; + int64_t cid; /* Channel ID */ + struct channel_id_converter_data cid_data = { + .module = self, + }; + int send = 0; /* Send or receive end? */ + int64_t id; + PyObject *ids, *id_obj; + PyInterpreterState *interp; + + if (!PyArg_ParseTupleAndKeywords( + args, kwds, "O&$p:channel_list_interpreters", + kwlist, channel_id_converter, &cid_data, &send)) { + return NULL; + } + cid = cid_data.cid; + + ids = PyList_New(0); + if (ids == NULL) { + goto except; + } + + interp = PyInterpreterState_Head(); + while (interp != NULL) { + id = PyInterpreterState_GetID(interp); + assert(id >= 0); + int res = _channel_is_associated(&_globals.channels, cid, id, send); + if (res < 0) { + (void)handle_channel_error(res, self, cid); + goto except; + } + if (res) { + id_obj = _PyInterpreterState_GetIDObject(interp); + if (id_obj == NULL) { + goto except; + } + res = PyList_Insert(ids, 0, id_obj); + Py_DECREF(id_obj); + if (res < 0) { + goto except; + } + } + interp = PyInterpreterState_Next(interp); + } + + goto finally; + +except: + Py_CLEAR(ids); + +finally: + return ids; +} + +PyDoc_STRVAR(channel_list_interpreters_doc, +"channel_list_interpreters(cid, *, send) -> [id]\n\ +\n\ +Return the list of all interpreter IDs associated with an end of the channel.\n\ +\n\ +The 'send' argument should be a boolean indicating whether to use the send or\n\ +receive end."); + + +static PyObject * +channel_send(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", "obj", NULL}; + int64_t cid; + struct channel_id_converter_data cid_data = { + .module = self, + }; + PyObject *obj; + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&O:channel_send", kwlist, + channel_id_converter, &cid_data, &obj)) { + return NULL; + } + cid = cid_data.cid; + + int err = _channel_send(&_globals.channels, cid, obj); + if (handle_channel_error(err, self, cid)) { + return NULL; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(channel_send_doc, +"channel_send(cid, obj)\n\ +\n\ +Add the object's data to the channel's queue."); + +static PyObject * +channel_recv(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", "default", NULL}; + int64_t cid; + struct channel_id_converter_data cid_data = { + .module = self, + }; + PyObject *dflt = NULL; + if 
(!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O:channel_recv", kwlist, + channel_id_converter, &cid_data, &dflt)) { + return NULL; + } + cid = cid_data.cid; + + PyObject *obj = NULL; + int err = _channel_recv(&_globals.channels, cid, &obj); + if (handle_channel_error(err, self, cid)) { + return NULL; + } + Py_XINCREF(dflt); + if (obj == NULL) { + // Use the default. + if (dflt == NULL) { + (void)handle_channel_error(ERR_CHANNEL_EMPTY, self, cid); + return NULL; + } + obj = Py_NewRef(dflt); + } + Py_XDECREF(dflt); + return obj; +} + +PyDoc_STRVAR(channel_recv_doc, +"channel_recv(cid, [default]) -> obj\n\ +\n\ +Return a new object from the data at the front of the channel's queue.\n\ +\n\ +If there is nothing to receive then raise ChannelEmptyError, unless\n\ +a default value is provided. In that case return it."); + +static PyObject * +channel_close(PyObject *self, PyObject *args, PyObject *kwds) +{ + static char *kwlist[] = {"cid", "send", "recv", "force", NULL}; + int64_t cid; + struct channel_id_converter_data cid_data = { + .module = self, + }; + int send = 0; + int recv = 0; + int force = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&|$ppp:channel_close", kwlist, + channel_id_converter, &cid_data, + &send, &recv, &force)) { + return NULL; + } + cid = cid_data.cid; + + int err = _channel_close(&_globals.channels, cid, send-recv, force); + if (handle_channel_error(err, self, cid)) { + return NULL; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(channel_close_doc, +"channel_close(cid, *, send=None, recv=None, force=False)\n\ +\n\ +Close the channel for all interpreters.\n\ +\n\ +If the channel is empty then the keyword args are ignored and both\n\ +ends are immediately closed. Otherwise, if 'force' is True then\n\ +all queued items are released and both ends are immediately\n\ +closed.\n\ +\n\ +If the channel is not empty *and* 'force' is False then following\n\ +happens:\n\ +\n\ + * recv is True (regardless of send):\n\ + - raise ChannelNotEmptyError\n\ + * recv is None and send is None:\n\ + - raise ChannelNotEmptyError\n\ + * send is True and recv is not True:\n\ + - fully close the 'send' end\n\ + - close the 'recv' end to interpreters not already receiving\n\ + - fully close it once empty\n\ +\n\ +Closing an already closed channel results in a ChannelClosedError.\n\ +\n\ +Once the channel's ID has no more ref counts in any interpreter\n\ +the channel will be destroyed."); + +static PyObject * +channel_release(PyObject *self, PyObject *args, PyObject *kwds) +{ + // Note that only the current interpreter is affected. + static char *kwlist[] = {"cid", "send", "recv", "force", NULL}; + int64_t cid; + struct channel_id_converter_data cid_data = { + .module = self, + }; + int send = 0; + int recv = 0; + int force = 0; + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "O&|$ppp:channel_release", kwlist, + channel_id_converter, &cid_data, + &send, &recv, &force)) { + return NULL; + } + cid = cid_data.cid; + if (send == 0 && recv == 0) { + send = 1; + recv = 1; + } + + // XXX Handle force is True. + // XXX Fix implicit release. + + int err = _channel_drop(&_globals.channels, cid, send, recv); + if (handle_channel_error(err, self, cid)) { + return NULL; + } + Py_RETURN_NONE; +} + +PyDoc_STRVAR(channel_release_doc, +"channel_release(cid, *, send=None, recv=None, force=True)\n\ +\n\ +Close the channel for the current interpreter. 'send' and 'recv'\n\ +(bool) may be used to indicate the ends to close. By default both\n\ +ends are closed. 
Closing an already closed end is a noop."); + +static PyObject * +channel__channel_id(PyObject *self, PyObject *args, PyObject *kwds) +{ + module_state *state = get_module_state(self); + if (state == NULL) { + return NULL; + } + PyTypeObject *cls = state->ChannelIDType; + PyObject *mod = get_module_from_owned_type(cls); + if (mod == NULL) { + return NULL; + } + PyObject *cid = _channelid_new(mod, cls, args, kwds); + Py_DECREF(mod); + return cid; +} + +static PyMethodDef module_functions[] = { + {"create", channel_create, + METH_NOARGS, channel_create_doc}, + {"destroy", _PyCFunction_CAST(channel_destroy), + METH_VARARGS | METH_KEYWORDS, channel_destroy_doc}, + {"list_all", channel_list_all, + METH_NOARGS, channel_list_all_doc}, + {"list_interpreters", _PyCFunction_CAST(channel_list_interpreters), + METH_VARARGS | METH_KEYWORDS, channel_list_interpreters_doc}, + {"send", _PyCFunction_CAST(channel_send), + METH_VARARGS | METH_KEYWORDS, channel_send_doc}, + {"recv", _PyCFunction_CAST(channel_recv), + METH_VARARGS | METH_KEYWORDS, channel_recv_doc}, + {"close", _PyCFunction_CAST(channel_close), + METH_VARARGS | METH_KEYWORDS, channel_close_doc}, + {"release", _PyCFunction_CAST(channel_release), + METH_VARARGS | METH_KEYWORDS, channel_release_doc}, + {"_channel_id", _PyCFunction_CAST(channel__channel_id), + METH_VARARGS | METH_KEYWORDS, NULL}, + + {NULL, NULL} /* sentinel */ +}; + + +/* initialization function */ + +PyDoc_STRVAR(module_doc, +"This module provides primitive operations to manage Python interpreters.\n\ +The 'interpreters' module provides a more convenient interface."); + +static int +module_exec(PyObject *mod) +{ + if (_globals_init() != 0) { + return -1; + } + + /* Add exception types */ + if (exceptions_init(mod) != 0) { + goto error; + } + + /* Add other types */ + module_state *state = get_module_state(mod); + if (state == NULL) { + goto error; + } + + // ChannelID + state->ChannelIDType = add_new_type( + mod, &ChannelIDType_spec, _channelid_shared); + if (state->ChannelIDType == NULL) { + goto error; + } + + return 0; + +error: + _globals_fini(); + return -1; +} + +static struct PyModuleDef_Slot module_slots[] = { + {Py_mod_exec, module_exec}, + {0, NULL}, +}; + +static int +module_traverse(PyObject *mod, visitproc visit, void *arg) +{ + module_state *state = get_module_state(mod); + assert(state != NULL); + traverse_module_state(state, visit, arg); + return 0; +} + +static int +module_clear(PyObject *mod) +{ + module_state *state = get_module_state(mod); + assert(state != NULL); + clear_module_state(state); + return 0; +} + +static void +module_free(void *mod) +{ + module_state *state = get_module_state(mod); + assert(state != NULL); + clear_module_state(state); + _globals_fini(); +} + +static struct PyModuleDef moduledef = { + .m_base = PyModuleDef_HEAD_INIT, + .m_name = MODULE_NAME, + .m_doc = module_doc, + .m_size = sizeof(module_state), + .m_methods = module_functions, + .m_slots = module_slots, + .m_traverse = module_traverse, + .m_clear = module_clear, + .m_free = (freefunc)module_free, +}; + +PyMODINIT_FUNC +PyInit__xxinterpchannels(void) +{ + return PyModuleDef_Init(&moduledef); +} diff --git a/Modules/_xxsubinterpretersmodule.c b/Modules/_xxsubinterpretersmodule.c index 0892fa3a9595e8..461c505c092c70 100644 --- a/Modules/_xxsubinterpretersmodule.c +++ b/Modules/_xxsubinterpretersmodule.c @@ -39,43 +39,6 @@ _get_current_interp(void) return PyInterpreterState_Get(); } -static PyObject * -_get_current_module(void) -{ - // We ensured it was imported in _run_script(). 
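/* The hunks in this second file remove the channel implementation from
 * _xxsubinterpretersmodule.c: that code now lives in the new
 * _xxinterpchannelsmodule.c added above, and only the
 * interpreter-management pieces (e.g. RunFailedError) remain here.
 *
 * Illustrative sketch, not part of the patch: exercising the new
 * module's create/send/recv/destroy functions through the embedding
 * API, from a single interpreter.  It assumes a CPython build in which
 * the _xxinterpchannels extension is compiled and importable; error
 * handling is intentionally minimal. */
#include <Python.h>
#include <stdio.h>

int
main(void)
{
    Py_Initialize();

    PyObject *mod = PyImport_ImportModule("_xxinterpchannels");
    if (mod == NULL) {
        PyErr_Print();
        Py_Finalize();
        return 1;
    }

    /* create() returns a ChannelID (usable as an int). */
    PyObject *cid = PyObject_CallMethod(mod, "create", NULL);
    if (cid == NULL) {
        PyErr_Print();
        Py_DECREF(mod);
        Py_Finalize();
        return 1;
    }

    /* send(cid, obj): the object must be a shareable type (a str here). */
    PyObject *sent = PyObject_CallMethod(mod, "send", "Os", cid, "spam");
    if (sent == NULL) {
        PyErr_Print();
    }
    Py_XDECREF(sent);

    /* recv(cid): pops the item back out; in this sketch the sending and
     * the receiving interpreter happen to be the same one. */
    PyObject *obj = PyObject_CallMethod(mod, "recv", "O", cid);
    if (obj != NULL) {
        const char *text = PyUnicode_AsUTF8(obj);
        printf("received: %s\n", text ? text : "<non-string>");
        Py_DECREF(obj);
    }
    else {
        PyErr_Print();
    }

    /* destroy(cid): close and finalize the channel. */
    PyObject *destroyed = PyObject_CallMethod(mod, "destroy", "O", cid);
    if (destroyed == NULL) {
        PyErr_Print();
    }
    Py_XDECREF(destroyed);

    Py_DECREF(cid);
    Py_DECREF(mod);
    Py_Finalize();
    return 0;
}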
- PyObject *name = PyUnicode_FromString(MODULE_NAME); - if (name == NULL) { - return NULL; - } - PyObject *mod = PyImport_GetModule(name); - Py_DECREF(name); - if (mod == NULL) { - return NULL; - } - assert(mod != Py_None); - return mod; -} - -static PyObject * -get_module_from_owned_type(PyTypeObject *cls) -{ - assert(cls != NULL); - return _get_current_module(); - // XXX Use the more efficient API now that we use heap types: - //return PyType_GetModule(cls); -} - -static struct PyModuleDef moduledef; - -static PyObject * -get_module_from_type(PyTypeObject *cls) -{ - assert(cls != NULL); - return _get_current_module(); - // XXX Use the more efficient API now that we use heap types: - //return PyType_GetModuleByDef(cls, &moduledef); -} - static PyObject * add_new_exception(PyObject *mod, const char *name, PyObject *base) { @@ -95,27 +58,6 @@ add_new_exception(PyObject *mod, const char *name, PyObject *base) #define ADD_NEW_EXCEPTION(MOD, NAME, BASE) \ add_new_exception(MOD, MODULE_NAME "." Py_STRINGIFY(NAME), BASE) -static PyTypeObject * -add_new_type(PyObject *mod, PyType_Spec *spec, crossinterpdatafunc shared) -{ - PyTypeObject *cls = (PyTypeObject *)PyType_FromMetaclass( - NULL, mod, spec, NULL); - if (cls == NULL) { - return NULL; - } - if (PyModule_AddType(mod, cls) < 0) { - Py_DECREF(cls); - return NULL; - } - if (shared != NULL) { - if (_PyCrossInterpreterData_RegisterClass(cls, shared)) { - Py_DECREF(cls); - return NULL; - } - } - return cls; -} - static int _release_xid_data(_PyCrossInterpreterData *data, int ignoreexc) { @@ -127,9 +69,7 @@ _release_xid_data(_PyCrossInterpreterData *data, int ignoreexc) if (res < 0) { // XXX Fix this! /* The owning interpreter is already destroyed. - * Ideally, this shouldn't ever happen. When an interpreter is - * about to be destroyed, we should clear out all of its objects - * from every channel associated with that interpreter. + * Ideally, this shouldn't ever happen. (It's highly unlikely.) * For now we hack around that to resolve refleaks, by decref'ing * the released object here, even if its the wrong interpreter. 
* The owning interpreter has already been destroyed @@ -153,17 +93,8 @@ _release_xid_data(_PyCrossInterpreterData *data, int ignoreexc) /* module state *************************************************************/ typedef struct { - PyTypeObject *ChannelIDType; - - /* interpreter exceptions */ + /* exceptions */ PyObject *RunFailedError; - - /* channel exceptions */ - PyObject *ChannelError; - PyObject *ChannelNotFoundError; - PyObject *ChannelClosedError; - PyObject *ChannelEmptyError; - PyObject *ChannelNotEmptyError; } module_state; static inline module_state * @@ -178,37 +109,18 @@ get_module_state(PyObject *mod) static int traverse_module_state(module_state *state, visitproc visit, void *arg) { - /* heap types */ - Py_VISIT(state->ChannelIDType); - - /* interpreter exceptions */ + /* exceptions */ Py_VISIT(state->RunFailedError); - /* channel exceptions */ - Py_VISIT(state->ChannelError); - Py_VISIT(state->ChannelNotFoundError); - Py_VISIT(state->ChannelClosedError); - Py_VISIT(state->ChannelEmptyError); - Py_VISIT(state->ChannelNotEmptyError); return 0; } static int clear_module_state(module_state *state) { - /* heap types */ - (void)_PyCrossInterpreterData_UnregisterClass(state->ChannelIDType); - Py_CLEAR(state->ChannelIDType); - - /* interpreter exceptions */ + /* exceptions */ Py_CLEAR(state->RunFailedError); - /* channel exceptions */ - Py_CLEAR(state->ChannelError); - Py_CLEAR(state->ChannelNotFoundError); - Py_CLEAR(state->ChannelClosedError); - Py_CLEAR(state->ChannelEmptyError); - Py_CLEAR(state->ChannelNotEmptyError); return 0; } @@ -298,10 +210,8 @@ _sharedns_free(_sharedns *shared) } static _sharedns * -_get_shared_ns(PyObject *shareable, PyTypeObject *channelidtype, - int *needs_import) +_get_shared_ns(PyObject *shareable) { - *needs_import = 0; if (shareable == NULL || shareable == Py_None) { return NULL; } @@ -323,9 +233,6 @@ _get_shared_ns(PyObject *shareable, PyTypeObject *channelidtype, if (_sharednsitem_init(&shared->items[i], key, value) != 0) { break; } - if (Py_TYPE(value) == channelidtype) { - *needs_import = 1; - } } if (PyErr_Occurred()) { _sharedns_free(shared); @@ -394,1701 +301,79 @@ _sharedexception_bind(PyObject *exctype, PyObject *exc, PyObject *tb) _sharedexception *err = _sharedexception_new(); if (err == NULL) { - goto finally; - } - - PyObject *name = PyUnicode_FromFormat("%S", exctype); - if (name == NULL) { - failure = "unable to format exception type name"; - goto finally; - } - err->name = _copy_raw_string(name); - Py_DECREF(name); - if (err->name == NULL) { - if (PyErr_ExceptionMatches(PyExc_MemoryError)) { - failure = "out of memory copying exception type name"; - } else { - failure = "unable to encode and copy exception type name"; - } - goto finally; - } - - if (exc != NULL) { - PyObject *msg = PyUnicode_FromFormat("%S", exc); - if (msg == NULL) { - failure = "unable to format exception message"; - goto finally; - } - err->msg = _copy_raw_string(msg); - Py_DECREF(msg); - if (err->msg == NULL) { - if (PyErr_ExceptionMatches(PyExc_MemoryError)) { - failure = "out of memory copying exception message"; - } else { - failure = "unable to encode and copy exception message"; - } - goto finally; - } - } - -finally: - if (failure != NULL) { - PyErr_Clear(); - if (err->name != NULL) { - PyMem_Free(err->name); - err->name = NULL; - } - err->msg = failure; - } - return err; -} - -static void -_sharedexception_apply(_sharedexception *exc, PyObject *wrapperclass) -{ - if (exc->name != NULL) { - if (exc->msg != NULL) { - PyErr_Format(wrapperclass, "%s: %s", 
exc->name, exc->msg); - } - else { - PyErr_SetString(wrapperclass, exc->name); - } - } - else if (exc->msg != NULL) { - PyErr_SetString(wrapperclass, exc->msg); - } - else { - PyErr_SetNone(wrapperclass); - } -} - - -/* channel-specific code ****************************************************/ - -#define CHANNEL_SEND 1 -#define CHANNEL_BOTH 0 -#define CHANNEL_RECV -1 - -/* channel errors */ - -#define ERR_CHANNEL_NOT_FOUND -2 -#define ERR_CHANNEL_CLOSED -3 -#define ERR_CHANNEL_INTERP_CLOSED -4 -#define ERR_CHANNEL_EMPTY -5 -#define ERR_CHANNEL_NOT_EMPTY -6 -#define ERR_CHANNEL_MUTEX_INIT -7 -#define ERR_CHANNELS_MUTEX_INIT -8 -#define ERR_NO_NEXT_CHANNEL_ID -9 - -static int -channel_exceptions_init(PyObject *mod) -{ - module_state *state = get_module_state(mod); - if (state == NULL) { - return -1; - } - -#define ADD(NAME, BASE) \ - do { \ - assert(state->NAME == NULL); \ - state->NAME = ADD_NEW_EXCEPTION(mod, NAME, BASE); \ - if (state->NAME == NULL) { \ - return -1; \ - } \ - } while (0) - - // A channel-related operation failed. - ADD(ChannelError, PyExc_RuntimeError); - // An operation tried to use a channel that doesn't exist. - ADD(ChannelNotFoundError, state->ChannelError); - // An operation tried to use a closed channel. - ADD(ChannelClosedError, state->ChannelError); - // An operation tried to pop from an empty channel. - ADD(ChannelEmptyError, state->ChannelError); - // An operation tried to close a non-empty channel. - ADD(ChannelNotEmptyError, state->ChannelError); -#undef ADD - - return 0; -} - -static int -handle_channel_error(int err, PyObject *mod, int64_t cid) -{ - if (err == 0) { - assert(!PyErr_Occurred()); - return 0; - } - assert(err < 0); - module_state *state = get_module_state(mod); - assert(state != NULL); - if (err == ERR_CHANNEL_NOT_FOUND) { - PyErr_Format(state->ChannelNotFoundError, - "channel %" PRId64 " not found", cid); - } - else if (err == ERR_CHANNEL_CLOSED) { - PyErr_Format(state->ChannelClosedError, - "channel %" PRId64 " is closed", cid); - } - else if (err == ERR_CHANNEL_INTERP_CLOSED) { - PyErr_Format(state->ChannelClosedError, - "channel %" PRId64 " is already closed", cid); - } - else if (err == ERR_CHANNEL_EMPTY) { - PyErr_Format(state->ChannelEmptyError, - "channel %" PRId64 " is empty", cid); - } - else if (err == ERR_CHANNEL_NOT_EMPTY) { - PyErr_Format(state->ChannelNotEmptyError, - "channel %" PRId64 " may not be closed " - "if not empty (try force=True)", - cid); - } - else if (err == ERR_CHANNEL_MUTEX_INIT) { - PyErr_SetString(state->ChannelError, - "can't initialize mutex for new channel"); - } - else if (err == ERR_CHANNELS_MUTEX_INIT) { - PyErr_SetString(state->ChannelError, - "can't initialize mutex for channel management"); - } - else if (err == ERR_NO_NEXT_CHANNEL_ID) { - PyErr_SetString(state->ChannelError, - "failed to get a channel ID"); - } - else { - assert(PyErr_Occurred()); - } - return 1; -} - -/* the channel queue */ - -struct _channelitem; - -typedef struct _channelitem { - _PyCrossInterpreterData *data; - struct _channelitem *next; -} _channelitem; - -static _channelitem * -_channelitem_new(void) -{ - _channelitem *item = PyMem_NEW(_channelitem, 1); - if (item == NULL) { - PyErr_NoMemory(); - return NULL; - } - item->data = NULL; - item->next = NULL; - return item; -} - -static void -_channelitem_clear(_channelitem *item) -{ - if (item->data != NULL) { - (void)_release_xid_data(item->data, 1); - PyMem_Free(item->data); - item->data = NULL; - } - item->next = NULL; -} - -static void -_channelitem_free(_channelitem *item) -{ - 
_channelitem_clear(item); - PyMem_Free(item); -} - -static void -_channelitem_free_all(_channelitem *item) -{ - while (item != NULL) { - _channelitem *last = item; - item = item->next; - _channelitem_free(last); - } -} - -static _PyCrossInterpreterData * -_channelitem_popped(_channelitem *item) -{ - _PyCrossInterpreterData *data = item->data; - item->data = NULL; - _channelitem_free(item); - return data; -} - -typedef struct _channelqueue { - int64_t count; - _channelitem *first; - _channelitem *last; -} _channelqueue; - -static _channelqueue * -_channelqueue_new(void) -{ - _channelqueue *queue = PyMem_NEW(_channelqueue, 1); - if (queue == NULL) { - PyErr_NoMemory(); - return NULL; - } - queue->count = 0; - queue->first = NULL; - queue->last = NULL; - return queue; -} - -static void -_channelqueue_clear(_channelqueue *queue) -{ - _channelitem_free_all(queue->first); - queue->count = 0; - queue->first = NULL; - queue->last = NULL; -} - -static void -_channelqueue_free(_channelqueue *queue) -{ - _channelqueue_clear(queue); - PyMem_Free(queue); -} - -static int -_channelqueue_put(_channelqueue *queue, _PyCrossInterpreterData *data) -{ - _channelitem *item = _channelitem_new(); - if (item == NULL) { - return -1; - } - item->data = data; - - queue->count += 1; - if (queue->first == NULL) { - queue->first = item; - } - else { - queue->last->next = item; - } - queue->last = item; - return 0; -} - -static _PyCrossInterpreterData * -_channelqueue_get(_channelqueue *queue) -{ - _channelitem *item = queue->first; - if (item == NULL) { - return NULL; - } - queue->first = item->next; - if (queue->last == item) { - queue->last = NULL; - } - queue->count -= 1; - - return _channelitem_popped(item); -} - -/* channel-interpreter associations */ - -struct _channelend; - -typedef struct _channelend { - struct _channelend *next; - int64_t interp; - int open; -} _channelend; - -static _channelend * -_channelend_new(int64_t interp) -{ - _channelend *end = PyMem_NEW(_channelend, 1); - if (end == NULL) { - PyErr_NoMemory(); - return NULL; - } - end->next = NULL; - end->interp = interp; - end->open = 1; - return end; -} - -static void -_channelend_free(_channelend *end) -{ - PyMem_Free(end); -} - -static void -_channelend_free_all(_channelend *end) -{ - while (end != NULL) { - _channelend *last = end; - end = end->next; - _channelend_free(last); - } -} - -static _channelend * -_channelend_find(_channelend *first, int64_t interp, _channelend **pprev) -{ - _channelend *prev = NULL; - _channelend *end = first; - while (end != NULL) { - if (end->interp == interp) { - break; - } - prev = end; - end = end->next; - } - if (pprev != NULL) { - *pprev = prev; - } - return end; -} - -typedef struct _channelassociations { - // Note that the list entries are never removed for interpreter - // for which the channel is closed. This should not be a problem in - // practice. Also, a channel isn't automatically closed when an - // interpreter is destroyed. 
- int64_t numsendopen; - int64_t numrecvopen; - _channelend *send; - _channelend *recv; -} _channelends; - -static _channelends * -_channelends_new(void) -{ - _channelends *ends = PyMem_NEW(_channelends, 1); - if (ends== NULL) { - return NULL; - } - ends->numsendopen = 0; - ends->numrecvopen = 0; - ends->send = NULL; - ends->recv = NULL; - return ends; -} - -static void -_channelends_clear(_channelends *ends) -{ - _channelend_free_all(ends->send); - ends->send = NULL; - ends->numsendopen = 0; - - _channelend_free_all(ends->recv); - ends->recv = NULL; - ends->numrecvopen = 0; -} - -static void -_channelends_free(_channelends *ends) -{ - _channelends_clear(ends); - PyMem_Free(ends); -} - -static _channelend * -_channelends_add(_channelends *ends, _channelend *prev, int64_t interp, - int send) -{ - _channelend *end = _channelend_new(interp); - if (end == NULL) { - return NULL; - } - - if (prev == NULL) { - if (send) { - ends->send = end; - } - else { - ends->recv = end; - } - } - else { - prev->next = end; - } - if (send) { - ends->numsendopen += 1; - } - else { - ends->numrecvopen += 1; - } - return end; -} - -static int -_channelends_associate(_channelends *ends, int64_t interp, int send) -{ - _channelend *prev; - _channelend *end = _channelend_find(send ? ends->send : ends->recv, - interp, &prev); - if (end != NULL) { - if (!end->open) { - return ERR_CHANNEL_CLOSED; - } - // already associated - return 0; - } - if (_channelends_add(ends, prev, interp, send) == NULL) { - return -1; - } - return 0; -} - -static int -_channelends_is_open(_channelends *ends) -{ - if (ends->numsendopen != 0 || ends->numrecvopen != 0) { - return 1; - } - if (ends->send == NULL && ends->recv == NULL) { - return 1; - } - return 0; -} - -static void -_channelends_close_end(_channelends *ends, _channelend *end, int send) -{ - end->open = 0; - if (send) { - ends->numsendopen -= 1; - } - else { - ends->numrecvopen -= 1; - } -} - -static int -_channelends_close_interpreter(_channelends *ends, int64_t interp, int which) -{ - _channelend *prev; - _channelend *end; - if (which >= 0) { // send/both - end = _channelend_find(ends->send, interp, &prev); - if (end == NULL) { - // never associated so add it - end = _channelends_add(ends, prev, interp, 1); - if (end == NULL) { - return -1; - } - } - _channelends_close_end(ends, end, 1); - } - if (which <= 0) { // recv/both - end = _channelend_find(ends->recv, interp, &prev); - if (end == NULL) { - // never associated so add it - end = _channelends_add(ends, prev, interp, 0); - if (end == NULL) { - return -1; - } - } - _channelends_close_end(ends, end, 0); - } - return 0; -} - -static void -_channelends_close_all(_channelends *ends, int which, int force) -{ - // XXX Handle the ends. - // XXX Handle force is True. - - // Ensure all the "send"-associated interpreters are closed. - _channelend *end; - for (end = ends->send; end != NULL; end = end->next) { - _channelends_close_end(ends, end, 1); - } - - // Ensure all the "recv"-associated interpreters are closed. 
- for (end = ends->recv; end != NULL; end = end->next) { - _channelends_close_end(ends, end, 0); - } -} - -/* channels */ - -struct _channel; -struct _channel_closing; -static void _channel_clear_closing(struct _channel *); -static void _channel_finish_closing(struct _channel *); - -typedef struct _channel { - PyThread_type_lock mutex; - _channelqueue *queue; - _channelends *ends; - int open; - struct _channel_closing *closing; -} _PyChannelState; - -static _PyChannelState * -_channel_new(PyThread_type_lock mutex) -{ - _PyChannelState *chan = PyMem_NEW(_PyChannelState, 1); - if (chan == NULL) { - return NULL; - } - chan->mutex = mutex; - chan->queue = _channelqueue_new(); - if (chan->queue == NULL) { - PyMem_Free(chan); - return NULL; - } - chan->ends = _channelends_new(); - if (chan->ends == NULL) { - _channelqueue_free(chan->queue); - PyMem_Free(chan); - return NULL; - } - chan->open = 1; - chan->closing = NULL; - return chan; -} - -static void -_channel_free(_PyChannelState *chan) -{ - _channel_clear_closing(chan); - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - _channelqueue_free(chan->queue); - _channelends_free(chan->ends); - PyThread_release_lock(chan->mutex); - - PyThread_free_lock(chan->mutex); - PyMem_Free(chan); -} - -static int -_channel_add(_PyChannelState *chan, int64_t interp, - _PyCrossInterpreterData *data) -{ - int res = -1; - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - - if (!chan->open) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - if (_channelends_associate(chan->ends, interp, 1) != 0) { - res = ERR_CHANNEL_INTERP_CLOSED; - goto done; - } - - if (_channelqueue_put(chan->queue, data) != 0) { - goto done; - } - - res = 0; -done: - PyThread_release_lock(chan->mutex); - return res; -} - -static int -_channel_next(_PyChannelState *chan, int64_t interp, - _PyCrossInterpreterData **res) -{ - int err = 0; - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - - if (!chan->open) { - err = ERR_CHANNEL_CLOSED; - goto done; - } - if (_channelends_associate(chan->ends, interp, 0) != 0) { - err = ERR_CHANNEL_INTERP_CLOSED; - goto done; - } - - _PyCrossInterpreterData *data = _channelqueue_get(chan->queue); - if (data == NULL && !PyErr_Occurred() && chan->closing != NULL) { - chan->open = 0; - } - *res = data; - -done: - PyThread_release_lock(chan->mutex); - if (chan->queue->count == 0) { - _channel_finish_closing(chan); - } - return err; -} - -static int -_channel_close_interpreter(_PyChannelState *chan, int64_t interp, int end) -{ - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - - int res = -1; - if (!chan->open) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - - if (_channelends_close_interpreter(chan->ends, interp, end) != 0) { - goto done; - } - chan->open = _channelends_is_open(chan->ends); - - res = 0; -done: - PyThread_release_lock(chan->mutex); - return res; -} - -static int -_channel_close_all(_PyChannelState *chan, int end, int force) -{ - int res = -1; - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - - if (!chan->open) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - - if (!force && chan->queue->count > 0) { - res = ERR_CHANNEL_NOT_EMPTY; - goto done; - } - - chan->open = 0; - - // We *could* also just leave these in place, since we've marked - // the channel as closed already. 
- _channelends_close_all(chan->ends, end, force); - - res = 0; -done: - PyThread_release_lock(chan->mutex); - return res; -} - -/* the set of channels */ - -struct _channelref; - -typedef struct _channelref { - int64_t id; - _PyChannelState *chan; - struct _channelref *next; - Py_ssize_t objcount; -} _channelref; - -static _channelref * -_channelref_new(int64_t id, _PyChannelState *chan) -{ - _channelref *ref = PyMem_NEW(_channelref, 1); - if (ref == NULL) { - return NULL; - } - ref->id = id; - ref->chan = chan; - ref->next = NULL; - ref->objcount = 0; - return ref; -} - -//static void -//_channelref_clear(_channelref *ref) -//{ -// ref->id = -1; -// ref->chan = NULL; -// ref->next = NULL; -// ref->objcount = 0; -//} - -static void -_channelref_free(_channelref *ref) -{ - if (ref->chan != NULL) { - _channel_clear_closing(ref->chan); - } - //_channelref_clear(ref); - PyMem_Free(ref); -} - -static _channelref * -_channelref_find(_channelref *first, int64_t id, _channelref **pprev) -{ - _channelref *prev = NULL; - _channelref *ref = first; - while (ref != NULL) { - if (ref->id == id) { - break; - } - prev = ref; - ref = ref->next; - } - if (pprev != NULL) { - *pprev = prev; - } - return ref; -} - -typedef struct _channels { - PyThread_type_lock mutex; - _channelref *head; - int64_t numopen; - int64_t next_id; -} _channels; - -static void -_channels_init(_channels *channels, PyThread_type_lock mutex) -{ - channels->mutex = mutex; - channels->head = NULL; - channels->numopen = 0; - channels->next_id = 0; -} - -static void -_channels_fini(_channels *channels) -{ - assert(channels->numopen == 0); - assert(channels->head == NULL); - if (channels->mutex != NULL) { - PyThread_free_lock(channels->mutex); - channels->mutex = NULL; - } -} - -static int64_t -_channels_next_id(_channels *channels) // needs lock -{ - int64_t id = channels->next_id; - if (id < 0) { - /* overflow */ - return -1; - } - channels->next_id += 1; - return id; -} - -static int -_channels_lookup(_channels *channels, int64_t id, PyThread_type_lock *pmutex, - _PyChannelState **res) -{ - int err = -1; - _PyChannelState *chan = NULL; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - if (pmutex != NULL) { - *pmutex = NULL; - } - - _channelref *ref = _channelref_find(channels->head, id, NULL); - if (ref == NULL) { - err = ERR_CHANNEL_NOT_FOUND; - goto done; - } - if (ref->chan == NULL || !ref->chan->open) { - err = ERR_CHANNEL_CLOSED; - goto done; - } - - if (pmutex != NULL) { - // The mutex will be closed by the caller. - *pmutex = channels->mutex; - } - - chan = ref->chan; - err = 0; - -done: - if (pmutex == NULL || *pmutex == NULL) { - PyThread_release_lock(channels->mutex); - } - *res = chan; - return err; -} - -static int64_t -_channels_add(_channels *channels, _PyChannelState *chan) -{ - int64_t cid = -1; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - - // Create a new ref. - int64_t id = _channels_next_id(channels); - if (id < 0) { - cid = ERR_NO_NEXT_CHANNEL_ID; - goto done; - } - _channelref *ref = _channelref_new(id, chan); - if (ref == NULL) { - goto done; - } - - // Add it to the list. - // We assume that the channel is a new one (not already in the list). 
- ref->next = channels->head; - channels->head = ref; - channels->numopen += 1; - - cid = id; -done: - PyThread_release_lock(channels->mutex); - return cid; -} - -/* forward */ -static int _channel_set_closing(struct _channelref *, PyThread_type_lock); - -static int -_channels_close(_channels *channels, int64_t cid, _PyChannelState **pchan, - int end, int force) -{ - int res = -1; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - if (pchan != NULL) { - *pchan = NULL; - } - - _channelref *ref = _channelref_find(channels->head, cid, NULL); - if (ref == NULL) { - res = ERR_CHANNEL_NOT_FOUND; - goto done; - } - - if (ref->chan == NULL) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - else if (!force && end == CHANNEL_SEND && ref->chan->closing != NULL) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - else { - int err = _channel_close_all(ref->chan, end, force); - if (err != 0) { - if (end == CHANNEL_SEND && err == ERR_CHANNEL_NOT_EMPTY) { - if (ref->chan->closing != NULL) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - // Mark the channel as closing and return. The channel - // will be cleaned up in _channel_next(). - PyErr_Clear(); - int err = _channel_set_closing(ref, channels->mutex); - if (err != 0) { - res = err; - goto done; - } - if (pchan != NULL) { - *pchan = ref->chan; - } - res = 0; - } - else { - res = err; - } - goto done; - } - if (pchan != NULL) { - *pchan = ref->chan; - } - else { - _channel_free(ref->chan); - } - ref->chan = NULL; - } - - res = 0; -done: - PyThread_release_lock(channels->mutex); - return res; -} - -static void -_channels_remove_ref(_channels *channels, _channelref *ref, _channelref *prev, - _PyChannelState **pchan) -{ - if (ref == channels->head) { - channels->head = ref->next; - } - else { - prev->next = ref->next; - } - channels->numopen -= 1; - - if (pchan != NULL) { - *pchan = ref->chan; - } - _channelref_free(ref); -} - -static int -_channels_remove(_channels *channels, int64_t id, _PyChannelState **pchan) -{ - int res = -1; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - - if (pchan != NULL) { - *pchan = NULL; - } - - _channelref *prev = NULL; - _channelref *ref = _channelref_find(channels->head, id, &prev); - if (ref == NULL) { - res = ERR_CHANNEL_NOT_FOUND; - goto done; - } - - _channels_remove_ref(channels, ref, prev, pchan); - - res = 0; -done: - PyThread_release_lock(channels->mutex); - return res; -} - -static int -_channels_add_id_object(_channels *channels, int64_t id) -{ - int res = -1; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - - _channelref *ref = _channelref_find(channels->head, id, NULL); - if (ref == NULL) { - res = ERR_CHANNEL_NOT_FOUND; - goto done; - } - ref->objcount += 1; - - res = 0; -done: - PyThread_release_lock(channels->mutex); - return res; -} - -static void -_channels_drop_id_object(_channels *channels, int64_t id) -{ - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - - _channelref *prev = NULL; - _channelref *ref = _channelref_find(channels->head, id, &prev); - if (ref == NULL) { - // Already destroyed. - goto done; - } - ref->objcount -= 1; - - // Destroy if no longer used. 
- if (ref->objcount == 0) { - _PyChannelState *chan = NULL; - _channels_remove_ref(channels, ref, prev, &chan); - if (chan != NULL) { - _channel_free(chan); - } - } - -done: - PyThread_release_lock(channels->mutex); -} - -static int64_t * -_channels_list_all(_channels *channels, int64_t *count) -{ - int64_t *cids = NULL; - PyThread_acquire_lock(channels->mutex, WAIT_LOCK); - int64_t *ids = PyMem_NEW(int64_t, (Py_ssize_t)(channels->numopen)); - if (ids == NULL) { - goto done; - } - _channelref *ref = channels->head; - for (int64_t i=0; ref != NULL; ref = ref->next, i++) { - ids[i] = ref->id; - } - *count = channels->numopen; - - cids = ids; -done: - PyThread_release_lock(channels->mutex); - return cids; -} - -/* support for closing non-empty channels */ - -struct _channel_closing { - struct _channelref *ref; -}; - -static int -_channel_set_closing(struct _channelref *ref, PyThread_type_lock mutex) { - struct _channel *chan = ref->chan; - if (chan == NULL) { - // already closed - return 0; - } - int res = -1; - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - if (chan->closing != NULL) { - res = ERR_CHANNEL_CLOSED; - goto done; - } - chan->closing = PyMem_NEW(struct _channel_closing, 1); - if (chan->closing == NULL) { - goto done; - } - chan->closing->ref = ref; - - res = 0; -done: - PyThread_release_lock(chan->mutex); - return res; -} - -static void -_channel_clear_closing(struct _channel *chan) { - PyThread_acquire_lock(chan->mutex, WAIT_LOCK); - if (chan->closing != NULL) { - PyMem_Free(chan->closing); - chan->closing = NULL; - } - PyThread_release_lock(chan->mutex); -} - -static void -_channel_finish_closing(struct _channel *chan) { - struct _channel_closing *closing = chan->closing; - if (closing == NULL) { - return; - } - _channelref *ref = closing->ref; - _channel_clear_closing(chan); - // Do the things that would have been done in _channels_close(). - ref->chan = NULL; - _channel_free(chan); -} - -/* "high"-level channel-related functions */ - -static int64_t -_channel_create(_channels *channels) -{ - PyThread_type_lock mutex = PyThread_allocate_lock(); - if (mutex == NULL) { - return ERR_CHANNEL_MUTEX_INIT; - } - _PyChannelState *chan = _channel_new(mutex); - if (chan == NULL) { - PyThread_free_lock(mutex); - return -1; - } - int64_t id = _channels_add(channels, chan); - if (id < 0) { - _channel_free(chan); - } - return id; -} - -static int -_channel_destroy(_channels *channels, int64_t id) -{ - _PyChannelState *chan = NULL; - int err = _channels_remove(channels, id, &chan); - if (err != 0) { - return err; - } - if (chan != NULL) { - _channel_free(chan); - } - return 0; -} - -static int -_channel_send(_channels *channels, int64_t id, PyObject *obj) -{ - PyInterpreterState *interp = _get_current_interp(); - if (interp == NULL) { - return -1; - } - - // Look up the channel. - PyThread_type_lock mutex = NULL; - _PyChannelState *chan = NULL; - int err = _channels_lookup(channels, id, &mutex, &chan); - if (err != 0) { - return err; - } - assert(chan != NULL); - // Past this point we are responsible for releasing the mutex. - - if (chan->closing != NULL) { - PyThread_release_lock(mutex); - return ERR_CHANNEL_CLOSED; - } - - // Convert the object to cross-interpreter data. - _PyCrossInterpreterData *data = PyMem_NEW(_PyCrossInterpreterData, 1); - if (data == NULL) { - PyThread_release_lock(mutex); - return -1; - } - if (_PyObject_GetCrossInterpreterData(obj, data) != 0) { - PyThread_release_lock(mutex); - PyMem_Free(data); - return -1; - } - - // Add the data to the channel. 
- int res = _channel_add(chan, PyInterpreterState_GetID(interp), data); - PyThread_release_lock(mutex); - if (res != 0) { - // We may chain an exception here: - (void)_release_xid_data(data, 0); - PyMem_Free(data); - return res; - } - - return 0; -} - -static int -_channel_recv(_channels *channels, int64_t id, PyObject **res) -{ - int err; - *res = NULL; - - PyInterpreterState *interp = _get_current_interp(); - if (interp == NULL) { - // XXX Is this always an error? - if (PyErr_Occurred()) { - return -1; - } - return 0; - } - - // Look up the channel. - PyThread_type_lock mutex = NULL; - _PyChannelState *chan = NULL; - err = _channels_lookup(channels, id, &mutex, &chan); - if (err != 0) { - return err; - } - assert(chan != NULL); - // Past this point we are responsible for releasing the mutex. - - // Pop off the next item from the channel. - _PyCrossInterpreterData *data = NULL; - err = _channel_next(chan, PyInterpreterState_GetID(interp), &data); - PyThread_release_lock(mutex); - if (err != 0) { - return err; - } - else if (data == NULL) { - assert(!PyErr_Occurred()); - return 0; - } - - // Convert the data back to an object. - PyObject *obj = _PyCrossInterpreterData_NewObject(data); - if (obj == NULL) { - assert(PyErr_Occurred()); - (void)_release_xid_data(data, 1); - PyMem_Free(data); - return -1; - } - int release_res = _release_xid_data(data, 0); - PyMem_Free(data); - if (release_res < 0) { - // The source interpreter has been destroyed already. - assert(PyErr_Occurred()); - Py_DECREF(obj); - return -1; - } - - *res = obj; - return 0; -} - -static int -_channel_drop(_channels *channels, int64_t id, int send, int recv) -{ - PyInterpreterState *interp = _get_current_interp(); - if (interp == NULL) { - return -1; - } - - // Look up the channel. - PyThread_type_lock mutex = NULL; - _PyChannelState *chan = NULL; - int err = _channels_lookup(channels, id, &mutex, &chan); - if (err != 0) { - return err; - } - // Past this point we are responsible for releasing the mutex. - - // Close one or both of the two ends. - int res = _channel_close_interpreter(chan, PyInterpreterState_GetID(interp), send-recv); - PyThread_release_lock(mutex); - return res; -} - -static int -_channel_close(_channels *channels, int64_t id, int end, int force) -{ - return _channels_close(channels, id, NULL, end, force); -} - -static int -_channel_is_associated(_channels *channels, int64_t cid, int64_t interp, - int send) -{ - _PyChannelState *chan = NULL; - int err = _channels_lookup(channels, cid, NULL, &chan); - if (err != 0) { - return err; - } - else if (send && chan->closing != NULL) { - return ERR_CHANNEL_CLOSED; - } - - _channelend *end = _channelend_find(send ? 
chan->ends->send : chan->ends->recv, - interp, NULL); - - return (end != NULL && end->open); -} - -/* ChannelID class */ - -typedef struct channelid { - PyObject_HEAD - int64_t id; - int end; - int resolve; - _channels *channels; -} channelid; - -struct channel_id_converter_data { - PyObject *module; - int64_t cid; -}; - -static int -channel_id_converter(PyObject *arg, void *ptr) -{ - int64_t cid; - struct channel_id_converter_data *data = ptr; - module_state *state = get_module_state(data->module); - assert(state != NULL); - if (PyObject_TypeCheck(arg, state->ChannelIDType)) { - cid = ((channelid *)arg)->id; - } - else if (PyIndex_Check(arg)) { - cid = PyLong_AsLongLong(arg); - if (cid == -1 && PyErr_Occurred()) { - return 0; - } - if (cid < 0) { - PyErr_Format(PyExc_ValueError, - "channel ID must be a non-negative int, got %R", arg); - return 0; - } - } - else { - PyErr_Format(PyExc_TypeError, - "channel ID must be an int, got %.100s", - Py_TYPE(arg)->tp_name); - return 0; - } - data->cid = cid; - return 1; -} - -static int -newchannelid(PyTypeObject *cls, int64_t cid, int end, _channels *channels, - int force, int resolve, channelid **res) -{ - *res = NULL; - - channelid *self = PyObject_New(channelid, cls); - if (self == NULL) { - return -1; - } - self->id = cid; - self->end = end; - self->resolve = resolve; - self->channels = channels; - - int err = _channels_add_id_object(channels, cid); - if (err != 0) { - if (force && err == ERR_CHANNEL_NOT_FOUND) { - assert(!PyErr_Occurred()); - } - else { - Py_DECREF((PyObject *)self); - return err; - } - } - - *res = self; - return 0; -} - -static _channels * _global_channels(void); - -static PyObject * -_channelid_new(PyObject *mod, PyTypeObject *cls, - PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"id", "send", "recv", "force", "_resolve", NULL}; - int64_t cid; - struct channel_id_converter_data cid_data = { - .module = mod, - }; - int send = -1; - int recv = -1; - int force = 0; - int resolve = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&|$pppp:ChannelID.__new__", kwlist, - channel_id_converter, &cid_data, - &send, &recv, &force, &resolve)) { - return NULL; - } - cid = cid_data.cid; - - // Handle "send" and "recv". - if (send == 0 && recv == 0) { - PyErr_SetString(PyExc_ValueError, - "'send' and 'recv' cannot both be False"); - return NULL; - } - - int end = 0; - if (send == 1) { - if (recv == 0 || recv == -1) { - end = CHANNEL_SEND; - } - } - else if (recv == 1) { - end = CHANNEL_RECV; - } - - PyObject *id = NULL; - int err = newchannelid(cls, cid, end, _global_channels(), - force, resolve, - (channelid **)&id); - if (handle_channel_error(err, mod, cid)) { - assert(id == NULL); - return NULL; - } - assert(id != NULL); - return id; -} - -static void -channelid_dealloc(PyObject *self) -{ - int64_t cid = ((channelid *)self)->id; - _channels *channels = ((channelid *)self)->channels; - - PyTypeObject *tp = Py_TYPE(self); - tp->tp_free(self); - /* "Instances of heap-allocated types hold a reference to their type." - * See: https://docs.python.org/3.11/howto/isolating-extensions.html#garbage-collection-protocol - * See: https://docs.python.org/3.11/c-api/typeobj.html#c.PyTypeObject.tp_traverse - */ - // XXX Why don't we implement Py_TPFLAGS_HAVE_GC, e.g. Py_tp_traverse, - // like we do for _abc._abc_data? 
- Py_DECREF(tp); - - _channels_drop_id_object(channels, cid); -} - -static PyObject * -channelid_repr(PyObject *self) -{ - PyTypeObject *type = Py_TYPE(self); - const char *name = _PyType_Name(type); - - channelid *cid = (channelid *)self; - const char *fmt; - if (cid->end == CHANNEL_SEND) { - fmt = "%s(%" PRId64 ", send=True)"; - } - else if (cid->end == CHANNEL_RECV) { - fmt = "%s(%" PRId64 ", recv=True)"; - } - else { - fmt = "%s(%" PRId64 ")"; - } - return PyUnicode_FromFormat(fmt, name, cid->id); -} - -static PyObject * -channelid_str(PyObject *self) -{ - channelid *cid = (channelid *)self; - return PyUnicode_FromFormat("%" PRId64 "", cid->id); -} - -static PyObject * -channelid_int(PyObject *self) -{ - channelid *cid = (channelid *)self; - return PyLong_FromLongLong(cid->id); -} - -static Py_hash_t -channelid_hash(PyObject *self) -{ - channelid *cid = (channelid *)self; - PyObject *id = PyLong_FromLongLong(cid->id); - if (id == NULL) { - return -1; - } - Py_hash_t hash = PyObject_Hash(id); - Py_DECREF(id); - return hash; -} - -static PyObject * -channelid_richcompare(PyObject *self, PyObject *other, int op) -{ - PyObject *res = NULL; - if (op != Py_EQ && op != Py_NE) { - Py_RETURN_NOTIMPLEMENTED; - } - - PyObject *mod = get_module_from_type(Py_TYPE(self)); - if (mod == NULL) { - return NULL; - } - module_state *state = get_module_state(mod); - if (state == NULL) { - goto done; - } - - if (!PyObject_TypeCheck(self, state->ChannelIDType)) { - res = Py_NewRef(Py_NotImplemented); - goto done; - } - - channelid *cid = (channelid *)self; - int equal; - if (PyObject_TypeCheck(other, state->ChannelIDType)) { - channelid *othercid = (channelid *)other; - equal = (cid->end == othercid->end) && (cid->id == othercid->id); - } - else if (PyLong_Check(other)) { - /* Fast path */ - int overflow; - long long othercid = PyLong_AsLongLongAndOverflow(other, &overflow); - if (othercid == -1 && PyErr_Occurred()) { - goto done; - } - equal = !overflow && (othercid >= 0) && (cid->id == othercid); - } - else if (PyNumber_Check(other)) { - PyObject *pyid = PyLong_FromLongLong(cid->id); - if (pyid == NULL) { - goto done; - } - res = PyObject_RichCompare(pyid, other, op); - Py_DECREF(pyid); - goto done; - } - else { - res = Py_NewRef(Py_NotImplemented); - goto done; - } - - if ((op == Py_EQ && equal) || (op == Py_NE && !equal)) { - res = Py_NewRef(Py_True); - } - else { - res = Py_NewRef(Py_False); - } - -done: - Py_DECREF(mod); - return res; -} - -static PyObject * -_channel_from_cid(PyObject *cid, int end) -{ - PyObject *highlevel = PyImport_ImportModule("interpreters"); - if (highlevel == NULL) { - PyErr_Clear(); - highlevel = PyImport_ImportModule("test.support.interpreters"); - if (highlevel == NULL) { - return NULL; - } - } - const char *clsname = (end == CHANNEL_RECV) ? 
"RecvChannel" : - "SendChannel"; - PyObject *cls = PyObject_GetAttrString(highlevel, clsname); - Py_DECREF(highlevel); - if (cls == NULL) { - return NULL; - } - PyObject *chan = PyObject_CallFunctionObjArgs(cls, cid, NULL); - Py_DECREF(cls); - if (chan == NULL) { - return NULL; - } - return chan; -} - -struct _channelid_xid { - int64_t id; - int end; - int resolve; -}; - -static PyObject * -_channelid_from_xid(_PyCrossInterpreterData *data) -{ - struct _channelid_xid *xid = (struct _channelid_xid *)data->data; - - PyObject *mod = _get_current_module(); - if (mod == NULL) { - return NULL; - } - module_state *state = get_module_state(mod); - if (state == NULL) { - return NULL; + goto finally; } - // Note that we do not preserve the "resolve" flag. - PyObject *cid = NULL; - int err = newchannelid(state->ChannelIDType, xid->id, xid->end, - _global_channels(), 0, 0, - (channelid **)&cid); - if (err != 0) { - assert(cid == NULL); - (void)handle_channel_error(err, mod, xid->id); - goto done; - } - assert(cid != NULL); - if (xid->end == 0) { - goto done; + PyObject *name = PyUnicode_FromFormat("%S", exctype); + if (name == NULL) { + failure = "unable to format exception type name"; + goto finally; } - if (!xid->resolve) { - goto done; + err->name = _copy_raw_string(name); + Py_DECREF(name); + if (err->name == NULL) { + if (PyErr_ExceptionMatches(PyExc_MemoryError)) { + failure = "out of memory copying exception type name"; + } else { + failure = "unable to encode and copy exception type name"; + } + goto finally; } - /* Try returning a high-level channel end but fall back to the ID. */ - PyObject *chan = _channel_from_cid(cid, xid->end); - if (chan == NULL) { - PyErr_Clear(); - goto done; + if (exc != NULL) { + PyObject *msg = PyUnicode_FromFormat("%S", exc); + if (msg == NULL) { + failure = "unable to format exception message"; + goto finally; + } + err->msg = _copy_raw_string(msg); + Py_DECREF(msg); + if (err->msg == NULL) { + if (PyErr_ExceptionMatches(PyExc_MemoryError)) { + failure = "out of memory copying exception message"; + } else { + failure = "unable to encode and copy exception message"; + } + goto finally; + } } - Py_DECREF(cid); - cid = chan; -done: - Py_DECREF(mod); - return cid; -} - -static int -_channelid_shared(PyThreadState *tstate, PyObject *obj, - _PyCrossInterpreterData *data) -{ - if (_PyCrossInterpreterData_InitWithSize( - data, tstate->interp, sizeof(struct _channelid_xid), obj, - _channelid_from_xid - ) < 0) - { - return -1; +finally: + if (failure != NULL) { + PyErr_Clear(); + if (err->name != NULL) { + PyMem_Free(err->name); + err->name = NULL; + } + err->msg = failure; } - struct _channelid_xid *xid = (struct _channelid_xid *)data->data; - xid->id = ((channelid *)obj)->id; - xid->end = ((channelid *)obj)->end; - xid->resolve = ((channelid *)obj)->resolve; - return 0; + return err; } -static PyObject * -channelid_end(PyObject *self, void *end) +static void +_sharedexception_apply(_sharedexception *exc, PyObject *wrapperclass) { - int force = 1; - channelid *cid = (channelid *)self; - if (end != NULL) { - PyObject *id = NULL; - int err = newchannelid(Py_TYPE(self), cid->id, *(int *)end, - cid->channels, force, cid->resolve, - (channelid **)&id); - if (err != 0) { - assert(id == NULL); - PyObject *mod = get_module_from_type(Py_TYPE(self)); - if (mod == NULL) { - return NULL; - } - (void)handle_channel_error(err, mod, cid->id); - Py_DECREF(mod); - return NULL; + if (exc->name != NULL) { + if (exc->msg != NULL) { + PyErr_Format(wrapperclass, "%s: %s", exc->name, exc->msg); 
+ } + else { + PyErr_SetString(wrapperclass, exc->name); } - assert(id != NULL); - return id; } - - if (cid->end == CHANNEL_SEND) { - return PyUnicode_InternFromString("send"); + else if (exc->msg != NULL) { + PyErr_SetString(wrapperclass, exc->msg); } - if (cid->end == CHANNEL_RECV) { - return PyUnicode_InternFromString("recv"); + else { + PyErr_SetNone(wrapperclass); } - return PyUnicode_InternFromString("both"); } -static int _channelid_end_send = CHANNEL_SEND; -static int _channelid_end_recv = CHANNEL_RECV; - -static PyGetSetDef channelid_getsets[] = { - {"end", (getter)channelid_end, NULL, - PyDoc_STR("'send', 'recv', or 'both'")}, - {"send", (getter)channelid_end, NULL, - PyDoc_STR("the 'send' end of the channel"), &_channelid_end_send}, - {"recv", (getter)channelid_end, NULL, - PyDoc_STR("the 'recv' end of the channel"), &_channelid_end_recv}, - {NULL} -}; - -PyDoc_STRVAR(channelid_doc, -"A channel ID identifies a channel and may be used as an int."); - -static PyType_Slot ChannelIDType_slots[] = { - {Py_tp_dealloc, (destructor)channelid_dealloc}, - {Py_tp_doc, (void *)channelid_doc}, - {Py_tp_repr, (reprfunc)channelid_repr}, - {Py_tp_str, (reprfunc)channelid_str}, - {Py_tp_hash, channelid_hash}, - {Py_tp_richcompare, channelid_richcompare}, - {Py_tp_getset, channelid_getsets}, - // number slots - {Py_nb_int, (unaryfunc)channelid_int}, - {Py_nb_index, (unaryfunc)channelid_int}, - {0, NULL}, -}; - -static PyType_Spec ChannelIDType_spec = { - .name = "_xxsubinterpreters.ChannelID", - .basicsize = sizeof(channelid), - .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | - Py_TPFLAGS_DISALLOW_INSTANTIATION | Py_TPFLAGS_IMMUTABLETYPE), - .slots = ChannelIDType_slots, -}; - /* interpreter-specific code ************************************************/ static int -interp_exceptions_init(PyObject *mod) +exceptions_init(PyObject *mod) { module_state *state = get_module_state(mod); if (state == NULL) { @@ -2145,24 +430,12 @@ _ensure_not_running(PyInterpreterState *interp) static int _run_script(PyInterpreterState *interp, const char *codestr, - _sharedns *shared, int needs_import, - _sharedexception **exc) + _sharedns *shared, _sharedexception **exc) { PyObject *exctype = NULL; PyObject *excval = NULL; PyObject *tb = NULL; - if (needs_import) { - // It might not have been imported yet in the current interpreter. - // However, it will (almost) always have been imported already - // in the main interpreter. - PyObject *mod = PyImport_ImportModule(MODULE_NAME); - if (mod == NULL) { - goto error; - } - Py_DECREF(mod); - } - PyObject *main_mod = _PyInterpreterState_GetMainModule(interp); if (main_mod == NULL) { goto error; @@ -2223,9 +496,7 @@ _run_script_in_interpreter(PyObject *mod, PyInterpreterState *interp, } module_state *state = get_module_state(mod); - int needs_import = 0; - _sharedns *shared = _get_shared_ns(shareables, state->ChannelIDType, - &needs_import); + _sharedns *shared = _get_shared_ns(shareables); if (shared == NULL && PyErr_Occurred()) { return -1; } @@ -2241,7 +512,7 @@ _run_script_in_interpreter(PyObject *mod, PyInterpreterState *interp, // Run the script. _sharedexception *exc = NULL; - int result = _run_script(interp, codestr, shared, needs_import, &exc); + int result = _run_script(interp, codestr, shared, &exc); // Switch back. 
if (save_tstate != NULL) { @@ -2269,50 +540,6 @@ _run_script_in_interpreter(PyObject *mod, PyInterpreterState *interp, /* module level code ********************************************************/ -/* globals is the process-global state for the module. It holds all - the data that we need to share between interpreters, so it cannot - hold PyObject values. */ -static struct globals { - int module_count; - _channels channels; -} _globals = {0}; - -static int -_globals_init(void) -{ - // XXX This isn't thread-safe. - _globals.module_count++; - if (_globals.module_count > 1) { - // Already initialized. - return 0; - } - - assert(_globals.channels.mutex == NULL); - PyThread_type_lock mutex = PyThread_allocate_lock(); - if (mutex == NULL) { - return ERR_CHANNELS_MUTEX_INIT; - } - _channels_init(&_globals.channels, mutex); - return 0; -} - -static void -_globals_fini(void) -{ - // XXX This isn't thread-safe. - _globals.module_count--; - if (_globals.module_count > 0) { - return; - } - - _channels_fini(&_globals.channels); -} - -static _channels * -_global_channels(void) { - return &_globals.channels; -} - static PyObject * interp_create(PyObject *self, PyObject *args, PyObject *kwds) { @@ -2578,358 +805,6 @@ PyDoc_STRVAR(is_running_doc, \n\ Return whether or not the identified interpreter is running."); -static PyObject * -channel_create(PyObject *self, PyObject *Py_UNUSED(ignored)) -{ - int64_t cid = _channel_create(&_globals.channels); - if (cid < 0) { - (void)handle_channel_error(-1, self, cid); - return NULL; - } - module_state *state = get_module_state(self); - if (state == NULL) { - return NULL; - } - PyObject *id = NULL; - int err = newchannelid(state->ChannelIDType, cid, 0, - &_globals.channels, 0, 0, - (channelid **)&id); - if (handle_channel_error(err, self, cid)) { - assert(id == NULL); - err = _channel_destroy(&_globals.channels, cid); - if (handle_channel_error(err, self, cid)) { - // XXX issue a warning? - } - return NULL; - } - assert(id != NULL); - assert(((channelid *)id)->channels != NULL); - return id; -} - -PyDoc_STRVAR(channel_create_doc, -"channel_create() -> cid\n\ -\n\ -Create a new cross-interpreter channel and return a unique generated ID."); - -static PyObject * -channel_destroy(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", NULL}; - int64_t cid; - struct channel_id_converter_data cid_data = { - .module = self, - }; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&:channel_destroy", kwlist, - channel_id_converter, &cid_data)) { - return NULL; - } - cid = cid_data.cid; - - int err = _channel_destroy(&_globals.channels, cid); - if (handle_channel_error(err, self, cid)) { - return NULL; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(channel_destroy_doc, -"channel_destroy(cid)\n\ -\n\ -Close and finalize the channel. 
Afterward attempts to use the channel\n\ -will behave as though it never existed."); - -static PyObject * -channel_list_all(PyObject *self, PyObject *Py_UNUSED(ignored)) -{ - int64_t count = 0; - int64_t *cids = _channels_list_all(&_globals.channels, &count); - if (cids == NULL) { - if (count == 0) { - return PyList_New(0); - } - return NULL; - } - PyObject *ids = PyList_New((Py_ssize_t)count); - if (ids == NULL) { - goto finally; - } - module_state *state = get_module_state(self); - if (state == NULL) { - Py_DECREF(ids); - ids = NULL; - goto finally; - } - int64_t *cur = cids; - for (int64_t i=0; i < count; cur++, i++) { - PyObject *id = NULL; - int err = newchannelid(state->ChannelIDType, *cur, 0, - &_globals.channels, 0, 0, - (channelid **)&id); - if (handle_channel_error(err, self, *cur)) { - assert(id == NULL); - Py_SETREF(ids, NULL); - break; - } - assert(id != NULL); - PyList_SET_ITEM(ids, (Py_ssize_t)i, id); - } - -finally: - PyMem_Free(cids); - return ids; -} - -PyDoc_STRVAR(channel_list_all_doc, -"channel_list_all() -> [cid]\n\ -\n\ -Return the list of all IDs for active channels."); - -static PyObject * -channel_list_interpreters(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", "send", NULL}; - int64_t cid; /* Channel ID */ - struct channel_id_converter_data cid_data = { - .module = self, - }; - int send = 0; /* Send or receive end? */ - int64_t id; - PyObject *ids, *id_obj; - PyInterpreterState *interp; - - if (!PyArg_ParseTupleAndKeywords( - args, kwds, "O&$p:channel_list_interpreters", - kwlist, channel_id_converter, &cid_data, &send)) { - return NULL; - } - cid = cid_data.cid; - - ids = PyList_New(0); - if (ids == NULL) { - goto except; - } - - interp = PyInterpreterState_Head(); - while (interp != NULL) { - id = PyInterpreterState_GetID(interp); - assert(id >= 0); - int res = _channel_is_associated(&_globals.channels, cid, id, send); - if (res < 0) { - (void)handle_channel_error(res, self, cid); - goto except; - } - if (res) { - id_obj = _PyInterpreterState_GetIDObject(interp); - if (id_obj == NULL) { - goto except; - } - res = PyList_Insert(ids, 0, id_obj); - Py_DECREF(id_obj); - if (res < 0) { - goto except; - } - } - interp = PyInterpreterState_Next(interp); - } - - goto finally; - -except: - Py_CLEAR(ids); - -finally: - return ids; -} - -PyDoc_STRVAR(channel_list_interpreters_doc, -"channel_list_interpreters(cid, *, send) -> [id]\n\ -\n\ -Return the list of all interpreter IDs associated with an end of the channel.\n\ -\n\ -The 'send' argument should be a boolean indicating whether to use the send or\n\ -receive end."); - - -static PyObject * -channel_send(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", "obj", NULL}; - int64_t cid; - struct channel_id_converter_data cid_data = { - .module = self, - }; - PyObject *obj; - if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&O:channel_send", kwlist, - channel_id_converter, &cid_data, &obj)) { - return NULL; - } - cid = cid_data.cid; - - int err = _channel_send(&_globals.channels, cid, obj); - if (handle_channel_error(err, self, cid)) { - return NULL; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(channel_send_doc, -"channel_send(cid, obj)\n\ -\n\ -Add the object's data to the channel's queue."); - -static PyObject * -channel_recv(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", "default", NULL}; - int64_t cid; - struct channel_id_converter_data cid_data = { - .module = self, - }; - PyObject *dflt = NULL; - if 
(!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O:channel_recv", kwlist, - channel_id_converter, &cid_data, &dflt)) { - return NULL; - } - cid = cid_data.cid; - - PyObject *obj = NULL; - int err = _channel_recv(&_globals.channels, cid, &obj); - if (handle_channel_error(err, self, cid)) { - return NULL; - } - Py_XINCREF(dflt); - if (obj == NULL) { - // Use the default. - if (dflt == NULL) { - (void)handle_channel_error(ERR_CHANNEL_EMPTY, self, cid); - return NULL; - } - obj = Py_NewRef(dflt); - } - Py_XDECREF(dflt); - return obj; -} - -PyDoc_STRVAR(channel_recv_doc, -"channel_recv(cid, [default]) -> obj\n\ -\n\ -Return a new object from the data at the front of the channel's queue.\n\ -\n\ -If there is nothing to receive then raise ChannelEmptyError, unless\n\ -a default value is provided. In that case return it."); - -static PyObject * -channel_close(PyObject *self, PyObject *args, PyObject *kwds) -{ - static char *kwlist[] = {"cid", "send", "recv", "force", NULL}; - int64_t cid; - struct channel_id_converter_data cid_data = { - .module = self, - }; - int send = 0; - int recv = 0; - int force = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&|$ppp:channel_close", kwlist, - channel_id_converter, &cid_data, - &send, &recv, &force)) { - return NULL; - } - cid = cid_data.cid; - - int err = _channel_close(&_globals.channels, cid, send-recv, force); - if (handle_channel_error(err, self, cid)) { - return NULL; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(channel_close_doc, -"channel_close(cid, *, send=None, recv=None, force=False)\n\ -\n\ -Close the channel for all interpreters.\n\ -\n\ -If the channel is empty then the keyword args are ignored and both\n\ -ends are immediately closed. Otherwise, if 'force' is True then\n\ -all queued items are released and both ends are immediately\n\ -closed.\n\ -\n\ -If the channel is not empty *and* 'force' is False then following\n\ -happens:\n\ -\n\ - * recv is True (regardless of send):\n\ - - raise ChannelNotEmptyError\n\ - * recv is None and send is None:\n\ - - raise ChannelNotEmptyError\n\ - * send is True and recv is not True:\n\ - - fully close the 'send' end\n\ - - close the 'recv' end to interpreters not already receiving\n\ - - fully close it once empty\n\ -\n\ -Closing an already closed channel results in a ChannelClosedError.\n\ -\n\ -Once the channel's ID has no more ref counts in any interpreter\n\ -the channel will be destroyed."); - -static PyObject * -channel_release(PyObject *self, PyObject *args, PyObject *kwds) -{ - // Note that only the current interpreter is affected. - static char *kwlist[] = {"cid", "send", "recv", "force", NULL}; - int64_t cid; - struct channel_id_converter_data cid_data = { - .module = self, - }; - int send = 0; - int recv = 0; - int force = 0; - if (!PyArg_ParseTupleAndKeywords(args, kwds, - "O&|$ppp:channel_release", kwlist, - channel_id_converter, &cid_data, - &send, &recv, &force)) { - return NULL; - } - cid = cid_data.cid; - if (send == 0 && recv == 0) { - send = 1; - recv = 1; - } - - // XXX Handle force is True. - // XXX Fix implicit release. - - int err = _channel_drop(&_globals.channels, cid, send, recv); - if (handle_channel_error(err, self, cid)) { - return NULL; - } - Py_RETURN_NONE; -} - -PyDoc_STRVAR(channel_release_doc, -"channel_release(cid, *, send=None, recv=None, force=True)\n\ -\n\ -Close the channel for the current interpreter. 'send' and 'recv'\n\ -(bool) may be used to indicate the ends to close. By default both\n\ -ends are closed. 
Closing an already closed end is a noop."); - -static PyObject * -channel__channel_id(PyObject *self, PyObject *args, PyObject *kwds) -{ - module_state *state = get_module_state(self); - if (state == NULL) { - return NULL; - } - PyTypeObject *cls = state->ChannelIDType; - PyObject *mod = get_module_from_owned_type(cls); - if (mod == NULL) { - return NULL; - } - PyObject *cid = _channelid_new(mod, cls, args, kwds); - Py_DECREF(mod); - return cid; -} - static PyMethodDef module_functions[] = { {"create", _PyCFunction_CAST(interp_create), METH_VARARGS | METH_KEYWORDS, create_doc}, @@ -2941,6 +816,7 @@ static PyMethodDef module_functions[] = { METH_NOARGS, get_current_doc}, {"get_main", interp_get_main, METH_NOARGS, get_main_doc}, + {"is_running", _PyCFunction_CAST(interp_is_running), METH_VARARGS | METH_KEYWORDS, is_running_doc}, {"run_string", _PyCFunction_CAST(interp_run_string), @@ -2949,25 +825,6 @@ static PyMethodDef module_functions[] = { {"is_shareable", _PyCFunction_CAST(object_is_shareable), METH_VARARGS | METH_KEYWORDS, is_shareable_doc}, - {"channel_create", channel_create, - METH_NOARGS, channel_create_doc}, - {"channel_destroy", _PyCFunction_CAST(channel_destroy), - METH_VARARGS | METH_KEYWORDS, channel_destroy_doc}, - {"channel_list_all", channel_list_all, - METH_NOARGS, channel_list_all_doc}, - {"channel_list_interpreters", _PyCFunction_CAST(channel_list_interpreters), - METH_VARARGS | METH_KEYWORDS, channel_list_interpreters_doc}, - {"channel_send", _PyCFunction_CAST(channel_send), - METH_VARARGS | METH_KEYWORDS, channel_send_doc}, - {"channel_recv", _PyCFunction_CAST(channel_recv), - METH_VARARGS | METH_KEYWORDS, channel_recv_doc}, - {"channel_close", _PyCFunction_CAST(channel_close), - METH_VARARGS | METH_KEYWORDS, channel_close_doc}, - {"channel_release", _PyCFunction_CAST(channel_release), - METH_VARARGS | METH_KEYWORDS, channel_release_doc}, - {"_channel_id", _PyCFunction_CAST(channel__channel_id), - METH_VARARGS | METH_KEYWORDS, NULL}, - {NULL, NULL} /* sentinel */ }; @@ -2981,29 +838,8 @@ The 'interpreters' module provides a more convenient interface."); static int module_exec(PyObject *mod) { - if (_globals_init() != 0) { - return -1; - } - - module_state *state = get_module_state(mod); - if (state == NULL) { - goto error; - } - /* Add exception types */ - if (interp_exceptions_init(mod) != 0) { - goto error; - } - if (channel_exceptions_init(mod) != 0) { - goto error; - } - - /* Add other types */ - - // ChannelID - state->ChannelIDType = add_new_type( - mod, &ChannelIDType_spec, _channelid_shared); - if (state->ChannelIDType == NULL) { + if (exceptions_init(mod) != 0) { goto error; } @@ -3015,8 +851,6 @@ module_exec(PyObject *mod) return 0; error: - (void)_PyCrossInterpreterData_UnregisterClass(state->ChannelIDType); - _globals_fini(); return -1; } @@ -3049,7 +883,6 @@ module_free(void *mod) module_state *state = get_module_state(mod); assert(state != NULL); clear_module_state(state); - _globals_fini(); } static struct PyModuleDef moduledef = { diff --git a/Modules/clinic/cmathmodule.c.h b/Modules/clinic/cmathmodule.c.h index bc91c20f373bcd..b1da9452c61db8 100644 --- a/Modules/clinic/cmathmodule.c.h +++ b/Modules/clinic/cmathmodule.c.h @@ -639,13 +639,12 @@ cmath_tanh(PyObject *module, PyObject *arg) } PyDoc_STRVAR(cmath_log__doc__, -"log($module, z, base=None, /)\n" +"log($module, z, base=, /)\n" "--\n" "\n" "log(z[, base]) -> the logarithm of z to the given base.\n" "\n" -"If the base is not specified or is None, returns the\n" -"natural logarithm (base e) of z."); 
+"If the base not specified, returns the natural logarithm (base e) of z."); #define CMATH_LOG_METHODDEF \ {"log", _PyCFunction_CAST(cmath_log), METH_FASTCALL, cmath_log__doc__}, @@ -658,7 +657,7 @@ cmath_log(PyObject *module, PyObject *const *args, Py_ssize_t nargs) { PyObject *return_value = NULL; Py_complex x; - PyObject *y_obj = Py_None; + PyObject *y_obj = NULL; if (!_PyArg_CheckPositional("log", nargs, 1, 2)) { goto exit; @@ -983,4 +982,4 @@ cmath_isclose(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObjec exit: return return_value; } -/*[clinic end generated code: output=2630f8740909a8f7 input=a9049054013a1b77]*/ +/*[clinic end generated code: output=0146c656e67f5d5f input=a9049054013a1b77]*/ diff --git a/Modules/clinic/itertoolsmodule.c.h b/Modules/clinic/itertoolsmodule.c.h index c492c33daea5a2..d15d5f0890ca98 100644 --- a/Modules/clinic/itertoolsmodule.c.h +++ b/Modules/clinic/itertoolsmodule.c.h @@ -102,7 +102,7 @@ static PyObject * pairwise_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; - PyTypeObject *base_tp = &pairwise_type; + PyTypeObject *base_tp = clinic_state()->pairwise_type; PyObject *iterable; if ((type == base_tp || type->tp_init == base_tp->tp_init) && @@ -345,7 +345,7 @@ static PyObject * itertools_cycle(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; - PyTypeObject *base_tp = &cycle_type; + PyTypeObject *base_tp = clinic_state()->cycle_type; PyObject *iterable; if ((type == base_tp || type->tp_init == base_tp->tp_init) && @@ -377,7 +377,7 @@ static PyObject * itertools_dropwhile(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; - PyTypeObject *base_tp = &dropwhile_type; + PyTypeObject *base_tp = clinic_state()->dropwhile_type; PyObject *func; PyObject *seq; @@ -409,7 +409,7 @@ static PyObject * itertools_takewhile(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; - PyTypeObject *base_tp = &takewhile_type; + PyTypeObject *base_tp = clinic_state()->takewhile_type; PyObject *func; PyObject *seq; @@ -441,7 +441,7 @@ static PyObject * itertools_starmap(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; - PyTypeObject *base_tp = &starmap_type; + PyTypeObject *base_tp = clinic_state()->starmap_type; PyObject *func; PyObject *seq; @@ -821,7 +821,7 @@ static PyObject * itertools_filterfalse(PyTypeObject *type, PyObject *args, PyObject *kwargs) { PyObject *return_value = NULL; - PyTypeObject *base_tp = &filterfalse_type; + PyTypeObject *base_tp = clinic_state()->filterfalse_type; PyObject *func; PyObject *seq; @@ -913,4 +913,4 @@ itertools_count(PyTypeObject *type, PyObject *args, PyObject *kwargs) exit: return return_value; } -/*[clinic end generated code: output=c3069caac417e165 input=a9049054013a1b77]*/ +/*[clinic end generated code: output=a08b58d7dac825da input=a9049054013a1b77]*/ diff --git a/Modules/clinic/mathmodule.c.h b/Modules/clinic/mathmodule.c.h index 0d61fd1be38ddb..1f9725883b9820 100644 --- a/Modules/clinic/mathmodule.c.h +++ b/Modules/clinic/mathmodule.c.h @@ -187,37 +187,43 @@ math_modf(PyObject *module, PyObject *arg) } PyDoc_STRVAR(math_log__doc__, -"log($module, x, base=None, /)\n" -"--\n" -"\n" +"log(x, [base=math.e])\n" "Return the logarithm of x to the given base.\n" "\n" -"If the base is not specified or is None, returns the natural\n" -"logarithm (base e) of x."); +"If the base not specified, returns the natural logarithm (base e) of x."); 
#define MATH_LOG_METHODDEF \ - {"log", _PyCFunction_CAST(math_log), METH_FASTCALL, math_log__doc__}, + {"log", (PyCFunction)math_log, METH_VARARGS, math_log__doc__}, static PyObject * -math_log_impl(PyObject *module, PyObject *x, PyObject *base); +math_log_impl(PyObject *module, PyObject *x, int group_right_1, + PyObject *base); static PyObject * -math_log(PyObject *module, PyObject *const *args, Py_ssize_t nargs) +math_log(PyObject *module, PyObject *args) { PyObject *return_value = NULL; PyObject *x; - PyObject *base = Py_None; + int group_right_1 = 0; + PyObject *base = NULL; - if (!_PyArg_CheckPositional("log", nargs, 1, 2)) { - goto exit; - } - x = args[0]; - if (nargs < 2) { - goto skip_optional; + switch (PyTuple_GET_SIZE(args)) { + case 1: + if (!PyArg_ParseTuple(args, "O:log", &x)) { + goto exit; + } + break; + case 2: + if (!PyArg_ParseTuple(args, "OO:log", &x, &base)) { + goto exit; + } + group_right_1 = 1; + break; + default: + PyErr_SetString(PyExc_TypeError, "math.log requires 1 to 2 arguments"); + goto exit; } - base = args[1]; -skip_optional: - return_value = math_log_impl(module, x, base); + return_value = math_log_impl(module, x, group_right_1, base); exit: return return_value; @@ -948,4 +954,4 @@ math_ulp(PyObject *module, PyObject *arg) exit: return return_value; } -/*[clinic end generated code: output=afec63ebb0da709a input=a9049054013a1b77]*/ +/*[clinic end generated code: output=899211ec70e4506c input=a9049054013a1b77]*/ diff --git a/Modules/clinic/posixmodule.c.h b/Modules/clinic/posixmodule.c.h index d4722cc533cbab..5e04507ddd6917 100644 --- a/Modules/clinic/posixmodule.c.h +++ b/Modules/clinic/posixmodule.c.h @@ -1794,6 +1794,242 @@ os__path_splitroot(PyObject *module, PyObject *const *args, Py_ssize_t nargs, Py #endif /* defined(MS_WINDOWS) */ +#if defined(MS_WINDOWS) + +PyDoc_STRVAR(os__path_isdir__doc__, +"_path_isdir($module, /, path)\n" +"--\n" +"\n" +"Return true if the pathname refers to an existing directory."); + +#define OS__PATH_ISDIR_METHODDEF \ + {"_path_isdir", _PyCFunction_CAST(os__path_isdir), METH_FASTCALL|METH_KEYWORDS, os__path_isdir__doc__}, + +static PyObject * +os__path_isdir_impl(PyObject *module, PyObject *path); + +static PyObject * +os__path_isdir(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 1 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(path), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"path", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .fname = "_path_isdir", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + PyObject *argsbuf[1]; + PyObject *path; + + args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf); + if (!args) { + goto exit; + } + path = args[0]; + return_value = os__path_isdir_impl(module, path); + +exit: + return return_value; +} + +#endif /* defined(MS_WINDOWS) */ + +#if defined(MS_WINDOWS) + +PyDoc_STRVAR(os__path_isfile__doc__, +"_path_isfile($module, /, path)\n" +"--\n" +"\n" +"Test whether a path is a regular file"); + +#define OS__PATH_ISFILE_METHODDEF \ + {"_path_isfile", 
_PyCFunction_CAST(os__path_isfile), METH_FASTCALL|METH_KEYWORDS, os__path_isfile__doc__}, + +static PyObject * +os__path_isfile_impl(PyObject *module, PyObject *path); + +static PyObject * +os__path_isfile(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 1 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(path), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"path", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .fname = "_path_isfile", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + PyObject *argsbuf[1]; + PyObject *path; + + args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf); + if (!args) { + goto exit; + } + path = args[0]; + return_value = os__path_isfile_impl(module, path); + +exit: + return return_value; +} + +#endif /* defined(MS_WINDOWS) */ + +#if defined(MS_WINDOWS) + +PyDoc_STRVAR(os__path_exists__doc__, +"_path_exists($module, /, path)\n" +"--\n" +"\n" +"Test whether a path exists. Returns False for broken symbolic links"); + +#define OS__PATH_EXISTS_METHODDEF \ + {"_path_exists", _PyCFunction_CAST(os__path_exists), METH_FASTCALL|METH_KEYWORDS, os__path_exists__doc__}, + +static PyObject * +os__path_exists_impl(PyObject *module, PyObject *path); + +static PyObject * +os__path_exists(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 1 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + .ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(path), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"path", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .fname = "_path_exists", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + PyObject *argsbuf[1]; + PyObject *path; + + args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf); + if (!args) { + goto exit; + } + path = args[0]; + return_value = os__path_exists_impl(module, path); + +exit: + return return_value; +} + +#endif /* defined(MS_WINDOWS) */ + +#if defined(MS_WINDOWS) + +PyDoc_STRVAR(os__path_islink__doc__, +"_path_islink($module, /, path)\n" +"--\n" +"\n" +"Test whether a path is a symbolic link"); + +#define OS__PATH_ISLINK_METHODDEF \ + {"_path_islink", _PyCFunction_CAST(os__path_islink), METH_FASTCALL|METH_KEYWORDS, os__path_islink__doc__}, + +static PyObject * +os__path_islink_impl(PyObject *module, PyObject *path); + +static PyObject * +os__path_islink(PyObject *module, PyObject *const *args, Py_ssize_t nargs, PyObject *kwnames) +{ + PyObject *return_value = NULL; + #if defined(Py_BUILD_CORE) && !defined(Py_BUILD_CORE_MODULE) + + #define NUM_KEYWORDS 1 + static struct { + PyGC_Head _this_is_not_used; + PyObject_VAR_HEAD + PyObject *ob_item[NUM_KEYWORDS]; + } _kwtuple = { + 
.ob_base = PyVarObject_HEAD_INIT(&PyTuple_Type, NUM_KEYWORDS) + .ob_item = { &_Py_ID(path), }, + }; + #undef NUM_KEYWORDS + #define KWTUPLE (&_kwtuple.ob_base.ob_base) + + #else // !Py_BUILD_CORE + # define KWTUPLE NULL + #endif // !Py_BUILD_CORE + + static const char * const _keywords[] = {"path", NULL}; + static _PyArg_Parser _parser = { + .keywords = _keywords, + .fname = "_path_islink", + .kwtuple = KWTUPLE, + }; + #undef KWTUPLE + PyObject *argsbuf[1]; + PyObject *path; + + args = _PyArg_UnpackKeywords(args, nargs, NULL, kwnames, &_parser, 1, 1, 0, argsbuf); + if (!args) { + goto exit; + } + path = args[0]; + return_value = os__path_islink_impl(module, path); + +exit: + return return_value; +} + +#endif /* defined(MS_WINDOWS) */ + PyDoc_STRVAR(os__path_normpath__doc__, "_path_normpath($module, /, path)\n" "--\n" @@ -11041,6 +11277,22 @@ os_waitstatus_to_exitcode(PyObject *module, PyObject *const *args, Py_ssize_t na #define OS__PATH_SPLITROOT_METHODDEF #endif /* !defined(OS__PATH_SPLITROOT_METHODDEF) */ +#ifndef OS__PATH_ISDIR_METHODDEF + #define OS__PATH_ISDIR_METHODDEF +#endif /* !defined(OS__PATH_ISDIR_METHODDEF) */ + +#ifndef OS__PATH_ISFILE_METHODDEF + #define OS__PATH_ISFILE_METHODDEF +#endif /* !defined(OS__PATH_ISFILE_METHODDEF) */ + +#ifndef OS__PATH_EXISTS_METHODDEF + #define OS__PATH_EXISTS_METHODDEF +#endif /* !defined(OS__PATH_EXISTS_METHODDEF) */ + +#ifndef OS__PATH_ISLINK_METHODDEF + #define OS__PATH_ISLINK_METHODDEF +#endif /* !defined(OS__PATH_ISLINK_METHODDEF) */ + #ifndef OS_NICE_METHODDEF #define OS_NICE_METHODDEF #endif /* !defined(OS_NICE_METHODDEF) */ @@ -11560,4 +11812,4 @@ os_waitstatus_to_exitcode(PyObject *module, PyObject *const *args, Py_ssize_t na #ifndef OS_WAITSTATUS_TO_EXITCODE_METHODDEF #define OS_WAITSTATUS_TO_EXITCODE_METHODDEF #endif /* !defined(OS_WAITSTATUS_TO_EXITCODE_METHODDEF) */ -/*[clinic end generated code: output=41eab6c3523792a9 input=a9049054013a1b77]*/ +/*[clinic end generated code: output=a3f76228b549e8ec input=a9049054013a1b77]*/ diff --git a/Modules/cmathmodule.c b/Modules/cmathmodule.c index 62caba031eda27..2038ac26e65857 100644 --- a/Modules/cmathmodule.c +++ b/Modules/cmathmodule.c @@ -952,24 +952,23 @@ cmath_tanh_impl(PyObject *module, Py_complex z) cmath.log z as x: Py_complex - base as y_obj: object = None + base as y_obj: object = NULL / log(z[, base]) -> the logarithm of z to the given base. -If the base is not specified or is None, returns the -natural logarithm (base e) of z. +If the base not specified, returns the natural logarithm (base e) of z. 
[clinic start generated code]*/ static PyObject * cmath_log_impl(PyObject *module, Py_complex x, PyObject *y_obj) -/*[clinic end generated code: output=4effdb7d258e0d94 input=e7db51859ebf70bf]*/ +/*[clinic end generated code: output=4effdb7d258e0d94 input=230ed3a71ecd000a]*/ { Py_complex y; errno = 0; x = c_log(x); - if (y_obj != Py_None) { + if (y_obj != NULL) { y = PyComplex_AsCComplex(y_obj); if (PyErr_Occurred()) { return NULL; diff --git a/Modules/gcmodule.c b/Modules/gcmodule.c index 6630faa6f4471d..5879c5e11fe14a 100644 --- a/Modules/gcmodule.c +++ b/Modules/gcmodule.c @@ -2082,11 +2082,10 @@ PyGC_Collect(void) n = 0; } else { - PyObject *exc, *value, *tb; gcstate->collecting = 1; - _PyErr_Fetch(tstate, &exc, &value, &tb); + PyObject *exc = _PyErr_GetRaisedException(tstate); n = gc_collect_with_callback(tstate, NUM_GENERATIONS - 1); - _PyErr_Restore(tstate, exc, value, tb); + _PyErr_SetRaisedException(tstate, exc); gcstate->collecting = 0; } diff --git a/Modules/getpath.py b/Modules/getpath.py index 6a0883878778a5..9913fcba497d30 100644 --- a/Modules/getpath.py +++ b/Modules/getpath.py @@ -582,7 +582,7 @@ def search_up(prefix, *landmarks, test=isfile): # Detect prefix by searching from our executable location for the stdlib_dir if STDLIB_SUBDIR and STDLIB_LANDMARKS and executable_dir and not prefix: prefix = search_up(executable_dir, *STDLIB_LANDMARKS) - if prefix: + if prefix and not stdlib_dir: stdlib_dir = joinpath(prefix, STDLIB_SUBDIR) if PREFIX and not prefix: @@ -631,20 +631,6 @@ def search_up(prefix, *landmarks, test=isfile): warn('Consider setting $PYTHONHOME to [:]') -# If we haven't set [plat]stdlib_dir already, set them now -if not stdlib_dir: - if prefix: - stdlib_dir = joinpath(prefix, STDLIB_SUBDIR) - else: - stdlib_dir = '' - -if not platstdlib_dir: - if exec_prefix: - platstdlib_dir = joinpath(exec_prefix, PLATSTDLIB_LANDMARK) - else: - platstdlib_dir = '' - - # For a venv, update the main prefix/exec_prefix but leave the base ones unchanged # XXX: We currently do not update prefix here, but it happens in site.py #if venv_prefix: @@ -706,8 +692,9 @@ def search_up(prefix, *landmarks, test=isfile): pythonpath.extend(v.split(DELIM)) i += 1 # Paths from the core key get appended last, but only - # when home was not set and we aren't in a build dir - if not home_was_set and not venv_prefix and not build_prefix: + # when home was not set and we haven't found our stdlib + # some other way. 
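+            # (In other words, the registry value is a last resort: an
+            # explicit home or an already-located stdlib always wins.)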
+ if not home and not stdlib_dir: v = winreg.QueryValue(key, None) if isinstance(v, str): pythonpath.extend(v.split(DELIM)) @@ -722,6 +709,11 @@ def search_up(prefix, *landmarks, test=isfile): pythonpath.append(joinpath(prefix, p)) # Then add stdlib_dir and platstdlib_dir + if not stdlib_dir and prefix: + stdlib_dir = joinpath(prefix, STDLIB_SUBDIR) + if not platstdlib_dir and exec_prefix: + platstdlib_dir = joinpath(exec_prefix, PLATSTDLIB_LANDMARK) + if os_name == 'nt': # QUIRK: Windows generates paths differently if platstdlib_dir: @@ -792,5 +784,6 @@ def search_up(prefix, *landmarks, test=isfile): config['base_exec_prefix'] = base_exec_prefix or exec_prefix config['platlibdir'] = platlibdir -config['stdlib_dir'] = stdlib_dir -config['platstdlib_dir'] = platstdlib_dir +# test_embed expects empty strings, not None +config['stdlib_dir'] = stdlib_dir or '' +config['platstdlib_dir'] = platstdlib_dir or '' diff --git a/Modules/itertoolsmodule.c b/Modules/itertoolsmodule.c index a2ee48228b5847..ce8720d0fd9228 100644 --- a/Modules/itertoolsmodule.c +++ b/Modules/itertoolsmodule.c @@ -12,8 +12,22 @@ */ typedef struct { + PyTypeObject *accumulate_type; + PyTypeObject *combinations_type; + PyTypeObject *compress_type; + PyTypeObject *count_type; + PyTypeObject *cwr_type; + PyTypeObject *cycle_type; + PyTypeObject *dropwhile_type; + PyTypeObject *filterfalse_type; PyTypeObject *groupby_type; PyTypeObject *_grouper_type; + PyTypeObject *pairwise_type; + PyTypeObject *permutations_type; + PyTypeObject *product_type; + PyTypeObject *starmap_type; + PyTypeObject *takewhile_type; + PyTypeObject *ziplongest_type; } itertools_state; static inline itertools_state * @@ -41,7 +55,6 @@ find_state_by_type(PyTypeObject *tp) assert(mod != NULL); return get_module_state(mod); } -#define clinic_state() (find_state_by_type(type)) /*[clinic input] module itertools @@ -50,38 +63,27 @@ class itertools._grouper "_grouperobject *" "clinic_state()->_grouper_type" class itertools.teedataobject "teedataobject *" "&teedataobject_type" class itertools._tee "teeobject *" "&tee_type" class itertools.batched "batchedobject *" "&batched_type" -class itertools.cycle "cycleobject *" "&cycle_type" -class itertools.dropwhile "dropwhileobject *" "&dropwhile_type" -class itertools.takewhile "takewhileobject *" "&takewhile_type" -class itertools.starmap "starmapobject *" "&starmap_type" +class itertools.cycle "cycleobject *" "clinic_state()->cycle_type" +class itertools.dropwhile "dropwhileobject *" "clinic_state()->dropwhile_type" +class itertools.takewhile "takewhileobject *" "clinic_state()->takewhile_type" +class itertools.starmap "starmapobject *" "clinic_state()->starmap_type" class itertools.chain "chainobject *" "&chain_type" -class itertools.combinations "combinationsobject *" "&combinations_type" -class itertools.combinations_with_replacement "cwr_object *" "&cwr_type" -class itertools.permutations "permutationsobject *" "&permutations_type" -class itertools.accumulate "accumulateobject *" "&accumulate_type" -class itertools.compress "compressobject *" "&compress_type" -class itertools.filterfalse "filterfalseobject *" "&filterfalse_type" -class itertools.count "countobject *" "&count_type" -class itertools.pairwise "pairwiseobject *" "&pairwise_type" +class itertools.combinations "combinationsobject *" "clinic_state()->combinations_type" +class itertools.combinations_with_replacement "cwr_object *" "clinic_state()->cwr_type" +class itertools.permutations "permutationsobject *" "clinic_state()->permutations_type" +class 
itertools.accumulate "accumulateobject *" "clinic_state()->accumulate_type" +class itertools.compress "compressobject *" "clinic_state()->compress_type" +class itertools.filterfalse "filterfalseobject *" "clinic_state()->filterfalse_type" +class itertools.count "countobject *" "clinic_state()->count_type" +class itertools.pairwise "pairwiseobject *" "clinic_state()->pairwise_type" [clinic start generated code]*/ -/*[clinic end generated code: output=da39a3ee5e6b4b0d input=424108522584b55b]*/ +/*[clinic end generated code: output=da39a3ee5e6b4b0d input=28ffff5c0c93eed7]*/ static PyTypeObject teedataobject_type; static PyTypeObject tee_type; static PyTypeObject batched_type; -static PyTypeObject cycle_type; -static PyTypeObject dropwhile_type; -static PyTypeObject takewhile_type; -static PyTypeObject starmap_type; -static PyTypeObject combinations_type; -static PyTypeObject cwr_type; -static PyTypeObject permutations_type; -static PyTypeObject accumulate_type; -static PyTypeObject compress_type; -static PyTypeObject filterfalse_type; -static PyTypeObject count_type; -static PyTypeObject pairwise_type; +#define clinic_state() (find_state_by_type(type)) #define clinic_state_by_cls() (get_module_state_by_cls(base_tp)) #include "clinic/itertoolsmodule.c.h" #undef clinic_state_by_cls @@ -308,15 +310,18 @@ pairwise_new_impl(PyTypeObject *type, PyObject *iterable) static void pairwise_dealloc(pairwiseobject *po) { + PyTypeObject *tp = Py_TYPE(po); PyObject_GC_UnTrack(po); Py_XDECREF(po->it); Py_XDECREF(po->old); - Py_TYPE(po)->tp_free(po); + tp->tp_free(po); + Py_DECREF(tp); } static int pairwise_traverse(pairwiseobject *po, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(po)); Py_VISIT(po->it); Py_VISIT(po->old); return 0; @@ -351,48 +356,25 @@ pairwise_next(pairwiseobject *po) return result; } -static PyTypeObject pairwise_type = { - PyVarObject_HEAD_INIT(&PyType_Type, 0) - "itertools.pairwise", /* tp_name */ - sizeof(pairwiseobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)pairwise_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - pairwise_new__doc__, /* tp_doc */ - (traverseproc)pairwise_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)pairwise_next, /* tp_iternext */ - 0, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - PyType_GenericAlloc, /* tp_alloc */ - pairwise_new, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot pairwise_slots[] = { + {Py_tp_dealloc, pairwise_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)pairwise_new__doc__}, + {Py_tp_traverse, pairwise_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, pairwise_next}, + {Py_tp_alloc, PyType_GenericAlloc}, + {Py_tp_new, pairwise_new}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec pairwise_spec = { + .name = "itertools.pairwise", + .basicsize = sizeof(pairwiseobject), + .flags = 
(Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = pairwise_slots, }; @@ -1286,15 +1268,18 @@ itertools_cycle_impl(PyTypeObject *type, PyObject *iterable) static void cycle_dealloc(cycleobject *lz) { + PyTypeObject *tp = Py_TYPE(lz); PyObject_GC_UnTrack(lz); Py_XDECREF(lz->it); Py_XDECREF(lz->saved); - Py_TYPE(lz)->tp_free(lz); + tp->tp_free(lz); + Py_DECREF(tp); } static int cycle_traverse(cycleobject *lz, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(lz)); Py_VISIT(lz->it); Py_VISIT(lz->saved); return 0; @@ -1381,48 +1366,25 @@ static PyMethodDef cycle_methods[] = { {NULL, NULL} /* sentinel */ }; -static PyTypeObject cycle_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.cycle", /* tp_name */ - sizeof(cycleobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)cycle_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - itertools_cycle__doc__, /* tp_doc */ - (traverseproc)cycle_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)cycle_next, /* tp_iternext */ - cycle_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - itertools_cycle, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot cycle_slots[] = { + {Py_tp_dealloc, cycle_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)itertools_cycle__doc__}, + {Py_tp_traverse, cycle_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, cycle_next}, + {Py_tp_methods, cycle_methods}, + {Py_tp_new, itertools_cycle}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec cycle_spec = { + .name = "itertools.cycle", + .basicsize = sizeof(cycleobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = cycle_slots, }; @@ -1474,15 +1436,18 @@ itertools_dropwhile_impl(PyTypeObject *type, PyObject *func, PyObject *seq) static void dropwhile_dealloc(dropwhileobject *lz) { + PyTypeObject *tp = Py_TYPE(lz); PyObject_GC_UnTrack(lz); Py_XDECREF(lz->func); Py_XDECREF(lz->it); - Py_TYPE(lz)->tp_free(lz); + tp->tp_free(lz); + Py_DECREF(tp); } static int dropwhile_traverse(dropwhileobject *lz, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(lz)); Py_VISIT(lz->it); Py_VISIT(lz->func); return 0; @@ -1545,48 +1510,25 @@ static PyMethodDef dropwhile_methods[] = { {NULL, NULL} /* sentinel */ }; -static PyTypeObject dropwhile_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.dropwhile", /* tp_name */ - sizeof(dropwhileobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)dropwhile_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 
PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - itertools_dropwhile__doc__, /* tp_doc */ - (traverseproc)dropwhile_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)dropwhile_next, /* tp_iternext */ - dropwhile_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - itertools_dropwhile, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot dropwhile_slots[] = { + {Py_tp_dealloc, dropwhile_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)itertools_dropwhile__doc__}, + {Py_tp_traverse, dropwhile_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, dropwhile_next}, + {Py_tp_methods, dropwhile_methods}, + {Py_tp_new, itertools_dropwhile}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec dropwhile_spec = { + .name = "itertools.dropwhile", + .basicsize = sizeof(dropwhileobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = dropwhile_slots, }; @@ -1636,15 +1578,18 @@ itertools_takewhile_impl(PyTypeObject *type, PyObject *func, PyObject *seq) static void takewhile_dealloc(takewhileobject *lz) { + PyTypeObject *tp = Py_TYPE(lz); PyObject_GC_UnTrack(lz); Py_XDECREF(lz->func); Py_XDECREF(lz->it); - Py_TYPE(lz)->tp_free(lz); + tp->tp_free(lz); + Py_DECREF(tp); } static int takewhile_traverse(takewhileobject *lz, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(lz)); Py_VISIT(lz->it); Py_VISIT(lz->func); return 0; @@ -1704,48 +1649,25 @@ static PyMethodDef takewhile_reduce_methods[] = { {NULL, NULL} /* sentinel */ }; -static PyTypeObject takewhile_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.takewhile", /* tp_name */ - sizeof(takewhileobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)takewhile_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - itertools_takewhile__doc__, /* tp_doc */ - (traverseproc)takewhile_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)takewhile_next, /* tp_iternext */ - takewhile_reduce_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - itertools_takewhile, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot takewhile_slots[] = { + {Py_tp_dealloc, takewhile_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)itertools_takewhile__doc__}, + {Py_tp_traverse, takewhile_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, takewhile_next}, + {Py_tp_methods, takewhile_reduce_methods}, + {Py_tp_new, 
itertools_takewhile}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec takewhile_spec = { + .name = "itertools.takewhile", + .basicsize = sizeof(takewhileobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = takewhile_slots, }; @@ -2052,15 +1974,18 @@ itertools_starmap_impl(PyTypeObject *type, PyObject *func, PyObject *seq) static void starmap_dealloc(starmapobject *lz) { + PyTypeObject *tp = Py_TYPE(lz); PyObject_GC_UnTrack(lz); Py_XDECREF(lz->func); Py_XDECREF(lz->it); - Py_TYPE(lz)->tp_free(lz); + tp->tp_free(lz); + Py_DECREF(tp); } static int starmap_traverse(starmapobject *lz, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(lz)); Py_VISIT(lz->it); Py_VISIT(lz->func); return 0; @@ -2101,48 +2026,25 @@ static PyMethodDef starmap_methods[] = { {NULL, NULL} /* sentinel */ }; -static PyTypeObject starmap_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.starmap", /* tp_name */ - sizeof(starmapobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)starmap_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - itertools_starmap__doc__, /* tp_doc */ - (traverseproc)starmap_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)starmap_next, /* tp_iternext */ - starmap_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - itertools_starmap, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot starmap_slots[] = { + {Py_tp_dealloc, starmap_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)itertools_starmap__doc__}, + {Py_tp_traverse, starmap_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, starmap_next}, + {Py_tp_methods, starmap_methods}, + {Py_tp_new, itertools_starmap}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec starmap_spec = { + .name = "itertools.starmap", + .basicsize = sizeof(starmapobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = starmap_slots, }; @@ -2380,8 +2282,6 @@ typedef struct { int stopped; /* set to 1 when the iterator is exhausted */ } productobject; -static PyTypeObject product_type; - static PyObject * product_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { @@ -2468,12 +2368,14 @@ product_new(PyTypeObject *type, PyObject *args, PyObject *kwds) static void product_dealloc(productobject *lz) { + PyTypeObject *tp = Py_TYPE(lz); PyObject_GC_UnTrack(lz); Py_XDECREF(lz->pools); Py_XDECREF(lz->result); if (lz->indices != NULL) PyMem_Free(lz->indices); - Py_TYPE(lz)->tp_free(lz); + tp->tp_free(lz); + Py_DECREF(tp); } static PyObject * @@ -2489,6 +2391,7 @@ PyDoc_STRVAR(sizeof_doc, "Returns size in memory, in bytes."); static int product_traverse(productobject *lz, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(lz)); Py_VISIT(lz->pools); 
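+    /* With the move from static types to heap types created from specs,
+     * each instance owns a strong reference to its type: tp_traverse now
+     * visits Py_TYPE(self) (see above) and tp_dealloc releases that
+     * reference after calling tp_free. */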
Py_VISIT(lz->result); return 0; @@ -2680,51 +2583,28 @@ product(A, repeat=4) means the same as product(A, A, A, A).\n\n\ product('ab', range(3)) --> ('a',0) ('a',1) ('a',2) ('b',0) ('b',1) ('b',2)\n\ product((0,1), (0,1), (0,1)) --> (0,0,0) (0,0,1) (0,1,0) (0,1,1) (1,0,0) ..."); -static PyTypeObject product_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.product", /* tp_name */ - sizeof(productobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)product_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - product_doc, /* tp_doc */ - (traverseproc)product_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)product_next, /* tp_iternext */ - product_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - product_new, /* tp_new */ - PyObject_GC_Del, /* tp_free */ -}; - - +static PyType_Slot product_slots[] = { + {Py_tp_dealloc, product_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)product_doc}, + {Py_tp_traverse, product_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, product_next}, + {Py_tp_methods, product_methods}, + {Py_tp_new, product_new}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec product_spec = { + .name = "itertools.product", + .basicsize = sizeof(productobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = product_slots, +}; + + /* combinations object *******************************************************/ typedef struct { @@ -2799,12 +2679,14 @@ itertools_combinations_impl(PyTypeObject *type, PyObject *iterable, static void combinations_dealloc(combinationsobject *co) { + PyTypeObject *tp = Py_TYPE(co); PyObject_GC_UnTrack(co); Py_XDECREF(co->pool); Py_XDECREF(co->result); if (co->indices != NULL) PyMem_Free(co->indices); - Py_TYPE(co)->tp_free(co); + tp->tp_free(co); + Py_DECREF(tp); } static PyObject * @@ -2818,6 +2700,7 @@ combinations_sizeof(combinationsobject *co, void *unused) static int combinations_traverse(combinationsobject *co, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(co)); Py_VISIT(co->pool); Py_VISIT(co->result); return 0; @@ -2988,48 +2871,25 @@ static PyMethodDef combinations_methods[] = { {NULL, NULL} /* sentinel */ }; -static PyTypeObject combinations_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.combinations", /* tp_name */ - sizeof(combinationsobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)combinations_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - 
Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - itertools_combinations__doc__, /* tp_doc */ - (traverseproc)combinations_traverse,/* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)combinations_next, /* tp_iternext */ - combinations_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - itertools_combinations, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot combinations_slots[] = { + {Py_tp_dealloc, combinations_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)itertools_combinations__doc__}, + {Py_tp_traverse, combinations_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, combinations_next}, + {Py_tp_methods, combinations_methods}, + {Py_tp_new, itertools_combinations}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec combinations_spec = { + .name = "itertools.combinations", + .basicsize = sizeof(combinationsobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = combinations_slots, }; @@ -3133,12 +2993,14 @@ itertools_combinations_with_replacement_impl(PyTypeObject *type, static void cwr_dealloc(cwrobject *co) { + PyTypeObject *tp = Py_TYPE(co); PyObject_GC_UnTrack(co); Py_XDECREF(co->pool); Py_XDECREF(co->result); if (co->indices != NULL) PyMem_Free(co->indices); - Py_TYPE(co)->tp_free(co); + tp->tp_free(co); + Py_DECREF(tp); } static PyObject * @@ -3152,6 +3014,7 @@ cwr_sizeof(cwrobject *co, void *unused) static int cwr_traverse(cwrobject *co, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(co)); Py_VISIT(co->pool); Py_VISIT(co->result); return 0; @@ -3312,48 +3175,25 @@ static PyMethodDef cwr_methods[] = { {NULL, NULL} /* sentinel */ }; -static PyTypeObject cwr_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.combinations_with_replacement", /* tp_name */ - sizeof(cwrobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)cwr_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - itertools_combinations_with_replacement__doc__, /* tp_doc */ - (traverseproc)cwr_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)cwr_next, /* tp_iternext */ - cwr_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - itertools_combinations_with_replacement, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot cwr_slots[] = { + {Py_tp_dealloc, cwr_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)itertools_combinations_with_replacement__doc__}, + {Py_tp_traverse, cwr_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, cwr_next}, + 
{Py_tp_methods, cwr_methods}, + {Py_tp_new, itertools_combinations_with_replacement}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec cwr_spec = { + .name = "itertools.combinations_with_replacement", + .basicsize = sizeof(cwrobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = cwr_slots, }; @@ -3476,12 +3316,14 @@ itertools_permutations_impl(PyTypeObject *type, PyObject *iterable, static void permutations_dealloc(permutationsobject *po) { + PyTypeObject *tp = Py_TYPE(po); PyObject_GC_UnTrack(po); Py_XDECREF(po->pool); Py_XDECREF(po->result); PyMem_Free(po->indices); PyMem_Free(po->cycles); - Py_TYPE(po)->tp_free(po); + tp->tp_free(po); + Py_DECREF(tp); } static PyObject * @@ -3496,6 +3338,7 @@ permutations_sizeof(permutationsobject *po, void *unused) static int permutations_traverse(permutationsobject *po, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(po)); Py_VISIT(po->pool); Py_VISIT(po->result); return 0; @@ -3701,48 +3544,25 @@ static PyMethodDef permuations_methods[] = { {NULL, NULL} /* sentinel */ }; -static PyTypeObject permutations_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.permutations", /* tp_name */ - sizeof(permutationsobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)permutations_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - itertools_permutations__doc__, /* tp_doc */ - (traverseproc)permutations_traverse,/* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)permutations_next, /* tp_iternext */ - permuations_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - itertools_permutations, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot permutations_slots[] = { + {Py_tp_dealloc, permutations_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)itertools_permutations__doc__}, + {Py_tp_traverse, permutations_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, permutations_next}, + {Py_tp_methods, permuations_methods}, + {Py_tp_new, itertools_permutations}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec permutations_spec = { + .name = "itertools.permutations", + .basicsize = sizeof(permutationsobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = permutations_slots, }; @@ -3798,17 +3618,20 @@ itertools_accumulate_impl(PyTypeObject *type, PyObject *iterable, static void accumulate_dealloc(accumulateobject *lz) { + PyTypeObject *tp = Py_TYPE(lz); PyObject_GC_UnTrack(lz); Py_XDECREF(lz->binop); Py_XDECREF(lz->total); Py_XDECREF(lz->it); Py_XDECREF(lz->initial); - Py_TYPE(lz)->tp_free(lz); + tp->tp_free(lz); + Py_DECREF(tp); } static int accumulate_traverse(accumulateobject *lz, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(lz)); 
Py_VISIT(lz->binop); Py_VISIT(lz->it); Py_VISIT(lz->total); @@ -3902,48 +3725,25 @@ static PyMethodDef accumulate_methods[] = { {NULL, NULL} /* sentinel */ }; -static PyTypeObject accumulate_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.accumulate", /* tp_name */ - sizeof(accumulateobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)accumulate_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - itertools_accumulate__doc__, /* tp_doc */ - (traverseproc)accumulate_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)accumulate_next, /* tp_iternext */ - accumulate_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - itertools_accumulate, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot accumulate_slots[] = { + {Py_tp_dealloc, accumulate_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)itertools_accumulate__doc__}, + {Py_tp_traverse, accumulate_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, accumulate_next}, + {Py_tp_methods, accumulate_methods}, + {Py_tp_new, itertools_accumulate}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec accumulate_spec = { + .name = "itertools.accumulate", + .basicsize = sizeof(accumulateobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = accumulate_slots, }; @@ -4004,15 +3804,18 @@ itertools_compress_impl(PyTypeObject *type, PyObject *seq1, PyObject *seq2) static void compress_dealloc(compressobject *lz) { + PyTypeObject *tp = Py_TYPE(lz); PyObject_GC_UnTrack(lz); Py_XDECREF(lz->data); Py_XDECREF(lz->selectors); - Py_TYPE(lz)->tp_free(lz); + tp->tp_free(lz); + Py_DECREF(tp); } static int compress_traverse(compressobject *lz, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(lz)); Py_VISIT(lz->data); Py_VISIT(lz->selectors); return 0; @@ -4067,48 +3870,25 @@ static PyMethodDef compress_methods[] = { {NULL, NULL} /* sentinel */ }; -static PyTypeObject compress_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.compress", /* tp_name */ - sizeof(compressobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)compress_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - itertools_compress__doc__, /* tp_doc */ - (traverseproc)compress_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)compress_next, /* 
tp_iternext */ - compress_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - itertools_compress, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot compress_slots[] = { + {Py_tp_dealloc, compress_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)itertools_compress__doc__}, + {Py_tp_traverse, compress_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, compress_next}, + {Py_tp_methods, compress_methods}, + {Py_tp_new, itertools_compress}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec compress_spec = { + .name = "itertools.compress", + .basicsize = sizeof(compressobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = compress_slots, }; @@ -4158,15 +3938,18 @@ itertools_filterfalse_impl(PyTypeObject *type, PyObject *func, PyObject *seq) static void filterfalse_dealloc(filterfalseobject *lz) { + PyTypeObject *tp = Py_TYPE(lz); PyObject_GC_UnTrack(lz); Py_XDECREF(lz->func); Py_XDECREF(lz->it); - Py_TYPE(lz)->tp_free(lz); + tp->tp_free(lz); + Py_DECREF(tp); } static int filterfalse_traverse(filterfalseobject *lz, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(lz)); Py_VISIT(lz->it); Py_VISIT(lz->func); return 0; @@ -4218,48 +4001,25 @@ static PyMethodDef filterfalse_methods[] = { {NULL, NULL} /* sentinel */ }; -static PyTypeObject filterfalse_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.filterfalse", /* tp_name */ - sizeof(filterfalseobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)filterfalse_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - itertools_filterfalse__doc__, /* tp_doc */ - (traverseproc)filterfalse_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)filterfalse_next, /* tp_iternext */ - filterfalse_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - itertools_filterfalse, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot filterfalse_slots[] = { + {Py_tp_dealloc, filterfalse_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)itertools_filterfalse__doc__}, + {Py_tp_traverse, filterfalse_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, filterfalse_next}, + {Py_tp_methods, filterfalse_methods}, + {Py_tp_new, itertools_filterfalse}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec filterfalse_spec = { + .name = "itertools.filterfalse", + .basicsize = sizeof(filterfalseobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = filterfalse_slots, }; @@ -4385,15 +4145,18 @@ itertools_count_impl(PyTypeObject *type, PyObject 
*long_cnt, static void count_dealloc(countobject *lz) { + PyTypeObject *tp = Py_TYPE(lz); PyObject_GC_UnTrack(lz); Py_XDECREF(lz->long_cnt); Py_XDECREF(lz->long_step); - Py_TYPE(lz)->tp_free(lz); + tp->tp_free(lz); + Py_DECREF(tp); } static int count_traverse(countobject *lz, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(lz)); Py_VISIT(lz->long_cnt); Py_VISIT(lz->long_step); return 0; @@ -4467,48 +4230,26 @@ static PyMethodDef count_methods[] = { {NULL, NULL} /* sentinel */ }; -static PyTypeObject count_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.count", /* tp_name */ - sizeof(countobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)count_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - (reprfunc)count_repr, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - itertools_count__doc__, /* tp_doc */ - (traverseproc)count_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)count_next, /* tp_iternext */ - count_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - itertools_count, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot count_slots[] = { + {Py_tp_dealloc, count_dealloc}, + {Py_tp_repr, count_repr}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)itertools_count__doc__}, + {Py_tp_traverse, count_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, count_next}, + {Py_tp_methods, count_methods}, + {Py_tp_new, itertools_count}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec count_spec = { + .name = "itertools.count", + .basicsize = sizeof(countobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = count_slots, }; @@ -4676,8 +4417,6 @@ typedef struct { PyObject *fillvalue; } ziplongestobject; -static PyTypeObject ziplongest_type; - static PyObject * zip_longest_new(PyTypeObject *type, PyObject *args, PyObject *kwds) { @@ -4749,16 +4488,19 @@ zip_longest_new(PyTypeObject *type, PyObject *args, PyObject *kwds) static void zip_longest_dealloc(ziplongestobject *lz) { + PyTypeObject *tp = Py_TYPE(lz); PyObject_GC_UnTrack(lz); Py_XDECREF(lz->ittuple); Py_XDECREF(lz->result); Py_XDECREF(lz->fillvalue); - Py_TYPE(lz)->tp_free(lz); + tp->tp_free(lz); + Py_DECREF(tp); } static int zip_longest_traverse(ziplongestobject *lz, visitproc visit, void *arg) { + Py_VISIT(Py_TYPE(lz)); Py_VISIT(lz->ittuple); Py_VISIT(lz->result); Py_VISIT(lz->fillvalue); @@ -4892,48 +4634,25 @@ are exhausted, the fillvalue is substituted in their place. 
The fillvalue\n\ defaults to None or can be specified by a keyword argument.\n\ "); -static PyTypeObject ziplongest_type = { - PyVarObject_HEAD_INIT(NULL, 0) - "itertools.zip_longest", /* tp_name */ - sizeof(ziplongestobject), /* tp_basicsize */ - 0, /* tp_itemsize */ - /* methods */ - (destructor)zip_longest_dealloc, /* tp_dealloc */ - 0, /* tp_vectorcall_offset */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_as_async */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - PyObject_GenericGetAttr, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | - Py_TPFLAGS_BASETYPE, /* tp_flags */ - zip_longest_doc, /* tp_doc */ - (traverseproc)zip_longest_traverse, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - PyObject_SelfIter, /* tp_iter */ - (iternextfunc)zip_longest_next, /* tp_iternext */ - zip_longest_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - 0, /* tp_init */ - 0, /* tp_alloc */ - zip_longest_new, /* tp_new */ - PyObject_GC_Del, /* tp_free */ +static PyType_Slot ziplongest_slots[] = { + {Py_tp_dealloc, zip_longest_dealloc}, + {Py_tp_getattro, PyObject_GenericGetAttr}, + {Py_tp_doc, (void *)zip_longest_doc}, + {Py_tp_traverse, zip_longest_traverse}, + {Py_tp_iter, PyObject_SelfIter}, + {Py_tp_iternext, zip_longest_next}, + {Py_tp_methods, zip_longest_methods}, + {Py_tp_new, zip_longest_new}, + {Py_tp_free, PyObject_GC_Del}, + {0, NULL}, +}; + +static PyType_Spec ziplongest_spec = { + .name = "itertools.zip_longest", + .basicsize = sizeof(ziplongestobject), + .flags = (Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_IMMUTABLETYPE), + .slots = ziplongest_slots, }; @@ -4975,8 +4694,22 @@ static int itertoolsmodule_traverse(PyObject *mod, visitproc visit, void *arg) { itertools_state *state = get_module_state(mod); + Py_VISIT(state->accumulate_type); + Py_VISIT(state->combinations_type); + Py_VISIT(state->compress_type); + Py_VISIT(state->count_type); + Py_VISIT(state->cwr_type); + Py_VISIT(state->cycle_type); + Py_VISIT(state->dropwhile_type); + Py_VISIT(state->filterfalse_type); Py_VISIT(state->groupby_type); Py_VISIT(state->_grouper_type); + Py_VISIT(state->pairwise_type); + Py_VISIT(state->permutations_type); + Py_VISIT(state->product_type); + Py_VISIT(state->starmap_type); + Py_VISIT(state->takewhile_type); + Py_VISIT(state->ziplongest_type); return 0; } @@ -4984,8 +4717,22 @@ static int itertoolsmodule_clear(PyObject *mod) { itertools_state *state = get_module_state(mod); + Py_CLEAR(state->accumulate_type); + Py_CLEAR(state->combinations_type); + Py_CLEAR(state->compress_type); + Py_CLEAR(state->count_type); + Py_CLEAR(state->cwr_type); + Py_CLEAR(state->cycle_type); + Py_CLEAR(state->dropwhile_type); + Py_CLEAR(state->filterfalse_type); Py_CLEAR(state->groupby_type); Py_CLEAR(state->_grouper_type); + Py_CLEAR(state->pairwise_type); + Py_CLEAR(state->permutations_type); + Py_CLEAR(state->product_type); + Py_CLEAR(state->starmap_type); + Py_CLEAR(state->takewhile_type); + Py_CLEAR(state->ziplongest_type); return 0; } @@ -5010,27 +4757,27 @@ static int itertoolsmodule_exec(PyObject *mod) { itertools_state *state = get_module_state(mod); + ADD_TYPE(mod, state->accumulate_type, &accumulate_spec); + ADD_TYPE(mod, 
state->combinations_type, &combinations_spec); + ADD_TYPE(mod, state->compress_type, &compress_spec); + ADD_TYPE(mod, state->count_type, &count_spec); + ADD_TYPE(mod, state->cwr_type, &cwr_spec); + ADD_TYPE(mod, state->cycle_type, &cycle_spec); + ADD_TYPE(mod, state->dropwhile_type, &dropwhile_spec); + ADD_TYPE(mod, state->filterfalse_type, &filterfalse_spec); ADD_TYPE(mod, state->groupby_type, &groupby_spec); ADD_TYPE(mod, state->_grouper_type, &_grouper_spec); + ADD_TYPE(mod, state->pairwise_type, &pairwise_spec); + ADD_TYPE(mod, state->permutations_type, &permutations_spec); + ADD_TYPE(mod, state->product_type, &product_spec); + ADD_TYPE(mod, state->starmap_type, &starmap_spec); + ADD_TYPE(mod, state->takewhile_type, &takewhile_spec); + ADD_TYPE(mod, state->ziplongest_type, &ziplongest_spec); PyTypeObject *typelist[] = { - &accumulate_type, &batched_type, - &combinations_type, - &cwr_type, - &cycle_type, - &dropwhile_type, - &takewhile_type, &islice_type, - &starmap_type, &chain_type, - &compress_type, - &filterfalse_type, - &count_type, - &ziplongest_type, - &pairwise_type, - &permutations_type, - &product_type, &repeat_type, &tee_type, &teedataobject_type diff --git a/Modules/mathmodule.c b/Modules/mathmodule.c index e6cdb3bae1ecff..544560e8322c72 100644 --- a/Modules/mathmodule.c +++ b/Modules/mathmodule.c @@ -101,10 +101,6 @@ get_math_module_state(PyObject *module) static const double pi = 3.141592653589793238462643383279502884197; static const double logpi = 1.144729885849400174143427351353058711647; -#if !defined(HAVE_ERF) || !defined(HAVE_ERFC) -static const double sqrtpi = 1.772453850905516027298167483341145182798; -#endif /* !defined(HAVE_ERF) || !defined(HAVE_ERFC) */ - /* Version of PyFloat_AsDouble() with in-line fast paths for exact floats and integers. Gives a substantial @@ -162,7 +158,9 @@ m_sinpi(double x) return copysign(1.0, x)*r; } -/* Implementation of the real gamma function. In extensive but non-exhaustive +/* Implementation of the real gamma function. Kept here to work around + issues (see e.g. gh-70309) with quality of libm's tgamma/lgamma implementations + on various platforms (Windows, MacOS). In extensive but non-exhaustive random tests, this function proved accurate to within <= 10 ulps across the entire float domain. Note that accuracy may depend on the quality of the system math functions, the pow function in particular. Special cases @@ -458,163 +456,6 @@ m_lgamma(double x) return r; } -#if !defined(HAVE_ERF) || !defined(HAVE_ERFC) - -/* - Implementations of the error function erf(x) and the complementary error - function erfc(x). - - Method: we use a series approximation for erf for small x, and a continued - fraction approximation for erfc(x) for larger x; - combined with the relations erf(-x) = -erf(x) and erfc(x) = 1.0 - erf(x), - this gives us erf(x) and erfc(x) for all x. - - The series expansion used is: - - erf(x) = x*exp(-x*x)/sqrt(pi) * [ - 2/1 + 4/3 x**2 + 8/15 x**4 + 16/105 x**6 + ...] - - The coefficient of x**(2k-2) here is 4**k*factorial(k)/factorial(2*k). - This series converges well for smallish x, but slowly for larger x. - - The continued fraction expansion used is: - - erfc(x) = x*exp(-x*x)/sqrt(pi) * [1/(0.5 + x**2 -) 0.5/(2.5 + x**2 - ) - 3.0/(4.5 + x**2 - ) 7.5/(6.5 + x**2 - ) ...] - - after the first term, the general term has the form: - - k*(k-0.5)/(2*k+0.5 + x**2 - ...). - - This expansion converges fast for larger x, but convergence becomes - infinitely slow as x approaches 0.0. 
The (somewhat naive) continued - fraction evaluation algorithm used below also risks overflow for large x; - but for large x, erfc(x) == 0.0 to within machine precision. (For - example, erfc(30.0) is approximately 2.56e-393). - - Parameters: use series expansion for abs(x) < ERF_SERIES_CUTOFF and - continued fraction expansion for ERF_SERIES_CUTOFF <= abs(x) < - ERFC_CONTFRAC_CUTOFF. ERFC_SERIES_TERMS and ERFC_CONTFRAC_TERMS are the - numbers of terms to use for the relevant expansions. */ - -#define ERF_SERIES_CUTOFF 1.5 -#define ERF_SERIES_TERMS 25 -#define ERFC_CONTFRAC_CUTOFF 30.0 -#define ERFC_CONTFRAC_TERMS 50 - -/* - Error function, via power series. - - Given a finite float x, return an approximation to erf(x). - Converges reasonably fast for small x. -*/ - -static double -m_erf_series(double x) -{ - double x2, acc, fk, result; - int i, saved_errno; - - x2 = x * x; - acc = 0.0; - fk = (double)ERF_SERIES_TERMS + 0.5; - for (i = 0; i < ERF_SERIES_TERMS; i++) { - acc = 2.0 + x2 * acc / fk; - fk -= 1.0; - } - /* Make sure the exp call doesn't affect errno; - see m_erfc_contfrac for more. */ - saved_errno = errno; - result = acc * x * exp(-x2) / sqrtpi; - errno = saved_errno; - return result; -} - -/* - Complementary error function, via continued fraction expansion. - - Given a positive float x, return an approximation to erfc(x). Converges - reasonably fast for x large (say, x > 2.0), and should be safe from - overflow if x and nterms are not too large. On an IEEE 754 machine, with x - <= 30.0, we're safe up to nterms = 100. For x >= 30.0, erfc(x) is smaller - than the smallest representable nonzero float. */ - -static double -m_erfc_contfrac(double x) -{ - double x2, a, da, p, p_last, q, q_last, b, result; - int i, saved_errno; - - if (x >= ERFC_CONTFRAC_CUTOFF) - return 0.0; - - x2 = x*x; - a = 0.0; - da = 0.5; - p = 1.0; p_last = 0.0; - q = da + x2; q_last = 1.0; - for (i = 0; i < ERFC_CONTFRAC_TERMS; i++) { - double temp; - a += da; - da += 2.0; - b = da + x2; - temp = p; p = b*p - a*p_last; p_last = temp; - temp = q; q = b*q - a*q_last; q_last = temp; - } - /* Issue #8986: On some platforms, exp sets errno on underflow to zero; - save the current errno value so that we can restore it later. */ - saved_errno = errno; - result = p / q * x * exp(-x2) / sqrtpi; - errno = saved_errno; - return result; -} - -#endif /* !defined(HAVE_ERF) || !defined(HAVE_ERFC) */ - -/* Error function erf(x), for general x */ - -static double -m_erf(double x) -{ -#ifdef HAVE_ERF - return erf(x); -#else - double absx, cf; - - if (Py_IS_NAN(x)) - return x; - absx = fabs(x); - if (absx < ERF_SERIES_CUTOFF) - return m_erf_series(x); - else { - cf = m_erfc_contfrac(absx); - return x > 0.0 ? 1.0 - cf : cf - 1.0; - } -#endif -} - -/* Complementary error function erfc(x), for general x. */ - -static double -m_erfc(double x) -{ -#ifdef HAVE_ERFC - return erfc(x); -#else - double absx, cf; - - if (Py_IS_NAN(x)) - return x; - absx = fabs(x); - if (absx < ERF_SERIES_CUTOFF) - return 1.0 - m_erf_series(x); - else { - cf = m_erfc_contfrac(absx); - return x > 0.0 ? cf : 2.0 - cf; - } -#endif -} - /* wrapper for atan2 that deals directly with special cases before delegating to the platform libm for the remaining cases. This @@ -801,25 +642,7 @@ m_log2(double x) } if (x > 0.0) { -#ifdef HAVE_LOG2 return log2(x); -#else - double m; - int e; - m = frexp(x, &e); - /* We want log2(m * 2**e) == log(m) / log(2) + e. 
Care is needed when - * x is just greater than 1.0: in that case e is 1, log(m) is negative, - * and we get significant cancellation error from the addition of - * log(m) / log(2) to e. The slight rewrite of the expression below - * avoids this problem. - */ - if (x >= 1.0) { - return log(2.0 * m) / log(2.0) + (e - 1); - } - else { - return log(m) / log(2.0) + e; - } -#endif } else if (x == 0.0) { errno = EDOM; @@ -1052,9 +875,7 @@ is_error(double x) */ static PyObject * -math_1_to_whatever(PyObject *arg, double (*func) (double), - PyObject *(*from_double_func) (double), - int can_overflow) +math_1(PyObject *arg, double (*func) (double), int can_overflow) { double x, r; x = PyFloat_AsDouble(arg); @@ -1080,7 +901,7 @@ math_1_to_whatever(PyObject *arg, double (*func) (double), /* this branch unnecessary on most platforms */ return NULL; - return (*from_double_func)(r); + return PyFloat_FromDouble(r); } /* variant of math_1, to be used when the function being wrapped is known to @@ -1128,12 +949,6 @@ math_1a(PyObject *arg, double (*func) (double)) OverflowError. */ -static PyObject * -math_1(PyObject *arg, double (*func) (double), int can_overflow) -{ - return math_1_to_whatever(arg, func, PyFloat_FromDouble, can_overflow); -} - static PyObject * math_2(PyObject *const *args, Py_ssize_t nargs, double (*func) (double, double), const char *funcname) @@ -1261,10 +1076,10 @@ FUNC1(cos, cos, 0, FUNC1(cosh, cosh, 1, "cosh($module, x, /)\n--\n\n" "Return the hyperbolic cosine of x.") -FUNC1A(erf, m_erf, +FUNC1A(erf, erf, "erf($module, x, /)\n--\n\n" "Error function at x.") -FUNC1A(erfc, m_erfc, +FUNC1A(erfc, erfc, "erfc($module, x, /)\n--\n\n" "Complementary error function at x.") FUNC1(exp, exp, 1, @@ -2366,24 +2181,26 @@ loghelper(PyObject* arg, double (*func)(double)) math.log x: object - base: object = None + [ + base: object(c_default="NULL") = math.e + ] / Return the logarithm of x to the given base. -If the base is not specified or is None, returns the natural -logarithm (base e) of x. +If the base not specified, returns the natural logarithm (base e) of x. [clinic start generated code]*/ static PyObject * -math_log_impl(PyObject *module, PyObject *x, PyObject *base) -/*[clinic end generated code: output=1dead263cbb1e854 input=ef032cc9837943e1]*/ +math_log_impl(PyObject *module, PyObject *x, int group_right_1, + PyObject *base) +/*[clinic end generated code: output=7b5a39e526b73fc9 input=0f62d5726cbfebbd]*/ { PyObject *num, *den; PyObject *ans; num = loghelper(x, m_log); - if (num == NULL || base == Py_None) + if (num == NULL || base == NULL) return num; den = loghelper(base, m_log); @@ -2851,6 +2668,8 @@ dl_sum(double a, double b) return (DoubleLength) {x, y}; } +#ifndef UNRELIABLE_FMA + static DoubleLength dl_mul(double x, double y) { @@ -2860,6 +2679,47 @@ dl_mul(double x, double y) return (DoubleLength) {z, zz}; } +#else + +/* + The default implementation of dl_mul() depends on the C math library + having an accurate fma() function as required by § 7.12.13.1 of the + C99 standard. + + The UNRELIABLE_FMA option is provided as a slower but accurate + alternative for builds where the fma() function is found wanting. + The speed penalty may be modest (17% slower on an Apple M1 Max), + so don't hesitate to enable this build option. + + The algorithms are from the T. J. Dekker paper: + A Floating-Point Technique for Extending the Available Precision + https://csclub.uwaterloo.ca/~pbarfuss/dekker1971.pdf +*/ + +static DoubleLength +dl_split(double x) { + // Dekker (5.5) and (5.6). 
+ double t = x * 134217729.0; // Veltkamp constant = 2.0 ** 27 + 1 + double hi = t - (t - x); + double lo = x - hi; + return (DoubleLength) {hi, lo}; +} + +static DoubleLength +dl_mul(double x, double y) +{ + // Dekker (5.12) and mul12() + DoubleLength xx = dl_split(x); + DoubleLength yy = dl_split(y); + double p = xx.hi * yy.hi; + double q = xx.hi * yy.lo + xx.lo * yy.hi; + double z = p + q; + double zz = p - z + q + xx.lo * yy.lo; + return (DoubleLength) {z, zz}; +} + +#endif + typedef struct { double hi; double lo; double tiny; } TripleLength; static const TripleLength tl_zero = {0.0, 0.0, 0.0}; diff --git a/Modules/posixmodule.c b/Modules/posixmodule.c index b84fb0d280f4e3..cba6cea48b77e1 100644 --- a/Modules/posixmodule.c +++ b/Modules/posixmodule.c @@ -4490,6 +4490,311 @@ os__path_splitroot_impl(PyObject *module, path_t *path) } +/*[clinic input] +os._path_isdir + + path: 'O' + +Return true if the pathname refers to an existing directory. + +[clinic start generated code]*/ + +static PyObject * +os__path_isdir_impl(PyObject *module, PyObject *path) +/*[clinic end generated code: output=00faea0af309669d input=b1d2571cf7291aaf]*/ +{ + HANDLE hfile; + BOOL close_file = TRUE; + FILE_BASIC_INFO info; + path_t _path = PATH_T_INITIALIZE("isdir", "path", 0, 1); + int result; + + if (!path_converter(path, &_path)) { + path_cleanup(&_path); + if (PyErr_ExceptionMatches(PyExc_ValueError)) { + PyErr_Clear(); + Py_RETURN_FALSE; + } + return NULL; + } + + Py_BEGIN_ALLOW_THREADS + if (_path.fd != -1) { + hfile = _Py_get_osfhandle_noraise(_path.fd); + close_file = FALSE; + } + else { + hfile = CreateFileW(_path.wide, FILE_READ_ATTRIBUTES, 0, NULL, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); + } + if (hfile != INVALID_HANDLE_VALUE) { + if (GetFileInformationByHandleEx(hfile, FileBasicInfo, &info, + sizeof(info))) + { + result = info.FileAttributes & FILE_ATTRIBUTE_DIRECTORY; + } + else { + result = 0; + } + if (close_file) { + CloseHandle(hfile); + } + } + else { + STRUCT_STAT st; + switch (GetLastError()) { + case ERROR_ACCESS_DENIED: + case ERROR_SHARING_VIOLATION: + case ERROR_CANT_ACCESS_FILE: + case ERROR_INVALID_PARAMETER: + if (STAT(_path.wide, &st)) { + result = 0; + } + else { + result = S_ISDIR(st.st_mode); + } + break; + default: + result = 0; + } + } + Py_END_ALLOW_THREADS + + path_cleanup(&_path); + if (result) { + Py_RETURN_TRUE; + } + Py_RETURN_FALSE; +} + + +/*[clinic input] +os._path_isfile + + path: 'O' + +Test whether a path is a regular file + +[clinic start generated code]*/ + +static PyObject * +os__path_isfile_impl(PyObject *module, PyObject *path) +/*[clinic end generated code: output=2394ed7c4b5cfd85 input=de22d74960ade365]*/ +{ + HANDLE hfile; + BOOL close_file = TRUE; + FILE_BASIC_INFO info; + path_t _path = PATH_T_INITIALIZE("isfile", "path", 0, 1); + int result; + + if (!path_converter(path, &_path)) { + path_cleanup(&_path); + if (PyErr_ExceptionMatches(PyExc_ValueError)) { + PyErr_Clear(); + Py_RETURN_FALSE; + } + return NULL; + } + + Py_BEGIN_ALLOW_THREADS + if (_path.fd != -1) { + hfile = _Py_get_osfhandle_noraise(_path.fd); + close_file = FALSE; + } + else { + hfile = CreateFileW(_path.wide, FILE_READ_ATTRIBUTES, 0, NULL, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); + } + if (hfile != INVALID_HANDLE_VALUE) { + if (GetFileInformationByHandleEx(hfile, FileBasicInfo, &info, + sizeof(info))) + { + result = !(info.FileAttributes & FILE_ATTRIBUTE_DIRECTORY); + } + else { + result = 0; + } + if (close_file) { + CloseHandle(hfile); + } + } + else { + 
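+        /* CreateFileW failed.  For errors that usually mean the file exists
+         * but cannot be opened (permissions, sharing violations, ...), fall
+         * back to an ordinary stat() call and test S_ISREG; any other error
+         * is treated as "not a regular file". */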
STRUCT_STAT st; + switch (GetLastError()) { + case ERROR_ACCESS_DENIED: + case ERROR_SHARING_VIOLATION: + case ERROR_CANT_ACCESS_FILE: + case ERROR_INVALID_PARAMETER: + if (STAT(_path.wide, &st)) { + result = 0; + } + else { + result = S_ISREG(st.st_mode); + } + break; + default: + result = 0; + } + } + Py_END_ALLOW_THREADS + + path_cleanup(&_path); + if (result) { + Py_RETURN_TRUE; + } + Py_RETURN_FALSE; +} + + +/*[clinic input] +os._path_exists + + path: 'O' + +Test whether a path exists. Returns False for broken symbolic links + +[clinic start generated code]*/ + +static PyObject * +os__path_exists_impl(PyObject *module, PyObject *path) +/*[clinic end generated code: output=f508c3b35e13a249 input=380f77cdfa0f7ae8]*/ +{ + HANDLE hfile; + BOOL close_file = TRUE; + path_t _path = PATH_T_INITIALIZE("exists", "path", 0, 1); + int result; + + if (!path_converter(path, &_path)) { + path_cleanup(&_path); + if (PyErr_ExceptionMatches(PyExc_ValueError)) { + PyErr_Clear(); + Py_RETURN_FALSE; + } + return NULL; + } + + Py_BEGIN_ALLOW_THREADS + if (_path.fd != -1) { + hfile = _Py_get_osfhandle_noraise(_path.fd); + close_file = FALSE; + } + else { + hfile = CreateFileW(_path.wide, FILE_READ_ATTRIBUTES, 0, NULL, + OPEN_EXISTING, FILE_FLAG_BACKUP_SEMANTICS, NULL); + } + if (hfile != INVALID_HANDLE_VALUE) { + result = 1; + if (close_file) { + CloseHandle(hfile); + } + } + else { + STRUCT_STAT st; + switch (GetLastError()) { + case ERROR_ACCESS_DENIED: + case ERROR_SHARING_VIOLATION: + case ERROR_CANT_ACCESS_FILE: + case ERROR_INVALID_PARAMETER: + if (STAT(_path.wide, &st)) { + result = 0; + } + else { + result = 1; + } + break; + default: + result = 0; + } + } + Py_END_ALLOW_THREADS + + path_cleanup(&_path); + if (result) { + Py_RETURN_TRUE; + } + Py_RETURN_FALSE; +} + + +/*[clinic input] +os._path_islink + + path: 'O' + +Test whether a path is a symbolic link + +[clinic start generated code]*/ + +static PyObject * +os__path_islink_impl(PyObject *module, PyObject *path) +/*[clinic end generated code: output=6d8640b1a390c054 input=38a3cb937ccf59bf]*/ +{ + HANDLE hfile; + BOOL close_file = TRUE; + FILE_ATTRIBUTE_TAG_INFO info; + path_t _path = PATH_T_INITIALIZE("islink", "path", 0, 1); + int result; + + if (!path_converter(path, &_path)) { + path_cleanup(&_path); + if (PyErr_ExceptionMatches(PyExc_ValueError)) { + PyErr_Clear(); + Py_RETURN_FALSE; + } + return NULL; + } + + Py_BEGIN_ALLOW_THREADS + if (_path.fd != -1) { + hfile = _Py_get_osfhandle_noraise(_path.fd); + close_file = FALSE; + } + else { + hfile = CreateFileW(_path.wide, FILE_READ_ATTRIBUTES, 0, NULL, + OPEN_EXISTING, + FILE_FLAG_OPEN_REPARSE_POINT | FILE_FLAG_BACKUP_SEMANTICS, + NULL); + } + if (hfile != INVALID_HANDLE_VALUE) { + if (GetFileInformationByHandleEx(hfile, FileAttributeTagInfo, &info, + sizeof(info))) + { + result = (info.ReparseTag == IO_REPARSE_TAG_SYMLINK); + } + else { + result = 0; + } + if (close_file) { + CloseHandle(hfile); + } + } + else { + STRUCT_STAT st; + switch (GetLastError()) { + case ERROR_ACCESS_DENIED: + case ERROR_SHARING_VIOLATION: + case ERROR_CANT_ACCESS_FILE: + case ERROR_INVALID_PARAMETER: + if (LSTAT(_path.wide, &st)) { + result = 0; + } + else { + result = S_ISLNK(st.st_mode); + } + break; + default: + result = 0; + } + } + Py_END_ALLOW_THREADS + + path_cleanup(&_path); + if (result) { + Py_RETURN_TRUE; + } + Py_RETURN_FALSE; +} + #endif /* MS_WINDOWS */ @@ -15150,6 +15455,11 @@ static PyMethodDef posix_methods[] = { OS_WAITSTATUS_TO_EXITCODE_METHODDEF OS_SETNS_METHODDEF OS_UNSHARE_METHODDEF + + 
OS__PATH_ISDIR_METHODDEF + OS__PATH_ISFILE_METHODDEF + OS__PATH_ISLINK_METHODDEF + OS__PATH_EXISTS_METHODDEF {NULL, NULL} /* Sentinel */ }; diff --git a/Modules/sha256module.c b/Modules/sha256module.c index 17ee86683b7a89..630e4bf03bbe96 100644 --- a/Modules/sha256module.c +++ b/Modules/sha256module.c @@ -31,29 +31,33 @@ class SHA256Type "SHAobject *" "&PyType_Type" [clinic start generated code]*/ /*[clinic end generated code: output=da39a3ee5e6b4b0d input=71a39174d4f0a744]*/ -/* Some useful types */ -typedef unsigned char SHA_BYTE; -typedef uint32_t SHA_INT32; /* 32-bit integer */ - -/* The SHA block size and message digest sizes, in bytes */ +/* The SHA block size and maximum message digest sizes, in bytes */ #define SHA_BLOCKSIZE 64 -#define SHA_DIGESTSIZE 32 +#define SHA_DIGESTSIZE 32 + +/* The SHA2-224 and SHA2-256 implementations defer to the HACL* verified + * library. */ -/* The structure for storing SHA info */ +#include "_hacl/Hacl_Streaming_SHA2.h" typedef struct { - PyObject_HEAD - SHA_INT32 digest[8]; /* Message digest */ - SHA_INT32 count_lo, count_hi; /* 64-bit bit count */ - SHA_BYTE data[SHA_BLOCKSIZE]; /* SHA data buffer */ - int local; /* unprocessed amount in data */ - int digestsize; + PyObject_HEAD + // Even though one could conceivably perform run-type checks to tell apart a + // sha224_type from a sha256_type (and thus deduce the digest size), we must + // keep this field because it's exposed as a member field on the underlying + // python object. + // TODO: could we transform this into a getter and get rid of the redundant + // field? + int digestsize; + Hacl_Streaming_SHA2_state_sha2_256 *state; } SHAobject; #include "clinic/sha256module.c.h" +/* We shall use run-time type information in the remainder of this module to + * tell apart SHA2-224 and SHA2-256 */ typedef struct { PyTypeObject* sha224_type; PyTypeObject* sha256_type; @@ -67,321 +71,12 @@ _sha256_get_state(PyObject *module) return (_sha256_state *)state; } -/* When run on a little-endian CPU we need to perform byte reversal on an - array of longwords. */ - -#if PY_LITTLE_ENDIAN -static void longReverse(SHA_INT32 *buffer, int byteCount) -{ - byteCount /= sizeof(*buffer); - for (; byteCount--; buffer++) { - *buffer = _Py_bswap32(*buffer); - } -} -#endif - static void SHAcopy(SHAobject *src, SHAobject *dest) { - dest->local = src->local; dest->digestsize = src->digestsize; - dest->count_lo = src->count_lo; - dest->count_hi = src->count_hi; - memcpy(dest->digest, src->digest, sizeof(src->digest)); - memcpy(dest->data, src->data, sizeof(src->data)); -} - - -/* ------------------------------------------------------------------------ - * - * This code for the SHA-256 algorithm was noted as public domain. The - * original headers are pasted below. - * - * Several changes have been made to make it more compatible with the - * Python environment and desired interface. - * - */ - -/* LibTomCrypt, modular cryptographic library -- Tom St Denis - * - * LibTomCrypt is a library that provides various cryptographic - * algorithms in a highly modular and flexible manner. - * - * The library is free for all purposes without any express - * guarantee it works. 
- * - * Tom St Denis, tomstdenis@iahu.ca, https://www.libtom.net - */ - - -/* SHA256 by Tom St Denis */ - -/* Various logical functions */ -#define ROR(x, y)\ -( ((((unsigned long)(x)&0xFFFFFFFFUL)>>(unsigned long)((y)&31)) | \ -((unsigned long)(x)<<(unsigned long)(32-((y)&31)))) & 0xFFFFFFFFUL) -#define Ch(x,y,z) (z ^ (x & (y ^ z))) -#define Maj(x,y,z) (((x | y) & z) | (x & y)) -#define S(x, n) ROR((x),(n)) -#define R(x, n) (((x)&0xFFFFFFFFUL)>>(n)) -#define Sigma0(x) (S(x, 2) ^ S(x, 13) ^ S(x, 22)) -#define Sigma1(x) (S(x, 6) ^ S(x, 11) ^ S(x, 25)) -#define Gamma0(x) (S(x, 7) ^ S(x, 18) ^ R(x, 3)) -#define Gamma1(x) (S(x, 17) ^ S(x, 19) ^ R(x, 10)) - - -static void -sha_transform(SHAobject *sha_info) -{ - int i; - SHA_INT32 S[8], W[64], t0, t1; - - memcpy(W, sha_info->data, sizeof(sha_info->data)); -#if PY_LITTLE_ENDIAN - longReverse(W, (int)sizeof(sha_info->data)); -#endif - - for (i = 16; i < 64; ++i) { - W[i] = Gamma1(W[i - 2]) + W[i - 7] + Gamma0(W[i - 15]) + W[i - 16]; - } - for (i = 0; i < 8; ++i) { - S[i] = sha_info->digest[i]; - } - - /* Compress */ -#define RND(a,b,c,d,e,f,g,h,i,ki) \ - t0 = h + Sigma1(e) + Ch(e, f, g) + ki + W[i]; \ - t1 = Sigma0(a) + Maj(a, b, c); \ - d += t0; \ - h = t0 + t1; - - RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],0,0x428a2f98); - RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],1,0x71374491); - RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],2,0xb5c0fbcf); - RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],3,0xe9b5dba5); - RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],4,0x3956c25b); - RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],5,0x59f111f1); - RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],6,0x923f82a4); - RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],7,0xab1c5ed5); - RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],8,0xd807aa98); - RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],9,0x12835b01); - RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],10,0x243185be); - RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],11,0x550c7dc3); - RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],12,0x72be5d74); - RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],13,0x80deb1fe); - RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],14,0x9bdc06a7); - RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],15,0xc19bf174); - RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],16,0xe49b69c1); - RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],17,0xefbe4786); - RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],18,0x0fc19dc6); - RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],19,0x240ca1cc); - RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],20,0x2de92c6f); - RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],21,0x4a7484aa); - RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],22,0x5cb0a9dc); - RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],23,0x76f988da); - RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],24,0x983e5152); - RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],25,0xa831c66d); - RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],26,0xb00327c8); - RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],27,0xbf597fc7); - RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],28,0xc6e00bf3); - RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],29,0xd5a79147); - RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],30,0x06ca6351); - RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],31,0x14292967); - RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],32,0x27b70a85); - RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],33,0x2e1b2138); - RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],34,0x4d2c6dfc); - RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],35,0x53380d13); - RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],36,0x650a7354); - 
RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],37,0x766a0abb); - RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],38,0x81c2c92e); - RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],39,0x92722c85); - RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],40,0xa2bfe8a1); - RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],41,0xa81a664b); - RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],42,0xc24b8b70); - RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],43,0xc76c51a3); - RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],44,0xd192e819); - RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],45,0xd6990624); - RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],46,0xf40e3585); - RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],47,0x106aa070); - RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],48,0x19a4c116); - RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],49,0x1e376c08); - RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],50,0x2748774c); - RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],51,0x34b0bcb5); - RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],52,0x391c0cb3); - RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],53,0x4ed8aa4a); - RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],54,0x5b9cca4f); - RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],55,0x682e6ff3); - RND(S[0],S[1],S[2],S[3],S[4],S[5],S[6],S[7],56,0x748f82ee); - RND(S[7],S[0],S[1],S[2],S[3],S[4],S[5],S[6],57,0x78a5636f); - RND(S[6],S[7],S[0],S[1],S[2],S[3],S[4],S[5],58,0x84c87814); - RND(S[5],S[6],S[7],S[0],S[1],S[2],S[3],S[4],59,0x8cc70208); - RND(S[4],S[5],S[6],S[7],S[0],S[1],S[2],S[3],60,0x90befffa); - RND(S[3],S[4],S[5],S[6],S[7],S[0],S[1],S[2],61,0xa4506ceb); - RND(S[2],S[3],S[4],S[5],S[6],S[7],S[0],S[1],62,0xbef9a3f7); - RND(S[1],S[2],S[3],S[4],S[5],S[6],S[7],S[0],63,0xc67178f2); - -#undef RND - - /* feedback */ - for (i = 0; i < 8; i++) { - sha_info->digest[i] = sha_info->digest[i] + S[i]; - } - -} - - - -/* initialize the SHA digest */ - -static void -sha_init(SHAobject *sha_info) -{ - sha_info->digest[0] = 0x6A09E667L; - sha_info->digest[1] = 0xBB67AE85L; - sha_info->digest[2] = 0x3C6EF372L; - sha_info->digest[3] = 0xA54FF53AL; - sha_info->digest[4] = 0x510E527FL; - sha_info->digest[5] = 0x9B05688CL; - sha_info->digest[6] = 0x1F83D9ABL; - sha_info->digest[7] = 0x5BE0CD19L; - sha_info->count_lo = 0L; - sha_info->count_hi = 0L; - sha_info->local = 0; - sha_info->digestsize = 32; + dest->state = Hacl_Streaming_SHA2_copy_256(src->state); } -static void -sha224_init(SHAobject *sha_info) -{ - sha_info->digest[0] = 0xc1059ed8L; - sha_info->digest[1] = 0x367cd507L; - sha_info->digest[2] = 0x3070dd17L; - sha_info->digest[3] = 0xf70e5939L; - sha_info->digest[4] = 0xffc00b31L; - sha_info->digest[5] = 0x68581511L; - sha_info->digest[6] = 0x64f98fa7L; - sha_info->digest[7] = 0xbefa4fa4L; - sha_info->count_lo = 0L; - sha_info->count_hi = 0L; - sha_info->local = 0; - sha_info->digestsize = 28; -} - - -/* update the SHA digest */ - -static void -sha_update(SHAobject *sha_info, SHA_BYTE *buffer, Py_ssize_t count) -{ - Py_ssize_t i; - SHA_INT32 clo; - - clo = sha_info->count_lo + ((SHA_INT32) count << 3); - if (clo < sha_info->count_lo) { - ++sha_info->count_hi; - } - sha_info->count_lo = clo; - sha_info->count_hi += (SHA_INT32) count >> 29; - if (sha_info->local) { - i = SHA_BLOCKSIZE - sha_info->local; - if (i > count) { - i = count; - } - memcpy(((SHA_BYTE *) sha_info->data) + sha_info->local, buffer, i); - count -= i; - buffer += i; - sha_info->local += (int)i; - if (sha_info->local == SHA_BLOCKSIZE) { - sha_transform(sha_info); - } - else { - return; - } - } - while (count >= SHA_BLOCKSIZE) { - memcpy(sha_info->data, buffer, 
SHA_BLOCKSIZE); - buffer += SHA_BLOCKSIZE; - count -= SHA_BLOCKSIZE; - sha_transform(sha_info); - } - memcpy(sha_info->data, buffer, count); - sha_info->local = (int)count; -} - -/* finish computing the SHA digest */ - -static void -sha_final(unsigned char digest[SHA_DIGESTSIZE], SHAobject *sha_info) -{ - int count; - SHA_INT32 lo_bit_count, hi_bit_count; - - lo_bit_count = sha_info->count_lo; - hi_bit_count = sha_info->count_hi; - count = (int) ((lo_bit_count >> 3) & 0x3f); - ((SHA_BYTE *) sha_info->data)[count++] = 0x80; - if (count > SHA_BLOCKSIZE - 8) { - memset(((SHA_BYTE *) sha_info->data) + count, 0, - SHA_BLOCKSIZE - count); - sha_transform(sha_info); - memset((SHA_BYTE *) sha_info->data, 0, SHA_BLOCKSIZE - 8); - } - else { - memset(((SHA_BYTE *) sha_info->data) + count, 0, - SHA_BLOCKSIZE - 8 - count); - } - - /* GJS: note that we add the hi/lo in big-endian. sha_transform will - swap these values into host-order. */ - sha_info->data[56] = (hi_bit_count >> 24) & 0xff; - sha_info->data[57] = (hi_bit_count >> 16) & 0xff; - sha_info->data[58] = (hi_bit_count >> 8) & 0xff; - sha_info->data[59] = (hi_bit_count >> 0) & 0xff; - sha_info->data[60] = (lo_bit_count >> 24) & 0xff; - sha_info->data[61] = (lo_bit_count >> 16) & 0xff; - sha_info->data[62] = (lo_bit_count >> 8) & 0xff; - sha_info->data[63] = (lo_bit_count >> 0) & 0xff; - sha_transform(sha_info); - digest[ 0] = (unsigned char) ((sha_info->digest[0] >> 24) & 0xff); - digest[ 1] = (unsigned char) ((sha_info->digest[0] >> 16) & 0xff); - digest[ 2] = (unsigned char) ((sha_info->digest[0] >> 8) & 0xff); - digest[ 3] = (unsigned char) ((sha_info->digest[0] ) & 0xff); - digest[ 4] = (unsigned char) ((sha_info->digest[1] >> 24) & 0xff); - digest[ 5] = (unsigned char) ((sha_info->digest[1] >> 16) & 0xff); - digest[ 6] = (unsigned char) ((sha_info->digest[1] >> 8) & 0xff); - digest[ 7] = (unsigned char) ((sha_info->digest[1] ) & 0xff); - digest[ 8] = (unsigned char) ((sha_info->digest[2] >> 24) & 0xff); - digest[ 9] = (unsigned char) ((sha_info->digest[2] >> 16) & 0xff); - digest[10] = (unsigned char) ((sha_info->digest[2] >> 8) & 0xff); - digest[11] = (unsigned char) ((sha_info->digest[2] ) & 0xff); - digest[12] = (unsigned char) ((sha_info->digest[3] >> 24) & 0xff); - digest[13] = (unsigned char) ((sha_info->digest[3] >> 16) & 0xff); - digest[14] = (unsigned char) ((sha_info->digest[3] >> 8) & 0xff); - digest[15] = (unsigned char) ((sha_info->digest[3] ) & 0xff); - digest[16] = (unsigned char) ((sha_info->digest[4] >> 24) & 0xff); - digest[17] = (unsigned char) ((sha_info->digest[4] >> 16) & 0xff); - digest[18] = (unsigned char) ((sha_info->digest[4] >> 8) & 0xff); - digest[19] = (unsigned char) ((sha_info->digest[4] ) & 0xff); - digest[20] = (unsigned char) ((sha_info->digest[5] >> 24) & 0xff); - digest[21] = (unsigned char) ((sha_info->digest[5] >> 16) & 0xff); - digest[22] = (unsigned char) ((sha_info->digest[5] >> 8) & 0xff); - digest[23] = (unsigned char) ((sha_info->digest[5] ) & 0xff); - digest[24] = (unsigned char) ((sha_info->digest[6] >> 24) & 0xff); - digest[25] = (unsigned char) ((sha_info->digest[6] >> 16) & 0xff); - digest[26] = (unsigned char) ((sha_info->digest[6] >> 8) & 0xff); - digest[27] = (unsigned char) ((sha_info->digest[6] ) & 0xff); - digest[28] = (unsigned char) ((sha_info->digest[7] >> 24) & 0xff); - digest[29] = (unsigned char) ((sha_info->digest[7] >> 16) & 0xff); - digest[30] = (unsigned char) ((sha_info->digest[7] >> 8) & 0xff); - digest[31] = (unsigned char) ((sha_info->digest[7] ) & 0xff); -} - -/* - * 
End of copied SHA code. - * - * ------------------------------------------------------------------------ - */ - - static SHAobject * newSHA224object(_sha256_state *state) { @@ -409,14 +104,31 @@ SHA_traverse(PyObject *ptr, visitproc visit, void *arg) } static void -SHA_dealloc(PyObject *ptr) +SHA_dealloc(SHAobject *ptr) { + Hacl_Streaming_SHA2_free_256(ptr->state); PyTypeObject *tp = Py_TYPE(ptr); PyObject_GC_UnTrack(ptr); PyObject_GC_Del(ptr); Py_DECREF(tp); } +/* HACL* takes a uint32_t for the length of its parameter, but Py_ssize_t can be + * 64 bits. */ +static void update_256(Hacl_Streaming_SHA2_state_sha2_256 *state, uint8_t *buf, Py_ssize_t len) { + /* Note: we explicitly ignore the error code on the basis that it would take > + * 1 billion years to overflow the maximum admissible length for SHA2-256 + * (namely, 2^61-1 bytes). */ + while (len > UINT32_MAX) { + Hacl_Streaming_SHA2_update_256(state, buf, UINT32_MAX); + len -= UINT32_MAX; + buf += UINT32_MAX; + } + /* Cast to uint32_t is safe: upon exiting the loop, len <= UINT32_MAX, and + * therefore fits in a uint32_t */ + Hacl_Streaming_SHA2_update_256(state, buf, (uint32_t) len); +} + /* External methods for a hash object */ @@ -458,11 +170,10 @@ static PyObject * SHA256Type_digest_impl(SHAobject *self) /*[clinic end generated code: output=46616a5e909fbc3d input=f1f4cfea5cbde35c]*/ { - unsigned char digest[SHA_DIGESTSIZE]; - SHAobject temp; - - SHAcopy(self, &temp); - sha_final(digest, &temp); + uint8_t digest[SHA_DIGESTSIZE]; + // HACL performs copies under the hood so that self->state remains valid + // after this call. + Hacl_Streaming_SHA2_finish_256(self->state, digest); return PyBytes_FromStringAndSize((const char *)digest, self->digestsize); } @@ -476,13 +187,8 @@ static PyObject * SHA256Type_hexdigest_impl(SHAobject *self) /*[clinic end generated code: output=725f8a7041ae97f3 input=0cc4c714693010d1]*/ { - unsigned char digest[SHA_DIGESTSIZE]; - SHAobject temp; - - /* Get the raw (binary) digest value */ - SHAcopy(self, &temp); - sha_final(digest, &temp); - + uint8_t digest[SHA_DIGESTSIZE]; + Hacl_Streaming_SHA2_finish_256(self->state, digest); return _Py_strhex((const char *)digest, self->digestsize); } @@ -503,7 +209,7 @@ SHA256Type_update(SHAobject *self, PyObject *obj) GET_BUFFER_VIEW_OR_ERROUT(obj, &buf); - sha_update(self, buf.buf, buf.len); + update_256(self->state, buf.buf, buf.len); PyBuffer_Release(&buf); Py_RETURN_NONE; @@ -524,12 +230,12 @@ SHA256_get_block_size(PyObject *self, void *closure) } static PyObject * -SHA256_get_name(PyObject *self, void *closure) +SHA256_get_name(SHAobject *self, void *closure) { - if (((SHAobject *)self)->digestsize == 32) - return PyUnicode_FromStringAndSize("sha256", 6); - else + if (self->digestsize == 28) { return PyUnicode_FromStringAndSize("sha224", 6); + } + return PyUnicode_FromStringAndSize("sha256", 6); } static PyGetSetDef SHA_getseters[] = { @@ -606,7 +312,8 @@ _sha256_sha256_impl(PyObject *module, PyObject *string, int usedforsecurity) return NULL; } - sha_init(new); + new->state = Hacl_Streaming_SHA2_create_in_256(); + new->digestsize = 32; if (PyErr_Occurred()) { Py_DECREF(new); @@ -616,7 +323,7 @@ _sha256_sha256_impl(PyObject *module, PyObject *string, int usedforsecurity) return NULL; } if (string) { - sha_update(new, buf.buf, buf.len); + update_256(new->state, buf.buf, buf.len); PyBuffer_Release(&buf); } @@ -651,7 +358,8 @@ _sha256_sha224_impl(PyObject *module, PyObject *string, int usedforsecurity) return NULL; } - sha224_init(new); + new->state = 
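The HACL* streaming calls introduced in this file (Hacl_Streaming_SHA2_create_in_256, _update_256, _finish_256, _free_256) compose into a one-shot digest. A sketch of that lifecycle, reusing the same UINT32_MAX chunking rule as update_256(); the header path and signatures are assumed to be exactly those used by this patch, and allocation-failure handling is omitted for brevity:

#include "_hacl/Hacl_Streaming_SHA2.h"
#include <stddef.h>
#include <stdint.h>

/* One-shot SHA2-256 of an arbitrarily long buffer, mirroring update_256(). */
static void
sha256_oneshot(uint8_t *buf, size_t len, uint8_t out[32])
{
    Hacl_Streaming_SHA2_state_sha2_256 *st = Hacl_Streaming_SHA2_create_in_256();
    while (len > UINT32_MAX) {                 /* the update call takes a uint32_t */
        Hacl_Streaming_SHA2_update_256(st, buf, UINT32_MAX);
        buf += UINT32_MAX;
        len -= UINT32_MAX;
    }
    Hacl_Streaming_SHA2_update_256(st, buf, (uint32_t)len);
    Hacl_Streaming_SHA2_finish_256(st, out);   /* copies internally; st stays valid */
    Hacl_Streaming_SHA2_free_256(st);
}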
Hacl_Streaming_SHA2_create_in_224(); + new->digestsize = 28; if (PyErr_Occurred()) { Py_DECREF(new); @@ -661,7 +369,7 @@ _sha256_sha224_impl(PyObject *module, PyObject *string, int usedforsecurity) return NULL; } if (string) { - sha_update(new, buf.buf, buf.len); + update_256(new->state, buf.buf, buf.len); PyBuffer_Release(&buf); } diff --git a/Modules/unicodedata.c b/Modules/unicodedata.c index 59fccd4b834dd3..c108f14871f946 100644 --- a/Modules/unicodedata.c +++ b/Modules/unicodedata.c @@ -800,7 +800,7 @@ is_normalized_quickcheck(PyObject *self, PyObject *input, bool nfc, bool k, { /* UCD 3.2.0 is requested, quickchecks must be disabled. */ if (UCD_Check(self)) { - return NO; + return MAYBE; } if (PyUnicode_IS_ASCII(input)) { diff --git a/Modules/zlibmodule.c b/Modules/zlibmodule.c index 1cdfd01320288b..e2f7dbaca87a9f 100644 --- a/Modules/zlibmodule.c +++ b/Modules/zlibmodule.c @@ -1519,6 +1519,7 @@ decompress_buf(ZlibDecompressor *self, Py_ssize_t max_length) } } else if (err != Z_OK && err != Z_BUF_ERROR) { zlib_error(state, self->zst, err, "while decompressing data"); + goto error; } self->avail_in_real += self->zst.avail_in; diff --git a/Objects/boolobject.c b/Objects/boolobject.c index 55b4a4077ab783..a035f463323823 100644 --- a/Objects/boolobject.c +++ b/Objects/boolobject.c @@ -4,6 +4,8 @@ #include "pycore_object.h" // _Py_FatalRefcountError() #include "pycore_runtime.h" // _Py_ID() +#include + /* We define bool_repr to return "False" or "True" */ static PyObject * @@ -153,8 +155,8 @@ bool_dealloc(PyObject* Py_UNUSED(ignore)) PyTypeObject PyBool_Type = { PyVarObject_HEAD_INIT(&PyType_Type, 0) "bool", - sizeof(struct _longobject), - 0, + offsetof(struct _longobject, long_value.ob_digit), /* tp_basicsize */ + sizeof(digit), /* tp_itemsize */ bool_dealloc, /* tp_dealloc */ 0, /* tp_vectorcall_offset */ 0, /* tp_getattr */ diff --git a/Objects/dictobject.c b/Objects/dictobject.c index b9067213820b52..fc658ca2f4b7f8 100644 --- a/Objects/dictobject.c +++ b/Objects/dictobject.c @@ -1663,15 +1663,15 @@ PyDict_GetItem(PyObject *op, PyObject *key) #endif /* Preserve the existing exception */ - PyObject *exc_type, *exc_value, *exc_tb; PyObject *value; Py_ssize_t ix; (void)ix; - _PyErr_Fetch(tstate, &exc_type, &exc_value, &exc_tb); + + PyObject *exc = _PyErr_GetRaisedException(tstate); ix = _Py_dict_lookup(mp, key, hash, &value); /* Ignore any exception raised by the lookup */ - _PyErr_Restore(tstate, exc_type, exc_value, exc_tb); + _PyErr_SetRaisedException(tstate, exc); assert(ix >= 0 || value == NULL); diff --git a/Objects/exceptions.c b/Objects/exceptions.c index db6f7d52804d6a..976f84dbf63c93 100644 --- a/Objects/exceptions.c +++ b/Objects/exceptions.c @@ -8,6 +8,7 @@ #include #include #include "pycore_ceval.h" // _Py_EnterRecursiveCall +#include "pycore_pyerrors.h" // struct _PyErr_SetRaisedException #include "pycore_exceptions.h" // struct _Py_exc_state #include "pycore_initconfig.h" #include "pycore_object.h" @@ -288,13 +289,17 @@ BaseException_set_tb(PyBaseExceptionObject *self, PyObject *tb, void *Py_UNUSED( PyErr_SetString(PyExc_TypeError, "__traceback__ may not be deleted"); return -1; } - else if (!(tb == Py_None || PyTraceBack_Check(tb))) { + if (PyTraceBack_Check(tb)) { + Py_XSETREF(self->traceback, Py_NewRef(tb)); + } + else if (tb == Py_None) { + Py_CLEAR(self->traceback); + } + else { PyErr_SetString(PyExc_TypeError, "__traceback__ must be a traceback or None"); return -1; } - - Py_XSETREF(self->traceback, Py_NewRef(tb)); return 0; } @@ -413,6 +418,20 @@ 
PyException_SetContext(PyObject *self, PyObject *context) Py_XSETREF(_PyBaseExceptionObject_cast(self)->context, context); } +PyObject * +PyException_GetArgs(PyObject *self) +{ + PyObject *args = _PyBaseExceptionObject_cast(self)->args; + return Py_NewRef(args); +} + +void +PyException_SetArgs(PyObject *self, PyObject *args) +{ + Py_INCREF(args); + Py_XSETREF(_PyBaseExceptionObject_cast(self)->args, args); +} + const char * PyExceptionClass_Name(PyObject *ob) { @@ -3188,20 +3207,19 @@ SimpleExtendsException(PyExc_Exception, ReferenceError, #define MEMERRORS_SAVE 16 +static PyBaseExceptionObject last_resort_memory_error; + static PyObject * -MemoryError_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +get_memory_error(int allow_allocation, PyObject *args, PyObject *kwds) { PyBaseExceptionObject *self; - - /* If this is a subclass of MemoryError, don't use the freelist - * and just return a fresh object */ - if (type != (PyTypeObject *) PyExc_MemoryError) { - return BaseException_new(type, args, kwds); - } - struct _Py_exc_state *state = get_exc_state(); if (state->memerrors_freelist == NULL) { - return BaseException_new(type, args, kwds); + if (!allow_allocation) { + return Py_NewRef(&last_resort_memory_error); + } + PyObject *result = BaseException_new((PyTypeObject *)PyExc_MemoryError, args, kwds); + return result; } /* Fetch object from freelist and revive it */ @@ -3221,6 +3239,35 @@ MemoryError_new(PyTypeObject *type, PyObject *args, PyObject *kwds) return (PyObject *)self; } +static PyBaseExceptionObject last_resort_memory_error; + +static PyObject * +MemoryError_new(PyTypeObject *type, PyObject *args, PyObject *kwds) +{ + /* If this is a subclass of MemoryError, don't use the freelist + * and just return a fresh object */ + if (type != (PyTypeObject *) PyExc_MemoryError) { + return BaseException_new(type, args, kwds); + } + return get_memory_error(1, args, kwds); +} + +PyObject * +_PyErr_NoMemory(PyThreadState *tstate) +{ + if (Py_IS_TYPE(PyExc_MemoryError, NULL)) { + /* PyErr_NoMemory() has been called before PyExc_MemoryError has been + initialized by _PyExc_Init() */ + Py_FatalError("Out of memory and PyExc_MemoryError is not " + "initialized yet"); + } + PyObject *err = get_memory_error(0, NULL, NULL); + if (err != NULL) { + _PyErr_SetRaisedException(tstate, err); + } + return NULL; +} + static void MemoryError_dealloc(PyBaseExceptionObject *self) { @@ -3252,6 +3299,7 @@ preallocate_memerrors(void) /* We create enough MemoryErrors and then decref them, which will fill up the freelist. 
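PyException_GetArgs() and PyException_SetArgs() above give C extensions a supported way to read and replace an exception's args tuple. A hedged sketch of one possible use, prepending extra context to the args; the helper name is illustrative and not part of any API:

#include <Python.h>

/* Prepend `extra` to exc.args; returns 0 on success, -1 with an error set. */
static int
prepend_exception_arg(PyObject *exc, PyObject *extra)
{
    PyObject *args = PyException_GetArgs(exc);       /* new reference */
    if (args == NULL) {
        return -1;
    }
    PyObject *head = PyTuple_Pack(1, extra);
    if (head == NULL) {
        Py_DECREF(args);
        return -1;
    }
    PyObject *merged = PySequence_Concat(head, args);
    Py_DECREF(head);
    Py_DECREF(args);
    if (merged == NULL) {
        return -1;
    }
    PyException_SetArgs(exc, merged);                /* keeps its own reference */
    Py_DECREF(merged);
    return 0;
}

Note that PyException_SetArgs() does not steal the reference it is given, so the caller still drops its own.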
*/ int i; + PyObject *errors[MEMERRORS_SAVE]; for (i = 0; i < MEMERRORS_SAVE; i++) { errors[i] = MemoryError_new((PyTypeObject *) PyExc_MemoryError, @@ -3291,6 +3339,9 @@ static PyTypeObject _PyExc_MemoryError = { }; PyObject *PyExc_MemoryError = (PyObject *) &_PyExc_MemoryError; +static PyBaseExceptionObject last_resort_memory_error = { + _PyObject_IMMORTAL_INIT(&_PyExc_MemoryError) +}; /* * BufferError extends Exception diff --git a/Objects/frameobject.c b/Objects/frameobject.c index 6bc04bc8e848fc..0e52a3e2399c06 100644 --- a/Objects/frameobject.c +++ b/Objects/frameobject.c @@ -397,6 +397,8 @@ mark_stacks(PyCodeObject *code_obj, int len) assert(pop_value(next_stack) == EMPTY_STACK); assert(top_of_stack(next_stack) == Object); break; + case RETURN_CONST: + break; case RAISE_VARARGS: break; case RERAISE: diff --git a/Objects/funcobject.c b/Objects/funcobject.c index baa360381a7724..91a6b3dd40a232 100644 --- a/Objects/funcobject.c +++ b/Objects/funcobject.c @@ -87,8 +87,8 @@ _PyFunction_FromConstructor(PyFrameConstructor *constr) op->func_name = Py_NewRef(constr->fc_name); op->func_qualname = Py_NewRef(constr->fc_qualname); op->func_code = Py_NewRef(constr->fc_code); - op->func_defaults = NULL; - op->func_kwdefaults = NULL; + op->func_defaults = Py_XNewRef(constr->fc_defaults); + op->func_kwdefaults = Py_XNewRef(constr->fc_kwdefaults); op->func_closure = Py_XNewRef(constr->fc_closure); op->func_doc = Py_NewRef(Py_None); op->func_dict = NULL; diff --git a/Objects/longobject.c b/Objects/longobject.c index 65bf15648b07fb..8293f133bed213 100644 --- a/Objects/longobject.c +++ b/Objects/longobject.c @@ -5882,13 +5882,10 @@ static Py_ssize_t int___sizeof___impl(PyObject *self) /*[clinic end generated code: output=3303f008eaa6a0a5 input=9b51620c76fc4507]*/ { - Py_ssize_t res; - - res = offsetof(PyLongObject, long_value.ob_digit) - /* using Py_MAX(..., 1) because we always allocate space for at least - one digit, even though the integer zero has a Py_SIZE of 0 */ - + Py_MAX(Py_ABS(Py_SIZE(self)), 1)*sizeof(digit); - return res; + /* using Py_MAX(..., 1) because we always allocate space for at least + one digit, even though the integer zero has a Py_SIZE of 0 */ + Py_ssize_t ndigits = Py_MAX(Py_ABS(Py_SIZE(self)), 1); + return Py_TYPE(self)->tp_basicsize + Py_TYPE(self)->tp_itemsize * ndigits; } /*[clinic input] diff --git a/Objects/object.c b/Objects/object.c index e940222c657e3c..7817c04ef5f5be 100644 --- a/Objects/object.c +++ b/Objects/object.c @@ -2416,10 +2416,10 @@ _Py_Dealloc(PyObject *op) destructor dealloc = type->tp_dealloc; #ifdef Py_DEBUG PyThreadState *tstate = _PyThreadState_GET(); - PyObject *old_exc_type = tstate != NULL ? tstate->curexc_type : NULL; + PyObject *old_exc = tstate != NULL ? tstate->current_exception : NULL; // Keep the old exception type alive to prevent undefined behavior // on (tstate->curexc_type != old_exc_type) below - Py_XINCREF(old_exc_type); + Py_XINCREF(old_exc); // Make sure that type->tp_name remains valid Py_INCREF(type); #endif @@ -2432,12 +2432,12 @@ _Py_Dealloc(PyObject *op) #ifdef Py_DEBUG // gh-89373: The tp_dealloc function must leave the current exception // unchanged. 
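The int.__sizeof__ rewrite above, together with the earlier bool layout change, reduces the size computation to the generic var-object formula tp_basicsize + tp_itemsize * ndigits. A small illustrative helper, assuming only the public type slots; the Py_MAX(..., 1) rule for zero is kept:

#include <Python.h>

/* Expected __sizeof__ for an int-like variable-size object with `ndigits`
   digits; zero still reserves one digit, matching Py_MAX(..., 1) above. */
static Py_ssize_t
varobject_sizeof(PyTypeObject *tp, Py_ssize_t ndigits)
{
    if (ndigits < 1) {
        ndigits = 1;
    }
    return tp->tp_basicsize + tp->tp_itemsize * ndigits;
}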
- if (tstate != NULL && tstate->curexc_type != old_exc_type) { + if (tstate != NULL && tstate->current_exception != old_exc) { const char *err; - if (old_exc_type == NULL) { + if (old_exc == NULL) { err = "Deallocator of type '%s' raised an exception"; } - else if (tstate->curexc_type == NULL) { + else if (tstate->current_exception == NULL) { err = "Deallocator of type '%s' cleared the current exception"; } else { @@ -2448,7 +2448,7 @@ _Py_Dealloc(PyObject *op) } _Py_FatalErrorFormat(__func__, err, type->tp_name); } - Py_XDECREF(old_exc_type); + Py_XDECREF(old_exc); Py_DECREF(type); #endif } diff --git a/Objects/odictobject.c b/Objects/odictobject.c index 4976b70b5dff5a..ab2bbed35873de 100644 --- a/Objects/odictobject.c +++ b/Objects/odictobject.c @@ -1367,7 +1367,7 @@ static PyObject * odict_repr(PyODictObject *self) { int i; - PyObject *pieces = NULL, *result = NULL; + PyObject *result = NULL, *dcopy = NULL; if (PyODict_SIZE(self) == 0) return PyUnicode_FromFormat("%s()", _PyType_Name(Py_TYPE(self))); @@ -1377,57 +1377,16 @@ odict_repr(PyODictObject *self) return i > 0 ? PyUnicode_FromString("...") : NULL; } - if (PyODict_CheckExact(self)) { - Py_ssize_t count = 0; - _ODictNode *node; - pieces = PyList_New(PyODict_SIZE(self)); - if (pieces == NULL) - goto Done; - - _odict_FOREACH(self, node) { - PyObject *pair; - PyObject *key = _odictnode_KEY(node); - PyObject *value = _odictnode_VALUE(node, self); - if (value == NULL) { - if (!PyErr_Occurred()) - PyErr_SetObject(PyExc_KeyError, key); - goto Done; - } - pair = PyTuple_Pack(2, key, value); - if (pair == NULL) - goto Done; - - if (count < PyList_GET_SIZE(pieces)) - PyList_SET_ITEM(pieces, count, pair); /* steals reference */ - else { - if (PyList_Append(pieces, pair) < 0) { - Py_DECREF(pair); - goto Done; - } - Py_DECREF(pair); - } - count++; - } - if (count < PyList_GET_SIZE(pieces)) { - Py_SET_SIZE(pieces, count); - } - } - else { - PyObject *items = PyObject_CallMethodNoArgs( - (PyObject *)self, &_Py_ID(items)); - if (items == NULL) - goto Done; - pieces = PySequence_List(items); - Py_DECREF(items); - if (pieces == NULL) - goto Done; + dcopy = PyDict_Copy((PyObject *)self); + if (dcopy == NULL) { + goto Done; } result = PyUnicode_FromFormat("%s(%R)", - _PyType_Name(Py_TYPE(self)), pieces); + _PyType_Name(Py_TYPE(self)), + dcopy); Done: - Py_XDECREF(pieces); Py_ReprLeave((PyObject *)self); return result; } diff --git a/Objects/typeobject.c b/Objects/typeobject.c index 59e0bf2995bac2..bf6ccdb77a90f0 100644 --- a/Objects/typeobject.c +++ b/Objects/typeobject.c @@ -4469,6 +4469,8 @@ _PyStaticType_Dealloc(PyTypeObject *type) } type->tp_flags &= ~Py_TPFLAGS_READY; + type->tp_flags &= ~Py_TPFLAGS_VALID_VERSION_TAG; + type->tp_version_tag = 0; if (type->tp_flags & _Py_TPFLAGS_STATIC_BUILTIN) { _PyStaticType_ClearWeakRefs(type); diff --git a/PC/config.c b/PC/config.c index 9d900c78e40d00..cdb5db23c4ae49 100644 --- a/PC/config.c +++ b/PC/config.c @@ -37,6 +37,7 @@ extern PyObject* PyInit__weakref(void); /* XXX: These two should really be extracted to standalone extensions. 
*/ extern PyObject* PyInit_xxsubtype(void); extern PyObject* PyInit__xxsubinterpreters(void); +extern PyObject* PyInit__xxinterpchannels(void); extern PyObject* PyInit__random(void); extern PyObject* PyInit_itertools(void); extern PyObject* PyInit__collections(void); @@ -134,6 +135,7 @@ struct _inittab _PyImport_Inittab[] = { {"xxsubtype", PyInit_xxsubtype}, {"_xxsubinterpreters", PyInit__xxsubinterpreters}, + {"_xxinterpchannels", PyInit__xxinterpchannels}, #ifdef _Py_HAVE_ZLIB {"zlib", PyInit_zlib}, #endif diff --git a/PC/launcher2.c b/PC/launcher2.c index 2052a2ffeb57b7..beeb2ae46b83f0 100644 --- a/PC/launcher2.c +++ b/PC/launcher2.c @@ -295,6 +295,30 @@ _startsWithArgument(const wchar_t *x, int xLen, const wchar_t *y, int yLen) } +// Unlike regular startsWith, this function requires that the following +// character is either NULL (that is, the entire string matches) or is one of +// the characters in 'separators'. +bool +_startsWithSeparated(const wchar_t *x, int xLen, const wchar_t *y, int yLen, const wchar_t *separators) +{ + if (!x || !y) { + return false; + } + yLen = yLen < 0 ? (int)wcsnlen_s(y, MAXLEN) : yLen; + xLen = xLen < 0 ? (int)wcsnlen_s(x, MAXLEN) : xLen; + if (xLen < yLen) { + return false; + } + if (xLen == yLen) { + return 0 == _compare(x, xLen, y, yLen); + } + return separators && + 0 == _compare(x, yLen, y, yLen) && + wcschr(separators, x[yLen]) != NULL; +} + + + /******************************************************************************\ *** HELP TEXT *** \******************************************************************************/ @@ -409,6 +433,9 @@ typedef struct { bool listPaths; // if true, display help message before contiuning bool help; + // if set, limits search to registry keys with the specified Company + // This is intended for debugging and testing only + const wchar_t *limitToCompany; // dynamically allocated buffers to free later struct _SearchInfoBuffer *_buffer; } SearchInfo; @@ -489,6 +516,7 @@ dumpSearchInfo(SearchInfo *search) DEBUG_BOOL(list); DEBUG_BOOL(listPaths); DEBUG_BOOL(help); + DEBUG(limitToCompany); #undef DEBUG_BOOL #undef DEBUG_2 #undef DEBUG @@ -1606,6 +1634,10 @@ registrySearch(const SearchInfo *search, EnvironmentInfo **result, HKEY root, in } break; } + if (search->limitToCompany && 0 != _compare(search->limitToCompany, -1, buffer, cchBuffer)) { + debug(L"# Skipping %s due to PYLAUNCHER_LIMIT_TO_COMPANY\n", buffer); + continue; + } HKEY subkey; if (ERROR_SUCCESS == RegOpenKeyExW(root, buffer, 0, KEY_READ, &subkey)) { exitCode = _registrySearchTags(search, result, subkey, sortKey, buffer, fallbackArch); @@ -1884,6 +1916,11 @@ collectEnvironments(const SearchInfo *search, EnvironmentInfo **result) } } + if (search->limitToCompany) { + debug(L"# Skipping APPX search due to PYLAUNCHER_LIMIT_TO_COMPANY\n"); + return 0; + } + for (struct AppxSearchInfo *info = APPX_SEARCH; info->familyName; ++info) { exitCode = appxSearch(search, result, info->familyName, info->tag, info->sortKey); if (exitCode && exitCode != RC_NO_PYTHON) { @@ -2053,12 +2090,15 @@ _companyMatches(const SearchInfo *search, const EnvironmentInfo *env) bool -_tagMatches(const SearchInfo *search, const EnvironmentInfo *env) +_tagMatches(const SearchInfo *search, const EnvironmentInfo *env, int searchTagLength) { - if (!search->tag || !search->tagLength) { + if (searchTagLength < 0) { + searchTagLength = search->tagLength; + } + if (!search->tag || !searchTagLength) { return true; } - return _startsWith(env->tag, -1, search->tag, search->tagLength); + return 
_startsWithSeparated(env->tag, -1, search->tag, searchTagLength, L".-"); } @@ -2095,7 +2135,7 @@ _selectEnvironment(const SearchInfo *search, EnvironmentInfo *env, EnvironmentIn } if (!search->oldStyleTag) { - if (_companyMatches(search, env) && _tagMatches(search, env)) { + if (_companyMatches(search, env) && _tagMatches(search, env, -1)) { // Because of how our sort tree is set up, we will walk up the // "prev" side and implicitly select the "best" best. By // returning straight after a match, we skip the entire "next" @@ -2120,7 +2160,7 @@ _selectEnvironment(const SearchInfo *search, EnvironmentInfo *env, EnvironmentIn } } - if (_startsWith(env->tag, -1, search->tag, tagLength)) { + if (_tagMatches(search, env, tagLength)) { if (exclude32Bit && _is32Bit(env)) { debug(L"# Excluding %s/%s because it looks like 32bit\n", env->company, env->tag); } else if (only32Bit && !_is32Bit(env)) { @@ -2147,10 +2187,6 @@ selectEnvironment(const SearchInfo *search, EnvironmentInfo *root, EnvironmentIn *best = NULL; return RC_NO_PYTHON_AT_ALL; } - if (!root->next && !root->prev) { - *best = root; - return 0; - } EnvironmentInfo *result = NULL; int exitCode = _selectEnvironment(search, root, &result); @@ -2560,6 +2596,17 @@ process(int argc, wchar_t ** argv) debug(L"argv0: %s\nversion: %S\n", argv[0], PY_VERSION); } + DWORD len = GetEnvironmentVariableW(L"PYLAUNCHER_LIMIT_TO_COMPANY", NULL, 0); + if (len > 1) { + wchar_t *limitToCompany = allocSearchInfoBuffer(&search, len); + search.limitToCompany = limitToCompany; + if (0 == GetEnvironmentVariableW(L"PYLAUNCHER_LIMIT_TO_COMPANY", limitToCompany, len)) { + exitCode = RC_INTERNAL_ERROR; + winerror(0, L"Failed to read PYLAUNCHER_LIMIT_TO_COMPANY variable"); + goto abort; + } + } + search.originalCmdLine = GetCommandLineW(); exitCode = performSearch(&search, &envs); diff --git a/PC/python3dll.c b/PC/python3dll.c index 931f316bb99843..e300819365756e 100755 --- a/PC/python3dll.c +++ b/PC/python3dll.c @@ -198,6 +198,7 @@ EXPORT_FUNC(PyErr_Format) EXPORT_FUNC(PyErr_FormatV) EXPORT_FUNC(PyErr_GetExcInfo) EXPORT_FUNC(PyErr_GetHandledException) +EXPORT_FUNC(PyErr_GetRaisedException) EXPORT_FUNC(PyErr_GivenExceptionMatches) EXPORT_FUNC(PyErr_NewException) EXPORT_FUNC(PyErr_NewExceptionWithDoc) @@ -227,6 +228,7 @@ EXPORT_FUNC(PyErr_SetInterrupt) EXPORT_FUNC(PyErr_SetInterruptEx) EXPORT_FUNC(PyErr_SetNone) EXPORT_FUNC(PyErr_SetObject) +EXPORT_FUNC(PyErr_SetRaisedException) EXPORT_FUNC(PyErr_SetString) EXPORT_FUNC(PyErr_SyntaxLocation) EXPORT_FUNC(PyErr_SyntaxLocationEx) @@ -255,9 +257,11 @@ EXPORT_FUNC(PyEval_ReleaseThread) EXPORT_FUNC(PyEval_RestoreThread) EXPORT_FUNC(PyEval_SaveThread) EXPORT_FUNC(PyEval_ThreadsInitialized) +EXPORT_FUNC(PyException_GetArgs) EXPORT_FUNC(PyException_GetCause) EXPORT_FUNC(PyException_GetContext) EXPORT_FUNC(PyException_GetTraceback) +EXPORT_FUNC(PyException_SetArgs) EXPORT_FUNC(PyException_SetCause) EXPORT_FUNC(PyException_SetContext) EXPORT_FUNC(PyException_SetTraceback) diff --git a/PCbuild/python.props b/PCbuild/python.props index 971c1490d9f120..57360e57baba66 100644 --- a/PCbuild/python.props +++ b/PCbuild/python.props @@ -56,23 +56,32 @@ ..\\.. ..\\..\\.. 
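_startsWithSeparated() and the reworked _tagMatches() above only accept a tag prefix when the next character is one of the given separators or the end of the string, so a request for 3.1 no longer selects a 3.10 or 3.11 install. A standalone analogue of that rule, using plain case-sensitive wcsncmp instead of the launcher's _compare/MAXLEN helpers; the function name is illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <wchar.h>

static bool
prefix_with_separator(const wchar_t *x, const wchar_t *y, const wchar_t *seps)
{
    size_t ylen = wcslen(y);
    if (wcsncmp(x, y, ylen) != 0) {
        return false;
    }
    /* Whole-string match, or the next character must be a separator. */
    return x[ylen] == L'\0' || wcschr(seps, x[ylen]) != NULL;
}

int main(void)
{
    wprintf(L"%d %d %d %d\n",
            prefix_with_separator(L"3.1",    L"3.1", L".-"),   /* 1 */
            prefix_with_separator(L"3.1-64", L"3.1", L".-"),   /* 1 */
            prefix_with_separator(L"3.10",   L"3.1", L".-"),   /* 0 */
            prefix_with_separator(L"3.11",   L"3.1", L".-"));  /* 0 */
    return 0;
}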
+ - - $(EXTERNALS_DIR) + + + $(EXTERNALS_DIR) $([System.IO.Path]::GetFullPath(`$(PySourcePath)externals`)) $(ExternalsDir)\ - $(ExternalsDir)sqlite-3.39.4.0\ - $(ExternalsDir)bzip2-1.0.8\ - $(ExternalsDir)xz-5.2.5\ - $(ExternalsDir)libffi-3.4.3\ - $(ExternalsDir)libffi-3.4.3\$(ArchName)\ - $(libffiOutDir)include - $(ExternalsDir)openssl-1.1.1s\ - $(ExternalsDir)openssl-bin-1.1.1s\$(ArchName)\ - $(opensslOutDir)include - $(ExternalsDir)\nasm-2.11.06\ - $(ExternalsDir)\zlib-1.2.13\ - + + + + + + $(ExternalsDir)sqlite-3.39.4.0\ + $(ExternalsDir)bzip2-1.0.8\ + $(ExternalsDir)xz-5.2.5\ + $(ExternalsDir)libffi-3.4.3\ + $(libffiDir)$(ArchName)\ + $(libffiOutDir)include + $(ExternalsDir)openssl-1.1.1s\ + $(ExternalsDir)openssl-bin-1.1.1s\$(ArchName)\ + $(opensslOutDir)include + $(ExternalsDir)\nasm-2.11.06\ + $(ExternalsDir)\zlib-1.2.13\ + + + _d diff --git a/PCbuild/pythoncore.vcxproj b/PCbuild/pythoncore.vcxproj index d3bd5b378e0dd0..e8e9ff01e306bc 100644 --- a/PCbuild/pythoncore.vcxproj +++ b/PCbuild/pythoncore.vcxproj @@ -100,7 +100,7 @@ /Zm200 %(AdditionalOptions) - $(PySourcePath)Python;%(AdditionalIncludeDirectories) + $(PySourcePath)Modules\_hacl\include;$(PySourcePath)Modules\_hacl\internal;$(PySourcePath)Python;%(AdditionalIncludeDirectories) $(zlibDir);%(AdditionalIncludeDirectories) _USRDLL;Py_BUILD_CORE;Py_BUILD_CORE_BUILTIN;Py_ENABLE_SHARED;MS_DLL_ID="$(SysWinVer)";%(PreprocessorDefinitions) _Py_HAVE_ZLIB;%(PreprocessorDefinitions) @@ -407,6 +407,7 @@ + @@ -418,6 +419,7 @@ + diff --git a/PCbuild/pythoncore.vcxproj.filters b/PCbuild/pythoncore.vcxproj.filters index c1b531fd818a97..4820db6f2c32dc 100644 --- a/PCbuild/pythoncore.vcxproj.filters +++ b/PCbuild/pythoncore.vcxproj.filters @@ -866,6 +866,9 @@ Modules + + Modules + Modules @@ -1322,6 +1325,9 @@ Modules + + Modules + Parser diff --git a/PCbuild/tcltk.props b/PCbuild/tcltk.props index 15c03e20fe2171..9d5189b3b8e93d 100644 --- a/PCbuild/tcltk.props +++ b/PCbuild/tcltk.props @@ -2,22 +2,25 @@ - 8 - 6 - 13 - 0 - $(TclMajorVersion) - $(TclMinorVersion) - $(TclPatchLevel) - $(TclRevision) - 8 - 4 - 3 - 6 - $(ExternalsDir)tcl-core-$(TclMajorVersion).$(TclMinorVersion).$(TclPatchLevel).$(TclRevision)\ - $(ExternalsDir)tk-$(TkMajorVersion).$(TkMinorVersion).$(TkPatchLevel).$(TkRevision)\ - $(ExternalsDir)tix-$(TixMajorVersion).$(TixMinorVersion).$(TixPatchLevel).$(TixRevision)\ - $(ExternalsDir)tcltk-$(TclMajorVersion).$(TclMinorVersion).$(TclPatchLevel).$(TclRevision)\$(ArchName)\ + 8.6.13.0 + $(TclVersion) + 8.4.3.6 + $([System.Version]::Parse($(TclVersion)).Major) + $([System.Version]::Parse($(TclVersion)).Minor) + $([System.Version]::Parse($(TclVersion)).Build) + $([System.Version]::Parse($(TclVersion)).Revision) + $([System.Version]::Parse($(TkVersion)).Major) + $([System.Version]::Parse($(TkVersion)).Minor) + $([System.Version]::Parse($(TkVersion)).Build) + $([System.Version]::Parse($(TkVersion)).Revision) + $([System.Version]::Parse($(TixVersion)).Major) + $([System.Version]::Parse($(TixVersion)).Minor) + $([System.Version]::Parse($(TixVersion)).Build) + $([System.Version]::Parse($(TixVersion)).Revision) + $(ExternalsDir)tcl-core-$(TclVersion)\ + $(ExternalsDir)tk-$(TkVersion)\ + $(ExternalsDir)tix-$(TixVersion)\ + $(ExternalsDir)tcltk-$(TclVersion)\$(ArchName)\ $(tcltkDir)\bin\tclsh$(TclMajorVersion)$(TclMinorVersion)t.exe $(tcltkDir)\..\win32\bin\tclsh$(TclMajorVersion)$(TclMinorVersion)t.exe diff --git a/Parser/pegen.c b/Parser/pegen.c index d84e06861edefc..94dd9de8a431c1 100644 --- a/Parser/pegen.c +++ b/Parser/pegen.c @@ 
-643,13 +643,10 @@ _PyPegen_number_token(Parser *p) PyThreadState *tstate = _PyThreadState_GET(); // The only way a ValueError should happen in _this_ code is via // PyLong_FromString hitting a length limit. - if (tstate->curexc_type == PyExc_ValueError && - tstate->curexc_value != NULL) { - PyObject *type, *value, *tb; - // This acts as PyErr_Clear() as we're replacing curexc. - PyErr_Fetch(&type, &value, &tb); - Py_XDECREF(tb); - Py_DECREF(type); + if (tstate->current_exception != NULL && + Py_TYPE(tstate->current_exception) == (PyTypeObject *)PyExc_ValueError + ) { + PyObject *exc = PyErr_GetRaisedException(); /* Intentionally omitting columns to avoid a wall of 1000s of '^'s * on the error message. Nobody is going to overlook their huge * numeric literal once given the line. */ @@ -659,8 +656,8 @@ _PyPegen_number_token(Parser *p) t->end_lineno, -1 /* end_col_offset */, "%S - Consider hexadecimal for huge integer literals " "to avoid decimal conversion limits.", - value); - Py_DECREF(value); + exc); + Py_DECREF(exc); } return NULL; } diff --git a/Programs/test_frozenmain.h b/Programs/test_frozenmain.h index 95f78b19e65eb6..8e5055bd7bceb1 100644 --- a/Programs/test_frozenmain.h +++ b/Programs/test_frozenmain.h @@ -1,7 +1,7 @@ // Auto-generated by Programs/freeze_test_frozenmain.py unsigned char M_test_frozenmain[] = { 227,0,0,0,0,0,0,0,0,0,0,0,0,8,0,0, - 0,0,0,0,0,243,184,0,0,0,151,0,100,0,100,1, + 0,0,0,0,0,243,182,0,0,0,151,0,100,0,100,1, 108,0,90,0,100,0,100,1,108,1,90,1,2,0,101,2, 100,2,171,1,0,0,0,0,0,0,0,0,1,0,2,0, 101,2,100,3,101,0,106,6,0,0,0,0,0,0,0,0, @@ -12,28 +12,28 @@ unsigned char M_test_frozenmain[] = { 0,0,0,0,90,5,100,5,68,0,93,23,0,0,90,6, 2,0,101,2,100,6,101,6,155,0,100,7,101,5,101,6, 25,0,0,0,0,0,0,0,0,0,155,0,157,4,171,1, - 0,0,0,0,0,0,0,0,1,0,140,25,4,0,100,1, - 83,0,41,8,233,0,0,0,0,78,122,18,70,114,111,122, - 101,110,32,72,101,108,108,111,32,87,111,114,108,100,122,8, - 115,121,115,46,97,114,103,118,218,6,99,111,110,102,105,103, - 41,5,218,12,112,114,111,103,114,97,109,95,110,97,109,101, - 218,10,101,120,101,99,117,116,97,98,108,101,218,15,117,115, - 101,95,101,110,118,105,114,111,110,109,101,110,116,218,17,99, - 111,110,102,105,103,117,114,101,95,99,95,115,116,100,105,111, - 218,14,98,117,102,102,101,114,101,100,95,115,116,100,105,111, - 122,7,99,111,110,102,105,103,32,122,2,58,32,41,7,218, - 3,115,121,115,218,17,95,116,101,115,116,105,110,116,101,114, - 110,97,108,99,97,112,105,218,5,112,114,105,110,116,218,4, - 97,114,103,118,218,11,103,101,116,95,99,111,110,102,105,103, - 115,114,3,0,0,0,218,3,107,101,121,169,0,243,0,0, - 0,0,250,18,116,101,115,116,95,102,114,111,122,101,110,109, - 97,105,110,46,112,121,250,8,60,109,111,100,117,108,101,62, - 114,18,0,0,0,1,0,0,0,115,100,0,0,0,240,3, - 1,1,1,243,8,0,1,11,219,0,24,225,0,5,208,6, - 26,213,0,27,217,0,5,128,106,144,35,151,40,145,40,213, - 0,27,216,9,38,208,9,26,215,9,38,209,9,38,212,9, - 40,168,24,212,9,50,128,6,240,2,6,12,2,242,0,7, - 1,42,128,67,241,14,0,5,10,208,10,40,144,67,209,10, - 40,152,54,160,35,156,59,209,10,40,214,4,41,242,15,7, - 1,42,114,16,0,0,0, + 0,0,0,0,0,0,0,0,1,0,140,25,4,0,121,1, + 41,8,233,0,0,0,0,78,122,18,70,114,111,122,101,110, + 32,72,101,108,108,111,32,87,111,114,108,100,122,8,115,121, + 115,46,97,114,103,118,218,6,99,111,110,102,105,103,41,5, + 218,12,112,114,111,103,114,97,109,95,110,97,109,101,218,10, + 101,120,101,99,117,116,97,98,108,101,218,15,117,115,101,95, + 101,110,118,105,114,111,110,109,101,110,116,218,17,99,111,110, + 102,105,103,117,114,101,95,99,95,115,116,100,105,111,218,14, 
+ 98,117,102,102,101,114,101,100,95,115,116,100,105,111,122,7, + 99,111,110,102,105,103,32,122,2,58,32,41,7,218,3,115, + 121,115,218,17,95,116,101,115,116,105,110,116,101,114,110,97, + 108,99,97,112,105,218,5,112,114,105,110,116,218,4,97,114, + 103,118,218,11,103,101,116,95,99,111,110,102,105,103,115,114, + 3,0,0,0,218,3,107,101,121,169,0,243,0,0,0,0, + 250,18,116,101,115,116,95,102,114,111,122,101,110,109,97,105, + 110,46,112,121,250,8,60,109,111,100,117,108,101,62,114,18, + 0,0,0,1,0,0,0,115,100,0,0,0,240,3,1,1, + 1,243,8,0,1,11,219,0,24,225,0,5,208,6,26,213, + 0,27,217,0,5,128,106,144,35,151,40,145,40,213,0,27, + 216,9,38,208,9,26,215,9,38,209,9,38,212,9,40,168, + 24,212,9,50,128,6,240,2,6,12,2,242,0,7,1,42, + 128,67,241,14,0,5,10,208,10,40,144,67,209,10,40,152, + 54,160,35,156,59,209,10,40,214,4,41,241,15,7,1,42, + 114,16,0,0,0, }; diff --git a/Python/bytecodes.c b/Python/bytecodes.c index 4e8c6037909aa0..d7d72683519282 100644 --- a/Python/bytecodes.c +++ b/Python/bytecodes.c @@ -561,6 +561,23 @@ dummy_func( goto resume_frame; } + inst(RETURN_CONST, (--)) { + PyObject *retval = GETITEM(consts, oparg); + Py_INCREF(retval); + assert(EMPTY()); + _PyFrame_SetStackPointer(frame, stack_pointer); + TRACE_FUNCTION_EXIT(); + DTRACE_FUNCTION_EXIT(); + _Py_LeaveRecursiveCallPy(tstate); + assert(frame != &entry_frame); + // GH-99729: We need to unlink the frame *before* clearing it: + _PyInterpreterFrame *dying = frame; + frame = cframe.current_frame = dying->previous; + _PyEvalFrameClearAndPop(tstate, dying); + _PyFrame_StackPush(frame, retval); + goto resume_frame; + } + inst(GET_AITER, (obj -- iter)) { unaryfunc getter = NULL; PyTypeObject *type = Py_TYPE(obj); @@ -669,14 +686,10 @@ dummy_func( PREDICT(LOAD_CONST); } - // error: SEND stack effect depends on jump flag - inst(SEND) { + inst(SEND, (receiver, v -- receiver if (!jump), retval)) { assert(frame != &entry_frame); - assert(STACK_LEVEL() >= 2); - PyObject *v = POP(); - PyObject *receiver = TOP(); + bool jump = false; PySendResult gen_status; - PyObject *retval; if (tstate->c_tracefunc == NULL) { gen_status = PyIter_Send(receiver, v, &retval); } else { @@ -701,21 +714,20 @@ dummy_func( gen_status = PYGEN_NEXT; } } - Py_DECREF(v); if (gen_status == PYGEN_ERROR) { assert(retval == NULL); goto error; } + Py_DECREF(v); if (gen_status == PYGEN_RETURN) { assert(retval != NULL); Py_DECREF(receiver); - SET_TOP(retval); JUMPBY(oparg); + jump = true; } else { assert(gen_status == PYGEN_NEXT); assert(retval != NULL); - PUSH(retval); } } @@ -745,10 +757,10 @@ dummy_func( Py_XSETREF(exc_info->exc_value, exc_value); } - // stack effect: (__0 -- ) - inst(RERAISE) { + inst(RERAISE, (values[oparg], exc -- values[oparg])) { + assert(oparg >= 0 && oparg <= 2); if (oparg) { - PyObject *lasti = PEEK(oparg + 1); + PyObject *lasti = values[0]; if (PyLong_Check(lasti)) { frame->prev_instr = _PyCode_CODE(frame->f_code) + PyLong_AsLong(lasti); assert(!_PyErr_Occurred(tstate)); @@ -759,11 +771,11 @@ dummy_func( goto error; } } - PyObject *val = POP(); - assert(val && PyExceptionInstance_Check(val)); - PyObject *exc = Py_NewRef(PyExceptionInstance_Class(val)); - PyObject *tb = PyException_GetTraceback(val); - _PyErr_Restore(tstate, exc, val, tb); + assert(exc && PyExceptionInstance_Check(exc)); + Py_INCREF(exc); + PyObject *typ = Py_NewRef(PyExceptionInstance_Class(exc)); + PyObject *tb = PyException_GetTraceback(exc); + _PyErr_Restore(tstate, typ, exc, tb); goto exception_unwind; } @@ -790,23 +802,15 @@ dummy_func( } } - // stack effect: (__0, __1 -- ) - 
inst(CLEANUP_THROW) { + inst(CLEANUP_THROW, (sub_iter, last_sent_val, exc_value -- value)) { assert(throwflag); - PyObject *exc_value = TOP(); assert(exc_value && PyExceptionInstance_Check(exc_value)); if (PyErr_GivenExceptionMatches(exc_value, PyExc_StopIteration)) { - PyObject *value = ((PyStopIterationObject *)exc_value)->value; - Py_INCREF(value); - Py_DECREF(POP()); // The StopIteration. - Py_DECREF(POP()); // The last sent value. - Py_DECREF(POP()); // The delegated sub-iterator. - PUSH(value); + value = Py_NewRef(((PyStopIterationObject *)exc_value)->value); + DECREF_INPUTS(); } else { - PyObject *exc_type = Py_NewRef(Py_TYPE(exc_value)); - PyObject *exc_traceback = PyException_GetTraceback(exc_value); - _PyErr_Restore(tstate, exc_type, Py_NewRef(exc_value), exc_traceback); + _PyErr_SetRaisedException(tstate, Py_NewRef(exc_value)); goto exception_unwind; } } @@ -876,13 +880,18 @@ dummy_func( } } - // stack effect: (__0 -- __array[oparg]) - inst(UNPACK_SEQUENCE) { + family(unpack_sequence, INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE) = { + UNPACK_SEQUENCE, + UNPACK_SEQUENCE_TWO_TUPLE, + UNPACK_SEQUENCE_TUPLE, + UNPACK_SEQUENCE_LIST, + }; + + inst(UNPACK_SEQUENCE, (unused/1, seq -- unused[oparg])) { #if ENABLE_SPECIALIZATION _PyUnpackSequenceCache *cache = (_PyUnpackSequenceCache *)next_instr; if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) { assert(cframe.use_tracing == 0); - PyObject *seq = TOP(); next_instr--; _Py_Specialize_UnpackSequence(seq, next_instr, oparg); DISPATCH_SAME_OPARG(); @@ -890,70 +899,50 @@ dummy_func( STAT_INC(UNPACK_SEQUENCE, deferred); DECREMENT_ADAPTIVE_COUNTER(cache->counter); #endif /* ENABLE_SPECIALIZATION */ - PyObject *seq = POP(); - PyObject **top = stack_pointer + oparg; - if (!unpack_iterable(tstate, seq, oparg, -1, top)) { - Py_DECREF(seq); - goto error; - } - STACK_GROW(oparg); + PyObject **top = stack_pointer + oparg - 1; + int res = unpack_iterable(tstate, seq, oparg, -1, top); Py_DECREF(seq); - JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE); + ERROR_IF(res == 0, error); } - // stack effect: (__0 -- __array[oparg]) - inst(UNPACK_SEQUENCE_TWO_TUPLE) { - PyObject *seq = TOP(); + inst(UNPACK_SEQUENCE_TWO_TUPLE, (unused/1, seq -- values[oparg])) { DEOPT_IF(!PyTuple_CheckExact(seq), UNPACK_SEQUENCE); DEOPT_IF(PyTuple_GET_SIZE(seq) != 2, UNPACK_SEQUENCE); + assert(oparg == 2); STAT_INC(UNPACK_SEQUENCE, hit); - SET_TOP(Py_NewRef(PyTuple_GET_ITEM(seq, 1))); - PUSH(Py_NewRef(PyTuple_GET_ITEM(seq, 0))); + values[0] = Py_NewRef(PyTuple_GET_ITEM(seq, 1)); + values[1] = Py_NewRef(PyTuple_GET_ITEM(seq, 0)); Py_DECREF(seq); - JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE); } - // stack effect: (__0 -- __array[oparg]) - inst(UNPACK_SEQUENCE_TUPLE) { - PyObject *seq = TOP(); + inst(UNPACK_SEQUENCE_TUPLE, (unused/1, seq -- values[oparg])) { DEOPT_IF(!PyTuple_CheckExact(seq), UNPACK_SEQUENCE); DEOPT_IF(PyTuple_GET_SIZE(seq) != oparg, UNPACK_SEQUENCE); STAT_INC(UNPACK_SEQUENCE, hit); - STACK_SHRINK(1); PyObject **items = _PyTuple_ITEMS(seq); - while (oparg--) { - PUSH(Py_NewRef(items[oparg])); + for (int i = oparg; --i >= 0; ) { + *values++ = Py_NewRef(items[i]); } Py_DECREF(seq); - JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE); } - // stack effect: (__0 -- __array[oparg]) - inst(UNPACK_SEQUENCE_LIST) { - PyObject *seq = TOP(); + inst(UNPACK_SEQUENCE_LIST, (unused/1, seq -- values[oparg])) { DEOPT_IF(!PyList_CheckExact(seq), UNPACK_SEQUENCE); DEOPT_IF(PyList_GET_SIZE(seq) != oparg, UNPACK_SEQUENCE); STAT_INC(UNPACK_SEQUENCE, hit); - STACK_SHRINK(1); PyObject **items = 
_PyList_ITEMS(seq); - while (oparg--) { - PUSH(Py_NewRef(items[oparg])); + for (int i = oparg; --i >= 0; ) { + *values++ = Py_NewRef(items[i]); } Py_DECREF(seq); - JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE); } - // error: UNPACK_EX has irregular stack effect - inst(UNPACK_EX) { + inst(UNPACK_EX, (seq -- unused[oparg & 0xFF], unused, unused[oparg >> 8])) { int totalargs = 1 + (oparg & 0xFF) + (oparg >> 8); - PyObject *seq = POP(); - PyObject **top = stack_pointer + totalargs; - if (!unpack_iterable(tstate, seq, oparg & 0xFF, oparg >> 8, top)) { - Py_DECREF(seq); - goto error; - } - STACK_GROW(totalargs); + PyObject **top = stack_pointer + totalargs - 1; + int res = unpack_iterable(tstate, seq, oparg & 0xFF, oparg >> 8, top); Py_DECREF(seq); + ERROR_IF(res == 0, error); } family(store_attr, INLINE_CACHE_ENTRIES_STORE_ATTR) = { @@ -1074,8 +1063,13 @@ dummy_func( } } - // error: LOAD_GLOBAL has irregular stack effect - inst(LOAD_GLOBAL) { + family(load_global, INLINE_CACHE_ENTRIES_LOAD_GLOBAL) = { + LOAD_GLOBAL, + LOAD_GLOBAL_MODULE, + LOAD_GLOBAL_BUILTIN, + }; + + inst(LOAD_GLOBAL, (unused/1, unused/1, unused/2, unused/1 -- null if (oparg & 1), v)) { #if ENABLE_SPECIALIZATION _PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)next_instr; if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) { @@ -1088,10 +1082,7 @@ dummy_func( STAT_INC(LOAD_GLOBAL, deferred); DECREMENT_ADAPTIVE_COUNTER(cache->counter); #endif /* ENABLE_SPECIALIZATION */ - int push_null = oparg & 1; - PEEK(0) = NULL; PyObject *name = GETITEM(names, oparg>>1); - PyObject *v; if (PyDict_CheckExact(GLOBALS()) && PyDict_CheckExact(BUILTINS())) { @@ -1105,7 +1096,7 @@ dummy_func( format_exc_check_arg(tstate, PyExc_NameError, NAME_ERROR_MSG, name); } - goto error; + ERROR_IF(true, error); } Py_INCREF(v); } @@ -1115,9 +1106,7 @@ dummy_func( /* namespace 1: globals */ v = PyObject_GetItem(GLOBALS(), name); if (v == NULL) { - if (!_PyErr_ExceptionMatches(tstate, PyExc_KeyError)) { - goto error; - } + ERROR_IF(!_PyErr_ExceptionMatches(tstate, PyExc_KeyError), error); _PyErr_Clear(tstate); /* namespace 2: builtins */ @@ -1128,58 +1117,42 @@ dummy_func( tstate, PyExc_NameError, NAME_ERROR_MSG, name); } - goto error; + ERROR_IF(true, error); } } } - /* Skip over inline cache */ - JUMPBY(INLINE_CACHE_ENTRIES_LOAD_GLOBAL); - STACK_GROW(push_null); - PUSH(v); + null = NULL; } - // error: LOAD_GLOBAL has irregular stack effect - inst(LOAD_GLOBAL_MODULE) { + inst(LOAD_GLOBAL_MODULE, (unused/1, index/1, version/2, unused/1 -- null if (oparg & 1), res)) { assert(cframe.use_tracing == 0); DEOPT_IF(!PyDict_CheckExact(GLOBALS()), LOAD_GLOBAL); PyDictObject *dict = (PyDictObject *)GLOBALS(); - _PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)next_instr; - uint32_t version = read_u32(cache->module_keys_version); DEOPT_IF(dict->ma_keys->dk_version != version, LOAD_GLOBAL); assert(DK_IS_UNICODE(dict->ma_keys)); PyDictUnicodeEntry *entries = DK_UNICODE_ENTRIES(dict->ma_keys); - PyObject *res = entries[cache->index].me_value; + res = entries[index].me_value; DEOPT_IF(res == NULL, LOAD_GLOBAL); - int push_null = oparg & 1; - PEEK(0) = NULL; - JUMPBY(INLINE_CACHE_ENTRIES_LOAD_GLOBAL); + Py_INCREF(res); STAT_INC(LOAD_GLOBAL, hit); - STACK_GROW(push_null+1); - SET_TOP(Py_NewRef(res)); + null = NULL; } - // error: LOAD_GLOBAL has irregular stack effect - inst(LOAD_GLOBAL_BUILTIN) { + inst(LOAD_GLOBAL_BUILTIN, (unused/1, index/1, mod_version/2, bltn_version/1 -- null if (oparg & 1), res)) { assert(cframe.use_tracing == 0); DEOPT_IF(!PyDict_CheckExact(GLOBALS()), 
LOAD_GLOBAL); DEOPT_IF(!PyDict_CheckExact(BUILTINS()), LOAD_GLOBAL); PyDictObject *mdict = (PyDictObject *)GLOBALS(); PyDictObject *bdict = (PyDictObject *)BUILTINS(); - _PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)next_instr; - uint32_t mod_version = read_u32(cache->module_keys_version); - uint16_t bltn_version = cache->builtin_keys_version; DEOPT_IF(mdict->ma_keys->dk_version != mod_version, LOAD_GLOBAL); DEOPT_IF(bdict->ma_keys->dk_version != bltn_version, LOAD_GLOBAL); assert(DK_IS_UNICODE(bdict->ma_keys)); PyDictUnicodeEntry *entries = DK_UNICODE_ENTRIES(bdict->ma_keys); - PyObject *res = entries[cache->index].me_value; + res = entries[index].me_value; DEOPT_IF(res == NULL, LOAD_GLOBAL); - int push_null = oparg & 1; - PEEK(0) = NULL; - JUMPBY(INLINE_CACHE_ENTRIES_LOAD_GLOBAL); + Py_INCREF(res); STAT_INC(LOAD_GLOBAL, hit); - STACK_GROW(push_null+1); - SET_TOP(Py_NewRef(res)); + null = NULL; } inst(DELETE_FAST, (--)) { @@ -2071,56 +2044,63 @@ dummy_func( ERROR_IF(iter == NULL, error); } - // stack effect: ( -- ) - inst(GET_YIELD_FROM_ITER) { + inst(GET_YIELD_FROM_ITER, (iterable -- iter)) { /* before: [obj]; after [getiter(obj)] */ - PyObject *iterable = TOP(); - PyObject *iter; if (PyCoro_CheckExact(iterable)) { /* `iterable` is a coroutine */ if (!(frame->f_code->co_flags & (CO_COROUTINE | CO_ITERABLE_COROUTINE))) { /* and it is used in a 'yield from' expression of a regular generator. */ - Py_DECREF(iterable); - SET_TOP(NULL); _PyErr_SetString(tstate, PyExc_TypeError, "cannot 'yield from' a coroutine object " "in a non-coroutine generator"); goto error; } + iter = iterable; } - else if (!PyGen_CheckExact(iterable)) { + else if (PyGen_CheckExact(iterable)) { + iter = iterable; + } + else { /* `iterable` is not a generator. */ iter = PyObject_GetIter(iterable); - Py_DECREF(iterable); - SET_TOP(iter); - if (iter == NULL) + if (iter == NULL) { goto error; + } + Py_DECREF(iterable); } PREDICT(LOAD_CONST); } - // stack effect: ( -- __0) - inst(FOR_ITER) { + // Most members of this family are "secretly" super-instructions. + // When the loop is exhausted, they jump, and the jump target is + // always END_FOR, which pops two values off the stack. + // This is optimized by skipping that instruction and combining + // its effect (popping 'iter' instead of pushing 'next'.) + + family(for_iter, INLINE_CACHE_ENTRIES_FOR_ITER) = { + FOR_ITER, + FOR_ITER_LIST, + FOR_ITER_TUPLE, + FOR_ITER_RANGE, + FOR_ITER_GEN, + }; + + inst(FOR_ITER, (unused/1, iter -- iter, next)) { #if ENABLE_SPECIALIZATION _PyForIterCache *cache = (_PyForIterCache *)next_instr; if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) { assert(cframe.use_tracing == 0); next_instr--; - _Py_Specialize_ForIter(TOP(), next_instr, oparg); + _Py_Specialize_ForIter(iter, next_instr, oparg); DISPATCH_SAME_OPARG(); } STAT_INC(FOR_ITER, deferred); DECREMENT_ADAPTIVE_COUNTER(cache->counter); #endif /* ENABLE_SPECIALIZATION */ - /* before: [iter]; after: [iter, iter()] *or* [] */ - PyObject *iter = TOP(); - PyObject *next = (*Py_TYPE(iter)->tp_iternext)(iter); - if (next != NULL) { - PUSH(next); - JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER); - } - else { + /* before: [iter]; after: [iter, iter()] *or* [] (and jump over END_FOR.) 
*/ + next = (*Py_TYPE(iter)->tp_iternext)(iter); + if (next == NULL) { if (_PyErr_Occurred(tstate)) { if (!_PyErr_ExceptionMatches(tstate, PyExc_StopIteration)) { goto error; @@ -2132,63 +2112,66 @@ dummy_func( } /* iterator ended normally */ assert(_Py_OPCODE(next_instr[INLINE_CACHE_ENTRIES_FOR_ITER + oparg]) == END_FOR); - STACK_SHRINK(1); Py_DECREF(iter); - /* Skip END_FOR */ + STACK_SHRINK(1); + /* Jump forward oparg, then skip following END_FOR instruction */ JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER + oparg + 1); + DISPATCH(); } + // Common case: no jump, leave it to the code generator } - // stack effect: ( -- __0) - inst(FOR_ITER_LIST) { + inst(FOR_ITER_LIST, (unused/1, iter -- iter, next)) { assert(cframe.use_tracing == 0); - _PyListIterObject *it = (_PyListIterObject *)TOP(); - DEOPT_IF(Py_TYPE(it) != &PyListIter_Type, FOR_ITER); + DEOPT_IF(Py_TYPE(iter) != &PyListIter_Type, FOR_ITER); + _PyListIterObject *it = (_PyListIterObject *)iter; STAT_INC(FOR_ITER, hit); PyListObject *seq = it->it_seq; if (seq) { if (it->it_index < PyList_GET_SIZE(seq)) { - PyObject *next = PyList_GET_ITEM(seq, it->it_index++); - PUSH(Py_NewRef(next)); - JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER); + next = Py_NewRef(PyList_GET_ITEM(seq, it->it_index++)); goto end_for_iter_list; // End of this instruction } it->it_seq = NULL; Py_DECREF(seq); } + Py_DECREF(iter); STACK_SHRINK(1); - Py_DECREF(it); + /* Jump forward oparg, then skip following END_FOR instruction */ JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER + oparg + 1); + DISPATCH(); end_for_iter_list: + // Common case: no jump, leave it to the code generator } - // stack effect: ( -- __0) - inst(FOR_ITER_TUPLE) { + inst(FOR_ITER_TUPLE, (unused/1, iter -- iter, next)) { assert(cframe.use_tracing == 0); - _PyTupleIterObject *it = (_PyTupleIterObject *)TOP(); + _PyTupleIterObject *it = (_PyTupleIterObject *)iter; DEOPT_IF(Py_TYPE(it) != &PyTupleIter_Type, FOR_ITER); STAT_INC(FOR_ITER, hit); PyTupleObject *seq = it->it_seq; if (seq) { if (it->it_index < PyTuple_GET_SIZE(seq)) { - PyObject *next = PyTuple_GET_ITEM(seq, it->it_index++); - PUSH(Py_NewRef(next)); - JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER); + next = Py_NewRef(PyTuple_GET_ITEM(seq, it->it_index++)); goto end_for_iter_tuple; // End of this instruction } it->it_seq = NULL; Py_DECREF(seq); } + Py_DECREF(iter); STACK_SHRINK(1); - Py_DECREF(it); + /* Jump forward oparg, then skip following END_FOR instruction */ JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER + oparg + 1); + DISPATCH(); end_for_iter_tuple: + // Common case: no jump, leave it to the code generator } - // stack effect: ( -- __0) - inst(FOR_ITER_RANGE) { + // This is slightly different, when the loop isn't terminated we + // jump over the immediately following STORE_FAST instruction. + inst(FOR_ITER_RANGE, (unused/1, iter -- iter, unused)) { assert(cframe.use_tracing == 0); - _PyRangeIterObject *r = (_PyRangeIterObject *)TOP(); + _PyRangeIterObject *r = (_PyRangeIterObject *)iter; DEOPT_IF(Py_TYPE(r) != &PyRangeIter_Type, FOR_ITER); STAT_INC(FOR_ITER, hit); _Py_CODEUNIT next = next_instr[INLINE_CACHE_ENTRIES_FOR_ITER]; @@ -2196,6 +2179,7 @@ dummy_func( if (r->len <= 0) { STACK_SHRINK(1); Py_DECREF(r); + // Jump over END_FOR instruction. JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER + oparg + 1); } else { @@ -2208,11 +2192,13 @@ dummy_func( // The STORE_FAST is already done. JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER + 1); } + DISPATCH(); } - inst(FOR_ITER_GEN) { + // This is *not* a super-instruction, unique in the family. 
+ inst(FOR_ITER_GEN, (unused/1, iter -- iter, unused)) { assert(cframe.use_tracing == 0); - PyGenObject *gen = (PyGenObject *)TOP(); + PyGenObject *gen = (PyGenObject *)iter; DEOPT_IF(Py_TYPE(gen) != &PyGen_Type, FOR_ITER); DEOPT_IF(gen->gi_frame_state >= FRAME_EXECUTING, FOR_ITER); STAT_INC(FOR_ITER, hit); @@ -2380,70 +2366,87 @@ dummy_func( assert(oparg & 1); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_BOUND_METHOD_EXACT_ARGS) { - DEOPT_IF(is_method(stack_pointer, oparg), CALL); - PyObject *function = PEEK(oparg + 1); - DEOPT_IF(Py_TYPE(function) != &PyMethod_Type, CALL); - STAT_INC(CALL, hit); - PyObject *self = ((PyMethodObject *)function)->im_self; - PEEK(oparg + 1) = Py_NewRef(self); - PyObject *meth = ((PyMethodObject *)function)->im_func; - PEEK(oparg + 2) = Py_NewRef(meth); - Py_DECREF(function); - GO_TO_INSTRUCTION(CALL_PY_EXACT_ARGS); - } - inst(KW_NAMES, (--)) { assert(kwnames == NULL); assert(oparg < PyTuple_GET_SIZE(consts)); kwnames = GETITEM(consts, oparg); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL) { + // Cache layout: counter/1, func_version/2, min_args/1 + // Neither CALL_INTRINSIC_1 nor CALL_FUNCTION_EX are members! + family(call, INLINE_CACHE_ENTRIES_CALL) = { + CALL, + CALL_BOUND_METHOD_EXACT_ARGS, + CALL_PY_EXACT_ARGS, + CALL_PY_WITH_DEFAULTS, + CALL_NO_KW_TYPE_1, + CALL_NO_KW_STR_1, + CALL_NO_KW_TUPLE_1, + CALL_BUILTIN_CLASS, + CALL_NO_KW_BUILTIN_O, + CALL_NO_KW_BUILTIN_FAST, + CALL_BUILTIN_FAST_WITH_KEYWORDS, + CALL_NO_KW_LEN, + CALL_NO_KW_ISINSTANCE, + CALL_NO_KW_LIST_APPEND, + CALL_NO_KW_METHOD_DESCRIPTOR_O, + CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS, + CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS, + CALL_NO_KW_METHOD_DESCRIPTOR_FAST, + }; + + // On entry, the stack is either + // [NULL, callable, arg1, arg2, ...] + // or + // [method, self, arg1, arg2, ...] + // (Some args may be keywords, see KW_NAMES, which sets 'kwnames'.) + // On exit, the stack is [result]. + // When calling Python, inline the call using DISPATCH_INLINED(). 
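+    // Illustrative example (approximate 3.12 bytecode, not part of the patch):
+    // a plain call f(x) compiles to roughly PUSH_NULL; LOAD_NAME f; LOAD_NAME x; CALL 1,
+    // so this instruction sees [NULL, f, x] with oparg == 1; a method call o.m(x)
+    // usually pushes the unbound function and self instead, so it sees [m, o, x]
+    // and no extra NULL.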
+ inst(CALL, (unused/1, unused/2, unused/1, method, callable, args[oparg] -- res)) { + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } #if ENABLE_SPECIALIZATION _PyCallCache *cache = (_PyCallCache *)next_instr; if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) { assert(cframe.use_tracing == 0); - int is_meth = is_method(stack_pointer, oparg); - int nargs = oparg + is_meth; - PyObject *callable = PEEK(nargs + 1); next_instr--; - _Py_Specialize_Call(callable, next_instr, nargs, kwnames); + _Py_Specialize_Call(callable, next_instr, total_args, kwnames); DISPATCH_SAME_OPARG(); } STAT_INC(CALL, deferred); DECREMENT_ADAPTIVE_COUNTER(cache->counter); #endif /* ENABLE_SPECIALIZATION */ - int total_args, is_meth; - is_meth = is_method(stack_pointer, oparg); - PyObject *function = PEEK(oparg + 1); - if (!is_meth && Py_TYPE(function) == &PyMethod_Type) { - PyObject *self = ((PyMethodObject *)function)->im_self; - PEEK(oparg+1) = Py_NewRef(self); - PyObject *meth = ((PyMethodObject *)function)->im_func; - PEEK(oparg+2) = Py_NewRef(meth); - Py_DECREF(function); - is_meth = 1; - } - total_args = oparg + is_meth; - function = PEEK(total_args + 1); + if (!is_meth && Py_TYPE(callable) == &PyMethod_Type) { + is_meth = 1; // For consistency; it's dead, though + args--; + total_args++; + PyObject *self = ((PyMethodObject *)callable)->im_self; + args[0] = Py_NewRef(self); + method = ((PyMethodObject *)callable)->im_func; + args[-1] = Py_NewRef(method); + Py_DECREF(callable); + callable = method; + } int positional_args = total_args - KWNAMES_LEN(); // Check if the call can be inlined or not - if (Py_TYPE(function) == &PyFunction_Type && + if (Py_TYPE(callable) == &PyFunction_Type && tstate->interp->eval_frame == NULL && - ((PyFunctionObject *)function)->vectorcall == _PyFunction_Vectorcall) + ((PyFunctionObject *)callable)->vectorcall == _PyFunction_Vectorcall) { - int code_flags = ((PyCodeObject*)PyFunction_GET_CODE(function))->co_flags; - PyObject *locals = code_flags & CO_OPTIMIZED ? NULL : Py_NewRef(PyFunction_GET_GLOBALS(function)); - STACK_SHRINK(total_args); + int code_flags = ((PyCodeObject*)PyFunction_GET_CODE(callable))->co_flags; + PyObject *locals = code_flags & CO_OPTIMIZED ? NULL : Py_NewRef(PyFunction_GET_GLOBALS(callable)); _PyInterpreterFrame *new_frame = _PyEvalFramePushAndInit( - tstate, (PyFunctionObject *)function, locals, - stack_pointer, positional_args, kwnames + tstate, (PyFunctionObject *)callable, locals, + args, positional_args, kwnames ); kwnames = NULL; - STACK_SHRINK(2-is_meth); + // Manipulate stack directly since we leave using DISPATCH_INLINED(). + STACK_SHRINK(oparg + 2); // The frame has stolen all the arguments from the stack, // so there is no need to clean them up.
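+            // (The +2 above covers the callable and the NULL/method slot that
+            //  sit below the oparg arguments.)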
if (new_frame == NULL) { @@ -2453,190 +2456,180 @@ dummy_func( DISPATCH_INLINED(new_frame); } /* Callable is not a normal Python function */ - PyObject *res; if (cframe.use_tracing) { res = trace_call_function( - tstate, function, stack_pointer-total_args, + tstate, callable, args, positional_args, kwnames); } else { res = PyObject_Vectorcall( - function, stack_pointer-total_args, + callable, args, positional_args | PY_VECTORCALL_ARGUMENTS_OFFSET, kwnames); } kwnames = NULL; assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); - Py_DECREF(function); - /* Clear the stack */ - STACK_SHRINK(total_args); + Py_DECREF(callable); for (int i = 0; i < total_args; i++) { - Py_DECREF(stack_pointer[i]); + Py_DECREF(args[i]); } - STACK_SHRINK(2-is_meth); - PUSH(res); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + ERROR_IF(res == NULL, error); CHECK_EVAL_BREAKER(); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_PY_EXACT_ARGS) { + // Start out with [NULL, bound_method, arg1, arg2, ...] + // Transform to [callable, self, arg1, arg2, ...] + // Then fall through to CALL_PY_EXACT_ARGS + inst(CALL_BOUND_METHOD_EXACT_ARGS, (unused/1, unused/2, unused/1, method, callable, unused[oparg] -- unused)) { + DEOPT_IF(method != NULL, CALL); + DEOPT_IF(Py_TYPE(callable) != &PyMethod_Type, CALL); + STAT_INC(CALL, hit); + PyObject *self = ((PyMethodObject *)callable)->im_self; + PEEK(oparg + 1) = Py_NewRef(self); // callable + PyObject *meth = ((PyMethodObject *)callable)->im_func; + PEEK(oparg + 2) = Py_NewRef(meth); // method + Py_DECREF(callable); + GO_TO_INSTRUCTION(CALL_PY_EXACT_ARGS); + } + + inst(CALL_PY_EXACT_ARGS, (unused/1, func_version/2, unused/1, method, callable, args[oparg] -- unused)) { assert(kwnames == NULL); DEOPT_IF(tstate->interp->eval_frame, CALL); - _PyCallCache *cache = (_PyCallCache *)next_instr; - int is_meth = is_method(stack_pointer, oparg); - int argcount = oparg + is_meth; - PyObject *callable = PEEK(argcount + 1); + int is_meth = method != NULL; + int argcount = oparg; + if (is_meth) { + callable = method; + args--; + argcount++; + } DEOPT_IF(!PyFunction_Check(callable), CALL); PyFunctionObject *func = (PyFunctionObject *)callable; - DEOPT_IF(func->func_version != read_u32(cache->func_version), CALL); + DEOPT_IF(func->func_version != func_version, CALL); PyCodeObject *code = (PyCodeObject *)func->func_code; DEOPT_IF(code->co_argcount != argcount, CALL); DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), CALL); STAT_INC(CALL, hit); _PyInterpreterFrame *new_frame = _PyFrame_PushUnchecked(tstate, func, argcount); - STACK_SHRINK(argcount); for (int i = 0; i < argcount; i++) { - new_frame->localsplus[i] = stack_pointer[i]; + new_frame->localsplus[i] = args[i]; } - STACK_SHRINK(2-is_meth); + // Manipulate stack directly since we leave using DISPATCH_INLINED(). 
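+        // (Leaving through DISPATCH_INLINED() skips the stack adjustment the
+        //  code generator would normally emit at the end of the instruction,
+        //  so the shrink below has to be spelled out by hand.)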
+ STACK_SHRINK(oparg + 2); JUMPBY(INLINE_CACHE_ENTRIES_CALL); DISPATCH_INLINED(new_frame); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_PY_WITH_DEFAULTS) { + inst(CALL_PY_WITH_DEFAULTS, (unused/1, func_version/2, min_args/1, method, callable, args[oparg] -- unused)) { assert(kwnames == NULL); DEOPT_IF(tstate->interp->eval_frame, CALL); - _PyCallCache *cache = (_PyCallCache *)next_instr; - int is_meth = is_method(stack_pointer, oparg); - int argcount = oparg + is_meth; - PyObject *callable = PEEK(argcount + 1); + int is_meth = method != NULL; + int argcount = oparg; + if (is_meth) { + callable = method; + args--; + argcount++; + } DEOPT_IF(!PyFunction_Check(callable), CALL); PyFunctionObject *func = (PyFunctionObject *)callable; - DEOPT_IF(func->func_version != read_u32(cache->func_version), CALL); + DEOPT_IF(func->func_version != func_version, CALL); PyCodeObject *code = (PyCodeObject *)func->func_code; DEOPT_IF(argcount > code->co_argcount, CALL); - int minargs = cache->min_args; - DEOPT_IF(argcount < minargs, CALL); + DEOPT_IF(argcount < min_args, CALL); DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), CALL); STAT_INC(CALL, hit); _PyInterpreterFrame *new_frame = _PyFrame_PushUnchecked(tstate, func, code->co_argcount); - STACK_SHRINK(argcount); for (int i = 0; i < argcount; i++) { - new_frame->localsplus[i] = stack_pointer[i]; + new_frame->localsplus[i] = args[i]; } for (int i = argcount; i < code->co_argcount; i++) { - PyObject *def = PyTuple_GET_ITEM(func->func_defaults, - i - minargs); + PyObject *def = PyTuple_GET_ITEM(func->func_defaults, i - min_args); new_frame->localsplus[i] = Py_NewRef(def); } - STACK_SHRINK(2-is_meth); + // Manipulate stack and cache directly since we leave using DISPATCH_INLINED(). + STACK_SHRINK(oparg + 2); JUMPBY(INLINE_CACHE_ENTRIES_CALL); DISPATCH_INLINED(new_frame); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_NO_KW_TYPE_1) { + inst(CALL_NO_KW_TYPE_1, (unused/1, unused/2, unused/1, null, callable, args[oparg] -- res)) { assert(kwnames == NULL); assert(cframe.use_tracing == 0); assert(oparg == 1); - DEOPT_IF(is_method(stack_pointer, 1), CALL); - PyObject *obj = TOP(); - PyObject *callable = SECOND(); + DEOPT_IF(null != NULL, CALL); + PyObject *obj = args[0]; DEOPT_IF(callable != (PyObject *)&PyType_Type, CALL); STAT_INC(CALL, hit); - JUMPBY(INLINE_CACHE_ENTRIES_CALL); - PyObject *res = Py_NewRef(Py_TYPE(obj)); - Py_DECREF(callable); + res = Py_NewRef(Py_TYPE(obj)); Py_DECREF(obj); - STACK_SHRINK(2); - SET_TOP(res); + Py_DECREF(&PyType_Type); // I.e., callable } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_NO_KW_STR_1) { + inst(CALL_NO_KW_STR_1, (unused/1, unused/2, unused/1, null, callable, args[oparg] -- res)) { assert(kwnames == NULL); assert(cframe.use_tracing == 0); assert(oparg == 1); - DEOPT_IF(is_method(stack_pointer, 1), CALL); - PyObject *callable = PEEK(2); + DEOPT_IF(null != NULL, CALL); DEOPT_IF(callable != (PyObject *)&PyUnicode_Type, CALL); STAT_INC(CALL, hit); - PyObject *arg = TOP(); - PyObject *res = PyObject_Str(arg); + PyObject *arg = args[0]; + res = PyObject_Str(arg); Py_DECREF(arg); - Py_DECREF(&PyUnicode_Type); - STACK_SHRINK(2); - SET_TOP(res); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + Py_DECREF(&PyUnicode_Type); // I.e., callable + ERROR_IF(res == NULL, error); CHECK_EVAL_BREAKER(); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_NO_KW_TUPLE_1) { + inst(CALL_NO_KW_TUPLE_1, (unused/1, unused/2, unused/1, null, callable, 
args[oparg] -- res)) { assert(kwnames == NULL); assert(oparg == 1); - DEOPT_IF(is_method(stack_pointer, 1), CALL); - PyObject *callable = PEEK(2); + DEOPT_IF(null != NULL, CALL); DEOPT_IF(callable != (PyObject *)&PyTuple_Type, CALL); STAT_INC(CALL, hit); - PyObject *arg = TOP(); - PyObject *res = PySequence_Tuple(arg); + PyObject *arg = args[0]; + res = PySequence_Tuple(arg); Py_DECREF(arg); - Py_DECREF(&PyTuple_Type); - STACK_SHRINK(2); - SET_TOP(res); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + Py_DECREF(&PyTuple_Type); // I.e., tuple + ERROR_IF(res == NULL, error); CHECK_EVAL_BREAKER(); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_BUILTIN_CLASS) { - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + inst(CALL_BUILTIN_CLASS, (unused/1, unused/2, unused/1, method, callable, args[oparg] -- res)) { + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } int kwnames_len = KWNAMES_LEN(); - PyObject *callable = PEEK(total_args + 1); DEOPT_IF(!PyType_Check(callable), CALL); PyTypeObject *tp = (PyTypeObject *)callable; DEOPT_IF(tp->tp_vectorcall == NULL, CALL); STAT_INC(CALL, hit); - STACK_SHRINK(total_args); - PyObject *res = tp->tp_vectorcall((PyObject *)tp, stack_pointer, - total_args-kwnames_len, kwnames); + res = tp->tp_vectorcall((PyObject *)tp, args, + total_args - kwnames_len, kwnames); kwnames = NULL; /* Free the arguments. */ for (int i = 0; i < total_args; i++) { - Py_DECREF(stack_pointer[i]); + Py_DECREF(args[i]); } Py_DECREF(tp); - STACK_SHRINK(1-is_meth); - SET_TOP(res); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + ERROR_IF(res == NULL, error); CHECK_EVAL_BREAKER(); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_NO_KW_BUILTIN_O) { + inst(CALL_NO_KW_BUILTIN_O, (unused/1, unused/2, unused/1, method, callable, args[oparg] -- res)) { assert(cframe.use_tracing == 0); /* Builtin METH_O functions */ assert(kwnames == NULL); - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } DEOPT_IF(total_args != 1, CALL); - PyObject *callable = PEEK(total_args + 1); DEOPT_IF(!PyCFunction_CheckExact(callable), CALL); DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_O, CALL); STAT_INC(CALL, hit); @@ -2646,81 +2639,74 @@ dummy_func( if (_Py_EnterRecursiveCallTstate(tstate, " while calling a Python object")) { goto error; } - PyObject *arg = TOP(); - PyObject *res = _PyCFunction_TrampolineCall(cfunc, PyCFunction_GET_SELF(callable), arg); + PyObject *arg = args[0]; + res = _PyCFunction_TrampolineCall(cfunc, PyCFunction_GET_SELF(callable), arg); _Py_LeaveRecursiveCallTstate(tstate); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); Py_DECREF(arg); Py_DECREF(callable); - STACK_SHRINK(2-is_meth); - SET_TOP(res); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + ERROR_IF(res == NULL, error); CHECK_EVAL_BREAKER(); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_NO_KW_BUILTIN_FAST) { + inst(CALL_NO_KW_BUILTIN_FAST, (unused/1, unused/2, unused/1, method, callable, args[oparg] -- res)) { assert(cframe.use_tracing == 0); /* Builtin METH_FASTCALL functions, without keywords */ assert(kwnames == NULL); - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; - PyObject *callable = 
PEEK(total_args + 1); + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } DEOPT_IF(!PyCFunction_CheckExact(callable), CALL); - DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_FASTCALL, - CALL); + DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_FASTCALL, CALL); STAT_INC(CALL, hit); PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable); - STACK_SHRINK(total_args); /* res = func(self, args, nargs) */ - PyObject *res = ((_PyCFunctionFast)(void(*)(void))cfunc)( + res = ((_PyCFunctionFast)(void(*)(void))cfunc)( PyCFunction_GET_SELF(callable), - stack_pointer, + args, total_args); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); /* Free the arguments. */ for (int i = 0; i < total_args; i++) { - Py_DECREF(stack_pointer[i]); + Py_DECREF(args[i]); } - STACK_SHRINK(2-is_meth); - PUSH(res); Py_DECREF(callable); - if (res == NULL) { + ERROR_IF(res == NULL, error); /* Not deopting because this doesn't mean our optimization was wrong. `res` can be NULL for valid reasons. Eg. getattr(x, 'invalid'). In those cases an exception is set, so we must handle it. */ - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); CHECK_EVAL_BREAKER(); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_BUILTIN_FAST_WITH_KEYWORDS) { + inst(CALL_BUILTIN_FAST_WITH_KEYWORDS, (unused/1, unused/2, unused/1, method, callable, args[oparg] -- res)) { assert(cframe.use_tracing == 0); /* Builtin METH_FASTCALL | METH_KEYWORDS functions */ - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; - PyObject *callable = PEEK(total_args + 1); + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } DEOPT_IF(!PyCFunction_CheckExact(callable), CALL); DEOPT_IF(PyCFunction_GET_FLAGS(callable) != (METH_FASTCALL | METH_KEYWORDS), CALL); STAT_INC(CALL, hit); - STACK_SHRINK(total_args); /* res = func(self, args, nargs, kwnames) */ _PyCFunctionFastWithKeywords cfunc = (_PyCFunctionFastWithKeywords)(void(*)(void)) PyCFunction_GET_FUNCTION(callable); - PyObject *res = cfunc( + res = cfunc( PyCFunction_GET_SELF(callable), - stack_pointer, + args, total_args - KWNAMES_LEN(), kwnames ); @@ -2729,117 +2715,109 @@ dummy_func( /* Free the arguments. 
*/ for (int i = 0; i < total_args; i++) { - Py_DECREF(stack_pointer[i]); + Py_DECREF(args[i]); } - STACK_SHRINK(2-is_meth); - PUSH(res); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + ERROR_IF(res == NULL, error); CHECK_EVAL_BREAKER(); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_NO_KW_LEN) { + inst(CALL_NO_KW_LEN, (unused/1, unused/2, unused/1, method, callable, args[oparg] -- res)) { assert(cframe.use_tracing == 0); assert(kwnames == NULL); /* len(o) */ - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } DEOPT_IF(total_args != 1, CALL); - PyObject *callable = PEEK(total_args + 1); PyInterpreterState *interp = _PyInterpreterState_GET(); DEOPT_IF(callable != interp->callable_cache.len, CALL); STAT_INC(CALL, hit); - PyObject *arg = TOP(); + PyObject *arg = args[0]; Py_ssize_t len_i = PyObject_Length(arg); if (len_i < 0) { goto error; } - PyObject *res = PyLong_FromSsize_t(len_i); + res = PyLong_FromSsize_t(len_i); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); - STACK_SHRINK(2-is_meth); - SET_TOP(res); Py_DECREF(callable); Py_DECREF(arg); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + ERROR_IF(res == NULL, error); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_NO_KW_ISINSTANCE) { + inst(CALL_NO_KW_ISINSTANCE, (unused/1, unused/2, unused/1, method, callable, args[oparg] -- res)) { assert(cframe.use_tracing == 0); assert(kwnames == NULL); /* isinstance(o, o2) */ - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; - PyObject *callable = PEEK(total_args + 1); + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } DEOPT_IF(total_args != 2, CALL); PyInterpreterState *interp = _PyInterpreterState_GET(); DEOPT_IF(callable != interp->callable_cache.isinstance, CALL); STAT_INC(CALL, hit); - PyObject *cls = POP(); - PyObject *inst = TOP(); + PyObject *cls = args[1]; + PyObject *inst = args[0]; int retval = PyObject_IsInstance(inst, cls); if (retval < 0) { - Py_DECREF(cls); goto error; } - PyObject *res = PyBool_FromLong(retval); + res = PyBool_FromLong(retval); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); - STACK_SHRINK(2-is_meth); - SET_TOP(res); Py_DECREF(inst); Py_DECREF(cls); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + ERROR_IF(res == NULL, error); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_NO_KW_LIST_APPEND) { + // This is secretly a super-instruction + inst(CALL_NO_KW_LIST_APPEND, (unused/1, unused/2, unused/1, method, self, args[oparg] -- unused)) { assert(cframe.use_tracing == 0); assert(kwnames == NULL); assert(oparg == 1); - PyObject *callable = PEEK(3); + assert(method != NULL); PyInterpreterState *interp = _PyInterpreterState_GET(); - DEOPT_IF(callable != interp->callable_cache.list_append, CALL); - PyObject *list = SECOND(); - DEOPT_IF(!PyList_Check(list), CALL); + DEOPT_IF(method != interp->callable_cache.list_append, CALL); + DEOPT_IF(!PyList_Check(self), CALL); STAT_INC(CALL, hit); - PyObject *arg = POP(); - if (_PyList_AppendTakeRef((PyListObject *)list, arg) < 0) { - goto error; + if (_PyList_AppendTakeRef((PyListObject *)self, args[0]) < 0) { + goto pop_1_error; // Since arg is DECREF'ed already } - STACK_SHRINK(2); - 
Py_DECREF(list); - Py_DECREF(callable); + Py_DECREF(self); + Py_DECREF(method); + STACK_SHRINK(3); // CALL + POP_TOP JUMPBY(INLINE_CACHE_ENTRIES_CALL + 1); assert(_Py_OPCODE(next_instr[-1]) == POP_TOP); + DISPATCH(); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_NO_KW_METHOD_DESCRIPTOR_O) { + inst(CALL_NO_KW_METHOD_DESCRIPTOR_O, (unused/1, unused/2, unused/1, method, unused, args[oparg] -- res)) { assert(kwnames == NULL); - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + args--; + total_args++; + } PyMethodDescrObject *callable = (PyMethodDescrObject *)PEEK(total_args + 1); DEOPT_IF(total_args != 2, CALL); DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), CALL); PyMethodDef *meth = callable->d_method; DEOPT_IF(meth->ml_flags != METH_O, CALL); - PyObject *arg = TOP(); - PyObject *self = SECOND(); + PyObject *arg = args[1]; + PyObject *self = args[0]; DEOPT_IF(!Py_IS_TYPE(self, callable->d_common.d_type), CALL); STAT_INC(CALL, hit); PyCFunction cfunc = meth->ml_meth; @@ -2848,69 +2826,62 @@ dummy_func( if (_Py_EnterRecursiveCallTstate(tstate, " while calling a Python object")) { goto error; } - PyObject *res = _PyCFunction_TrampolineCall(cfunc, self, arg); + res = _PyCFunction_TrampolineCall(cfunc, self, arg); _Py_LeaveRecursiveCallTstate(tstate); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); Py_DECREF(self); Py_DECREF(arg); - STACK_SHRINK(oparg + 1); - SET_TOP(res); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + ERROR_IF(res == NULL, error); CHECK_EVAL_BREAKER(); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS) { - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + inst(CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS, (unused/1, unused/2, unused/1, method, unused, args[oparg] -- res)) { + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + args--; + total_args++; + } PyMethodDescrObject *callable = (PyMethodDescrObject *)PEEK(total_args + 1); DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), CALL); PyMethodDef *meth = callable->d_method; DEOPT_IF(meth->ml_flags != (METH_FASTCALL|METH_KEYWORDS), CALL); PyTypeObject *d_type = callable->d_common.d_type; - PyObject *self = PEEK(total_args); + PyObject *self = args[0]; DEOPT_IF(!Py_IS_TYPE(self, d_type), CALL); STAT_INC(CALL, hit); - int nargs = total_args-1; - STACK_SHRINK(nargs); + int nargs = total_args - 1; _PyCFunctionFastWithKeywords cfunc = (_PyCFunctionFastWithKeywords)(void(*)(void))meth->ml_meth; - PyObject *res = cfunc(self, stack_pointer, nargs - KWNAMES_LEN(), - kwnames); + res = cfunc(self, args + 1, nargs - KWNAMES_LEN(), kwnames); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); kwnames = NULL; /* Free the arguments. 
*/ - for (int i = 0; i < nargs; i++) { - Py_DECREF(stack_pointer[i]); + for (int i = 0; i < total_args; i++) { + Py_DECREF(args[i]); } - Py_DECREF(self); - STACK_SHRINK(2-is_meth); - SET_TOP(res); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + ERROR_IF(res == NULL, error); CHECK_EVAL_BREAKER(); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS) { + inst(CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS, (unused/1, unused/2, unused/1, method, unused, args[oparg] -- res)) { assert(kwnames == NULL); assert(oparg == 0 || oparg == 1); - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + args--; + total_args++; + } DEOPT_IF(total_args != 1, CALL); PyMethodDescrObject *callable = (PyMethodDescrObject *)SECOND(); DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), CALL); PyMethodDef *meth = callable->d_method; - PyObject *self = TOP(); + PyObject *self = args[0]; DEOPT_IF(!Py_IS_TYPE(self, callable->d_common.d_type), CALL); DEOPT_IF(meth->ml_flags != METH_NOARGS, CALL); STAT_INC(CALL, hit); @@ -2920,75 +2891,61 @@ dummy_func( if (_Py_EnterRecursiveCallTstate(tstate, " while calling a Python object")) { goto error; } - PyObject *res = _PyCFunction_TrampolineCall(cfunc, self, NULL); + res = _PyCFunction_TrampolineCall(cfunc, self, NULL); _Py_LeaveRecursiveCallTstate(tstate); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); Py_DECREF(self); - STACK_SHRINK(oparg + 1); - SET_TOP(res); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + ERROR_IF(res == NULL, error); CHECK_EVAL_BREAKER(); } - // stack effect: (__0, __array[oparg] -- ) - inst(CALL_NO_KW_METHOD_DESCRIPTOR_FAST) { + inst(CALL_NO_KW_METHOD_DESCRIPTOR_FAST, (unused/1, unused/2, unused/1, method, unused, args[oparg] -- res)) { assert(kwnames == NULL); - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + args--; + total_args++; + } PyMethodDescrObject *callable = (PyMethodDescrObject *)PEEK(total_args + 1); /* Builtin METH_FASTCALL methods, without keywords */ DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), CALL); PyMethodDef *meth = callable->d_method; DEOPT_IF(meth->ml_flags != METH_FASTCALL, CALL); - PyObject *self = PEEK(total_args); + PyObject *self = args[0]; DEOPT_IF(!Py_IS_TYPE(self, callable->d_common.d_type), CALL); STAT_INC(CALL, hit); _PyCFunctionFast cfunc = (_PyCFunctionFast)(void(*)(void))meth->ml_meth; - int nargs = total_args-1; - STACK_SHRINK(nargs); - PyObject *res = cfunc(self, stack_pointer, nargs); + int nargs = total_args - 1; + res = cfunc(self, args + 1, nargs); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); /* Clear the stack of the arguments. 
*/ - for (int i = 0; i < nargs; i++) { - Py_DECREF(stack_pointer[i]); + for (int i = 0; i < total_args; i++) { + Py_DECREF(args[i]); } - Py_DECREF(self); - STACK_SHRINK(2-is_meth); - SET_TOP(res); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + ERROR_IF(res == NULL, error); CHECK_EVAL_BREAKER(); } - // error: CALL_FUNCTION_EX has irregular stack effect - inst(CALL_FUNCTION_EX) { - PyObject *func, *callargs, *kwargs = NULL, *result; - if (oparg & 0x01) { - kwargs = POP(); + inst(CALL_FUNCTION_EX, (unused, func, callargs, kwargs if (oparg & 1) -- result)) { + if (oparg & 1) { // DICT_MERGE is called before this opcode if there are kwargs. // It converts all dict subtypes in kwargs into regular dicts. assert(PyDict_CheckExact(kwargs)); } - callargs = POP(); - func = TOP(); if (!PyTuple_CheckExact(callargs)) { if (check_args_iterable(tstate, func, callargs) < 0) { - Py_DECREF(callargs); goto error; } - Py_SETREF(callargs, PySequence_Tuple(callargs)); - if (callargs == NULL) { + PyObject *tuple = PySequence_Tuple(callargs); + if (tuple == NULL) { goto error; } + Py_SETREF(callargs, tuple); } assert(PyTuple_CheckExact(callargs)); @@ -2997,49 +2954,47 @@ dummy_func( Py_DECREF(callargs); Py_XDECREF(kwargs); - STACK_SHRINK(1); - assert(TOP() == NULL); - SET_TOP(result); - if (result == NULL) { - goto error; - } + assert(PEEK(3 + (oparg & 1)) == NULL); + ERROR_IF(result == NULL, error); CHECK_EVAL_BREAKER(); } - // error: MAKE_FUNCTION has irregular stack effect - inst(MAKE_FUNCTION) { - PyObject *codeobj = POP(); - PyFunctionObject *func = (PyFunctionObject *) + inst(MAKE_FUNCTION, (defaults if (oparg & 0x01), + kwdefaults if (oparg & 0x02), + annotations if (oparg & 0x04), + closure if (oparg & 0x08), + codeobj -- func)) { + + PyFunctionObject *func_obj = (PyFunctionObject *) PyFunction_New(codeobj, GLOBALS()); Py_DECREF(codeobj); - if (func == NULL) { + if (func_obj == NULL) { goto error; } if (oparg & 0x08) { - assert(PyTuple_CheckExact(TOP())); - func->func_closure = POP(); + assert(PyTuple_CheckExact(closure)); + func_obj->func_closure = closure; } if (oparg & 0x04) { - assert(PyTuple_CheckExact(TOP())); - func->func_annotations = POP(); + assert(PyTuple_CheckExact(annotations)); + func_obj->func_annotations = annotations; } if (oparg & 0x02) { - assert(PyDict_CheckExact(TOP())); - func->func_kwdefaults = POP(); + assert(PyDict_CheckExact(kwdefaults)); + func_obj->func_kwdefaults = kwdefaults; } if (oparg & 0x01) { - assert(PyTuple_CheckExact(TOP())); - func->func_defaults = POP(); + assert(PyTuple_CheckExact(defaults)); + func_obj->func_defaults = defaults; } - func->func_version = ((PyCodeObject *)codeobj)->co_version; - PUSH((PyObject *)func); + func_obj->func_version = ((PyCodeObject *)codeobj)->co_version; + func = (PyObject *)func_obj; } - // stack effect: ( -- ) - inst(RETURN_GENERATOR) { + inst(RETURN_GENERATOR, (--)) { assert(PyFunction_Check(frame->f_funcobj)); PyFunctionObject *func = (PyFunctionObject *)frame->f_funcobj; PyGenObject *gen = (PyGenObject *)_Py_MakeCoro(func); @@ -3062,36 +3017,18 @@ dummy_func( goto resume_frame; } - // error: BUILD_SLICE has irregular stack effect - inst(BUILD_SLICE) { - PyObject *start, *stop, *step, *slice; - if (oparg == 3) - step = POP(); - else - step = NULL; - stop = POP(); - start = TOP(); + inst(BUILD_SLICE, (start, stop, step if (oparg == 3) -- slice)) { slice = PySlice_New(start, stop, step); Py_DECREF(start); Py_DECREF(stop); Py_XDECREF(step); - SET_TOP(slice); - if (slice == NULL) - goto 
error; + ERROR_IF(slice == NULL, error); } - // error: FORMAT_VALUE has irregular stack effect - inst(FORMAT_VALUE) { + inst(FORMAT_VALUE, (value, fmt_spec if ((oparg & FVS_MASK) == FVS_HAVE_SPEC) -- result)) { /* Handles f-string value formatting. */ - PyObject *result; - PyObject *fmt_spec; - PyObject *value; PyObject *(*conv_fn)(PyObject *); int which_conversion = oparg & FVC_MASK; - int have_fmt_spec = (oparg & FVS_MASK) == FVS_HAVE_SPEC; - - fmt_spec = have_fmt_spec ? POP() : NULL; - value = POP(); /* See if any conversion is specified. */ switch (which_conversion) { @@ -3114,7 +3051,7 @@ dummy_func( Py_DECREF(value); if (result == NULL) { Py_XDECREF(fmt_spec); - goto error; + ERROR_IF(true, error); } value = result; } @@ -3132,19 +3069,13 @@ dummy_func( result = PyObject_Format(value, fmt_spec); Py_DECREF(value); Py_XDECREF(fmt_spec); - if (result == NULL) { - goto error; - } + ERROR_IF(result == NULL, error); } - - PUSH(result); } - // stack effect: ( -- __0) - inst(COPY) { - assert(oparg != 0); - PyObject *peek = PEEK(oparg); - PUSH(Py_NewRef(peek)); + inst(COPY, (bottom, unused[oparg-1] -- bottom, unused[oparg-1], top)) { + assert(oparg > 0); + top = Py_NewRef(bottom); } inst(BINARY_OP, (unused/1, lhs, rhs -- res)) { @@ -3168,12 +3099,9 @@ dummy_func( ERROR_IF(res == NULL, error); } - // stack effect: ( -- ) - inst(SWAP) { - assert(oparg != 0); - PyObject *top = TOP(); - SET_TOP(PEEK(oparg)); - PEEK(oparg) = top; + inst(SWAP, (bottom, unused[oparg-2], top -- + top, unused[oparg-2], bottom)) { + assert(oparg >= 2); } inst(EXTENDED_ARG, (--)) { @@ -3207,21 +3135,4 @@ dummy_func( // Future families go below this point // -family(call, INLINE_CACHE_ENTRIES_CALL) = { - CALL, CALL_PY_EXACT_ARGS, - CALL_PY_WITH_DEFAULTS, CALL_BOUND_METHOD_EXACT_ARGS, CALL_BUILTIN_CLASS, - CALL_BUILTIN_FAST_WITH_KEYWORDS, CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS, CALL_NO_KW_BUILTIN_FAST, - CALL_NO_KW_BUILTIN_O, CALL_NO_KW_ISINSTANCE, CALL_NO_KW_LEN, - CALL_NO_KW_LIST_APPEND, CALL_NO_KW_METHOD_DESCRIPTOR_FAST, CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS, - CALL_NO_KW_METHOD_DESCRIPTOR_O, CALL_NO_KW_STR_1, CALL_NO_KW_TUPLE_1, - CALL_NO_KW_TYPE_1 }; -family(for_iter, INLINE_CACHE_ENTRIES_FOR_ITER) = { - FOR_ITER, FOR_ITER_LIST, - FOR_ITER_RANGE }; -family(load_global, INLINE_CACHE_ENTRIES_LOAD_GLOBAL) = { - LOAD_GLOBAL, LOAD_GLOBAL_BUILTIN, - LOAD_GLOBAL_MODULE }; family(store_fast) = { STORE_FAST, STORE_FAST__LOAD_FAST, STORE_FAST__STORE_FAST }; -family(unpack_sequence, INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE) = { - UNPACK_SEQUENCE, UNPACK_SEQUENCE_LIST, - UNPACK_SEQUENCE_TUPLE, UNPACK_SEQUENCE_TWO_TUPLE }; diff --git a/Python/ceval.c b/Python/ceval.c index 2e6fed580dede4..611d62b0eba9af 100644 --- a/Python/ceval.c +++ b/Python/ceval.c @@ -688,12 +688,6 @@ static inline void _Py_LeaveRecursiveCallPy(PyThreadState *tstate) { } -// GH-89279: Must be a macro to be sure it's inlined by MSVC. -#define is_method(stack_pointer, args) (PEEK((args)+2) != NULL) - -#define KWNAMES_LEN() \ - (kwnames == NULL ? 0 : ((int)PyTuple_GET_SIZE(kwnames))) - /* Disable unused label warnings. They are handy for debugging, even if computed gotos aren't used. 
*/ @@ -1761,9 +1755,6 @@ PyEval_EvalCodeEx(PyObject *_co, PyObject *globals, PyObject *locals, } allargs = newargs; } - for (int i = 0; i < kwcount; i++) { - PyTuple_SET_ITEM(kwnames, i, Py_NewRef(kws[2*i])); - } PyFrameConstructor constr = { .fc_globals = globals, .fc_builtins = builtins, @@ -2905,13 +2896,13 @@ format_kwargs_error(PyThreadState *tstate, PyObject *func, PyObject *kwargs) } } else if (_PyErr_ExceptionMatches(tstate, PyExc_KeyError)) { - PyObject *exc, *val, *tb; - _PyErr_Fetch(tstate, &exc, &val, &tb); - if (val && PyTuple_Check(val) && PyTuple_GET_SIZE(val) == 1) { + PyObject *exc = _PyErr_GetRaisedException(tstate); + PyObject *args = ((PyBaseExceptionObject *)exc)->args; + if (exc && PyTuple_Check(args) && PyTuple_GET_SIZE(args) == 1) { _PyErr_Clear(tstate); PyObject *funcstr = _PyObject_FunctionStr(func); if (funcstr != NULL) { - PyObject *key = PyTuple_GET_ITEM(val, 0); + PyObject *key = PyTuple_GET_ITEM(args, 0); _PyErr_Format( tstate, PyExc_TypeError, "%U got multiple values for keyword argument '%S'", @@ -2919,11 +2910,9 @@ format_kwargs_error(PyThreadState *tstate, PyObject *func, PyObject *kwargs) Py_DECREF(funcstr); } Py_XDECREF(exc); - Py_XDECREF(val); - Py_XDECREF(tb); } else { - _PyErr_Restore(tstate, exc, val, tb); + _PyErr_SetRaisedException(tstate, exc); } } } diff --git a/Python/ceval_macros.h b/Python/ceval_macros.h index d7a8f0beeec872..691bf8e1caae95 100644 --- a/Python/ceval_macros.h +++ b/Python/ceval_macros.h @@ -347,3 +347,6 @@ GETITEM(PyObject *v, Py_ssize_t i) { } while (0); #define NAME_ERROR_MSG "name '%.200s' is not defined" + +#define KWNAMES_LEN() \ + (kwnames == NULL ? 0 : ((int)PyTuple_GET_SIZE(kwnames))) diff --git a/Python/compile.c b/Python/compile.c index 5787155dbb7166..5d9be7fbd6d762 100644 --- a/Python/compile.c +++ b/Python/compile.c @@ -124,6 +124,7 @@ #define IS_SCOPE_EXIT_OPCODE(opcode) \ ((opcode) == RETURN_VALUE || \ + (opcode) == RETURN_CONST || \ (opcode) == RAISE_VARARGS || \ (opcode) == RERAISE) @@ -354,7 +355,7 @@ basicblock_last_instr(const basicblock *b) { static inline int basicblock_returns(const basicblock *b) { struct instr *last = basicblock_last_instr(b); - return last && last->i_opcode == RETURN_VALUE; + return last && (last->i_opcode == RETURN_VALUE || last->i_opcode == RETURN_CONST); } static inline int @@ -1075,133 +1076,49 @@ basicblock_next_instr(basicblock *b) static int stack_effect(int opcode, int oparg, int jump) { - switch (opcode) { - case NOP: - case EXTENDED_ARG: - case RESUME: - case CACHE: - return 0; - - /* Stack manipulation */ - case POP_TOP: - return -1; - case SWAP: - return 0; - case END_FOR: - return -2; - - /* Unary operators */ - case UNARY_NEGATIVE: - case UNARY_NOT: - case UNARY_INVERT: - return 0; - - case SET_ADD: - case LIST_APPEND: - return -1; - case MAP_ADD: - return -2; - - case BINARY_SUBSCR: - return -1; - case BINARY_SLICE: - return -2; - case STORE_SUBSCR: - return -3; - case STORE_SLICE: - return -4; - case DELETE_SUBSCR: - return -2; - - case GET_ITER: - return 0; - - case LOAD_BUILD_CLASS: - return 1; + if (0 <= opcode && opcode <= MAX_REAL_OPCODE) { + if (_PyOpcode_Deopt[opcode] != opcode) { + // Specialized instructions are not supported. 
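+            // (_PyOpcode_Deopt maps each specialized opcode back to its base
+            //  form, so this test only rejects quickened instructions, which
+            //  the compiler itself never emits.)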
+ return PY_INVALID_STACK_EFFECT; + } + int popped, pushed; + if (jump > 0) { + popped = _PyOpcode_num_popped(opcode, oparg, true); + pushed = _PyOpcode_num_pushed(opcode, oparg, true); + } + else { + popped = _PyOpcode_num_popped(opcode, oparg, false); + pushed = _PyOpcode_num_pushed(opcode, oparg, false); + } + if (popped < 0 || pushed < 0) { + return PY_INVALID_STACK_EFFECT; + } + if (jump >= 0) { + return pushed - popped; + } + if (jump < 0) { + // Compute max(pushed - popped, alt_pushed - alt_popped) + int alt_popped = _PyOpcode_num_popped(opcode, oparg, true); + int alt_pushed = _PyOpcode_num_pushed(opcode, oparg, true); + if (alt_popped < 0 || alt_pushed < 0) { + return PY_INVALID_STACK_EFFECT; + } + int diff = pushed - popped; + int alt_diff = alt_pushed - alt_popped; + if (alt_diff > diff) { + return alt_diff; + } + return diff; + } + } - case RETURN_VALUE: - return -1; - case SETUP_ANNOTATIONS: - return 0; - case YIELD_VALUE: - return 0; + // Pseudo ops + switch (opcode) { case POP_BLOCK: - return 0; - case POP_EXCEPT: - return -1; - - case STORE_NAME: - return -1; - case DELETE_NAME: - return 0; - case UNPACK_SEQUENCE: - return oparg-1; - case UNPACK_EX: - return (oparg&0xFF) + (oparg>>8); - case FOR_ITER: - return 1; - case SEND: - return jump > 0 ? -1 : 0; - case STORE_ATTR: - return -2; - case DELETE_ATTR: - return -1; - case STORE_GLOBAL: - return -1; - case DELETE_GLOBAL: - return 0; - case LOAD_CONST: - return 1; - case LOAD_NAME: - return 1; - case BUILD_TUPLE: - case BUILD_LIST: - case BUILD_SET: - case BUILD_STRING: - return 1-oparg; - case BUILD_MAP: - return 1 - 2*oparg; - case BUILD_CONST_KEY_MAP: - return -oparg; - case LOAD_ATTR: - return (oparg & 1); - case COMPARE_OP: - case IS_OP: - case CONTAINS_OP: - return -1; - case CHECK_EXC_MATCH: - return 0; - case CHECK_EG_MATCH: - return 0; - case IMPORT_NAME: - return -1; - case IMPORT_FROM: - return 1; - - /* Jumps */ - case JUMP_FORWARD: - case JUMP_BACKWARD: case JUMP: - case JUMP_BACKWARD_NO_INTERRUPT: case JUMP_NO_INTERRUPT: return 0; - case JUMP_IF_TRUE_OR_POP: - case JUMP_IF_FALSE_OR_POP: - return jump ? 0 : -1; - - case POP_JUMP_IF_NONE: - case POP_JUMP_IF_NOT_NONE: - case POP_JUMP_IF_FALSE: - case POP_JUMP_IF_TRUE: - return -1; - - case COMPARE_AND_BRANCH: - return -2; - - case LOAD_GLOBAL: - return (oparg & 1) + 1; - /* Exception handling pseudo-instructions */ case SETUP_FINALLY: /* 0 in the normal flow. @@ -1217,111 +1134,15 @@ stack_effect(int opcode, int oparg, int jump) * of __(a)enter__ and push 2 values before jumping to the handler * if an exception be raised. */ return jump ? 
1 : 0; - case PREP_RERAISE_STAR: - return -1; - case RERAISE: - return -1; - case PUSH_EXC_INFO: - return 1; - - case WITH_EXCEPT_START: - return 1; - case LOAD_FAST: - case LOAD_FAST_CHECK: - case LOAD_FAST_AND_CLEAR: - return 1; - case STORE_FAST: case STORE_FAST_MAYBE_NULL: return -1; - case DELETE_FAST: - return 0; - - case RETURN_GENERATOR: - return 0; - - case RAISE_VARARGS: - return -oparg; - - /* Functions and calls */ - case KW_NAMES: - return 0; - case CALL: - return -1-oparg; - case CALL_INTRINSIC_1: - return 0; - case CALL_FUNCTION_EX: - return -2 - ((oparg & 0x01) != 0); - case MAKE_FUNCTION: - return 0 - ((oparg & 0x01) != 0) - ((oparg & 0x02) != 0) - - ((oparg & 0x04) != 0) - ((oparg & 0x08) != 0); - case BUILD_SLICE: - if (oparg == 3) - return -2; - else - return -1; - - /* Closures */ - case MAKE_CELL: - case COPY_FREE_VARS: - return 0; - case LOAD_CLOSURE: - return 1; - case LOAD_DEREF: - case LOAD_CLASSDEREF: - return 1; - case STORE_DEREF: - return -1; - case DELETE_DEREF: - return 0; - - /* Iterators and generators */ - case GET_AWAITABLE: - return 0; - - case BEFORE_ASYNC_WITH: - case BEFORE_WITH: - return 1; - case GET_AITER: - return 0; - case GET_ANEXT: - return 1; - case GET_YIELD_FROM_ITER: - return 0; - case END_ASYNC_FOR: - return -2; - case CLEANUP_THROW: - return -2; - case FORMAT_VALUE: - /* If there's a fmt_spec on the stack, we go from 2->1, - else 1->1. */ - return (oparg & FVS_MASK) == FVS_HAVE_SPEC ? -1 : 0; case LOAD_METHOD: return 1; - case LOAD_ASSERTION_ERROR: - return 1; - case LIST_EXTEND: - case SET_UPDATE: - case DICT_MERGE: - case DICT_UPDATE: - return -1; - case MATCH_CLASS: - return -2; - case GET_LEN: - case MATCH_MAPPING: - case MATCH_SEQUENCE: - case MATCH_KEYS: - return 1; - case COPY: - case PUSH_NULL: - return 1; - case BINARY_OP: - return -1; - case INTERPRETER_EXIT: - return -1; default: return PY_INVALID_STACK_EFFECT; } + return PY_INVALID_STACK_EFFECT; /* not reachable */ } @@ -9442,6 +9263,10 @@ optimize_basic_block(PyObject *const_cache, basicblock *bb, PyObject *consts) } Py_DECREF(cnt); break; + case RETURN_VALUE: + INSTR_SET_OP1(inst, RETURN_CONST, oparg); + INSTR_SET_OP0(&bb->b_instr[i + 1], NOP); + break; } break; } @@ -9904,9 +9729,7 @@ remove_unused_consts(basicblock *entryblock, PyObject *consts) /* mark used consts */ for (basicblock *b = entryblock; b != NULL; b = b->b_next) { for (int i = 0; i < b->b_iused; i++) { - if (b->b_instr[i].i_opcode == LOAD_CONST || - b->b_instr[i].i_opcode == KW_NAMES) { - + if (HAS_CONST(b->b_instr[i].i_opcode)) { int index = b->b_instr[i].i_oparg; index_map[index] = index; } @@ -9961,9 +9784,7 @@ remove_unused_consts(basicblock *entryblock, PyObject *consts) for (basicblock *b = entryblock; b != NULL; b = b->b_next) { for (int i = 0; i < b->b_iused; i++) { - if (b->b_instr[i].i_opcode == LOAD_CONST || - b->b_instr[i].i_opcode == KW_NAMES) { - + if (HAS_CONST(b->b_instr[i].i_opcode)) { int index = b->b_instr[i].i_oparg; assert(reverse_index_map[index] >= 0); assert(reverse_index_map[index] < n_used_consts); diff --git a/Python/dynload_win.c b/Python/dynload_win.c index c03bc5602bffee..7bd04d573df4ad 100644 --- a/Python/dynload_win.c +++ b/Python/dynload_win.c @@ -125,14 +125,15 @@ static char *GetPythonImport (HINSTANCE hModule) !strncmp(import_name,"python",6)) { char *pch; -#ifndef _DEBUG - /* In a release version, don't claim that python3.dll is - a Python DLL. */ + /* Don't claim that python3.dll is a Python DLL. 
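+       (python3.dll is the version-independent stable-ABI forwarder DLL, so an
+        import of it says nothing about which interpreter version the
+        extension was built against.)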
*/ +#ifdef _DEBUG + if (strcmp(import_name, "python3_d.dll") == 0) { +#else if (strcmp(import_name, "python3.dll") == 0) { +#endif import_data += 20; continue; } -#endif /* Ensure python prefix is followed only by numbers to the end of the basename */ diff --git a/Python/errors.c b/Python/errors.c index 05ef62246ec0a4..f573bed3d63ef0 100644 --- a/Python/errors.c +++ b/Python/errors.c @@ -27,32 +27,84 @@ static PyObject * _PyErr_FormatV(PyThreadState *tstate, PyObject *exception, const char *format, va_list vargs); - void -_PyErr_Restore(PyThreadState *tstate, PyObject *type, PyObject *value, - PyObject *traceback) +_PyErr_SetRaisedException(PyThreadState *tstate, PyObject *exc) { - PyObject *oldtype, *oldvalue, *oldtraceback; + PyObject *old_exc = tstate->current_exception; + tstate->current_exception = exc; + Py_XDECREF(old_exc); +} - if (traceback != NULL && !PyTraceBack_Check(traceback)) { - /* XXX Should never happen -- fatal error instead? */ - /* Well, it could be None. */ - Py_SETREF(traceback, NULL); +static PyObject* +_PyErr_CreateException(PyObject *exception_type, PyObject *value) +{ + PyObject *exc; + + if (value == NULL || value == Py_None) { + exc = _PyObject_CallNoArgs(exception_type); + } + else if (PyTuple_Check(value)) { + exc = PyObject_Call(exception_type, value, NULL); + } + else { + exc = PyObject_CallOneArg(exception_type, value); } - /* Save these in locals to safeguard against recursive - invocation through Py_XDECREF */ - oldtype = tstate->curexc_type; - oldvalue = tstate->curexc_value; - oldtraceback = tstate->curexc_traceback; + if (exc != NULL && !PyExceptionInstance_Check(exc)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %s", + exception_type, Py_TYPE(exc)->tp_name); + Py_CLEAR(exc); + } - tstate->curexc_type = type; - tstate->curexc_value = value; - tstate->curexc_traceback = traceback; + return exc; +} - Py_XDECREF(oldtype); - Py_XDECREF(oldvalue); - Py_XDECREF(oldtraceback); +void +_PyErr_Restore(PyThreadState *tstate, PyObject *type, PyObject *value, + PyObject *traceback) +{ + if (type == NULL) { + assert(value == NULL); + assert(traceback == NULL); + _PyErr_SetRaisedException(tstate, NULL); + return; + } + assert(PyExceptionClass_Check(type)); + if (value != NULL && type == (PyObject *)Py_TYPE(value)) { + /* Already normalized */ + assert(((PyBaseExceptionObject *)value)->traceback != Py_None); + } + else { + PyObject *exc = _PyErr_CreateException(type, value); + Py_XDECREF(value); + if (exc == NULL) { + Py_DECREF(type); + Py_XDECREF(traceback); + return; + } + value = exc; + } + assert(PyExceptionInstance_Check(value)); + if (traceback != NULL && !PyTraceBack_Check(traceback)) { + if (traceback == Py_None) { + Py_DECREF(Py_None); + traceback = NULL; + } + else { + PyErr_SetString(PyExc_TypeError, "traceback must be a Traceback or None"); + Py_XDECREF(value); + Py_DECREF(type); + Py_XDECREF(traceback); + return; + } + } + PyObject *old_traceback = ((PyBaseExceptionObject *)value)->traceback; + ((PyBaseExceptionObject *)value)->traceback = traceback; + Py_XDECREF(old_traceback); + _PyErr_SetRaisedException(tstate, value); + Py_DECREF(type); } void @@ -62,6 +114,12 @@ PyErr_Restore(PyObject *type, PyObject *value, PyObject *traceback) _PyErr_Restore(tstate, type, value, traceback); } +void +PyErr_SetRaisedException(PyObject *exc) +{ + PyThreadState *tstate = _PyThreadState_GET(); + _PyErr_SetRaisedException(tstate, exc); +} _PyErr_StackItem * _PyErr_GetTopmostException(PyThreadState *tstate) 
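/* Editorial illustration (a minimal sketch, not part of the patch): with the
   single-object representation added above, the legacy triple is normalized
   eagerly, so

       PyErr_Restore(Py_NewRef(PyExc_ValueError),      // reference is stolen
                     PyUnicode_FromString("boom"),     // becomes exc.args[0]
                     NULL);

   leaves a fully constructed ValueError in tstate->current_exception rather
   than a lazy (type, value, traceback) triple; _PyErr_Fetch() below then
   rebuilds the triple from that instance on demand. */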
@@ -77,32 +135,6 @@ _PyErr_GetTopmostException(PyThreadState *tstate) return exc_info; } -static PyObject* -_PyErr_CreateException(PyObject *exception_type, PyObject *value) -{ - PyObject *exc; - - if (value == NULL || value == Py_None) { - exc = _PyObject_CallNoArgs(exception_type); - } - else if (PyTuple_Check(value)) { - exc = PyObject_Call(exception_type, value, NULL); - } - else { - exc = PyObject_CallOneArg(exception_type, value); - } - - if (exc != NULL && !PyExceptionInstance_Check(exc)) { - PyErr_Format(PyExc_TypeError, - "calling %R should have returned an instance of " - "BaseException, not %s", - exception_type, Py_TYPE(exc)->tp_name); - Py_CLEAR(exc); - } - - return exc; -} - void _PyErr_SetObject(PyThreadState *tstate, PyObject *exception, PyObject *value) { @@ -117,30 +149,29 @@ _PyErr_SetObject(PyThreadState *tstate, PyObject *exception, PyObject *value) exception); return; } - Py_XINCREF(value); - exc_value = _PyErr_GetTopmostException(tstate)->exc_value; - if (exc_value != NULL && exc_value != Py_None) { - /* Implicit exception chaining */ - Py_INCREF(exc_value); - if (value == NULL || !PyExceptionInstance_Check(value)) { - /* We must normalize the value right now */ - PyObject *fixed_value; - - /* Issue #23571: functions must not be called with an - exception set */ - _PyErr_Clear(tstate); + /* Normalize the exception */ + if (value == NULL || (PyObject *)Py_TYPE(value) != exception) { + /* We must normalize the value right now */ + PyObject *fixed_value; - fixed_value = _PyErr_CreateException(exception, value); - Py_XDECREF(value); - if (fixed_value == NULL) { - Py_DECREF(exc_value); - return; - } + /* Issue #23571: functions must not be called with an + exception set */ + _PyErr_Clear(tstate); - value = fixed_value; + fixed_value = _PyErr_CreateException(exception, value); + Py_XDECREF(value); + if (fixed_value == NULL) { + return; } + value = fixed_value; + } + + exc_value = _PyErr_GetTopmostException(tstate)->exc_value; + if (exc_value != NULL && exc_value != Py_None) { + /* Implicit exception chaining */ + Py_INCREF(exc_value); /* Avoid creating new reference cycles through the context chain, while taking care not to hang on pre-existing ones. @@ -414,17 +445,34 @@ PyErr_NormalizeException(PyObject **exc, PyObject **val, PyObject **tb) } +PyObject * +_PyErr_GetRaisedException(PyThreadState *tstate) { + PyObject *exc = tstate->current_exception; + tstate->current_exception = NULL; + return exc; +} + +PyObject * +PyErr_GetRaisedException(void) +{ + PyThreadState *tstate = _PyThreadState_GET(); + return _PyErr_GetRaisedException(tstate); +} + void _PyErr_Fetch(PyThreadState *tstate, PyObject **p_type, PyObject **p_value, PyObject **p_traceback) { - *p_type = tstate->curexc_type; - *p_value = tstate->curexc_value; - *p_traceback = tstate->curexc_traceback; - - tstate->curexc_type = NULL; - tstate->curexc_value = NULL; - tstate->curexc_traceback = NULL; + PyObject *exc = _PyErr_GetRaisedException(tstate); + *p_value = exc; + if (exc == NULL) { + *p_type = NULL; + *p_traceback = NULL; + } + else { + *p_type = Py_NewRef(Py_TYPE(exc)); + *p_traceback = Py_XNewRef(((PyBaseExceptionObject *)exc)->traceback); + } } @@ -597,6 +645,28 @@ _PyErr_ChainExceptions(PyObject *typ, PyObject *val, PyObject *tb) } } +/* Like PyErr_SetRaisedException(), but if an exception is already set, + set the context associated with it. + + The caller is responsible for ensuring that this call won't create + any cycles in the exception context chain. 
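+
+   A typical use (illustrative, not from the patch): an error path that must
+   run cleanup code can do
+
+       PyObject *exc = _PyErr_GetRaisedException(tstate);
+       ... cleanup that may itself set an exception ...
+       _PyErr_ChainExceptions1(exc);
+
+   If the cleanup raised, that exception stays set with exc attached as its
+   __context__; otherwise exc is simply re-raised.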
*/ +void +_PyErr_ChainExceptions1(PyObject *exc) +{ + if (exc == NULL) { + return; + } + PyThreadState *tstate = _PyThreadState_GET(); + if (_PyErr_Occurred(tstate)) { + PyObject *exc2 = _PyErr_GetRaisedException(tstate); + PyException_SetContext(exc2, exc); + _PyErr_SetRaisedException(tstate, exc2); + } + else { + _PyErr_SetRaisedException(tstate, exc); + } +} + /* Set the currently set exception's context to the given exception. If the provided exc_info is NULL, then the current Python thread state's @@ -706,19 +776,6 @@ PyErr_BadArgument(void) return 0; } -PyObject * -_PyErr_NoMemory(PyThreadState *tstate) -{ - if (Py_IS_TYPE(PyExc_MemoryError, NULL)) { - /* PyErr_NoMemory() has been called before PyExc_MemoryError has been - initialized by _PyExc_Init() */ - Py_FatalError("Out of memory and PyExc_MemoryError is not " - "initialized yet"); - } - _PyErr_SetNone(tstate, PyExc_MemoryError); - return NULL; -} - PyObject * PyErr_NoMemory(void) { diff --git a/Python/generated_cases.c.h b/Python/generated_cases.c.h index ba2f5a24754133..caa77547d26086 100644 --- a/Python/generated_cases.c.h +++ b/Python/generated_cases.c.h @@ -752,6 +752,23 @@ goto resume_frame; } + TARGET(RETURN_CONST) { + PyObject *retval = GETITEM(consts, oparg); + Py_INCREF(retval); + assert(EMPTY()); + _PyFrame_SetStackPointer(frame, stack_pointer); + TRACE_FUNCTION_EXIT(); + DTRACE_FUNCTION_EXIT(); + _Py_LeaveRecursiveCallPy(tstate); + assert(frame != &entry_frame); + // GH-99729: We need to unlink the frame *before* clearing it: + _PyInterpreterFrame *dying = frame; + frame = cframe.current_frame = dying->previous; + _PyEvalFrameClearAndPop(tstate, dying); + _PyFrame_StackPush(frame, retval); + goto resume_frame; + } + TARGET(GET_AITER) { PyObject *obj = PEEK(1); PyObject *iter; @@ -875,12 +892,12 @@ } TARGET(SEND) { + PyObject *v = PEEK(1); + PyObject *receiver = PEEK(2); + PyObject *retval; assert(frame != &entry_frame); - assert(STACK_LEVEL() >= 2); - PyObject *v = POP(); - PyObject *receiver = TOP(); + bool jump = false; PySendResult gen_status; - PyObject *retval; if (tstate->c_tracefunc == NULL) { gen_status = PyIter_Send(receiver, v, &retval); } else { @@ -905,22 +922,24 @@ gen_status = PYGEN_NEXT; } } - Py_DECREF(v); if (gen_status == PYGEN_ERROR) { assert(retval == NULL); goto error; } + Py_DECREF(v); if (gen_status == PYGEN_RETURN) { assert(retval != NULL); Py_DECREF(receiver); - SET_TOP(retval); JUMPBY(oparg); + jump = true; } else { assert(gen_status == PYGEN_NEXT); assert(retval != NULL); - PUSH(retval); } + STACK_SHRINK(1); + STACK_GROW(((!jump) ? 
1 : 0)); + POKE(1, retval); DISPATCH(); } @@ -955,8 +974,11 @@ } TARGET(RERAISE) { + PyObject *exc = PEEK(1); + PyObject **values = &PEEK(1 + oparg); + assert(oparg >= 0 && oparg <= 2); if (oparg) { - PyObject *lasti = PEEK(oparg + 1); + PyObject *lasti = values[0]; if (PyLong_Check(lasti)) { frame->prev_instr = _PyCode_CODE(frame->f_code) + PyLong_AsLong(lasti); assert(!_PyErr_Occurred(tstate)); @@ -967,11 +989,11 @@ goto error; } } - PyObject *val = POP(); - assert(val && PyExceptionInstance_Check(val)); - PyObject *exc = Py_NewRef(PyExceptionInstance_Class(val)); - PyObject *tb = PyException_GetTraceback(val); - _PyErr_Restore(tstate, exc, val, tb); + assert(exc && PyExceptionInstance_Check(exc)); + Py_INCREF(exc); + PyObject *typ = Py_NewRef(PyExceptionInstance_Class(exc)); + PyObject *tb = PyException_GetTraceback(exc); + _PyErr_Restore(tstate, typ, exc, tb); goto exception_unwind; } @@ -1011,23 +1033,24 @@ } TARGET(CLEANUP_THROW) { + PyObject *exc_value = PEEK(1); + PyObject *last_sent_val = PEEK(2); + PyObject *sub_iter = PEEK(3); + PyObject *value; assert(throwflag); - PyObject *exc_value = TOP(); assert(exc_value && PyExceptionInstance_Check(exc_value)); if (PyErr_GivenExceptionMatches(exc_value, PyExc_StopIteration)) { - PyObject *value = ((PyStopIterationObject *)exc_value)->value; - Py_INCREF(value); - Py_DECREF(POP()); // The StopIteration. - Py_DECREF(POP()); // The last sent value. - Py_DECREF(POP()); // The delegated sub-iterator. - PUSH(value); + value = Py_NewRef(((PyStopIterationObject *)exc_value)->value); + Py_DECREF(sub_iter); + Py_DECREF(last_sent_val); + Py_DECREF(exc_value); } else { - PyObject *exc_type = Py_NewRef(Py_TYPE(exc_value)); - PyObject *exc_traceback = PyException_GetTraceback(exc_value); - _PyErr_Restore(tstate, exc_type, Py_NewRef(exc_value), exc_traceback); + _PyErr_SetRaisedException(tstate, Py_NewRef(exc_value)); goto exception_unwind; } + STACK_SHRINK(2); + POKE(1, value); DISPATCH(); } @@ -1110,11 +1133,12 @@ TARGET(UNPACK_SEQUENCE) { PREDICTED(UNPACK_SEQUENCE); + static_assert(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE == 1, "incorrect cache size"); + PyObject *seq = PEEK(1); #if ENABLE_SPECIALIZATION _PyUnpackSequenceCache *cache = (_PyUnpackSequenceCache *)next_instr; if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) { assert(cframe.use_tracing == 0); - PyObject *seq = TOP(); next_instr--; _Py_Specialize_UnpackSequence(seq, next_instr, oparg); DISPATCH_SAME_OPARG(); @@ -1122,70 +1146,74 @@ STAT_INC(UNPACK_SEQUENCE, deferred); DECREMENT_ADAPTIVE_COUNTER(cache->counter); #endif /* ENABLE_SPECIALIZATION */ - PyObject *seq = POP(); - PyObject **top = stack_pointer + oparg; - if (!unpack_iterable(tstate, seq, oparg, -1, top)) { - Py_DECREF(seq); - goto error; - } - STACK_GROW(oparg); + PyObject **top = stack_pointer + oparg - 1; + int res = unpack_iterable(tstate, seq, oparg, -1, top); Py_DECREF(seq); - JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE); + if (res == 0) goto pop_1_error; + STACK_SHRINK(1); + STACK_GROW(oparg); + JUMPBY(1); DISPATCH(); } TARGET(UNPACK_SEQUENCE_TWO_TUPLE) { - PyObject *seq = TOP(); + PyObject *seq = PEEK(1); + PyObject **values = stack_pointer - (1); DEOPT_IF(!PyTuple_CheckExact(seq), UNPACK_SEQUENCE); DEOPT_IF(PyTuple_GET_SIZE(seq) != 2, UNPACK_SEQUENCE); + assert(oparg == 2); STAT_INC(UNPACK_SEQUENCE, hit); - SET_TOP(Py_NewRef(PyTuple_GET_ITEM(seq, 1))); - PUSH(Py_NewRef(PyTuple_GET_ITEM(seq, 0))); + values[0] = Py_NewRef(PyTuple_GET_ITEM(seq, 1)); + values[1] = Py_NewRef(PyTuple_GET_ITEM(seq, 0)); Py_DECREF(seq); - 
JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE); + STACK_SHRINK(1); + STACK_GROW(oparg); + JUMPBY(1); DISPATCH(); } TARGET(UNPACK_SEQUENCE_TUPLE) { - PyObject *seq = TOP(); + PyObject *seq = PEEK(1); + PyObject **values = stack_pointer - (1); DEOPT_IF(!PyTuple_CheckExact(seq), UNPACK_SEQUENCE); DEOPT_IF(PyTuple_GET_SIZE(seq) != oparg, UNPACK_SEQUENCE); STAT_INC(UNPACK_SEQUENCE, hit); - STACK_SHRINK(1); PyObject **items = _PyTuple_ITEMS(seq); - while (oparg--) { - PUSH(Py_NewRef(items[oparg])); + for (int i = oparg; --i >= 0; ) { + *values++ = Py_NewRef(items[i]); } Py_DECREF(seq); - JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE); + STACK_SHRINK(1); + STACK_GROW(oparg); + JUMPBY(1); DISPATCH(); } TARGET(UNPACK_SEQUENCE_LIST) { - PyObject *seq = TOP(); + PyObject *seq = PEEK(1); + PyObject **values = stack_pointer - (1); DEOPT_IF(!PyList_CheckExact(seq), UNPACK_SEQUENCE); DEOPT_IF(PyList_GET_SIZE(seq) != oparg, UNPACK_SEQUENCE); STAT_INC(UNPACK_SEQUENCE, hit); - STACK_SHRINK(1); PyObject **items = _PyList_ITEMS(seq); - while (oparg--) { - PUSH(Py_NewRef(items[oparg])); + for (int i = oparg; --i >= 0; ) { + *values++ = Py_NewRef(items[i]); } Py_DECREF(seq); - JUMPBY(INLINE_CACHE_ENTRIES_UNPACK_SEQUENCE); + STACK_SHRINK(1); + STACK_GROW(oparg); + JUMPBY(1); DISPATCH(); } TARGET(UNPACK_EX) { + PyObject *seq = PEEK(1); int totalargs = 1 + (oparg & 0xFF) + (oparg >> 8); - PyObject *seq = POP(); - PyObject **top = stack_pointer + totalargs; - if (!unpack_iterable(tstate, seq, oparg & 0xFF, oparg >> 8, top)) { - Py_DECREF(seq); - goto error; - } - STACK_GROW(totalargs); + PyObject **top = stack_pointer + totalargs - 1; + int res = unpack_iterable(tstate, seq, oparg & 0xFF, oparg >> 8, top); Py_DECREF(seq); + if (res == 0) goto pop_1_error; + STACK_GROW((oparg & 0xFF) + (oparg >> 8)); DISPATCH(); } @@ -1321,6 +1349,9 @@ TARGET(LOAD_GLOBAL) { PREDICTED(LOAD_GLOBAL); + static_assert(INLINE_CACHE_ENTRIES_LOAD_GLOBAL == 5, "incorrect cache size"); + PyObject *null = NULL; + PyObject *v; #if ENABLE_SPECIALIZATION _PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)next_instr; if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) { @@ -1333,10 +1364,7 @@ STAT_INC(LOAD_GLOBAL, deferred); DECREMENT_ADAPTIVE_COUNTER(cache->counter); #endif /* ENABLE_SPECIALIZATION */ - int push_null = oparg & 1; - PEEK(0) = NULL; PyObject *name = GETITEM(names, oparg>>1); - PyObject *v; if (PyDict_CheckExact(GLOBALS()) && PyDict_CheckExact(BUILTINS())) { @@ -1350,7 +1378,7 @@ format_exc_check_arg(tstate, PyExc_NameError, NAME_ERROR_MSG, name); } - goto error; + if (true) goto error; } Py_INCREF(v); } @@ -1360,9 +1388,7 @@ /* namespace 1: globals */ v = PyObject_GetItem(GLOBALS(), name); if (v == NULL) { - if (!_PyErr_ExceptionMatches(tstate, PyExc_KeyError)) { - goto error; - } + if (!_PyErr_ExceptionMatches(tstate, PyExc_KeyError)) goto error; _PyErr_Clear(tstate); /* namespace 2: builtins */ @@ -1373,58 +1399,68 @@ tstate, PyExc_NameError, NAME_ERROR_MSG, name); } - goto error; + if (true) goto error; } } } - /* Skip over inline cache */ - JUMPBY(INLINE_CACHE_ENTRIES_LOAD_GLOBAL); - STACK_GROW(push_null); - PUSH(v); + null = NULL; + STACK_GROW(1); + STACK_GROW(((oparg & 1) ? 1 : 0)); + POKE(1, v); + if (oparg & 1) { POKE(1 + ((oparg & 1) ? 
1 : 0), null); } + JUMPBY(5); DISPATCH(); } TARGET(LOAD_GLOBAL_MODULE) { + PyObject *null = NULL; + PyObject *res; + uint16_t index = read_u16(&next_instr[1].cache); + uint32_t version = read_u32(&next_instr[2].cache); assert(cframe.use_tracing == 0); DEOPT_IF(!PyDict_CheckExact(GLOBALS()), LOAD_GLOBAL); PyDictObject *dict = (PyDictObject *)GLOBALS(); - _PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)next_instr; - uint32_t version = read_u32(cache->module_keys_version); DEOPT_IF(dict->ma_keys->dk_version != version, LOAD_GLOBAL); assert(DK_IS_UNICODE(dict->ma_keys)); PyDictUnicodeEntry *entries = DK_UNICODE_ENTRIES(dict->ma_keys); - PyObject *res = entries[cache->index].me_value; + res = entries[index].me_value; DEOPT_IF(res == NULL, LOAD_GLOBAL); - int push_null = oparg & 1; - PEEK(0) = NULL; - JUMPBY(INLINE_CACHE_ENTRIES_LOAD_GLOBAL); + Py_INCREF(res); STAT_INC(LOAD_GLOBAL, hit); - STACK_GROW(push_null+1); - SET_TOP(Py_NewRef(res)); + null = NULL; + STACK_GROW(1); + STACK_GROW(((oparg & 1) ? 1 : 0)); + POKE(1, res); + if (oparg & 1) { POKE(1 + ((oparg & 1) ? 1 : 0), null); } + JUMPBY(5); DISPATCH(); } TARGET(LOAD_GLOBAL_BUILTIN) { + PyObject *null = NULL; + PyObject *res; + uint16_t index = read_u16(&next_instr[1].cache); + uint32_t mod_version = read_u32(&next_instr[2].cache); + uint16_t bltn_version = read_u16(&next_instr[4].cache); assert(cframe.use_tracing == 0); DEOPT_IF(!PyDict_CheckExact(GLOBALS()), LOAD_GLOBAL); DEOPT_IF(!PyDict_CheckExact(BUILTINS()), LOAD_GLOBAL); PyDictObject *mdict = (PyDictObject *)GLOBALS(); PyDictObject *bdict = (PyDictObject *)BUILTINS(); - _PyLoadGlobalCache *cache = (_PyLoadGlobalCache *)next_instr; - uint32_t mod_version = read_u32(cache->module_keys_version); - uint16_t bltn_version = cache->builtin_keys_version; DEOPT_IF(mdict->ma_keys->dk_version != mod_version, LOAD_GLOBAL); DEOPT_IF(bdict->ma_keys->dk_version != bltn_version, LOAD_GLOBAL); assert(DK_IS_UNICODE(bdict->ma_keys)); PyDictUnicodeEntry *entries = DK_UNICODE_ENTRIES(bdict->ma_keys); - PyObject *res = entries[cache->index].me_value; + res = entries[index].me_value; DEOPT_IF(res == NULL, LOAD_GLOBAL); - int push_null = oparg & 1; - PEEK(0) = NULL; - JUMPBY(INLINE_CACHE_ENTRIES_LOAD_GLOBAL); + Py_INCREF(res); STAT_INC(LOAD_GLOBAL, hit); - STACK_GROW(push_null+1); - SET_TOP(Py_NewRef(res)); + null = NULL; + STACK_GROW(1); + STACK_GROW(((oparg & 1) ? 1 : 0)); + POKE(1, res); + if (oparg & 1) { POKE(1 + ((oparg & 1) ? 1 : 0), null); } + JUMPBY(5); DISPATCH(); } @@ -2580,55 +2616,56 @@ } TARGET(GET_YIELD_FROM_ITER) { - /* before: [obj]; after [getiter(obj)] */ - PyObject *iterable = TOP(); + PyObject *iterable = PEEK(1); PyObject *iter; + /* before: [obj]; after [getiter(obj)] */ if (PyCoro_CheckExact(iterable)) { /* `iterable` is a coroutine */ if (!(frame->f_code->co_flags & (CO_COROUTINE | CO_ITERABLE_COROUTINE))) { /* and it is used in a 'yield from' expression of a regular generator. */ - Py_DECREF(iterable); - SET_TOP(NULL); _PyErr_SetString(tstate, PyExc_TypeError, "cannot 'yield from' a coroutine object " "in a non-coroutine generator"); goto error; } + iter = iterable; + } + else if (PyGen_CheckExact(iterable)) { + iter = iterable; } - else if (!PyGen_CheckExact(iterable)) { + else { /* `iterable` is not a generator. 
*/ iter = PyObject_GetIter(iterable); - Py_DECREF(iterable); - SET_TOP(iter); - if (iter == NULL) + if (iter == NULL) { goto error; + } + Py_DECREF(iterable); } + POKE(1, iter); PREDICT(LOAD_CONST); DISPATCH(); } TARGET(FOR_ITER) { PREDICTED(FOR_ITER); + static_assert(INLINE_CACHE_ENTRIES_FOR_ITER == 1, "incorrect cache size"); + PyObject *iter = PEEK(1); + PyObject *next; #if ENABLE_SPECIALIZATION _PyForIterCache *cache = (_PyForIterCache *)next_instr; if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) { assert(cframe.use_tracing == 0); next_instr--; - _Py_Specialize_ForIter(TOP(), next_instr, oparg); + _Py_Specialize_ForIter(iter, next_instr, oparg); DISPATCH_SAME_OPARG(); } STAT_INC(FOR_ITER, deferred); DECREMENT_ADAPTIVE_COUNTER(cache->counter); #endif /* ENABLE_SPECIALIZATION */ - /* before: [iter]; after: [iter, iter()] *or* [] */ - PyObject *iter = TOP(); - PyObject *next = (*Py_TYPE(iter)->tp_iternext)(iter); - if (next != NULL) { - PUSH(next); - JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER); - } - else { + /* before: [iter]; after: [iter, iter()] *or* [] (and jump over END_FOR.) */ + next = (*Py_TYPE(iter)->tp_iternext)(iter); + if (next == NULL) { if (_PyErr_Occurred(tstate)) { if (!_PyErr_ExceptionMatches(tstate, PyExc_StopIteration)) { goto error; @@ -2640,63 +2677,81 @@ } /* iterator ended normally */ assert(_Py_OPCODE(next_instr[INLINE_CACHE_ENTRIES_FOR_ITER + oparg]) == END_FOR); - STACK_SHRINK(1); Py_DECREF(iter); - /* Skip END_FOR */ + STACK_SHRINK(1); + /* Jump forward oparg, then skip following END_FOR instruction */ JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER + oparg + 1); + DISPATCH(); } + // Common case: no jump, leave it to the code generator + STACK_GROW(1); + POKE(1, next); + JUMPBY(1); DISPATCH(); } TARGET(FOR_ITER_LIST) { + PyObject *iter = PEEK(1); + PyObject *next; assert(cframe.use_tracing == 0); - _PyListIterObject *it = (_PyListIterObject *)TOP(); - DEOPT_IF(Py_TYPE(it) != &PyListIter_Type, FOR_ITER); + DEOPT_IF(Py_TYPE(iter) != &PyListIter_Type, FOR_ITER); + _PyListIterObject *it = (_PyListIterObject *)iter; STAT_INC(FOR_ITER, hit); PyListObject *seq = it->it_seq; if (seq) { if (it->it_index < PyList_GET_SIZE(seq)) { - PyObject *next = PyList_GET_ITEM(seq, it->it_index++); - PUSH(Py_NewRef(next)); - JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER); + next = Py_NewRef(PyList_GET_ITEM(seq, it->it_index++)); goto end_for_iter_list; // End of this instruction } it->it_seq = NULL; Py_DECREF(seq); } + Py_DECREF(iter); STACK_SHRINK(1); - Py_DECREF(it); + /* Jump forward oparg, then skip following END_FOR instruction */ JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER + oparg + 1); + DISPATCH(); end_for_iter_list: + // Common case: no jump, leave it to the code generator + STACK_GROW(1); + POKE(1, next); + JUMPBY(1); DISPATCH(); } TARGET(FOR_ITER_TUPLE) { + PyObject *iter = PEEK(1); + PyObject *next; assert(cframe.use_tracing == 0); - _PyTupleIterObject *it = (_PyTupleIterObject *)TOP(); + _PyTupleIterObject *it = (_PyTupleIterObject *)iter; DEOPT_IF(Py_TYPE(it) != &PyTupleIter_Type, FOR_ITER); STAT_INC(FOR_ITER, hit); PyTupleObject *seq = it->it_seq; if (seq) { if (it->it_index < PyTuple_GET_SIZE(seq)) { - PyObject *next = PyTuple_GET_ITEM(seq, it->it_index++); - PUSH(Py_NewRef(next)); - JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER); + next = Py_NewRef(PyTuple_GET_ITEM(seq, it->it_index++)); goto end_for_iter_tuple; // End of this instruction } it->it_seq = NULL; Py_DECREF(seq); } + Py_DECREF(iter); STACK_SHRINK(1); - Py_DECREF(it); + /* Jump forward oparg, then skip following END_FOR instruction */ 
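Note: the FOR_ITER variants now share one protocol: on success the next value is pushed on top of the iterator (the "no jump" case left to the code generator); on exhaustion the iterator is popped and execution jumps past the trailing END_FOR. Seen from the C API, the per-item work is the ordinary PyIter_Next() loop; a minimal sketch (function name invented, C extension context with Python.h assumed):

    static int
    consume_iterable_sketch(PyObject *iterable)
    {
        PyObject *iter = PyObject_GetIter(iterable);   /* GET_ITER */
        if (iter == NULL) {
            return -1;
        }
        PyObject *item;
        while ((item = PyIter_Next(iter)) != NULL) {   /* FOR_ITER, common case */
            /* ... loop body would run here ... */
            Py_DECREF(item);
        }
        Py_DECREF(iter);                               /* iterator popped when exhausted */
        if (PyErr_Occurred()) {
            return -1;   /* NULL with an exception set is a real error */
        }
        return 0;        /* NULL without one is normal exhaustion (END_FOR path) */
    }

Unlike the raw tp_iternext call in the interpreter, PyIter_Next() already swallows StopIteration, so only genuine errors remain to be checked.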
JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER + oparg + 1); + DISPATCH(); end_for_iter_tuple: + // Common case: no jump, leave it to the code generator + STACK_GROW(1); + POKE(1, next); + JUMPBY(1); DISPATCH(); } TARGET(FOR_ITER_RANGE) { + PyObject *iter = PEEK(1); assert(cframe.use_tracing == 0); - _PyRangeIterObject *r = (_PyRangeIterObject *)TOP(); + _PyRangeIterObject *r = (_PyRangeIterObject *)iter; DEOPT_IF(Py_TYPE(r) != &PyRangeIter_Type, FOR_ITER); STAT_INC(FOR_ITER, hit); _Py_CODEUNIT next = next_instr[INLINE_CACHE_ENTRIES_FOR_ITER]; @@ -2704,6 +2759,7 @@ if (r->len <= 0) { STACK_SHRINK(1); Py_DECREF(r); + // Jump over END_FOR instruction. JUMPBY(INLINE_CACHE_ENTRIES_FOR_ITER + oparg + 1); } else { @@ -2720,8 +2776,9 @@ } TARGET(FOR_ITER_GEN) { + PyObject *iter = PEEK(1); assert(cframe.use_tracing == 0); - PyGenObject *gen = (PyGenObject *)TOP(); + PyGenObject *gen = (PyGenObject *)iter; DEOPT_IF(Py_TYPE(gen) != &PyGen_Type, FOR_ITER); DEOPT_IF(gen->gi_frame_state >= FRAME_EXECUTING, FOR_ITER); STAT_INC(FOR_ITER, hit); @@ -2947,19 +3004,6 @@ DISPATCH(); } - TARGET(CALL_BOUND_METHOD_EXACT_ARGS) { - DEOPT_IF(is_method(stack_pointer, oparg), CALL); - PyObject *function = PEEK(oparg + 1); - DEOPT_IF(Py_TYPE(function) != &PyMethod_Type, CALL); - STAT_INC(CALL, hit); - PyObject *self = ((PyMethodObject *)function)->im_self; - PEEK(oparg + 1) = Py_NewRef(self); - PyObject *meth = ((PyMethodObject *)function)->im_func; - PEEK(oparg + 2) = Py_NewRef(meth); - Py_DECREF(function); - GO_TO_INSTRUCTION(CALL_PY_EXACT_ARGS); - } - TARGET(KW_NAMES) { assert(kwnames == NULL); assert(oparg < PyTuple_GET_SIZE(consts)); @@ -2969,48 +3013,55 @@ TARGET(CALL) { PREDICTED(CALL); + static_assert(INLINE_CACHE_ENTRIES_CALL == 4, "incorrect cache size"); + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *method = PEEK(2 + oparg); + PyObject *res; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } #if ENABLE_SPECIALIZATION _PyCallCache *cache = (_PyCallCache *)next_instr; if (ADAPTIVE_COUNTER_IS_ZERO(cache->counter)) { assert(cframe.use_tracing == 0); - int is_meth = is_method(stack_pointer, oparg); - int nargs = oparg + is_meth; - PyObject *callable = PEEK(nargs + 1); next_instr--; - _Py_Specialize_Call(callable, next_instr, nargs, kwnames); + _Py_Specialize_Call(callable, next_instr, total_args, kwnames); DISPATCH_SAME_OPARG(); } STAT_INC(CALL, deferred); DECREMENT_ADAPTIVE_COUNTER(cache->counter); #endif /* ENABLE_SPECIALIZATION */ - int total_args, is_meth; - is_meth = is_method(stack_pointer, oparg); - PyObject *function = PEEK(oparg + 1); - if (!is_meth && Py_TYPE(function) == &PyMethod_Type) { - PyObject *self = ((PyMethodObject *)function)->im_self; - PEEK(oparg+1) = Py_NewRef(self); - PyObject *meth = ((PyMethodObject *)function)->im_func; - PEEK(oparg+2) = Py_NewRef(meth); - Py_DECREF(function); - is_meth = 1; - } - total_args = oparg + is_meth; - function = PEEK(total_args + 1); + if (!is_meth && Py_TYPE(callable) == &PyMethod_Type) { + is_meth = 1; // For consistenct; it's dead, though + args--; + total_args++; + PyObject *self = ((PyMethodObject *)callable)->im_self; + args[0] = Py_NewRef(self); + method = ((PyMethodObject *)callable)->im_func; + args[-1] = Py_NewRef(method); + Py_DECREF(callable); + callable = method; + } int positional_args = total_args - KWNAMES_LEN(); // Check if the call can be inlined or not - if (Py_TYPE(function) == &PyFunction_Type && + if (Py_TYPE(callable) == 
&PyFunction_Type && tstate->interp->eval_frame == NULL && - ((PyFunctionObject *)function)->vectorcall == _PyFunction_Vectorcall) + ((PyFunctionObject *)callable)->vectorcall == _PyFunction_Vectorcall) { - int code_flags = ((PyCodeObject*)PyFunction_GET_CODE(function))->co_flags; - PyObject *locals = code_flags & CO_OPTIMIZED ? NULL : Py_NewRef(PyFunction_GET_GLOBALS(function)); - STACK_SHRINK(total_args); + int code_flags = ((PyCodeObject*)PyFunction_GET_CODE(callable))->co_flags; + PyObject *locals = code_flags & CO_OPTIMIZED ? NULL : Py_NewRef(PyFunction_GET_GLOBALS(callable)); _PyInterpreterFrame *new_frame = _PyEvalFramePushAndInit( - tstate, (PyFunctionObject *)function, locals, - stack_pointer, positional_args, kwnames + tstate, (PyFunctionObject *)callable, locals, + args, positional_args, kwnames ); kwnames = NULL; - STACK_SHRINK(2-is_meth); + // Manipulate stack directly since we leave using DISPATCH_INLINED(). + STACK_SHRINK(oparg + 2); // The frame has stolen all the arguments from the stack, // so there is no need to clean them up. if (new_frame == NULL) { @@ -3020,189 +3071,234 @@ DISPATCH_INLINED(new_frame); } /* Callable is not a normal Python function */ - PyObject *res; if (cframe.use_tracing) { res = trace_call_function( - tstate, function, stack_pointer-total_args, + tstate, callable, args, positional_args, kwnames); } else { res = PyObject_Vectorcall( - function, stack_pointer-total_args, + callable, args, positional_args | PY_VECTORCALL_ARGUMENTS_OFFSET, kwnames); } kwnames = NULL; assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); - Py_DECREF(function); - /* Clear the stack */ - STACK_SHRINK(total_args); + Py_DECREF(callable); for (int i = 0; i < total_args; i++) { - Py_DECREF(stack_pointer[i]); + Py_DECREF(args[i]); } - STACK_SHRINK(2-is_meth); - PUSH(res); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); CHECK_EVAL_BREAKER(); DISPATCH(); } + TARGET(CALL_BOUND_METHOD_EXACT_ARGS) { + PyObject *callable = PEEK(1 + oparg); + PyObject *method = PEEK(2 + oparg); + DEOPT_IF(method != NULL, CALL); + DEOPT_IF(Py_TYPE(callable) != &PyMethod_Type, CALL); + STAT_INC(CALL, hit); + PyObject *self = ((PyMethodObject *)callable)->im_self; + PEEK(oparg + 1) = Py_NewRef(self); // callable + PyObject *meth = ((PyMethodObject *)callable)->im_func; + PEEK(oparg + 2) = Py_NewRef(meth); // method + Py_DECREF(callable); + GO_TO_INSTRUCTION(CALL_PY_EXACT_ARGS); + } + TARGET(CALL_PY_EXACT_ARGS) { PREDICTED(CALL_PY_EXACT_ARGS); + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *method = PEEK(2 + oparg); + uint32_t func_version = read_u32(&next_instr[1].cache); assert(kwnames == NULL); DEOPT_IF(tstate->interp->eval_frame, CALL); - _PyCallCache *cache = (_PyCallCache *)next_instr; - int is_meth = is_method(stack_pointer, oparg); - int argcount = oparg + is_meth; - PyObject *callable = PEEK(argcount + 1); + int is_meth = method != NULL; + int argcount = oparg; + if (is_meth) { + callable = method; + args--; + argcount++; + } DEOPT_IF(!PyFunction_Check(callable), CALL); PyFunctionObject *func = (PyFunctionObject *)callable; - DEOPT_IF(func->func_version != read_u32(cache->func_version), CALL); + DEOPT_IF(func->func_version != func_version, CALL); PyCodeObject *code = (PyCodeObject *)func->func_code; DEOPT_IF(code->co_argcount != argcount, CALL); DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, 
code->co_framesize), CALL); STAT_INC(CALL, hit); _PyInterpreterFrame *new_frame = _PyFrame_PushUnchecked(tstate, func, argcount); - STACK_SHRINK(argcount); for (int i = 0; i < argcount; i++) { - new_frame->localsplus[i] = stack_pointer[i]; + new_frame->localsplus[i] = args[i]; } - STACK_SHRINK(2-is_meth); + // Manipulate stack directly since we leave using DISPATCH_INLINED(). + STACK_SHRINK(oparg + 2); JUMPBY(INLINE_CACHE_ENTRIES_CALL); DISPATCH_INLINED(new_frame); } TARGET(CALL_PY_WITH_DEFAULTS) { + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *method = PEEK(2 + oparg); + uint32_t func_version = read_u32(&next_instr[1].cache); + uint16_t min_args = read_u16(&next_instr[3].cache); assert(kwnames == NULL); DEOPT_IF(tstate->interp->eval_frame, CALL); - _PyCallCache *cache = (_PyCallCache *)next_instr; - int is_meth = is_method(stack_pointer, oparg); - int argcount = oparg + is_meth; - PyObject *callable = PEEK(argcount + 1); + int is_meth = method != NULL; + int argcount = oparg; + if (is_meth) { + callable = method; + args--; + argcount++; + } DEOPT_IF(!PyFunction_Check(callable), CALL); PyFunctionObject *func = (PyFunctionObject *)callable; - DEOPT_IF(func->func_version != read_u32(cache->func_version), CALL); + DEOPT_IF(func->func_version != func_version, CALL); PyCodeObject *code = (PyCodeObject *)func->func_code; DEOPT_IF(argcount > code->co_argcount, CALL); - int minargs = cache->min_args; - DEOPT_IF(argcount < minargs, CALL); + DEOPT_IF(argcount < min_args, CALL); DEOPT_IF(!_PyThreadState_HasStackSpace(tstate, code->co_framesize), CALL); STAT_INC(CALL, hit); _PyInterpreterFrame *new_frame = _PyFrame_PushUnchecked(tstate, func, code->co_argcount); - STACK_SHRINK(argcount); for (int i = 0; i < argcount; i++) { - new_frame->localsplus[i] = stack_pointer[i]; + new_frame->localsplus[i] = args[i]; } for (int i = argcount; i < code->co_argcount; i++) { - PyObject *def = PyTuple_GET_ITEM(func->func_defaults, - i - minargs); + PyObject *def = PyTuple_GET_ITEM(func->func_defaults, i - min_args); new_frame->localsplus[i] = Py_NewRef(def); } - STACK_SHRINK(2-is_meth); + // Manipulate stack and cache directly since we leave using DISPATCH_INLINED(). 
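Note: throughout this CALL family, a non-NULL "method" slot below the arguments marks a pre-pushed callable/self pair, and a bound-method callable is flattened so that its __func__ becomes the callable and its __self__ becomes the first positional argument. A minimal C-API sketch of that flattening (names invented, one positional argument, Python.h assumed); the interpreter itself reuses its stack slots together with PY_VECTORCALL_ARGUMENTS_OFFSET instead of building a new array:

    static PyObject *
    call_bound_method_sketch(PyObject *bound, PyObject *arg)
    {
        if (!PyMethod_Check(bound)) {
            PyErr_SetString(PyExc_TypeError, "expected a bound method");
            return NULL;
        }
        PyObject *func = PyMethod_GET_FUNCTION(bound);  /* borrowed: becomes the callable */
        PyObject *self = PyMethod_GET_SELF(bound);      /* borrowed: becomes args[0] */
        PyObject *args[2] = {self, arg};
        return PyObject_Vectorcall(func, args, 2, NULL);
    }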
+ STACK_SHRINK(oparg + 2); JUMPBY(INLINE_CACHE_ENTRIES_CALL); DISPATCH_INLINED(new_frame); } TARGET(CALL_NO_KW_TYPE_1) { + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *null = PEEK(2 + oparg); + PyObject *res; assert(kwnames == NULL); assert(cframe.use_tracing == 0); assert(oparg == 1); - DEOPT_IF(is_method(stack_pointer, 1), CALL); - PyObject *obj = TOP(); - PyObject *callable = SECOND(); + DEOPT_IF(null != NULL, CALL); + PyObject *obj = args[0]; DEOPT_IF(callable != (PyObject *)&PyType_Type, CALL); STAT_INC(CALL, hit); - JUMPBY(INLINE_CACHE_ENTRIES_CALL); - PyObject *res = Py_NewRef(Py_TYPE(obj)); - Py_DECREF(callable); + res = Py_NewRef(Py_TYPE(obj)); Py_DECREF(obj); - STACK_SHRINK(2); - SET_TOP(res); + Py_DECREF(&PyType_Type); // I.e., callable + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); DISPATCH(); } TARGET(CALL_NO_KW_STR_1) { + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *null = PEEK(2 + oparg); + PyObject *res; assert(kwnames == NULL); assert(cframe.use_tracing == 0); assert(oparg == 1); - DEOPT_IF(is_method(stack_pointer, 1), CALL); - PyObject *callable = PEEK(2); + DEOPT_IF(null != NULL, CALL); DEOPT_IF(callable != (PyObject *)&PyUnicode_Type, CALL); STAT_INC(CALL, hit); - PyObject *arg = TOP(); - PyObject *res = PyObject_Str(arg); + PyObject *arg = args[0]; + res = PyObject_Str(arg); Py_DECREF(arg); - Py_DECREF(&PyUnicode_Type); - STACK_SHRINK(2); - SET_TOP(res); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + Py_DECREF(&PyUnicode_Type); // I.e., callable + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); CHECK_EVAL_BREAKER(); DISPATCH(); } TARGET(CALL_NO_KW_TUPLE_1) { + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *null = PEEK(2 + oparg); + PyObject *res; assert(kwnames == NULL); assert(oparg == 1); - DEOPT_IF(is_method(stack_pointer, 1), CALL); - PyObject *callable = PEEK(2); + DEOPT_IF(null != NULL, CALL); DEOPT_IF(callable != (PyObject *)&PyTuple_Type, CALL); STAT_INC(CALL, hit); - PyObject *arg = TOP(); - PyObject *res = PySequence_Tuple(arg); + PyObject *arg = args[0]; + res = PySequence_Tuple(arg); Py_DECREF(arg); - Py_DECREF(&PyTuple_Type); - STACK_SHRINK(2); - SET_TOP(res); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + Py_DECREF(&PyTuple_Type); // I.e., tuple + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); CHECK_EVAL_BREAKER(); DISPATCH(); } TARGET(CALL_BUILTIN_CLASS) { - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *method = PEEK(2 + oparg); + PyObject *res; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } int kwnames_len = KWNAMES_LEN(); - PyObject *callable = PEEK(total_args + 1); DEOPT_IF(!PyType_Check(callable), CALL); PyTypeObject *tp = (PyTypeObject *)callable; DEOPT_IF(tp->tp_vectorcall == NULL, CALL); STAT_INC(CALL, hit); - STACK_SHRINK(total_args); - PyObject *res = tp->tp_vectorcall((PyObject *)tp, stack_pointer, - total_args-kwnames_len, kwnames); + res = tp->tp_vectorcall((PyObject *)tp, args, + total_args - kwnames_len, kwnames); kwnames = NULL; /* Free the arguments. 
*/ for (int i = 0; i < total_args; i++) { - Py_DECREF(stack_pointer[i]); + Py_DECREF(args[i]); } Py_DECREF(tp); - STACK_SHRINK(1-is_meth); - SET_TOP(res); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); CHECK_EVAL_BREAKER(); DISPATCH(); } TARGET(CALL_NO_KW_BUILTIN_O) { + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *method = PEEK(2 + oparg); + PyObject *res; assert(cframe.use_tracing == 0); /* Builtin METH_O functions */ assert(kwnames == NULL); - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } DEOPT_IF(total_args != 1, CALL); - PyObject *callable = PEEK(total_args + 1); DEOPT_IF(!PyCFunction_CheckExact(callable), CALL); DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_O, CALL); STAT_INC(CALL, hit); @@ -3212,81 +3308,92 @@ if (_Py_EnterRecursiveCallTstate(tstate, " while calling a Python object")) { goto error; } - PyObject *arg = TOP(); - PyObject *res = _PyCFunction_TrampolineCall(cfunc, PyCFunction_GET_SELF(callable), arg); + PyObject *arg = args[0]; + res = _PyCFunction_TrampolineCall(cfunc, PyCFunction_GET_SELF(callable), arg); _Py_LeaveRecursiveCallTstate(tstate); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); Py_DECREF(arg); Py_DECREF(callable); - STACK_SHRINK(2-is_meth); - SET_TOP(res); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); CHECK_EVAL_BREAKER(); DISPATCH(); } TARGET(CALL_NO_KW_BUILTIN_FAST) { + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *method = PEEK(2 + oparg); + PyObject *res; assert(cframe.use_tracing == 0); /* Builtin METH_FASTCALL functions, without keywords */ assert(kwnames == NULL); - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; - PyObject *callable = PEEK(total_args + 1); + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } DEOPT_IF(!PyCFunction_CheckExact(callable), CALL); - DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_FASTCALL, - CALL); + DEOPT_IF(PyCFunction_GET_FLAGS(callable) != METH_FASTCALL, CALL); STAT_INC(CALL, hit); PyCFunction cfunc = PyCFunction_GET_FUNCTION(callable); - STACK_SHRINK(total_args); /* res = func(self, args, nargs) */ - PyObject *res = ((_PyCFunctionFast)(void(*)(void))cfunc)( + res = ((_PyCFunctionFast)(void(*)(void))cfunc)( PyCFunction_GET_SELF(callable), - stack_pointer, + args, total_args); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); /* Free the arguments. */ for (int i = 0; i < total_args; i++) { - Py_DECREF(stack_pointer[i]); + Py_DECREF(args[i]); } - STACK_SHRINK(2-is_meth); - PUSH(res); Py_DECREF(callable); - if (res == NULL) { + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } /* Not deopting because this doesn't mean our optimization was wrong. `res` can be NULL for valid reasons. Eg. getattr(x, 'invalid'). In those cases an exception is set, so we must handle it. 
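Note: the comment above states the key invariant for these builtin-call fast paths: a NULL result with an exception set is a legitimate outcome of a correctly specialized call, not a reason to de-specialize. At the C-API level this is the usual pattern; a small sketch (name invented) mirroring the getattr(x, 'invalid') example:

    static PyObject *
    getattr_or_none_sketch(PyObject *obj)
    {
        PyObject *value = PyObject_GetAttrString(obj, "invalid");
        if (value == NULL) {
            if (!PyErr_ExceptionMatches(PyExc_AttributeError)) {
                return NULL;      /* a different error: propagate it */
            }
            PyErr_Clear();        /* the expected failure: handle it */
            Py_RETURN_NONE;
        }
        return value;
    }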
*/ - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); CHECK_EVAL_BREAKER(); DISPATCH(); } TARGET(CALL_BUILTIN_FAST_WITH_KEYWORDS) { + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *method = PEEK(2 + oparg); + PyObject *res; assert(cframe.use_tracing == 0); /* Builtin METH_FASTCALL | METH_KEYWORDS functions */ - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; - PyObject *callable = PEEK(total_args + 1); + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } DEOPT_IF(!PyCFunction_CheckExact(callable), CALL); DEOPT_IF(PyCFunction_GET_FLAGS(callable) != (METH_FASTCALL | METH_KEYWORDS), CALL); STAT_INC(CALL, hit); - STACK_SHRINK(total_args); /* res = func(self, args, nargs, kwnames) */ _PyCFunctionFastWithKeywords cfunc = (_PyCFunctionFastWithKeywords)(void(*)(void)) PyCFunction_GET_FUNCTION(callable); - PyObject *res = cfunc( + res = cfunc( PyCFunction_GET_SELF(callable), - stack_pointer, + args, total_args - KWNAMES_LEN(), kwnames ); @@ -3295,99 +3402,112 @@ /* Free the arguments. */ for (int i = 0; i < total_args; i++) { - Py_DECREF(stack_pointer[i]); + Py_DECREF(args[i]); } - STACK_SHRINK(2-is_meth); - PUSH(res); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); CHECK_EVAL_BREAKER(); DISPATCH(); } TARGET(CALL_NO_KW_LEN) { + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *method = PEEK(2 + oparg); + PyObject *res; assert(cframe.use_tracing == 0); assert(kwnames == NULL); /* len(o) */ - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } DEOPT_IF(total_args != 1, CALL); - PyObject *callable = PEEK(total_args + 1); PyInterpreterState *interp = _PyInterpreterState_GET(); DEOPT_IF(callable != interp->callable_cache.len, CALL); STAT_INC(CALL, hit); - PyObject *arg = TOP(); + PyObject *arg = args[0]; Py_ssize_t len_i = PyObject_Length(arg); if (len_i < 0) { goto error; } - PyObject *res = PyLong_FromSsize_t(len_i); + res = PyLong_FromSsize_t(len_i); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); - STACK_SHRINK(2-is_meth); - SET_TOP(res); Py_DECREF(callable); Py_DECREF(arg); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); DISPATCH(); } TARGET(CALL_NO_KW_ISINSTANCE) { + PyObject **args = &PEEK(oparg); + PyObject *callable = PEEK(1 + oparg); + PyObject *method = PEEK(2 + oparg); + PyObject *res; assert(cframe.use_tracing == 0); assert(kwnames == NULL); /* isinstance(o, o2) */ - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; - PyObject *callable = PEEK(total_args + 1); + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + callable = method; + args--; + total_args++; + } DEOPT_IF(total_args != 2, CALL); PyInterpreterState *interp = _PyInterpreterState_GET(); DEOPT_IF(callable != interp->callable_cache.isinstance, CALL); STAT_INC(CALL, hit); - PyObject *cls = POP(); - PyObject *inst = TOP(); + 
PyObject *cls = args[1]; + PyObject *inst = args[0]; int retval = PyObject_IsInstance(inst, cls); if (retval < 0) { - Py_DECREF(cls); goto error; } - PyObject *res = PyBool_FromLong(retval); + res = PyBool_FromLong(retval); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); - STACK_SHRINK(2-is_meth); - SET_TOP(res); Py_DECREF(inst); Py_DECREF(cls); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); DISPATCH(); } TARGET(CALL_NO_KW_LIST_APPEND) { + PyObject **args = &PEEK(oparg); + PyObject *self = PEEK(1 + oparg); + PyObject *method = PEEK(2 + oparg); assert(cframe.use_tracing == 0); assert(kwnames == NULL); assert(oparg == 1); - PyObject *callable = PEEK(3); + assert(method != NULL); PyInterpreterState *interp = _PyInterpreterState_GET(); - DEOPT_IF(callable != interp->callable_cache.list_append, CALL); - PyObject *list = SECOND(); - DEOPT_IF(!PyList_Check(list), CALL); + DEOPT_IF(method != interp->callable_cache.list_append, CALL); + DEOPT_IF(!PyList_Check(self), CALL); STAT_INC(CALL, hit); - PyObject *arg = POP(); - if (_PyList_AppendTakeRef((PyListObject *)list, arg) < 0) { - goto error; + if (_PyList_AppendTakeRef((PyListObject *)self, args[0]) < 0) { + goto pop_1_error; // Since arg is DECREF'ed already } - STACK_SHRINK(2); - Py_DECREF(list); - Py_DECREF(callable); + Py_DECREF(self); + Py_DECREF(method); + STACK_SHRINK(3); // CALL + POP_TOP JUMPBY(INLINE_CACHE_ENTRIES_CALL + 1); assert(_Py_OPCODE(next_instr[-1]) == POP_TOP); @@ -3395,17 +3515,24 @@ } TARGET(CALL_NO_KW_METHOD_DESCRIPTOR_O) { + PyObject **args = &PEEK(oparg); + PyObject *method = PEEK(2 + oparg); + PyObject *res; assert(kwnames == NULL); - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + args--; + total_args++; + } PyMethodDescrObject *callable = (PyMethodDescrObject *)PEEK(total_args + 1); DEOPT_IF(total_args != 2, CALL); DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), CALL); PyMethodDef *meth = callable->d_method; DEOPT_IF(meth->ml_flags != METH_O, CALL); - PyObject *arg = TOP(); - PyObject *self = SECOND(); + PyObject *arg = args[1]; + PyObject *self = args[0]; DEOPT_IF(!Py_IS_TYPE(self, callable->d_common.d_type), CALL); STAT_INC(CALL, hit); PyCFunction cfunc = meth->ml_meth; @@ -3414,69 +3541,78 @@ if (_Py_EnterRecursiveCallTstate(tstate, " while calling a Python object")) { goto error; } - PyObject *res = _PyCFunction_TrampolineCall(cfunc, self, arg); + res = _PyCFunction_TrampolineCall(cfunc, self, arg); _Py_LeaveRecursiveCallTstate(tstate); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); Py_DECREF(self); Py_DECREF(arg); - STACK_SHRINK(oparg + 1); - SET_TOP(res); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); CHECK_EVAL_BREAKER(); DISPATCH(); } TARGET(CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS) { - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + PyObject **args = &PEEK(oparg); + PyObject *method = PEEK(2 + oparg); + PyObject *res; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + args--; + total_args++; + } PyMethodDescrObject *callable = (PyMethodDescrObject 
*)PEEK(total_args + 1); DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), CALL); PyMethodDef *meth = callable->d_method; DEOPT_IF(meth->ml_flags != (METH_FASTCALL|METH_KEYWORDS), CALL); PyTypeObject *d_type = callable->d_common.d_type; - PyObject *self = PEEK(total_args); + PyObject *self = args[0]; DEOPT_IF(!Py_IS_TYPE(self, d_type), CALL); STAT_INC(CALL, hit); - int nargs = total_args-1; - STACK_SHRINK(nargs); + int nargs = total_args - 1; _PyCFunctionFastWithKeywords cfunc = (_PyCFunctionFastWithKeywords)(void(*)(void))meth->ml_meth; - PyObject *res = cfunc(self, stack_pointer, nargs - KWNAMES_LEN(), - kwnames); + res = cfunc(self, args + 1, nargs - KWNAMES_LEN(), kwnames); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); kwnames = NULL; /* Free the arguments. */ - for (int i = 0; i < nargs; i++) { - Py_DECREF(stack_pointer[i]); + for (int i = 0; i < total_args; i++) { + Py_DECREF(args[i]); } - Py_DECREF(self); - STACK_SHRINK(2-is_meth); - SET_TOP(res); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); CHECK_EVAL_BREAKER(); DISPATCH(); } TARGET(CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS) { + PyObject **args = &PEEK(oparg); + PyObject *method = PEEK(2 + oparg); + PyObject *res; assert(kwnames == NULL); assert(oparg == 0 || oparg == 1); - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + args--; + total_args++; + } DEOPT_IF(total_args != 1, CALL); PyMethodDescrObject *callable = (PyMethodDescrObject *)SECOND(); DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), CALL); PyMethodDef *meth = callable->d_method; - PyObject *self = TOP(); + PyObject *self = args[0]; DEOPT_IF(!Py_IS_TYPE(self, callable->d_common.d_type), CALL); DEOPT_IF(meth->ml_flags != METH_NOARGS, CALL); STAT_INC(CALL, hit); @@ -3486,76 +3622,79 @@ if (_Py_EnterRecursiveCallTstate(tstate, " while calling a Python object")) { goto error; } - PyObject *res = _PyCFunction_TrampolineCall(cfunc, self, NULL); + res = _PyCFunction_TrampolineCall(cfunc, self, NULL); _Py_LeaveRecursiveCallTstate(tstate); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); Py_DECREF(self); - STACK_SHRINK(oparg + 1); - SET_TOP(res); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); CHECK_EVAL_BREAKER(); DISPATCH(); } TARGET(CALL_NO_KW_METHOD_DESCRIPTOR_FAST) { + PyObject **args = &PEEK(oparg); + PyObject *method = PEEK(2 + oparg); + PyObject *res; assert(kwnames == NULL); - int is_meth = is_method(stack_pointer, oparg); - int total_args = oparg + is_meth; + int is_meth = method != NULL; + int total_args = oparg; + if (is_meth) { + args--; + total_args++; + } PyMethodDescrObject *callable = (PyMethodDescrObject *)PEEK(total_args + 1); /* Builtin METH_FASTCALL methods, without keywords */ DEOPT_IF(!Py_IS_TYPE(callable, &PyMethodDescr_Type), CALL); PyMethodDef *meth = callable->d_method; DEOPT_IF(meth->ml_flags != METH_FASTCALL, CALL); - PyObject *self = PEEK(total_args); + PyObject *self = args[0]; DEOPT_IF(!Py_IS_TYPE(self, callable->d_common.d_type), CALL); STAT_INC(CALL, hit); _PyCFunctionFast cfunc = (_PyCFunctionFast)(void(*)(void))meth->ml_meth; - int nargs = total_args-1; 
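Note: these METH_FASTCALL method-descriptor specializations work because of the fastcall signature itself: the callee receives self plus a contiguous vector of arguments, so the interpreter can hand over its own stack slots (args + 1) without packing a tuple. A minimal sketch of a METH_FASTCALL function and its method table (all names invented, Python.h assumed):

    static PyObject *
    sum_args_sketch(PyObject *self, PyObject *const *args, Py_ssize_t nargs)
    {
        long total = 0;
        for (Py_ssize_t i = 0; i < nargs; i++) {
            long v = PyLong_AsLong(args[i]);
            if (v == -1 && PyErr_Occurred()) {
                return NULL;
            }
            total += v;
        }
        return PyLong_FromLong(total);
    }

    static PyMethodDef sketch_methods[] = {
        {"sum_args", (PyCFunction)(void (*)(void))sum_args_sketch, METH_FASTCALL,
         "Sum integer arguments."},
        {NULL, NULL, 0, NULL},
    };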
- STACK_SHRINK(nargs); - PyObject *res = cfunc(self, stack_pointer, nargs); + int nargs = total_args - 1; + res = cfunc(self, args + 1, nargs); assert((res != NULL) ^ (_PyErr_Occurred(tstate) != NULL)); /* Clear the stack of the arguments. */ - for (int i = 0; i < nargs; i++) { - Py_DECREF(stack_pointer[i]); + for (int i = 0; i < total_args; i++) { + Py_DECREF(args[i]); } - Py_DECREF(self); - STACK_SHRINK(2-is_meth); - SET_TOP(res); Py_DECREF(callable); - if (res == NULL) { - goto error; - } - JUMPBY(INLINE_CACHE_ENTRIES_CALL); + if (res == NULL) { STACK_SHRINK(oparg); goto pop_2_error; } + STACK_SHRINK(oparg); + STACK_SHRINK(1); + POKE(1, res); + JUMPBY(4); CHECK_EVAL_BREAKER(); DISPATCH(); } TARGET(CALL_FUNCTION_EX) { PREDICTED(CALL_FUNCTION_EX); - PyObject *func, *callargs, *kwargs = NULL, *result; - if (oparg & 0x01) { - kwargs = POP(); + PyObject *kwargs = (oparg & 1) ? PEEK(((oparg & 1) ? 1 : 0)) : NULL; + PyObject *callargs = PEEK(1 + ((oparg & 1) ? 1 : 0)); + PyObject *func = PEEK(2 + ((oparg & 1) ? 1 : 0)); + PyObject *result; + if (oparg & 1) { // DICT_MERGE is called before this opcode if there are kwargs. // It converts all dict subtypes in kwargs into regular dicts. assert(PyDict_CheckExact(kwargs)); } - callargs = POP(); - func = TOP(); if (!PyTuple_CheckExact(callargs)) { if (check_args_iterable(tstate, func, callargs) < 0) { - Py_DECREF(callargs); goto error; } - Py_SETREF(callargs, PySequence_Tuple(callargs)); - if (callargs == NULL) { + PyObject *tuple = PySequence_Tuple(callargs); + if (tuple == NULL) { goto error; } + Py_SETREF(callargs, tuple); } assert(PyTuple_CheckExact(callargs)); @@ -3564,45 +3703,52 @@ Py_DECREF(callargs); Py_XDECREF(kwargs); - STACK_SHRINK(1); - assert(TOP() == NULL); - SET_TOP(result); - if (result == NULL) { - goto error; - } + assert(PEEK(3 + (oparg & 1)) == NULL); + if (result == NULL) { STACK_SHRINK(((oparg & 1) ? 1 : 0)); goto pop_3_error; } + STACK_SHRINK(((oparg & 1) ? 1 : 0)); + STACK_SHRINK(2); + POKE(1, result); CHECK_EVAL_BREAKER(); DISPATCH(); } TARGET(MAKE_FUNCTION) { - PyObject *codeobj = POP(); - PyFunctionObject *func = (PyFunctionObject *) + PyObject *codeobj = PEEK(1); + PyObject *closure = (oparg & 0x08) ? PEEK(1 + ((oparg & 0x08) ? 1 : 0)) : NULL; + PyObject *annotations = (oparg & 0x04) ? PEEK(1 + ((oparg & 0x08) ? 1 : 0) + ((oparg & 0x04) ? 1 : 0)) : NULL; + PyObject *kwdefaults = (oparg & 0x02) ? PEEK(1 + ((oparg & 0x08) ? 1 : 0) + ((oparg & 0x04) ? 1 : 0) + ((oparg & 0x02) ? 1 : 0)) : NULL; + PyObject *defaults = (oparg & 0x01) ? PEEK(1 + ((oparg & 0x08) ? 1 : 0) + ((oparg & 0x04) ? 1 : 0) + ((oparg & 0x02) ? 1 : 0) + ((oparg & 0x01) ? 
1 : 0)) : NULL; + PyObject *func; + + PyFunctionObject *func_obj = (PyFunctionObject *) PyFunction_New(codeobj, GLOBALS()); Py_DECREF(codeobj); - if (func == NULL) { + if (func_obj == NULL) { goto error; } if (oparg & 0x08) { - assert(PyTuple_CheckExact(TOP())); - func->func_closure = POP(); + assert(PyTuple_CheckExact(closure)); + func_obj->func_closure = closure; } if (oparg & 0x04) { - assert(PyTuple_CheckExact(TOP())); - func->func_annotations = POP(); + assert(PyTuple_CheckExact(annotations)); + func_obj->func_annotations = annotations; } if (oparg & 0x02) { - assert(PyDict_CheckExact(TOP())); - func->func_kwdefaults = POP(); + assert(PyDict_CheckExact(kwdefaults)); + func_obj->func_kwdefaults = kwdefaults; } if (oparg & 0x01) { - assert(PyTuple_CheckExact(TOP())); - func->func_defaults = POP(); + assert(PyTuple_CheckExact(defaults)); + func_obj->func_defaults = defaults; } - func->func_version = ((PyCodeObject *)codeobj)->co_version; - PUSH((PyObject *)func); + func_obj->func_version = ((PyCodeObject *)codeobj)->co_version; + func = (PyObject *)func_obj; + STACK_SHRINK(((oparg & 0x01) ? 1 : 0) + ((oparg & 0x02) ? 1 : 0) + ((oparg & 0x04) ? 1 : 0) + ((oparg & 0x08) ? 1 : 0)); + POKE(1, func); DISPATCH(); } @@ -3630,34 +3776,28 @@ } TARGET(BUILD_SLICE) { - PyObject *start, *stop, *step, *slice; - if (oparg == 3) - step = POP(); - else - step = NULL; - stop = POP(); - start = TOP(); + PyObject *step = (oparg == 3) ? PEEK(((oparg == 3) ? 1 : 0)) : NULL; + PyObject *stop = PEEK(1 + ((oparg == 3) ? 1 : 0)); + PyObject *start = PEEK(2 + ((oparg == 3) ? 1 : 0)); + PyObject *slice; slice = PySlice_New(start, stop, step); Py_DECREF(start); Py_DECREF(stop); Py_XDECREF(step); - SET_TOP(slice); - if (slice == NULL) - goto error; + if (slice == NULL) { STACK_SHRINK(((oparg == 3) ? 1 : 0)); goto pop_2_error; } + STACK_SHRINK(((oparg == 3) ? 1 : 0)); + STACK_SHRINK(1); + POKE(1, slice); DISPATCH(); } TARGET(FORMAT_VALUE) { - /* Handles f-string value formatting. */ + PyObject *fmt_spec = ((oparg & FVS_MASK) == FVS_HAVE_SPEC) ? PEEK((((oparg & FVS_MASK) == FVS_HAVE_SPEC) ? 1 : 0)) : NULL; + PyObject *value = PEEK(1 + (((oparg & FVS_MASK) == FVS_HAVE_SPEC) ? 1 : 0)); PyObject *result; - PyObject *fmt_spec; - PyObject *value; + /* Handles f-string value formatting. */ PyObject *(*conv_fn)(PyObject *); int which_conversion = oparg & FVC_MASK; - int have_fmt_spec = (oparg & FVS_MASK) == FVS_HAVE_SPEC; - - fmt_spec = have_fmt_spec ? POP() : NULL; - value = POP(); /* See if any conversion is specified. */ switch (which_conversion) { @@ -3680,7 +3820,7 @@ Py_DECREF(value); if (result == NULL) { Py_XDECREF(fmt_spec); - goto error; + if (true) { STACK_SHRINK((((oparg & FVS_MASK) == FVS_HAVE_SPEC) ? 1 : 0)); goto pop_1_error; } } value = result; } @@ -3698,19 +3838,20 @@ result = PyObject_Format(value, fmt_spec); Py_DECREF(value); Py_XDECREF(fmt_spec); - if (result == NULL) { - goto error; - } + if (result == NULL) { STACK_SHRINK((((oparg & FVS_MASK) == FVS_HAVE_SPEC) ? 1 : 0)); goto pop_1_error; } } - - PUSH(result); + STACK_SHRINK((((oparg & FVS_MASK) == FVS_HAVE_SPEC) ? 
1 : 0)); + POKE(1, result); DISPATCH(); } TARGET(COPY) { - assert(oparg != 0); - PyObject *peek = PEEK(oparg); - PUSH(Py_NewRef(peek)); + PyObject *bottom = PEEK(1 + (oparg-1)); + PyObject *top; + assert(oparg > 0); + top = Py_NewRef(bottom); + STACK_GROW(1); + POKE(1, top); DISPATCH(); } @@ -3745,10 +3886,11 @@ } TARGET(SWAP) { - assert(oparg != 0); - PyObject *top = TOP(); - SET_TOP(PEEK(oparg)); - PEEK(oparg) = top; + PyObject *top = PEEK(1); + PyObject *bottom = PEEK(2 + (oparg-2)); + assert(oparg >= 2); + POKE(1, bottom); + POKE(2 + (oparg-2), top); DISPATCH(); } diff --git a/Python/import.c b/Python/import.c index da6c15c5fd4144..302255d76edcd5 100644 --- a/Python/import.c +++ b/Python/import.c @@ -1592,6 +1592,13 @@ remove_importlib_frames(PyThreadState *tstate) Py_DECREF(code); tb = next; } + assert(PyExceptionInstance_Check(value)); + assert((PyObject *)Py_TYPE(value) == exception); + if (base_tb == NULL) { + base_tb = Py_None; + Py_INCREF(Py_None); + } + PyException_SetTraceback(value, base_tb); done: _PyErr_Restore(tstate, exception, value, base_tb); } @@ -2644,7 +2651,7 @@ PyImport_ExtendInittab(struct _inittab *newtab) int res = 0; if (_PyRuntime.imports.inittab != NULL) { - Py_FatalError("PyImport_ExtendInittab() may be be called after Py_Initialize()"); + Py_FatalError("PyImport_ExtendInittab() may not be called after Py_Initialize()"); } /* Count the number of entries in both tables */ @@ -2692,7 +2699,7 @@ PyImport_AppendInittab(const char *name, PyObject* (*initfunc)(void)) struct _inittab newtab[2]; if (_PyRuntime.imports.inittab != NULL) { - Py_FatalError("PyImport_AppendInittab() may be be called after Py_Initialize()"); + Py_FatalError("PyImport_AppendInittab() may not be called after Py_Initialize()"); } memset(newtab, '\0', sizeof newtab); diff --git a/Python/initconfig.c b/Python/initconfig.c index d7b2dc4a297425..deec805a6b1ca4 100644 --- a/Python/initconfig.c +++ b/Python/initconfig.c @@ -3143,8 +3143,7 @@ init_dump_ascii_wstr(const wchar_t *str) void _Py_DumpPathConfig(PyThreadState *tstate) { - PyObject *exc_type, *exc_value, *exc_tb; - _PyErr_Fetch(tstate, &exc_type, &exc_value, &exc_tb); + PyObject *exc = _PyErr_GetRaisedException(tstate); PySys_WriteStderr("Python path configuration:\n"); @@ -3202,5 +3201,5 @@ _Py_DumpPathConfig(PyThreadState *tstate) PySys_WriteStderr(" ]\n"); } - _PyErr_Restore(tstate, exc_type, exc_value, exc_tb); + _PyErr_SetRaisedException(tstate, exc); } diff --git a/Python/opcode_metadata.h b/Python/opcode_metadata.h index b0db2d565cb062..17c4eb2f911aae 100644 --- a/Python/opcode_metadata.h +++ b/Python/opcode_metadata.h @@ -2,8 +2,10 @@ // from Python/bytecodes.c // Do not edit! 
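Note on the remove_importlib_frames() change a few hunks up: PyException_SetTraceback() accepts a traceback object or Py_None but never NULL, which is why the trimmed traceback falls back to Py_None before being attached to the exception instance. A minimal sketch of the same rule using the 3.12 exception API (helper name invented):

    static void
    drop_traceback_sketch(void)
    {
        PyObject *exc = PyErr_GetRaisedException();
        if (exc == NULL) {
            return;
        }
        /* Py_None (not NULL) clears the traceback; a traceback object replaces it. */
        (void)PyException_SetTraceback(exc, Py_None);
        PyErr_SetRaisedException(exc);
    }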
-#ifndef NDEBUG -static int +#ifndef NEED_OPCODE_TABLES +extern int _PyOpcode_num_popped(int opcode, int oparg, bool jump); +#else +int _PyOpcode_num_popped(int opcode, int oparg, bool jump) { switch(opcode) { case NOP: @@ -94,6 +96,8 @@ _PyOpcode_num_popped(int opcode, int oparg, bool jump) { return 1; case RETURN_VALUE: return 1; + case RETURN_CONST: + return 0; case GET_AITER: return 1; case GET_ANEXT: @@ -101,19 +105,19 @@ _PyOpcode_num_popped(int opcode, int oparg, bool jump) { case GET_AWAITABLE: return 1; case SEND: - return -1; + return 2; case YIELD_VALUE: return 1; case POP_EXCEPT: return 1; case RERAISE: - return -1; + return oparg + 1; case PREP_RERAISE_STAR: return 2; case END_ASYNC_FOR: return 2; case CLEANUP_THROW: - return -1; + return 3; case LOAD_ASSERTION_ERROR: return 0; case LOAD_BUILD_CLASS: @@ -123,15 +127,15 @@ _PyOpcode_num_popped(int opcode, int oparg, bool jump) { case DELETE_NAME: return 0; case UNPACK_SEQUENCE: - return -1; + return 1; case UNPACK_SEQUENCE_TWO_TUPLE: - return -1; + return 1; case UNPACK_SEQUENCE_TUPLE: - return -1; + return 1; case UNPACK_SEQUENCE_LIST: - return -1; + return 1; case UNPACK_EX: - return -1; + return 1; case STORE_ATTR: return 2; case DELETE_ATTR: @@ -143,11 +147,11 @@ _PyOpcode_num_popped(int opcode, int oparg, bool jump) { case LOAD_NAME: return 0; case LOAD_GLOBAL: - return -1; + return 0; case LOAD_GLOBAL_MODULE: - return -1; + return 0; case LOAD_GLOBAL_BUILTIN: - return -1; + return 0; case DELETE_FAST: return 0; case MAKE_CELL: @@ -261,17 +265,17 @@ _PyOpcode_num_popped(int opcode, int oparg, bool jump) { case GET_ITER: return 1; case GET_YIELD_FROM_ITER: - return -1; + return 1; case FOR_ITER: - return -1; + return 1; case FOR_ITER_LIST: - return -1; + return 1; case FOR_ITER_TUPLE: - return -1; + return 1; case FOR_ITER_RANGE: - return -1; + return 1; case FOR_ITER_GEN: - return -1; + return 1; case BEFORE_ASYNC_WITH: return 1; case BEFORE_WITH: @@ -286,72 +290,74 @@ _PyOpcode_num_popped(int opcode, int oparg, bool jump) { return 1; case LOAD_ATTR_METHOD_LAZY_DICT: return 1; - case CALL_BOUND_METHOD_EXACT_ARGS: - return -1; case KW_NAMES: return 0; case CALL: - return -1; + return oparg + 2; + case CALL_BOUND_METHOD_EXACT_ARGS: + return oparg + 2; case CALL_PY_EXACT_ARGS: - return -1; + return oparg + 2; case CALL_PY_WITH_DEFAULTS: - return -1; + return oparg + 2; case CALL_NO_KW_TYPE_1: - return -1; + return oparg + 2; case CALL_NO_KW_STR_1: - return -1; + return oparg + 2; case CALL_NO_KW_TUPLE_1: - return -1; + return oparg + 2; case CALL_BUILTIN_CLASS: - return -1; + return oparg + 2; case CALL_NO_KW_BUILTIN_O: - return -1; + return oparg + 2; case CALL_NO_KW_BUILTIN_FAST: - return -1; + return oparg + 2; case CALL_BUILTIN_FAST_WITH_KEYWORDS: - return -1; + return oparg + 2; case CALL_NO_KW_LEN: - return -1; + return oparg + 2; case CALL_NO_KW_ISINSTANCE: - return -1; + return oparg + 2; case CALL_NO_KW_LIST_APPEND: - return -1; + return oparg + 2; case CALL_NO_KW_METHOD_DESCRIPTOR_O: - return -1; + return oparg + 2; case CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS: - return -1; + return oparg + 2; case CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS: - return -1; + return oparg + 2; case CALL_NO_KW_METHOD_DESCRIPTOR_FAST: - return -1; + return oparg + 2; case CALL_FUNCTION_EX: - return -1; + return ((oparg & 1) ? 1 : 0) + 3; case MAKE_FUNCTION: - return -1; + return ((oparg & 0x01) ? 1 : 0) + ((oparg & 0x02) ? 1 : 0) + ((oparg & 0x04) ? 1 : 0) + ((oparg & 0x08) ? 
1 : 0) + 1; case RETURN_GENERATOR: - return -1; + return 0; case BUILD_SLICE: - return -1; + return ((oparg == 3) ? 1 : 0) + 2; case FORMAT_VALUE: - return -1; + return (((oparg & FVS_MASK) == FVS_HAVE_SPEC) ? 1 : 0) + 1; case COPY: - return -1; + return (oparg-1) + 1; case BINARY_OP: return 2; case SWAP: - return -1; + return (oparg-2) + 2; case EXTENDED_ARG: return 0; case CACHE: return 0; default: - Py_UNREACHABLE(); + return -1; } } #endif -#ifndef NDEBUG -static int +#ifndef NEED_OPCODE_TABLES +extern int _PyOpcode_num_pushed(int opcode, int oparg, bool jump); +#else +int _PyOpcode_num_pushed(int opcode, int oparg, bool jump) { switch(opcode) { case NOP: @@ -442,6 +448,8 @@ _PyOpcode_num_pushed(int opcode, int oparg, bool jump) { return 0; case RETURN_VALUE: return 0; + case RETURN_CONST: + return 0; case GET_AITER: return 1; case GET_ANEXT: @@ -449,19 +457,19 @@ _PyOpcode_num_pushed(int opcode, int oparg, bool jump) { case GET_AWAITABLE: return 1; case SEND: - return -1; + return ((!jump) ? 1 : 0) + 1; case YIELD_VALUE: return 1; case POP_EXCEPT: return 0; case RERAISE: - return -1; + return oparg; case PREP_RERAISE_STAR: return 1; case END_ASYNC_FOR: return 0; case CLEANUP_THROW: - return -1; + return 1; case LOAD_ASSERTION_ERROR: return 1; case LOAD_BUILD_CLASS: @@ -471,15 +479,15 @@ _PyOpcode_num_pushed(int opcode, int oparg, bool jump) { case DELETE_NAME: return 0; case UNPACK_SEQUENCE: - return -1; + return oparg; case UNPACK_SEQUENCE_TWO_TUPLE: - return -1; + return oparg; case UNPACK_SEQUENCE_TUPLE: - return -1; + return oparg; case UNPACK_SEQUENCE_LIST: - return -1; + return oparg; case UNPACK_EX: - return -1; + return (oparg & 0xFF) + (oparg >> 8) + 1; case STORE_ATTR: return 0; case DELETE_ATTR: @@ -491,11 +499,11 @@ _PyOpcode_num_pushed(int opcode, int oparg, bool jump) { case LOAD_NAME: return 1; case LOAD_GLOBAL: - return -1; + return ((oparg & 1) ? 1 : 0) + 1; case LOAD_GLOBAL_MODULE: - return -1; + return ((oparg & 1) ? 1 : 0) + 1; case LOAD_GLOBAL_BUILTIN: - return -1; + return ((oparg & 1) ? 1 : 0) + 1; case DELETE_FAST: return 0; case MAKE_CELL: @@ -609,17 +617,17 @@ _PyOpcode_num_pushed(int opcode, int oparg, bool jump) { case GET_ITER: return 1; case GET_YIELD_FROM_ITER: - return -1; + return 1; case FOR_ITER: - return -1; + return 2; case FOR_ITER_LIST: - return -1; + return 2; case FOR_ITER_TUPLE: - return -1; + return 2; case FOR_ITER_RANGE: - return -1; + return 2; case FOR_ITER_GEN: - return -1; + return 2; case BEFORE_ASYNC_WITH: return 2; case BEFORE_WITH: @@ -634,78 +642,84 @@ _PyOpcode_num_pushed(int opcode, int oparg, bool jump) { return ((oparg & 1) ? 1 : 0) + 1; case LOAD_ATTR_METHOD_LAZY_DICT: return ((oparg & 1) ? 
1 : 0) + 1; - case CALL_BOUND_METHOD_EXACT_ARGS: - return -1; case KW_NAMES: return 0; case CALL: - return -1; + return 1; + case CALL_BOUND_METHOD_EXACT_ARGS: + return 1; case CALL_PY_EXACT_ARGS: - return -1; + return 1; case CALL_PY_WITH_DEFAULTS: - return -1; + return 1; case CALL_NO_KW_TYPE_1: - return -1; + return 1; case CALL_NO_KW_STR_1: - return -1; + return 1; case CALL_NO_KW_TUPLE_1: - return -1; + return 1; case CALL_BUILTIN_CLASS: - return -1; + return 1; case CALL_NO_KW_BUILTIN_O: - return -1; + return 1; case CALL_NO_KW_BUILTIN_FAST: - return -1; + return 1; case CALL_BUILTIN_FAST_WITH_KEYWORDS: - return -1; + return 1; case CALL_NO_KW_LEN: - return -1; + return 1; case CALL_NO_KW_ISINSTANCE: - return -1; + return 1; case CALL_NO_KW_LIST_APPEND: - return -1; + return 1; case CALL_NO_KW_METHOD_DESCRIPTOR_O: - return -1; + return 1; case CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS: - return -1; + return 1; case CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS: - return -1; + return 1; case CALL_NO_KW_METHOD_DESCRIPTOR_FAST: - return -1; + return 1; case CALL_FUNCTION_EX: - return -1; + return 1; case MAKE_FUNCTION: - return -1; + return 1; case RETURN_GENERATOR: - return -1; + return 0; case BUILD_SLICE: - return -1; + return 1; case FORMAT_VALUE: - return -1; + return 1; case COPY: - return -1; + return (oparg-1) + 2; case BINARY_OP: return 1; case SWAP: - return -1; + return (oparg-2) + 2; case EXTENDED_ARG: return 0; case CACHE: return 0; default: - Py_UNREACHABLE(); + return -1; } } #endif + enum Direction { DIR_NONE, DIR_READ, DIR_WRITE }; -enum InstructionFormat { INSTR_FMT_IB, INSTR_FMT_IBC, INSTR_FMT_IBC0, INSTR_FMT_IBC000, INSTR_FMT_IBC00000000, INSTR_FMT_IBIB, INSTR_FMT_IX, INSTR_FMT_IXC, INSTR_FMT_IXC000 }; +enum InstructionFormat { INSTR_FMT_IB, INSTR_FMT_IBC, INSTR_FMT_IBC0, INSTR_FMT_IBC000, INSTR_FMT_IBC0000, INSTR_FMT_IBC00000000, INSTR_FMT_IBIB, INSTR_FMT_IX, INSTR_FMT_IXC, INSTR_FMT_IXC000 }; struct opcode_metadata { enum Direction dir_op1; enum Direction dir_op2; enum Direction dir_op3; bool valid_entry; enum InstructionFormat instr_format; -} _PyOpcode_opcode_metadata[256] = { +}; + +#ifndef NEED_OPCODE_TABLES +extern const struct opcode_metadata _PyOpcode_opcode_metadata[256]; +#else +const struct opcode_metadata _PyOpcode_opcode_metadata[256] = { [NOP] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, [RESUME] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [LOAD_CLOSURE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, @@ -750,6 +764,7 @@ struct opcode_metadata { [RAISE_VARARGS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [INTERPRETER_EXIT] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, [RETURN_VALUE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, + [RETURN_CONST] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [GET_AITER] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, [GET_ANEXT] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, [GET_AWAITABLE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, @@ -764,19 +779,19 @@ struct opcode_metadata { [LOAD_BUILD_CLASS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, [STORE_NAME] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [DELETE_NAME] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [UNPACK_SEQUENCE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [UNPACK_SEQUENCE_TWO_TUPLE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, - [UNPACK_SEQUENCE_TUPLE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - 
[UNPACK_SEQUENCE_LIST] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, + [UNPACK_SEQUENCE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC }, + [UNPACK_SEQUENCE_TWO_TUPLE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC }, + [UNPACK_SEQUENCE_TUPLE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC }, + [UNPACK_SEQUENCE_LIST] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC }, [UNPACK_EX] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [STORE_ATTR] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, [DELETE_ATTR] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [STORE_GLOBAL] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [DELETE_GLOBAL] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [LOAD_NAME] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [LOAD_GLOBAL] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [LOAD_GLOBAL_MODULE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [LOAD_GLOBAL_BUILTIN] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, + [LOAD_GLOBAL] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC0000 }, + [LOAD_GLOBAL_MODULE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC0000 }, + [LOAD_GLOBAL_BUILTIN] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC0000 }, [DELETE_FAST] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [MAKE_CELL] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [DELETE_DEREF] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, @@ -834,11 +849,11 @@ struct opcode_metadata { [MATCH_KEYS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, [GET_ITER] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, [GET_YIELD_FROM_ITER] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, - [FOR_ITER] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [FOR_ITER_LIST] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [FOR_ITER_TUPLE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [FOR_ITER_RANGE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [FOR_ITER_GEN] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, + [FOR_ITER] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC }, + [FOR_ITER_LIST] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC }, + [FOR_ITER_TUPLE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC }, + [FOR_ITER_RANGE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC }, + [FOR_ITER_GEN] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC }, [BEFORE_ASYNC_WITH] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, [BEFORE_WITH] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, [WITH_EXCEPT_START] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, @@ -846,25 +861,25 @@ struct opcode_metadata { [LOAD_ATTR_METHOD_WITH_VALUES] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC00000000 }, [LOAD_ATTR_METHOD_NO_DICT] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC00000000 }, [LOAD_ATTR_METHOD_LAZY_DICT] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC00000000 }, - [CALL_BOUND_METHOD_EXACT_ARGS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [KW_NAMES] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_PY_EXACT_ARGS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_PY_WITH_DEFAULTS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_NO_KW_TYPE_1] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_NO_KW_STR_1] = { 
DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_NO_KW_TUPLE_1] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_BUILTIN_CLASS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_NO_KW_BUILTIN_O] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_NO_KW_BUILTIN_FAST] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_BUILTIN_FAST_WITH_KEYWORDS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_NO_KW_LEN] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_NO_KW_ISINSTANCE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_NO_KW_LIST_APPEND] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_NO_KW_METHOD_DESCRIPTOR_O] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, - [CALL_NO_KW_METHOD_DESCRIPTOR_FAST] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, + [CALL] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_BOUND_METHOD_EXACT_ARGS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_PY_EXACT_ARGS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_PY_WITH_DEFAULTS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_NO_KW_TYPE_1] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_NO_KW_STR_1] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_NO_KW_TUPLE_1] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_BUILTIN_CLASS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_NO_KW_BUILTIN_O] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_NO_KW_BUILTIN_FAST] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_BUILTIN_FAST_WITH_KEYWORDS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_NO_KW_LEN] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_NO_KW_ISINSTANCE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_NO_KW_LIST_APPEND] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_NO_KW_METHOD_DESCRIPTOR_O] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_METHOD_DESCRIPTOR_FAST_WITH_KEYWORDS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_NO_KW_METHOD_DESCRIPTOR_NOARGS] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, + [CALL_NO_KW_METHOD_DESCRIPTOR_FAST] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IBC000 }, [CALL_FUNCTION_EX] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [MAKE_FUNCTION] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [RETURN_GENERATOR] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, @@ -876,3 +891,4 @@ struct opcode_metadata { [EXTENDED_ARG] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IB }, [CACHE] = { DIR_NONE, DIR_NONE, DIR_NONE, true, INSTR_FMT_IX }, }; +#endif diff --git a/Python/opcode_targets.h b/Python/opcode_targets.h index 4252228ee5a159..9c141533a61e9c 100644 --- a/Python/opcode_targets.h +++ b/Python/opcode_targets.h @@ -120,7 +120,7 @@ static void *opcode_targets[256] = { &&TARGET_CONTAINS_OP, &&TARGET_RERAISE, &&TARGET_COPY, - &&TARGET_STORE_FAST__LOAD_FAST, + &&TARGET_RETURN_CONST, &&TARGET_BINARY_OP, &&TARGET_SEND, &&TARGET_LOAD_FAST, @@ -152,20 +152,20 @@ static void *opcode_targets[256] = { 
&&TARGET_YIELD_VALUE, &&TARGET_RESUME, &&TARGET_MATCH_CLASS, + &&TARGET_STORE_FAST__LOAD_FAST, &&TARGET_STORE_FAST__STORE_FAST, - &&TARGET_STORE_SUBSCR_DICT, &&TARGET_FORMAT_VALUE, &&TARGET_BUILD_CONST_KEY_MAP, &&TARGET_BUILD_STRING, + &&TARGET_STORE_SUBSCR_DICT, &&TARGET_STORE_SUBSCR_LIST_INT, &&TARGET_UNPACK_SEQUENCE_LIST, &&TARGET_UNPACK_SEQUENCE_TUPLE, - &&TARGET_UNPACK_SEQUENCE_TWO_TUPLE, &&TARGET_LIST_EXTEND, &&TARGET_SET_UPDATE, &&TARGET_DICT_MERGE, &&TARGET_DICT_UPDATE, - &&_unknown_opcode, + &&TARGET_UNPACK_SEQUENCE_TWO_TUPLE, &&_unknown_opcode, &&_unknown_opcode, &&_unknown_opcode, diff --git a/Python/pystate.c b/Python/pystate.c index 8bb49d954a81b6..1261092d1435fa 100644 --- a/Python/pystate.c +++ b/Python/pystate.c @@ -266,10 +266,10 @@ unbind_tstate(PyThreadState *tstate) thread state for an OS level thread is when there are multiple interpreters. - The PyGILState_*() APIs don't work with multiple - interpreters (see bpo-10915 and bpo-15751), so this function - sets TSS only once. Thus, the first thread state created for that - given OS level thread will "win", which seems reasonable behaviour. + Before 3.12, the PyGILState_*() APIs didn't work with multiple + interpreters (see bpo-10915 and bpo-15751), so this function used + to set TSS only once. Thus, the first thread state created for that + given OS level thread would "win", which seemed reasonable behaviour. */ static void @@ -286,10 +286,6 @@ bind_gilstate_tstate(PyThreadState *tstate) assert(tstate != tcur); if (tcur != NULL) { - // The original gilstate implementation only respects the - // first thread state set. - // XXX Skipping like this does not play nice with multiple interpreters. - return; tcur->_status.bound_gilstate = 0; } gilstate_tss_set(runtime, tstate); @@ -1379,9 +1375,7 @@ PyThreadState_Clear(PyThreadState *tstate) Py_CLEAR(tstate->dict); Py_CLEAR(tstate->async_exc); - Py_CLEAR(tstate->curexc_type); - Py_CLEAR(tstate->curexc_value); - Py_CLEAR(tstate->curexc_traceback); + Py_CLEAR(tstate->current_exception); Py_CLEAR(tstate->exc_state.exc_value); @@ -1738,20 +1732,7 @@ _PyThreadState_Swap(_PyRuntimeState *runtime, PyThreadState *newts) tstate_activate(newts); } - /* It should not be possible for more than one thread state - to be used for a thread. Check this the best we can in debug - builds. - */ - // XXX The above isn't true when multiple interpreters are involved. #if defined(Py_DEBUG) - if (newts && gilstate_tss_initialized(runtime)) { - PyThreadState *check = gilstate_tss_get(runtime); - if (check != newts) { - if (check && check->interp == newts->interp) { - Py_FatalError("Invalid thread state for this thread"); - } - } - } errno = err; #endif return oldts; diff --git a/Python/pythonrun.c b/Python/pythonrun.c index 35292b6478a833..6a4d593768690a 100644 --- a/Python/pythonrun.c +++ b/Python/pythonrun.c @@ -748,13 +748,10 @@ _Py_HandleSystemExit(int *exitcode_p) } done: - /* Restore and clear the exception info, in order to properly decref - * the exception, value, and traceback. If we just exit instead, - * these leak, which confuses PYTHONDUMPREFS output, and may prevent - * some finalizers from running. 
- */ - PyErr_Restore(exception, value, tb); - PyErr_Clear(); + /* Cleanup the exception */ + Py_CLEAR(exception); + Py_CLEAR(value); + Py_CLEAR(tb); *exitcode_p = exitcode; return 1; } diff --git a/Python/sysmodule.c b/Python/sysmodule.c index f9f766a94d1464..6e81ef92b67f70 100644 --- a/Python/sysmodule.c +++ b/Python/sysmodule.c @@ -66,12 +66,11 @@ _PySys_GetAttr(PyThreadState *tstate, PyObject *name) if (sd == NULL) { return NULL; } - PyObject *exc_type, *exc_value, *exc_tb; - _PyErr_Fetch(tstate, &exc_type, &exc_value, &exc_tb); + PyObject *exc = _PyErr_GetRaisedException(tstate); /* XXX Suppress a new exception if it was raised and restore * the old one. */ PyObject *value = _PyDict_GetItemWithError(sd, name); - _PyErr_Restore(tstate, exc_type, exc_value, exc_tb); + _PyErr_SetRaisedException(tstate, exc); return value; } @@ -3704,11 +3703,10 @@ static void sys_format(PyObject *key, FILE *fp, const char *format, va_list va) { PyObject *file, *message; - PyObject *error_type, *error_value, *error_traceback; const char *utf8; PyThreadState *tstate = _PyThreadState_GET(); - _PyErr_Fetch(tstate, &error_type, &error_value, &error_traceback); + PyObject *error = _PyErr_GetRaisedException(tstate); file = _PySys_GetAttr(tstate, key); message = PyUnicode_FromFormatV(format, va); if (message != NULL) { @@ -3720,7 +3718,7 @@ sys_format(PyObject *key, FILE *fp, const char *format, va_list va) } Py_DECREF(message); } - _PyErr_Restore(tstate, error_type, error_value, error_traceback); + _PyErr_SetRaisedException(tstate, error); } void diff --git a/Python/traceback.c b/Python/traceback.c index da26c9b260a3bd..31b85e77575efa 100644 --- a/Python/traceback.c +++ b/Python/traceback.c @@ -249,6 +249,8 @@ PyTraceBack_Here(PyFrameObject *frame) _PyErr_ChainExceptions(exc, val, tb); return -1; } + assert(PyExceptionInstance_Check(val)); + PyException_SetTraceback(val, newtb); PyErr_Restore(exc, val, newtb); Py_XDECREF(tb); return 0; @@ -260,13 +262,12 @@ void _PyTraceback_Add(const char *funcname, const char *filename, int lineno) PyObject *globals; PyCodeObject *code; PyFrameObject *frame; - PyObject *exc, *val, *tb; PyThreadState *tstate = _PyThreadState_GET(); /* Save and clear the current exception. Python functions must not be called with an exception set. Calling Python functions happens when the codec of the filesystem encoding is implemented in pure Python. */ - _PyErr_Fetch(tstate, &exc, &val, &tb); + PyObject *exc = _PyErr_GetRaisedException(tstate); globals = PyDict_New(); if (!globals) @@ -283,13 +284,13 @@ void _PyTraceback_Add(const char *funcname, const char *filename, int lineno) goto error; frame->f_lineno = lineno; - _PyErr_Restore(tstate, exc, val, tb); + _PyErr_SetRaisedException(tstate, exc); PyTraceBack_Here(frame); Py_DECREF(frame); return; error: - _PyErr_ChainExceptions(exc, val, tb); + _PyErr_ChainExceptions1(exc); } static PyObject * diff --git a/README.rst b/README.rst index 814efef83a357a..b1756e20c141ab 100644 --- a/README.rst +++ b/README.rst @@ -1,4 +1,4 @@ -This is Python version 3.12.0 alpha 4 +This is Python version 3.12.0 alpha 5 ===================================== .. 
image:: https://github.com/python/cpython/workflows/Tests/badge.svg diff --git a/Tools/build/generate_stdlib_module_names.py b/Tools/build/generate_stdlib_module_names.py index c8a23f41fdd475..d15e5e2d5450d7 100644 --- a/Tools/build/generate_stdlib_module_names.py +++ b/Tools/build/generate_stdlib_module_names.py @@ -36,6 +36,7 @@ '_testmultiphase', '_testsinglephase', '_xxsubinterpreters', + '_xxinterpchannels', '_xxtestfuzz', 'idlelib.idle_test', 'test', diff --git a/Tools/cases_generator/README.md b/Tools/cases_generator/README.md index dc055ead1941cd..c595a932ac4470 100644 --- a/Tools/cases_generator/README.md +++ b/Tools/cases_generator/README.md @@ -1,5 +1,8 @@ # Tooling to generate interpreters +Documentation for the instruction definitions in `Python/bytecodes.c` +("the DSL") is [here](interpreter_definition.md). + What's currently here: - `lexer.py`: lexer for C, originally written by Mark Shannon @@ -7,10 +10,10 @@ What's currently here: - `parser.py`: Parser for instruction definition DSL; main class `Parser` - `generate_cases.py`: driver script to read `Python/bytecodes.c` and write `Python/generated_cases.c.h` +- `test_generator.py`: tests, require manual running using `pytest` -The DSL for the instruction definitions in `Python/bytecodes.c` is described -[here](https://github.com/faster-cpython/ideas/blob/main/3.12/interpreter_definition.md). -Note that there is some dummy C code at the top and bottom of the file +Note that there is some dummy C code at the top and bottom of +`Python/bytecodes.c` to fool text editors like VS Code into believing this is valid C code. ## A bit about the parser diff --git a/Tools/cases_generator/generate_cases.py b/Tools/cases_generator/generate_cases.py index 3925583b40e728..aa8e14075c8738 100644 --- a/Tools/cases_generator/generate_cases.py +++ b/Tools/cases_generator/generate_cases.py @@ -43,10 +43,7 @@ "-o", "--output", type=str, help="Generated code", default=DEFAULT_OUTPUT ) arg_parser.add_argument( - "-m", - "--metadata", - action="store_true", - help=f"Generate metadata instead, changes output default to {DEFAULT_METADATA_OUTPUT}", + "-m", "--metadata", type=str, help="Generated metadata", default=DEFAULT_METADATA_OUTPUT ) @@ -180,11 +177,8 @@ def assign(self, dst: StackEffect, src: StackEffect): stmt = f"if ({src.cond}) {{ {stmt} }}" self.emit(stmt) elif m := re.match(r"^&PEEK\(.*\)$", dst.name): - # NOTE: MOVE_ITEMS() does not actually exist. - # The only supported output array forms are: - # - unused[...] - # - X[...] where X[...] matches an input array exactly - self.emit(f"MOVE_ITEMS({dst.name}, {src.name}, {src.size});") + # The user code is responsible for writing to the output array. 
+ pass elif m := re.match(r"^REG\(oparg(\d+)\)$", dst.name): self.emit(f"Py_XSETREF({dst.name}, {cast}{src.name});") else: @@ -230,7 +224,8 @@ def __init__(self, inst: parser.InstDef): self.kind = inst.kind self.name = inst.name self.block = inst.block - self.block_text, self.predictions = extract_block_text(self.block) + self.block_text, self.check_eval_breaker, self.predictions = \ + extract_block_text(self.block) self.always_exits = always_exits(self.block_text) self.cache_effects = [ effect for effect in inst.inputs if isinstance(effect, parser.CacheEffect) @@ -309,10 +304,24 @@ def write(self, out: Formatter) -> None: out.declare(ieffect, src) # Write output stack effect variable declarations + isize = string_effect_size(list_effect_size(self.input_effects)) input_names = {ieffect.name for ieffect in self.input_effects} - for oeffect in self.output_effects: + for i, oeffect in enumerate(self.output_effects): if oeffect.name not in input_names: - out.declare(oeffect, None) + if oeffect.size: + osize = string_effect_size( + list_effect_size([oeff for oeff in self.output_effects[:i]]) + ) + offset = "stack_pointer" + if isize != osize: + if isize != "0": + offset += f" - ({isize})" + if osize != "0": + offset += f" + {osize}" + src = StackEffect(offset, "PyObject **") + out.declare(oeffect, src) + else: + out.declare(oeffect, None) # out.emit(f"JUMPBY(OPSIZE({self.inst.name}) - 1);") @@ -379,9 +388,11 @@ def write_body(self, out: Formatter, dedent: int, cache_adjust: int = 0) -> None # Write the body, substituting a goto for ERROR_IF() and other stuff assert dedent <= 0 extra = " " * -dedent + names_to_skip = self.unmoved_names | frozenset({UNUSED, "null"}) for line in self.block_text: if m := re.match(r"(\s*)ERROR_IF\((.+), (\w+)\);\s*(?://.*)?$", line): space, cond, label = m.groups() + space = extra + space # ERROR_IF() must pop the inputs from the stack. # The code block is responsible for DECREF()ing them. # NOTE: If the label doesn't exist, just add it to ceval.c. 
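For reference, with this change an output array effect such as `values[oparg*3]` is no longer filled in by a generated `MOVE_ITEMS()` call; the generator only declares a pointer into the stack and the instruction body is expected to write the items itself. The generated code exercised by `test_array_output` further down in this patch (using that test's own placeholder names `OP` and `spam()`) looks roughly like this:

```C
    TARGET(OP) {
        PyObject *below;
        PyObject **values = stack_pointer - (2) + 1;
        PyObject *above;
        spam(values, oparg);
        STACK_GROW(oparg*3);
        POKE(1, above);
        POKE(2 + oparg*3, below);
        DISPATCH();
    }
```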
@@ -400,16 +411,25 @@ def write_body(self, out: Formatter, dedent: int, cache_adjust: int = 0) -> None symbolic = "" if symbolic: out.write_raw( - f"{extra}{space}if ({cond}) {{ STACK_SHRINK({symbolic}); goto {label}; }}\n" + f"{space}if ({cond}) {{ STACK_SHRINK({symbolic}); goto {label}; }}\n" ) else: - out.write_raw(f"{extra}{space}if ({cond}) goto {label};\n") + out.write_raw(f"{space}if ({cond}) goto {label};\n") elif m := re.match(r"(\s*)DECREF_INPUTS\(\);\s*(?://.*)?$", line): if not self.register: - space = m.group(1) + space = extra + m.group(1) for ieff in self.input_effects: - if ieff.name not in self.unmoved_names: - out.write_raw(f"{extra}{space}Py_DECREF({ieff.name});\n") + if ieff.name in names_to_skip: + continue + if ieff.size: + out.write_raw( + f"{space}for (int _i = {ieff.size}; --_i >= 0;) {{\n" + ) + out.write_raw(f"{space} Py_DECREF({ieff.name}[_i]);\n") + out.write_raw(f"{space}}}\n") + else: + decref = "XDECREF" if ieff.cond else "DECREF" + out.write_raw(f"{space}Py_{decref}({ieff.name});\n") else: out.write_raw(extra + line) @@ -475,13 +495,15 @@ class Analyzer: filename: str output_filename: str + metadata_filename: str src: str errors: int = 0 - def __init__(self, filename: str, output_filename: str): + def __init__(self, filename: str, output_filename: str, metadata_filename: str): """Read the input file.""" self.filename = filename self.output_filename = output_filename + self.metadata_filename = metadata_filename with open(filename) as f: self.src = f.read() @@ -866,21 +888,25 @@ def write_stack_effect_functions(self) -> None: def write_function( direction: str, data: list[tuple[AnyInstruction, str]] ) -> None: - self.out.emit("\n#ifndef NDEBUG") - self.out.emit("static int") + self.out.emit("") + self.out.emit("#ifndef NEED_OPCODE_TABLES") + self.out.emit(f"extern int _PyOpcode_num_{direction}(int opcode, int oparg, bool jump);") + self.out.emit("#else") + self.out.emit("int") self.out.emit(f"_PyOpcode_num_{direction}(int opcode, int oparg, bool jump) {{") self.out.emit(" switch(opcode) {") for instr, effect in data: self.out.emit(f" case {instr.name}:") self.out.emit(f" return {effect};") self.out.emit(" default:") - self.out.emit(" Py_UNREACHABLE();") + self.out.emit(" return -1;") self.out.emit(" }") self.out.emit("}") self.out.emit("#endif") write_function("popped", popped_data) write_function("pushed", pushed_data) + self.out.emit("") def write_metadata(self) -> None: """Write instruction metadata to output file.""" @@ -901,7 +927,7 @@ def write_metadata(self) -> None: # Turn it into a list of enum definitions. 
format_enums = [INSTR_FMT_PREFIX + format for format in sorted(all_formats)] - with open(self.output_filename, "w") as f: + with open(self.metadata_filename, "w") as f: # Write provenance header f.write(f"// This file is generated by {THIS} --metadata\n") f.write(f"// from {os.path.relpath(self.filename, ROOT)}\n") @@ -912,7 +938,7 @@ def write_metadata(self) -> None: self.write_stack_effect_functions() - # Write variable definition + # Write type definitions self.out.emit("enum Direction { DIR_NONE, DIR_READ, DIR_WRITE };") self.out.emit(f"enum InstructionFormat {{ {', '.join(format_enums)} }};") self.out.emit("struct opcode_metadata {") @@ -922,7 +948,14 @@ def write_metadata(self) -> None: self.out.emit("enum Direction dir_op3;") self.out.emit("bool valid_entry;") self.out.emit("enum InstructionFormat instr_format;") - self.out.emit("} _PyOpcode_opcode_metadata[256] = {") + self.out.emit("};") + self.out.emit("") + + # Write metadata array declaration + self.out.emit("#ifndef NEED_OPCODE_TABLES") + self.out.emit("extern const struct opcode_metadata _PyOpcode_opcode_metadata[256];") + self.out.emit("#else") + self.out.emit("const struct opcode_metadata _PyOpcode_opcode_metadata[256] = {") # Write metadata for each instruction for thing in self.everything: @@ -939,6 +972,7 @@ def write_metadata(self) -> None: # Write end of array self.out.emit("};") + self.out.emit("#endif") def write_metadata_for_inst(self, instr: Instruction) -> None: """Write metadata for a single instruction.""" @@ -1016,6 +1050,8 @@ def write_instr(self, instr: Instruction) -> None: if not instr.always_exits: for prediction in instr.predictions: self.out.emit(f"PREDICT({prediction});") + if instr.check_eval_breaker: + self.out.emit("CHECK_EVAL_BREAKER();") self.out.emit(f"DISPATCH();") def write_super(self, sup: SuperInstruction) -> None: @@ -1091,7 +1127,7 @@ def wrap_super_or_macro(self, up: SuperOrMacroInstruction): self.out.emit(f"DISPATCH();") -def extract_block_text(block: parser.Block) -> tuple[list[str], list[str]]: +def extract_block_text(block: parser.Block) -> tuple[list[str], bool, list[str]]: # Get lines of text with proper dedent blocklines = block.text.splitlines(True) @@ -1111,6 +1147,12 @@ def extract_block_text(block: parser.Block) -> tuple[list[str], list[str]]: while blocklines and not blocklines[-1].strip(): blocklines.pop() + # Separate CHECK_EVAL_BREAKER() macro from end + check_eval_breaker = \ + blocklines != [] and blocklines[-1].strip() == "CHECK_EVAL_BREAKER();" + if check_eval_breaker: + del blocklines[-1] + # Separate PREDICT(...) 
macros from end predictions: list[str] = [] while blocklines and ( @@ -1119,7 +1161,7 @@ def extract_block_text(block: parser.Block) -> tuple[list[str], list[str]]: predictions.insert(0, m.group(1)) blocklines.pop() - return blocklines, predictions + return blocklines, check_eval_breaker, predictions def always_exits(lines: list[str]) -> bool: @@ -1153,18 +1195,13 @@ def variable_used(node: parser.Node, name: str) -> bool: def main(): """Parse command line, parse input, analyze, write output.""" args = arg_parser.parse_args() # Prints message and sys.exit(2) on error - if args.metadata: - if args.output == DEFAULT_OUTPUT: - args.output = DEFAULT_METADATA_OUTPUT - a = Analyzer(args.input, args.output) # Raises OSError if input unreadable + a = Analyzer(args.input, args.output, args.metadata) # Raises OSError if input unreadable a.parse() # Raises SyntaxError on failure a.analyze() # Prints messages and sets a.errors on failure if a.errors: sys.exit(f"Found {a.errors} errors") - if args.metadata: - a.write_metadata() - else: - a.write_instructions() # Raises OSError if output can't be written + a.write_instructions() # Raises OSError if output can't be written + a.write_metadata() if __name__ == "__main__": diff --git a/Tools/cases_generator/interpreter_definition.md b/Tools/cases_generator/interpreter_definition.md new file mode 100644 index 00000000000000..c7bd38d32ff411 --- /dev/null +++ b/Tools/cases_generator/interpreter_definition.md @@ -0,0 +1,409 @@ +# A higher level definition of the bytecode interpreter + +## Abstract + +The CPython interpreter is defined in C, meaning that the semantics of the +bytecode instructions, the dispatching mechanism, error handling, and +tracing and instrumentation are all intermixed. + +This document proposes defining a custom C-like DSL for defining the +instruction semantics and tools for generating the code deriving from +the instruction definitions. + +These tools would be used to: +* Generate the main interpreter (done) +* Generate the tier 2 interpreter +* Generate documentation for instructions +* Generate metadata about instructions, such as stack use (done). + +Having a single definition file ensures that there is a single source +of truth for bytecode semantics. + +Other tools that operate on bytecodes, like `frame.setlineno` +and the `dis` module, will be derived from the common semantic +definition, reducing errors. + +## Motivation + +The bytecode interpreter of CPython has traditionally been defined as standard +C code, but with a lot of macros. +The presence of these macros and the nature of bytecode interpreters means +that the interpreter is effectively defined in a domain specific language (DSL). + +Rather than using an ad-hoc DSL embedded in the C code for the interpreter, +a custom DSL should be defined and the semantics of the bytecode instructions, +and the instructions defined in that DSL. + +Generating the interpreter decouples low-level details of dispatching +and error handling from the semantics of the instructions, resulting +in more maintainable code and a potentially faster interpreter. + +It also provides the ability to create and check optimizers and optimization +passes from the semantic definition, reducing errors. + +## Rationale + +As we improve the performance of CPython, we need to optimize larger regions +of code, use more complex optimizations and, ultimately, translate to machine +code. + +All of these steps introduce the possibility of more bugs, and require more code +to be written. 
One way to mitigate this is through the use of code generators. +Code generators decouple the debugging of the code (the generator) from checking +the correctness (the DSL input). + +For example, we are likely to want a new interpreter for the tier 2 optimizer +to be added in 3.12. That interpreter will have a different API, a different +set of instructions and potentially different dispatching mechanism. +But the instructions it will interpret will be built from the same building +blocks as the instructions for the tier 1 (PEP 659) interpreter. + +Rewriting all the instructions is tedious and error-prone, and changing the +instructions is a maintenance headache as both versions need to be kept in sync. + +By using a code generator and using a common source for the instructions, or +parts of instructions, we can reduce the potential for errors considerably. + + +## Specification + +This specification is at an early stage and is likely to change considerably. + +Syntax +------ + +Each op definition has a kind, a name, a stack and instruction stream effect, +and a piece of C code describing its semantics: + +``` + file: + (definition | family)+ + + definition: + "inst" "(" NAME ["," stack_effect] ")" "{" C-code "}" + | + "op" "(" NAME "," stack_effect ")" "{" C-code "}" + | + "macro" "(" NAME ")" "=" uop ("+" uop)* ";" + | + "super" "(" NAME ")" "=" NAME ("+" NAME)* ";" + + stack_effect: + "(" [inputs] "--" [outputs] ")" + + inputs: + input ("," input)* + + outputs: + output ("," output)* + + input: + object | stream | array + + output: + object | array + + uop: + NAME | stream + + object: + NAME [":" type] [ "if" "(" C-expression ")" ] + + type: + NAME + + stream: + NAME "/" size + + size: + INTEGER + + array: + object "[" C-expression "]" + + family: + "family" "(" NAME ")" = "{" NAME ("," NAME)+ "}" ";" +``` + +The following definitions may occur: + +* `inst`: A normal instruction, as previously defined by `TARGET(NAME)` in `ceval.c`. +* `op`: A part instruction from which macros can be constructed. +* `macro`: A bytecode instruction constructed from ops and cache effects. +* `super`: A super-instruction, such as `LOAD_FAST__LOAD_FAST`, constructed from + normal or macro instructions. + +`NAME` can be any ASCII identifier that is a C identifier and not a C or Python keyword. +`foo_1` is legal. `$` is not legal, nor is `struct` or `class`. + +The optional `type` in an `object` is the C type. It defaults to `PyObject *`. +The objects before the "--" are the objects on top of the stack at the start +of the instruction. Those after the "--" are the objects on top of the stack +at the end of the instruction. + +An `inst` without `stack_effect` is a transitional form to allow the original C code +definitions to be copied. It lacks information to generate anything other than the +interpreter, but is useful for initial porting of code. + +Stack effect names may be `unused`, indicating the space is to be reserved +but no use of it will be made in the instruction definition. +This is useful to ensure that all instructions in a family have the same +stack effect. + +The number in a `stream` defines how many codeunits are consumed from the +instruction stream. It returns a 16, 32 or 64 bit value. +If the name is `unused`, the size can be any value and that many codeunits +will be skipped in the instruction stream. + +By convention, cache effects (`stream`) must precede the input effects. + +The name `oparg` is pre-defined as a 32 bit value fetched from the instruction stream. 
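+
+As a purely illustrative sketch (the instruction name and body here are
+hypothetical and not part of the real instruction set), a definition that
+combines a cache entry, an array input and a conditional output could read:
+
+```C
+    inst ( EXAMPLE_OP, (counter/1, args[oparg], extra if (oparg & 1) -- res) ) {
+        /* C code that may read `counter`, `args` and (when `oparg & 1` is
+           true) `extra`, and must set `res`. */
+    }
+```
+
+Here `counter/1` consumes one codeunit from the instruction stream, `args` is
+an array of `oparg` stack items, `extra` is only present on the stack when
+`oparg & 1` is true, and `res` is left on top of the stack when the
+instruction completes.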
+ +The C code may include special functions that are understood by the tools as +part of the DSL. + +Those functions include: + +* `DEOPT_IF(cond, instruction)`. Deoptimize if `cond` is met. +* `ERROR_IF(cond, label)`. Jump to error handler if `cond` is true. +* `DECREF_INPUTS()`. Generate `Py_DECREF()` calls for the input stack effects. + +Variables can either be defined in the input, output, or in the C code. +Variables defined in the input may not be assigned in the C code. +If an `ERROR_IF` occurs, all values will be removed from the stack; +they must already be `DECREF`'ed by the code block. +If a `DEOPT_IF` occurs, no values will be removed from the stack or +the instruction stream; no values must have been `DECREF`'ed or created. + +These requirements result in the following constraints on the use of +`DEOPT_IF` and `ERROR_IF` in any instruction's code block: + +1. Until the last `DEOPT_IF`, no objects may be allocated, `INCREF`ed, + or `DECREF`ed. +2. Before the first `ERROR_IF`, all input values must be `DECREF`ed, + and no objects may be allocated or `INCREF`ed, with the exception + of attempting to create an object and checking for success using + `ERROR_IF(result == NULL, label)`. (TODO: Unclear what to do with + intermediate results.) +3. No `DEOPT_IF` may follow an `ERROR_IF` in the same block. + +Semantics +--------- + +The underlying execution model is a stack machine. +Operations pop values from the stack, and push values to the stack. +They also can look at, and consume, values from the instruction stream. + +All members of a family must have the same stack and instruction stream effect. + +Examples +-------- + +(Another source of examples can be found in the [tests](test_generator.py).) + +Some examples: + +### Output stack effect +```C + inst ( LOAD_FAST, (-- value) ) { + value = frame->f_localsplus[oparg]; + Py_INCREF(value); + } +``` +This would generate: +```C + TARGET(LOAD_FAST) { + PyObject *value; + value = frame->f_localsplus[oparg]; + Py_INCREF(value); + PUSH(value); + DISPATCH(); + } +``` + +### Input stack effect +```C + inst ( STORE_FAST, (value --) ) { + SETLOCAL(oparg, value); + } +``` +This would generate: +```C + TARGET(STORE_FAST) { + PyObject *value = PEEK(1); + SETLOCAL(oparg, value); + STACK_SHRINK(1); + DISPATCH(); + } +``` + +### Super-instruction definition + +```C + super ( LOAD_FAST__LOAD_FAST ) = LOAD_FAST + LOAD_FAST ; +``` +This might get translated into the following: +```C + TARGET(LOAD_FAST__LOAD_FAST) { + PyObject *value; + value = frame->f_localsplus[oparg]; + Py_INCREF(value); + PUSH(value); + NEXTOPARG(); + next_instr++; + value = frame->f_localsplus[oparg]; + Py_INCREF(value); + PUSH(value); + DISPATCH(); + } +``` + +### Input stack effect and cache effect +```C + op ( CHECK_OBJECT_TYPE, (owner, type_version/2 -- owner) ) { + PyTypeObject *tp = Py_TYPE(owner); + assert(type_version != 0); + DEOPT_IF(tp->tp_version_tag != type_version); + } +``` +This might become (if it was an instruction): +```C + TARGET(CHECK_OBJECT_TYPE) { + PyObject *owner = PEEK(1); + uint32 type_version = read32(next_instr); + PyTypeObject *tp = Py_TYPE(owner); + assert(type_version != 0); + DEOPT_IF(tp->tp_version_tag != type_version); + next_instr += 2; + DISPATCH(); + } +``` + +### More examples + +For explanations see "Generating the interpreter" below.) 
+```C + op ( CHECK_HAS_INSTANCE_VALUES, (owner -- owner) ) { + PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); + DEOPT_IF(!_PyDictOrValues_IsValues(dorv)); + } +``` +```C + op ( LOAD_INSTANCE_VALUE, (owner, index/1 -- null if (oparg & 1), res) ) { + res = _PyDictOrValues_GetValues(dorv)->values[index]; + DEOPT_IF(res == NULL); + Py_INCREF(res); + null = NULL; + Py_DECREF(owner); + } +``` +```C + macro ( LOAD_ATTR_INSTANCE_VALUE ) = + counter/1 + CHECK_OBJECT_TYPE + CHECK_HAS_INSTANCE_VALUES + + LOAD_INSTANCE_VALUE + unused/4 ; +``` +```C + op ( LOAD_SLOT, (owner, index/1 -- null if (oparg & 1), res) ) { + char *addr = (char *)owner + index; + res = *(PyObject **)addr; + DEOPT_IF(res == NULL); + Py_INCREF(res); + null = NULL; + Py_DECREF(owner); + } +``` +```C + macro ( LOAD_ATTR_SLOT ) = counter/1 + CHECK_OBJECT_TYPE + LOAD_SLOT + unused/4; +``` +```C + inst ( BUILD_TUPLE, (items[oparg] -- tuple) ) { + tuple = _PyTuple_FromArraySteal(items, oparg); + ERROR_IF(tuple == NULL, error); + } +``` +```C + inst ( PRINT_EXPR ) { + PyObject *value = POP(); + PyObject *hook = _PySys_GetAttr(tstate, &_Py_ID(displayhook)); + PyObject *res; + if (hook == NULL) { + _PyErr_SetString(tstate, PyExc_RuntimeError, + "lost sys.displayhook"); + Py_DECREF(value); + goto error; + } + res = PyObject_CallOneArg(hook, value); + Py_DECREF(value); + ERROR_IF(res == NULL); + Py_DECREF(res); + } +``` + +### Define an instruction family +These opcodes all share the same instruction format): +```C + family(load_attr) = { LOAD_ATTR, LOAD_ATTR_INSTANCE_VALUE, LOAD_SLOT } ; +``` + +Generating the interpreter +========================== + +The generated C code for a single instruction includes a preamble and dispatch at the end +which can be easily inserted. What is more complex is ensuring the correct stack effects +and not generating excess pops and pushes. + +For example, in `CHECK_HAS_INSTANCE_VALUES`, `owner` occurs in the input, so it cannot be +redefined. Thus it doesn't need to written and can be read without adjusting the stack pointer. +The C code generated for `CHECK_HAS_INSTANCE_VALUES` would look something like: + +```C + { + PyObject *owner = stack_pointer[-1]; + PyDictOrValues dorv = *_PyObject_DictOrValuesPointer(owner); + DEOPT_IF(!_PyDictOrValues_IsValues(dorv)); + } +``` + +When combining ops together to form instructions, temporary values should be used, +rather than popping and pushing, such that `LOAD_ATTR_SLOT` would look something like: + +```C + case LOAD_ATTR_SLOT: { + PyObject *s1 = stack_pointer[-1]; + /* CHECK_OBJECT_TYPE */ + { + PyObject *owner = s1; + uint32_t type_version = read32(next_instr + 1); + PyTypeObject *tp = Py_TYPE(owner); + assert(type_version != 0); + if (tp->tp_version_tag != type_version) goto deopt; + } + /* LOAD_SLOT */ + { + PyObject *owner = s1; + uint16_t index = *(next_instr + 1 + 2); + char *addr = (char *)owner + index; + PyObject *null; + PyObject *res = *(PyObject **)addr; + if (res == NULL) goto deopt; + Py_INCREF(res); + null = NULL; + Py_DECREF(owner); + if (oparg & 1) { + stack_pointer[0] = null; + stack_pointer += 1; + } + s1 = res; + } + next_instr += (1 + 1 + 2 + 1 + 4); + stack_pointer[-1] = s1; + DISPATCH(); + } +``` + +Other tools +=========== + +From the instruction definitions we can generate the stack marking code used in `frame.set_lineno()`, +and the tables for use by disassemblers. 
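+
+For instance, a tool that needs to track stack depth could drive it from the
+generated `_PyOpcode_num_popped()` and `_PyOpcode_num_pushed()` functions
+(which now return -1 for unknown opcodes instead of hitting
+`Py_UNREACHABLE()`). The helper below is only a sketch of such a consumer,
+not something generated by this patch:
+
+```C
+    /* Hypothetical consumer of the generated metadata: compute the stack
+       depth after executing one instruction, or -1 if it is unknown. */
+    static int
+    next_stack_depth(int depth, int opcode, int oparg, bool jump)
+    {
+        int npopped = _PyOpcode_num_popped(opcode, oparg, jump);
+        int npushed = _PyOpcode_num_pushed(opcode, oparg, jump);
+        if (depth < 0 || npopped < 0 || npushed < 0) {
+            return -1;  /* no metadata available for this opcode */
+        }
+        return depth - npopped + npushed;
+    }
+```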
+ diff --git a/Tools/cases_generator/parser.py b/Tools/cases_generator/parser.py index ced66faee4931f..c7c8d8af6b7318 100644 --- a/Tools/cases_generator/parser.py +++ b/Tools/cases_generator/parser.py @@ -263,7 +263,14 @@ def stack_effect(self) -> StackEffect | None: @contextual def expression(self) -> Expression | None: tokens: list[lx.Token] = [] - while (tkn := self.peek()) and tkn.kind not in (lx.RBRACKET, lx.RPAREN): + level = 1 + while tkn := self.peek(): + if tkn.kind in (lx.LBRACKET, lx.LPAREN): + level += 1 + elif tkn.kind in (lx.RBRACKET, lx.RPAREN): + level -= 1 + if level == 0: + break tokens.append(tkn) self.next() if not tokens: diff --git a/Tools/cases_generator/test_generator.py b/Tools/cases_generator/test_generator.py index 9df97d24ab6f43..036094ac8ef487 100644 --- a/Tools/cases_generator/test_generator.py +++ b/Tools/cases_generator/test_generator.py @@ -177,15 +177,16 @@ def test_overlap(): """ run_cases_test(input, output) -def test_predictions(): +def test_predictions_and_eval_breaker(): input = """ inst(OP1, (--)) { } inst(OP2, (--)) { } - inst(OP3, (--)) { + inst(OP3, (arg -- res)) { DEOPT_IF(xxx, OP1); PREDICT(OP2); + CHECK_EVAL_BREAKER(); } """ output = """ @@ -200,8 +201,12 @@ def test_predictions(): } TARGET(OP3) { + PyObject *arg = PEEK(1); + PyObject *res; DEOPT_IF(xxx, OP1); + POKE(1, res); PREDICT(OP2); + CHECK_EVAL_BREAKER(); DISPATCH(); } """ @@ -424,20 +429,18 @@ def test_array_input(): def test_array_output(): input = """ - inst(OP, (-- below, values[oparg*3], above)) { - spam(); + inst(OP, (unused, unused -- below, values[oparg*3], above)) { + spam(values, oparg); } """ output = """ TARGET(OP) { PyObject *below; - PyObject **values; + PyObject **values = stack_pointer - (2) + 1; PyObject *above; - spam(); - STACK_GROW(2); + spam(values, oparg); STACK_GROW(oparg*3); POKE(1, above); - MOVE_ITEMS(&PEEK(1 + oparg*3), values, oparg*3); POKE(2 + oparg*3, below); DISPATCH(); } @@ -446,18 +449,17 @@ def test_array_output(): def test_array_input_output(): input = """ - inst(OP, (below, values[oparg] -- values[oparg], above)) { - spam(); + inst(OP, (values[oparg] -- values[oparg], above)) { + spam(values, oparg); } """ output = """ TARGET(OP) { PyObject **values = &PEEK(oparg); - PyObject *below = PEEK(1 + oparg); PyObject *above; - spam(); + spam(values, oparg); + STACK_GROW(1); POKE(1, above); - MOVE_ITEMS(&PEEK(1 + oparg), values, oparg); DISPATCH(); } """ @@ -503,20 +505,20 @@ def test_register(): def test_cond_effect(): input = """ - inst(OP, (aa, input if (oparg & 1), cc -- xx, output if (oparg & 2), zz)) { + inst(OP, (aa, input if ((oparg & 1) == 1), cc -- xx, output if (oparg & 2), zz)) { output = spam(oparg, input); } """ output = """ TARGET(OP) { PyObject *cc = PEEK(1); - PyObject *input = (oparg & 1) ? PEEK(1 + ((oparg & 1) ? 1 : 0)) : NULL; - PyObject *aa = PEEK(2 + ((oparg & 1) ? 1 : 0)); + PyObject *input = ((oparg & 1) == 1) ? PEEK(1 + (((oparg & 1) == 1) ? 1 : 0)) : NULL; + PyObject *aa = PEEK(2 + (((oparg & 1) == 1) ? 1 : 0)); PyObject *xx; PyObject *output = NULL; PyObject *zz; output = spam(oparg, input); - STACK_SHRINK(((oparg & 1) ? 1 : 0)); + STACK_SHRINK((((oparg & 1) == 1) ? 1 : 0)); STACK_GROW(((oparg & 2) ? 1 : 0)); POKE(1, zz); if (oparg & 2) { POKE(1 + ((oparg & 2) ? 
1 : 0), output); } diff --git a/configure b/configure index 8b707cda62129f..97694c602d1cc8 100755 --- a/configure +++ b/configure @@ -752,6 +752,8 @@ MODULE__MULTIPROCESSING_FALSE MODULE__MULTIPROCESSING_TRUE MODULE__ZONEINFO_FALSE MODULE__ZONEINFO_TRUE +MODULE__XXINTERPCHANNELS_FALSE +MODULE__XXINTERPCHANNELS_TRUE MODULE__XXSUBINTERPRETERS_FALSE MODULE__XXSUBINTERPRETERS_TRUE MODULE__TYPING_FALSE @@ -24424,6 +24426,7 @@ SRCDIRS="\ Modules/_ctypes \ Modules/_decimal \ Modules/_decimal/libmpdec \ + Modules/_hacl \ Modules/_io \ Modules/_multiprocessing \ Modules/_sha3 \ @@ -25615,6 +25618,7 @@ case $ac_sys_system in #( py_cv_module__scproxy=n/a py_cv_module__tkinter=n/a py_cv_module__xxsubinterpreters=n/a + py_cv_module__xxinterpchannels=n/a py_cv_module_grp=n/a py_cv_module_nis=n/a py_cv_module_ossaudiodev=n/a @@ -26057,6 +26061,26 @@ fi +fi + + + if test "$py_cv_module__xxinterpchannels" != "n/a"; then : + py_cv_module__xxinterpchannels=yes +fi + if test "$py_cv_module__xxinterpchannels" = yes; then + MODULE__XXINTERPCHANNELS_TRUE= + MODULE__XXINTERPCHANNELS_FALSE='#' +else + MODULE__XXINTERPCHANNELS_TRUE='#' + MODULE__XXINTERPCHANNELS_FALSE= +fi + + as_fn_append MODULE_BLOCK "MODULE__XXINTERPCHANNELS_STATE=$py_cv_module__xxinterpchannels$as_nl" + if test "x$py_cv_module__xxinterpchannels" = xyes; then : + + + + fi @@ -26943,7 +26967,7 @@ fi as_fn_append MODULE_BLOCK "MODULE__SHA256_STATE=$py_cv_module__sha256$as_nl" if test "x$py_cv_module__sha256" = xyes; then : - + as_fn_append MODULE_BLOCK "MODULE__SHA256_CFLAGS=-I\$(srcdir)/Modules/_hacl/include -I\$(srcdir)/Modules/_hacl/internal -D_BSD_SOURCE -D_DEFAULT_SOURCE$as_nl" fi @@ -28236,6 +28260,10 @@ if test -z "${MODULE__XXSUBINTERPRETERS_TRUE}" && test -z "${MODULE__XXSUBINTERP as_fn_error $? "conditional \"MODULE__XXSUBINTERPRETERS\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 fi +if test -z "${MODULE__XXINTERPCHANNELS_TRUE}" && test -z "${MODULE__XXINTERPCHANNELS_FALSE}"; then + as_fn_error $? "conditional \"MODULE__XXINTERPCHANNELS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi if test -z "${MODULE__ZONEINFO_TRUE}" && test -z "${MODULE__ZONEINFO_FALSE}"; then as_fn_error $? "conditional \"MODULE__ZONEINFO\" was never defined. Usually this means the macro was only invoked conditionally." "$LINENO" 5 diff --git a/configure.ac b/configure.ac index 5eee4586680dbb..09369b985b33f6 100644 --- a/configure.ac +++ b/configure.ac @@ -6508,6 +6508,7 @@ SRCDIRS="\ Modules/_ctypes \ Modules/_decimal \ Modules/_decimal/libmpdec \ + Modules/_hacl \ Modules/_io \ Modules/_multiprocessing \ Modules/_sha3 \ @@ -7017,6 +7018,7 @@ AS_CASE([$ac_sys_system], [_scproxy], [_tkinter], [_xxsubinterpreters], + [_xxinterpchannels], [grp], [nis], [ossaudiodev], @@ -7135,6 +7137,7 @@ PY_STDLIB_MOD_SIMPLE([select]) PY_STDLIB_MOD_SIMPLE([_struct]) PY_STDLIB_MOD_SIMPLE([_typing]) PY_STDLIB_MOD_SIMPLE([_xxsubinterpreters]) +PY_STDLIB_MOD_SIMPLE([_xxinterpchannels]) PY_STDLIB_MOD_SIMPLE([_zoneinfo]) dnl multiprocessing modules @@ -7195,7 +7198,9 @@ dnl By default we always compile these even when OpenSSL is available dnl (issue #14693). The modules are small. 
PY_STDLIB_MOD([_md5], [test "$with_builtin_md5" = yes]) PY_STDLIB_MOD([_sha1], [test "$with_builtin_sha1" = yes]) -PY_STDLIB_MOD([_sha256], [test "$with_builtin_sha256" = yes]) +PY_STDLIB_MOD([_sha256], + [test "$with_builtin_sha256" = yes], [], + [-I\$(srcdir)/Modules/_hacl/include -I\$(srcdir)/Modules/_hacl/internal -D_BSD_SOURCE -D_DEFAULT_SOURCE]) PY_STDLIB_MOD([_sha512], [test "$with_builtin_sha512" = yes]) PY_STDLIB_MOD([_sha3], [test "$with_builtin_sha3" = yes]) PY_STDLIB_MOD([_blake2],