diff --git a/__pycache__/cleanName.cpython-311.pyc b/__pycache__/cleanName.cpython-311.pyc
new file mode 100644
index 0000000..511c281
Binary files /dev/null and b/__pycache__/cleanName.cpython-311.pyc differ
diff --git a/__pycache__/fechaYhora.cpython-311.pyc b/__pycache__/fechaYhora.cpython-311.pyc
new file mode 100644
index 0000000..3050b36
Binary files /dev/null and b/__pycache__/fechaYhora.cpython-311.pyc differ
diff --git a/__pycache__/getId.cpython-311.pyc b/__pycache__/getId.cpython-311.pyc
new file mode 100644
index 0000000..32b62b3
Binary files /dev/null and b/__pycache__/getId.cpython-311.pyc differ
diff --git a/__pycache__/headerGen.cpython-311.pyc b/__pycache__/headerGen.cpython-311.pyc
new file mode 100644
index 0000000..7edf244
Binary files /dev/null and b/__pycache__/headerGen.cpython-311.pyc differ
diff --git a/__pycache__/hrefNameProcessor.cpython-311.pyc b/__pycache__/hrefNameProcessor.cpython-311.pyc
new file mode 100644
index 0000000..6ef63e4
Binary files /dev/null and b/__pycache__/hrefNameProcessor.cpython-311.pyc differ
diff --git a/__pycache__/main.cpython-311.pyc b/__pycache__/main.cpython-311.pyc
new file mode 100644
index 0000000..ec332f6
Binary files /dev/null and b/__pycache__/main.cpython-311.pyc differ
diff --git a/__pycache__/priceProcess.cpython-311.pyc b/__pycache__/priceProcess.cpython-311.pyc
new file mode 100644
index 0000000..c214c68
Binary files /dev/null and b/__pycache__/priceProcess.cpython-311.pyc differ
diff --git a/__pycache__/proxyListGen.cpython-311.pyc b/__pycache__/proxyListGen.cpython-311.pyc
new file mode 100644
index 0000000..7fb0fbd
Binary files /dev/null and b/__pycache__/proxyListGen.cpython-311.pyc differ
diff --git a/__pycache__/urlTransformer.cpython-311.pyc b/__pycache__/urlTransformer.cpython-311.pyc
new file mode 100644
index 0000000..b8a1d57
Binary files /dev/null and b/__pycache__/urlTransformer.cpython-311.pyc differ
diff --git a/cleanName.py b/cleanName.py
new file mode 100644
index 0000000..aeb285a
--- /dev/null
+++ b/cleanName.py
@@ -0,0 +1,8 @@
+import re
+def cleanName(texto_fila):
+    sin_numeros = re.sub(r'\d+', '', texto_fila)
+    return sin_numeros
+
+def cleanNameTrim(texto):
+    t = texto.split('\n', 1)[0]
+    return t
\ No newline at end of file
diff --git a/downloaded_files/driver_fixing.lock b/downloaded_files/driver_fixing.lock
new file mode 100644
index 0000000..e69de29
diff --git a/downloaded_files/pyautogui.lock b/downloaded_files/pyautogui.lock
new file mode 100644
index 0000000..e69de29
diff --git a/fechaYhora.py b/fechaYhora.py
new file mode 100644
index 0000000..1db256c
--- /dev/null
+++ b/fechaYhora.py
@@ -0,0 +1,10 @@
+import datetime
+import pytz
+
+def fechaYhora():
+    mi_zona_horaria = pytz.timezone('America/Santiago')
+    now = datetime.datetime.now(mi_zona_horaria)
+    hora_actual = now.strftime("%H:%M:%S")
+    fecha_actual = now.strftime("%Y-%m-%d")
+    fecha = f"{fecha_actual} {hora_actual}"
+    return fecha
\ No newline at end of file
diff --git a/getId.py b/getId.py
new file mode 100644
index 0000000..29a7a9b
--- /dev/null
+++ b/getId.py
@@ -0,0 +1,4 @@
+import re
+def getId(txt):
+    id_juego = re.findall(r'\d+', txt)[0]
+    return id_juego
\ No newline at end of file
diff --git a/headerGen.py b/headerGen.py
new file mode 100644
index 0000000..d7bcae6
--- /dev/null
+++ b/headerGen.py
@@ -0,0 +1,42 @@
+import requests
+from proxyListGen import proxyListCheck
+def buscarEn(url):
+    try:
+        triesList = []
+        prox = proxyListCheck()
+        proxy = f"{prox[0]}:{prox[1]}"
+        session = requests.Session()
+        while True:
+            try:
+                r = session.get(url, proxies={'http':proxy, 'https':proxy}, timeout=7)
+                break
+            except requests.exceptions.RequestException as err:
+                triesList.append(proxy)
+                prox = proxyListCheck()
+                proxy = f"{prox[0]}:{prox[1]}"
+                if proxy in triesList:
+                    while proxy in triesList:
+                        prox = proxyListCheck()
+                        proxy = f"{prox[0]}:{prox[1]}"
+                session = requests.Session()
+        if r.status_code == 200:
+            return proxy
+        else:
+            while True:
+                try:
+                    r = session.get(url, proxies={'http':proxy, 'https':proxy}, timeout=3)
+                    break
+                except requests.exceptions.RequestException as err:
+                    triesList.append(proxy)
+                    prox = proxyListCheck()
+                    proxy = f"{prox[0]}:{prox[1]}"
+                    if proxy in triesList:
+                        while proxy in triesList:
+                            prox = proxyListCheck()
+                            proxy = f"{prox[0]}:{prox[1]}"
+                    session = requests.Session()
+            return proxy
+    except requests.exceptions.RequestException as err:
+        print(err, 'HeaderGen')
+
+
\ No newline at end of file
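buscarEn() keeps drawing proxies from proxyListCheck() until one serves the URL, remembering failures in triesList so a dead proxy is not retried. A compact sketch of the same rotation pattern, assuming only a get_proxy() callable that, like proxyListCheck(), returns an (ip, port) pair:

    import requests

    def find_working_proxy(url, get_proxy, max_tries=20):
        # get_proxy() is assumed to return an (ip, port) pair, like proxyListCheck()
        tried = set()
        session = requests.Session()
        for _ in range(max_tries):
            ip, port = get_proxy()
            proxy = f"{ip}:{port}"
            if proxy in tried:
                continue  # already failed once; draw another proxy
            tried.add(proxy)
            try:
                r = session.get(url, proxies={'http': proxy, 'https': proxy}, timeout=7)
                if r.status_code == 200:
                    return proxy
            except requests.exceptions.RequestException:
                pass  # dead or slow proxy; keep rotating
        return None  # nothing usable within max_tries draws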
diff --git a/hrefNameProcessor.py b/hrefNameProcessor.py
new file mode 100644
index 0000000..5839ed2
--- /dev/null
+++ b/hrefNameProcessor.py
@@ -0,0 +1,12 @@
+
+def hrefNameProcessor(href):
+    # Extract the part after 'steam-'
+    contenido = href.split('steam-')[1]
+
+    # If there is a second 'steam-' or the end of the URL segment, cut there
+    if '-steam' in contenido:
+        contenido = contenido.split('-steam')[0]
+
+    # Replace hyphens with spaces for a more readable name
+    contenido_limpio = contenido.replace('-', ' ')
+    return contenido_limpio
diff --git a/integration.py b/integration.py
new file mode 100644
index 0000000..66db842
--- /dev/null
+++ b/integration.py
@@ -0,0 +1,4 @@
+from main import scrap
+
+juego = scrap('dark souls')
+print(juego)
\ No newline at end of file
diff --git a/main.py b/main.py
new file mode 100644
index 0000000..e799f2a
--- /dev/null
+++ b/main.py
@@ -0,0 +1,47 @@
+from seleniumbase import SB
+import re
+from urlTransformer import juegoURL
+from cleanName import cleanName, cleanNameTrim
+from priceProcess import priceProcess
+from hrefNameProcessor import hrefNameProcessor
+from getId import getId
+from fechaYhora import fechaYhora
+
+def scrap(juego):
+    with SB(uc=True, headless=True) as sb:
+        try:
+            nombreJuego = juegoURL(juego)
+            url = f"https://www.eneba.com/latam/store/all?text={nombreJuego}&enb_campaign=Main%20Search&enb_content=search%20dropdown%20-%20input&enb_medium=input&enb_source=https%3A%2F%2Fwww.eneba.com%2F&enb_term=Submit"
+            sb.get(url)
+            cards = sb.find_elements('.oSVLlh')
+            price = sb.find_elements('css selector', '.L5ErLT')
+            names = []
+            prices = []
+            plataforma = []
+            fecha = []
+            games = []
+            for i in cards:
+                name = i.get_attribute('href')
+                if name and 'steam' in name:
+                    hrefLimpio = hrefNameProcessor(name)
+                    names.append(hrefLimpio)
+                    plataforma.append('steamDB')
+                    date = fechaYhora()
+                    fecha.append(date)
+            for i in price:
+                prices.append(i.text)
+            for n, pr, pl, fe in zip(names, prices, plataforma, fecha):
+                obj = {
+                    "juego": n,
+                    "precio": pr,
+                    "plataforma": pl,
+                    "fecha": fe
+                }
+                games.append(obj)
+            return games
+        except ValueError as err:
+            print(err)
+scrap('dark souls')
+
+
+
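scrap() pairs the name and price lists positionally, and zip() silently truncates to the shorter list if a card lacks a matching price element. A stricter pairing over the same lists, as a sketch (strict=True needs Python 3.10+):

    def build_records(names, prices, plataforma, fecha):
        # Fail loudly instead of silently dropping rows when the scraped
        # lists drift out of sync (strict=True raises ValueError on mismatch)
        return [
            {"juego": n, "precio": pr, "plataforma": pl, "fecha": fe}
            for n, pr, pl, fe in zip(names, prices, plataforma, fecha, strict=True)
        ]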
diff --git a/priceProcess.py b/priceProcess.py
new file mode 100644
index 0000000..64fdc36
--- /dev/null
+++ b/priceProcess.py
@@ -0,0 +1,17 @@
+from bs4 import BeautifulSoup
+
+def priceProcess():
+
+    with open('pagina_steamdb.txt', 'r', encoding='utf-8') as file:
+        html_content = file.read()
+    soup = BeautifulSoup(html_content, 'html.parser')
+    price_div = soup.find('div', class_='single-price-line')
+    if price_div:
+        price_span = price_div.find('span')
+        if price_span:
+            precio_juego = price_span.text
+            return precio_juego
+        else:
+            print('No span with the price was found.')
+    else:
+        print('No div with the class "single-price-line" was found.')
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..ced7f58
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1 @@
+seleniumbase==4.31.6
diff --git a/urlTransformer.py b/urlTransformer.py
new file mode 100644
index 0000000..0ecaefe
--- /dev/null
+++ b/urlTransformer.py
@@ -0,0 +1,3 @@
+def juegoURL(juego):
+    x = juego.replace(' ', '%20')
+    return x
\ No newline at end of file
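juegoURL() only escapes spaces, so a search such as "assassin's creed" would reach the site with other reserved characters unescaped. The standard library's urllib.parse.quote covers the full character set; a possible drop-in sketch:

    from urllib.parse import quote

    def juegoURL(juego):
        # Percent-encodes spaces as %20 and every other unsafe character too
        return quote(juego)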
diff --git a/venv/Lib/site-packages/PySocks-1.7.1.dist-info/INSTALLER b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/Lib/site-packages/PySocks-1.7.1.dist-info/LICENSE b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/LICENSE
new file mode 100644
index 0000000..04b6b1f
--- /dev/null
+++ b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/LICENSE
@@ -0,0 +1,22 @@
+Copyright 2006 Dan-Haim. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+3. Neither the name of Dan Haim nor the names of his contributors may be used
+   to endorse or promote products derived from this software without specific
+   prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY DAN HAIM "AS IS" AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL DAN HAIM OR HIS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA
+OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMANGE.
diff --git a/venv/Lib/site-packages/PySocks-1.7.1.dist-info/METADATA b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/METADATA
new file mode 100644
index 0000000..ae2ae34
--- /dev/null
+++ b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/METADATA
@@ -0,0 +1,321 @@
+Metadata-Version: 2.1
+Name: PySocks
+Version: 1.7.1
+Summary: A Python SOCKS client module. See https://github.com/Anorov/PySocks for more information.
+Home-page: https://github.com/Anorov/PySocks
+Author: Anorov
+Author-email: anorov.vorona@gmail.com
+License: BSD
+Keywords: socks,proxy
+Platform: UNKNOWN
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*
+Description-Content-Type: text/markdown
+
+PySocks
+=======
+
+PySocks lets you send traffic through SOCKS and HTTP proxy servers. It is a modern fork of [SocksiPy](http://socksipy.sourceforge.net/) with bug fixes and extra features.
+
+Acts as a drop-in replacement for the socket module. Seamlessly configure SOCKS proxies for any socket object by calling `socket_object.set_proxy()`.
+
+----------------
+
+Features
+========
+
+* SOCKS proxy client for Python 2.7 and 3.4+
+* TCP supported
+* UDP mostly supported (issues may occur in some edge cases)
+* HTTP proxy client included but not supported or recommended (you should use urllib2's or requests' own HTTP proxy interface)
+* urllib2 handler included. `pip install` / `setup.py install` will automatically install the `sockshandler` module.
+
+Installation
+============
+
+    pip install PySocks
+
+Or download the tarball / `git clone` and...
+
+    python setup.py install
+
+These will install both the `socks` and `sockshandler` modules.
+
+Alternatively, include just `socks.py` in your project.
+
+--------------------------------------------
+
+*Warning:* PySocks/SocksiPy only supports HTTP proxies that use CONNECT tunneling. Certain HTTP proxies may not work with this library. If you wish to use HTTP (not SOCKS) proxies, it is recommended that you rely on your HTTP client's native proxy support (`proxies` dict for `requests`, or `urllib2.ProxyHandler` for `urllib2`) instead.
+
+--------------------------------------------
+
+Usage
+=====
+
+## socks.socksocket ##
+
+    import socks
+
+    s = socks.socksocket()  # Same API as socket.socket in the standard lib
+
+    s.set_proxy(socks.SOCKS5, "localhost")  # SOCKS4 and SOCKS5 use port 1080 by default
+    # Or
+    s.set_proxy(socks.SOCKS4, "localhost", 4444)
+    # Or
+    s.set_proxy(socks.HTTP, "5.5.5.5", 8888)
+
+    # Can be treated identically to a regular socket object
+    s.connect(("www.somesite.com", 80))
+    s.sendall("GET / HTTP/1.1 ...")
+    print s.recv(4096)
+
+## Monkeypatching ##
+
+To monkeypatch the entire standard library with a single default proxy:
+
+    import urllib2
+    import socket
+    import socks
+
+    socks.set_default_proxy(socks.SOCKS5, "localhost")
+    socket.socket = socks.socksocket
+
+    urllib2.urlopen("http://www.somesite.com/")  # All requests will pass through the SOCKS proxy
+
+Note that monkeypatching may not work for all standard modules or for all third party modules, and generally isn't recommended. Monkeypatching is usually an anti-pattern in Python.
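The snippets above are written for Python 2 (urllib2, print statements). For reference, a Python 3 sketch of the same monkeypatching approach, assuming a SOCKS5 proxy listening on localhost:1080:

    import socket
    import urllib.request

    import socks

    socks.set_default_proxy(socks.SOCKS5, "localhost", 1080)
    socket.socket = socks.socksocket  # every new socket now goes through the proxy

    print(urllib.request.urlopen("http://www.somesite.com/").status)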
+## urllib2 Handler ##
+
+Example use case with the `sockshandler` urllib2 handler. Note that you must import both `socks` and `sockshandler`, as the handler is its own module separate from PySocks. The module is included in the PyPI package.
+
+    import urllib2
+    import socks
+    from sockshandler import SocksiPyHandler
+
+    opener = urllib2.build_opener(SocksiPyHandler(socks.SOCKS5, "127.0.0.1", 9050))
+    print opener.open("http://www.somesite.com/")  # All requests made by the opener will pass through the SOCKS proxy
+
+--------------------------------------------
+
+Original SocksiPy README attached below, amended to reflect API changes.
+
+--------------------------------------------
+
+SocksiPy
+
+A Python SOCKS module.
+
+(C) 2006 Dan-Haim. All rights reserved.
+
+See LICENSE file for details.
+
+
+*WHAT IS A SOCKS PROXY?*
+
+A SOCKS proxy is a proxy server at the TCP level. In other words, it acts as
+a tunnel, relaying all traffic going through it without modifying it.
+SOCKS proxies can be used to relay traffic using any network protocol that
+uses TCP.
+
+*WHAT IS SOCKSIPY?*
+
+This Python module allows you to create TCP connections through a SOCKS
+proxy without any special effort.
+It also supports relaying UDP packets with a SOCKS5 proxy.
+
+*PROXY COMPATIBILITY*
+
+SocksiPy is compatible with three different types of proxies:
+
+1. SOCKS Version 4 (SOCKS4), including the SOCKS4a extension.
+2. SOCKS Version 5 (SOCKS5).
+3. HTTP Proxies which support tunneling using the CONNECT method.
+
+*SYSTEM REQUIREMENTS*
+
+Being written in Python, SocksiPy can run on any platform that has a Python
+interpreter and TCP/IP support.
+This module has been tested with Python 2.3 and should work with later versions
+just as well.
+
+
+INSTALLATION
+-------------
+
+Simply copy the file "socks.py" to your Python's `lib/site-packages` directory,
+and you're ready to go. [Editor's note: it is better to use `python setup.py install` for PySocks]
+
+
+USAGE
+------
+
+First load the socks module with the command:
+
+    >>> import socks
+    >>>
+
+The socks module provides a class called `socksocket`, which is the base to all of the module's functionality.
+
+The `socksocket` object has the same initialization parameters as the normal socket
+object to ensure maximal compatibility, however it should be noted that `socksocket` will only function with family being `AF_INET` and
+type being either `SOCK_STREAM` or `SOCK_DGRAM`.
+Generally, it is best to initialize the `socksocket` object with no parameters.
+
+    >>> s = socks.socksocket()
+    >>>
+
+The `socksocket` object has an interface which is very similar to socket's (in fact
+the `socksocket` class is derived from socket) with a few extra methods.
+To select the proxy server you would like to use, use the `set_proxy` method, whose
+syntax is:
+
+    set_proxy(proxy_type, addr[, port[, rdns[, username[, password]]]])
+
+Explanation of the parameters:
+
+`proxy_type` - The type of the proxy server. This can be one of three possible
+choices: `PROXY_TYPE_SOCKS4`, `PROXY_TYPE_SOCKS5` and `PROXY_TYPE_HTTP` for SOCKS4,
+SOCKS5 and HTTP servers respectively. `SOCKS4`, `SOCKS5`, and `HTTP` are all aliases, respectively.
+
+`addr` - The IP address or DNS name of the proxy server.
+
+`port` - The port of the proxy server. Defaults to 1080 for SOCKS and 8080 for HTTP.
+
+`rdns` - This is a boolean flag that modifies the behavior regarding DNS resolving.
+If it is set to True, DNS resolving will be performed remotely, on the server.
+If it is set to False, DNS resolving will be performed locally. Please note that
+setting this to True with SOCKS4 servers actually uses an extension to the protocol,
+called SOCKS4a, which may not be supported on all servers (SOCKS5 and HTTP servers
+always support DNS). The default is True.
+
+`username` - For SOCKS5 servers, this allows simple username / password authentication
+with the server. For SOCKS4 servers, this parameter will be sent as the userid.
+This parameter is ignored if an HTTP server is being used. If it is not provided,
+authentication will not be used (servers may accept unauthenticated requests).
+
+`password` - This parameter is valid only for SOCKS5 servers and specifies the
+respective password for the username provided.
+
+Example of usage:
+
+    >>> s.set_proxy(socks.SOCKS5, "socks.example.com")  # uses default port 1080
+    >>> s.set_proxy(socks.SOCKS4, "socks.test.com", 1081)
+
+After the set_proxy method has been called, simply call the connect method with the
+traditional parameters to establish a connection through the proxy:
+
+    >>> s.connect(("www.sourceforge.net", 80))
+    >>>
+
+Connection will take a bit longer to allow negotiation with the proxy server.
+Please note that calling connect without calling `set_proxy` earlier will connect
+without a proxy (just like a regular socket).
+
+Errors: Any errors in the connection process will trigger exceptions. The exception
+may either be generated by the underlying socket layer or may be custom module
+exceptions, whose details follow:
+
+class `ProxyError` - This is a base exception class. It is not raised directly but
+rather all other exception classes raised by this module are derived from it.
+This allows an easy way to catch all proxy-related errors. It descends from `IOError`.
+
+All `ProxyError` exceptions have an attribute `socket_err`, which will contain either a
+caught `socket.error` exception, or `None` if there wasn't any.
+
+class `GeneralProxyError` - When thrown, it indicates a problem which does not fall
+into another category.
+
+* `Sent invalid data` - This error means that unexpected data has been received from
+the server. The most common reason is that the server specified as the proxy is
+not really a SOCKS4/SOCKS5/HTTP proxy, or maybe the proxy type specified is wrong.
+
+* `Connection closed unexpectedly` - The proxy server unexpectedly closed the connection.
+This may indicate that the proxy server is experiencing network or software problems.
+
+* `Bad proxy type` - This will be raised if the type of the proxy supplied to the
+set_proxy function was not one of `SOCKS4`/`SOCKS5`/`HTTP`.
+
+* `Bad input` - This will be raised if the `connect()` method is called with bad input
+parameters.
+
+class `SOCKS5AuthError` - This indicates that the connection through a SOCKS5 server
+failed due to an authentication problem.
+
+* `Authentication is required` - This will happen if you use a SOCKS5 server which
+requires authentication without providing a username / password at all.
+
+* `All offered authentication methods were rejected` - This will happen if the proxy
+requires a special authentication method which is not supported by this module.
+
+* `Unknown username or invalid password` - Self descriptive.
+
+class `SOCKS5Error` - This will be raised for SOCKS5 errors which are not related to
+authentication. The parameter is a tuple containing a code, as given by the server,
+and a description of the error. The possible errors, according to the RFC, are:
+
+* `0x01` - General SOCKS server failure - If for any reason the proxy server is unable to
+fulfill your request (internal server error).
+* `0x02` - Connection not allowed by ruleset - If the address you're trying to connect to
+is blacklisted on the server or requires authentication.
+* `0x03` - Network unreachable - The target could not be contacted. A router on the network
+had replied with a destination net unreachable error.
+* `0x04` - Host unreachable - The target could not be contacted. A router on the network
+had replied with a destination host unreachable error.
+* `0x05` - Connection refused - The target server has actively refused the connection
+(the requested port is closed).
+* `0x06` - TTL expired - The TTL value of the SYN packet from the proxy to the target server
+has expired. This usually means that there are network problems causing the packet
+to be caught in a router-to-router "ping-pong".
+* `0x07` - Command not supported - For instance if the server does not support UDP.
+* `0x08` - Address type not supported - The client has provided an invalid address type.
+When using this module, this error should not occur.
+
+class `SOCKS4Error` - This will be raised for SOCKS4 errors. The parameter is a tuple
+containing a code and a description of the error, as given by the server. The
+possible errors, according to the specification, are:
+
+* `0x5B` - Request rejected or failed - Will be raised in the event of a failure for any
+reason other than the two mentioned next.
+* `0x5C` - request rejected because SOCKS server cannot connect to identd on the client -
+The SOCKS server tried an ident lookup on your computer and failed. In this
+case you should run an identd server and/or configure your firewall to allow incoming
+connections to local port 113 from the remote server.
+* `0x5D` - request rejected because the client program and identd report different user-ids -
+The SOCKS server performed an ident lookup on your computer and received a
+different userid than the one you have provided. Change your userid (through the
+username parameter of the set_proxy method) to match and try again.
+
+class `HTTPError` - This will be raised for HTTP errors. The message will contain
+the HTTP status code and provided error message.
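Because every class above descends from ProxyError, one except clause can catch them all; a small sketch using only the documented API:

    import socks

    s = socks.socksocket()
    s.set_proxy(socks.SOCKS5, "localhost", 1080)
    try:
        s.connect(("www.somesite.com", 80))
    except socks.ProxyError as err:
        # socket_err holds the underlying socket.error, or None
        print("proxy failure:", err, err.socket_err)
    finally:
        s.close()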
+
+After establishing the connection, the object behaves like a standard socket.
+
+Methods like `makefile()` and `settimeout()` should behave just like regular sockets.
+Call the `close()` method to close the connection.
+
+In addition to the `socksocket` class, an additional function worth mentioning is the
+`set_default_proxy` function. The parameters are the same as the `set_proxy` method.
+This function will set default proxy settings for newly created `socksocket` objects,
+in which the proxy settings haven't been changed via the `set_proxy` method.
+This is quite useful if you wish to force 3rd party modules to use a SOCKS proxy,
+by overriding the socket object.
+For example:
+
+    >>> socks.set_default_proxy(socks.SOCKS5, "socks.example.com")
+    >>> socket.socket = socks.socksocket
+    >>> urllib.urlopen("http://www.sourceforge.net/")
+
+
+PROBLEMS
+---------
+
+Please open a GitHub issue at https://github.com/Anorov/PySocks
+
+
diff --git a/venv/Lib/site-packages/PySocks-1.7.1.dist-info/RECORD b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/RECORD
new file mode 100644
index 0000000..5967295
--- /dev/null
+++ b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/RECORD
@@ -0,0 +1,10 @@
+PySocks-1.7.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+PySocks-1.7.1.dist-info/LICENSE,sha256=cCfiFOAU63i3rcwc7aWspxOnn8T2oMUsnaWz5wfm_-k,1401
+PySocks-1.7.1.dist-info/METADATA,sha256=zbQMizjPOOP4DhEiEX24XXjNrYuIxF9UGUpN0uFDB6Y,13235
+PySocks-1.7.1.dist-info/RECORD,,
+PySocks-1.7.1.dist-info/WHEEL,sha256=t_MpApv386-8PVts2R6wsTifdIn0vbUDTVv61IbqFC8,92
+PySocks-1.7.1.dist-info/top_level.txt,sha256=TKSOIfCFBoK9EY8FBYbYqC3PWd3--G15ph9n8-QHPDk,19
+__pycache__/socks.cpython-311.pyc,,
+__pycache__/sockshandler.cpython-311.pyc,,
+socks.py,sha256=xOYn27t9IGrbTBzWsUUuPa0YBuplgiUykzkOB5V5iFY,31086
+sockshandler.py,sha256=2SYGj-pwt1kjgLoZAmyeaEXCeZDWRmfVS_QG6kErGtY,3966
diff --git a/venv/Lib/site-packages/PySocks-1.7.1.dist-info/WHEEL b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/WHEEL
new file mode 100644
index 0000000..129a673
--- /dev/null
+++ b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.3)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/venv/Lib/site-packages/PySocks-1.7.1.dist-info/top_level.txt b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/top_level.txt
new file mode 100644
index 0000000..9476163
--- /dev/null
+++ b/venv/Lib/site-packages/PySocks-1.7.1.dist-info/top_level.txt
@@ -0,0 +1,2 @@
+socks
+sockshandler
diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/INSTALLER b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/INSTALLER
new file mode 100644
index 0000000..a1b589e
--- /dev/null
+++ b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/LICENSE b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/LICENSE
new file mode 100644
index 0000000..2f1b8e1
--- /dev/null
+++ b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/LICENSE
@@ -0,0 +1,20 @@
+Copyright (c) 2017-2021 Ingy döt Net
+Copyright (c) 2006-2016 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/METADATA b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/METADATA
new file mode 100644
index 0000000..db029b7
--- /dev/null
+++ b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/METADATA
@@ -0,0 +1,46 @@
+Metadata-Version: 2.1
+Name: PyYAML
+Version: 6.0.2
+Summary: YAML parser and emitter for Python
+Home-page: https://pyyaml.org/
+Download-URL: https://pypi.org/project/PyYAML/
+Author: Kirill Simonov
+Author-email: xi@resolvent.net
+License: MIT
+Project-URL: Bug Tracker, https://github.com/yaml/pyyaml/issues
+Project-URL: CI, https://github.com/yaml/pyyaml/actions
+Project-URL: Documentation, https://pyyaml.org/wiki/PyYAMLDocumentation
+Project-URL: Mailing lists, http://lists.sourceforge.net/lists/listinfo/yaml-core
+Project-URL: Source Code, https://github.com/yaml/pyyaml
+Platform: Any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Cython
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+Requires-Python: >=3.8
+License-File: LICENSE
+
+YAML is a data serialization format designed for human readability
+and interaction with scripting languages. PyYAML is a YAML parser
+and emitter for Python.
+
+PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+support, capable extension API, and sensible error messages. PyYAML
+supports standard YAML tags and provides Python-specific tags that
+allow to represent an arbitrary Python object.
+
+PyYAML is applicable for a broad range of tasks from complex
+configuration files to object serialization and persistence.
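The parser and emitter the description refers to surface as yaml.safe_load and yaml.safe_dump; a minimal round-trip sketch:

    import yaml

    doc = yaml.safe_load("juego: dark souls\nprecio: 9.99\n")
    print(doc["precio"])           # 9.99
    print(yaml.safe_dump(doc), end="")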
diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/RECORD b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/RECORD new file mode 100644 index 0000000..dc2fe0d --- /dev/null +++ b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/RECORD @@ -0,0 +1,43 @@ +PyYAML-6.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +PyYAML-6.0.2.dist-info/LICENSE,sha256=jTko-dxEkP1jVwfLiOsmvXZBAqcoKVQwfT5RZ6V36KQ,1101 +PyYAML-6.0.2.dist-info/METADATA,sha256=9lwXqTOrXPts-jI2Lo5UwuaAYo0hiRA0BZqjch0WjAk,2106 +PyYAML-6.0.2.dist-info/RECORD,, +PyYAML-6.0.2.dist-info/WHEEL,sha256=yEpuRje-u1Z_HrXQj-UTAfIAegW_HcP2GJ7Ek8BJkUM,102 +PyYAML-6.0.2.dist-info/top_level.txt,sha256=rpj0IVMTisAjh_1vG3Ccf9v5jpCQwAz6cD1IVU5ZdhQ,11 +_yaml/__init__.py,sha256=04Ae_5osxahpJHa3XBZUAf4wi6XX32gR8D6X6p64GEA,1402 +_yaml/__pycache__/__init__.cpython-311.pyc,, +yaml/__init__.py,sha256=N35S01HMesFTe0aRRMWkPj0Pa8IEbHpE9FK7cr5Bdtw,12311 +yaml/__pycache__/__init__.cpython-311.pyc,, +yaml/__pycache__/composer.cpython-311.pyc,, +yaml/__pycache__/constructor.cpython-311.pyc,, +yaml/__pycache__/cyaml.cpython-311.pyc,, +yaml/__pycache__/dumper.cpython-311.pyc,, +yaml/__pycache__/emitter.cpython-311.pyc,, +yaml/__pycache__/error.cpython-311.pyc,, +yaml/__pycache__/events.cpython-311.pyc,, +yaml/__pycache__/loader.cpython-311.pyc,, +yaml/__pycache__/nodes.cpython-311.pyc,, +yaml/__pycache__/parser.cpython-311.pyc,, +yaml/__pycache__/reader.cpython-311.pyc,, +yaml/__pycache__/representer.cpython-311.pyc,, +yaml/__pycache__/resolver.cpython-311.pyc,, +yaml/__pycache__/scanner.cpython-311.pyc,, +yaml/__pycache__/serializer.cpython-311.pyc,, +yaml/__pycache__/tokens.cpython-311.pyc,, +yaml/_yaml.cp311-win_amd64.pyd,sha256=6BXrc7YC-BZJ911z64UDwJV3D0ay3GiyETPzbhl0iJc,272384 +yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883 +yaml/constructor.py,sha256=kNgkfaeLUkwQYY_Q6Ff1Tz2XVw_pG1xVE9Ak7z-viLA,28639 +yaml/cyaml.py,sha256=6ZrAG9fAYvdVe2FK_w0hmXoG7ZYsoYUwapG8CiC72H0,3851 +yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837 +yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006 +yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533 +yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445 +yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061 +yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440 +yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495 +yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794 +yaml/representer.py,sha256=IuWP-cAW9sHKEnS0gCqSa894k1Bg4cgTxaDwIcbRQ-Y,14190 +yaml/resolver.py,sha256=9L-VYfm4mWHxUD1Vg4X7rjDRK_7VZd6b92wzq7Y2IKY,9004 +yaml/scanner.py,sha256=YEM3iLZSaQwXcQRg2l2R4MdT0zGP2F9eHkKGKnHyWQY,51279 +yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165 +yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573 diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/WHEEL b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/WHEEL new file mode 100644 index 0000000..f40c43f --- /dev/null +++ b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.44.0) +Root-Is-Purelib: false +Tag: cp311-cp311-win_amd64 + diff --git a/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/top_level.txt b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/top_level.txt new file mode 100644 index 0000000..e6475e9 --- /dev/null +++ 
b/venv/Lib/site-packages/PyYAML-6.0.2.dist-info/top_level.txt @@ -0,0 +1,2 @@ +_yaml +yaml diff --git a/venv/Lib/site-packages/__pycache__/parse.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/parse.cpython-311.pyc new file mode 100644 index 0000000..9caf3fc Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/parse.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/pdbp.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/pdbp.cpython-311.pyc new file mode 100644 index 0000000..33c2124 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/pdbp.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/py.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/py.cpython-311.pyc new file mode 100644 index 0000000..e57dac1 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/py.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/pytest_ordering.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/pytest_ordering.cpython-311.pyc new file mode 100644 index 0000000..5fdab5e Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/pytest_ordering.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/pytest_rerunfailures.cpython-311-pytest-8.3.3.pyc b/venv/Lib/site-packages/__pycache__/pytest_rerunfailures.cpython-311-pytest-8.3.3.pyc new file mode 100644 index 0000000..16199d6 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/pytest_rerunfailures.cpython-311-pytest-8.3.3.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/pytest_rerunfailures.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/pytest_rerunfailures.cpython-311.pyc new file mode 100644 index 0000000..b558944 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/pytest_rerunfailures.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/readline.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/readline.cpython-311.pyc new file mode 100644 index 0000000..6ebc4b4 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/readline.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/setuptools_behave.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/setuptools_behave.cpython-311.pyc new file mode 100644 index 0000000..c303081 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/setuptools_behave.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/six.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/six.cpython-311.pyc new file mode 100644 index 0000000..2daab20 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/six.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/socks.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/socks.cpython-311.pyc new file mode 100644 index 0000000..0a70a96 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/socks.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/sockshandler.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/sockshandler.cpython-311.pyc new file mode 100644 index 0000000..b6ab669 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/sockshandler.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/__pycache__/tabcompleter.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/tabcompleter.cpython-311.pyc new file mode 100644 index 0000000..38542f8 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/tabcompleter.cpython-311.pyc differ diff --git 
a/venv/Lib/site-packages/__pycache__/typing_extensions.cpython-311.pyc b/venv/Lib/site-packages/__pycache__/typing_extensions.cpython-311.pyc new file mode 100644 index 0000000..a6b66d9 Binary files /dev/null and b/venv/Lib/site-packages/__pycache__/typing_extensions.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_cffi_backend.cp311-win_amd64.pyd b/venv/Lib/site-packages/_cffi_backend.cp311-win_amd64.pyd new file mode 100644 index 0000000..9bb0309 Binary files /dev/null and b/venv/Lib/site-packages/_cffi_backend.cp311-win_amd64.pyd differ diff --git a/venv/Lib/site-packages/_distutils_hack/__init__.py b/venv/Lib/site-packages/_distutils_hack/__init__.py new file mode 100644 index 0000000..30ac3a7 --- /dev/null +++ b/venv/Lib/site-packages/_distutils_hack/__init__.py @@ -0,0 +1,240 @@ +# don't import any costly modules +import os +import sys + +report_url = ( + "https://github.com/pypa/setuptools/issues/new?" + "template=distutils-deprecation.yml" +) + + +def warn_distutils_present(): + if 'distutils' not in sys.modules: + return + import warnings + + warnings.warn( + "Distutils was imported before Setuptools, but importing Setuptools " + "also replaces the `distutils` module in `sys.modules`. This may lead " + "to undesirable behaviors or errors. To avoid these issues, avoid " + "using distutils directly, ensure that setuptools is installed in the " + "traditional way (e.g. not an editable install), and/or make sure " + "that setuptools is always imported before distutils." + ) + + +def clear_distutils(): + if 'distutils' not in sys.modules: + return + import warnings + + warnings.warn( + "Setuptools is replacing distutils. Support for replacing " + "an already imported distutils is deprecated. In the future, " + "this condition will fail. " + f"Register concerns at {report_url}" + ) + mods = [ + name + for name in sys.modules + if name == "distutils" or name.startswith("distutils.") + ] + for name in mods: + del sys.modules[name] + + +def enabled(): + """ + Allow selection of distutils by environment variable. + """ + which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local') + if which == 'stdlib': + import warnings + + warnings.warn( + "Reliance on distutils from stdlib is deprecated. Users " + "must rely on setuptools to provide the distutils module. " + "Avoid importing distutils or import setuptools first, " + "and avoid setting SETUPTOOLS_USE_DISTUTILS=stdlib. " + f"Register concerns at {report_url}" + ) + return which == 'local' + + +def ensure_local_distutils(): + import importlib + + clear_distutils() + + # With the DistutilsMetaFinder in place, + # perform an import to cause distutils to be + # loaded from setuptools._distutils. Ref #2906. + with shim(): + importlib.import_module('distutils') + + # check that submodules load as expected + core = importlib.import_module('distutils.core') + assert '_distutils' in core.__file__, core.__file__ + assert 'setuptools._distutils.log' not in sys.modules + + +def do_override(): + """ + Ensure that the local copy of distutils is preferred over stdlib. + + See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401 + for more motivation. 
+ """ + if enabled(): + warn_distutils_present() + ensure_local_distutils() + + +class _TrivialRe: + def __init__(self, *patterns): + self._patterns = patterns + + def match(self, string): + return all(pat in string for pat in self._patterns) + + +class DistutilsMetaFinder: + def find_spec(self, fullname, path, target=None): + # optimization: only consider top level modules and those + # found in the CPython test suite. + if path is not None and not fullname.startswith('test.'): + return None + + method_name = 'spec_for_{fullname}'.format(**locals()) + method = getattr(self, method_name, lambda: None) + return method() + + def spec_for_distutils(self): + if self.is_cpython(): + return None + + import importlib + import importlib.abc + import importlib.util + + try: + mod = importlib.import_module('setuptools._distutils') + except Exception: + # There are a couple of cases where setuptools._distutils + # may not be present: + # - An older Setuptools without a local distutils is + # taking precedence. Ref #2957. + # - Path manipulation during sitecustomize removes + # setuptools from the path but only after the hook + # has been loaded. Ref #2980. + # In either case, fall back to stdlib behavior. + return None + + class DistutilsLoader(importlib.abc.Loader): + def create_module(self, spec): + mod.__name__ = 'distutils' + return mod + + def exec_module(self, module): + pass + + return importlib.util.spec_from_loader( + 'distutils', DistutilsLoader(), origin=mod.__file__ + ) + + @staticmethod + def is_cpython(): + """ + Suppress supplying distutils for CPython (build and tests). + Ref #2965 and #3007. + """ + return os.path.isfile('pybuilddir.txt') + + def spec_for_pip(self): + """ + Ensure stdlib distutils when running under pip. + See pypa/pip#8761 for rationale. + """ + if sys.version_info >= (3, 12) or self.pip_imported_during_build(): + return + clear_distutils() + self.spec_for_distutils = lambda: None + + @classmethod + def pip_imported_during_build(cls): + """ + Detect if pip is being imported in a build script. Ref #2355. + """ + import traceback + + return any( + cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None) + ) + + @staticmethod + def frame_file_is_setup(frame): + """ + Return True if the indicated frame suggests a setup.py file. + """ + # some frames may not have __file__ (#2940) + return frame.f_globals.get('__file__', '').endswith('setup.py') + + def spec_for_sensitive_tests(self): + """ + Ensure stdlib distutils when running select tests under CPython. 
+ + python/cpython#91169 + """ + clear_distutils() + self.spec_for_distutils = lambda: None + + sensitive_tests = ( + [ + 'test.test_distutils', + 'test.test_peg_generator', + 'test.test_importlib', + ] + if sys.version_info < (3, 10) + else [ + 'test.test_distutils', + ] + ) + + +for name in DistutilsMetaFinder.sensitive_tests: + setattr( + DistutilsMetaFinder, + f'spec_for_{name}', + DistutilsMetaFinder.spec_for_sensitive_tests, + ) + + +DISTUTILS_FINDER = DistutilsMetaFinder() + + +def add_shim(): + DISTUTILS_FINDER in sys.meta_path or insert_shim() + + +class shim: + def __enter__(self) -> None: + insert_shim() + + def __exit__(self, exc: object, value: object, tb: object) -> None: + _remove_shim() + + +def insert_shim(): + sys.meta_path.insert(0, DISTUTILS_FINDER) + + +def _remove_shim(): + try: + sys.meta_path.remove(DISTUTILS_FINDER) + except ValueError: + pass + + +if sys.version_info < (3, 12): + # DistutilsMetaFinder can only be disabled in Python < 3.12 (PEP 632) + remove_shim = _remove_shim diff --git a/venv/Lib/site-packages/_distutils_hack/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/_distutils_hack/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..9bf71a4 Binary files /dev/null and b/venv/Lib/site-packages/_distutils_hack/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_distutils_hack/__pycache__/override.cpython-311.pyc b/venv/Lib/site-packages/_distutils_hack/__pycache__/override.cpython-311.pyc new file mode 100644 index 0000000..6e8905c Binary files /dev/null and b/venv/Lib/site-packages/_distutils_hack/__pycache__/override.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_distutils_hack/override.py b/venv/Lib/site-packages/_distutils_hack/override.py new file mode 100644 index 0000000..2cc433a --- /dev/null +++ b/venv/Lib/site-packages/_distutils_hack/override.py @@ -0,0 +1 @@ +__import__('_distutils_hack').do_override() diff --git a/venv/Lib/site-packages/_pytest/__init__.py b/venv/Lib/site-packages/_pytest/__init__.py new file mode 100644 index 0000000..8eb8ec9 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/__init__.py @@ -0,0 +1,13 @@ +from __future__ import annotations + + +__all__ = ["__version__", "version_tuple"] + +try: + from ._version import version as __version__ + from ._version import version_tuple +except ImportError: # pragma: no cover + # broken installation, we don't even try + # unknown only works because we do poor mans version compare + __version__ = "unknown" + version_tuple = (0, 0, "unknown") diff --git a/venv/Lib/site-packages/_pytest/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..982a885 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/_argcomplete.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/_argcomplete.cpython-311.pyc new file mode 100644 index 0000000..99f2bf3 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/_argcomplete.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/_version.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/_version.cpython-311.pyc new file mode 100644 index 0000000..f51be0e Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/_version.cpython-311.pyc differ diff --git 
a/venv/Lib/site-packages/_pytest/__pycache__/cacheprovider.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/cacheprovider.cpython-311.pyc new file mode 100644 index 0000000..d52bdfc Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/cacheprovider.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/capture.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/capture.cpython-311.pyc new file mode 100644 index 0000000..d549eb8 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/capture.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/compat.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/compat.cpython-311.pyc new file mode 100644 index 0000000..39f93b3 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/compat.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/debugging.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/debugging.cpython-311.pyc new file mode 100644 index 0000000..22230d5 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/debugging.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/deprecated.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/deprecated.cpython-311.pyc new file mode 100644 index 0000000..e53c0ed Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/deprecated.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/doctest.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/doctest.cpython-311.pyc new file mode 100644 index 0000000..fa99810 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/doctest.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/faulthandler.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/faulthandler.cpython-311.pyc new file mode 100644 index 0000000..c997a60 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/faulthandler.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/fixtures.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/fixtures.cpython-311.pyc new file mode 100644 index 0000000..f27b1d8 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/fixtures.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/freeze_support.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/freeze_support.cpython-311.pyc new file mode 100644 index 0000000..d4f2365 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/freeze_support.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/helpconfig.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/helpconfig.cpython-311.pyc new file mode 100644 index 0000000..19484b8 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/helpconfig.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/hookspec.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/hookspec.cpython-311.pyc new file mode 100644 index 0000000..3cf13ea Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/hookspec.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/junitxml.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/junitxml.cpython-311.pyc new file mode 100644 index 0000000..a1c8fec Binary files /dev/null and 
b/venv/Lib/site-packages/_pytest/__pycache__/junitxml.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/legacypath.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/legacypath.cpython-311.pyc new file mode 100644 index 0000000..dc1ee87 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/legacypath.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/logging.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/logging.cpython-311.pyc new file mode 100644 index 0000000..3751d90 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/logging.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/main.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/main.cpython-311.pyc new file mode 100644 index 0000000..d51c396 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/main.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/monkeypatch.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/monkeypatch.cpython-311.pyc new file mode 100644 index 0000000..6e77d14 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/monkeypatch.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/nodes.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/nodes.cpython-311.pyc new file mode 100644 index 0000000..addfb2d Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/nodes.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/outcomes.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/outcomes.cpython-311.pyc new file mode 100644 index 0000000..86dfee5 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/outcomes.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/pastebin.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/pastebin.cpython-311.pyc new file mode 100644 index 0000000..65e6ebb Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/pastebin.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/pathlib.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/pathlib.cpython-311.pyc new file mode 100644 index 0000000..62b8819 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/pathlib.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/pytester.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/pytester.cpython-311.pyc new file mode 100644 index 0000000..527eba9 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/pytester.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/pytester_assertions.cpython-311-pytest-8.3.3.pyc b/venv/Lib/site-packages/_pytest/__pycache__/pytester_assertions.cpython-311-pytest-8.3.3.pyc new file mode 100644 index 0000000..555c6db Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/pytester_assertions.cpython-311-pytest-8.3.3.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/pytester_assertions.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/pytester_assertions.cpython-311.pyc new file mode 100644 index 0000000..a316936 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/pytester_assertions.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/python.cpython-311.pyc 
b/venv/Lib/site-packages/_pytest/__pycache__/python.cpython-311.pyc new file mode 100644 index 0000000..7eaa002 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/python.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/python_api.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/python_api.cpython-311.pyc new file mode 100644 index 0000000..ad4dd60 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/python_api.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/python_path.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/python_path.cpython-311.pyc new file mode 100644 index 0000000..1ab81ab Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/python_path.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/recwarn.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/recwarn.cpython-311.pyc new file mode 100644 index 0000000..aef92f6 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/recwarn.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/reports.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/reports.cpython-311.pyc new file mode 100644 index 0000000..c6a567b Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/reports.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/runner.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/runner.cpython-311.pyc new file mode 100644 index 0000000..5dfa5c0 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/runner.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/scope.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/scope.cpython-311.pyc new file mode 100644 index 0000000..593dce2 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/scope.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/setuponly.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/setuponly.cpython-311.pyc new file mode 100644 index 0000000..ea71f62 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/setuponly.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/setupplan.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/setupplan.cpython-311.pyc new file mode 100644 index 0000000..9386178 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/setupplan.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/skipping.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/skipping.cpython-311.pyc new file mode 100644 index 0000000..5cc4e7a Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/skipping.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/stash.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/stash.cpython-311.pyc new file mode 100644 index 0000000..f4ff578 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/stash.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/stepwise.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/stepwise.cpython-311.pyc new file mode 100644 index 0000000..3376edc Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/stepwise.cpython-311.pyc differ diff --git 
a/venv/Lib/site-packages/_pytest/__pycache__/terminal.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/terminal.cpython-311.pyc new file mode 100644 index 0000000..1302ec6 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/terminal.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/threadexception.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/threadexception.cpython-311.pyc new file mode 100644 index 0000000..95d7964 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/threadexception.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/timing.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/timing.cpython-311.pyc new file mode 100644 index 0000000..f40aca9 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/timing.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/tmpdir.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/tmpdir.cpython-311.pyc new file mode 100644 index 0000000..0b93d2b Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/tmpdir.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/unittest.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/unittest.cpython-311.pyc new file mode 100644 index 0000000..fb17bea Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/unittest.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/unraisableexception.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/unraisableexception.cpython-311.pyc new file mode 100644 index 0000000..83cce75 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/unraisableexception.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/warning_types.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/warning_types.cpython-311.pyc new file mode 100644 index 0000000..ccb730d Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/warning_types.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/__pycache__/warnings.cpython-311.pyc b/venv/Lib/site-packages/_pytest/__pycache__/warnings.cpython-311.pyc new file mode 100644 index 0000000..d3859ba Binary files /dev/null and b/venv/Lib/site-packages/_pytest/__pycache__/warnings.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_argcomplete.py b/venv/Lib/site-packages/_pytest/_argcomplete.py new file mode 100644 index 0000000..59426ef --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_argcomplete.py @@ -0,0 +1,117 @@ +"""Allow bash-completion for argparse with argcomplete if installed. + +Needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail +to find the magic string, so _ARGCOMPLETE env. var is never set, and +this does not need special code). + +Function try_argcomplete(parser) should be called directly before +the call to ArgumentParser.parse_args(). + +The filescompleter is what you normally would use on the positional +arguments specification, in order to get "dirname/" after "dirn" +instead of the default "dirname ": + + optparser.add_argument(Config._file_or_dir, nargs='*').completer=filescompleter + +Other, application specific, completers should go in the file +doing the add_argument calls as they need to be specified as .completer +attributes as well. (If argcomplete is not installed, the function the +attribute points to will not be used). 
+ +SPEEDUP +======= + +The generic argcomplete script for bash-completion +(/etc/bash_completion.d/python-argcomplete.sh) +uses a python program to determine startup script generated by pip. +You can speed up completion somewhat by changing this script to include + # PYTHON_ARGCOMPLETE_OK +so the python-argcomplete-check-easy-install-script does not +need to be called to find the entry point of the code and see if that is +marked with PYTHON_ARGCOMPLETE_OK. + +INSTALL/DEBUGGING +================= + +To include this support in another application that has setup.py generated +scripts: + +- Add the line: + # PYTHON_ARGCOMPLETE_OK + near the top of the main python entry point. + +- Include in the file calling parse_args(): + from _argcomplete import try_argcomplete, filescompleter + Call try_argcomplete just before parse_args(), and optionally add + filescompleter to the positional arguments' add_argument(). + +If things do not work right away: + +- Switch on argcomplete debugging with (also helpful when doing custom + completers): + export _ARC_DEBUG=1 + +- Run: + python-argcomplete-check-easy-install-script $(which appname) + echo $? + will echo 0 if the magic line has been found, 1 if not. + +- Sometimes it helps to find early on errors using: + _ARGCOMPLETE=1 _ARC_DEBUG=1 appname + which should throw a KeyError: 'COMPLINE' (which is properly set by the + global argcomplete script). +""" + +from __future__ import annotations + +import argparse +from glob import glob +import os +import sys +from typing import Any + + +class FastFilesCompleter: + """Fast file completer class.""" + + def __init__(self, directories: bool = True) -> None: + self.directories = directories + + def __call__(self, prefix: str, **kwargs: Any) -> list[str]: + # Only called on non option completions. + if os.sep in prefix[1:]: + prefix_dir = len(os.path.dirname(prefix) + os.sep) + else: + prefix_dir = 0 + completion = [] + globbed = [] + if "*" not in prefix and "?" not in prefix: + # We are on unix, otherwise no bash. + if not prefix or prefix[-1] == os.sep: + globbed.extend(glob(prefix + ".*")) + prefix += "*" + globbed.extend(glob(prefix)) + for x in sorted(globbed): + if os.path.isdir(x): + x += "/" + # Append stripping the prefix (like bash, not like compgen). 
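+ # e.g. for prefix "src/ut", a globbed hit "src/utils/" is appended as + # "utils/": prefix_dir counts the characters of "src/" being stripped.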
+ completion.append(x[prefix_dir:]) + return completion + + +if os.environ.get("_ARGCOMPLETE"): + try: + import argcomplete.completers + except ImportError: + sys.exit(-1) + filescompleter: FastFilesCompleter | None = FastFilesCompleter() + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + argcomplete.autocomplete(parser, always_complete_options=False) + +else: + + def try_argcomplete(parser: argparse.ArgumentParser) -> None: + pass + + filescompleter = None diff --git a/venv/Lib/site-packages/_pytest/_code/__init__.py b/venv/Lib/site-packages/_pytest/_code/__init__.py new file mode 100644 index 0000000..0bfde42 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_code/__init__.py @@ -0,0 +1,26 @@ +"""Python inspection/code generation API.""" + +from __future__ import annotations + +from .code import Code +from .code import ExceptionInfo +from .code import filter_traceback +from .code import Frame +from .code import getfslineno +from .code import Traceback +from .code import TracebackEntry +from .source import getrawcode +from .source import Source + + +__all__ = [ + "Code", + "ExceptionInfo", + "filter_traceback", + "Frame", + "getfslineno", + "getrawcode", + "Traceback", + "TracebackEntry", + "Source", +] diff --git a/venv/Lib/site-packages/_pytest/_code/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/_pytest/_code/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..ddf4012 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_code/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_code/__pycache__/code.cpython-311.pyc b/venv/Lib/site-packages/_pytest/_code/__pycache__/code.cpython-311.pyc new file mode 100644 index 0000000..39ab730 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_code/__pycache__/code.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_code/__pycache__/source.cpython-311.pyc b/venv/Lib/site-packages/_pytest/_code/__pycache__/source.cpython-311.pyc new file mode 100644 index 0000000..fde333e Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_code/__pycache__/source.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_code/code.py b/venv/Lib/site-packages/_pytest/_code/code.py new file mode 100644 index 0000000..8fac39e --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_code/code.py @@ -0,0 +1,1403 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import ast +import dataclasses +import inspect +from inspect import CO_VARARGS +from inspect import CO_VARKEYWORDS +from io import StringIO +import os +from pathlib import Path +import re +import sys +import traceback +from traceback import format_exception_only +from types import CodeType +from types import FrameType +from types import TracebackType +from typing import Any +from typing import Callable +from typing import ClassVar +from typing import Final +from typing import final +from typing import Generic +from typing import Iterable +from typing import List +from typing import Literal +from typing import Mapping +from typing import overload +from typing import Pattern +from typing import Sequence +from typing import SupportsIndex +from typing import Tuple +from typing import Type +from typing import TypeVar +from typing import Union + +import pluggy + +import _pytest +from _pytest._code.source import findsource +from _pytest._code.source import getrawcode +from _pytest._code.source import getstatementrange_ast +from _pytest._code.source import Source +from 
_pytest._io import TerminalWriter +from _pytest._io.saferepr import safeformat +from _pytest._io.saferepr import saferepr +from _pytest.compat import get_real_func +from _pytest.deprecated import check_ispytest +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath + + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + +TracebackStyle = Literal["long", "short", "line", "no", "native", "value", "auto"] + +EXCEPTION_OR_MORE = Union[Type[BaseException], Tuple[Type[BaseException], ...]] + + +class Code: + """Wrapper around Python code objects.""" + + __slots__ = ("raw",) + + def __init__(self, obj: CodeType) -> None: + self.raw = obj + + @classmethod + def from_function(cls, obj: object) -> Code: + return cls(getrawcode(obj)) + + def __eq__(self, other): + return self.raw == other.raw + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @property + def firstlineno(self) -> int: + return self.raw.co_firstlineno - 1 + + @property + def name(self) -> str: + return self.raw.co_name + + @property + def path(self) -> Path | str: + """Return a path object pointing to source code, or an ``str`` in + case of ``OSError`` / non-existing file.""" + if not self.raw.co_filename: + return "" + try: + p = absolutepath(self.raw.co_filename) + # maybe don't try this checking + if not p.exists(): + raise OSError("path check failed.") + return p + except OSError: + # XXX maybe try harder like the weird logic + # in the standard lib [linecache.updatecache] does? + return self.raw.co_filename + + @property + def fullsource(self) -> Source | None: + """Return a _pytest._code.Source object for the full source file of the code.""" + full, _ = findsource(self.raw) + return full + + def source(self) -> Source: + """Return a _pytest._code.Source object for the code object's source only.""" + # return source only for that part of code + return Source(self.raw) + + def getargs(self, var: bool = False) -> tuple[str, ...]: + """Return a tuple with the argument names for the code object. + + If 'var' is set True also return the names of the variable and + keyword arguments when present. + """ + # Handy shortcut for getting args. + raw = self.raw + argcount = raw.co_argcount + if var: + argcount += raw.co_flags & CO_VARARGS + argcount += raw.co_flags & CO_VARKEYWORDS + return raw.co_varnames[:argcount] + + +class Frame: + """Wrapper around a Python frame holding f_locals and f_globals + in which expressions can be evaluated.""" + + __slots__ = ("raw",) + + def __init__(self, frame: FrameType) -> None: + self.raw = frame + + @property + def lineno(self) -> int: + return self.raw.f_lineno - 1 + + @property + def f_globals(self) -> dict[str, Any]: + return self.raw.f_globals + + @property + def f_locals(self) -> dict[str, Any]: + return self.raw.f_locals + + @property + def code(self) -> Code: + return Code(self.raw.f_code) + + @property + def statement(self) -> Source: + """Statement this frame is at.""" + if self.code.fullsource is None: + return Source("") + return self.code.fullsource.getstatement(self.lineno) + + def eval(self, code, **vars): + """Evaluate 'code' in the frame. + + 'vars' are optional additional local variables. + + Returns the result of the evaluation. 
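+ + For example (an illustrative sketch): frame.eval("x + 1", x=41) + evaluates against the frame's globals plus the given locals and returns 42.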
+ """ + f_locals = self.f_locals.copy() + f_locals.update(vars) + return eval(code, self.f_globals, f_locals) + + def repr(self, object: object) -> str: + """Return a 'safe' (non-recursive, one-line) string repr for 'object'.""" + return saferepr(object) + + def getargs(self, var: bool = False): + """Return a list of tuples (name, value) for all arguments. + + If 'var' is set True, also include the variable and keyword arguments + when present. + """ + retval = [] + for arg in self.code.getargs(var): + try: + retval.append((arg, self.f_locals[arg])) + except KeyError: + pass # this can occur when using Psyco + return retval + + +class TracebackEntry: + """A single entry in a Traceback.""" + + __slots__ = ("_rawentry", "_repr_style") + + def __init__( + self, + rawentry: TracebackType, + repr_style: Literal["short", "long"] | None = None, + ) -> None: + self._rawentry: Final = rawentry + self._repr_style: Final = repr_style + + def with_repr_style( + self, repr_style: Literal["short", "long"] | None + ) -> TracebackEntry: + return TracebackEntry(self._rawentry, repr_style) + + @property + def lineno(self) -> int: + return self._rawentry.tb_lineno - 1 + + @property + def frame(self) -> Frame: + return Frame(self._rawentry.tb_frame) + + @property + def relline(self) -> int: + return self.lineno - self.frame.code.firstlineno + + def __repr__(self) -> str: + return "" % (self.frame.code.path, self.lineno + 1) + + @property + def statement(self) -> Source: + """_pytest._code.Source object for the current statement.""" + source = self.frame.code.fullsource + assert source is not None + return source.getstatement(self.lineno) + + @property + def path(self) -> Path | str: + """Path to the source code.""" + return self.frame.code.path + + @property + def locals(self) -> dict[str, Any]: + """Locals of underlying frame.""" + return self.frame.f_locals + + def getfirstlinesource(self) -> int: + return self.frame.code.firstlineno + + def getsource( + self, astcache: dict[str | Path, ast.AST] | None = None + ) -> Source | None: + """Return failing source code.""" + # we use the passed in astcache to not reparse asttrees + # within exception info printing + source = self.frame.code.fullsource + if source is None: + return None + key = astnode = None + if astcache is not None: + key = self.frame.code.path + if key is not None: + astnode = astcache.get(key, None) + start = self.getfirstlinesource() + try: + astnode, _, end = getstatementrange_ast( + self.lineno, source, astnode=astnode + ) + except SyntaxError: + end = self.lineno + 1 + else: + if key is not None and astcache is not None: + astcache[key] = astnode + return source[start:end] + + source = property(getsource) + + def ishidden(self, excinfo: ExceptionInfo[BaseException] | None) -> bool: + """Return True if the current frame has a var __tracebackhide__ + resolving to True. + + If __tracebackhide__ is a callable, it gets called with the + ExceptionInfo instance and can decide whether to hide the traceback. + + Mostly for internal use. + """ + tbh: bool | Callable[[ExceptionInfo[BaseException] | None], bool] = False + for maybe_ns_dct in (self.frame.f_locals, self.frame.f_globals): + # in normal cases, f_locals and f_globals are dictionaries + # however via `exec(...)` / `eval(...)` they can be other types + # (even incorrect types!). 
+ # as such, we suppress all exceptions while accessing __tracebackhide__ + try: + tbh = maybe_ns_dct["__tracebackhide__"] + except Exception: + pass + else: + break + if tbh and callable(tbh): + return tbh(excinfo) + return tbh + + def __str__(self) -> str: + name = self.frame.code.name + try: + line = str(self.statement).lstrip() + except KeyboardInterrupt: + raise + except BaseException: + line = "???" + # This output does not quite match Python's repr for traceback entries, + # but changing it to do so would break certain plugins. See + # https://github.com/pytest-dev/pytest/pull/7535/ for details. + return " File %r:%d in %s\n %s\n" % ( + str(self.path), + self.lineno + 1, + name, + line, + ) + + @property + def name(self) -> str: + """co_name of underlying code.""" + return self.frame.code.raw.co_name + + +class Traceback(List[TracebackEntry]): + """Traceback objects encapsulate and offer higher level access to Traceback entries.""" + + def __init__( + self, + tb: TracebackType | Iterable[TracebackEntry], + ) -> None: + """Initialize from given python traceback object and ExceptionInfo.""" + if isinstance(tb, TracebackType): + + def f(cur: TracebackType) -> Iterable[TracebackEntry]: + cur_: TracebackType | None = cur + while cur_ is not None: + yield TracebackEntry(cur_) + cur_ = cur_.tb_next + + super().__init__(f(tb)) + else: + super().__init__(tb) + + def cut( + self, + path: os.PathLike[str] | str | None = None, + lineno: int | None = None, + firstlineno: int | None = None, + excludepath: os.PathLike[str] | None = None, + ) -> Traceback: + """Return a Traceback instance wrapping part of this Traceback. + + By providing any combination of path, lineno and firstlineno, the + first frame to start the to-be-returned traceback is determined. + + This allows cutting the first part of a Traceback instance e.g. + for formatting reasons (removing some uninteresting bits that deal + with handling of the exception/traceback). + """ + path_ = None if path is None else os.fspath(path) + excludepath_ = None if excludepath is None else os.fspath(excludepath) + for x in self: + code = x.frame.code + codepath = code.path + if path is not None and str(codepath) != path_: + continue + if ( + excludepath is not None + and isinstance(codepath, Path) + and excludepath_ in (str(p) for p in codepath.parents) # type: ignore[operator] + ): + continue + if lineno is not None and x.lineno != lineno: + continue + if firstlineno is not None and x.frame.code.firstlineno != firstlineno: + continue + return Traceback(x._rawentry) + return self + + @overload + def __getitem__(self, key: SupportsIndex) -> TracebackEntry: ... + + @overload + def __getitem__(self, key: slice) -> Traceback: ... + + def __getitem__(self, key: SupportsIndex | slice) -> TracebackEntry | Traceback: + if isinstance(key, slice): + return self.__class__(super().__getitem__(key)) + else: + return super().__getitem__(key) + + def filter( + self, + excinfo_or_fn: ExceptionInfo[BaseException] | Callable[[TracebackEntry], bool], + /, + ) -> Traceback: + """Return a Traceback instance with certain items removed. + + If the filter is an `ExceptionInfo`, removes all the ``TracebackEntry``s + which are hidden (see ishidden() above). + + Otherwise, the filter is a function that gets a single argument, a + ``TracebackEntry`` instance, and should return True when the item should + be added to the ``Traceback``, False when not. 
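+ + For example (illustrative): + tb.filter(lambda entry: "site-packages" not in str(entry.path)) + keeps only entries whose source lives outside installed packages.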
+ """ + if isinstance(excinfo_or_fn, ExceptionInfo): + fn = lambda x: not x.ishidden(excinfo_or_fn) # noqa: E731 + else: + fn = excinfo_or_fn + return Traceback(filter(fn, self)) + + def recursionindex(self) -> int | None: + """Return the index of the frame/TracebackEntry where recursion originates if + appropriate, None if no recursion occurred.""" + cache: dict[tuple[Any, int, int], list[dict[str, Any]]] = {} + for i, entry in enumerate(self): + # id for the code.raw is needed to work around + # the strange metaprogramming in the decorator lib from pypi + # which generates code objects that have hash/value equality + # XXX needs a test + key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno + values = cache.setdefault(key, []) + # Since Python 3.13 f_locals is a proxy, freeze it. + loc = dict(entry.frame.f_locals) + if values: + for otherloc in values: + if otherloc == loc: + return i + values.append(loc) + return None + + +E = TypeVar("E", bound=BaseException, covariant=True) + + +@final +@dataclasses.dataclass +class ExceptionInfo(Generic[E]): + """Wraps sys.exc_info() objects and offers help for navigating the traceback.""" + + _assert_start_repr: ClassVar = "AssertionError('assert " + + _excinfo: tuple[type[E], E, TracebackType] | None + _striptext: str + _traceback: Traceback | None + + def __init__( + self, + excinfo: tuple[type[E], E, TracebackType] | None, + striptext: str = "", + traceback: Traceback | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._excinfo = excinfo + self._striptext = striptext + self._traceback = traceback + + @classmethod + def from_exception( + cls, + # Ignoring error: "Cannot use a covariant type variable as a parameter". + # This is OK to ignore because this class is (conceptually) readonly. + # See https://github.com/python/mypy/issues/7049. + exception: E, # type: ignore[misc] + exprinfo: str | None = None, + ) -> ExceptionInfo[E]: + """Return an ExceptionInfo for an existing exception. + + The exception must have a non-``None`` ``__traceback__`` attribute, + otherwise this function fails with an assertion error. This means that + the exception must have been raised, or added a traceback with the + :py:meth:`~BaseException.with_traceback()` method. + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. + + .. versionadded:: 7.4 + """ + assert exception.__traceback__, ( + "Exceptions passed to ExcInfo.from_exception(...)" + " must have a non-None __traceback__." + ) + exc_info = (type(exception), exception, exception.__traceback__) + return cls.from_exc_info(exc_info, exprinfo) + + @classmethod + def from_exc_info( + cls, + exc_info: tuple[type[E], E, TracebackType], + exprinfo: str | None = None, + ) -> ExceptionInfo[E]: + """Like :func:`from_exception`, but using old-style exc_info tuple.""" + _striptext = "" + if exprinfo is None and isinstance(exc_info[1], AssertionError): + exprinfo = getattr(exc_info[1], "msg", None) + if exprinfo is None: + exprinfo = saferepr(exc_info[1]) + if exprinfo and exprinfo.startswith(cls._assert_start_repr): + _striptext = "AssertionError: " + + return cls(exc_info, _striptext, _ispytest=True) + + @classmethod + def from_current(cls, exprinfo: str | None = None) -> ExceptionInfo[BaseException]: + """Return an ExceptionInfo matching the current traceback. + + .. 
warning:: + + Experimental API + + :param exprinfo: + A text string helping to determine if we should strip + ``AssertionError`` from the output. Defaults to the exception + message/``__str__()``. + """ + tup = sys.exc_info() + assert tup[0] is not None, "no current exception" + assert tup[1] is not None, "no current exception" + assert tup[2] is not None, "no current exception" + exc_info = (tup[0], tup[1], tup[2]) + return ExceptionInfo.from_exc_info(exc_info, exprinfo) + + @classmethod + def for_later(cls) -> ExceptionInfo[E]: + """Return an unfilled ExceptionInfo.""" + return cls(None, _ispytest=True) + + def fill_unfilled(self, exc_info: tuple[type[E], E, TracebackType]) -> None: + """Fill an unfilled ExceptionInfo created with ``for_later()``.""" + assert self._excinfo is None, "ExceptionInfo was already filled" + self._excinfo = exc_info + + @property + def type(self) -> type[E]: + """The exception class.""" + assert ( + self._excinfo is not None + ), ".type can only be used after the context manager exits" + return self._excinfo[0] + + @property + def value(self) -> E: + """The exception value.""" + assert ( + self._excinfo is not None + ), ".value can only be used after the context manager exits" + return self._excinfo[1] + + @property + def tb(self) -> TracebackType: + """The exception raw traceback.""" + assert ( + self._excinfo is not None + ), ".tb can only be used after the context manager exits" + return self._excinfo[2] + + @property + def typename(self) -> str: + """The type name of the exception.""" + assert ( + self._excinfo is not None + ), ".typename can only be used after the context manager exits" + return self.type.__name__ + + @property + def traceback(self) -> Traceback: + """The traceback.""" + if self._traceback is None: + self._traceback = Traceback(self.tb) + return self._traceback + + @traceback.setter + def traceback(self, value: Traceback) -> None: + self._traceback = value + + def __repr__(self) -> str: + if self._excinfo is None: + return "<ExceptionInfo for (unfilled) at %#x>" % (id(self),) + return f"<{self.__class__.__name__} {saferepr(self._excinfo[1])} tblen={len(self.traceback)}>" + + def exconly(self, tryshort: bool = False) -> str: + """Return the exception as a string. + + When 'tryshort' resolves to True, and the exception is an + AssertionError, only the actual exception part of the exception + representation is returned (so 'AssertionError: ' is removed from + the beginning). + """ + lines = format_exception_only(self.type, self.value) + text = "".join(lines) + text = text.rstrip() + if tryshort: + if text.startswith(self._striptext): + text = text[len(self._striptext) :] + return text + + def errisinstance(self, exc: EXCEPTION_OR_MORE) -> bool: + """Return True if the exception is an instance of exc. + + Consider using ``isinstance(excinfo.value, exc)`` instead. + """ + return isinstance(self.value, exc) + + def _getreprcrash(self) -> ReprFileLocation | None: + # Find last non-hidden traceback entry that led to the exception of the + # traceback, or None if all hidden.
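+ # (range(-1, -len - 1, -1) walks the entries from the most recent one + # backwards, so the frame nearest the raise wins)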
+ for i in range(-1, -len(self.traceback) - 1, -1): + entry = self.traceback[i] + if not entry.ishidden(self): + path, lineno = entry.frame.code.raw.co_filename, entry.lineno + exconly = self.exconly(tryshort=True) + return ReprFileLocation(path, lineno + 1, exconly) + return None + + def getrepr( + self, + showlocals: bool = False, + style: TracebackStyle = "long", + abspath: bool = False, + tbfilter: bool + | Callable[[ExceptionInfo[BaseException]], _pytest._code.code.Traceback] = True, + funcargs: bool = False, + truncate_locals: bool = True, + truncate_args: bool = True, + chain: bool = True, + ) -> ReprExceptionInfo | ExceptionChainRepr: + """Return str()able representation of this exception info. + + :param bool showlocals: + Show locals per traceback entry. + Ignored if ``style=="native"``. + + :param str style: + long|short|line|no|native|value traceback style. + + :param bool abspath: + If paths should be changed to absolute or left unchanged. + + :param tbfilter: + A filter for traceback entries. + + * If false, don't hide any entries. + * If true, hide internal entries and entries that contain a local + variable ``__tracebackhide__ = True``. + * If a callable, delegates the filtering to the callable. + + Ignored if ``style`` is ``"native"``. + + :param bool funcargs: + Show fixtures ("funcargs" for legacy purposes) per traceback entry. + + :param bool truncate_locals: + With ``showlocals==True``, make sure locals can be safely represented as strings. + + :param bool truncate_args: + With ``showargs==True``, make sure args can be safely represented as strings. + + :param bool chain: + If chained exceptions in Python 3 should be shown. + + .. versionchanged:: 3.9 + + Added the ``chain`` parameter. + """ + if style == "native": + return ReprExceptionInfo( + reprtraceback=ReprTracebackNative( + traceback.format_exception( + self.type, + self.value, + self.traceback[0]._rawentry if self.traceback else None, + ) + ), + reprcrash=self._getreprcrash(), + ) + + fmt = FormattedExcinfo( + showlocals=showlocals, + style=style, + abspath=abspath, + tbfilter=tbfilter, + funcargs=funcargs, + truncate_locals=truncate_locals, + truncate_args=truncate_args, + chain=chain, + ) + return fmt.repr_excinfo(self) + + def _stringify_exception(self, exc: BaseException) -> str: + try: + notes = getattr(exc, "__notes__", []) + except KeyError: + # Workaround for https://github.com/python/cpython/issues/98778 on + # Python <= 3.9, and some 3.10 and 3.11 patch versions. + HTTPError = getattr(sys.modules.get("urllib.error", None), "HTTPError", ()) + if sys.version_info < (3, 12) and isinstance(exc, HTTPError): + notes = [] + else: + raise + + return "\n".join( + [ + str(exc), + *notes, + ] + ) + + def match(self, regexp: str | Pattern[str]) -> Literal[True]: + """Check whether the regular expression `regexp` matches the string + representation of the exception using :func:`python:re.search`. + + If it matches `True` is returned, otherwise an `AssertionError` is raised. + """ + __tracebackhide__ = True + value = self._stringify_exception(self.value) + msg = f"Regex pattern did not match.\n Regex: {regexp!r}\n Input: {value!r}" + if regexp == value: + msg += "\n Did you mean to `re.escape()` the regex?" + assert re.search(regexp, value), msg + # Return True to allow for "assert excinfo.match()". 
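+ # Typical usage (illustrative): + # with pytest.raises(ValueError) as excinfo: + # raise ValueError("value must be 42") + # assert excinfo.match(r"must be \d+")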
+ return True + + def _group_contains( + self, + exc_group: BaseExceptionGroup[BaseException], + expected_exception: EXCEPTION_OR_MORE, + match: str | Pattern[str] | None, + target_depth: int | None = None, + current_depth: int = 1, + ) -> bool: + """Return `True` if a `BaseExceptionGroup` contains a matching exception.""" + if (target_depth is not None) and (current_depth > target_depth): + # already descended past the target depth + return False + for exc in exc_group.exceptions: + if isinstance(exc, BaseExceptionGroup): + if self._group_contains( + exc, expected_exception, match, target_depth, current_depth + 1 + ): + return True + if (target_depth is not None) and (current_depth != target_depth): + # not at the target depth, no match + continue + if not isinstance(exc, expected_exception): + continue + if match is not None: + value = self._stringify_exception(exc) + if not re.search(match, value): + continue + return True + return False + + def group_contains( + self, + expected_exception: EXCEPTION_OR_MORE, + *, + match: str | Pattern[str] | None = None, + depth: int | None = None, + ) -> bool: + """Check whether a captured exception group contains a matching exception. + + :param Type[BaseException] | Tuple[Type[BaseException]] expected_exception: + The expected exception type, or a tuple if one of multiple possible + exception types are expected. + + :param str | Pattern[str] | None match: + If specified, a string containing a regular expression, + or a regular expression object, that is tested against the string + representation of the exception and its `PEP-678 ` `__notes__` + using :func:`re.search`. + + To match a literal string that may contain :ref:`special characters + `, the pattern can first be escaped with :func:`re.escape`. + + :param Optional[int] depth: + If `None`, will search for a matching exception at any nesting depth. + If >= 1, will only match an exception if it's at the specified depth (depth = 1 being + the exceptions contained within the topmost exception group). + + .. versionadded:: 8.0 + """ + msg = "Captured exception is not an instance of `BaseExceptionGroup`" + assert isinstance(self.value, BaseExceptionGroup), msg + msg = "`depth` must be >= 1 if specified" + assert (depth is None) or (depth >= 1), msg + return self._group_contains(self.value, expected_exception, match, depth) + + +@dataclasses.dataclass +class FormattedExcinfo: + """Presenting information about failing Functions and Generators.""" + + # for traceback entries + flow_marker: ClassVar = ">" + fail_marker: ClassVar = "E" + + showlocals: bool = False + style: TracebackStyle = "long" + abspath: bool = True + tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback] = True + funcargs: bool = False + truncate_locals: bool = True + truncate_args: bool = True + chain: bool = True + astcache: dict[str | Path, ast.AST] = dataclasses.field( + default_factory=dict, init=False, repr=False + ) + + def _getindent(self, source: Source) -> int: + # Figure out indent for the given source. 
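+ # e.g. a statement indented by 8 spaces yields 12: its own indent plus + # a 4-space continuation indent for the marked-up exception lines.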
+ try: + s = str(source.getstatement(len(source) - 1)) + except KeyboardInterrupt: + raise + except BaseException: + try: + s = str(source[-1]) + except KeyboardInterrupt: + raise + except BaseException: + return 0 + return 4 + (len(s) - len(s.lstrip())) + + def _getentrysource(self, entry: TracebackEntry) -> Source | None: + source = entry.getsource(self.astcache) + if source is not None: + source = source.deindent() + return source + + def repr_args(self, entry: TracebackEntry) -> ReprFuncArgs | None: + if self.funcargs: + args = [] + for argname, argvalue in entry.frame.getargs(var=True): + if self.truncate_args: + str_repr = saferepr(argvalue) + else: + str_repr = saferepr(argvalue, maxsize=None) + args.append((argname, str_repr)) + return ReprFuncArgs(args) + return None + + def get_source( + self, + source: Source | None, + line_index: int = -1, + excinfo: ExceptionInfo[BaseException] | None = None, + short: bool = False, + ) -> list[str]: + """Return formatted and marked up source lines.""" + lines = [] + if source is not None and line_index < 0: + line_index += len(source) + if source is None or line_index >= len(source.lines) or line_index < 0: + # `line_index` could still be outside `range(len(source.lines))` if + # we're processing AST with pathological position attributes. + source = Source("???") + line_index = 0 + space_prefix = " " + if short: + lines.append(space_prefix + source.lines[line_index].strip()) + else: + for line in source.lines[:line_index]: + lines.append(space_prefix + line) + lines.append(self.flow_marker + " " + source.lines[line_index]) + for line in source.lines[line_index + 1 :]: + lines.append(space_prefix + line) + if excinfo is not None: + indent = 4 if short else self._getindent(source) + lines.extend(self.get_exconly(excinfo, indent=indent, markall=True)) + return lines + + def get_exconly( + self, + excinfo: ExceptionInfo[BaseException], + indent: int = 4, + markall: bool = False, + ) -> list[str]: + lines = [] + indentstr = " " * indent + # Get the real exception information out. + exlines = excinfo.exconly(tryshort=True).split("\n") + failindent = self.fail_marker + indentstr[1:] + for line in exlines: + lines.append(failindent + line) + if not markall: + failindent = indentstr + return lines + + def repr_locals(self, locals: Mapping[str, object]) -> ReprLocals | None: + if self.showlocals: + lines = [] + keys = [loc for loc in locals if loc[0] != "@"] + keys.sort() + for name in keys: + value = locals[name] + if name == "__builtins__": + lines.append("__builtins__ = <builtins>") + else: + # This formatting could all be handled by the + # _repr() function, which is only reprlib.Repr in + # disguise, so is very configurable.
+ if self.truncate_locals: + str_repr = saferepr(value) + else: + str_repr = safeformat(value) + # if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)): + lines.append(f"{name:<10} = {str_repr}") + # else: + # self._line("%-10s =\\" % (name,)) + # # XXX + # pprint.pprint(value, stream=self.excinfowriter) + return ReprLocals(lines) + return None + + def repr_traceback_entry( + self, + entry: TracebackEntry | None, + excinfo: ExceptionInfo[BaseException] | None = None, + ) -> ReprEntry: + lines: list[str] = [] + style = ( + entry._repr_style + if entry is not None and entry._repr_style is not None + else self.style + ) + if style in ("short", "long") and entry is not None: + source = self._getentrysource(entry) + if source is None: + source = Source("???") + line_index = 0 + else: + line_index = entry.lineno - entry.getfirstlinesource() + short = style == "short" + reprargs = self.repr_args(entry) if not short else None + s = self.get_source(source, line_index, excinfo, short=short) + lines.extend(s) + if short: + message = f"in {entry.name}" + else: + message = excinfo and excinfo.typename or "" + entry_path = entry.path + path = self._makepath(entry_path) + reprfileloc = ReprFileLocation(path, entry.lineno + 1, message) + localsrepr = self.repr_locals(entry.locals) + return ReprEntry(lines, reprargs, localsrepr, reprfileloc, style) + elif style == "value": + if excinfo: + lines.extend(str(excinfo.value).split("\n")) + return ReprEntry(lines, None, None, None, style) + else: + if excinfo: + lines.extend(self.get_exconly(excinfo, indent=4)) + return ReprEntry(lines, None, None, None, style) + + def _makepath(self, path: Path | str) -> str: + if not self.abspath and isinstance(path, Path): + try: + np = bestrelpath(Path.cwd(), path) + except OSError: + return str(path) + if len(np) < len(str(path)): + return np + return str(path) + + def repr_traceback(self, excinfo: ExceptionInfo[BaseException]) -> ReprTraceback: + traceback = excinfo.traceback + if callable(self.tbfilter): + traceback = self.tbfilter(excinfo) + elif self.tbfilter: + traceback = traceback.filter(excinfo) + + if isinstance(excinfo.value, RecursionError): + traceback, extraline = self._truncate_recursive_traceback(traceback) + else: + extraline = None + + if not traceback: + if extraline is None: + extraline = "All traceback entries are hidden. Pass `--full-trace` to see hidden and internal frames." + entries = [self.repr_traceback_entry(None, excinfo)] + return ReprTraceback(entries, extraline, style=self.style) + + last = traceback[-1] + if self.style == "value": + entries = [self.repr_traceback_entry(last, excinfo)] + return ReprTraceback(entries, None, style=self.style) + + entries = [ + self.repr_traceback_entry(entry, excinfo if last == entry else None) + for entry in traceback + ] + return ReprTraceback(entries, extraline, style=self.style) + + def _truncate_recursive_traceback( + self, traceback: Traceback + ) -> tuple[Traceback, str | None]: + """Truncate the given recursive traceback trying to find the starting + point of the recursion. + + The detection is done by going through each traceback entry and + finding the point in which the locals of the frame are equal to the + locals of a previous frame (see ``recursionindex()``). + + Handle the situation where the recursion process might raise an + exception (for example comparing numpy arrays using equality raises a + TypeError), in which case we do our best to warn the user of the + error and show a limited traceback. 
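+ + For example (illustrative), a crash from f() unconditionally calling + f() is cut at the first frame whose locals repeat an earlier frame's + locals, and an extra line flags the recursion.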
+ """ + try: + recursionindex = traceback.recursionindex() + except Exception as e: + max_frames = 10 + extraline: str | None = ( + "!!! Recursion error detected, but an error occurred locating the origin of recursion.\n" + " The following exception happened when comparing locals in the stack frame:\n" + f" {type(e).__name__}: {e!s}\n" + f" Displaying first and last {max_frames} stack frames out of {len(traceback)}." + ) + # Type ignored because adding two instances of a List subtype + # currently incorrectly has type List instead of the subtype. + traceback = traceback[:max_frames] + traceback[-max_frames:] # type: ignore + else: + if recursionindex is not None: + extraline = "!!! Recursion detected (same locals & position)" + traceback = traceback[: recursionindex + 1] + else: + extraline = None + + return traceback, extraline + + def repr_excinfo(self, excinfo: ExceptionInfo[BaseException]) -> ExceptionChainRepr: + repr_chain: list[tuple[ReprTraceback, ReprFileLocation | None, str | None]] = [] + e: BaseException | None = excinfo.value + excinfo_: ExceptionInfo[BaseException] | None = excinfo + descr = None + seen: set[int] = set() + while e is not None and id(e) not in seen: + seen.add(id(e)) + + if excinfo_: + # Fall back to native traceback as a temporary workaround until + # full support for exception groups added to ExceptionInfo. + # See https://github.com/pytest-dev/pytest/issues/9159 + if isinstance(e, BaseExceptionGroup): + reprtraceback: ReprTracebackNative | ReprTraceback = ( + ReprTracebackNative( + traceback.format_exception( + type(excinfo_.value), + excinfo_.value, + excinfo_.traceback[0]._rawentry, + ) + ) + ) + else: + reprtraceback = self.repr_traceback(excinfo_) + reprcrash = excinfo_._getreprcrash() + else: + # Fallback to native repr if the exception doesn't have a traceback: + # ExceptionInfo objects require a full traceback to work. + reprtraceback = ReprTracebackNative( + traceback.format_exception(type(e), e, None) + ) + reprcrash = None + repr_chain += [(reprtraceback, reprcrash, descr)] + + if e.__cause__ is not None and self.chain: + e = e.__cause__ + excinfo_ = ExceptionInfo.from_exception(e) if e.__traceback__ else None + descr = "The above exception was the direct cause of the following exception:" + elif ( + e.__context__ is not None and not e.__suppress_context__ and self.chain + ): + e = e.__context__ + excinfo_ = ExceptionInfo.from_exception(e) if e.__traceback__ else None + descr = "During handling of the above exception, another exception occurred:" + else: + e = None + repr_chain.reverse() + return ExceptionChainRepr(repr_chain) + + +@dataclasses.dataclass(eq=False) +class TerminalRepr: + def __str__(self) -> str: + # FYI this is called from pytest-xdist's serialization of exception + # information. + io = StringIO() + tw = TerminalWriter(file=io) + self.toterminal(tw) + return io.getvalue().strip() + + def __repr__(self) -> str: + return f"<{self.__class__} instance at {id(self):0x}>" + + def toterminal(self, tw: TerminalWriter) -> None: + raise NotImplementedError() + + +# This class is abstract -- only subclasses are instantiated. +@dataclasses.dataclass(eq=False) +class ExceptionRepr(TerminalRepr): + # Provided by subclasses. 
+ reprtraceback: ReprTraceback + reprcrash: ReprFileLocation | None + sections: list[tuple[str, str, str]] = dataclasses.field( + init=False, default_factory=list + ) + + def addsection(self, name: str, content: str, sep: str = "-") -> None: + self.sections.append((name, content, sep)) + + def toterminal(self, tw: TerminalWriter) -> None: + for name, content, sep in self.sections: + tw.sep(sep, name) + tw.line(content) + + +@dataclasses.dataclass(eq=False) +class ExceptionChainRepr(ExceptionRepr): + chain: Sequence[tuple[ReprTraceback, ReprFileLocation | None, str | None]] + + def __init__( + self, + chain: Sequence[tuple[ReprTraceback, ReprFileLocation | None, str | None]], + ) -> None: + # reprcrash and reprtraceback of the outermost (the newest) exception + # in the chain. + super().__init__( + reprtraceback=chain[-1][0], + reprcrash=chain[-1][1], + ) + self.chain = chain + + def toterminal(self, tw: TerminalWriter) -> None: + for element in self.chain: + element[0].toterminal(tw) + if element[2] is not None: + tw.line("") + tw.line(element[2], yellow=True) + super().toterminal(tw) + + +@dataclasses.dataclass(eq=False) +class ReprExceptionInfo(ExceptionRepr): + reprtraceback: ReprTraceback + reprcrash: ReprFileLocation | None + + def toterminal(self, tw: TerminalWriter) -> None: + self.reprtraceback.toterminal(tw) + super().toterminal(tw) + + +@dataclasses.dataclass(eq=False) +class ReprTraceback(TerminalRepr): + reprentries: Sequence[ReprEntry | ReprEntryNative] + extraline: str | None + style: TracebackStyle + + entrysep: ClassVar = "_ " + + def toterminal(self, tw: TerminalWriter) -> None: + # The entries might have different styles. + for i, entry in enumerate(self.reprentries): + if entry.style == "long": + tw.line("") + entry.toterminal(tw) + if i < len(self.reprentries) - 1: + next_entry = self.reprentries[i + 1] + if ( + entry.style == "long" + or entry.style == "short" + and next_entry.style == "long" + ): + tw.sep(self.entrysep) + + if self.extraline: + tw.line(self.extraline) + + +class ReprTracebackNative(ReprTraceback): + def __init__(self, tblines: Sequence[str]) -> None: + self.reprentries = [ReprEntryNative(tblines)] + self.extraline = None + self.style = "native" + + +@dataclasses.dataclass(eq=False) +class ReprEntryNative(TerminalRepr): + lines: Sequence[str] + + style: ClassVar[TracebackStyle] = "native" + + def toterminal(self, tw: TerminalWriter) -> None: + tw.write("".join(self.lines)) + + +@dataclasses.dataclass(eq=False) +class ReprEntry(TerminalRepr): + lines: Sequence[str] + reprfuncargs: ReprFuncArgs | None + reprlocals: ReprLocals | None + reprfileloc: ReprFileLocation | None + style: TracebackStyle + + def _write_entry_lines(self, tw: TerminalWriter) -> None: + """Write the source code portions of a list of traceback entries with syntax highlighting. + + Usually entries are lines like these: + + " x = 1" + "> assert x == 2" + "E assert 1 == 2" + + This function takes care of rendering the "source" portions of it (the lines without + the "E" prefix) using syntax highlighting, taking care to not highlighting the ">" + character, as doing so might break line continuations. 
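+ + (Illustratively, only " x = 1" and "assert x == 2" are passed to the + syntax highlighter here; the two-column markers and the "E ..." failure + lines are written separately, the latter in plain bold red.)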
+ """ + if not self.lines: + return + + # separate indents and source lines that are not failures: we want to + # highlight the code but not the indentation, which may contain markers + # such as "> assert 0" + fail_marker = f"{FormattedExcinfo.fail_marker} " + indent_size = len(fail_marker) + indents: list[str] = [] + source_lines: list[str] = [] + failure_lines: list[str] = [] + for index, line in enumerate(self.lines): + is_failure_line = line.startswith(fail_marker) + if is_failure_line: + # from this point on all lines are considered part of the failure + failure_lines.extend(self.lines[index:]) + break + else: + if self.style == "value": + source_lines.append(line) + else: + indents.append(line[:indent_size]) + source_lines.append(line[indent_size:]) + + tw._write_source(source_lines, indents) + + # failure lines are always completely red and bold + for line in failure_lines: + tw.line(line, bold=True, red=True) + + def toterminal(self, tw: TerminalWriter) -> None: + if self.style == "short": + if self.reprfileloc: + self.reprfileloc.toterminal(tw) + self._write_entry_lines(tw) + if self.reprlocals: + self.reprlocals.toterminal(tw, indent=" " * 8) + return + + if self.reprfuncargs: + self.reprfuncargs.toterminal(tw) + + self._write_entry_lines(tw) + + if self.reprlocals: + tw.line("") + self.reprlocals.toterminal(tw) + if self.reprfileloc: + if self.lines: + tw.line("") + self.reprfileloc.toterminal(tw) + + def __str__(self) -> str: + return "{}\n{}\n{}".format( + "\n".join(self.lines), self.reprlocals, self.reprfileloc + ) + + +@dataclasses.dataclass(eq=False) +class ReprFileLocation(TerminalRepr): + path: str + lineno: int + message: str + + def __post_init__(self) -> None: + self.path = str(self.path) + + def toterminal(self, tw: TerminalWriter) -> None: + # Filename and lineno output for each entry, using an output format + # that most editors understand. + msg = self.message + i = msg.find("\n") + if i != -1: + msg = msg[:i] + tw.write(self.path, bold=True, red=True) + tw.line(f":{self.lineno}: {msg}") + + +@dataclasses.dataclass(eq=False) +class ReprLocals(TerminalRepr): + lines: Sequence[str] + + def toterminal(self, tw: TerminalWriter, indent="") -> None: + for line in self.lines: + tw.line(indent + line) + + +@dataclasses.dataclass(eq=False) +class ReprFuncArgs(TerminalRepr): + args: Sequence[tuple[str, object]] + + def toterminal(self, tw: TerminalWriter) -> None: + if self.args: + linesofar = "" + for name, value in self.args: + ns = f"{name} = {value}" + if len(ns) + len(linesofar) + 2 > tw.fullwidth: + if linesofar: + tw.line(linesofar) + linesofar = ns + else: + if linesofar: + linesofar += ", " + ns + else: + linesofar = ns + if linesofar: + tw.line(linesofar) + tw.line("") + + +def getfslineno(obj: object) -> tuple[str | Path, int]: + """Return source location (path, lineno) for the given object. + + If the source cannot be determined return ("", -1). + + The line number is 0-based. + """ + # xxx let decorators etc specify a sane ordering + # NOTE: this used to be done in _pytest.compat.getfslineno, initially added + # in 6ec13a2b9. It ("place_as") appears to be something very custom. 
+ obj = get_real_func(obj) + if hasattr(obj, "place_as"): + obj = obj.place_as + + try: + code = Code.from_function(obj) + except TypeError: + try: + fn = inspect.getsourcefile(obj) or inspect.getfile(obj) # type: ignore[arg-type] + except TypeError: + return "", -1 + + fspath = fn and absolutepath(fn) or "" + lineno = -1 + if fspath: + try: + _, lineno = findsource(obj) + except OSError: + pass + return fspath, lineno + + return code.path, code.firstlineno + + +# Relative paths that we use to filter traceback entries from appearing to the user; +# see filter_traceback. +# note: if we need to add more paths than what we have now we should probably use a list +# for better maintenance. + +_PLUGGY_DIR = Path(pluggy.__file__.rstrip("oc")) +# pluggy is either a package or a single module depending on the version +if _PLUGGY_DIR.name == "__init__.py": + _PLUGGY_DIR = _PLUGGY_DIR.parent +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def filter_traceback(entry: TracebackEntry) -> bool: + """Return True if a TracebackEntry instance should be included in tracebacks. + + We hide traceback entries of: + + * dynamically generated code (no code to show up for it); + * internal traceback from pytest or its internal libraries, py and pluggy. + """ + # entry.path might sometimes return a str object when the entry + # points to dynamically generated code. + # See https://bitbucket.org/pytest-dev/py/issues/71. + raw_filename = entry.frame.code.raw.co_filename + is_generated = "<" in raw_filename and ">" in raw_filename + if is_generated: + return False + + # entry.path might point to a non-existing file, in which case it will + # also return a str object. See #1133. + p = Path(entry.path) + + parents = p.parents + if _PLUGGY_DIR in parents: + return False + if _PYTEST_DIR in parents: + return False + + return True diff --git a/venv/Lib/site-packages/_pytest/_code/source.py b/venv/Lib/site-packages/_pytest/_code/source.py new file mode 100644 index 0000000..604aff8 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_code/source.py @@ -0,0 +1,215 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import ast +from bisect import bisect_right +import inspect +import textwrap +import tokenize +import types +from typing import Iterable +from typing import Iterator +from typing import overload +import warnings + + +class Source: + """An immutable object holding a source code fragment. + + When using Source(...), the source lines are deindented. + """ + + def __init__(self, obj: object = None) -> None: + if not obj: + self.lines: list[str] = [] + elif isinstance(obj, Source): + self.lines = obj.lines + elif isinstance(obj, (tuple, list)): + self.lines = deindent(x.rstrip("\n") for x in obj) + elif isinstance(obj, str): + self.lines = deindent(obj.split("\n")) + else: + try: + rawcode = getrawcode(obj) + src = inspect.getsource(rawcode) + except TypeError: + src = inspect.getsource(obj) # type: ignore[arg-type] + self.lines = deindent(src.split("\n")) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Source): + return NotImplemented + return self.lines == other.lines + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @overload + def __getitem__(self, key: int) -> str: ... + + @overload + def __getitem__(self, key: slice) -> Source: ... 
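+ # (so, illustratively, src[0] returns the first line as a str, while + # src[0:2] returns a new Source holding those lines)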
+ + def __getitem__(self, key: int | slice) -> str | Source: + if isinstance(key, int): + return self.lines[key] + else: + if key.step not in (None, 1): + raise IndexError("cannot slice a Source with a step") + newsource = Source() + newsource.lines = self.lines[key.start : key.stop] + return newsource + + def __iter__(self) -> Iterator[str]: + return iter(self.lines) + + def __len__(self) -> int: + return len(self.lines) + + def strip(self) -> Source: + """Return new Source object with trailing and leading blank lines removed.""" + start, end = 0, len(self) + while start < end and not self.lines[start].strip(): + start += 1 + while end > start and not self.lines[end - 1].strip(): + end -= 1 + source = Source() + source.lines[:] = self.lines[start:end] + return source + + def indent(self, indent: str = " " * 4) -> Source: + """Return a copy of the source object with all lines indented by the + given indent-string.""" + newsource = Source() + newsource.lines = [(indent + line) for line in self.lines] + return newsource + + def getstatement(self, lineno: int) -> Source: + """Return Source statement which contains the given linenumber + (counted from 0).""" + start, end = self.getstatementrange(lineno) + return self[start:end] + + def getstatementrange(self, lineno: int) -> tuple[int, int]: + """Return (start, end) tuple which spans the minimal statement region + which containing the given lineno.""" + if not (0 <= lineno < len(self)): + raise IndexError("lineno out of range") + ast, start, end = getstatementrange_ast(lineno, self) + return start, end + + def deindent(self) -> Source: + """Return a new Source object deindented.""" + newsource = Source() + newsource.lines[:] = deindent(self.lines) + return newsource + + def __str__(self) -> str: + return "\n".join(self.lines) + + +# +# helper functions +# + + +def findsource(obj) -> tuple[Source | None, int]: + try: + sourcelines, lineno = inspect.findsource(obj) + except Exception: + return None, -1 + source = Source() + source.lines = [line.rstrip() for line in sourcelines] + return source, lineno + + +def getrawcode(obj: object, trycall: bool = True) -> types.CodeType: + """Return code object for given function.""" + try: + return obj.__code__ # type: ignore[attr-defined,no-any-return] + except AttributeError: + pass + if trycall: + call = getattr(obj, "__call__", None) + if call and not isinstance(obj, type): + return getrawcode(call, trycall=False) + raise TypeError(f"could not get code object for {obj!r}") + + +def deindent(lines: Iterable[str]) -> list[str]: + return textwrap.dedent("\n".join(lines)).splitlines() + + +def get_statement_startend2(lineno: int, node: ast.AST) -> tuple[int, int | None]: + # Flatten all statements and except handlers into one lineno-list. + # AST's line numbers start indexing at 1. + values: list[int] = [] + for x in ast.walk(node): + if isinstance(x, (ast.stmt, ast.ExceptHandler)): + # The lineno points to the class/def, so need to include the decorators. + if isinstance(x, (ast.ClassDef, ast.FunctionDef, ast.AsyncFunctionDef)): + for d in x.decorator_list: + values.append(d.lineno - 1) + values.append(x.lineno - 1) + for name in ("finalbody", "orelse"): + val: list[ast.stmt] | None = getattr(x, name, None) + if val: + # Treat the finally/orelse part as its own statement. 
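+ # (val[0].lineno - 1 is the 0-based first body statement; the extra -1 + # points at the "finally:" / "else:" header line itself)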
+ values.append(val[0].lineno - 1 - 1) + values.sort() + insert_index = bisect_right(values, lineno) + start = values[insert_index - 1] + if insert_index >= len(values): + end = None + else: + end = values[insert_index] + return start, end + + +def getstatementrange_ast( + lineno: int, + source: Source, + assertion: bool = False, + astnode: ast.AST | None = None, +) -> tuple[ast.AST, int, int]: + if astnode is None: + content = str(source) + # See #4260: + # Don't produce duplicate warnings when compiling source to find AST. + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + astnode = ast.parse(content, "source", "exec") + + start, end = get_statement_startend2(lineno, astnode) + # We need to correct the end: + # - ast-parsing strips comments + # - there might be empty lines + # - we might have lesser indented code blocks at the end + if end is None: + end = len(source.lines) + + if end > start + 1: + # Make sure we don't span differently indented code blocks + # by using the BlockFinder helper used which inspect.getsource() uses itself. + block_finder = inspect.BlockFinder() + # If we start with an indented line, put blockfinder to "started" mode. + block_finder.started = ( + bool(source.lines[start]) and source.lines[start][0].isspace() + ) + it = ((x + "\n") for x in source.lines[start:end]) + try: + for tok in tokenize.generate_tokens(lambda: next(it)): + block_finder.tokeneater(*tok) + except (inspect.EndOfBlock, IndentationError): + end = block_finder.last + start + except Exception: + pass + + # The end might still point to a comment or empty line, correct it. + while end: + line = source.lines[end - 1].lstrip() + if line.startswith("#") or not line: + end -= 1 + else: + break + return astnode, start, end diff --git a/venv/Lib/site-packages/_pytest/_io/__init__.py b/venv/Lib/site-packages/_pytest/_io/__init__.py new file mode 100644 index 0000000..b0155b1 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_io/__init__.py @@ -0,0 +1,10 @@ +from __future__ import annotations + +from .terminalwriter import get_terminal_width +from .terminalwriter import TerminalWriter + + +__all__ = [ + "TerminalWriter", + "get_terminal_width", +] diff --git a/venv/Lib/site-packages/_pytest/_io/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/_pytest/_io/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..b35d836 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_io/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_io/__pycache__/pprint.cpython-311.pyc b/venv/Lib/site-packages/_pytest/_io/__pycache__/pprint.cpython-311.pyc new file mode 100644 index 0000000..501d6b5 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_io/__pycache__/pprint.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_io/__pycache__/saferepr.cpython-311.pyc b/venv/Lib/site-packages/_pytest/_io/__pycache__/saferepr.cpython-311.pyc new file mode 100644 index 0000000..d3e8a20 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_io/__pycache__/saferepr.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_io/__pycache__/terminalwriter.cpython-311.pyc b/venv/Lib/site-packages/_pytest/_io/__pycache__/terminalwriter.cpython-311.pyc new file mode 100644 index 0000000..1747aed Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_io/__pycache__/terminalwriter.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_io/__pycache__/wcwidth.cpython-311.pyc 
b/venv/Lib/site-packages/_pytest/_io/__pycache__/wcwidth.cpython-311.pyc new file mode 100644 index 0000000..bba6812 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_io/__pycache__/wcwidth.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_io/pprint.py b/venv/Lib/site-packages/_pytest/_io/pprint.py new file mode 100644 index 0000000..fc29989 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_io/pprint.py @@ -0,0 +1,673 @@ +# mypy: allow-untyped-defs +# This module was imported from the cpython standard library +# (https://github.com/python/cpython/) at commit +# c5140945c723ae6c4b7ee81ff720ac8ea4b52cfd (python3.12). +# +# +# Original Author: Fred L. Drake, Jr. +# fdrake@acm.org +# +# This is a simple little module I wrote to make life easier. I didn't +# see anything quite like it in the library, though I may have overlooked +# something. I wrote this when I was trying to read some heavily nested +# tuples with fairly non-descriptive content. This is modeled very much +# after Lisp/Scheme - style pretty-printing of lists. If you find it +# useful, thank small children who sleep at night. +from __future__ import annotations + +import collections as _collections +import dataclasses as _dataclasses +from io import StringIO as _StringIO +import re +import types as _types +from typing import Any +from typing import Callable +from typing import IO +from typing import Iterator + + +class _safe_key: + """Helper function for key functions when sorting unorderable objects. + + The wrapped-object will fallback to a Py2.x style comparison for + unorderable types (sorting first comparing the type name and then by + the obj ids). Does not work recursively, so dict.items() must have + _safe_key applied to both the key and the value. + + """ + + __slots__ = ["obj"] + + def __init__(self, obj): + self.obj = obj + + def __lt__(self, other): + try: + return self.obj < other.obj + except TypeError: + return (str(type(self.obj)), id(self.obj)) < ( + str(type(other.obj)), + id(other.obj), + ) + + +def _safe_tuple(t): + """Helper function for comparing 2-tuples""" + return _safe_key(t[0]), _safe_key(t[1]) + + +class PrettyPrinter: + def __init__( + self, + indent: int = 4, + width: int = 80, + depth: int | None = None, + ) -> None: + """Handle pretty printing operations onto a stream using a set of + configured parameters. + + indent + Number of spaces to indent for each level of nesting. + + width + Attempted maximum number of columns in the output. + + depth + The maximum depth to print out nested structures. 
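+ + For example (illustrative): PrettyPrinter(indent=4, width=40, depth=2) + renders containers nested more than two levels deep as "...".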
+ + """ + if indent < 0: + raise ValueError("indent must be >= 0") + if depth is not None and depth <= 0: + raise ValueError("depth must be > 0") + if not width: + raise ValueError("width must be != 0") + self._depth = depth + self._indent_per_level = indent + self._width = width + + def pformat(self, object: Any) -> str: + sio = _StringIO() + self._format(object, sio, 0, 0, set(), 0) + return sio.getvalue() + + def _format( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + objid = id(object) + if objid in context: + stream.write(_recursion(object)) + return + + p = self._dispatch.get(type(object).__repr__, None) + if p is not None: + context.add(objid) + p(self, object, stream, indent, allowance, context, level + 1) + context.remove(objid) + elif ( + _dataclasses.is_dataclass(object) # type:ignore[unreachable] + and not isinstance(object, type) + and object.__dataclass_params__.repr + and + # Check dataclass has generated repr method. + hasattr(object.__repr__, "__wrapped__") + and "__create_fn__" in object.__repr__.__wrapped__.__qualname__ + ): + context.add(objid) # type:ignore[unreachable] + self._pprint_dataclass( + object, stream, indent, allowance, context, level + 1 + ) + context.remove(objid) + else: + stream.write(self._repr(object, context, level)) + + def _pprint_dataclass( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + cls_name = object.__class__.__name__ + items = [ + (f.name, getattr(object, f.name)) + for f in _dataclasses.fields(object) + if f.repr + ] + stream.write(cls_name + "(") + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch: dict[ + Callable[..., str], + Callable[[PrettyPrinter, Any, IO[str], int, int, set[int], int], None], + ] = {} + + def _pprint_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + write("{") + items = sorted(object.items(), key=_safe_tuple) + self._format_dict_items(items, stream, indent, allowance, context, level) + write("}") + + _dispatch[dict.__repr__] = _pprint_dict + + def _pprint_ordered_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + "(") + self._pprint_dict(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict + + def _pprint_list( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("[") + self._format_items(object, stream, indent, allowance, context, level) + stream.write("]") + + _dispatch[list.__repr__] = _pprint_list + + def _pprint_tuple( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("(") + self._format_items(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[tuple.__repr__] = _pprint_tuple + + def _pprint_set( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object): + stream.write(repr(object)) + return + typ = object.__class__ + if typ is 
set: + stream.write("{") + endchar = "}" + else: + stream.write(typ.__name__ + "({") + endchar = "})" + object = sorted(object, key=_safe_key) + self._format_items(object, stream, indent, allowance, context, level) + stream.write(endchar) + + _dispatch[set.__repr__] = _pprint_set + _dispatch[frozenset.__repr__] = _pprint_set + + def _pprint_str( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + if not len(object): + write(repr(object)) + return + chunks = [] + lines = object.splitlines(True) + if level == 1: + indent += 1 + allowance += 1 + max_width1 = max_width = self._width - indent + for i, line in enumerate(lines): + rep = repr(line) + if i == len(lines) - 1: + max_width1 -= allowance + if len(rep) <= max_width1: + chunks.append(rep) + else: + # A list of alternating (non-space, space) strings + parts = re.findall(r"\S*\s*", line) + assert parts + assert not parts[-1] + parts.pop() # drop empty last part + max_width2 = max_width + current = "" + for j, part in enumerate(parts): + candidate = current + part + if j == len(parts) - 1 and i == len(lines) - 1: + max_width2 -= allowance + if len(repr(candidate)) > max_width2: + if current: + chunks.append(repr(current)) + current = part + else: + current = candidate + if current: + chunks.append(repr(current)) + if len(chunks) == 1: + write(rep) + return + if level == 1: + write("(") + for i, rep in enumerate(chunks): + if i > 0: + write("\n" + " " * indent) + write(rep) + if level == 1: + write(")") + + _dispatch[str.__repr__] = _pprint_str + + def _pprint_bytes( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + if len(object) <= 4: + write(repr(object)) + return + parens = level == 1 + if parens: + indent += 1 + allowance += 1 + write("(") + delim = "" + for rep in _wrap_bytes_repr(object, self._width - indent, allowance): + write(delim) + write(rep) + if not delim: + delim = "\n" + " " * indent + if parens: + write(")") + + _dispatch[bytes.__repr__] = _pprint_bytes + + def _pprint_bytearray( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + write = stream.write + write("bytearray(") + self._pprint_bytes( + bytes(object), stream, indent + 10, allowance + 1, context, level + 1 + ) + write(")") + + _dispatch[bytearray.__repr__] = _pprint_bytearray + + def _pprint_mappingproxy( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write("mappingproxy(") + self._format(object.copy(), stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy + + def _pprint_simplenamespace( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if type(object) is _types.SimpleNamespace: + # The SimpleNamespace repr is "namespace" instead of the class + # name, so we do the same here. For subclasses, use the class name.
+ cls_name = "namespace" + else: + cls_name = object.__class__.__name__ + items = object.__dict__.items() + stream.write(cls_name + "(") + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace + + def _format_dict_items( + self, + items: list[tuple[Any, Any]], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + for key, ent in items: + write(delimnl) + write(self._repr(key, context, level)) + write(": ") + self._format(ent, stream, item_indent, 1, context, level) + write(",") + + write("\n" + " " * indent) + + def _format_namespace_items( + self, + items: list[tuple[Any, Any]], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + for key, ent in items: + write(delimnl) + write(key) + write("=") + if id(ent) in context: + # Special-case representation of recursion to match standard + # recursive dataclass repr. + write("...") + else: + self._format( + ent, + stream, + item_indent + len(key) + 1, + 1, + context, + level, + ) + + write(",") + + write("\n" + " " * indent) + + def _format_items( + self, + items: list[Any], + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not items: + return + + write = stream.write + item_indent = indent + self._indent_per_level + delimnl = "\n" + " " * item_indent + + for item in items: + write(delimnl) + self._format(item, stream, item_indent, 1, context, level) + write(",") + + write("\n" + " " * indent) + + def _repr(self, object: Any, context: set[int], level: int) -> str: + return self._safe_repr(object, context.copy(), self._depth, level) + + def _pprint_default_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + rdf = self._repr(object.default_factory, context, level) + stream.write(f"{object.__class__.__name__}({rdf}, ") + self._pprint_dict(object, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict + + def _pprint_counter( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + stream.write(object.__class__.__name__ + "(") + + if object: + stream.write("{") + items = object.most_common() + self._format_dict_items(items, stream, indent, allowance, context, level) + stream.write("}") + + stream.write(")") + + _dispatch[_collections.Counter.__repr__] = _pprint_counter + + def _pprint_chain_map( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + if not len(object.maps) or (len(object.maps) == 1 and not len(object.maps[0])): + stream.write(repr(object)) + return + + stream.write(object.__class__.__name__ + "(") + self._format_items(object.maps, stream, indent, allowance, context, level) + stream.write(")") + + _dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map + + def _pprint_deque( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + 
stream.write(object.__class__.__name__ + "(") + if object.maxlen is not None: + stream.write("maxlen=%d, " % object.maxlen) + stream.write("[") + + self._format_items(object, stream, indent, allowance + 1, context, level) + stream.write("])") + + _dispatch[_collections.deque.__repr__] = _pprint_deque + + def _pprint_user_dict( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserDict.__repr__] = _pprint_user_dict + + def _pprint_user_list( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserList.__repr__] = _pprint_user_list + + def _pprint_user_string( + self, + object: Any, + stream: IO[str], + indent: int, + allowance: int, + context: set[int], + level: int, + ) -> None: + self._format(object.data, stream, indent, allowance, context, level - 1) + + _dispatch[_collections.UserString.__repr__] = _pprint_user_string + + def _safe_repr( + self, object: Any, context: set[int], maxlevels: int | None, level: int + ) -> str: + typ = type(object) + if typ in _builtin_scalars: + return repr(object) + + r = getattr(typ, "__repr__", None) + + if issubclass(typ, dict) and r is dict.__repr__: + if not object: + return "{}" + objid = id(object) + if maxlevels and level >= maxlevels: + return "{...}" + if objid in context: + return _recursion(object) + context.add(objid) + components: list[str] = [] + append = components.append + level += 1 + for k, v in sorted(object.items(), key=_safe_tuple): + krepr = self._safe_repr(k, context, maxlevels, level) + vrepr = self._safe_repr(v, context, maxlevels, level) + append(f"{krepr}: {vrepr}") + context.remove(objid) + return "{{{}}}".format(", ".join(components)) + + if (issubclass(typ, list) and r is list.__repr__) or ( + issubclass(typ, tuple) and r is tuple.__repr__ + ): + if issubclass(typ, list): + if not object: + return "[]" + format = "[%s]" + elif len(object) == 1: + format = "(%s,)" + else: + if not object: + return "()" + format = "(%s)" + objid = id(object) + if maxlevels and level >= maxlevels: + return format % "..." 
+ if objid in context: + return _recursion(object) + context.add(objid) + components = [] + append = components.append + level += 1 + for o in object: + orepr = self._safe_repr(o, context, maxlevels, level) + append(orepr) + context.remove(objid) + return format % ", ".join(components) + + return repr(object) + + +_builtin_scalars = frozenset( + {str, bytes, bytearray, float, complex, bool, type(None), int} +) + + +def _recursion(object: Any) -> str: + return f"<Recursion on {type(object).__name__} with id={id(object)}>" + + +def _wrap_bytes_repr(object: Any, width: int, allowance: int) -> Iterator[str]: + current = b"" + last = len(object) // 4 * 4 + for i in range(0, len(object), 4): + part = object[i : i + 4] + candidate = current + part + if i == last: + width -= allowance + if len(repr(candidate)) > width: + if current: + yield repr(current) + current = part + else: + current = candidate + if current: + yield repr(current) diff --git a/venv/Lib/site-packages/_pytest/_io/saferepr.py b/venv/Lib/site-packages/_pytest/_io/saferepr.py new file mode 100644 index 0000000..cee70e3 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_io/saferepr.py @@ -0,0 +1,130 @@ +from __future__ import annotations + +import pprint +import reprlib + + +def _try_repr_or_str(obj: object) -> str: + try: + return repr(obj) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException: + return f'{type(obj).__name__}("{obj}")' + + +def _format_repr_exception(exc: BaseException, obj: object) -> str: + try: + exc_info = _try_repr_or_str(exc) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as inner_exc: + exc_info = f"unpresentable exception ({_try_repr_or_str(inner_exc)})" + return ( + f"<[{exc_info} raised in repr()] {type(obj).__name__} object at 0x{id(obj):x}>" + ) + + +def _ellipsize(s: str, maxsize: int) -> str: + if len(s) > maxsize: + i = max(0, (maxsize - 3) // 2) + j = max(0, maxsize - 3 - i) + return s[:i] + "..." + s[len(s) - j :] + return s + + +class SafeRepr(reprlib.Repr): + """ + repr.Repr that limits the resulting size of repr() and includes + information on exceptions raised during the call. + """ + + def __init__(self, maxsize: int | None, use_ascii: bool = False) -> None: + """ + :param maxsize: + If not None, will truncate the resulting repr to that specific size, using ellipsis + somewhere in the middle to hide the extra text. + If None, will not impose any size limits on the returned repr. + """ + super().__init__() + # ``maxstring`` is used by the superclass, and needs to be an int; using a + # very large number in case maxsize is None, meaning we want to disable + # truncation. + self.maxstring = maxsize if maxsize is not None else 1_000_000_000 + self.maxsize = maxsize + self.use_ascii = use_ascii + + def repr(self, x: object) -> str: + try: + if self.use_ascii: + s = ascii(x) + else: + s = super().repr(x) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s + + def repr_instance(self, x: object, level: int) -> str: + try: + s = repr(x) + except (KeyboardInterrupt, SystemExit): + raise + except BaseException as exc: + s = _format_repr_exception(exc, x) + if self.maxsize is not None: + s = _ellipsize(s, self.maxsize) + return s + + +def safeformat(obj: object) -> str: + """Return a pretty printed string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info
+ """ + try: + return pprint.pformat(obj) + except Exception as exc: + return _format_repr_exception(exc, obj) + + +# Maximum size of overall repr of objects to display during assertion errors. +DEFAULT_REPR_MAX_SIZE = 240 + + +def saferepr( + obj: object, maxsize: int | None = DEFAULT_REPR_MAX_SIZE, use_ascii: bool = False +) -> str: + """Return a size-limited safe repr-string for the given object. + + Failing __repr__ functions of user instances will be represented + with a short exception info and 'saferepr' generally takes + care to never raise exceptions itself. + + This function is a wrapper around the Repr/reprlib functionality of the + stdlib. + """ + return SafeRepr(maxsize, use_ascii).repr(obj) + + +def saferepr_unlimited(obj: object, use_ascii: bool = True) -> str: + """Return an unlimited-size safe repr-string for the given object. + + As with saferepr, failing __repr__ functions of user instances + will be represented with a short exception info. + + This function is a wrapper around simple repr. + + Note: a cleaner solution would be to alter ``saferepr``this way + when maxsize=None, but that might affect some other code. + """ + try: + if use_ascii: + return ascii(obj) + return repr(obj) + except Exception as exc: + return _format_repr_exception(exc, obj) diff --git a/venv/Lib/site-packages/_pytest/_io/terminalwriter.py b/venv/Lib/site-packages/_pytest/_io/terminalwriter.py new file mode 100644 index 0000000..70ebd3d --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_io/terminalwriter.py @@ -0,0 +1,274 @@ +"""Helper functions for writing to terminals and files.""" + +from __future__ import annotations + +import os +import shutil +import sys +from typing import final +from typing import Literal +from typing import Sequence +from typing import TextIO +from typing import TYPE_CHECKING + +from ..compat import assert_never +from .wcwidth import wcswidth + + +if TYPE_CHECKING: + from pygments.formatter import Formatter + from pygments.lexer import Lexer + + +# This code was initially copied from py 1.8.1, file _io/terminalwriter.py. + + +def get_terminal_width() -> int: + width, _ = shutil.get_terminal_size(fallback=(80, 24)) + + # The Windows get_terminal_size may be bogus, let's sanify a bit. 
+ if width < 40: + width = 80 + + return width + + +def should_do_markup(file: TextIO) -> bool: + if os.environ.get("PY_COLORS") == "1": + return True + if os.environ.get("PY_COLORS") == "0": + return False + if os.environ.get("NO_COLOR"): + return False + if os.environ.get("FORCE_COLOR"): + return True + return ( + hasattr(file, "isatty") and file.isatty() and os.environ.get("TERM") != "dumb" + ) + + +@final +class TerminalWriter: + _esctable = dict( + black=30, + red=31, + green=32, + yellow=33, + blue=34, + purple=35, + cyan=36, + white=37, + Black=40, + Red=41, + Green=42, + Yellow=43, + Blue=44, + Purple=45, + Cyan=46, + White=47, + bold=1, + light=2, + blink=5, + invert=7, + ) + + def __init__(self, file: TextIO | None = None) -> None: + if file is None: + file = sys.stdout + if hasattr(file, "isatty") and file.isatty() and sys.platform == "win32": + try: + import colorama + except ImportError: + pass + else: + file = colorama.AnsiToWin32(file).stream + assert file is not None + self._file = file + self.hasmarkup = should_do_markup(file) + self._current_line = "" + self._terminal_width: int | None = None + self.code_highlight = True + + @property + def fullwidth(self) -> int: + if self._terminal_width is not None: + return self._terminal_width + return get_terminal_width() + + @fullwidth.setter + def fullwidth(self, value: int) -> None: + self._terminal_width = value + + @property + def width_of_current_line(self) -> int: + """Return an estimate of the width so far in the current line.""" + return wcswidth(self._current_line) + + def markup(self, text: str, **markup: bool) -> str: + for name in markup: + if name not in self._esctable: + raise ValueError(f"unknown markup: {name!r}") + if self.hasmarkup: + esc = [self._esctable[name] for name, on in markup.items() if on] + if esc: + text = "".join(f"\x1b[{cod}m" for cod in esc) + text + "\x1b[0m" + return text + + def sep( + self, + sepchar: str, + title: str | None = None, + fullwidth: int | None = None, + **markup: bool, + ) -> None: + if fullwidth is None: + fullwidth = self.fullwidth + # The goal is to have the line be as long as possible + # under the condition that len(line) <= fullwidth. + if sys.platform == "win32": + # If we print in the last column on windows we are on a + # new line but there is no way to verify/neutralize this + # (we may not know the exact line width). + # So let's be defensive to avoid empty lines in the output. + fullwidth -= 1 + if title is not None: + # we want 2 + 2*len(fill) + len(title) <= fullwidth + # i.e. 2 + 2*len(sepchar)*N + len(title) <= fullwidth + # 2*len(sepchar)*N <= fullwidth - len(title) - 2 + # N <= (fullwidth - len(title) - 2) // (2*len(sepchar)) + N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1) + fill = sepchar * N + line = f"{fill} {title} {fill}" + else: + # we want len(sepchar)*N <= fullwidth + # i.e. N <= fullwidth // len(sepchar) + line = sepchar * (fullwidth // len(sepchar)) + # In some situations there is room for an extra sepchar at the right, + # in particular if we consider that with a sepchar like "_ " the + # trailing space is not important at the end of the line. 
+ if len(line) + len(sepchar.rstrip()) <= fullwidth: + line += sepchar.rstrip() + + self.line(line, **markup) + + def write(self, msg: str, *, flush: bool = False, **markup: bool) -> None: + if msg: + current_line = msg.rsplit("\n", 1)[-1] + if "\n" in msg: + self._current_line = current_line + else: + self._current_line += current_line + + msg = self.markup(msg, **markup) + + try: + self._file.write(msg) + except UnicodeEncodeError: + # Some environments don't support printing general Unicode + # strings, due to misconfiguration or otherwise; in that case, + # print the string escaped to ASCII. + # When the Unicode situation improves we should consider + # letting the error propagate instead of masking it (see #7475 + # for one brief attempt). + msg = msg.encode("unicode-escape").decode("ascii") + self._file.write(msg) + + if flush: + self.flush() + + def line(self, s: str = "", **markup: bool) -> None: + self.write(s, **markup) + self.write("\n") + + def flush(self) -> None: + self._file.flush() + + def _write_source(self, lines: Sequence[str], indents: Sequence[str] = ()) -> None: + """Write lines of source code possibly highlighted. + + Keeping this private for now because the API is clunky. We should discuss how + to evolve the terminal writer so we can have more precise color support, for example + being able to write part of a line in one color and the rest in another, and so on. + """ + if indents and len(indents) != len(lines): + raise ValueError( + f"indents size ({len(indents)}) should have same size as lines ({len(lines)})" + ) + if not indents: + indents = [""] * len(lines) + source = "\n".join(lines) + new_lines = self._highlight(source).splitlines() + for indent, new_line in zip(indents, new_lines): + self.line(indent + new_line) + + def _get_pygments_lexer(self, lexer: Literal["python", "diff"]) -> Lexer | None: + try: + if lexer == "python": + from pygments.lexers.python import PythonLexer + + return PythonLexer() + elif lexer == "diff": + from pygments.lexers.diff import DiffLexer + + return DiffLexer() + else: + assert_never(lexer) + except ModuleNotFoundError: + return None + + def _get_pygments_formatter(self) -> Formatter | None: + try: + import pygments.util + except ModuleNotFoundError: + return None + + from _pytest.config.exceptions import UsageError + + theme = os.getenv("PYTEST_THEME") + theme_mode = os.getenv("PYTEST_THEME_MODE", "dark") + + try: + from pygments.formatters.terminal import TerminalFormatter + + return TerminalFormatter(bg=theme_mode, style=theme) + + except pygments.util.ClassNotFound as e: + raise UsageError( + f"PYTEST_THEME environment variable has an invalid value: '{theme}'. " + "Hint: See available pygments styles with `pygmentize -L styles`." + ) from e + except pygments.util.OptionError as e: + raise UsageError( + f"PYTEST_THEME_MODE environment variable has an invalid value: '{theme_mode}'. " + "The allowed values are 'dark' (default) and 'light'." 
+ ) from e + + def _highlight( + self, source: str, lexer: Literal["diff", "python"] = "python" + ) -> str: + """Highlight the given source if we have markup support.""" + if not source or not self.hasmarkup or not self.code_highlight: + return source + + pygments_lexer = self._get_pygments_lexer(lexer) + if pygments_lexer is None: + return source + + pygments_formatter = self._get_pygments_formatter() + if pygments_formatter is None: + return source + + from pygments import highlight + + highlighted: str = highlight(source, pygments_lexer, pygments_formatter) + # pygments terminal formatter may add a newline when there wasn't one. + # We don't want this, remove. + if highlighted[-1] == "\n" and source[-1] != "\n": + highlighted = highlighted[:-1] + + # Some lexers will not set the initial color explicitly + # which may lead to the previous color being propagated to the + # start of the expression, so reset first. + highlighted = "\x1b[0m" + highlighted + + return highlighted diff --git a/venv/Lib/site-packages/_pytest/_io/wcwidth.py b/venv/Lib/site-packages/_pytest/_io/wcwidth.py new file mode 100644 index 0000000..23886ff --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_io/wcwidth.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from functools import lru_cache +import unicodedata + + +@lru_cache(100) +def wcwidth(c: str) -> int: + """Determine how many columns are needed to display a character in a terminal. + + Returns -1 if the character is not printable. + Returns 0, 1 or 2 for other characters. + """ + o = ord(c) + + # ASCII fast path. + if 0x20 <= o < 0x07F: + return 1 + + # Some Cf/Zp/Zl characters which should be zero-width. + if ( + o == 0x0000 + or 0x200B <= o <= 0x200F + or 0x2028 <= o <= 0x202E + or 0x2060 <= o <= 0x2063 + ): + return 0 + + category = unicodedata.category(c) + + # Control characters. + if category == "Cc": + return -1 + + # Combining characters with zero width. + if category in ("Me", "Mn"): + return 0 + + # Full/Wide east asian characters. + if unicodedata.east_asian_width(c) in ("F", "W"): + return 2 + + return 1 + + +def wcswidth(s: str) -> int: + """Determine how many columns are needed to display a string in a terminal. + + Returns -1 if the string contains non-printable characters. 
+ """ + width = 0 + for c in unicodedata.normalize("NFC", s): + wc = wcwidth(c) + if wc < 0: + return -1 + width += wc + return width diff --git a/venv/Lib/site-packages/_pytest/_py/__init__.py b/venv/Lib/site-packages/_pytest/_py/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/venv/Lib/site-packages/_pytest/_py/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/_pytest/_py/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..532efe1 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_py/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_py/__pycache__/error.cpython-311.pyc b/venv/Lib/site-packages/_pytest/_py/__pycache__/error.cpython-311.pyc new file mode 100644 index 0000000..33b0bb6 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_py/__pycache__/error.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_py/__pycache__/path.cpython-311.pyc b/venv/Lib/site-packages/_pytest/_py/__pycache__/path.cpython-311.pyc new file mode 100644 index 0000000..30e417f Binary files /dev/null and b/venv/Lib/site-packages/_pytest/_py/__pycache__/path.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/_py/error.py b/venv/Lib/site-packages/_pytest/_py/error.py new file mode 100644 index 0000000..ab3a4ed --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_py/error.py @@ -0,0 +1,111 @@ +"""create errno-specific classes for IO or os calls.""" + +from __future__ import annotations + +import errno +import os +import sys +from typing import Callable +from typing import TYPE_CHECKING +from typing import TypeVar + + +if TYPE_CHECKING: + from typing_extensions import ParamSpec + + P = ParamSpec("P") + +R = TypeVar("R") + + +class Error(EnvironmentError): + def __repr__(self) -> str: + return "{}.{} {!r}: {} ".format( + self.__class__.__module__, + self.__class__.__name__, + self.__class__.__doc__, + " ".join(map(str, self.args)), + # repr(self.args) + ) + + def __str__(self) -> str: + s = "[{}]: {}".format( + self.__class__.__doc__, + " ".join(map(str, self.args)), + ) + return s + + +_winerrnomap = { + 2: errno.ENOENT, + 3: errno.ENOENT, + 17: errno.EEXIST, + 18: errno.EXDEV, + 13: errno.EBUSY, # empty cd drive, but ENOMEDIUM seems unavailable + 22: errno.ENOTDIR, + 20: errno.ENOTDIR, + 267: errno.ENOTDIR, + 5: errno.EACCES, # anything better? +} + + +class ErrorMaker: + """lazily provides Exception classes for each possible POSIX errno + (as defined per the 'errno' module). All such instances + subclass EnvironmentError. 
+ """ + + _errno2class: dict[int, type[Error]] = {} + + def __getattr__(self, name: str) -> type[Error]: + if name[0] == "_": + raise AttributeError(name) + eno = getattr(errno, name) + cls = self._geterrnoclass(eno) + setattr(self, name, cls) + return cls + + def _geterrnoclass(self, eno: int) -> type[Error]: + try: + return self._errno2class[eno] + except KeyError: + clsname = errno.errorcode.get(eno, "UnknownErrno%d" % (eno,)) + errorcls = type( + clsname, + (Error,), + {"__module__": "py.error", "__doc__": os.strerror(eno)}, + ) + self._errno2class[eno] = errorcls + return errorcls + + def checked_call( + self, func: Callable[P, R], *args: P.args, **kwargs: P.kwargs + ) -> R: + """Call a function and raise an errno-exception if applicable.""" + __tracebackhide__ = True + try: + return func(*args, **kwargs) + except Error: + raise + except OSError as value: + if not hasattr(value, "errno"): + raise + errno = value.errno + if sys.platform == "win32": + try: + cls = self._geterrnoclass(_winerrnomap[errno]) + except KeyError: + raise value + else: + # we are not on Windows, or we got a proper OSError + cls = self._geterrnoclass(errno) + + raise cls(f"{func.__name__}{args!r}") + + +_error_maker = ErrorMaker() +checked_call = _error_maker.checked_call + + +def __getattr__(attr: str) -> type[Error]: + return getattr(_error_maker, attr) # type: ignore[no-any-return] diff --git a/venv/Lib/site-packages/_pytest/_py/path.py b/venv/Lib/site-packages/_pytest/_py/path.py new file mode 100644 index 0000000..c7ab118 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_py/path.py @@ -0,0 +1,1475 @@ +# mypy: allow-untyped-defs +"""local path implementation.""" + +from __future__ import annotations + +import atexit +from contextlib import contextmanager +import fnmatch +import importlib.util +import io +import os +from os.path import abspath +from os.path import dirname +from os.path import exists +from os.path import isabs +from os.path import isdir +from os.path import isfile +from os.path import islink +from os.path import normpath +import posixpath +from stat import S_ISDIR +from stat import S_ISLNK +from stat import S_ISREG +import sys +from typing import Any +from typing import Callable +from typing import cast +from typing import Literal +from typing import overload +from typing import TYPE_CHECKING +import uuid +import warnings + +from . import error + + +# Moved from local.py. +iswin32 = sys.platform == "win32" or (getattr(os, "_name", False) == "nt") + + +class Checkers: + _depend_on_existence = "exists", "link", "dir", "file" + + def __init__(self, path): + self.path = path + + def dotfile(self): + return self.path.basename.startswith(".") + + def ext(self, arg): + if not arg.startswith("."): + arg = "." 
+ arg + return self.path.ext == arg + + def basename(self, arg): + return self.path.basename == arg + + def basestarts(self, arg): + return self.path.basename.startswith(arg) + + def relto(self, arg): + return self.path.relto(arg) + + def fnmatch(self, arg): + return self.path.fnmatch(arg) + + def endswith(self, arg): + return str(self.path).endswith(arg) + + def _evaluate(self, kw): + from .._code.source import getrawcode + + for name, value in kw.items(): + invert = False + meth = None + try: + meth = getattr(self, name) + except AttributeError: + if name[:3] == "not": + invert = True + try: + meth = getattr(self, name[3:]) + except AttributeError: + pass + if meth is None: + raise TypeError(f"no {name!r} checker available for {self.path!r}") + try: + if getrawcode(meth).co_argcount > 1: + if (not meth(value)) ^ invert: + return False + else: + if bool(value) ^ bool(meth()) ^ invert: + return False + except (error.ENOENT, error.ENOTDIR, error.EBUSY): + # EBUSY feels not entirely correct, + # but it's kind of necessary since ENOMEDIUM + # is not accessible in python + for name in self._depend_on_existence: + if name in kw: + if kw.get(name): + return False + name = "not" + name + if name in kw: + if not kw.get(name): + return False + return True + + _statcache: Stat + + def _stat(self) -> Stat: + try: + return self._statcache + except AttributeError: + try: + self._statcache = self.path.stat() + except error.ELOOP: + self._statcache = self.path.lstat() + return self._statcache + + def dir(self): + return S_ISDIR(self._stat().mode) + + def file(self): + return S_ISREG(self._stat().mode) + + def exists(self): + return self._stat() + + def link(self): + st = self.path.lstat() + return S_ISLNK(st.mode) + + +class NeverRaised(Exception): + pass + + +class Visitor: + def __init__(self, fil, rec, ignore, bf, sort): + if isinstance(fil, str): + fil = FNMatcher(fil) + if isinstance(rec, str): + self.rec: Callable[[LocalPath], bool] = FNMatcher(rec) + elif not hasattr(rec, "__call__") and rec: + self.rec = lambda path: True + else: + self.rec = rec + self.fil = fil + self.ignore = ignore + self.breadthfirst = bf + self.optsort = cast(Callable[[Any], Any], sorted) if sort else (lambda x: x) + + def gen(self, path): + try: + entries = path.listdir() + except self.ignore: + return + rec = self.rec + dirs = self.optsort( + [p for p in entries if p.check(dir=1) and (rec is None or rec(p))] + ) + if not self.breadthfirst: + for subdir in dirs: + yield from self.gen(subdir) + for p in self.optsort(entries): + if self.fil is None or self.fil(p): + yield p + if self.breadthfirst: + for subdir in dirs: + yield from self.gen(subdir) + + +class FNMatcher: + def __init__(self, pattern): + self.pattern = pattern + + def __call__(self, path): + pattern = self.pattern + + if ( + pattern.find(path.sep) == -1 + and iswin32 + and pattern.find(posixpath.sep) != -1 + ): + # Running on Windows, the pattern has no Windows path separators, + # and the pattern has one or more Posix path separators. Replace + # the Posix path separators with the Windows path separator. + pattern = pattern.replace(posixpath.sep, path.sep) + + if pattern.find(path.sep) == -1: + name = path.basename + else: + name = str(path) # path.strpath # XXX svn? + if not os.path.isabs(pattern): + pattern = "*" + path.sep + pattern + return fnmatch.fnmatch(name, pattern) + + +def map_as_list(func, iter): + return list(map(func, iter)) + + +class Stat: + if TYPE_CHECKING: + + @property + def size(self) -> int: ...
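# [editor's sketch, not part of the diff] FNMatcher above matches the
# basename unless the pattern contains a path separator, in which case the
# full path is matched with "*" prepended to relative patterns (posix
# assumed; the path is invented).
from _pytest._py.path import FNMatcher, LocalPath

p = LocalPath("/tmp/project/tests/test_demo.py")
assert FNMatcher("test_*.py")(p)        # basename match
assert FNMatcher("tests/test_*.py")(p)  # matched as "*/tests/test_*.py"
assert not FNMatcher("src/*.py")(p)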
+ + @property + def mtime(self) -> float: ... + + def __getattr__(self, name: str) -> Any: + return getattr(self._osstatresult, "st_" + name) + + def __init__(self, path, osstatresult): + self.path = path + self._osstatresult = osstatresult + + @property + def owner(self): + if iswin32: + raise NotImplementedError("XXX win32") + import pwd + + entry = error.checked_call(pwd.getpwuid, self.uid) # type:ignore[attr-defined,unused-ignore] + return entry[0] + + @property + def group(self): + """Return group name of file.""" + if iswin32: + raise NotImplementedError("XXX win32") + import grp + + entry = error.checked_call(grp.getgrgid, self.gid) # type:ignore[attr-defined,unused-ignore] + return entry[0] + + def isdir(self): + return S_ISDIR(self._osstatresult.st_mode) + + def isfile(self): + return S_ISREG(self._osstatresult.st_mode) + + def islink(self): + self.path.lstat() + return S_ISLNK(self._osstatresult.st_mode) + + +def getuserid(user): + import pwd + + if not isinstance(user, int): + user = pwd.getpwnam(user)[2] # type:ignore[attr-defined,unused-ignore] + return user + + +def getgroupid(group): + import grp + + if not isinstance(group, int): + group = grp.getgrnam(group)[2] # type:ignore[attr-defined,unused-ignore] + return group + + +class LocalPath: + """Object oriented interface to os.path and other local filesystem + related information. + """ + + class ImportMismatchError(ImportError): + """raised on pyimport() if there is a mismatch of __file__'s""" + + sep = os.sep + + def __init__(self, path=None, expanduser=False): + """Initialize and return a local Path instance. + + Path can be relative to the current directory. + If path is None it defaults to the current working directory. + If expanduser is True, tilde-expansion is performed. + Note that Path instances always carry an absolute path. + Note also that passing in a local path object will simply return + the exact same path object. Use new() to get a new copy. + """ + if path is None: + self.strpath = error.checked_call(os.getcwd) + else: + try: + path = os.fspath(path) + except TypeError: + raise ValueError( + "can only pass None, Path instances " + "or non-empty strings to LocalPath" + ) + if expanduser: + path = os.path.expanduser(path) + self.strpath = abspath(path) + + if sys.platform != "win32": + + def chown(self, user, group, rec=0): + """Change ownership to the given user and group. + user and group may be specified by a number or + by a name. if rec is True change ownership + recursively. 
+ """ + uid = getuserid(user) + gid = getgroupid(group) + if rec: + for x in self.visit(rec=lambda x: x.check(link=0)): + if x.check(link=0): + error.checked_call(os.chown, str(x), uid, gid) + error.checked_call(os.chown, str(self), uid, gid) + + def readlink(self) -> str: + """Return value of a symbolic link.""" + # https://github.com/python/mypy/issues/12278 + return error.checked_call(os.readlink, self.strpath) # type: ignore[arg-type,return-value,unused-ignore] + + def mklinkto(self, oldname): + """Posix style hard link to another name.""" + error.checked_call(os.link, str(oldname), str(self)) + + def mksymlinkto(self, value, absolute=1): + """Create a symbolic link with the given value (pointing to another name).""" + if absolute: + error.checked_call(os.symlink, str(value), self.strpath) + else: + base = self.common(value) + # with posix local paths '/' is always a common base + relsource = self.__class__(value).relto(base) + reldest = self.relto(base) + n = reldest.count(self.sep) + target = self.sep.join(("..",) * n + (relsource,)) + error.checked_call(os.symlink, target, self.strpath) + + def __div__(self, other): + return self.join(os.fspath(other)) + + __truediv__ = __div__ # py3k + + @property + def basename(self): + """Basename part of path.""" + return self._getbyspec("basename")[0] + + @property + def dirname(self): + """Dirname part of path.""" + return self._getbyspec("dirname")[0] + + @property + def purebasename(self): + """Pure base name of the path.""" + return self._getbyspec("purebasename")[0] + + @property + def ext(self): + """Extension of the path (including the '.').""" + return self._getbyspec("ext")[0] + + def read_binary(self): + """Read and return a bytestring from reading the path.""" + with self.open("rb") as f: + return f.read() + + def read_text(self, encoding): + """Read and return a Unicode string from reading the path.""" + with self.open("r", encoding=encoding) as f: + return f.read() + + def read(self, mode="r"): + """Read and return a bytestring from reading the path.""" + with self.open(mode) as f: + return f.read() + + def readlines(self, cr=1): + """Read and return a list of lines from the path. if cr is False, the + newline will be removed from the end of each line.""" + mode = "r" + + if not cr: + content = self.read(mode) + return content.split("\n") + else: + f = self.open(mode) + try: + return f.readlines() + finally: + f.close() + + def load(self): + """(deprecated) return object unpickled from self.read()""" + f = self.open("rb") + try: + import pickle + + return error.checked_call(pickle.load, f) + finally: + f.close() + + def move(self, target): + """Move this path to target.""" + if target.relto(self): + raise error.EINVAL(target, "cannot move path into a subdirectory of itself") + try: + self.rename(target) + except error.EXDEV: # invalid cross-device link + self.copy(target) + self.remove() + + def fnmatch(self, pattern): + """Return true if the basename/fullname matches the glob-'pattern'. + + valid pattern characters:: + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any char not in seq + + If the pattern contains a path-separator then the full path + is used for pattern matching and a '*' is prepended to the + pattern. + + if the pattern doesn't contain a path-separator the pattern + is only matched against the basename. 
+ """ + return FNMatcher(pattern)(self) + + def relto(self, relpath): + """Return a string which is the relative part of the path + to the given 'relpath'. + """ + if not isinstance(relpath, (str, LocalPath)): + raise TypeError(f"{relpath!r}: not a string or path object") + strrelpath = str(relpath) + if strrelpath and strrelpath[-1] != self.sep: + strrelpath += self.sep + # assert strrelpath[-1] == self.sep + # assert strrelpath[-2] != self.sep + strself = self.strpath + if sys.platform == "win32" or getattr(os, "_name", None) == "nt": + if os.path.normcase(strself).startswith(os.path.normcase(strrelpath)): + return strself[len(strrelpath) :] + elif strself.startswith(strrelpath): + return strself[len(strrelpath) :] + return "" + + def ensure_dir(self, *args): + """Ensure the path joined with args is a directory.""" + return self.ensure(*args, dir=True) + + def bestrelpath(self, dest): + """Return a string which is a relative path from self + (assumed to be a directory) to dest such that + self.join(bestrelpath) == dest and if not such + path can be determined return dest. + """ + try: + if self == dest: + return os.curdir + base = self.common(dest) + if not base: # can be the case on windows + return str(dest) + self2base = self.relto(base) + reldest = dest.relto(base) + if self2base: + n = self2base.count(self.sep) + 1 + else: + n = 0 + lst = [os.pardir] * n + if reldest: + lst.append(reldest) + target = dest.sep.join(lst) + return target + except AttributeError: + return str(dest) + + def exists(self): + return self.check() + + def isdir(self): + return self.check(dir=1) + + def isfile(self): + return self.check(file=1) + + def parts(self, reverse=False): + """Return a root-first list of all ancestor directories + plus the path itself. + """ + current = self + lst = [self] + while 1: + last = current + current = current.dirpath() + if last == current: + break + lst.append(current) + if not reverse: + lst.reverse() + return lst + + def common(self, other): + """Return the common part shared with the other path + or None if there is no common part. + """ + last = None + for x, y in zip(self.parts(), other.parts()): + if x != y: + return last + last = x + return last + + def __add__(self, other): + """Return new path object with 'other' added to the basename""" + return self.new(basename=self.basename + str(other)) + + def visit(self, fil=None, rec=None, ignore=NeverRaised, bf=False, sort=False): + """Yields all paths below the current one + + fil is a filter (glob pattern or callable), if not matching the + path will not be yielded, defaulting to None (everything is + returned) + + rec is a filter (glob pattern or callable) that controls whether + a node is descended, defaulting to None + + ignore is an Exception class that is ignoredwhen calling dirlist() + on any of the paths (by default, all exceptions are reported) + + bf if True will cause a breadthfirst search instead of the + default depthfirst. Default: False + + sort if True will sort entries within each directory level. 
+ """ + yield from Visitor(fil, rec, ignore, bf, sort).gen(self) + + def _sortlist(self, res, sort): + if sort: + if hasattr(sort, "__call__"): + warnings.warn( + DeprecationWarning( + "listdir(sort=callable) is deprecated and breaks on python3" + ), + stacklevel=3, + ) + res.sort(sort) + else: + res.sort() + + def __fspath__(self): + return self.strpath + + def __hash__(self): + s = self.strpath + if iswin32: + s = s.lower() + return hash(s) + + def __eq__(self, other): + s1 = os.fspath(self) + try: + s2 = os.fspath(other) + except TypeError: + return False + if iswin32: + s1 = s1.lower() + try: + s2 = s2.lower() + except AttributeError: + return False + return s1 == s2 + + def __ne__(self, other): + return not (self == other) + + def __lt__(self, other): + return os.fspath(self) < os.fspath(other) + + def __gt__(self, other): + return os.fspath(self) > os.fspath(other) + + def samefile(self, other): + """Return True if 'other' references the same file as 'self'.""" + other = os.fspath(other) + if not isabs(other): + other = abspath(other) + if self == other: + return True + if not hasattr(os.path, "samefile"): + return False + return error.checked_call(os.path.samefile, self.strpath, other) + + def remove(self, rec=1, ignore_errors=False): + """Remove a file or directory (or a directory tree if rec=1). + if ignore_errors is True, errors while removing directories will + be ignored. + """ + if self.check(dir=1, link=0): + if rec: + # force remove of readonly files on windows + if iswin32: + self.chmod(0o700, rec=1) + import shutil + + error.checked_call( + shutil.rmtree, self.strpath, ignore_errors=ignore_errors + ) + else: + error.checked_call(os.rmdir, self.strpath) + else: + if iswin32: + self.chmod(0o700) + error.checked_call(os.remove, self.strpath) + + def computehash(self, hashtype="md5", chunksize=524288): + """Return hexdigest of hashvalue for this file.""" + try: + try: + import hashlib as mod + except ImportError: + if hashtype == "sha1": + hashtype = "sha" + mod = __import__(hashtype) + hash = getattr(mod, hashtype)() + except (AttributeError, ImportError): + raise ValueError(f"Don't know how to compute {hashtype!r} hash") + f = self.open("rb") + try: + while 1: + buf = f.read(chunksize) + if not buf: + return hash.hexdigest() + hash.update(buf) + finally: + f.close() + + def new(self, **kw): + """Create a modified version of this path. + the following keyword arguments modify various path parts:: + + a:/some/path/to/a/file.ext + xx drive + xxxxxxxxxxxxxxxxx dirname + xxxxxxxx basename + xxxx purebasename + xxx ext + """ + obj = object.__new__(self.__class__) + if not kw: + obj.strpath = self.strpath + return obj + drive, dirname, basename, purebasename, ext = self._getbyspec( + "drive,dirname,basename,purebasename,ext" + ) + if "basename" in kw: + if "purebasename" in kw or "ext" in kw: + raise ValueError(f"invalid specification {kw!r}") + else: + pb = kw.setdefault("purebasename", purebasename) + try: + ext = kw["ext"] + except KeyError: + pass + else: + if ext and not ext.startswith("."): + ext = "." 
+ ext + kw["basename"] = pb + ext + + if "dirname" in kw and not kw["dirname"]: + kw["dirname"] = drive + else: + kw.setdefault("dirname", dirname) + kw.setdefault("sep", self.sep) + obj.strpath = normpath("{dirname}{sep}{basename}".format(**kw)) + return obj + + def _getbyspec(self, spec: str) -> list[str]: + """See new for what 'spec' can be.""" + res = [] + parts = self.strpath.split(self.sep) + + args = filter(None, spec.split(",")) + for name in args: + if name == "drive": + res.append(parts[0]) + elif name == "dirname": + res.append(self.sep.join(parts[:-1])) + else: + basename = parts[-1] + if name == "basename": + res.append(basename) + else: + i = basename.rfind(".") + if i == -1: + purebasename, ext = basename, "" + else: + purebasename, ext = basename[:i], basename[i:] + if name == "purebasename": + res.append(purebasename) + elif name == "ext": + res.append(ext) + else: + raise ValueError(f"invalid part specification {name!r}") + return res + + def dirpath(self, *args, **kwargs): + """Return the directory path joined with any given path arguments.""" + if not kwargs: + path = object.__new__(self.__class__) + path.strpath = dirname(self.strpath) + if args: + path = path.join(*args) + return path + return self.new(basename="").join(*args, **kwargs) + + def join(self, *args: os.PathLike[str], abs: bool = False) -> LocalPath: + """Return a new path by appending all 'args' as path + components. if abs=1 is used restart from root if any + of the args is an absolute path. + """ + sep = self.sep + strargs = [os.fspath(arg) for arg in args] + strpath = self.strpath + if abs: + newargs: list[str] = [] + for arg in reversed(strargs): + if isabs(arg): + strpath = arg + strargs = newargs + break + newargs.insert(0, arg) + # special case for when we have e.g. strpath == "/" + actual_sep = "" if strpath.endswith(sep) else sep + for arg in strargs: + arg = arg.strip(sep) + if iswin32: + # allow unix style paths even on windows. + arg = arg.strip("/") + arg = arg.replace("/", sep) + strpath = strpath + actual_sep + arg + actual_sep = sep + obj = object.__new__(self.__class__) + obj.strpath = normpath(strpath) + return obj + + def open(self, mode="r", ensure=False, encoding=None): + """Return an opened file with the given mode. + + If ensure is True, create parent directories if needed. + """ + if ensure: + self.dirpath().ensure(dir=1) + if encoding: + return error.checked_call( + io.open, + self.strpath, + mode, + encoding=encoding, + ) + return error.checked_call(open, self.strpath, mode) + + def _fastjoin(self, name): + child = object.__new__(self.__class__) + child.strpath = self.strpath + self.sep + name + return child + + def islink(self): + return islink(self.strpath) + + def check(self, **kw): + """Check a path for existence and properties. + + Without arguments, return True if the path exists, otherwise False. 
+ + valid checkers:: + + file = 1 # is a file + file = 0 # is not a file (may not even exist) + dir = 1 # is a dir + link = 1 # is a link + exists = 1 # exists + + You can specify multiple checker definitions, for example:: + + path.check(file=1, link=1) # a link pointing to a file + """ + if not kw: + return exists(self.strpath) + if len(kw) == 1: + if "dir" in kw: + return not kw["dir"] ^ isdir(self.strpath) + if "file" in kw: + return not kw["file"] ^ isfile(self.strpath) + if not kw: + kw = {"exists": 1} + return Checkers(self)._evaluate(kw) + + _patternchars = set("*?[" + os.sep) + + def listdir(self, fil=None, sort=None): + """List directory contents, possibly filter by the given fil func + and possibly sorted. + """ + if fil is None and sort is None: + names = error.checked_call(os.listdir, self.strpath) + return map_as_list(self._fastjoin, names) + if isinstance(fil, str): + if not self._patternchars.intersection(fil): + child = self._fastjoin(fil) + if exists(child.strpath): + return [child] + return [] + fil = FNMatcher(fil) + names = error.checked_call(os.listdir, self.strpath) + res = [] + for name in names: + child = self._fastjoin(name) + if fil is None or fil(child): + res.append(child) + self._sortlist(res, sort) + return res + + def size(self) -> int: + """Return size of the underlying file object""" + return self.stat().size + + def mtime(self) -> float: + """Return last modification time of the path.""" + return self.stat().mtime + + def copy(self, target, mode=False, stat=False): + """Copy path to target. + + If mode is True, will copy permission from path to target. + If stat is True, copy permission, last modification + time, last access time, and flags from path to target. + """ + if self.check(file=1): + if target.check(dir=1): + target = target.join(self.basename) + assert self != target + copychunked(self, target) + if mode: + copymode(self.strpath, target.strpath) + if stat: + copystat(self, target) + else: + + def rec(p): + return p.check(link=0) + + for x in self.visit(rec=rec): + relpath = x.relto(self) + newx = target.join(relpath) + newx.dirpath().ensure(dir=1) + if x.check(link=1): + newx.mksymlinkto(x.readlink()) + continue + elif x.check(file=1): + copychunked(x, newx) + elif x.check(dir=1): + newx.ensure(dir=1) + if mode: + copymode(x.strpath, newx.strpath) + if stat: + copystat(x, newx) + + def rename(self, target): + """Rename this path to target.""" + target = os.fspath(target) + return error.checked_call(os.rename, self.strpath, target) + + def dump(self, obj, bin=1): + """Pickle object into path location""" + f = self.open("wb") + import pickle + + try: + error.checked_call(pickle.dump, obj, f, bin) + finally: + f.close() + + def mkdir(self, *args): + """Create & return the directory joined with args.""" + p = self.join(*args) + error.checked_call(os.mkdir, os.fspath(p)) + return p + + def write_binary(self, data, ensure=False): + """Write binary data into path. If ensure is True create + missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open("wb") as f: + f.write(data) + + def write_text(self, data, encoding, ensure=False): + """Write text data into path using the specified encoding. + If ensure is True create missing parent directories. + """ + if ensure: + self.dirpath().ensure(dir=1) + with self.open("w", encoding=encoding) as f: + f.write(data) + + def write(self, data, mode="w", ensure=False): + """Write data into path. If ensure is True create + missing parent directories. 
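# [editor's sketch, not part of the diff] The read/write helpers above in
# action; the path is invented.
from _pytest._py.path import LocalPath

f = LocalPath("/tmp/demo/notes.txt")
f.write_text("hola\n", encoding="utf-8", ensure=True)  # creates /tmp/demo
assert f.read_text(encoding="utf-8") == "hola\n"
f.write_binary(b"\x00\x01", ensure=True)
assert f.read_binary() == b"\x00\x01"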
+ """ + if ensure: + self.dirpath().ensure(dir=1) + if "b" in mode: + if not isinstance(data, bytes): + raise ValueError("can only process bytes") + else: + if not isinstance(data, str): + if not isinstance(data, bytes): + data = str(data) + else: + data = data.decode(sys.getdefaultencoding()) + f = self.open(mode) + try: + f.write(data) + finally: + f.close() + + def _ensuredirs(self): + parent = self.dirpath() + if parent == self: + return self + if parent.check(dir=0): + parent._ensuredirs() + if self.check(dir=0): + try: + self.mkdir() + except error.EEXIST: + # race condition: file/dir created by another thread/process. + # complain if it is not a dir + if self.check(dir=0): + raise + return self + + def ensure(self, *args, **kwargs): + """Ensure that an args-joined path exists (by default as + a file). if you specify a keyword argument 'dir=True' + then the path is forced to be a directory path. + """ + p = self.join(*args) + if kwargs.get("dir", 0): + return p._ensuredirs() + else: + p.dirpath()._ensuredirs() + if not p.check(file=1): + p.open("wb").close() + return p + + @overload + def stat(self, raising: Literal[True] = ...) -> Stat: ... + + @overload + def stat(self, raising: Literal[False]) -> Stat | None: ... + + def stat(self, raising: bool = True) -> Stat | None: + """Return an os.stat() tuple.""" + if raising: + return Stat(self, error.checked_call(os.stat, self.strpath)) + try: + return Stat(self, os.stat(self.strpath)) + except KeyboardInterrupt: + raise + except Exception: + return None + + def lstat(self) -> Stat: + """Return an os.lstat() tuple.""" + return Stat(self, error.checked_call(os.lstat, self.strpath)) + + def setmtime(self, mtime=None): + """Set modification time for the given path. if 'mtime' is None + (the default) then the file's mtime is set to current time. + + Note that the resolution for 'mtime' is platform dependent. + """ + if mtime is None: + return error.checked_call(os.utime, self.strpath, mtime) + try: + return error.checked_call(os.utime, self.strpath, (-1, mtime)) + except error.EINVAL: + return error.checked_call(os.utime, self.strpath, (self.atime(), mtime)) + + def chdir(self): + """Change directory to self and return old current directory""" + try: + old = self.__class__() + except error.ENOENT: + old = None + error.checked_call(os.chdir, self.strpath) + return old + + @contextmanager + def as_cwd(self): + """ + Return a context manager, which changes to the path's dir during the + managed "with" context. + On __enter__ it returns the old dir, which might be ``None``. + """ + old = self.chdir() + try: + yield old + finally: + if old is not None: + old.chdir() + + def realpath(self): + """Return a new path which contains no symbolic links.""" + return self.__class__(os.path.realpath(self.strpath)) + + def atime(self): + """Return last access time of the path.""" + return self.stat().atime + + def __repr__(self): + return f"local({self.strpath!r})" + + def __str__(self): + """Return string representation of the Path.""" + return self.strpath + + def chmod(self, mode, rec=0): + """Change permissions to the given mode. If mode is an + integer it directly encodes the os-specific modes. + if rec is True perform recursively. 
+ """ + if not isinstance(mode, int): + raise TypeError(f"mode {mode!r} must be an integer") + if rec: + for x in self.visit(rec=rec): + error.checked_call(os.chmod, str(x), mode) + error.checked_call(os.chmod, self.strpath, mode) + + def pypkgpath(self): + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + Return None if a pkgpath cannot be determined. + """ + pkgpath = None + for parent in self.parts(reverse=True): + if parent.isdir(): + if not parent.join("__init__.py").exists(): + break + if not isimportable(parent.basename): + break + pkgpath = parent + return pkgpath + + def _ensuresyspath(self, ensuremode, path): + if ensuremode: + s = str(path) + if ensuremode == "append": + if s not in sys.path: + sys.path.append(s) + else: + if s != sys.path[0]: + sys.path.insert(0, s) + + def pyimport(self, modname=None, ensuresyspath=True): + """Return path as an imported python module. + + If modname is None, look for the containing package + and construct an according module name. + The module will be put/looked up in sys.modules. + if ensuresyspath is True then the root dir for importing + the file (taking __init__.py files into account) will + be prepended to sys.path if it isn't there already. + If ensuresyspath=="append" the root dir will be appended + if it isn't already contained in sys.path. + if ensuresyspath is False no modification of syspath happens. + + Special value of ensuresyspath=="importlib" is intended + purely for using in pytest, it is capable only of importing + separate .py files outside packages, e.g. for test suite + without any __init__.py file. It effectively allows having + same-named test modules in different places and offers + mild opt-in via this option. Note that it works only in + recent versions of python. + """ + if not self.check(): + raise error.ENOENT(self) + + if ensuresyspath == "importlib": + if modname is None: + modname = self.purebasename + spec = importlib.util.spec_from_file_location(modname, str(self)) + if spec is None or spec.loader is None: + raise ImportError(f"Can't find module {modname} at location {self!s}") + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + return mod + + pkgpath = None + if modname is None: + pkgpath = self.pypkgpath() + if pkgpath is not None: + pkgroot = pkgpath.dirpath() + names = self.new(ext="").relto(pkgroot).split(self.sep) + if names[-1] == "__init__": + names.pop() + modname = ".".join(names) + else: + pkgroot = self.dirpath() + modname = self.purebasename + + self._ensuresyspath(ensuresyspath, pkgroot) + __import__(modname) + mod = sys.modules[modname] + if self.basename == "__init__.py": + return mod # we don't check anything as we might + # be in a namespace package ... 
too icky to check + modfile = mod.__file__ + assert modfile is not None + if modfile[-4:] in (".pyc", ".pyo"): + modfile = modfile[:-1] + elif modfile.endswith("$py.class"): + modfile = modfile[:-9] + ".py" + if modfile.endswith(os.sep + "__init__.py"): + if self.basename != "__init__.py": + modfile = modfile[:-12] + try: + issame = self.samefile(modfile) + except error.ENOENT: + issame = False + if not issame: + ignore = os.getenv("PY_IGNORE_IMPORTMISMATCH") + if ignore != "1": + raise self.ImportMismatchError(modname, modfile, self) + return mod + else: + try: + return sys.modules[modname] + except KeyError: + # we have a custom modname, do a pseudo-import + import types + + mod = types.ModuleType(modname) + mod.__file__ = str(self) + sys.modules[modname] = mod + try: + with open(str(self), "rb") as f: + exec(f.read(), mod.__dict__) + except BaseException: + del sys.modules[modname] + raise + return mod + + def sysexec(self, *argv: os.PathLike[str], **popen_opts: Any) -> str: + """Return stdout text from executing a system child process, + where the 'self' path points to executable. + The process is directly invoked and not through a system shell. + """ + from subprocess import PIPE + from subprocess import Popen + + popen_opts.pop("stdout", None) + popen_opts.pop("stderr", None) + proc = Popen( + [str(self)] + [str(arg) for arg in argv], + **popen_opts, + stdout=PIPE, + stderr=PIPE, + ) + stdout: str | bytes + stdout, stderr = proc.communicate() + ret = proc.wait() + if isinstance(stdout, bytes): + stdout = stdout.decode(sys.getdefaultencoding()) + if ret != 0: + if isinstance(stderr, bytes): + stderr = stderr.decode(sys.getdefaultencoding()) + raise RuntimeError( + ret, + ret, + str(self), + stdout, + stderr, + ) + return stdout + + @classmethod + def sysfind(cls, name, checker=None, paths=None): + """Return a path object found by looking at the systems + underlying PATH specification. If the checker is not None + it will be invoked to filter matching paths. If a binary + cannot be found, None is returned + Note: This is probably not working on plain win32 systems + but may work on cygwin. + """ + if isabs(name): + p = local(name) + if p.check(file=1): + return p + else: + if paths is None: + if iswin32: + paths = os.environ["Path"].split(";") + if "" not in paths and "." not in paths: + paths.append(".") + try: + systemroot = os.environ["SYSTEMROOT"] + except KeyError: + pass + else: + paths = [ + path.replace("%SystemRoot%", systemroot) for path in paths + ] + else: + paths = os.environ["PATH"].split(":") + tryadd = [] + if iswin32: + tryadd += os.environ["PATHEXT"].split(os.pathsep) + tryadd.append("") + + for x in paths: + for addext in tryadd: + p = local(x).join(name, abs=True) + addext + try: + if p.check(file=1): + if checker: + if not checker(p): + continue + return p + except error.EACCES: + pass + return None + + @classmethod + def _gethomedir(cls): + try: + x = os.environ["HOME"] + except KeyError: + try: + x = os.environ["HOMEDRIVE"] + os.environ["HOMEPATH"] + except KeyError: + return None + return cls(x) + + # """ + # special class constructors for local filesystem paths + # """ + @classmethod + def get_temproot(cls): + """Return the system's temporary directory + (where tempfiles are usually created in) + """ + import tempfile + + return local(tempfile.gettempdir()) + + @classmethod + def mkdtemp(cls, rootdir=None): + """Return a Path object pointing to a fresh new temporary directory + (which we created ourselves). 
+ """ + import tempfile + + if rootdir is None: + rootdir = cls.get_temproot() + path = error.checked_call(tempfile.mkdtemp, dir=str(rootdir)) + return cls(path) + + @classmethod + def make_numbered_dir( + cls, prefix="session-", rootdir=None, keep=3, lock_timeout=172800 + ): # two days + """Return unique directory with a number greater than the current + maximum one. The number is assumed to start directly after prefix. + if keep is true directories with a number less than (maxnum-keep) + will be removed. If .lock files are used (lock_timeout non-zero), + algorithm is multi-process safe. + """ + if rootdir is None: + rootdir = cls.get_temproot() + + nprefix = prefix.lower() + + def parse_num(path): + """Parse the number out of a path (if it matches the prefix)""" + nbasename = path.basename.lower() + if nbasename.startswith(nprefix): + try: + return int(nbasename[len(nprefix) :]) + except ValueError: + pass + + def create_lockfile(path): + """Exclusively create lockfile. Throws when failed""" + mypid = os.getpid() + lockfile = path.join(".lock") + if hasattr(lockfile, "mksymlinkto"): + lockfile.mksymlinkto(str(mypid)) + else: + fd = error.checked_call( + os.open, str(lockfile), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644 + ) + with os.fdopen(fd, "w") as f: + f.write(str(mypid)) + return lockfile + + def atexit_remove_lockfile(lockfile): + """Ensure lockfile is removed at process exit""" + mypid = os.getpid() + + def try_remove_lockfile(): + # in a fork() situation, only the last process should + # remove the .lock, otherwise the other processes run the + # risk of seeing their temporary dir disappear. For now + # we remove the .lock in the parent only (i.e. we assume + # that the children finish before the parent). + if os.getpid() != mypid: + return + try: + lockfile.remove() + except error.Error: + pass + + atexit.register(try_remove_lockfile) + + # compute the maximum number currently in use with the prefix + lastmax = None + while True: + maxnum = -1 + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None: + maxnum = max(maxnum, num) + + # make the new directory + try: + udir = rootdir.mkdir(prefix + str(maxnum + 1)) + if lock_timeout: + lockfile = create_lockfile(udir) + atexit_remove_lockfile(lockfile) + except (error.EEXIST, error.ENOENT, error.EBUSY): + # race condition (1): another thread/process created the dir + # in the meantime - try again + # race condition (2): another thread/process spuriously acquired + # lock treating empty directory as candidate + # for removal - try again + # race condition (3): another thread/process tried to create the lock at + # the same time (happened in Python 3.3 on Windows) + # https://ci.appveyor.com/project/pytestbot/py/build/1.0.21/job/ffi85j4c0lqwsfwa + if lastmax == maxnum: + raise + lastmax = maxnum + continue + break + + def get_mtime(path): + """Read file modification time""" + try: + return path.lstat().mtime + except error.Error: + pass + + garbage_prefix = prefix + "garbage-" + + def is_garbage(path): + """Check if path denotes directory scheduled for removal""" + bn = path.basename + return bn.startswith(garbage_prefix) + + # prune old directories + udir_time = get_mtime(udir) + if keep and udir_time: + for path in rootdir.listdir(): + num = parse_num(path) + if num is not None and num <= (maxnum - keep): + try: + # try acquiring lock to remove directory as exclusive user + if lock_timeout: + create_lockfile(path) + except (error.EEXIST, error.ENOENT, error.EBUSY): + path_time = get_mtime(path) + if not 
path_time: + # assume directory doesn't exist now + continue + if abs(udir_time - path_time) < lock_timeout: + # assume directory with lockfile exists + # and lock timeout hasn't expired yet + continue + + # path dir locked for exclusive use + # and scheduled for removal to avoid another thread/process + # treating it as a new directory or removal candidate + garbage_path = rootdir.join(garbage_prefix + str(uuid.uuid4())) + try: + path.rename(garbage_path) + garbage_path.remove(rec=1) + except KeyboardInterrupt: + raise + except Exception: # this might be error.Error, WindowsError ... + pass + if is_garbage(path): + try: + path.remove(rec=1) + except KeyboardInterrupt: + raise + except Exception: # this might be error.Error, WindowsError ... + pass + + # make link... + try: + username = os.environ["USER"] # linux, et al + except KeyError: + try: + username = os.environ["USERNAME"] # windows + except KeyError: + username = "current" + + src = str(udir) + dest = src[: src.rfind("-")] + "-" + username + try: + os.unlink(dest) + except OSError: + pass + try: + os.symlink(src, dest) + except (OSError, AttributeError, NotImplementedError): + pass + + return udir + + +def copymode(src, dest): + """Copy permission from src to dst.""" + import shutil + + shutil.copymode(src, dest) + + +def copystat(src, dest): + """Copy permission, last modification time, + last access time, and flags from src to dst.""" + import shutil + + shutil.copystat(str(src), str(dest)) + + +def copychunked(src, dest): + chunksize = 524288 # half a meg of bytes + fsrc = src.open("rb") + try: + fdest = dest.open("wb") + try: + while 1: + buf = fsrc.read(chunksize) + if not buf: + break + fdest.write(buf) + finally: + fdest.close() + finally: + fsrc.close() + + +def isimportable(name): + if name and (name[0].isalpha() or name[0] == "_"): + name = name.replace("_", "") + return not name or name.isalnum() + + +local = LocalPath diff --git a/venv/Lib/site-packages/_pytest/_version.py b/venv/Lib/site-packages/_pytest/_version.py new file mode 100644 index 0000000..cf258c1 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/_version.py @@ -0,0 +1,16 @@ +# file generated by setuptools_scm +# don't change, don't track in version control +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple, Union + VERSION_TUPLE = Tuple[Union[int, str], ...] 
+else: + VERSION_TUPLE = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE + +__version__ = version = '8.3.3' +__version_tuple__ = version_tuple = (8, 3, 3) diff --git a/venv/Lib/site-packages/_pytest/assertion/__init__.py b/venv/Lib/site-packages/_pytest/assertion/__init__.py new file mode 100644 index 0000000..f2f1d02 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/assertion/__init__.py @@ -0,0 +1,192 @@ +# mypy: allow-untyped-defs +"""Support for presenting detailed information in failing assertions.""" + +from __future__ import annotations + +import sys +from typing import Any +from typing import Generator +from typing import TYPE_CHECKING + +from _pytest.assertion import rewrite +from _pytest.assertion import truncate +from _pytest.assertion import util +from _pytest.assertion.rewrite import assertstate_key +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item + + +if TYPE_CHECKING: + from _pytest.main import Session + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--assert", + action="store", + dest="assertmode", + choices=("rewrite", "plain"), + default="rewrite", + metavar="MODE", + help=( + "Control assertion debugging tools.\n" + "'plain' performs no assertion debugging.\n" + "'rewrite' (the default) rewrites assert statements in test modules" + " on import to provide assert expression information." + ), + ) + parser.addini( + "enable_assertion_pass_hook", + type="bool", + default=False, + help="Enables the pytest_assertion_pass hook. " + "Make sure to delete any previously generated pyc cache files.", + ) + Config._add_verbosity_ini( + parser, + Config.VERBOSITY_ASSERTIONS, + help=( + "Specify a verbosity level for assertions, overriding the main level. " + "Higher levels will provide more detailed explanation when an assertion fails." + ), + ) + + +def register_assert_rewrite(*names: str) -> None: + """Register one or more module names to be rewritten on import. + + This function will make sure that this module or all modules inside + the package will get their assert statements rewritten. + Thus you should make sure to call this before the module is + actually imported, usually in your __init__.py if you are a plugin + using a package. + + :param names: The module names to register. + """ + for name in names: + if not isinstance(name, str): + msg = "expected module names as *args, got {0} instead" # type: ignore[unreachable] + raise TypeError(msg.format(repr(names))) + for hook in sys.meta_path: + if isinstance(hook, rewrite.AssertionRewritingHook): + importhook = hook + break + else: + # TODO(typing): Add a protocol for mark_rewrite() and use it + # for importhook and for PytestPluginManager.rewrite_hook. 
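+        # No AssertionRewritingHook is on sys.meta_path (e.g. when running
+        # with --assert=plain), so fall back to a no-op hook: the names are
+        # still accepted, but nothing will actually be rewritten.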
+ importhook = DummyRewriteHook() # type: ignore + importhook.mark_rewrite(*names) + + +class DummyRewriteHook: + """A no-op import hook for when rewriting is disabled.""" + + def mark_rewrite(self, *names: str) -> None: + pass + + +class AssertionState: + """State for the assertion plugin.""" + + def __init__(self, config: Config, mode) -> None: + self.mode = mode + self.trace = config.trace.root.get("assertion") + self.hook: rewrite.AssertionRewritingHook | None = None + + +def install_importhook(config: Config) -> rewrite.AssertionRewritingHook: + """Try to install the rewrite hook, raise SystemError if it fails.""" + config.stash[assertstate_key] = AssertionState(config, "rewrite") + config.stash[assertstate_key].hook = hook = rewrite.AssertionRewritingHook(config) + sys.meta_path.insert(0, hook) + config.stash[assertstate_key].trace("installed rewrite import hook") + + def undo() -> None: + hook = config.stash[assertstate_key].hook + if hook is not None and hook in sys.meta_path: + sys.meta_path.remove(hook) + + config.add_cleanup(undo) + return hook + + +def pytest_collection(session: Session) -> None: + # This hook is only called when test modules are collected + # so for example not in the managing process of pytest-xdist + # (which does not collect test modules). + assertstate = session.config.stash.get(assertstate_key, None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(session) + + +@hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: + """Setup the pytest_assertrepr_compare and pytest_assertion_pass hooks. + + The rewrite module will use util._reprcompare if it exists to use custom + reporting via the pytest_assertrepr_compare hook. This sets up this custom + comparison for the test. + """ + ihook = item.ihook + + def callbinrepr(op, left: object, right: object) -> str | None: + """Call the pytest_assertrepr_compare hook and prepare the result. + + This uses the first result from the hook and then ensures the + following: + * Overly verbose explanations are truncated unless configured otherwise + (eg. if running in verbose mode). + * Embedded newlines are escaped to help util.format_explanation() + later. + * If the rewrite mode is used embedded %-characters are replaced + to protect later % formatting. + + The result can be formatted by util.format_explanation() for + pretty printing. 
+ """ + hook_result = ihook.pytest_assertrepr_compare( + config=item.config, op=op, left=left, right=right + ) + for new_expl in hook_result: + if new_expl: + new_expl = truncate.truncate_if_required(new_expl, item) + new_expl = [line.replace("\n", "\\n") for line in new_expl] + res = "\n~".join(new_expl) + if item.config.getvalue("assertmode") == "rewrite": + res = res.replace("%", "%%") + return res + return None + + saved_assert_hooks = util._reprcompare, util._assertion_pass + util._reprcompare = callbinrepr + util._config = item.config + + if ihook.pytest_assertion_pass.get_hookimpls(): + + def call_assertion_pass_hook(lineno: int, orig: str, expl: str) -> None: + ihook.pytest_assertion_pass(item=item, lineno=lineno, orig=orig, expl=expl) + + util._assertion_pass = call_assertion_pass_hook + + try: + return (yield) + finally: + util._reprcompare, util._assertion_pass = saved_assert_hooks + util._config = None + + +def pytest_sessionfinish(session: Session) -> None: + assertstate = session.config.stash.get(assertstate_key, None) + if assertstate: + if assertstate.hook is not None: + assertstate.hook.set_session(None) + + +def pytest_assertrepr_compare( + config: Config, op: str, left: Any, right: Any +) -> list[str] | None: + return util.assertrepr_compare(config=config, op=op, left=left, right=right) diff --git a/venv/Lib/site-packages/_pytest/assertion/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/_pytest/assertion/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..3deab84 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/assertion/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/assertion/__pycache__/rewrite.cpython-311.pyc b/venv/Lib/site-packages/_pytest/assertion/__pycache__/rewrite.cpython-311.pyc new file mode 100644 index 0000000..edc08f7 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/assertion/__pycache__/rewrite.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/assertion/__pycache__/truncate.cpython-311.pyc b/venv/Lib/site-packages/_pytest/assertion/__pycache__/truncate.cpython-311.pyc new file mode 100644 index 0000000..905fb97 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/assertion/__pycache__/truncate.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/assertion/__pycache__/util.cpython-311.pyc b/venv/Lib/site-packages/_pytest/assertion/__pycache__/util.cpython-311.pyc new file mode 100644 index 0000000..5f44eff Binary files /dev/null and b/venv/Lib/site-packages/_pytest/assertion/__pycache__/util.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/assertion/rewrite.py b/venv/Lib/site-packages/_pytest/assertion/rewrite.py new file mode 100644 index 0000000..a7a92c0 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/assertion/rewrite.py @@ -0,0 +1,1204 @@ +"""Rewrite assertion AST to produce nice error messages.""" + +from __future__ import annotations + +import ast +from collections import defaultdict +import errno +import functools +import importlib.abc +import importlib.machinery +import importlib.util +import io +import itertools +import marshal +import os +from pathlib import Path +from pathlib import PurePath +import struct +import sys +import tokenize +import types +from typing import Callable +from typing import IO +from typing import Iterable +from typing import Iterator +from typing import Sequence +from typing import TYPE_CHECKING + +from _pytest._io.saferepr import DEFAULT_REPR_MAX_SIZE +from 
_pytest._io.saferepr import saferepr +from _pytest._version import version +from _pytest.assertion import util +from _pytest.config import Config +from _pytest.main import Session +from _pytest.pathlib import absolutepath +from _pytest.pathlib import fnmatch_ex +from _pytest.stash import StashKey + + +# fmt: off +from _pytest.assertion.util import format_explanation as _format_explanation # noqa:F401, isort:skip +# fmt:on + +if TYPE_CHECKING: + from _pytest.assertion import AssertionState + + +class Sentinel: + pass + + +assertstate_key = StashKey["AssertionState"]() + +# pytest caches rewritten pycs in pycache dirs +PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}" +PYC_EXT = ".py" + (__debug__ and "c" or "o") +PYC_TAIL = "." + PYTEST_TAG + PYC_EXT + +# Special marker that denotes we have just left a scope definition +_SCOPE_END_MARKER = Sentinel() + + +class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader): + """PEP302/PEP451 import hook which rewrites asserts.""" + + def __init__(self, config: Config) -> None: + self.config = config + try: + self.fnpats = config.getini("python_files") + except ValueError: + self.fnpats = ["test_*.py", "*_test.py"] + self.session: Session | None = None + self._rewritten_names: dict[str, Path] = {} + self._must_rewrite: set[str] = set() + # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, + # which might result in infinite recursion (#3506) + self._writing_pyc = False + self._basenames_to_check_rewrite = {"conftest"} + self._marked_for_rewrite_cache: dict[str, bool] = {} + self._session_paths_checked = False + + def set_session(self, session: Session | None) -> None: + self.session = session + self._session_paths_checked = False + + # Indirection so we can mock calls to find_spec originated from the hook during testing + _find_spec = importlib.machinery.PathFinder.find_spec + + def find_spec( + self, + name: str, + path: Sequence[str | bytes] | None = None, + target: types.ModuleType | None = None, + ) -> importlib.machinery.ModuleSpec | None: + if self._writing_pyc: + return None + state = self.config.stash[assertstate_key] + if self._early_rewrite_bailout(name, state): + return None + state.trace(f"find_module called for: {name}") + + # Type ignored because mypy is confused about the `self` binding here. + spec = self._find_spec(name, path) # type: ignore + + if spec is None and path is not None: + # With --import-mode=importlib, PathFinder cannot find spec without modifying `sys.path`, + # causing inability to assert rewriting (#12659). + # At this point, try using the file path to find the module spec. 
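+            # Each entry in `path` is tried directly as a candidate location
+            # for the module, bypassing the sys.path-based search.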
+ for _path_str in path: + spec = importlib.util.spec_from_file_location(name, _path_str) + if spec is not None: + break + + if ( + # the import machinery could not find a file to import + spec is None + # this is a namespace package (without `__init__.py`) + # there's nothing to rewrite there + or spec.origin is None + # we can only rewrite source files + or not isinstance(spec.loader, importlib.machinery.SourceFileLoader) + # if the file doesn't exist, we can't rewrite it + or not os.path.exists(spec.origin) + ): + return None + else: + fn = spec.origin + + if not self._should_rewrite(name, fn, state): + return None + + return importlib.util.spec_from_file_location( + name, + fn, + loader=self, + submodule_search_locations=spec.submodule_search_locations, + ) + + def create_module( + self, spec: importlib.machinery.ModuleSpec + ) -> types.ModuleType | None: + return None # default behaviour is fine + + def exec_module(self, module: types.ModuleType) -> None: + assert module.__spec__ is not None + assert module.__spec__.origin is not None + fn = Path(module.__spec__.origin) + state = self.config.stash[assertstate_key] + + self._rewritten_names[module.__name__] = fn + + # The requested module looks like a test file, so rewrite it. This is + # the most magical part of the process: load the source, rewrite the + # asserts, and load the rewritten source. We also cache the rewritten + # module code in a special pyc. We must be aware of the possibility of + # concurrent pytest processes rewriting and loading pycs. To avoid + # tricky race conditions, we maintain the following invariant: The + # cached pyc is always a complete, valid pyc. Operations on it must be + # atomic. POSIX's atomic rename comes in handy. + write = not sys.dont_write_bytecode + cache_dir = get_cache_dir(fn) + if write: + ok = try_makedirs(cache_dir) + if not ok: + write = False + state.trace(f"read only directory: {cache_dir}") + + cache_name = fn.name[:-3] + PYC_TAIL + pyc = cache_dir / cache_name + # Notice that even if we're in a read-only directory, I'm going + # to check for a cached pyc. This may not be optimal... + co = _read_pyc(fn, pyc, state.trace) + if co is None: + state.trace(f"rewriting {fn!r}") + source_stat, co = _rewrite_test(fn, self.config) + if write: + self._writing_pyc = True + try: + _write_pyc(state, co, source_stat, pyc) + finally: + self._writing_pyc = False + else: + state.trace(f"found cached rewritten pyc for {fn}") + exec(co, module.__dict__) + + def _early_rewrite_bailout(self, name: str, state: AssertionState) -> bool: + """A fast way to get out of rewriting modules. + + Profiling has shown that the call to PathFinder.find_spec (inside of + the find_spec from this class) is a major slowdown, so, this method + tries to filter what we're sure won't be rewritten before getting to + it. + """ + if self.session is not None and not self._session_paths_checked: + self._session_paths_checked = True + for initial_path in self.session._initialpaths: + # Make something as c:/projects/my_project/path.py -> + # ['c:', 'projects', 'my_project', 'path.py'] + parts = str(initial_path).split(os.sep) + # add 'path' to basenames to be checked. + self._basenames_to_check_rewrite.add(os.path.splitext(parts[-1])[0]) + + # Note: conftest already by default in _basenames_to_check_rewrite. + parts = name.split(".") + if parts[-1] in self._basenames_to_check_rewrite: + return False + + # For matching the name it must be as if it was a filename. 
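+        # e.g. the module name "pkg.sub.test_foo" becomes
+        # PurePath("pkg/sub/test_foo.py"), which fnmatch_ex() below can match
+        # against file patterns such as "test_*.py".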
+ path = PurePath(*parts).with_suffix(".py") + + for pat in self.fnpats: + # if the pattern contains subdirectories ("tests/**.py" for example) we can't bail out based + # on the name alone because we need to match against the full path + if os.path.dirname(pat): + return False + if fnmatch_ex(pat, path): + return False + + if self._is_marked_for_rewrite(name, state): + return False + + state.trace(f"early skip of rewriting module: {name}") + return True + + def _should_rewrite(self, name: str, fn: str, state: AssertionState) -> bool: + # always rewrite conftest files + if os.path.basename(fn) == "conftest.py": + state.trace(f"rewriting conftest file: {fn!r}") + return True + + if self.session is not None: + if self.session.isinitpath(absolutepath(fn)): + state.trace(f"matched test file (was specified on cmdline): {fn!r}") + return True + + # modules not passed explicitly on the command line are only + # rewritten if they match the naming convention for test files + fn_path = PurePath(fn) + for pat in self.fnpats: + if fnmatch_ex(pat, fn_path): + state.trace(f"matched test file {fn!r}") + return True + + return self._is_marked_for_rewrite(name, state) + + def _is_marked_for_rewrite(self, name: str, state: AssertionState) -> bool: + try: + return self._marked_for_rewrite_cache[name] + except KeyError: + for marked in self._must_rewrite: + if name == marked or name.startswith(marked + "."): + state.trace(f"matched marked file {name!r} (from {marked!r})") + self._marked_for_rewrite_cache[name] = True + return True + + self._marked_for_rewrite_cache[name] = False + return False + + def mark_rewrite(self, *names: str) -> None: + """Mark import names as needing to be rewritten. + + The named module or package as well as any nested modules will + be rewritten on import. + """ + already_imported = ( + set(names).intersection(sys.modules).difference(self._rewritten_names) + ) + for name in already_imported: + mod = sys.modules[name] + if not AssertionRewriter.is_rewrite_disabled( + mod.__doc__ or "" + ) and not isinstance(mod.__loader__, type(self)): + self._warn_already_imported(name) + self._must_rewrite.update(names) + self._marked_for_rewrite_cache.clear() + + def _warn_already_imported(self, name: str) -> None: + from _pytest.warning_types import PytestAssertRewriteWarning + + self.config.issue_config_time_warning( + PytestAssertRewriteWarning( + f"Module already imported so cannot be rewritten: {name}" + ), + stacklevel=5, + ) + + def get_data(self, pathname: str | bytes) -> bytes: + """Optional PEP302 get_data API.""" + with open(pathname, "rb") as f: + return f.read() + + if sys.version_info >= (3, 10): + if sys.version_info >= (3, 12): + from importlib.resources.abc import TraversableResources + else: + from importlib.abc import TraversableResources + + def get_resource_reader(self, name: str) -> TraversableResources: + if sys.version_info < (3, 11): + from importlib.readers import FileReader + else: + from importlib.resources.readers import FileReader + + return FileReader(types.SimpleNamespace(path=self._rewritten_names[name])) + + +def _write_pyc_fp( + fp: IO[bytes], source_stat: os.stat_result, co: types.CodeType +) -> None: + # Technically, we don't have to have the same pyc format as + # (C)Python, since these "pycs" should never be seen by builtin + # import. However, there's little reason to deviate. 
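+    # The 16-byte header written below is: magic number (4 bytes), flags (4),
+    # source mtime (4, little-endian) and source size (4, little-endian),
+    # the same fields that _read_pyc() validates before trusting a cached pyc.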
+    fp.write(importlib.util.MAGIC_NUMBER)
+    # https://www.python.org/dev/peps/pep-0552/
+    flags = b"\x00\x00\x00\x00"
+    fp.write(flags)
+    # as of now, bytecode header expects 32-bit numbers for size and mtime (#4903)
+    mtime = int(source_stat.st_mtime) & 0xFFFFFFFF
+    size = source_stat.st_size & 0xFFFFFFFF
+    # "<LL" stands for 2 unsigned longs, little-endian.
+    fp.write(struct.pack("<LL", mtime, size))
+    fp.write(marshal.dumps(co))
+
+
+def _write_pyc(
+    state: AssertionState,
+    co: types.CodeType,
+    source_stat: os.stat_result,
+    pyc: Path,
+) -> bool:
+    proc_pyc = f"{pyc}.{os.getpid()}"
+    try:
+        with open(proc_pyc, "wb") as fp:
+            _write_pyc_fp(fp, source_stat, co)
+    except OSError as e:
+        state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
+        return False
+
+    try:
+        os.replace(proc_pyc, pyc)
+    except OSError as e:
+        state.trace(f"error writing pyc file at {pyc}: {e}")
+        # we ignore any failure to write the cache file
+        # there are many reasons, permission-denied, pycache dir being a
+        # file etc.
+        return False
+    return True
+
+
+def _rewrite_test(fn: Path, config: Config) -> tuple[os.stat_result, types.CodeType]:
+    """Read and rewrite *fn* and return the code object."""
+    stat = os.stat(fn)
+    source = fn.read_bytes()
+    strfn = str(fn)
+    tree = ast.parse(source, filename=strfn)
+    rewrite_asserts(tree, source, strfn, config)
+    co = compile(tree, strfn, "exec", dont_inherit=True)
+    return stat, co
+
+
+def _read_pyc(
+    source: Path, pyc: Path, trace: Callable[[str], None] = lambda x: None
+) -> types.CodeType | None:
+    """Possibly read a pytest pyc containing rewritten code.
+
+    Return rewritten code if successful or None if not.
+    """
+    try:
+        fp = open(pyc, "rb")
+    except OSError:
+        return None
+    with fp:
+        try:
+            stat_result = os.stat(source)
+            mtime = int(stat_result.st_mtime)
+            size = stat_result.st_size
+            data = fp.read(16)
+        except OSError as e:
+            trace(f"_read_pyc({source}): OSError {e}")
+            return None
+        # Check for invalid or out of date pyc file.
+        if len(data) != (16):
+            trace(f"_read_pyc({source}): invalid pyc (too short)")
+            return None
+        if data[:4] != importlib.util.MAGIC_NUMBER:
+            trace(f"_read_pyc({source}): invalid pyc (bad magic number)")
+            return None
+        if data[4:8] != b"\x00\x00\x00\x00":
+            trace(f"_read_pyc({source}): invalid pyc (unsupported flags)")
+            return None
+        mtime_data = data[8:12]
+        if int.from_bytes(mtime_data, "little") != mtime & 0xFFFFFFFF:
+            trace(f"_read_pyc({source}): out of date")
+            return None
+        size_data = data[12:16]
+        if int.from_bytes(size_data, "little") != size & 0xFFFFFFFF:
+            trace(f"_read_pyc({source}): invalid pyc (incorrect size)")
+            return None
+        try:
+            co = marshal.load(fp)
+        except Exception as e:
+            trace(f"_read_pyc({source}): marshal.load error {e}")
+            return None
+        if not isinstance(co, types.CodeType):
+            trace(f"_read_pyc({source}): not a code object")
+            return None
+        return co
+
+
+def rewrite_asserts(
+    mod: ast.Module,
+    source: bytes,
+    module_path: str | None = None,
+    config: Config | None = None,
+) -> None:
+    """Rewrite the assert statements in mod."""
+    AssertionRewriter(module_path, config, source).run(mod)
+
+
+def _saferepr(obj: object) -> str:
+    r"""Get a safe repr of an object for assertion error messages.
+
+    The assertion formatting (util.format_explanation()) requires
+    newlines to be escaped since they are a special character for it.
+    Normally assertion.util.format_explanation() does this but for a
+    custom repr it is possible to contain one of the special escape
+    sequences, especially '\n{' and '\n}' are likely to be present in
+    JSON reprs.
+ """ + if isinstance(obj, types.MethodType): + # for bound methods, skip redundant information + return obj.__name__ + + maxsize = _get_maxsize_for_saferepr(util._config) + return saferepr(obj, maxsize=maxsize).replace("\n", "\\n") + + +def _get_maxsize_for_saferepr(config: Config | None) -> int | None: + """Get `maxsize` configuration for saferepr based on the given config object.""" + if config is None: + verbosity = 0 + else: + verbosity = config.get_verbosity(Config.VERBOSITY_ASSERTIONS) + if verbosity >= 2: + return None + if verbosity >= 1: + return DEFAULT_REPR_MAX_SIZE * 10 + return DEFAULT_REPR_MAX_SIZE + + +def _format_assertmsg(obj: object) -> str: + r"""Format the custom assertion message given. + + For strings this simply replaces newlines with '\n~' so that + util.format_explanation() will preserve them instead of escaping + newlines. For other objects saferepr() is used first. + """ + # reprlib appears to have a bug which means that if a string + # contains a newline it gets escaped, however if an object has a + # .__repr__() which contains newlines it does not get escaped. + # However in either case we want to preserve the newline. + replaces = [("\n", "\n~"), ("%", "%%")] + if not isinstance(obj, str): + obj = saferepr(obj, _get_maxsize_for_saferepr(util._config)) + replaces.append(("\\n", "\n~")) + + for r1, r2 in replaces: + obj = obj.replace(r1, r2) + + return obj + + +def _should_repr_global_name(obj: object) -> bool: + if callable(obj): + return False + + try: + return not hasattr(obj, "__name__") + except Exception: + return True + + +def _format_boolop(explanations: Iterable[str], is_or: bool) -> str: + explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")" + return explanation.replace("%", "%%") + + +def _call_reprcompare( + ops: Sequence[str], + results: Sequence[bool], + expls: Sequence[str], + each_obj: Sequence[object], +) -> str: + for i, res, expl in zip(range(len(ops)), results, expls): + try: + done = not res + except Exception: + done = True + if done: + break + if util._reprcompare is not None: + custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1]) + if custom is not None: + return custom + return expl + + +def _call_assertion_pass(lineno: int, orig: str, expl: str) -> None: + if util._assertion_pass is not None: + util._assertion_pass(lineno, orig, expl) + + +def _check_if_assertion_pass_impl() -> bool: + """Check if any plugins implement the pytest_assertion_pass hook + in order not to generate explanation unnecessarily (might be expensive).""" + return True if util._assertion_pass else False + + +UNARY_MAP = {ast.Not: "not %s", ast.Invert: "~%s", ast.USub: "-%s", ast.UAdd: "+%s"} + +BINOP_MAP = { + ast.BitOr: "|", + ast.BitXor: "^", + ast.BitAnd: "&", + ast.LShift: "<<", + ast.RShift: ">>", + ast.Add: "+", + ast.Sub: "-", + ast.Mult: "*", + ast.Div: "/", + ast.FloorDiv: "//", + ast.Mod: "%%", # escaped for string formatting + ast.Eq: "==", + ast.NotEq: "!=", + ast.Lt: "<", + ast.LtE: "<=", + ast.Gt: ">", + ast.GtE: ">=", + ast.Pow: "**", + ast.Is: "is", + ast.IsNot: "is not", + ast.In: "in", + ast.NotIn: "not in", + ast.MatMult: "@", +} + + +def traverse_node(node: ast.AST) -> Iterator[ast.AST]: + """Recursively yield node and all its children in depth-first order.""" + yield node + for child in ast.iter_child_nodes(node): + yield from traverse_node(child) + + +@functools.lru_cache(maxsize=1) +def _get_assertion_exprs(src: bytes) -> dict[int, str]: + """Return a mapping from {lineno: "assertion test expression"}.""" + 
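+    # e.g. for the source b"assert x == 1, 'msg'\n" this yields {1: "x == 1"};
+    # the message after the top-level comma is not part of the test expression.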
ret: dict[int, str] = {} + + depth = 0 + lines: list[str] = [] + assert_lineno: int | None = None + seen_lines: set[int] = set() + + def _write_and_reset() -> None: + nonlocal depth, lines, assert_lineno, seen_lines + assert assert_lineno is not None + ret[assert_lineno] = "".join(lines).rstrip().rstrip("\\") + depth = 0 + lines = [] + assert_lineno = None + seen_lines = set() + + tokens = tokenize.tokenize(io.BytesIO(src).readline) + for tp, source, (lineno, offset), _, line in tokens: + if tp == tokenize.NAME and source == "assert": + assert_lineno = lineno + elif assert_lineno is not None: + # keep track of depth for the assert-message `,` lookup + if tp == tokenize.OP and source in "([{": + depth += 1 + elif tp == tokenize.OP and source in ")]}": + depth -= 1 + + if not lines: + lines.append(line[offset:]) + seen_lines.add(lineno) + # a non-nested comma separates the expression from the message + elif depth == 0 and tp == tokenize.OP and source == ",": + # one line assert with message + if lineno in seen_lines and len(lines) == 1: + offset_in_trimmed = offset + len(lines[-1]) - len(line) + lines[-1] = lines[-1][:offset_in_trimmed] + # multi-line assert with message + elif lineno in seen_lines: + lines[-1] = lines[-1][:offset] + # multi line assert with escaped newline before message + else: + lines.append(line[:offset]) + _write_and_reset() + elif tp in {tokenize.NEWLINE, tokenize.ENDMARKER}: + _write_and_reset() + elif lines and lineno not in seen_lines: + lines.append(line) + seen_lines.add(lineno) + + return ret + + +class AssertionRewriter(ast.NodeVisitor): + """Assertion rewriting implementation. + + The main entrypoint is to call .run() with an ast.Module instance, + this will then find all the assert statements and rewrite them to + provide intermediate values and a detailed assertion error. See + http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html + for an overview of how this works. + + The entry point here is .run() which will iterate over all the + statements in an ast.Module and for each ast.Assert statement it + finds call .visit() with it. Then .visit_Assert() takes over and + is responsible for creating new ast statements to replace the + original assert statement: it rewrites the test of an assertion + to provide intermediate values and replace it with an if statement + which raises an assertion error with a detailed explanation in + case the expression is false and calls pytest_assertion_pass hook + if expression is true. + + For this .visit_Assert() uses the visitor pattern to visit all the + AST nodes of the ast.Assert.test field, each visit call returning + an AST node and the corresponding explanation string. During this + state is kept in several instance attributes: + + :statements: All the AST statements which will replace the assert + statement. + + :variables: This is populated by .variable() with each variable + used by the statements so that they can all be set to None at + the end of the statements. + + :variable_counter: Counter to create new unique variables needed + by statements. Variables are created using .variable() and + have the form of "@py_assert0". + + :expl_stmts: The AST statements which will be executed to get + data from the assertion. This is the code which will construct + the detailed assertion message that is used in the AssertionError + or for the pytest_assertion_pass hook. 
+ + :explanation_specifiers: A dict filled by .explanation_param() + with %-formatting placeholders and their corresponding + expressions to use in the building of an assertion message. + This is used by .pop_format_context() to build a message. + + :stack: A stack of the explanation_specifiers dicts maintained by + .push_format_context() and .pop_format_context() which allows + to build another %-formatted string while already building one. + + :scope: A tuple containing the current scope used for variables_overwrite. + + :variables_overwrite: A dict filled with references to variables + that change value within an assert. This happens when a variable is + reassigned with the walrus operator + + This state, except the variables_overwrite, is reset on every new assert + statement visited and used by the other visitors. + """ + + def __init__( + self, module_path: str | None, config: Config | None, source: bytes + ) -> None: + super().__init__() + self.module_path = module_path + self.config = config + if config is not None: + self.enable_assertion_pass_hook = config.getini( + "enable_assertion_pass_hook" + ) + else: + self.enable_assertion_pass_hook = False + self.source = source + self.scope: tuple[ast.AST, ...] = () + self.variables_overwrite: defaultdict[tuple[ast.AST, ...], dict[str, str]] = ( + defaultdict(dict) + ) + + def run(self, mod: ast.Module) -> None: + """Find all assert statements in *mod* and rewrite them.""" + if not mod.body: + # Nothing to do. + return + + # We'll insert some special imports at the top of the module, but after any + # docstrings and __future__ imports, so first figure out where that is. + doc = getattr(mod, "docstring", None) + expect_docstring = doc is None + if doc is not None and self.is_rewrite_disabled(doc): + return + pos = 0 + item = None + for item in mod.body: + if ( + expect_docstring + and isinstance(item, ast.Expr) + and isinstance(item.value, ast.Constant) + and isinstance(item.value.value, str) + ): + doc = item.value.value + if self.is_rewrite_disabled(doc): + return + expect_docstring = False + elif ( + isinstance(item, ast.ImportFrom) + and item.level == 0 + and item.module == "__future__" + ): + pass + else: + break + pos += 1 + # Special case: for a decorated function, set the lineno to that of the + # first decorator, not the `def`. Issue #4984. + if isinstance(item, ast.FunctionDef) and item.decorator_list: + lineno = item.decorator_list[0].lineno + else: + lineno = item.lineno + # Now actually insert the special imports. + if sys.version_info >= (3, 10): + aliases = [ + ast.alias("builtins", "@py_builtins", lineno=lineno, col_offset=0), + ast.alias( + "_pytest.assertion.rewrite", + "@pytest_ar", + lineno=lineno, + col_offset=0, + ), + ] + else: + aliases = [ + ast.alias("builtins", "@py_builtins"), + ast.alias("_pytest.assertion.rewrite", "@pytest_ar"), + ] + imports = [ + ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases + ] + mod.body[pos:pos] = imports + + # Collect asserts. 
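+        # Iterative depth-first walk: entering a function/class pushes the
+        # node onto self.scope and queues _SCOPE_END_MARKER, so the walrus
+        # bookkeeping in variables_overwrite stays local to that scope.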
+ self.scope = (mod,) + nodes: list[ast.AST | Sentinel] = [mod] + while nodes: + node = nodes.pop() + if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)): + self.scope = tuple((*self.scope, node)) + nodes.append(_SCOPE_END_MARKER) + if node == _SCOPE_END_MARKER: + self.scope = self.scope[:-1] + continue + assert isinstance(node, ast.AST) + for name, field in ast.iter_fields(node): + if isinstance(field, list): + new: list[ast.AST] = [] + for i, child in enumerate(field): + if isinstance(child, ast.Assert): + # Transform assert. + new.extend(self.visit(child)) + else: + new.append(child) + if isinstance(child, ast.AST): + nodes.append(child) + setattr(node, name, new) + elif ( + isinstance(field, ast.AST) + # Don't recurse into expressions as they can't contain + # asserts. + and not isinstance(field, ast.expr) + ): + nodes.append(field) + + @staticmethod + def is_rewrite_disabled(docstring: str) -> bool: + return "PYTEST_DONT_REWRITE" in docstring + + def variable(self) -> str: + """Get a new variable.""" + # Use a character invalid in python identifiers to avoid clashing. + name = "@py_assert" + str(next(self.variable_counter)) + self.variables.append(name) + return name + + def assign(self, expr: ast.expr) -> ast.Name: + """Give *expr* a name.""" + name = self.variable() + self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr)) + return ast.Name(name, ast.Load()) + + def display(self, expr: ast.expr) -> ast.expr: + """Call saferepr on the expression.""" + return self.helper("_saferepr", expr) + + def helper(self, name: str, *args: ast.expr) -> ast.expr: + """Call a helper in this module.""" + py_name = ast.Name("@pytest_ar", ast.Load()) + attr = ast.Attribute(py_name, name, ast.Load()) + return ast.Call(attr, list(args), []) + + def builtin(self, name: str) -> ast.Attribute: + """Return the builtin called *name*.""" + builtin_name = ast.Name("@py_builtins", ast.Load()) + return ast.Attribute(builtin_name, name, ast.Load()) + + def explanation_param(self, expr: ast.expr) -> str: + """Return a new named %-formatting placeholder for expr. + + This creates a %-formatting placeholder for expr in the + current formatting context, e.g. ``%(py0)s``. The placeholder + and expr are placed in the current format context so that it + can be used on the next call to .pop_format_context(). + """ + specifier = "py" + str(next(self.variable_counter)) + self.explanation_specifiers[specifier] = expr + return "%(" + specifier + ")s" + + def push_format_context(self) -> None: + """Create a new formatting context. + + The format context is used for when an explanation wants to + have a variable value formatted in the assertion message. In + this case the value required can be added using + .explanation_param(). Finally .pop_format_context() is used + to format a string of %-formatted values as added by + .explanation_param(). + """ + self.explanation_specifiers: dict[str, ast.expr] = {} + self.stack.append(self.explanation_specifiers) + + def pop_format_context(self, expl_expr: ast.expr) -> ast.Name: + """Format the %-formatted string with current format context. + + The expl_expr should be an str ast.expr instance constructed from + the %-placeholders created by .explanation_param(). This will + add the required code to format said string to .expl_stmts and + return the ast.Name instance of the formatted string. 
+ """ + current = self.stack.pop() + if self.stack: + self.explanation_specifiers = self.stack[-1] + keys: list[ast.expr | None] = [ast.Constant(key) for key in current.keys()] + format_dict = ast.Dict(keys, list(current.values())) + form = ast.BinOp(expl_expr, ast.Mod(), format_dict) + name = "@py_format" + str(next(self.variable_counter)) + if self.enable_assertion_pass_hook: + self.format_variables.append(name) + self.expl_stmts.append(ast.Assign([ast.Name(name, ast.Store())], form)) + return ast.Name(name, ast.Load()) + + def generic_visit(self, node: ast.AST) -> tuple[ast.Name, str]: + """Handle expressions we don't have custom code for.""" + assert isinstance(node, ast.expr) + res = self.assign(node) + return res, self.explanation_param(self.display(res)) + + def visit_Assert(self, assert_: ast.Assert) -> list[ast.stmt]: + """Return the AST statements to replace the ast.Assert instance. + + This rewrites the test of an assertion to provide + intermediate values and replace it with an if statement which + raises an assertion error with a detailed explanation in case + the expression is false. + """ + if isinstance(assert_.test, ast.Tuple) and len(assert_.test.elts) >= 1: + import warnings + + from _pytest.warning_types import PytestAssertRewriteWarning + + # TODO: This assert should not be needed. + assert self.module_path is not None + warnings.warn_explicit( + PytestAssertRewriteWarning( + "assertion is always true, perhaps remove parentheses?" + ), + category=None, + filename=self.module_path, + lineno=assert_.lineno, + ) + + self.statements: list[ast.stmt] = [] + self.variables: list[str] = [] + self.variable_counter = itertools.count() + + if self.enable_assertion_pass_hook: + self.format_variables: list[str] = [] + + self.stack: list[dict[str, ast.expr]] = [] + self.expl_stmts: list[ast.stmt] = [] + self.push_format_context() + # Rewrite assert into a bunch of statements. 
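+        # Roughly, `assert f(x)` is replaced by statements along the lines of:
+        #     @py_assert1 = f(x)
+        #     if not @py_assert1:
+        #         raise AssertionError(@pytest_ar._format_explanation(...))
+        # (the "@" in the generated names cannot clash with user identifiers).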
+ top_condition, explanation = self.visit(assert_.test) + + negation = ast.UnaryOp(ast.Not(), top_condition) + + if self.enable_assertion_pass_hook: # Experimental pytest_assertion_pass hook + msg = self.pop_format_context(ast.Constant(explanation)) + + # Failed + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + gluestr = "\n>assert " + else: + assertmsg = ast.Constant("") + gluestr = "assert " + err_explanation = ast.BinOp(ast.Constant(gluestr), ast.Add(), msg) + err_msg = ast.BinOp(assertmsg, ast.Add(), err_explanation) + err_name = ast.Name("AssertionError", ast.Load()) + fmt = self.helper("_format_explanation", err_msg) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + statements_fail = [] + statements_fail.extend(self.expl_stmts) + statements_fail.append(raise_) + + # Passed + fmt_pass = self.helper("_format_explanation", msg) + orig = _get_assertion_exprs(self.source)[assert_.lineno] + hook_call_pass = ast.Expr( + self.helper( + "_call_assertion_pass", + ast.Constant(assert_.lineno), + ast.Constant(orig), + fmt_pass, + ) + ) + # If any hooks implement assert_pass hook + hook_impl_test = ast.If( + self.helper("_check_if_assertion_pass_impl"), + [*self.expl_stmts, hook_call_pass], + [], + ) + statements_pass: list[ast.stmt] = [hook_impl_test] + + # Test for assertion condition + main_test = ast.If(negation, statements_fail, statements_pass) + self.statements.append(main_test) + if self.format_variables: + variables: list[ast.expr] = [ + ast.Name(name, ast.Store()) for name in self.format_variables + ] + clear_format = ast.Assign(variables, ast.Constant(None)) + self.statements.append(clear_format) + + else: # Original assertion rewriting + # Create failure message. + body = self.expl_stmts + self.statements.append(ast.If(negation, body, [])) + if assert_.msg: + assertmsg = self.helper("_format_assertmsg", assert_.msg) + explanation = "\n>assert " + explanation + else: + assertmsg = ast.Constant("") + explanation = "assert " + explanation + template = ast.BinOp(assertmsg, ast.Add(), ast.Constant(explanation)) + msg = self.pop_format_context(template) + fmt = self.helper("_format_explanation", msg) + err_name = ast.Name("AssertionError", ast.Load()) + exc = ast.Call(err_name, [fmt], []) + raise_ = ast.Raise(exc, None) + + body.append(raise_) + + # Clear temporary variables by setting them to None. + if self.variables: + variables = [ast.Name(name, ast.Store()) for name in self.variables] + clear = ast.Assign(variables, ast.Constant(None)) + self.statements.append(clear) + # Fix locations (line numbers/column offsets). + for stmt in self.statements: + for node in traverse_node(stmt): + ast.copy_location(node, assert_) + return self.statements + + def visit_NamedExpr(self, name: ast.NamedExpr) -> tuple[ast.NamedExpr, str]: + # This method handles the 'walrus operator' repr of the target + # name if it's a local variable or _should_repr_global_name() + # thinks it's acceptable. + locs = ast.Call(self.builtin("locals"), [], []) + target_id = name.target.id + inlocs = ast.Compare(ast.Constant(target_id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Constant(target_id)) + return name, self.explanation_param(expr) + + def visit_Name(self, name: ast.Name) -> tuple[ast.Name, str]: + # Display the repr of the name if it's a local variable or + # _should_repr_global_name() thinks it's acceptable. 
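+        # Builds, for a name `x`, roughly:
+        #     @pytest_ar._saferepr(x) if ("x" in @py_builtins.locals()
+        #         or @pytest_ar._should_repr_global_name(x)) else "x"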
+ locs = ast.Call(self.builtin("locals"), [], []) + inlocs = ast.Compare(ast.Constant(name.id), [ast.In()], [locs]) + dorepr = self.helper("_should_repr_global_name", name) + test = ast.BoolOp(ast.Or(), [inlocs, dorepr]) + expr = ast.IfExp(test, self.display(name), ast.Constant(name.id)) + return name, self.explanation_param(expr) + + def visit_BoolOp(self, boolop: ast.BoolOp) -> tuple[ast.Name, str]: + res_var = self.variable() + expl_list = self.assign(ast.List([], ast.Load())) + app = ast.Attribute(expl_list, "append", ast.Load()) + is_or = int(isinstance(boolop.op, ast.Or)) + body = save = self.statements + fail_save = self.expl_stmts + levels = len(boolop.values) - 1 + self.push_format_context() + # Process each operand, short-circuiting if needed. + for i, v in enumerate(boolop.values): + if i: + fail_inner: list[ast.stmt] = [] + # cond is set in a prior loop iteration below + self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa: F821 + self.expl_stmts = fail_inner + # Check if the left operand is a ast.NamedExpr and the value has already been visited + if ( + isinstance(v, ast.Compare) + and isinstance(v.left, ast.NamedExpr) + and v.left.target.id + in [ + ast_expr.id + for ast_expr in boolop.values[:i] + if hasattr(ast_expr, "id") + ] + ): + pytest_temp = self.variable() + self.variables_overwrite[self.scope][v.left.target.id] = v.left # type:ignore[assignment] + v.left.target.id = pytest_temp + self.push_format_context() + res, expl = self.visit(v) + body.append(ast.Assign([ast.Name(res_var, ast.Store())], res)) + expl_format = self.pop_format_context(ast.Constant(expl)) + call = ast.Call(app, [expl_format], []) + self.expl_stmts.append(ast.Expr(call)) + if i < levels: + cond: ast.expr = res + if is_or: + cond = ast.UnaryOp(ast.Not(), cond) + inner: list[ast.stmt] = [] + self.statements.append(ast.If(cond, inner, [])) + self.statements = body = inner + self.statements = save + self.expl_stmts = fail_save + expl_template = self.helper("_format_boolop", expl_list, ast.Constant(is_or)) + expl = self.pop_format_context(expl_template) + return ast.Name(res_var, ast.Load()), self.explanation_param(expl) + + def visit_UnaryOp(self, unary: ast.UnaryOp) -> tuple[ast.Name, str]: + pattern = UNARY_MAP[unary.op.__class__] + operand_res, operand_expl = self.visit(unary.operand) + res = self.assign(ast.UnaryOp(unary.op, operand_res)) + return res, pattern % (operand_expl,) + + def visit_BinOp(self, binop: ast.BinOp) -> tuple[ast.Name, str]: + symbol = BINOP_MAP[binop.op.__class__] + left_expr, left_expl = self.visit(binop.left) + right_expr, right_expl = self.visit(binop.right) + explanation = f"({left_expl} {symbol} {right_expl})" + res = self.assign(ast.BinOp(left_expr, binop.op, right_expr)) + return res, explanation + + def visit_Call(self, call: ast.Call) -> tuple[ast.Name, str]: + new_func, func_expl = self.visit(call.func) + arg_expls = [] + new_args = [] + new_kwargs = [] + for arg in call.args: + if isinstance(arg, ast.Name) and arg.id in self.variables_overwrite.get( + self.scope, {} + ): + arg = self.variables_overwrite[self.scope][arg.id] # type:ignore[assignment] + res, expl = self.visit(arg) + arg_expls.append(expl) + new_args.append(res) + for keyword in call.keywords: + if isinstance( + keyword.value, ast.Name + ) and keyword.value.id in self.variables_overwrite.get(self.scope, {}): + keyword.value = self.variables_overwrite[self.scope][keyword.value.id] # type:ignore[assignment] + res, expl = self.visit(keyword.value) + new_kwargs.append(ast.keyword(keyword.arg, res)) + 
if keyword.arg:
+                arg_expls.append(keyword.arg + "=" + expl)
+            else:  # **args have `arg` keywords with an .arg of None
+                arg_expls.append("**" + expl)
+
+        expl = "{}({})".format(func_expl, ", ".join(arg_expls))
+        new_call = ast.Call(new_func, new_args, new_kwargs)
+        res = self.assign(new_call)
+        res_expl = self.explanation_param(self.display(res))
+        outer_expl = f"{res_expl}\n{{{res_expl} = {expl}\n}}"
+        return res, outer_expl
+
+    def visit_Starred(self, starred: ast.Starred) -> tuple[ast.Starred, str]:
+        # A Starred node can appear in a function call.
+        res, expl = self.visit(starred.value)
+        new_starred = ast.Starred(res, starred.ctx)
+        return new_starred, "*" + expl
+
+    def visit_Attribute(self, attr: ast.Attribute) -> tuple[ast.Name, str]:
+        if not isinstance(attr.ctx, ast.Load):
+            return self.generic_visit(attr)
+        value, value_expl = self.visit(attr.value)
+        res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
+        res_expl = self.explanation_param(self.display(res))
+        pat = "%s\n{%s = %s.%s\n}"
+        expl = pat % (res_expl, res_expl, value_expl, attr.attr)
+        return res, expl
+
+    def visit_Compare(self, comp: ast.Compare) -> tuple[ast.expr, str]:
+        self.push_format_context()
+        # We first check if we have overwritten a variable in the previous assert
+        if isinstance(
+            comp.left, ast.Name
+        ) and comp.left.id in self.variables_overwrite.get(self.scope, {}):
+            comp.left = self.variables_overwrite[self.scope][comp.left.id]  # type:ignore[assignment]
+        if isinstance(comp.left, ast.NamedExpr):
+            self.variables_overwrite[self.scope][comp.left.target.id] = comp.left  # type:ignore[assignment]
+        left_res, left_expl = self.visit(comp.left)
+        if isinstance(comp.left, (ast.Compare, ast.BoolOp)):
+            left_expl = f"({left_expl})"
+        res_variables = [self.variable() for i in range(len(comp.ops))]
+        load_names: list[ast.expr] = [ast.Name(v, ast.Load()) for v in res_variables]
+        store_names = [ast.Name(v, ast.Store()) for v in res_variables]
+        it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
+        expls: list[ast.expr] = []
+        syms: list[ast.expr] = []
+        results = [left_res]
+        for i, op, next_operand in it:
+            if (
+                isinstance(next_operand, ast.NamedExpr)
+                and isinstance(left_res, ast.Name)
+                and next_operand.target.id == left_res.id
+            ):
+                next_operand.target.id = self.variable()
+                self.variables_overwrite[self.scope][left_res.id] = next_operand  # type:ignore[assignment]
+            next_res, next_expl = self.visit(next_operand)
+            if isinstance(next_operand, (ast.Compare, ast.BoolOp)):
+                next_expl = f"({next_expl})"
+            results.append(next_res)
+            sym = BINOP_MAP[op.__class__]
+            syms.append(ast.Constant(sym))
+            expl = f"{left_expl} {sym} {next_expl}"
+            expls.append(ast.Constant(expl))
+            res_expr = ast.Compare(left_res, [op], [next_res])
+            self.statements.append(ast.Assign([store_names[i]], res_expr))
+            left_res, left_expl = next_res, next_expl
+        # Use pytest.assertion.util._reprcompare if that's available.
+        expl_call = self.helper(
+            "_call_reprcompare",
+            ast.Tuple(syms, ast.Load()),
+            ast.Tuple(load_names, ast.Load()),
+            ast.Tuple(expls, ast.Load()),
+            ast.Tuple(results, ast.Load()),
+        )
+        if len(comp.ops) > 1:
+            res: ast.expr = ast.BoolOp(ast.And(), load_names)
+        else:
+            res = load_names[0]
+
+        return res, self.explanation_param(self.pop_format_context(expl_call))
+
+
+def try_makedirs(cache_dir: Path) -> bool:
+    """Attempt to create the given directory and its sub-directories.
+
+    Returns True if successful or if it already exists.
+ """ + try: + os.makedirs(cache_dir, exist_ok=True) + except (FileNotFoundError, NotADirectoryError, FileExistsError): + # One of the path components was not a directory: + # - we're in a zip file + # - it is a file + return False + except PermissionError: + return False + except OSError as e: + # as of now, EROFS doesn't have an equivalent OSError-subclass + # + # squashfuse_ll returns ENOSYS "OSError: [Errno 38] Function not + # implemented" for a read-only error + if e.errno in {errno.EROFS, errno.ENOSYS}: + return False + raise + return True + + +def get_cache_dir(file_path: Path) -> Path: + """Return the cache directory to write .pyc files for the given .py file path.""" + if sys.pycache_prefix: + # given: + # prefix = '/tmp/pycs' + # path = '/home/user/proj/test_app.py' + # we want: + # '/tmp/pycs/home/user/proj' + return Path(sys.pycache_prefix) / Path(*file_path.parts[1:-1]) + else: + # classic pycache directory + return file_path.parent / "__pycache__" diff --git a/venv/Lib/site-packages/_pytest/assertion/truncate.py b/venv/Lib/site-packages/_pytest/assertion/truncate.py new file mode 100644 index 0000000..b67f02c --- /dev/null +++ b/venv/Lib/site-packages/_pytest/assertion/truncate.py @@ -0,0 +1,117 @@ +"""Utilities for truncating assertion output. + +Current default behaviour is to truncate assertion explanations at +terminal lines, unless running with an assertions verbosity level of at least 2 or running on CI. +""" + +from __future__ import annotations + +from _pytest.assertion import util +from _pytest.config import Config +from _pytest.nodes import Item + + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = 8 * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required( + explanation: list[str], item: Item, max_length: int | None = None +) -> list[str]: + """Truncate this assertion explanation if the given test item is eligible.""" + if _should_truncate_item(item): + return _truncate_explanation(explanation) + return explanation + + +def _should_truncate_item(item: Item) -> bool: + """Whether or not this test item is eligible for truncation.""" + verbose = item.config.get_verbosity(Config.VERBOSITY_ASSERTIONS) + return verbose < 2 and not util.running_on_ci() + + +def _truncate_explanation( + input_lines: list[str], + max_lines: int | None = None, + max_chars: int | None = None, +) -> list[str]: + """Truncate given list of strings that makes up the assertion explanation. + + Truncates to either 8 lines, or 640 characters - whichever the input reaches + first, taking the truncation explanation into account. The remaining lines + will be replaced by a usage message. + """ + if max_lines is None: + max_lines = DEFAULT_MAX_LINES + if max_chars is None: + max_chars = DEFAULT_MAX_CHARS + + # Check if truncation required + input_char_count = len("".join(input_lines)) + # The length of the truncation explanation depends on the number of lines + # removed but is at least 68 characters: + # The real value is + # 64 (for the base message: + # '...\n...Full output truncated (1 line hidden), use '-vv' to show")' + # ) + # + 1 (for plural) + # + int(math.log10(len(input_lines) - max_lines)) (number of hidden line, at least 1) + # + 3 for the '...' added to the truncated line + # But if there's more than 100 lines it's very likely that we're going to + # truncate, so we don't need the exact value using log10. + tolerable_max_chars = ( + max_chars + 70 # 64 + 1 (for plural) + 2 (for '99') + 3 for '...' 
+ ) + # The truncation explanation add two lines to the output + tolerable_max_lines = max_lines + 2 + if ( + len(input_lines) <= tolerable_max_lines + and input_char_count <= tolerable_max_chars + ): + return input_lines + # Truncate first to max_lines, and then truncate to max_chars if necessary + truncated_explanation = input_lines[:max_lines] + truncated_char = True + # We reevaluate the need to truncate chars following removal of some lines + if len("".join(truncated_explanation)) > tolerable_max_chars: + truncated_explanation = _truncate_by_char_count( + truncated_explanation, max_chars + ) + else: + truncated_char = False + + truncated_line_count = len(input_lines) - len(truncated_explanation) + if truncated_explanation[-1]: + # Add ellipsis and take into account part-truncated final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." + if truncated_char: + # It's possible that we did not remove any char from this line + truncated_line_count += 1 + else: + # Add proper ellipsis when we were able to fit a full line exactly + truncated_explanation[-1] = "..." + return [ + *truncated_explanation, + "", + f"...Full output truncated ({truncated_line_count} line" + f"{'' if truncated_line_count == 1 else 's'} hidden), {USAGE_MSG}", + ] + + +def _truncate_by_char_count(input_lines: list[str], max_chars: int) -> list[str]: + # Find point at which input length exceeds total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/venv/Lib/site-packages/_pytest/assertion/util.py b/venv/Lib/site-packages/_pytest/assertion/util.py new file mode 100644 index 0000000..4dc1af4 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/assertion/util.py @@ -0,0 +1,609 @@ +# mypy: allow-untyped-defs +"""Utilities for assertion debugging.""" + +from __future__ import annotations + +import collections.abc +import os +import pprint +from typing import AbstractSet +from typing import Any +from typing import Callable +from typing import Iterable +from typing import Literal +from typing import Mapping +from typing import Protocol +from typing import Sequence +from unicodedata import normalize + +from _pytest import outcomes +import _pytest._code +from _pytest._io.pprint import PrettyPrinter +from _pytest._io.saferepr import saferepr +from _pytest._io.saferepr import saferepr_unlimited +from _pytest.config import Config + + +# The _reprcompare attribute on the util module is used by the new assertion +# interpretation code and assertion rewriter to detect this plugin was +# loaded and in turn call the hooks defined here as part of the +# DebugInterpreter. +_reprcompare: Callable[[str, object, object], str | None] | None = None + +# Works similarly as _reprcompare attribute. Is populated with the hook call +# when pytest_runtest_setup is called. +_assertion_pass: Callable[[int, str, str], None] | None = None + +# Config object which is assigned during pytest_runtest_protocol. 
+_config: Config | None = None
+
+
+class _HighlightFunc(Protocol):
+ def __call__(self, source: str, lexer: Literal["diff", "python"] = "python") -> str:
+ """Apply highlighting to the given source."""
+
+
+def format_explanation(explanation: str) -> str:
+ r"""Format an explanation.
+
+ Normally all embedded newlines are escaped, however there are
+ three exceptions: \n{, \n} and \n~. The first two are intended to
+ cover nested explanations, see function and attribute explanations
+ for examples (.visit_Call(), visit_Attribute()). The last one is
+ for when one explanation needs to span multiple lines, e.g. when
+ displaying diffs.
+ """
+ lines = _split_explanation(explanation)
+ result = _format_lines(lines)
+ return "\n".join(result)
+
+
+def _split_explanation(explanation: str) -> list[str]:
+ r"""Return a list of individual lines in the explanation.
+
+ This will return a list of lines split on '\n{', '\n}' and '\n~'.
+ Any other newlines will be escaped and appear in the line as the
+ literal '\n' characters.
+ """
+ raw_lines = (explanation or "").split("\n")
+ lines = [raw_lines[0]]
+ for values in raw_lines[1:]:
+ if values and values[0] in ["{", "}", "~", ">"]:
+ lines.append(values)
+ else:
+ lines[-1] += "\\n" + values
+ return lines
+
+
+def _format_lines(lines: Sequence[str]) -> list[str]:
+ """Format the individual lines.
+
+ This will replace the '{', '}' and '~' characters of our mini formatting
+ language with the proper 'where ...', 'and ...' and ' + ...' text, taking
+ care of indentation along the way.
+
+ Return a list of formatted lines.
+ """
+ result = list(lines[:1])
+ stack = [0]
+ stackcnt = [0]
+ for line in lines[1:]:
+ if line.startswith("{"):
+ if stackcnt[-1]:
+ s = "and "
+ else:
+ s = "where "
+ stack.append(len(result))
+ stackcnt[-1] += 1
+ stackcnt.append(0)
+ result.append(" +" + " " * (len(stack) - 1) + s + line[1:])
+ elif line.startswith("}"):
+ stack.pop()
+ stackcnt.pop()
+ result[stack[-1]] += line[1:]
+ else:
+ assert line[0] in ["~", ">"]
+ stack[-1] += 1
+ indent = len(stack) if line.startswith("~") else len(stack) - 1
+ result.append(" " * indent + line[1:])
+ assert len(stack) == 1
+ return result
+
+
+def issequence(x: Any) -> bool:
+ return isinstance(x, collections.abc.Sequence) and not isinstance(x, str)
+
+
+def istext(x: Any) -> bool:
+ return isinstance(x, str)
+
+
+def isdict(x: Any) -> bool:
+ return isinstance(x, dict)
+
+
+def isset(x: Any) -> bool:
+ return isinstance(x, (set, frozenset))
+
+
+def isnamedtuple(obj: Any) -> bool:
+ return isinstance(obj, tuple) and getattr(obj, "_fields", None) is not None
+
+
+def isdatacls(obj: Any) -> bool:
+ return getattr(obj, "__dataclass_fields__", None) is not None
+
+
+def isattrs(obj: Any) -> bool:
+ return getattr(obj, "__attrs_attrs__", None) is not None
+
+
+def isiterable(obj: Any) -> bool:
+ try:
+ iter(obj)
+ return not istext(obj)
+ except Exception:
+ return False
+
+
+def has_default_eq(
+ obj: object,
+) -> bool:
+ """Check if an instance of an object contains the default eq
+
+ First, we check if the object's __eq__ attribute has __code__,
+ if so, we check the equality of the method code filename (__code__.co_filename)
+ to the default one generated by the dataclass and attr module
+ for dataclasses the default co_filename is <string>, for attrs class, the __eq__ should contain "attrs eq generated"
+ """
+ # inspired from https://github.com/willmcgugan/rich/blob/07d51ffc1aee6f16bd2e5a25b4e82850fb9ed778/rich/pretty.py#L68
+ if hasattr(obj.__eq__, "__code__") and hasattr(obj.__eq__.__code__, "co_filename"):
+ code_filename = obj.__eq__.__code__.co_filename
+
+ if isattrs(obj):
+ return "attrs generated eq" in code_filename
+
+ return code_filename == "<string>" # data class
+ return True
+
+
+def assertrepr_compare(
+ config, op: str, left: Any, right: Any, use_ascii: bool = False
+) -> list[str] | None:
+ """Return specialised explanations for some operators/operands."""
+ verbose = config.get_verbosity(Config.VERBOSITY_ASSERTIONS)
+
+ # Strings which normalize equal are often hard to distinguish when printed; use ascii() to make this easier.
+ # See issue #3246.
+ use_ascii = (
+ isinstance(left, str)
+ and isinstance(right, str)
+ and normalize("NFD", left) == normalize("NFD", right)
+ )
+
+ if verbose > 1:
+ left_repr = saferepr_unlimited(left, use_ascii=use_ascii)
+ right_repr = saferepr_unlimited(right, use_ascii=use_ascii)
+ else:
+ # XXX: "15 chars indentation" is wrong
+ # ("E AssertionError: assert "); should use term width.
+ maxsize = (
+ 80 - 15 - len(op) - 2
+ ) // 2 # 15 chars indentation, 1 space around op
+
+ left_repr = saferepr(left, maxsize=maxsize, use_ascii=use_ascii)
+ right_repr = saferepr(right, maxsize=maxsize, use_ascii=use_ascii)
+
+ summary = f"{left_repr} {op} {right_repr}"
+ highlighter = config.get_terminal_writer()._highlight
+
+ explanation = None
+ try:
+ if op == "==":
+ explanation = _compare_eq_any(left, right, highlighter, verbose)
+ elif op == "not in":
+ if istext(left) and istext(right):
+ explanation = _notin_text(left, right, verbose)
+ elif op == "!=":
+ if isset(left) and isset(right):
+ explanation = ["Both sets are equal"]
+ elif op == ">=":
+ if isset(left) and isset(right):
+ explanation = _compare_gte_set(left, right, highlighter, verbose)
+ elif op == "<=":
+ if isset(left) and isset(right):
+ explanation = _compare_lte_set(left, right, highlighter, verbose)
+ elif op == ">":
+ if isset(left) and isset(right):
+ explanation = _compare_gt_set(left, right, highlighter, verbose)
+ elif op == "<":
+ if isset(left) and isset(right):
+ explanation = _compare_lt_set(left, right, highlighter, verbose)
+
+ except outcomes.Exit:
+ raise
+ except Exception:
+ repr_crash = _pytest._code.ExceptionInfo.from_current()._getreprcrash()
+ explanation = [
+ f"(pytest_assertion plugin: representation of details failed: {repr_crash}.",
+ " Probably an object has a faulty __repr__.)",
+ ]
+
+ if not explanation:
+ return None
+
+ if explanation[0] != "":
+ explanation = ["", *explanation]
+ return [summary, *explanation]
+
+
+def _compare_eq_any(
+ left: Any, right: Any, highlighter: _HighlightFunc, verbose: int = 0
+) -> list[str]:
+ explanation = []
+ if istext(left) and istext(right):
+ explanation = _diff_text(left, right, verbose)
+ else:
+ from _pytest.python_api import ApproxBase
+
+ if isinstance(left, ApproxBase) or isinstance(right, ApproxBase):
+ # Although the common order should be obtained == expected, this ensures both ways
+ approx_side = left if isinstance(left, ApproxBase) else right
+ other_side = right if isinstance(left, ApproxBase) else left
+
+ explanation = approx_side._repr_compare(other_side)
+ elif type(left) is type(right) and (
+ isdatacls(left) or isattrs(left) or isnamedtuple(left)
+ ):
+ # Note: unlike dataclasses/attrs, namedtuples compare only the
+ # field values, not the type or field names.
+ explanation = _compare_eq_cls(left, right, highlighter, verbose) + elif issequence(left) and issequence(right): + explanation = _compare_eq_sequence(left, right, highlighter, verbose) + elif isset(left) and isset(right): + explanation = _compare_eq_set(left, right, highlighter, verbose) + elif isdict(left) and isdict(right): + explanation = _compare_eq_dict(left, right, highlighter, verbose) + + if isiterable(left) and isiterable(right): + expl = _compare_eq_iterable(left, right, highlighter, verbose) + explanation.extend(expl) + + return explanation + + +def _diff_text(left: str, right: str, verbose: int = 0) -> list[str]: + """Return the explanation for the diff between text. + + Unless --verbose is used this will skip leading and trailing + characters which are identical to keep the diff minimal. + """ + from difflib import ndiff + + explanation: list[str] = [] + + if verbose < 1: + i = 0 # just in case left or right has zero length + for i in range(min(len(left), len(right))): + if left[i] != right[i]: + break + if i > 42: + i -= 10 # Provide some context + explanation = [ + f"Skipping {i} identical leading characters in diff, use -v to show" + ] + left = left[i:] + right = right[i:] + if len(left) == len(right): + for i in range(len(left)): + if left[-i] != right[-i]: + break + if i > 42: + i -= 10 # Provide some context + explanation += [ + f"Skipping {i} identical trailing " + "characters in diff, use -v to show" + ] + left = left[:-i] + right = right[:-i] + keepends = True + if left.isspace() or right.isspace(): + left = repr(str(left)) + right = repr(str(right)) + explanation += ["Strings contain only whitespace, escaping them using repr()"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation += [ + line.strip("\n") + for line in ndiff(right.splitlines(keepends), left.splitlines(keepends)) + ] + return explanation + + +def _compare_eq_iterable( + left: Iterable[Any], + right: Iterable[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + if verbose <= 0 and not running_on_ci(): + return ["Use -v to get more diff"] + # dynamic import to speedup pytest + import difflib + + left_formatting = PrettyPrinter().pformat(left).splitlines() + right_formatting = PrettyPrinter().pformat(right).splitlines() + + explanation = ["", "Full diff:"] + # "right" is the expected base against which we compare "left", + # see https://github.com/pytest-dev/pytest/issues/3333 + explanation.extend( + highlighter( + "\n".join( + line.rstrip() + for line in difflib.ndiff(right_formatting, left_formatting) + ), + lexer="diff", + ).splitlines() + ) + return explanation + + +def _compare_eq_sequence( + left: Sequence[Any], + right: Sequence[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes) + explanation: list[str] = [] + len_left = len(left) + len_right = len(right) + for i in range(min(len_left, len_right)): + if left[i] != right[i]: + if comparing_bytes: + # when comparing bytes, we want to see their ascii representation + # instead of their numeric values (#5260) + # using a slice gives us the ascii representation: + # >>> s = b'foo' + # >>> s[0] + # 102 + # >>> s[0:1] + # b'f' + left_value = left[i : i + 1] + right_value = right[i : i + 1] + else: + left_value = left[i] + right_value = right[i] + + explanation.append( + f"At index {i} diff:" + f" {highlighter(repr(left_value))} != 
{highlighter(repr(right_value))}" + ) + break + + if comparing_bytes: + # when comparing bytes, it doesn't help to show the "sides contain one or more + # items" longer explanation, so skip it + + return explanation + + len_diff = len_left - len_right + if len_diff: + if len_diff > 0: + dir_with_more = "Left" + extra = saferepr(left[len_right]) + else: + len_diff = 0 - len_diff + dir_with_more = "Right" + extra = saferepr(right[len_left]) + + if len_diff == 1: + explanation += [ + f"{dir_with_more} contains one more item: {highlighter(extra)}" + ] + else: + explanation += [ + "%s contains %d more items, first extra item: %s" + % (dir_with_more, len_diff, highlighter(extra)) + ] + return explanation + + +def _compare_eq_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = [] + explanation.extend(_set_one_sided_diff("left", left, right, highlighter)) + explanation.extend(_set_one_sided_diff("right", right, left, highlighter)) + return explanation + + +def _compare_gt_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = _compare_gte_set(left, right, highlighter) + if not explanation: + return ["Both sets are equal"] + return explanation + + +def _compare_lt_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation = _compare_lte_set(left, right, highlighter) + if not explanation: + return ["Both sets are equal"] + return explanation + + +def _compare_gte_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + return _set_one_sided_diff("right", right, left, highlighter) + + +def _compare_lte_set( + left: AbstractSet[Any], + right: AbstractSet[Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + return _set_one_sided_diff("left", left, right, highlighter) + + +def _set_one_sided_diff( + posn: str, + set1: AbstractSet[Any], + set2: AbstractSet[Any], + highlighter: _HighlightFunc, +) -> list[str]: + explanation = [] + diff = set1 - set2 + if diff: + explanation.append(f"Extra items in the {posn} set:") + for item in diff: + explanation.append(highlighter(saferepr(item))) + return explanation + + +def _compare_eq_dict( + left: Mapping[Any, Any], + right: Mapping[Any, Any], + highlighter: _HighlightFunc, + verbose: int = 0, +) -> list[str]: + explanation: list[str] = [] + set_left = set(left) + set_right = set(right) + common = set_left.intersection(set_right) + same = {k: left[k] for k in common if left[k] == right[k]} + if same and verbose < 2: + explanation += [f"Omitting {len(same)} identical items, use -vv to show"] + elif same: + explanation += ["Common items:"] + explanation += highlighter(pprint.pformat(same)).splitlines() + diff = {k for k in common if left[k] != right[k]} + if diff: + explanation += ["Differing items:"] + for k in diff: + explanation += [ + highlighter(saferepr({k: left[k]})) + + " != " + + highlighter(saferepr({k: right[k]})) + ] + extra_left = set_left - set_right + len_extra_left = len(extra_left) + if len_extra_left: + explanation.append( + "Left contains %d more item%s:" + % (len_extra_left, "" if len_extra_left == 1 else "s") + ) + explanation.extend( + highlighter(pprint.pformat({k: left[k] for k in extra_left})).splitlines() + ) + extra_right = set_right - set_left + len_extra_right = len(extra_right) + if 
len_extra_right: + explanation.append( + "Right contains %d more item%s:" + % (len_extra_right, "" if len_extra_right == 1 else "s") + ) + explanation.extend( + highlighter(pprint.pformat({k: right[k] for k in extra_right})).splitlines() + ) + return explanation + + +def _compare_eq_cls( + left: Any, right: Any, highlighter: _HighlightFunc, verbose: int +) -> list[str]: + if not has_default_eq(left): + return [] + if isdatacls(left): + import dataclasses + + all_fields = dataclasses.fields(left) + fields_to_check = [info.name for info in all_fields if info.compare] + elif isattrs(left): + all_fields = left.__attrs_attrs__ + fields_to_check = [field.name for field in all_fields if getattr(field, "eq")] + elif isnamedtuple(left): + fields_to_check = left._fields + else: + assert False + + indent = " " + same = [] + diff = [] + for field in fields_to_check: + if getattr(left, field) == getattr(right, field): + same.append(field) + else: + diff.append(field) + + explanation = [] + if same or diff: + explanation += [""] + if same and verbose < 2: + explanation.append(f"Omitting {len(same)} identical items, use -vv to show") + elif same: + explanation += ["Matching attributes:"] + explanation += highlighter(pprint.pformat(same)).splitlines() + if diff: + explanation += ["Differing attributes:"] + explanation += highlighter(pprint.pformat(diff)).splitlines() + for field in diff: + field_left = getattr(left, field) + field_right = getattr(right, field) + explanation += [ + "", + f"Drill down into differing attribute {field}:", + f"{indent}{field}: {highlighter(repr(field_left))} != {highlighter(repr(field_right))}", + ] + explanation += [ + indent + line + for line in _compare_eq_any( + field_left, field_right, highlighter, verbose + ) + ] + return explanation + + +def _notin_text(term: str, text: str, verbose: int = 0) -> list[str]: + index = text.find(term) + head = text[:index] + tail = text[index + len(term) :] + correct_text = head + tail + diff = _diff_text(text, correct_text, verbose) + newdiff = [f"{saferepr(term, maxsize=42)} is contained here:"] + for line in diff: + if line.startswith("Skipping"): + continue + if line.startswith("- "): + continue + if line.startswith("+ "): + newdiff.append(" " + line[2:]) + else: + newdiff.append(line) + return newdiff + + +def running_on_ci() -> bool: + """Check if we're currently running on a CI system.""" + env_vars = ["CI", "BUILD_NUMBER"] + return any(var in os.environ for var in env_vars) diff --git a/venv/Lib/site-packages/_pytest/cacheprovider.py b/venv/Lib/site-packages/_pytest/cacheprovider.py new file mode 100644 index 0000000..1b236ef --- /dev/null +++ b/venv/Lib/site-packages/_pytest/cacheprovider.py @@ -0,0 +1,626 @@ +# mypy: allow-untyped-defs +"""Implementation of the cache provider.""" + +# This plugin was not named "cache" to avoid conflicts with the external +# pytest-cache version. 
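
(A quick orientation before the implementation: the plugin below ultimately exposes the `cache` fixture, whose `get`/`set` pair persists JSON-serialisable values across test sessions. A minimal usage sketch; the key name "example/expensive" is hypothetical, chosen to follow the `/`-separated key convention documented on `Cache.get`/`Cache.set` further down:)

    # test_cache_demo.py -- hypothetical test module using the `cache` fixture
    def test_expensive_result_is_cached(cache):
        value = cache.get("example/expensive", None)  # default is returned on a miss
        if value is None:
            value = 42  # stand-in for an expensive computation
            cache.set("example/expensive", value)  # persisted for the next session
        assert value == 42
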
+from __future__ import annotations + +import dataclasses +import errno +import json +import os +from pathlib import Path +import tempfile +from typing import final +from typing import Generator +from typing import Iterable + +from .pathlib import resolve_from_str +from .pathlib import rm_rf +from .reports import CollectReport +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.nodes import Directory +from _pytest.nodes import File +from _pytest.reports import TestReport + + +README_CONTENT = """\ +# pytest cache directory # + +This directory contains data from the pytest's cache plugin, +which provides the `--lf` and `--ff` options, as well as the `cache` fixture. + +**Do not** commit this to version control. + +See [the docs](https://docs.pytest.org/en/stable/how-to/cache.html) for more information. +""" + +CACHEDIR_TAG_CONTENT = b"""\ +Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by pytest. +# For information about cache directory tags, see: +# https://bford.info/cachedir/spec.html +""" + + +@final +@dataclasses.dataclass +class Cache: + """Instance of the `cache` fixture.""" + + _cachedir: Path = dataclasses.field(repr=False) + _config: Config = dataclasses.field(repr=False) + + # Sub-directory under cache-dir for directories created by `mkdir()`. + _CACHE_PREFIX_DIRS = "d" + + # Sub-directory under cache-dir for values created by `set()`. + _CACHE_PREFIX_VALUES = "v" + + def __init__( + self, cachedir: Path, config: Config, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._cachedir = cachedir + self._config = config + + @classmethod + def for_config(cls, config: Config, *, _ispytest: bool = False) -> Cache: + """Create the Cache instance for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + cachedir = cls.cache_dir_from_config(config, _ispytest=True) + if config.getoption("cacheclear") and cachedir.is_dir(): + cls.clear_cache(cachedir, _ispytest=True) + return cls(cachedir, config, _ispytest=True) + + @classmethod + def clear_cache(cls, cachedir: Path, _ispytest: bool = False) -> None: + """Clear the sub-directories used to hold cached directories and values. + + :meta private: + """ + check_ispytest(_ispytest) + for prefix in (cls._CACHE_PREFIX_DIRS, cls._CACHE_PREFIX_VALUES): + d = cachedir / prefix + if d.is_dir(): + rm_rf(d) + + @staticmethod + def cache_dir_from_config(config: Config, *, _ispytest: bool = False) -> Path: + """Get the path to the cache directory for a Config. + + :meta private: + """ + check_ispytest(_ispytest) + return resolve_from_str(config.getini("cache_dir"), config.rootpath) + + def warn(self, fmt: str, *, _ispytest: bool = False, **args: object) -> None: + """Issue a cache warning. 
+ + :meta private: + """ + check_ispytest(_ispytest) + import warnings + + from _pytest.warning_types import PytestCacheWarning + + warnings.warn( + PytestCacheWarning(fmt.format(**args) if args else fmt), + self._config.hook, + stacklevel=3, + ) + + def _mkdir(self, path: Path) -> None: + self._ensure_cache_dir_and_supporting_files() + path.mkdir(exist_ok=True, parents=True) + + def mkdir(self, name: str) -> Path: + """Return a directory path object with the given name. + + If the directory does not yet exist, it will be created. You can use + it to manage files to e.g. store/retrieve database dumps across test + sessions. + + .. versionadded:: 7.0 + + :param name: + Must be a string not containing a ``/`` separator. + Make sure the name contains your plugin or application + identifiers to prevent clashes with other cache users. + """ + path = Path(name) + if len(path.parts) > 1: + raise ValueError("name is not allowed to contain path separators") + res = self._cachedir.joinpath(self._CACHE_PREFIX_DIRS, path) + self._mkdir(res) + return res + + def _getvaluepath(self, key: str) -> Path: + return self._cachedir.joinpath(self._CACHE_PREFIX_VALUES, Path(key)) + + def get(self, key: str, default): + """Return the cached value for the given key. + + If no value was yet cached or the value cannot be read, the specified + default is returned. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param default: + The value to return in case of a cache-miss or invalid cache value. + """ + path = self._getvaluepath(key) + try: + with path.open("r", encoding="UTF-8") as f: + return json.load(f) + except (ValueError, OSError): + return default + + def set(self, key: str, value: object) -> None: + """Save value for the given key. + + :param key: + Must be a ``/`` separated value. Usually the first + name is the name of your plugin or your application. + :param value: + Must be of any combination of basic python types, + including nested types like lists of dictionaries. + """ + path = self._getvaluepath(key) + try: + self._mkdir(path.parent) + except OSError as exc: + self.warn( + f"could not create cache path {path}: {exc}", + _ispytest=True, + ) + return + data = json.dumps(value, ensure_ascii=False, indent=2) + try: + f = path.open("w", encoding="UTF-8") + except OSError as exc: + self.warn( + f"cache could not write path {path}: {exc}", + _ispytest=True, + ) + else: + with f: + f.write(data) + + def _ensure_cache_dir_and_supporting_files(self) -> None: + """Create the cache dir and its supporting files.""" + if self._cachedir.is_dir(): + return + + self._cachedir.parent.mkdir(parents=True, exist_ok=True) + with tempfile.TemporaryDirectory( + prefix="pytest-cache-files-", + dir=self._cachedir.parent, + ) as newpath: + path = Path(newpath) + + # Reset permissions to the default, see #12308. + # Note: there's no way to get the current umask atomically, eek. + umask = os.umask(0o022) + os.umask(umask) + path.chmod(0o777 - umask) + + with open(path.joinpath("README.md"), "x", encoding="UTF-8") as f: + f.write(README_CONTENT) + with open(path.joinpath(".gitignore"), "x", encoding="UTF-8") as f: + f.write("# Created by pytest automatically.\n*\n") + with open(path.joinpath("CACHEDIR.TAG"), "xb") as f: + f.write(CACHEDIR_TAG_CONTENT) + + try: + path.rename(self._cachedir) + except OSError as e: + # If 2 concurrent pytests both race to the rename, the loser + # gets "Directory not empty" from the rename. 
In this case, + # everything is handled so just continue (while letting the + # temporary directory be cleaned up). + # On Windows, the error is a FileExistsError which translates to EEXIST. + if e.errno not in (errno.ENOTEMPTY, errno.EEXIST): + raise + else: + # Create a directory in place of the one we just moved so that + # `TemporaryDirectory`'s cleanup doesn't complain. + # + # TODO: pass ignore_cleanup_errors=True when we no longer support python < 3.10. + # See https://github.com/python/cpython/issues/74168. Note that passing + # delete=False would do the wrong thing in case of errors and isn't supported + # until python 3.12. + path.mkdir() + + +class LFPluginCollWrapper: + def __init__(self, lfplugin: LFPlugin) -> None: + self.lfplugin = lfplugin + self._collected_at_least_one_failure = False + + @hookimpl(wrapper=True) + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> Generator[None, CollectReport, CollectReport]: + res = yield + if isinstance(collector, (Session, Directory)): + # Sort any lf-paths to the beginning. + lf_paths = self.lfplugin._last_failed_paths + + # Use stable sort to prioritize last failed. + def sort_key(node: nodes.Item | nodes.Collector) -> bool: + return node.path in lf_paths + + res.result = sorted( + res.result, + key=sort_key, + reverse=True, + ) + + elif isinstance(collector, File): + if collector.path in self.lfplugin._last_failed_paths: + result = res.result + lastfailed = self.lfplugin.lastfailed + + # Only filter with known failures. + if not self._collected_at_least_one_failure: + if not any(x.nodeid in lastfailed for x in result): + return res + self.lfplugin.config.pluginmanager.register( + LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip" + ) + self._collected_at_least_one_failure = True + + session = collector.session + result[:] = [ + x + for x in result + if x.nodeid in lastfailed + # Include any passed arguments (not trivial to filter). + or session.isinitpath(x.path) + # Keep all sub-collectors. 
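+ # (kept sub-collectors are still pruned later, at item level,
+ # by LFPlugin.pytest_collection_modifyitems)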
+ or isinstance(x, nodes.Collector) + ] + + return res + + +class LFPluginCollSkipfiles: + def __init__(self, lfplugin: LFPlugin) -> None: + self.lfplugin = lfplugin + + @hookimpl + def pytest_make_collect_report( + self, collector: nodes.Collector + ) -> CollectReport | None: + if isinstance(collector, File): + if collector.path not in self.lfplugin._last_failed_paths: + self.lfplugin._skipped_files += 1 + + return CollectReport( + collector.nodeid, "passed", longrepr=None, result=[] + ) + return None + + +class LFPlugin: + """Plugin which implements the --lf (run last-failing) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + active_keys = "lf", "failedfirst" + self.active = any(config.getoption(key) for key in active_keys) + assert config.cache + self.lastfailed: dict[str, bool] = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count: int | None = None + self._report_status: str | None = None + self._skipped_files = 0 # count skipped files during collection due to --lf + + if config.getoption("lf"): + self._last_failed_paths = self.get_last_failed_paths() + config.pluginmanager.register( + LFPluginCollWrapper(self), "lfplugin-collwrapper" + ) + + def get_last_failed_paths(self) -> set[Path]: + """Return a set with all Paths of the previously failed nodeids and + their parents.""" + rootpath = self.config.rootpath + result = set() + for nodeid in self.lastfailed: + path = rootpath / nodeid.split("::")[0] + result.add(path) + result.update(path.parents) + return {x for x in result if x.exists()} + + def pytest_report_collectionfinish(self) -> str | None: + if self.active and self.config.get_verbosity() >= 0: + return f"run-last-failure: {self._report_status}" + return None + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if (report.when == "call" and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: + self.lastfailed[report.nodeid] = True + + def pytest_collectreport(self, report: CollectReport) -> None: + passed = report.outcome in ("passed", "skipped") + if passed: + if report.nodeid in self.lastfailed: + self.lastfailed.pop(report.nodeid) + self.lastfailed.update((item.nodeid, True) for item in report.result) + else: + self.lastfailed[report.nodeid] = True + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection_modifyitems( + self, config: Config, items: list[nodes.Item] + ) -> Generator[None]: + res = yield + + if not self.active: + return res + + if self.lastfailed: + previously_failed = [] + previously_passed = [] + for item in items: + if item.nodeid in self.lastfailed: + previously_failed.append(item) + else: + previously_passed.append(item) + self._previously_failed_count = len(previously_failed) + + if not previously_failed: + # Running a subset of all tests with recorded failures + # only outside of it. 
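+ # (i.e. none of the recorded failures are part of the current selection)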
+ self._report_status = "%d known failures not in selected tests" % ( + len(self.lastfailed), + ) + else: + if self.config.getoption("lf"): + items[:] = previously_failed + config.hook.pytest_deselected(items=previously_passed) + else: # --failedfirst + items[:] = previously_failed + previously_passed + + noun = "failure" if self._previously_failed_count == 1 else "failures" + suffix = " first" if self.config.getoption("failedfirst") else "" + self._report_status = ( + f"rerun previous {self._previously_failed_count} {noun}{suffix}" + ) + + if self._skipped_files > 0: + files_noun = "file" if self._skipped_files == 1 else "files" + self._report_status += f" (skipped {self._skipped_files} {files_noun})" + else: + self._report_status = "no previously failed tests, " + if self.config.getoption("last_failed_no_failures") == "none": + self._report_status += "deselecting all items." + config.hook.pytest_deselected(items=items[:]) + items[:] = [] + else: + self._report_status += "not deselecting items." + + return res + + def pytest_sessionfinish(self, session: Session) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + assert config.cache is not None + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: + config.cache.set("cache/lastfailed", self.lastfailed) + + +class NFPlugin: + """Plugin which implements the --nf (run new-first) option.""" + + def __init__(self, config: Config) -> None: + self.config = config + self.active = config.option.newfirst + assert config.cache is not None + self.cached_nodeids = set(config.cache.get("cache/nodeids", [])) + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection_modifyitems(self, items: list[nodes.Item]) -> Generator[None]: + res = yield + + if self.active: + new_items: dict[str, nodes.Item] = {} + other_items: dict[str, nodes.Item] = {} + for item in items: + if item.nodeid not in self.cached_nodeids: + new_items[item.nodeid] = item + else: + other_items[item.nodeid] = item + + items[:] = self._get_increasing_order( + new_items.values() + ) + self._get_increasing_order(other_items.values()) + self.cached_nodeids.update(new_items) + else: + self.cached_nodeids.update(item.nodeid for item in items) + + return res + + def _get_increasing_order(self, items: Iterable[nodes.Item]) -> list[nodes.Item]: + return sorted(items, key=lambda item: item.path.stat().st_mtime, reverse=True) + + def pytest_sessionfinish(self) -> None: + config = self.config + if config.getoption("cacheshow") or hasattr(config, "workerinput"): + return + + if config.getoption("collectonly"): + return + + assert config.cache is not None + config.cache.set("cache/nodeids", sorted(self.cached_nodeids)) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--lf", + "--last-failed", + action="store_true", + dest="lf", + help="Rerun only the tests that failed " + "at the last run (or all if none failed)", + ) + group.addoption( + "--ff", + "--failed-first", + action="store_true", + dest="failedfirst", + help="Run all tests, but run the last failures first. 
" + "This may re-order tests and thus lead to " + "repeated fixture setup/teardown.", + ) + group.addoption( + "--nf", + "--new-first", + action="store_true", + dest="newfirst", + help="Run tests from new files first, then the rest of the tests " + "sorted by file mtime", + ) + group.addoption( + "--cache-show", + action="append", + nargs="?", + dest="cacheshow", + help=( + "Show cache contents, don't perform collection or tests. " + "Optional argument: glob (default: '*')." + ), + ) + group.addoption( + "--cache-clear", + action="store_true", + dest="cacheclear", + help="Remove all cache contents at start of test run", + ) + cache_dir_default = ".pytest_cache" + if "TOX_ENV_DIR" in os.environ: + cache_dir_default = os.path.join(os.environ["TOX_ENV_DIR"], cache_dir_default) + parser.addini("cache_dir", default=cache_dir_default, help="Cache directory path") + group.addoption( + "--lfnf", + "--last-failed-no-failures", + action="store", + dest="last_failed_no_failures", + choices=("all", "none"), + default="all", + help="With ``--lf``, determines whether to execute tests when there " + "are no previously (known) failures or when no " + "cached ``lastfailed`` data was found. " + "``all`` (the default) runs the full test suite again. " + "``none`` just emits a message about no known failures and exits successfully.", + ) + + +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.cacheshow and not config.option.help: + from _pytest.main import wrap_session + + return wrap_session(config, cacheshow) + return None + + +@hookimpl(tryfirst=True) +def pytest_configure(config: Config) -> None: + config.cache = Cache.for_config(config, _ispytest=True) + config.pluginmanager.register(LFPlugin(config), "lfplugin") + config.pluginmanager.register(NFPlugin(config), "nfplugin") + + +@fixture +def cache(request: FixtureRequest) -> Cache: + """Return a cache object that can persist state between testing sessions. + + cache.get(key, default) + cache.set(key, value) + + Keys must be ``/`` separated strings, where the first part is usually the + name of your plugin or application to avoid clashes with other cache users. + + Values can be any object handled by the json stdlib module. + """ + assert request.config.cache is not None + return request.config.cache + + +def pytest_report_header(config: Config) -> str | None: + """Display cachedir with --cache-show and if non-default.""" + if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache": + assert config.cache is not None + cachedir = config.cache._cachedir + # TODO: evaluate generating upward relative paths + # starting with .., ../.. 
if sensible + + try: + displaypath = cachedir.relative_to(config.rootpath) + except ValueError: + displaypath = cachedir + return f"cachedir: {displaypath}" + return None + + +def cacheshow(config: Config, session: Session) -> int: + from pprint import pformat + + assert config.cache is not None + + tw = TerminalWriter() + tw.line("cachedir: " + str(config.cache._cachedir)) + if not config.cache._cachedir.is_dir(): + tw.line("cache is empty") + return 0 + + glob = config.option.cacheshow[0] + if glob is None: + glob = "*" + + dummy = object() + basedir = config.cache._cachedir + vdir = basedir / Cache._CACHE_PREFIX_VALUES + tw.sep("-", f"cache values for {glob!r}") + for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()): + key = str(valpath.relative_to(vdir)) + val = config.cache.get(key, dummy) + if val is dummy: + tw.line(f"{key} contains unreadable content, will be ignored") + else: + tw.line(f"{key} contains:") + for line in pformat(val).splitlines(): + tw.line(" " + line) + + ddir = basedir / Cache._CACHE_PREFIX_DIRS + if ddir.is_dir(): + contents = sorted(ddir.rglob(glob)) + tw.sep("-", f"cache directories for {glob!r}") + for p in contents: + # if p.is_dir(): + # print("%s/" % p.relative_to(basedir)) + if p.is_file(): + key = str(p.relative_to(basedir)) + tw.line(f"{key} is a file of length {p.stat().st_size:d}") + return 0 diff --git a/venv/Lib/site-packages/_pytest/capture.py b/venv/Lib/site-packages/_pytest/capture.py new file mode 100644 index 0000000..506c0b3 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/capture.py @@ -0,0 +1,1087 @@ +# mypy: allow-untyped-defs +"""Per-test stdout/stderr capturing mechanism.""" + +from __future__ import annotations + +import abc +import collections +import contextlib +import io +from io import UnsupportedOperation +import os +import sys +from tempfile import TemporaryFile +from types import TracebackType +from typing import Any +from typing import AnyStr +from typing import BinaryIO +from typing import Final +from typing import final +from typing import Generator +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import Literal +from typing import NamedTuple +from typing import TextIO +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from typing_extensions import Self + +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import SubRequest +from _pytest.nodes import Collector +from _pytest.nodes import File +from _pytest.nodes import Item +from _pytest.reports import CollectReport + + +_CaptureMethod = Literal["fd", "sys", "no", "tee-sys"] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "--capture", + action="store", + default="fd", + metavar="method", + choices=["fd", "sys", "no", "tee-sys"], + help="Per-test capturing method: one of fd|sys|no|tee-sys", + ) + group._addoption( + "-s", + action="store_const", + const="no", + dest="capture", + help="Shortcut for --capture=no", + ) + + +def _colorama_workaround() -> None: + """Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. 
+ """ + if sys.platform.startswith("win32"): + try: + import colorama # noqa: F401 + except ImportError: + pass + + +def _windowsconsoleio_workaround(stream: TextIO) -> None: + """Workaround for Windows Unicode console handling. + + Python 3.6 implemented Unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". + + :param stream: + In practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. + + See https://github.com/pytest-dev/py/issues/103. + """ + if not sys.platform.startswith("win32") or hasattr(sys, "pypy_version_info"): + return + + # Bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666). + if not hasattr(stream, "buffer"): # type: ignore[unreachable,unused-ignore] + return + + raw_stdout = stream.buffer.raw if hasattr(stream.buffer, "raw") else stream.buffer + + if not isinstance(raw_stdout, io._WindowsConsoleIO): # type: ignore[attr-defined,unused-ignore] + return + + def _reopen_stdio(f, mode): + if not hasattr(stream.buffer, "raw") and mode[0] == "w": + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), + f.encoding, + f.errors, + f.newlines, + f.line_buffering, + ) + + sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.stderr = _reopen_stdio(sys.stderr, "wb") + + +@hookimpl(wrapper=True) +def pytest_load_initial_conftests(early_config: Config) -> Generator[None]: + ns = early_config.known_args_namespace + if ns.capture == "fd": + _windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() + pluginmanager = early_config.pluginmanager + capman = CaptureManager(ns.capture) + pluginmanager.register(capman, "capturemanager") + + # Make sure that capturemanager is properly reset at final shutdown. + early_config.add_cleanup(capman.stop_global_capturing) + + # Finally trigger conftest loading but while capturing (issue #93). + capman.start_global_capturing() + try: + try: + yield + finally: + capman.suspend_global_capture() + except BaseException: + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + raise + + +# IO Helpers. + + +class EncodedFile(io.TextIOWrapper): + __slots__ = () + + @property + def name(self) -> str: + # Ensure that file.name is a string. Workaround for a Python bug + # fixed in >=3.7.4: https://bugs.python.org/issue36015 + return repr(self.buffer) + + @property + def mode(self) -> str: + # TextIOWrapper doesn't expose a mode, but at least some of our + # tests check it. 
+ return self.buffer.mode.replace("b", "") + + +class CaptureIO(io.TextIOWrapper): + def __init__(self) -> None: + super().__init__(io.BytesIO(), encoding="UTF-8", newline="", write_through=True) + + def getvalue(self) -> str: + assert isinstance(self.buffer, io.BytesIO) + return self.buffer.getvalue().decode("UTF-8") + + +class TeeCaptureIO(CaptureIO): + def __init__(self, other: TextIO) -> None: + self._other = other + super().__init__() + + def write(self, s: str) -> int: + super().write(s) + return self._other.write(s) + + +class DontReadFromInput(TextIO): + @property + def encoding(self) -> str: + assert sys.__stdin__ is not None + return sys.__stdin__.encoding + + def read(self, size: int = -1) -> str: + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + readline = read + + def __next__(self) -> str: + return self.readline() + + def readlines(self, hint: int | None = -1) -> list[str]: + raise OSError( + "pytest: reading from stdin while output is captured! Consider using `-s`." + ) + + def __iter__(self) -> Iterator[str]: + return self + + def fileno(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no fileno()") + + def flush(self) -> None: + raise UnsupportedOperation("redirected stdin is pseudofile, has no flush()") + + def isatty(self) -> bool: + return False + + def close(self) -> None: + pass + + def readable(self) -> bool: + return False + + def seek(self, offset: int, whence: int = 0) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no seek(int)") + + def seekable(self) -> bool: + return False + + def tell(self) -> int: + raise UnsupportedOperation("redirected stdin is pseudofile, has no tell()") + + def truncate(self, size: int | None = None) -> int: + raise UnsupportedOperation("cannot truncate stdin") + + def write(self, data: str) -> int: + raise UnsupportedOperation("cannot write to stdin") + + def writelines(self, lines: Iterable[str]) -> None: + raise UnsupportedOperation("Cannot write to stdin") + + def writable(self) -> bool: + return False + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + type: type[BaseException] | None, + value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + pass + + @property + def buffer(self) -> BinaryIO: + # The str/bytes doesn't actually matter in this type, so OK to fake. + return self # type: ignore[return-value] + + +# Capture classes. 
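
(To make the lifecycle below concrete: every capture class follows the same start/snap/suspend/resume/done protocol declared by CaptureBase. A minimal sketch using the SysCapture class defined later in this module; the demo function name is hypothetical, and fd 1 maps to sys.stdout via patchsysdict:)

    def demo_sys_capture() -> str:
        cap = SysCapture(1)  # swaps sys.stdout for an in-memory CaptureIO
        cap.start()
        try:
            print("captured line")  # lands in the temporary buffer, not the console
            return cap.snap()  # read the captured text and clear the buffer
        finally:
            cap.done()  # restore the original sys.stdout
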
+ + +class CaptureBase(abc.ABC, Generic[AnyStr]): + EMPTY_BUFFER: AnyStr + + @abc.abstractmethod + def __init__(self, fd: int) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def start(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def done(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def suspend(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def resume(self) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def writeorg(self, data: AnyStr) -> None: + raise NotImplementedError() + + @abc.abstractmethod + def snap(self) -> AnyStr: + raise NotImplementedError() + + +patchsysdict = {0: "stdin", 1: "stdout", 2: "stderr"} + + +class NoCapture(CaptureBase[str]): + EMPTY_BUFFER = "" + + def __init__(self, fd: int) -> None: + pass + + def start(self) -> None: + pass + + def done(self) -> None: + pass + + def suspend(self) -> None: + pass + + def resume(self) -> None: + pass + + def snap(self) -> str: + return "" + + def writeorg(self, data: str) -> None: + pass + + +class SysCaptureBase(CaptureBase[AnyStr]): + def __init__( + self, fd: int, tmpfile: TextIO | None = None, *, tee: bool = False + ) -> None: + name = patchsysdict[fd] + self._old: TextIO = getattr(sys, name) + self.name = name + if tmpfile is None: + if name == "stdin": + tmpfile = DontReadFromInput() + else: + tmpfile = CaptureIO() if not tee else TeeCaptureIO(self._old) + self.tmpfile = tmpfile + self._state = "initialized" + + def repr(self, class_name: str) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + class_name, + self.name, + hasattr(self, "_old") and repr(self._old) or "", + self._state, + self.tmpfile, + ) + + def __repr__(self) -> str: + return "<{} {} _old={} _state={!r} tmpfile={!r}>".format( + self.__class__.__name__, + self.name, + hasattr(self, "_old") and repr(self._old) or "", + self._state, + self.tmpfile, + ) + + def _assert_state(self, op: str, states: tuple[str, ...]) -> None: + assert ( + self._state in states + ), "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + + def start(self) -> None: + self._assert_state("start", ("initialized",)) + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + def done(self) -> None: + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + setattr(sys, self.name, self._old) + del self._old + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + setattr(sys, self.name, self._old) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + setattr(sys, self.name, self.tmpfile) + self._state = "started" + + +class SysCaptureBinary(SysCaptureBase[bytes]): + EMPTY_BUFFER = b"" + + def snap(self) -> bytes: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: bytes) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.flush() + self._old.buffer.write(data) + self._old.buffer.flush() + + +class SysCapture(SysCaptureBase[str]): + EMPTY_BUFFER = "" + + def snap(self) -> str: + self._assert_state("snap", ("started", "suspended")) + assert isinstance(self.tmpfile, CaptureIO) + res = 
self.tmpfile.getvalue() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: str) -> None: + self._assert_state("writeorg", ("started", "suspended")) + self._old.write(data) + self._old.flush() + + +class FDCaptureBase(CaptureBase[AnyStr]): + def __init__(self, targetfd: int) -> None: + self.targetfd = targetfd + + try: + os.fstat(targetfd) + except OSError: + # FD capturing is conceptually simple -- create a temporary file, + # redirect the FD to it, redirect back when done. But when the + # target FD is invalid it throws a wrench into this lovely scheme. + # + # Tests themselves shouldn't care if the FD is valid, FD capturing + # should work regardless of external circumstances. So falling back + # to just sys capturing is not a good option. + # + # Further complications are the need to support suspend() and the + # possibility of FD reuse (e.g. the tmpfile getting the very same + # target FD). The following approach is robust, I believe. + self.targetfd_invalid: int | None = os.open(os.devnull, os.O_RDWR) + os.dup2(self.targetfd_invalid, targetfd) + else: + self.targetfd_invalid = None + self.targetfd_save = os.dup(targetfd) + + if targetfd == 0: + self.tmpfile = open(os.devnull, encoding="utf-8") + self.syscapture: CaptureBase[str] = SysCapture(targetfd) + else: + self.tmpfile = EncodedFile( + TemporaryFile(buffering=0), + encoding="utf-8", + errors="replace", + newline="", + write_through=True, + ) + if targetfd in patchsysdict: + self.syscapture = SysCapture(targetfd, self.tmpfile) + else: + self.syscapture = NoCapture(targetfd) + + self._state = "initialized" + + def __repr__(self) -> str: + return ( + f"<{self.__class__.__name__} {self.targetfd} oldfd={self.targetfd_save} " + f"_state={self._state!r} tmpfile={self.tmpfile!r}>" + ) + + def _assert_state(self, op: str, states: tuple[str, ...]) -> None: + assert ( + self._state in states + ), "cannot {} in state {!r}: expected one of {}".format( + op, self._state, ", ".join(states) + ) + + def start(self) -> None: + """Start capturing on targetfd using memorized tmpfile.""" + self._assert_state("start", ("initialized",)) + os.dup2(self.tmpfile.fileno(), self.targetfd) + self.syscapture.start() + self._state = "started" + + def done(self) -> None: + """Stop capturing, restore streams, return original capture file, + seeked to position zero.""" + self._assert_state("done", ("initialized", "started", "suspended", "done")) + if self._state == "done": + return + os.dup2(self.targetfd_save, self.targetfd) + os.close(self.targetfd_save) + if self.targetfd_invalid is not None: + if self.targetfd_invalid != self.targetfd: + os.close(self.targetfd) + os.close(self.targetfd_invalid) + self.syscapture.done() + self.tmpfile.close() + self._state = "done" + + def suspend(self) -> None: + self._assert_state("suspend", ("started", "suspended")) + if self._state == "suspended": + return + self.syscapture.suspend() + os.dup2(self.targetfd_save, self.targetfd) + self._state = "suspended" + + def resume(self) -> None: + self._assert_state("resume", ("started", "suspended")) + if self._state == "started": + return + self.syscapture.resume() + os.dup2(self.tmpfile.fileno(), self.targetfd) + self._state = "started" + + +class FDCaptureBinary(FDCaptureBase[bytes]): + """Capture IO to/from a given OS-level file descriptor. + + snap() produces `bytes`. 
+ """ + + EMPTY_BUFFER = b"" + + def snap(self) -> bytes: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.buffer.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: bytes) -> None: + """Write to original file descriptor.""" + self._assert_state("writeorg", ("started", "suspended")) + os.write(self.targetfd_save, data) + + +class FDCapture(FDCaptureBase[str]): + """Capture IO to/from a given OS-level file descriptor. + + snap() produces text. + """ + + EMPTY_BUFFER = "" + + def snap(self) -> str: + self._assert_state("snap", ("started", "suspended")) + self.tmpfile.seek(0) + res = self.tmpfile.read() + self.tmpfile.seek(0) + self.tmpfile.truncate() + return res + + def writeorg(self, data: str) -> None: + """Write to original file descriptor.""" + self._assert_state("writeorg", ("started", "suspended")) + # XXX use encoding of original stream + os.write(self.targetfd_save, data.encode("utf-8")) + + +# MultiCapture + + +# Generic NamedTuple only supported since Python 3.11. +if sys.version_info >= (3, 11) or TYPE_CHECKING: + + @final + class CaptureResult(NamedTuple, Generic[AnyStr]): + """The result of :method:`caplog.readouterr() `.""" + + out: AnyStr + err: AnyStr + +else: + + class CaptureResult( + collections.namedtuple("CaptureResult", ["out", "err"]), # noqa: PYI024 + Generic[AnyStr], + ): + """The result of :method:`caplog.readouterr() `.""" + + __slots__ = () + + +class MultiCapture(Generic[AnyStr]): + _state = None + _in_suspended = False + + def __init__( + self, + in_: CaptureBase[AnyStr] | None, + out: CaptureBase[AnyStr] | None, + err: CaptureBase[AnyStr] | None, + ) -> None: + self.in_: CaptureBase[AnyStr] | None = in_ + self.out: CaptureBase[AnyStr] | None = out + self.err: CaptureBase[AnyStr] | None = err + + def __repr__(self) -> str: + return ( + f"" + ) + + def start_capturing(self) -> None: + self._state = "started" + if self.in_: + self.in_.start() + if self.out: + self.out.start() + if self.err: + self.err.start() + + def pop_outerr_to_orig(self) -> tuple[AnyStr, AnyStr]: + """Pop current snapshot out/err capture and flush to orig streams.""" + out, err = self.readouterr() + if out: + assert self.out is not None + self.out.writeorg(out) + if err: + assert self.err is not None + self.err.writeorg(err) + return out, err + + def suspend_capturing(self, in_: bool = False) -> None: + self._state = "suspended" + if self.out: + self.out.suspend() + if self.err: + self.err.suspend() + if in_ and self.in_: + self.in_.suspend() + self._in_suspended = True + + def resume_capturing(self) -> None: + self._state = "started" + if self.out: + self.out.resume() + if self.err: + self.err.resume() + if self._in_suspended: + assert self.in_ is not None + self.in_.resume() + self._in_suspended = False + + def stop_capturing(self) -> None: + """Stop capturing and reset capturing streams.""" + if self._state == "stopped": + raise ValueError("was already stopped") + self._state = "stopped" + if self.out: + self.out.done() + if self.err: + self.err.done() + if self.in_: + self.in_.done() + + def is_started(self) -> bool: + """Whether actively capturing -- not suspended or stopped.""" + return self._state == "started" + + def readouterr(self) -> CaptureResult[AnyStr]: + out = self.out.snap() if self.out else "" + err = self.err.snap() if self.err else "" + # TODO: This type error is real, need to fix. 
+ return CaptureResult(out, err) # type: ignore[arg-type] + + +def _get_multicapture(method: _CaptureMethod) -> MultiCapture[str]: + if method == "fd": + return MultiCapture(in_=FDCapture(0), out=FDCapture(1), err=FDCapture(2)) + elif method == "sys": + return MultiCapture(in_=SysCapture(0), out=SysCapture(1), err=SysCapture(2)) + elif method == "no": + return MultiCapture(in_=None, out=None, err=None) + elif method == "tee-sys": + return MultiCapture( + in_=None, out=SysCapture(1, tee=True), err=SysCapture(2, tee=True) + ) + raise ValueError(f"unknown capturing method: {method!r}") + + +# CaptureManager and CaptureFixture + + +class CaptureManager: + """The capture plugin. + + Ensures that the appropriate capture method is enabled/disabled during + collection and each test phase (setup, call, teardown). After each of + those points, the captured output is obtained and attached to the + collection/runtest report. + + There are two levels of capture: + + * global: enabled by default and can be suppressed by the ``-s`` + option. This is always enabled/disabled during collection and each test + phase. + + * fixture: when a test function or one of its fixtures depends on the + ``capsys`` or ``capfd`` fixtures. In this case special handling is + needed to ensure the fixtures take precedence over the global capture. + """ + + def __init__(self, method: _CaptureMethod) -> None: + self._method: Final = method + self._global_capturing: MultiCapture[str] | None = None + self._capture_fixture: CaptureFixture[Any] | None = None + + def __repr__(self) -> str: + return ( + f"<CaptureManager _method={self._method!r} _global_capturing={self._global_capturing!r} " + f"_capture_fixture={self._capture_fixture!r}>" + ) + + def is_capturing(self) -> str | bool: + if self.is_globally_capturing(): + return "global" + if self._capture_fixture: + return f"fixture {self._capture_fixture.request.fixturename}" + return False + + # Global capturing control + + def is_globally_capturing(self) -> bool: + return self._method != "no" + + def start_global_capturing(self) -> None: + assert self._global_capturing is None + self._global_capturing = _get_multicapture(self._method) + self._global_capturing.start_capturing() + + def stop_global_capturing(self) -> None: + if self._global_capturing is not None: + self._global_capturing.pop_outerr_to_orig() + self._global_capturing.stop_capturing() + self._global_capturing = None + + def resume_global_capture(self) -> None: + # During teardown of the Python process, and on rare occasions, capture + # attributes can be `None` while trying to resume global capture. + if self._global_capturing is not None: + self._global_capturing.resume_capturing() + + def suspend_global_capture(self, in_: bool = False) -> None: + if self._global_capturing is not None: + self._global_capturing.suspend_capturing(in_=in_) + + def suspend(self, in_: bool = False) -> None: + # Need to undo local capsys-et-al if it exists before disabling global capture.
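+ # (Order matters: the fixture capture sits on top of the global capture, + # so it is suspended first here and resumed last in resume() below.)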
+ self.suspend_fixture() + self.suspend_global_capture(in_) + + def resume(self) -> None: + self.resume_global_capture() + self.resume_fixture() + + def read_global_capture(self) -> CaptureResult[str]: + assert self._global_capturing is not None + return self._global_capturing.readouterr() + + # Fixture Control + + def set_fixture(self, capture_fixture: CaptureFixture[Any]) -> None: + if self._capture_fixture: + current_fixture = self._capture_fixture.request.fixturename + requested_fixture = capture_fixture.request.fixturename + capture_fixture.request.raiseerror( + f"cannot use {requested_fixture} and {current_fixture} at the same time" + ) + self._capture_fixture = capture_fixture + + def unset_fixture(self) -> None: + self._capture_fixture = None + + def activate_fixture(self) -> None: + """If the current item is using ``capsys`` or ``capfd``, activate + them so they take precedence over the global capture.""" + if self._capture_fixture: + self._capture_fixture._start() + + def deactivate_fixture(self) -> None: + """Deactivate the ``capsys`` or ``capfd`` fixture of this item, if any.""" + if self._capture_fixture: + self._capture_fixture.close() + + def suspend_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._suspend() + + def resume_fixture(self) -> None: + if self._capture_fixture: + self._capture_fixture._resume() + + # Helper context managers + + @contextlib.contextmanager + def global_and_fixture_disabled(self) -> Generator[None]: + """Context manager to temporarily disable global and current fixture capturing.""" + do_fixture = self._capture_fixture and self._capture_fixture._is_started() + if do_fixture: + self.suspend_fixture() + do_global = self._global_capturing and self._global_capturing.is_started() + if do_global: + self.suspend_global_capture() + try: + yield + finally: + if do_global: + self.resume_global_capture() + if do_fixture: + self.resume_fixture() + + @contextlib.contextmanager + def item_capture(self, when: str, item: Item) -> Generator[None]: + self.resume_global_capture() + self.activate_fixture() + try: + yield + finally: + self.deactivate_fixture() + self.suspend_global_capture(in_=False) + + out, err = self.read_global_capture() + item.add_report_section(when, "stdout", out) + item.add_report_section(when, "stderr", err) + + # Hooks + + @hookimpl(wrapper=True) + def pytest_make_collect_report( + self, collector: Collector + ) -> Generator[None, CollectReport, CollectReport]: + if isinstance(collector, File): + self.resume_global_capture() + try: + rep = yield + finally: + self.suspend_global_capture() + out, err = self.read_global_capture() + if out: + rep.sections.append(("Captured stdout", out)) + if err: + rep.sections.append(("Captured stderr", err)) + else: + rep = yield + return rep + + @hookimpl(wrapper=True) + def pytest_runtest_setup(self, item: Item) -> Generator[None]: + with self.item_capture("setup", item): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtest_call(self, item: Item) -> Generator[None]: + with self.item_capture("call", item): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtest_teardown(self, item: Item) -> Generator[None]: + with self.item_capture("teardown", item): + return (yield) + + @hookimpl(tryfirst=True) + def pytest_keyboard_interrupt(self) -> None: + self.stop_global_capturing() + + @hookimpl(tryfirst=True) + def pytest_internalerror(self) -> None: + self.stop_global_capturing() + + +class CaptureFixture(Generic[AnyStr]): + """Object returned by the :fixture:`capsys`, 
:fixture:`capsysbinary`, + :fixture:`capfd` and :fixture:`capfdbinary` fixtures.""" + + def __init__( + self, + captureclass: type[CaptureBase[AnyStr]], + request: SubRequest, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self.captureclass: type[CaptureBase[AnyStr]] = captureclass + self.request = request + self._capture: MultiCapture[AnyStr] | None = None + self._captured_out: AnyStr = self.captureclass.EMPTY_BUFFER + self._captured_err: AnyStr = self.captureclass.EMPTY_BUFFER + + def _start(self) -> None: + if self._capture is None: + self._capture = MultiCapture( + in_=None, + out=self.captureclass(1), + err=self.captureclass(2), + ) + self._capture.start_capturing() + + def close(self) -> None: + if self._capture is not None: + out, err = self._capture.pop_outerr_to_orig() + self._captured_out += out + self._captured_err += err + self._capture.stop_capturing() + self._capture = None + + def readouterr(self) -> CaptureResult[AnyStr]: + """Read and return the captured output so far, resetting the internal + buffer. + + :returns: + The captured content as a namedtuple with ``out`` and ``err`` + string attributes. + """ + captured_out, captured_err = self._captured_out, self._captured_err + if self._capture is not None: + out, err = self._capture.readouterr() + captured_out += out + captured_err += err + self._captured_out = self.captureclass.EMPTY_BUFFER + self._captured_err = self.captureclass.EMPTY_BUFFER + return CaptureResult(captured_out, captured_err) + + def _suspend(self) -> None: + """Suspend this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.suspend_capturing() + + def _resume(self) -> None: + """Resume this fixture's own capturing temporarily.""" + if self._capture is not None: + self._capture.resume_capturing() + + def _is_started(self) -> bool: + """Whether actively capturing -- not disabled or closed.""" + if self._capture is not None: + return self._capture.is_started() + return False + + @contextlib.contextmanager + def disabled(self) -> Generator[None]: + """Temporarily disable capturing while inside the ``with`` block.""" + capmanager: CaptureManager = self.request.config.pluginmanager.getplugin( + "capturemanager" + ) + with capmanager.global_and_fixture_disabled(): + yield + + +# The fixtures. + + +@fixture +def capsys(request: SubRequest) -> Generator[CaptureFixture[str]]: + r"""Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsys.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + + Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`. + + Example: + + .. code-block:: python + + def test_output(capsys): + print("hello") + captured = capsys.readouterr() + assert captured.out == "hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(SysCapture, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capsysbinary(request: SubRequest) -> Generator[CaptureFixture[bytes]]: + r"""Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``. + + The captured output is made available via ``capsysbinary.readouterr()`` + method calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``bytes`` objects.
+ + Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`. + + Example: + + .. code-block:: python + + def test_output(capsysbinary): + print("hello") + captured = capsysbinary.readouterr() + assert captured.out == b"hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(SysCaptureBinary, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capfd(request: SubRequest) -> Generator[CaptureFixture[str]]: + r"""Enable text capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfd.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``text`` objects. + + Returns an instance of :class:`CaptureFixture[str] <pytest.CaptureFixture>`. + + Example: + + .. code-block:: python + + def test_system_echo(capfd): + os.system('echo "hello"') + captured = capfd.readouterr() + assert captured.out == "hello\n" + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(FDCapture, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() + + +@fixture +def capfdbinary(request: SubRequest) -> Generator[CaptureFixture[bytes]]: + r"""Enable bytes capturing of writes to file descriptors ``1`` and ``2``. + + The captured output is made available via ``capfdbinary.readouterr()`` method + calls, which return a ``(out, err)`` namedtuple. + ``out`` and ``err`` will be ``bytes`` objects. + + Returns an instance of :class:`CaptureFixture[bytes] <pytest.CaptureFixture>`. + + Example: + + .. code-block:: python + + def test_system_echo(capfdbinary): + os.system('echo "hello"') + captured = capfdbinary.readouterr() + assert captured.out == b"hello\n" + + """ + capman: CaptureManager = request.config.pluginmanager.getplugin("capturemanager") + capture_fixture = CaptureFixture(FDCaptureBinary, request, _ispytest=True) + capman.set_fixture(capture_fixture) + capture_fixture._start() + yield capture_fixture + capture_fixture.close() + capman.unset_fixture() diff --git a/venv/Lib/site-packages/_pytest/compat.py b/venv/Lib/site-packages/_pytest/compat.py new file mode 100644 index 0000000..614848e --- /dev/null +++ b/venv/Lib/site-packages/_pytest/compat.py @@ -0,0 +1,351 @@ +# mypy: allow-untyped-defs +"""Python version compatibility code.""" + +from __future__ import annotations + +import dataclasses +import enum +import functools +import inspect +from inspect import Parameter +from inspect import signature +import os +from pathlib import Path +import sys +from typing import Any +from typing import Callable +from typing import Final +from typing import NoReturn + +import py + + +#: constant to prepare valuing pylib path replacements/lazy proxies later on +# intended for removal in pytest 8.0 or 9.0 + +# fmt: off +# intentional space to create a fake difference for the verification +LEGACY_PATH = py.path. 
local +# fmt: on + + +def legacy_path(path: str | os.PathLike[str]) -> LEGACY_PATH: + """Internal wrapper to prepare lazy proxies for legacy_path instances""" + return LEGACY_PATH(path) + + +# fmt: off +# Singleton type for NOTSET, as described in: +# https://www.python.org/dev/peps/pep-0484/#support-for-singleton-types-in-unions +class NotSetType(enum.Enum): + token = 0 +NOTSET: Final = NotSetType.token +# fmt: on + + +def is_generator(func: object) -> bool: + genfunc = inspect.isgeneratorfunction(func) + return genfunc and not iscoroutinefunction(func) + + +def iscoroutinefunction(func: object) -> bool: + """Return True if func is a coroutine function (a function defined with async + def syntax, and doesn't contain yield), or a function decorated with + @asyncio.coroutine. + + Note: copied and modified from Python 3.5's builtin coroutines.py to avoid + importing asyncio directly, which in turns also initializes the "logging" + module as a side-effect (see issue #8). + """ + return inspect.iscoroutinefunction(func) or getattr(func, "_is_coroutine", False) + + +def is_async_function(func: object) -> bool: + """Return True if the given function seems to be an async function or + an async generator.""" + return iscoroutinefunction(func) or inspect.isasyncgenfunction(func) + + +def getlocation(function, curdir: str | os.PathLike[str] | None = None) -> str: + function = get_real_func(function) + fn = Path(inspect.getfile(function)) + lineno = function.__code__.co_firstlineno + if curdir is not None: + try: + relfn = fn.relative_to(curdir) + except ValueError: + pass + else: + return "%s:%d" % (relfn, lineno + 1) + return "%s:%d" % (fn, lineno + 1) + + +def num_mock_patch_args(function) -> int: + """Return number of arguments used up by mock arguments (if any).""" + patchings = getattr(function, "patchings", None) + if not patchings: + return 0 + + mock_sentinel = getattr(sys.modules.get("mock"), "DEFAULT", object()) + ut_mock_sentinel = getattr(sys.modules.get("unittest.mock"), "DEFAULT", object()) + + return len( + [ + p + for p in patchings + if not p.attribute_name + and (p.new is mock_sentinel or p.new is ut_mock_sentinel) + ] + ) + + +def getfuncargnames( + function: Callable[..., object], + *, + name: str = "", + cls: type | None = None, +) -> tuple[str, ...]: + """Return the names of a function's mandatory arguments. + + Should return the names of all function arguments that: + * Aren't bound to an instance or type as in instance or class methods. + * Don't have default values. + * Aren't bound with functools.partial. + * Aren't replaced with mocks. + + The cls arguments indicate that the function should be treated as a bound + method even though it's not unless the function is a static method. + + The name parameter should be the original name in which the function was collected. + """ + # TODO(RonnyPfannschmidt): This function should be refactored when we + # revisit fixtures. The fixture mechanism should ask the node for + # the fixture names, and not try to obtain directly from the + # function object well after collection has occurred. + + # The parameters attribute of a Signature object contains an + # ordered mapping of parameter names to Parameter instances. This + # creates a tuple of the names of the parameters that don't have + # defaults. 
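+ # Illustrative example (not part of the vendored source): given + # def f(a, b=0, *, c): ... + # this returns ("a", "c"); "b" is excluded because it has a default.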
+ try: + parameters = signature(function).parameters + except (ValueError, TypeError) as e: + from _pytest.outcomes import fail + + fail( + f"Could not determine arguments of {function!r}: {e}", + pytrace=False, + ) + + arg_names = tuple( + p.name + for p in parameters.values() + if ( + p.kind is Parameter.POSITIONAL_OR_KEYWORD + or p.kind is Parameter.KEYWORD_ONLY + ) + and p.default is Parameter.empty + ) + if not name: + name = function.__name__ + + # If this function should be treated as a bound method even though + # it's passed as an unbound method or function, remove the first + # parameter name. + if ( + # Not using `getattr` because we don't want to resolve the staticmethod. + # Not using `cls.__dict__` because we want to check the entire MRO. + cls + and not isinstance( + inspect.getattr_static(cls, name, default=None), staticmethod + ) + ): + arg_names = arg_names[1:] + # Remove any names that will be replaced with mocks. + if hasattr(function, "__wrapped__"): + arg_names = arg_names[num_mock_patch_args(function) :] + return arg_names + + +def get_default_arg_names(function: Callable[..., Any]) -> tuple[str, ...]: + # Note: this code intentionally mirrors the code at the beginning of + # getfuncargnames, to get the arguments which were excluded from its result + # because they had default values. + return tuple( + p.name + for p in signature(function).parameters.values() + if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY) + and p.default is not Parameter.empty + ) + + +_non_printable_ascii_translate_table = { + i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127) +} +_non_printable_ascii_translate_table.update( + {ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"} +) + + +def ascii_escaped(val: bytes | str) -> str: + r"""If val is pure ASCII, return it as an str, otherwise, escape + bytes objects into a sequence of escaped bytes: + + b'\xc3\xb4\xc5\xd6' -> r'\xc3\xb4\xc5\xd6' + + and escapes strings into a sequence of escaped unicode ids, e.g.: + + r'4\nV\U00043efa\x0eMXWB\x1e\u3028\u15fd\xcd\U0007d944' + + Note: + The obvious "v.decode('unicode-escape')" will return + valid UTF-8 unicode if it finds them in bytes, but we + want to return escaped bytes for any byte, even if they match + a UTF-8 string. + """ + if isinstance(val, bytes): + ret = val.decode("ascii", "backslashreplace") + else: + ret = val.encode("unicode_escape").decode("ascii") + return ret.translate(_non_printable_ascii_translate_table) + + +@dataclasses.dataclass +class _PytestWrapper: + """Dummy wrapper around a function object for internal use only. + + Used to correctly unwrap the underlying function object when we are + creating fixtures, because we wrap the function object ourselves with a + decorator to issue warnings when the fixture function is called directly. 
+ """ + + obj: Any + + +def get_real_func(obj): + """Get the real function object of the (possibly) wrapped object by + functools.wraps or functools.partial.""" + start_obj = obj + for i in range(100): + # __pytest_wrapped__ is set by @pytest.fixture when wrapping the fixture function + # to trigger a warning if it gets called directly instead of by pytest: we don't + # want to unwrap further than this otherwise we lose useful wrappings like @mock.patch (#3774) + new_obj = getattr(obj, "__pytest_wrapped__", None) + if isinstance(new_obj, _PytestWrapper): + obj = new_obj.obj + break + new_obj = getattr(obj, "__wrapped__", None) + if new_obj is None: + break + obj = new_obj + else: + from _pytest._io.saferepr import saferepr + + raise ValueError( + f"could not find real function of {saferepr(start_obj)}\nstopped at {saferepr(obj)}" + ) + if isinstance(obj, functools.partial): + obj = obj.func + return obj + + +def get_real_method(obj, holder): + """Attempt to obtain the real function object that might be wrapping + ``obj``, while at the same time returning a bound method to ``holder`` if + the original object was a bound method.""" + try: + is_method = hasattr(obj, "__func__") + obj = get_real_func(obj) + except Exception: # pragma: no cover + return obj + if is_method and hasattr(obj, "__get__") and callable(obj.__get__): + obj = obj.__get__(holder) + return obj + + +def getimfunc(func): + try: + return func.__func__ + except AttributeError: + return func + + +def safe_getattr(object: Any, name: str, default: Any) -> Any: + """Like getattr but return default upon any Exception or any OutcomeException. + + Attribute access can potentially fail for 'evil' Python objects. + See issue #214. + It catches OutcomeException because of #2490 (issue #580), new outcomes + are derived from BaseException instead of Exception (for more details + check #2707). + """ + from _pytest.outcomes import TEST_OUTCOME + + try: + return getattr(object, name, default) + except TEST_OUTCOME: + return default + + +def safe_isclass(obj: object) -> bool: + """Ignore any exception via isinstance on Python 3.""" + try: + return inspect.isclass(obj) + except Exception: + return False + + +def get_user_id() -> int | None: + """Return the current process's real user id or None if it could not be + determined. + + :return: The user id or None if it could not be determined. + """ + # mypy follows the version and platform checking expectation of PEP 484: + # https://mypy.readthedocs.io/en/stable/common_issues.html?highlight=platform#python-version-and-system-platform-checks + # Containment checks are too complex for mypy v1.5.0 and cause failure. + if sys.platform == "win32" or sys.platform == "emscripten": + # win32 does not have a getuid() function. + # Emscripten has a return 0 stub. + return None + else: + # On other platforms, a return value of -1 is assumed to indicate that + # the current process's real user id could not be determined. + ERROR = -1 + uid = os.getuid() + return uid if uid != ERROR else None + + +# Perform exhaustiveness checking. +# +# Consider this example: +# +# MyUnion = Union[int, str] +# +# def handle(x: MyUnion) -> int { +# if isinstance(x, int): +# return 1 +# elif isinstance(x, str): +# return 2 +# else: +# raise Exception('unreachable') +# +# Now suppose we add a new variant: +# +# MyUnion = Union[int, str, bytes] +# +# After doing this, we must remember ourselves to go and update the handle +# function to handle the new variant. 
+# +# With `assert_never` we can do better: +# +# // raise Exception('unreachable') +# return assert_never(x) +# +# Now, if we forget to handle the new variant, the type-checker will emit a +# compile-time error, instead of the runtime error we would have gotten +# previously. +# +# This also work for Enums (if you use `is` to compare) and Literals. +def assert_never(value: NoReturn) -> NoReturn: + assert False, f"Unhandled value: {value} ({type(value).__name__})" diff --git a/venv/Lib/site-packages/_pytest/config/__init__.py b/venv/Lib/site-packages/_pytest/config/__init__.py new file mode 100644 index 0000000..3cca147 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/config/__init__.py @@ -0,0 +1,1972 @@ +# mypy: allow-untyped-defs +"""Command line options, ini-file and conftest.py processing.""" + +from __future__ import annotations + +import argparse +import collections.abc +import copy +import dataclasses +import enum +from functools import lru_cache +import glob +import importlib.metadata +import inspect +import os +import pathlib +import re +import shlex +import sys +from textwrap import dedent +import types +from types import FunctionType +from typing import Any +from typing import Callable +from typing import cast +from typing import Final +from typing import final +from typing import Generator +from typing import IO +from typing import Iterable +from typing import Iterator +from typing import Sequence +from typing import TextIO +from typing import Type +from typing import TYPE_CHECKING +import warnings + +import pluggy +from pluggy import HookimplMarker +from pluggy import HookimplOpts +from pluggy import HookspecMarker +from pluggy import HookspecOpts +from pluggy import PluginManager + +from .compat import PathAwareHookProxy +from .exceptions import PrintHelp as PrintHelp +from .exceptions import UsageError as UsageError +from .findpaths import determine_setup +from _pytest import __version__ +import _pytest._code +from _pytest._code import ExceptionInfo +from _pytest._code import filter_traceback +from _pytest._code.code import TracebackStyle +from _pytest._io import TerminalWriter +from _pytest.config.argparsing import Argument +from _pytest.config.argparsing import Parser +import _pytest.deprecated +import _pytest.hookspec +from _pytest.outcomes import fail +from _pytest.outcomes import Skipped +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportMode +from _pytest.pathlib import resolve_package_path +from _pytest.pathlib import safe_exists +from _pytest.stash import Stash +from _pytest.warning_types import PytestConfigWarning +from _pytest.warning_types import warn_explicit_for + + +if TYPE_CHECKING: + from _pytest.cacheprovider import Cache + from _pytest.terminal import TerminalReporter + + +_PluggyPlugin = object +"""A type to represent plugin objects. + +Plugins can be any namespace, so we can't narrow it down much, but we use an +alias to make the intent clear. + +Ideally this type would be provided by pluggy itself. +""" + + +hookimpl = HookimplMarker("pytest") +hookspec = HookspecMarker("pytest") + + +@final +class ExitCode(enum.IntEnum): + """Encodes the valid exit codes by pytest. + + Currently users and plugins may supply other exit codes as well. + + .. versionadded:: 5.0 + """ + + #: Tests passed. + OK = 0 + #: Tests failed. + TESTS_FAILED = 1 + #: pytest was interrupted. + INTERRUPTED = 2 + #: An internal error got in the way. 
+ INTERNAL_ERROR = 3 + #: pytest was misused. + USAGE_ERROR = 4 + #: pytest couldn't find tests. + NO_TESTS_COLLECTED = 5 + + +class ConftestImportFailure(Exception): + def __init__( + self, + path: pathlib.Path, + *, + cause: Exception, + ) -> None: + self.path = path + self.cause = cause + + def __str__(self) -> str: + return f"{type(self.cause).__name__}: {self.cause} (from {self.path})" + + +def filter_traceback_for_conftest_import_failure( + entry: _pytest._code.TracebackEntry, +) -> bool: + """Filter tracebacks entries which point to pytest internals or importlib. + + Make a special case for importlib because we use it to import test modules and conftest files + in _pytest.pathlib.import_path. + """ + return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep) + + +def main( + args: list[str] | os.PathLike[str] | None = None, + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> int | ExitCode: + """Perform an in-process test run. + + :param args: + List of command line arguments. If `None` or not given, defaults to reading + arguments directly from the process command line (:data:`sys.argv`). + :param plugins: List of plugin objects to be auto-registered during initialization. + + :returns: An exit code. + """ + old_pytest_version = os.environ.get("PYTEST_VERSION") + try: + os.environ["PYTEST_VERSION"] = __version__ + try: + config = _prepareconfig(args, plugins) + except ConftestImportFailure as e: + exc_info = ExceptionInfo.from_exception(e.cause) + tw = TerminalWriter(sys.stderr) + tw.line(f"ImportError while loading conftest '{e.path}'.", red=True) + exc_info.traceback = exc_info.traceback.filter( + filter_traceback_for_conftest_import_failure + ) + exc_repr = ( + exc_info.getrepr(style="short", chain=False) + if exc_info.traceback + else exc_info.exconly() + ) + formatted_tb = str(exc_repr) + for line in formatted_tb.splitlines(): + tw.line(line.rstrip(), red=True) + return ExitCode.USAGE_ERROR + else: + try: + ret: ExitCode | int = config.hook.pytest_cmdline_main(config=config) + try: + return ExitCode(ret) + except ValueError: + return ret + finally: + config._ensure_unconfigure() + except UsageError as e: + tw = TerminalWriter(sys.stderr) + for msg in e.args: + tw.line(f"ERROR: {msg}\n", red=True) + return ExitCode.USAGE_ERROR + finally: + if old_pytest_version is None: + os.environ.pop("PYTEST_VERSION", None) + else: + os.environ["PYTEST_VERSION"] = old_pytest_version + + +def console_main() -> int: + """The CLI entry point of pytest. + + This function is not meant for programmable use; use `main()` instead. + """ + # https://docs.python.org/3/library/signal.html#note-on-sigpipe + try: + code = main() + sys.stdout.flush() + return code + except BrokenPipeError: + # Python flushes standard streams on exit; redirect remaining output + # to devnull to avoid another BrokenPipeError at shutdown + devnull = os.open(os.devnull, os.O_WRONLY) + os.dup2(devnull, sys.stdout.fileno()) + return 1 # Python exits with error code 1 on EPIPE + + +class cmdline: # compatibility namespace + main = staticmethod(main) + + +def filename_arg(path: str, optname: str) -> str: + """Argparse type validator for filename arguments. + + :path: Path of filename. + :optname: Name of the option. + """ + if os.path.isdir(path): + raise UsageError(f"{optname} must be a filename, given: {path}") + return path + + +def directory_arg(path: str, optname: str) -> str: + """Argparse type validator for directory arguments. + + :path: Path of directory. + :optname: Name of the option. 
+ """ + if not os.path.isdir(path): + raise UsageError(f"{optname} must be a directory, given: {path}") + return path + + +# Plugins that cannot be disabled via "-p no:X" currently. +essential_plugins = ( + "mark", + "main", + "runner", + "fixtures", + "helpconfig", # Provides -p. +) + +default_plugins = ( + *essential_plugins, + "python", + "terminal", + "debugging", + "unittest", + "capture", + "skipping", + "legacypath", + "tmpdir", + "monkeypatch", + "recwarn", + "pastebin", + "assertion", + "junitxml", + "doctest", + "cacheprovider", + "freeze_support", + "setuponly", + "setupplan", + "stepwise", + "warnings", + "logging", + "reports", + "python_path", + "unraisableexception", + "threadexception", + "faulthandler", +) + +builtin_plugins = set(default_plugins) +builtin_plugins.add("pytester") +builtin_plugins.add("pytester_assertions") + + +def get_config( + args: list[str] | None = None, + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> Config: + # subsequent calls to main will create a fresh instance + pluginmanager = PytestPluginManager() + config = Config( + pluginmanager, + invocation_params=Config.InvocationParams( + args=args or (), + plugins=plugins, + dir=pathlib.Path.cwd(), + ), + ) + + if args is not None: + # Handle any "-p no:plugin" args. + pluginmanager.consider_preparse(args, exclude_only=True) + + for spec in default_plugins: + pluginmanager.import_plugin(spec) + + return config + + +def get_plugin_manager() -> PytestPluginManager: + """Obtain a new instance of the + :py:class:`pytest.PytestPluginManager`, with default plugins + already loaded. + + This function can be used by integration with other tools, like hooking + into pytest to run tests into an IDE. + """ + return get_config().pluginmanager + + +def _prepareconfig( + args: list[str] | os.PathLike[str] | None = None, + plugins: Sequence[str | _PluggyPlugin] | None = None, +) -> Config: + if args is None: + args = sys.argv[1:] + elif isinstance(args, os.PathLike): + args = [os.fspath(args)] + elif not isinstance(args, list): + msg = ( # type:ignore[unreachable] + "`args` parameter expected to be a list of strings, got: {!r} (type: {})" + ) + raise TypeError(msg.format(args, type(args))) + + config = get_config(args, plugins) + pluginmanager = config.pluginmanager + try: + if plugins: + for plugin in plugins: + if isinstance(plugin, str): + pluginmanager.consider_pluginarg(plugin) + else: + pluginmanager.register(plugin) + config = pluginmanager.hook.pytest_cmdline_parse( + pluginmanager=pluginmanager, args=args + ) + return config + except BaseException: + config._ensure_unconfigure() + raise + + +def _get_directory(path: pathlib.Path) -> pathlib.Path: + """Get the directory of a path - itself if already a directory.""" + if path.is_file(): + return path.parent + else: + return path + + +def _get_legacy_hook_marks( + method: Any, + hook_type: str, + opt_names: tuple[str, ...], +) -> dict[str, bool]: + if TYPE_CHECKING: + # abuse typeguard from importlib to avoid massive method type union that's lacking an alias + assert inspect.isroutine(method) + known_marks: set[str] = {m.name for m in getattr(method, "pytestmark", [])} + must_warn: list[str] = [] + opts: dict[str, bool] = {} + for opt_name in opt_names: + opt_attr = getattr(method, opt_name, AttributeError) + if opt_attr is not AttributeError: + must_warn.append(f"{opt_name}={opt_attr}") + opts[opt_name] = True + elif opt_name in known_marks: + must_warn.append(f"{opt_name}=True") + opts[opt_name] = True + else: + opts[opt_name] = False + if 
must_warn: + hook_opts = ", ".join(must_warn) + message = _pytest.deprecated.HOOK_LEGACY_MARKING.format( + type=hook_type, + fullname=method.__qualname__, + hook_opts=hook_opts, + ) + warn_explicit_for(cast(FunctionType, method), message) + return opts + + +@final +class PytestPluginManager(PluginManager): + """A :py:class:`pluggy.PluginManager <pluggy.PluginManager>` with + additional pytest-specific functionality: + + * Loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and + ``pytest_plugins`` global variables found in plugins being loaded. + * ``conftest.py`` loading during start-up. + """ + + def __init__(self) -> None: + import _pytest.assertion + + super().__init__("pytest") + + # -- State related to local conftest plugins. + # All loaded conftest modules. + self._conftest_plugins: set[types.ModuleType] = set() + # All conftest modules applicable for a directory. + # This includes the directory's own conftest modules as well + # as those of its parent directories. + self._dirpath2confmods: dict[pathlib.Path, list[types.ModuleType]] = {} + # Cutoff directory above which conftests are no longer discovered. + self._confcutdir: pathlib.Path | None = None + # If set, conftest loading is skipped. + self._noconftest = False + + # _getconftestmodules()'s call to _get_directory() causes a stat + # storm when it's called potentially thousands of times in a test + # session (#9478), often with the same path, so cache it. + self._get_directory = lru_cache(256)(_get_directory) + + # Plugins that were explicitly skipped with pytest.skip, + # as a list of (module name, skip reason). + # Previously we would issue a warning when a plugin was skipped, but + # since we refactored warnings as first-class citizens of Config, they are + # just stored here to be used later. + self.skipped_plugins: list[tuple[str, str]] = [] + + self.add_hookspecs(_pytest.hookspec) + self.register(self) + if os.environ.get("PYTEST_DEBUG"): + err: IO[str] = sys.stderr + encoding: str = getattr(err, "encoding", "utf8") + try: + err = open( + os.dup(err.fileno()), + mode=err.mode, + buffering=1, + encoding=encoding, + ) + except Exception: + pass + self.trace.root.setwriter(err.write) + self.enable_tracing() + + # Config._consider_importhook will set a real object if required. + self.rewrite_hook = _pytest.assertion.DummyRewriteHook() + # Used to know when we are importing conftests after the pytest_configure stage. + self._configured = False + + def parse_hookimpl_opts( + self, plugin: _PluggyPlugin, name: str + ) -> HookimplOpts | None: + """:meta private:""" + # pytest hooks are always prefixed with "pytest_", + # so we avoid accessing possibly non-readable attributes + # (see issue #1073). + if not name.startswith("pytest_"): + return None + # Ignore names which cannot be hooks. + if name == "pytest_plugins": + return None + + opts = super().parse_hookimpl_opts(plugin, name) + if opts is not None: + return opts + + method = getattr(plugin, name) + # Consider only actual functions for hooks (#3775). + if not inspect.isroutine(method): + return None + # Collect unmarked hooks as long as they have the `pytest_` prefix.
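+ # (For example, a plain undecorated "def pytest_runtest_setup(item)" on a + # plugin object is accepted here, with every legacy option reported as False.)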
+ return _get_legacy_hook_marks( # type: ignore[return-value] + method, "impl", ("tryfirst", "trylast", "optionalhook", "hookwrapper") + ) + + def parse_hookspec_opts(self, module_or_class, name: str) -> HookspecOpts | None: + """:meta private:""" + opts = super().parse_hookspec_opts(module_or_class, name) + if opts is None: + method = getattr(module_or_class, name) + if name.startswith("pytest_"): + opts = _get_legacy_hook_marks( # type: ignore[assignment] + method, + "spec", + ("firstresult", "historic"), + ) + return opts + + def register(self, plugin: _PluggyPlugin, name: str | None = None) -> str | None: + if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS: + warnings.warn( + PytestConfigWarning( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) + ) + ) + return None + plugin_name = super().register(plugin, name) + if plugin_name is not None: + self.hook.pytest_plugin_registered.call_historic( + kwargs=dict( + plugin=plugin, + plugin_name=plugin_name, + manager=self, + ) + ) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) + return plugin_name + + def getplugin(self, name: str): + # Support deprecated naming because plugins (xdist e.g.) use it. + plugin: _PluggyPlugin | None = self.get_plugin(name) + return plugin + + def hasplugin(self, name: str) -> bool: + """Return whether a plugin with the given name is registered.""" + return bool(self.get_plugin(name)) + + def pytest_configure(self, config: Config) -> None: + """:meta private:""" + # XXX now that the pluginmanager exposes hookimpl(tryfirst...) + # we should remove tryfirst/trylast as markers. + config.addinivalue_line( + "markers", + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible. " + "DEPRECATED, use @pytest.hookimpl(tryfirst=True) instead.", + ) + config.addinivalue_line( + "markers", + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible. " + "DEPRECATED, use @pytest.hookimpl(trylast=True) instead.", + ) + self._configured = True + + # + # Internal API for local conftest plugin handling. + # + def _set_initial_conftests( + self, + args: Sequence[str | pathlib.Path], + pyargs: bool, + noconftest: bool, + rootpath: pathlib.Path, + confcutdir: pathlib.Path | None, + invocation_dir: pathlib.Path, + importmode: ImportMode | str, + *, + consider_namespace_packages: bool, + ) -> None: + """Load initial conftest files given a preparsed "namespace". + + As conftest files may add their own command line options which have + arguments ('--my-opt somepath') we might get some false positives. + All builtin and 3rd party plugins will have been loaded, however, so + common options will not confuse our logic here. + """ + self._confcutdir = ( + absolutepath(invocation_dir / confcutdir) if confcutdir else None + ) + self._noconftest = noconftest + self._using_pyargs = pyargs + foundanchor = False + for initial_path in args: + path = str(initial_path) + # remove node-id syntax + i = path.find("::") + if i != -1: + path = path[:i] + anchor = absolutepath(invocation_dir / path) + + # Ensure we do not break if what appears to be an anchor + # is in fact a very long option (#10169, #11394). 
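+ # (safe_exists() returns False instead of propagating the OSError/ValueError + # that stat() can raise for over-long paths, so such arguments are skipped.)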
+ if safe_exists(anchor): + self._try_load_conftest( + anchor, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + foundanchor = True + if not foundanchor: + self._try_load_conftest( + invocation_dir, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + + def _is_in_confcutdir(self, path: pathlib.Path) -> bool: + """Whether to consider the given path to load conftests from.""" + if self._confcutdir is None: + return True + # The semantics here are literally: + # Do not load a conftest if it is found upwards from the confcutdir. + # But this is *not* the same as: + # Load only conftests from confcutdir or below. + # At first glance they might seem the same thing, however we do support use cases where + # we want to load conftests that are not found in confcutdir or below, but are found + # in completely different directory hierarchies like packages installed + # in out-of-source trees. + # (see #9767 for a regression where the logic was inverted). + return path not in self._confcutdir.parents + + def _try_load_conftest( + self, + anchor: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> None: + self._loadconftestmodules( + anchor, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + # let's also consider test* subdirs + if anchor.is_dir(): + for x in anchor.glob("test*"): + if x.is_dir(): + self._loadconftestmodules( + x, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + + def _loadconftestmodules( + self, + path: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> None: + if self._noconftest: + return + + directory = self._get_directory(path) + + # Optimization: avoid repeated searches in the same directory. + # Assumes always called with same importmode and rootpath. + if directory in self._dirpath2confmods: + return + + clist = [] + for parent in reversed((directory, *directory.parents)): + if self._is_in_confcutdir(parent): + conftestpath = parent / "conftest.py" + if conftestpath.is_file(): + mod = self._importconftest( + conftestpath, + importmode, + rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + clist.append(mod) + self._dirpath2confmods[directory] = clist + + def _getconftestmodules(self, path: pathlib.Path) -> Sequence[types.ModuleType]: + directory = self._get_directory(path) + return self._dirpath2confmods.get(directory, ()) + + def _rget_with_confmod( + self, + name: str, + path: pathlib.Path, + ) -> tuple[types.ModuleType, Any]: + modules = self._getconftestmodules(path) + for mod in reversed(modules): + try: + return mod, getattr(mod, name) + except AttributeError: + continue + raise KeyError(name) + + def _importconftest( + self, + conftestpath: pathlib.Path, + importmode: str | ImportMode, + rootpath: pathlib.Path, + *, + consider_namespace_packages: bool, + ) -> types.ModuleType: + conftestpath_plugin_name = str(conftestpath) + existing = self.get_plugin(conftestpath_plugin_name) + if existing is not None: + return cast(types.ModuleType, existing) + + # conftest.py files that are not in a Python package all have module + # name "conftest", and thus conflict with each other. Clear the existing + # before loading the new one, otherwise the existing one will be + # returned from the module cache.
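+ # (For example, tests/a/conftest.py and tests/b/conftest.py, when neither + # directory has an __init__.py, both import under the module name "conftest".)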
+ pkgpath = resolve_package_path(conftestpath) + if pkgpath is None: + try: + del sys.modules[conftestpath.stem] + except KeyError: + pass + + try: + mod = import_path( + conftestpath, + mode=importmode, + root=rootpath, + consider_namespace_packages=consider_namespace_packages, + ) + except Exception as e: + assert e.__traceback__ is not None + raise ConftestImportFailure(conftestpath, cause=e) from e + + self._check_non_top_pytest_plugins(mod, conftestpath) + + self._conftest_plugins.add(mod) + dirpath = conftestpath.parent + if dirpath in self._dirpath2confmods: + for path, mods in self._dirpath2confmods.items(): + if dirpath in path.parents or path == dirpath: + if mod in mods: + raise AssertionError( + f"While trying to load conftest path {conftestpath!s}, " + f"found that the module {mod} is already loaded with path {mod.__file__}. " + "This is not supposed to happen. Please report this issue to pytest." + ) + mods.append(mod) + self.trace(f"loading conftestmodule {mod!r}") + self.consider_conftest(mod, registration_name=conftestpath_plugin_name) + return mod + + def _check_non_top_pytest_plugins( + self, + mod: types.ModuleType, + conftestpath: pathlib.Path, + ) -> None: + if ( + hasattr(mod, "pytest_plugins") + and self._configured + and not self._using_pyargs + ): + msg = ( + "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported:\n" + "It affects the entire test suite instead of just below the conftest as expected.\n" + " {}\n" + "Please move it to a top level conftest file at the rootdir:\n" + " {}\n" + "For more information, visit:\n" + " https://docs.pytest.org/en/stable/deprecations.html#pytest-plugins-in-non-top-level-conftest-files" + ) + fail(msg.format(conftestpath, self._confcutdir), pytrace=False) + + # + # API for bootstrapping plugin loading + # + # + + def consider_preparse( + self, args: Sequence[str], *, exclude_only: bool = False + ) -> None: + """:meta private:""" + i = 0 + n = len(args) + while i < n: + opt = args[i] + i += 1 + if isinstance(opt, str): + if opt == "-p": + try: + parg = args[i] + except IndexError: + return + i += 1 + elif opt.startswith("-p"): + parg = opt[2:] + else: + continue + parg = parg.strip() + if exclude_only and not parg.startswith("no:"): + continue + self.consider_pluginarg(parg) + + def consider_pluginarg(self, arg: str) -> None: + """:meta private:""" + if arg.startswith("no:"): + name = arg[3:] + if name in essential_plugins: + raise UsageError(f"plugin {name} cannot be disabled") + + # PR #4304: remove stepwise if cacheprovider is blocked. + if name == "cacheprovider": + self.set_blocked("stepwise") + self.set_blocked("pytest_stepwise") + + self.set_blocked(name) + if not name.startswith("pytest_"): + self.set_blocked("pytest_" + name) + else: + name = arg + # Unblock the plugin. 
+ self.unblock(name) + if not name.startswith("pytest_"): + self.unblock("pytest_" + name) + self.import_plugin(arg, consider_entry_points=True) + + def consider_conftest( + self, conftestmodule: types.ModuleType, registration_name: str + ) -> None: + """:meta private:""" + self.register(conftestmodule, name=registration_name) + + def consider_env(self) -> None: + """:meta private:""" + self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) + + def consider_module(self, mod: types.ModuleType) -> None: + """:meta private:""" + self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) + + def _import_plugin_specs( + self, spec: None | types.ModuleType | str | Sequence[str] + ) -> None: + plugins = _get_plugin_specs_as_list(spec) + for import_spec in plugins: + self.import_plugin(import_spec) + + def import_plugin(self, modname: str, consider_entry_points: bool = False) -> None: + """Import a plugin with ``modname``. + + If ``consider_entry_points`` is True, entry point names are also + considered to find a plugin. + """ + # Most often modname refers to builtin modules, e.g. "pytester", + # "terminal" or "capture". Those plugins are registered under their + # basename for historic purposes but must be imported with the + # _pytest prefix. + assert isinstance( + modname, str + ), f"module name as text required, got {modname!r}" + if self.is_blocked(modname) or self.get_plugin(modname) is not None: + return + + importspec = "_pytest." + modname if modname in builtin_plugins else modname + self.rewrite_hook.mark_rewrite(importspec) + + if consider_entry_points: + loaded = self.load_setuptools_entrypoints("pytest11", name=modname) + if loaded: + return + + try: + __import__(importspec) + except ImportError as e: + raise ImportError( + f'Error importing plugin "{modname}": {e.args[0]}' + ).with_traceback(e.__traceback__) from e + + except Skipped as e: + self.skipped_plugins.append((modname, e.msg or "")) + else: + mod = sys.modules[importspec] + self.register(mod, modname) + + +def _get_plugin_specs_as_list( + specs: None | types.ModuleType | str | Sequence[str], +) -> list[str]: + """Parse a plugins specification into a list of plugin names.""" + # None means empty. + if specs is None: + return [] + # Workaround for #3899 - a submodule which happens to be called "pytest_plugins". + if isinstance(specs, types.ModuleType): + return [] + # Comma-separated list. + if isinstance(specs, str): + return specs.split(",") if specs else [] + # Direct specification. + if isinstance(specs, collections.abc.Sequence): + return list(specs) + raise UsageError( + f"Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: {specs!r}" + ) + + +class Notset: + def __repr__(self): + return "<NOTSET>" + + +notset = Notset() + + +def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]: + """Given an iterable of file names in a source distribution, return the "names" that should + be marked for assertion rewrite. + + For example the package "pytest_mock/__init__.py" should be added as "pytest_mock" in + the assertion rewrite mechanism. + + This function has to deal with dist-info based distributions and egg based distributions + (which are still very much in use for "editable" installs).
+ + Here are the file names as seen in a dist-info based distribution: + + pytest_mock/__init__.py + pytest_mock/_version.py + pytest_mock/plugin.py + pytest_mock.egg-info/PKG-INFO + + Here are the file names as seen in an egg based distribution: + + src/pytest_mock/__init__.py + src/pytest_mock/_version.py + src/pytest_mock/plugin.py + src/pytest_mock.egg-info/PKG-INFO + LICENSE + setup.py + + We have to take in account those two distribution flavors in order to determine which + names should be considered for assertion rewriting. + + More information: + https://github.com/pytest-dev/pytest-mock/issues/167 + """ + package_files = list(package_files) + seen_some = False + for fn in package_files: + is_simple_module = "/" not in fn and fn.endswith(".py") + is_package = fn.count("/") == 1 and fn.endswith("__init__.py") + if is_simple_module: + module_name, _ = os.path.splitext(fn) + # we ignore "setup.py" at the root of the distribution + # as well as editable installation finder modules made by setuptools + if module_name != "setup" and not module_name.startswith("__editable__"): + seen_some = True + yield module_name + elif is_package: + package_name = os.path.dirname(fn) + seen_some = True + yield package_name + + if not seen_some: + # At this point we did not find any packages or modules suitable for assertion + # rewriting, so we try again by stripping the first path component (to account for + # "src" based source trees for example). + # This approach lets us have the common case continue to be fast, as egg-distributions + # are rarer. + new_package_files = [] + for fn in package_files: + parts = fn.split("/") + new_fn = "/".join(parts[1:]) + if new_fn: + new_package_files.append(new_fn) + if new_package_files: + yield from _iter_rewritable_modules(new_package_files) + + +@final +class Config: + """Access to configuration values, pluginmanager and plugin hooks. + + :param PytestPluginManager pluginmanager: + A pytest PluginManager. + + :param InvocationParams invocation_params: + Object containing parameters regarding the :func:`pytest.main` + invocation. + """ + + @final + @dataclasses.dataclass(frozen=True) + class InvocationParams: + """Holds parameters passed during :func:`pytest.main`. + + The object attributes are read-only. + + .. versionadded:: 5.1 + + .. note:: + + Note that the environment variable ``PYTEST_ADDOPTS`` and the ``addopts`` + ini option are handled by pytest, not being included in the ``args`` attribute. + + Plugins accessing ``InvocationParams`` must be aware of that. + """ + + args: tuple[str, ...] + """The command-line arguments as passed to :func:`pytest.main`.""" + plugins: Sequence[str | _PluggyPlugin] | None + """Extra plugins, might be `None`.""" + dir: pathlib.Path + """The directory from which :func:`pytest.main` was invoked. :type: pathlib.Path""" + + def __init__( + self, + *, + args: Iterable[str], + plugins: Sequence[str | _PluggyPlugin] | None, + dir: pathlib.Path, + ) -> None: + object.__setattr__(self, "args", tuple(args)) + object.__setattr__(self, "plugins", plugins) + object.__setattr__(self, "dir", dir) + + class ArgsSource(enum.Enum): + """Indicates the source of the test arguments. + + .. versionadded:: 7.2 + """ + + #: Command line arguments. + ARGS = enum.auto() + #: Invocation directory. + INVOCATION_DIR = enum.auto() + INCOVATION_DIR = INVOCATION_DIR # backwards compatibility alias + #: 'testpaths' configuration value. + TESTPATHS = enum.auto() + + # Set by cacheprovider plugin. 
cache: Cache + + def __init__( + self, + pluginmanager: PytestPluginManager, + *, + invocation_params: InvocationParams | None = None, + ) -> None: + from .argparsing import FILE_OR_DIR + from .argparsing import Parser + + if invocation_params is None: + invocation_params = self.InvocationParams( + args=(), plugins=None, dir=pathlib.Path.cwd() + ) + + self.option = argparse.Namespace() + """Access to command line options as attributes. + + :type: argparse.Namespace + """ + + self.invocation_params = invocation_params + """The parameters with which pytest was invoked. + + :type: InvocationParams + """ + + _a = FILE_OR_DIR + self._parser = Parser( + usage=f"%(prog)s [options] [{_a}] [{_a}] [...]", + processopt=self._processopt, + _ispytest=True, + ) + self.pluginmanager = pluginmanager + """The plugin manager handles plugin registration and hook invocation. + + :type: PytestPluginManager + """ + + self.stash = Stash() + """A place where plugins can store information on the config for their + own use. + + :type: Stash + """ + # Deprecated alias. Was never public. Can be removed in a few releases. + self._store = self.stash + + self.trace = self.pluginmanager.trace.root.get("config") + self.hook: pluggy.HookRelay = PathAwareHookProxy(self.pluginmanager.hook) # type: ignore[assignment] + self._inicache: dict[str, Any] = {} + self._override_ini: Sequence[str] = () + self._opt2dest: dict[str, str] = {} + self._cleanup: list[Callable[[], None]] = [] + self.pluginmanager.register(self, "pytestconfig") + self._configured = False + self.hook.pytest_addoption.call_historic( + kwargs=dict(parser=self._parser, pluginmanager=self.pluginmanager) + ) + self.args_source = Config.ArgsSource.ARGS + self.args: list[str] = [] + + @property + def rootpath(self) -> pathlib.Path: + """The path to the :ref:`rootdir <rootdir>`. + + :type: pathlib.Path + + .. versionadded:: 6.1 + """ + return self._rootpath + + @property + def inipath(self) -> pathlib.Path | None: + """The path to the :ref:`configfile <configfiles>`. + + .. versionadded:: 6.1 + """ + return self._inipath + + def add_cleanup(self, func: Callable[[], None]) -> None: + """Add a function to be called when the config object gets out of + use (usually coinciding with pytest_unconfigure).""" + self._cleanup.append(func) + + def _do_configure(self) -> None: + assert not self._configured + self._configured = True + with warnings.catch_warnings(): + warnings.simplefilter("default") + self.hook.pytest_configure.call_historic(kwargs=dict(config=self)) + + def _ensure_unconfigure(self) -> None: + if self._configured: + self._configured = False + self.hook.pytest_unconfigure(config=self) + self.hook.pytest_configure._call_history = [] + while self._cleanup: + fin = self._cleanup.pop() + fin() + + def get_terminal_writer(self) -> TerminalWriter: + terminalreporter: TerminalReporter | None = self.pluginmanager.get_plugin( + "terminalreporter" + ) + assert terminalreporter is not None + return terminalreporter._tw + + def pytest_cmdline_parse( + self, pluginmanager: PytestPluginManager, args: list[str] + ) -> Config: + try: + self.parse(args) + except UsageError: + # Handle --version and --help here in a minimal fashion. + # This gets done via helpconfig normally, but its + # pytest_cmdline_main is not called in case of errors.
+            if getattr(self.option, "version", False) or "--version" in args:
+                from _pytest.helpconfig import showversion
+
+                showversion(self)
+            elif (
+                getattr(self.option, "help", False) or "--help" in args or "-h" in args
+            ):
+                self._parser._getparser().print_help()
+                sys.stdout.write(
+                    "\nNOTE: displaying only minimal help due to UsageError.\n\n"
+                )
+
+            raise
+
+        return self
+
+    def notify_exception(
+        self,
+        excinfo: ExceptionInfo[BaseException],
+        option: argparse.Namespace | None = None,
+    ) -> None:
+        if option and getattr(option, "fulltrace", False):
+            style: TracebackStyle = "long"
+        else:
+            style = "native"
+        excrepr = excinfo.getrepr(
+            funcargs=True, showlocals=getattr(option, "showlocals", False), style=style
+        )
+        res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
+        if not any(res):
+            for line in str(excrepr).split("\n"):
+                sys.stderr.write(f"INTERNALERROR> {line}\n")
+            sys.stderr.flush()
+
+    def cwd_relative_nodeid(self, nodeid: str) -> str:
+        # Node IDs are relative to the rootpath; compute them relative to the cwd.
+        if self.invocation_params.dir != self.rootpath:
+            base_path_part, *nodeid_part = nodeid.split("::")
+            # Only process the path part.
+            fullpath = self.rootpath / base_path_part
+            relative_path = bestrelpath(self.invocation_params.dir, fullpath)
+
+            nodeid = "::".join([relative_path, *nodeid_part])
+        return nodeid
+
+    @classmethod
+    def fromdictargs(cls, option_dict, args) -> Config:
+        """Constructor usable for subprocesses."""
+        config = get_config(args)
+        config.option.__dict__.update(option_dict)
+        config.parse(args, addopts=False)
+        for x in config.option.plugins:
+            config.pluginmanager.consider_pluginarg(x)
+        return config
+
+    def _processopt(self, opt: Argument) -> None:
+        for name in opt._short_opts + opt._long_opts:
+            self._opt2dest[name] = opt.dest
+
+        if hasattr(opt, "default"):
+            if not hasattr(self.option, opt.dest):
+                setattr(self.option, opt.dest, opt.default)
+
+    @hookimpl(trylast=True)
+    def pytest_load_initial_conftests(self, early_config: Config) -> None:
+        # We haven't fully parsed the command line arguments yet, so
+        # early_config.args is not set yet. But we need it for
+        # discovering the initial conftests. So "pre-run" the logic here.
+        # It will be done for real in `parse()`.
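+        # Illustrative example of the call below (an assumption, not from the
+        # original source): invoked from the rootdir with no positional
+        # arguments and testpaths = ["tests"], _decide_args would return
+        # (["tests"], Config.ArgsSource.TESTPATHS), assuming a tests/
+        # directory exists for the glob to match.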
+ args, args_source = early_config._decide_args( + args=early_config.known_args_namespace.file_or_dir, + pyargs=early_config.known_args_namespace.pyargs, + testpaths=early_config.getini("testpaths"), + invocation_dir=early_config.invocation_params.dir, + rootpath=early_config.rootpath, + warn=False, + ) + self.pluginmanager._set_initial_conftests( + args=args, + pyargs=early_config.known_args_namespace.pyargs, + noconftest=early_config.known_args_namespace.noconftest, + rootpath=early_config.rootpath, + confcutdir=early_config.known_args_namespace.confcutdir, + invocation_dir=early_config.invocation_params.dir, + importmode=early_config.known_args_namespace.importmode, + consider_namespace_packages=early_config.getini( + "consider_namespace_packages" + ), + ) + + def _initini(self, args: Sequence[str]) -> None: + ns, unknown_args = self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + rootpath, inipath, inicfg = determine_setup( + inifile=ns.inifilename, + args=ns.file_or_dir + unknown_args, + rootdir_cmd_arg=ns.rootdir or None, + invocation_dir=self.invocation_params.dir, + ) + self._rootpath = rootpath + self._inipath = inipath + self.inicfg = inicfg + self._parser.extra_info["rootdir"] = str(self.rootpath) + self._parser.extra_info["inifile"] = str(self.inipath) + self._parser.addini("addopts", "Extra command line options", "args") + self._parser.addini("minversion", "Minimally required pytest version") + self._parser.addini( + "required_plugins", + "Plugins that must be present for pytest to run", + type="args", + default=[], + ) + self._override_ini = ns.override_ini or () + + def _consider_importhook(self, args: Sequence[str]) -> None: + """Install the PEP 302 import hook if using assertion rewriting. + + Needs to parse the --assert= option from the commandline + and find all the installed plugins to mark them for rewriting + by the importhook. + """ + ns, unknown_args = self._parser.parse_known_and_unknown_args(args) + mode = getattr(ns, "assertmode", "plain") + if mode == "rewrite": + import _pytest.assertion + + try: + hook = _pytest.assertion.install_importhook(self) + except SystemError: + mode = "plain" + else: + self._mark_plugins_for_rewrite(hook) + self._warn_about_missing_assertion(mode) + + def _mark_plugins_for_rewrite(self, hook) -> None: + """Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins.""" + self.pluginmanager.rewrite_hook = hook + + if os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"): + # We don't autoload from distribution package entry points, + # no need to continue. + return + + package_files = ( + str(file) + for dist in importlib.metadata.distributions() + if any(ep.group == "pytest11" for ep in dist.entry_points) + for file in dist.files or [] + ) + + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) + + def _validate_args(self, args: list[str], via: str) -> list[str]: + """Validate known args.""" + self._parser._config_source_hint = via # type: ignore + try: + self._parser.parse_known_and_unknown_args( + args, namespace=copy.copy(self.option) + ) + finally: + del self._parser._config_source_hint # type: ignore + + return args + + def _decide_args( + self, + *, + args: list[str], + pyargs: bool, + testpaths: list[str], + invocation_dir: pathlib.Path, + rootpath: pathlib.Path, + warn: bool, + ) -> tuple[list[str], ArgsSource]: + """Decide the args (initial paths/nodeids) to use given the relevant inputs. 
+
+        :param warn: Whether warnings may be issued.
+
+        :returns: The args and the args source. Guaranteed to be non-empty.
+        """
+        if args:
+            source = Config.ArgsSource.ARGS
+            result = args
+        else:
+            if invocation_dir == rootpath:
+                source = Config.ArgsSource.TESTPATHS
+                if pyargs:
+                    result = testpaths
+                else:
+                    result = []
+                    for path in testpaths:
+                        result.extend(sorted(glob.iglob(path, recursive=True)))
+                    if testpaths and not result:
+                        if warn:
+                            warning_text = (
+                                "No files were found in testpaths; "
+                                "consider removing or adjusting your testpaths configuration. "
+                                "Searching recursively from the current directory instead."
+                            )
+                            self.issue_config_time_warning(
+                                PytestConfigWarning(warning_text), stacklevel=3
+                            )
+            else:
+                result = []
+            if not result:
+                source = Config.ArgsSource.INVOCATION_DIR
+                result = [str(invocation_dir)]
+        return result, source
+
+    def _preparse(self, args: list[str], addopts: bool = True) -> None:
+        if addopts:
+            env_addopts = os.environ.get("PYTEST_ADDOPTS", "")
+            if len(env_addopts):
+                args[:] = (
+                    self._validate_args(shlex.split(env_addopts), "via PYTEST_ADDOPTS")
+                    + args
+                )
+        self._initini(args)
+        if addopts:
+            args[:] = (
+                self._validate_args(self.getini("addopts"), "via addopts config") + args
+            )
+
+        self.known_args_namespace = self._parser.parse_known_args(
+            args, namespace=copy.copy(self.option)
+        )
+        self._checkversion()
+        self._consider_importhook(args)
+        self.pluginmanager.consider_preparse(args, exclude_only=False)
+        if not os.environ.get("PYTEST_DISABLE_PLUGIN_AUTOLOAD"):
+            # Don't autoload from distribution package entry point. Only
+            # explicitly specified plugins are going to be loaded.
+            self.pluginmanager.load_setuptools_entrypoints("pytest11")
+        self.pluginmanager.consider_env()
+
+        self.known_args_namespace = self._parser.parse_known_args(
+            args, namespace=copy.copy(self.known_args_namespace)
+        )
+
+        self._validate_plugins()
+        self._warn_about_skipped_plugins()
+
+        if self.known_args_namespace.confcutdir is None:
+            if self.inipath is not None:
+                confcutdir = str(self.inipath.parent)
+            else:
+                confcutdir = str(self.rootpath)
+            self.known_args_namespace.confcutdir = confcutdir
+        try:
+            self.hook.pytest_load_initial_conftests(
+                early_config=self, args=args, parser=self._parser
+            )
+        except ConftestImportFailure as e:
+            if self.known_args_namespace.help or self.known_args_namespace.version:
+                # We don't want to prevent --help/--version from working,
+                # so just let it pass and print a warning at the end.
+                self.issue_config_time_warning(
+                    PytestConfigWarning(f"could not load initial conftests: {e.path}"),
+                    stacklevel=2,
+                )
+            else:
+                raise
+
+    @hookimpl(wrapper=True)
+    def pytest_collection(self) -> Generator[None, object, object]:
+        # Check for invalid ini keys after collection is done, so we take into
+        # account options added by late-loading conftest files.
+        try:
+            return (yield)
+        finally:
+            self._validate_config_options()
+
+    def _checkversion(self) -> None:
+        import pytest
+
+        minver = self.inicfg.get("minversion", None)
+        if minver:
+            # Imported lazily to improve start-up time.
+            from packaging.version import Version
+
+            if not isinstance(minver, str):
+                raise pytest.UsageError(
+                    f"{self.inipath}: 'minversion' must be a single value"
+                )
+
+            if Version(minver) > Version(pytest.__version__):
+                raise pytest.UsageError(
+                    f"{self.inipath}: 'minversion' requires pytest-{minver}, actual pytest-{pytest.__version__}"
+                )
+
+    def _validate_config_options(self) -> None:
+        for key in sorted(self._get_unknown_ini_keys()):
+            self._warn_or_fail_if_strict(f"Unknown config option: {key}\n")
+
+    def _validate_plugins(self) -> None:
+        required_plugins = sorted(self.getini("required_plugins"))
+        if not required_plugins:
+            return
+
+        # Imported lazily to improve start-up time.
+        from packaging.requirements import InvalidRequirement
+        from packaging.requirements import Requirement
+        from packaging.version import Version
+
+        plugin_info = self.pluginmanager.list_plugin_distinfo()
+        plugin_dist_info = {dist.project_name: dist.version for _, dist in plugin_info}
+
+        missing_plugins = []
+        for required_plugin in required_plugins:
+            try:
+                req = Requirement(required_plugin)
+            except InvalidRequirement:
+                missing_plugins.append(required_plugin)
+                continue
+
+            if req.name not in plugin_dist_info:
+                missing_plugins.append(required_plugin)
+            elif not req.specifier.contains(
+                Version(plugin_dist_info[req.name]), prereleases=True
+            ):
+                missing_plugins.append(required_plugin)
+
+        if missing_plugins:
+            raise UsageError(
+                "Missing required plugins: {}".format(", ".join(missing_plugins)),
+            )
+
+    def _warn_or_fail_if_strict(self, message: str) -> None:
+        if self.known_args_namespace.strict_config:
+            raise UsageError(message)
+
+        self.issue_config_time_warning(PytestConfigWarning(message), stacklevel=3)
+
+    def _get_unknown_ini_keys(self) -> list[str]:
+        parser_inicfg = self._parser._inidict
+        return [name for name in self.inicfg if name not in parser_inicfg]
+
+    def parse(self, args: list[str], addopts: bool = True) -> None:
+        # Parse the given command line arguments into this config object.
+        assert (
+            self.args == []
+        ), "can only parse cmdline args at most once per Config object"
+        self.hook.pytest_addhooks.call_historic(
+            kwargs=dict(pluginmanager=self.pluginmanager)
+        )
+        self._preparse(args, addopts=addopts)
+        self._parser.after_preparse = True  # type: ignore
+        try:
+            args = self._parser.parse_setoption(
+                args, self.option, namespace=self.option
+            )
+            self.args, self.args_source = self._decide_args(
+                args=args,
+                pyargs=self.known_args_namespace.pyargs,
+                testpaths=self.getini("testpaths"),
+                invocation_dir=self.invocation_params.dir,
+                rootpath=self.rootpath,
+                warn=True,
+            )
+        except PrintHelp:
+            pass
+
+    def issue_config_time_warning(self, warning: Warning, stacklevel: int) -> None:
+        """Issue and handle a warning during the "configure" stage.
+
+        During ``pytest_configure`` we can't capture warnings using the
+        ``catch_warnings_for_item`` function, because it is not possible to have
+        hook wrappers around ``pytest_configure``.
+
+        This function is mainly intended for plugins that need to issue warnings during
+        ``pytest_configure`` (or similar stages).
+
+        :param warning: The warning instance.
+        :param stacklevel: stacklevel forwarded to warnings.warn.
+        """
+        if self.pluginmanager.is_blocked("warnings"):
+            return
+
+        cmdline_filters = self.known_args_namespace.pythonwarnings or []
+        config_filters = self.getini("filterwarnings")
+
+        with warnings.catch_warnings(record=True) as records:
+            warnings.simplefilter("always", type(warning))
+            apply_warning_filters(config_filters, cmdline_filters)
+            warnings.warn(warning, stacklevel=stacklevel)
+
+        if records:
+            frame = sys._getframe(stacklevel - 1)
+            location = frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name
+            self.hook.pytest_warning_recorded.call_historic(
+                kwargs=dict(
+                    warning_message=records[0],
+                    when="config",
+                    nodeid="",
+                    location=location,
+                )
+            )
+
+    def addinivalue_line(self, name: str, line: str) -> None:
+        """Add a line to an ini-file option. The option must have been
+        declared but might not yet be set, in which case the line becomes
+        the first line of its value."""
+        x = self.getini(name)
+        assert isinstance(x, list)
+        x.append(line)  # modifies the cached list inline
+
+    def getini(self, name: str):
+        """Return a configuration value from an :ref:`ini file <configfiles>`.
+
+        If a configuration value is not defined in an
+        :ref:`ini file <configfiles>`, then the ``default`` value provided while
+        registering the configuration through
+        :func:`parser.addini <pytest.Parser.addini>` will be returned.
+        Please note that you can even provide ``None`` as a valid
+        default value.
+
+        If ``default`` is not provided while registering using
+        :func:`parser.addini <pytest.Parser.addini>`, then a default value
+        based on the ``type`` parameter passed to
+        :func:`parser.addini <pytest.Parser.addini>` will be returned.
+        The default values based on ``type`` are:
+        ``paths``, ``pathlist``, ``args`` and ``linelist``: empty list ``[]``;
+        ``bool``: ``False``; ``string``: empty string ``""``.
+
+        If neither the ``default`` nor the ``type`` parameter is passed
+        while registering the configuration through
+        :func:`parser.addini <pytest.Parser.addini>`, then the configuration
+        is treated as a string and a default empty string ``""`` is returned.
+
+        If the specified name hasn't been registered through a prior
+        :func:`parser.addini <pytest.Parser.addini>` call (usually from a
+        plugin), a ValueError is raised.
+        """
+        try:
+            return self._inicache[name]
+        except KeyError:
+            self._inicache[name] = val = self._getini(name)
+            return val
+
+    # Meant for easy monkeypatching by legacypath plugin.
+    # Can be inlined back (with no cover removed) once legacypath is gone.
+    def _getini_unknown_type(self, name: str, type: str, value: str | list[str]):
+        msg = f"unknown configuration type: {type}"
+        raise ValueError(msg, value)  # pragma: no cover
+
+    def _getini(self, name: str):
+        try:
+            description, type, default = self._parser._inidict[name]
+        except KeyError as e:
+            raise ValueError(f"unknown configuration value: {name!r}") from e
+        override_value = self._get_override_ini_value(name)
+        if override_value is None:
+            try:
+                value = self.inicfg[name]
+            except KeyError:
+                return default
+        else:
+            value = override_value
+        # Coerce the values based on types.
+        #
+        # Note: some coercions are only required if we are reading from .ini files, because
+        # the file format doesn't contain type information, but when reading from toml we will
+        # get either str or list of str values (see _parse_ini_config_from_pyproject_toml).
+        # For example:
+        #
+        # ini:
+        #     a_line_list = "tests acceptance"
+        # in this case, we need to split the string to obtain a list of strings.
+        #
+        # toml:
+        #     a_line_list = ["tests", "acceptance"]
+        # in this case, we already have a list ready to use.
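+        #
+        # A further illustrative pair for the "args" type (an assumption
+        # mirroring the cases above, not from the original source):
+        #
+        # ini:
+        #     testpaths = tests acceptance
+        # is shlex-split into ["tests", "acceptance"], while in toml
+        #     testpaths = ["tests", "acceptance"]
+        # is already a list and is used as-is.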
+        #
+        if type == "paths":
+            dp = (
+                self.inipath.parent
+                if self.inipath is not None
+                else self.invocation_params.dir
+            )
+            input_values = shlex.split(value) if isinstance(value, str) else value
+            return [dp / x for x in input_values]
+        elif type == "args":
+            return shlex.split(value) if isinstance(value, str) else value
+        elif type == "linelist":
+            if isinstance(value, str):
+                return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
+            else:
+                return value
+        elif type == "bool":
+            return _strtobool(str(value).strip())
+        elif type == "string":
+            return value
+        elif type is None:
+            return value
+        else:
+            return self._getini_unknown_type(name, type, value)
+
+    def _getconftest_pathlist(
+        self, name: str, path: pathlib.Path
+    ) -> list[pathlib.Path] | None:
+        try:
+            mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
+        except KeyError:
+            return None
+        assert mod.__file__ is not None
+        modpath = pathlib.Path(mod.__file__).parent
+        values: list[pathlib.Path] = []
+        for relroot in relroots:
+            if isinstance(relroot, os.PathLike):
+                relroot = pathlib.Path(relroot)
+            else:
+                relroot = relroot.replace("/", os.sep)
+            relroot = absolutepath(modpath / relroot)
+            values.append(relroot)
+        return values
+
+    def _get_override_ini_value(self, name: str) -> str | None:
+        value = None
+        # override_ini is a list of "ini=value" options.
+        # Always use the last item if multiple values are set for the same ini-name,
+        # e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2.
+        for ini_config in self._override_ini:
+            try:
+                key, user_ini_value = ini_config.split("=", 1)
+            except ValueError as e:
+                raise UsageError(
+                    f"-o/--override-ini expects option=value style (got: {ini_config!r})."
+                ) from e
+            else:
+                if key == name:
+                    value = user_ini_value
+        return value
+
+    def getoption(self, name: str, default=notset, skip: bool = False):
+        """Return a command line option value.
+
+        :param name: Name of the option. You may also specify
+            the literal ``--OPT`` option instead of the "dest" option name.
+        :param default: Default value if no option of that name exists.
+        :param skip: If True, raise pytest.skip if the option does not exist
+            or has a None value.
+        """
+        name = self._opt2dest.get(name, name)
+        try:
+            val = getattr(self.option, name)
+            if val is None and skip:
+                raise AttributeError(name)
+            return val
+        except AttributeError as e:
+            if default is not notset:
+                return default
+            if skip:
+                import pytest
+
+                pytest.skip(f"no {name!r} option found")
+            raise ValueError(f"no option named {name!r}") from e
+
+    def getvalue(self, name: str, path=None):
+        """Deprecated, use getoption() instead."""
+        return self.getoption(name)
+
+    def getvalueorskip(self, name: str, path=None):
+        """Deprecated, use getoption(skip=True) instead."""
+        return self.getoption(name, skip=True)
+
+    #: Verbosity type for failed assertions (see :confval:`verbosity_assertions`).
+    VERBOSITY_ASSERTIONS: Final = "assertions"
+    #: Verbosity type for test case execution (see :confval:`verbosity_test_cases`).
+    VERBOSITY_TEST_CASES: Final = "test_cases"
+    _VERBOSITY_INI_DEFAULT: Final = "auto"
+
+    def get_verbosity(self, verbosity_type: str | None = None) -> int:
+        r"""Retrieve the verbosity level for a fine-grained verbosity type.
+
+        :param verbosity_type: Verbosity type to get the level for. If a level is
+            configured for the given type, that value will be returned. If the
+            given type is not a known verbosity type, the global verbosity
+            level will be returned. If the given type is None (default), the
+            global verbosity level will be returned.
+
+        To configure a level for a fine-grained verbosity type, the
+        configuration file should have a setting for the configuration name
+        and a numeric value for the verbosity level. A special value of "auto"
+        can be used to explicitly use the global verbosity level.
+
+        Example:
+
+        .. code-block:: ini
+
+            # content of pytest.ini
+            [pytest]
+            verbosity_assertions = 2
+
+        .. code-block:: console
+
+            pytest -v
+
+        .. code-block:: python
+
+            print(config.get_verbosity())  # 1
+            print(config.get_verbosity(Config.VERBOSITY_ASSERTIONS))  # 2
+        """
+        global_level = self.getoption("verbose", default=0)
+        assert isinstance(global_level, int)
+        if verbosity_type is None:
+            return global_level
+
+        ini_name = Config._verbosity_ini_name(verbosity_type)
+        if ini_name not in self._parser._inidict:
+            return global_level
+
+        level = self.getini(ini_name)
+        if level == Config._VERBOSITY_INI_DEFAULT:
+            return global_level
+
+        return int(level)
+
+    @staticmethod
+    def _verbosity_ini_name(verbosity_type: str) -> str:
+        return f"verbosity_{verbosity_type}"
+
+    @staticmethod
+    def _add_verbosity_ini(parser: Parser, verbosity_type: str, help: str) -> None:
+        """Add an output verbosity configuration option for the given output type.
+
+        :param parser: Parser for command line arguments and ini-file values.
+        :param verbosity_type: Fine-grained verbosity category.
+        :param help: Description of the output this type controls.
+
+        The value should be retrieved via a call to
+        :py:func:`config.get_verbosity(type) <pytest.Config.get_verbosity>`.
+        """
+        parser.addini(
+            Config._verbosity_ini_name(verbosity_type),
+            help=help,
+            type="string",
+            default=Config._VERBOSITY_INI_DEFAULT,
+        )
+
+    def _warn_about_missing_assertion(self, mode: str) -> None:
+        if not _assertion_supported():
+            if mode == "plain":
+                warning_text = (
+                    "ASSERTIONS ARE NOT EXECUTED"
+                    " and FAILING TESTS WILL PASS. Are you"
+                    " using python -O?"
+                )
+            else:
+                warning_text = (
+                    "assertions not in test modules or"
+                    " plugins will be ignored"
+                    " because assert statements are not executed "
+                    "by the underlying Python interpreter "
+                    "(are you using python -O?)\n"
+                )
+            self.issue_config_time_warning(
+                PytestConfigWarning(warning_text),
+                stacklevel=3,
+            )
+
+    def _warn_about_skipped_plugins(self) -> None:
+        for module_name, msg in self.pluginmanager.skipped_plugins:
+            self.issue_config_time_warning(
+                PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"),
+                stacklevel=2,
+            )
+
+
+def _assertion_supported() -> bool:
+    try:
+        assert False
+    except AssertionError:
+        return True
+    else:
+        return False  # type: ignore[unreachable]
+
+
+def create_terminal_writer(
+    config: Config, file: TextIO | None = None
+) -> TerminalWriter:
+    """Create a TerminalWriter instance configured according to the options
+    in the config object.
+
+    Every piece of code that requires a TerminalWriter object and has access to a
+    config object should use this function.
+    """
+    tw = TerminalWriter(file=file)
+
+    if config.option.color == "yes":
+        tw.hasmarkup = True
+    elif config.option.color == "no":
+        tw.hasmarkup = False
+
+    if config.option.code_highlight == "yes":
+        tw.code_highlight = True
+    elif config.option.code_highlight == "no":
+        tw.code_highlight = False
+
+    return tw
+
+
+def _strtobool(val: str) -> bool:
+    """Convert a string representation of truth to True or False.
+
+    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+    are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
+    'val' is anything else.
+
+    .. note:: Copied from distutils.util.
+    """
+    val = val.lower()
+    if val in ("y", "yes", "t", "true", "on", "1"):
+        return True
+    elif val in ("n", "no", "f", "false", "off", "0"):
+        return False
+    else:
+        raise ValueError(f"invalid truth value {val!r}")
+
+
+@lru_cache(maxsize=50)
+def parse_warning_filter(
+    arg: str, *, escape: bool
+) -> tuple[warnings._ActionKind, str, type[Warning], str, int]:
+    """Parse a warnings filter string.
+
+    This is copied from warnings._setoption with the following changes:
+
+    * Does not apply the filter.
+    * Escaping is optional.
+    * Raises UsageError so we get nice error messages on failure.
+    """
+    __tracebackhide__ = True
+    error_template = dedent(
+        f"""\
+        while parsing the following warning configuration:
+
+          {arg}
+
+        This error occurred:
+
+        {{error}}
+        """
+    )
+
+    parts = arg.split(":")
+    if len(parts) > 5:
+        doc_url = (
+            "https://docs.python.org/3/library/warnings.html#describing-warning-filters"
+        )
+        error = dedent(
+            f"""\
+            Too many fields ({len(parts)}), expected at most 5 separated by colons:
+
+              action:message:category:module:line
+
+            For more information please consult: {doc_url}
+            """
+        )
+        raise UsageError(error_template.format(error=error))
+
+    while len(parts) < 5:
+        parts.append("")
+    action_, message, category_, module, lineno_ = (s.strip() for s in parts)
+    try:
+        action: warnings._ActionKind = warnings._getaction(action_)  # type: ignore[attr-defined]
+    except warnings._OptionError as e:
+        raise UsageError(error_template.format(error=str(e))) from None
+    try:
+        category: type[Warning] = _resolve_warning_category(category_)
+    except Exception:
+        exc_info = ExceptionInfo.from_current()
+        exception_text = exc_info.getrepr(style="native")
+        raise UsageError(error_template.format(error=exception_text)) from None
+    if message and escape:
+        message = re.escape(message)
+    if module and escape:
+        module = re.escape(module) + r"\Z"
+    if lineno_:
+        try:
+            lineno = int(lineno_)
+            if lineno < 0:
+                raise ValueError("number is negative")
+        except ValueError as e:
+            raise UsageError(
+                error_template.format(error=f"invalid lineno {lineno_!r}: {e}")
+            ) from None
+    else:
+        lineno = 0
+    return action, message, category, module, lineno
+
+
+def _resolve_warning_category(category: str) -> type[Warning]:
+    """
+    Copied from warnings._getcategory, but changed so it lets exceptions (especially
+    ImportErrors) propagate, so we can get access to their tracebacks (#9218).
+    """
+    __tracebackhide__ = True
+    if not category:
+        return Warning
+
+    if "." not in category:
+        import builtins as m
+
+        klass = category
+    else:
+        module, _, klass = category.rpartition(".")
+        m = __import__(module, None, None, [klass])
+    cat = getattr(m, klass)
+    if not issubclass(cat, Warning):
+        raise UsageError(f"{cat} is not a Warning subclass")
+    return cast(Type[Warning], cat)
+
+
+def apply_warning_filters(
+    config_filters: Iterable[str], cmdline_filters: Iterable[str]
+) -> None:
+    """Apply pytest-configured filters to the warnings module."""
+    # Filters should have this precedence: cmdline options, config.
+    # Filters should be applied in the inverse order of precedence.
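+    # Illustrative call (an assumption, not from the original source), using
+    # the standard action:message:category:module:line filter syntax parsed
+    # above:
+    #
+    #     apply_warning_filters(["error::DeprecationWarning"], ["ignore::UserWarning"])
+    #
+    # registers the config filter first, then the cmdline filter on top of it.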
+    for arg in config_filters:
+        warnings.filterwarnings(*parse_warning_filter(arg, escape=False))
+
+    for arg in cmdline_filters:
+        warnings.filterwarnings(*parse_warning_filter(arg, escape=True))
diff --git a/venv/Lib/site-packages/_pytest/config/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/_pytest/config/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..39f61db
Binary files /dev/null and b/venv/Lib/site-packages/_pytest/config/__pycache__/__init__.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/_pytest/config/__pycache__/argparsing.cpython-311.pyc b/venv/Lib/site-packages/_pytest/config/__pycache__/argparsing.cpython-311.pyc
new file mode 100644
index 0000000..090f0ef
Binary files /dev/null and b/venv/Lib/site-packages/_pytest/config/__pycache__/argparsing.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/_pytest/config/__pycache__/compat.cpython-311.pyc b/venv/Lib/site-packages/_pytest/config/__pycache__/compat.cpython-311.pyc
new file mode 100644
index 0000000..e84c1bf
Binary files /dev/null and b/venv/Lib/site-packages/_pytest/config/__pycache__/compat.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/_pytest/config/__pycache__/exceptions.cpython-311.pyc b/venv/Lib/site-packages/_pytest/config/__pycache__/exceptions.cpython-311.pyc
new file mode 100644
index 0000000..bedd4a7
Binary files /dev/null and b/venv/Lib/site-packages/_pytest/config/__pycache__/exceptions.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/_pytest/config/__pycache__/findpaths.cpython-311.pyc b/venv/Lib/site-packages/_pytest/config/__pycache__/findpaths.cpython-311.pyc
new file mode 100644
index 0000000..78f85bc
Binary files /dev/null and b/venv/Lib/site-packages/_pytest/config/__pycache__/findpaths.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/_pytest/config/argparsing.py b/venv/Lib/site-packages/_pytest/config/argparsing.py
new file mode 100644
index 0000000..85aa463
--- /dev/null
+++ b/venv/Lib/site-packages/_pytest/config/argparsing.py
@@ -0,0 +1,551 @@
+# mypy: allow-untyped-defs
+from __future__ import annotations
+
+import argparse
+from gettext import gettext
+import os
+import sys
+from typing import Any
+from typing import Callable
+from typing import cast
+from typing import final
+from typing import List
+from typing import Literal
+from typing import Mapping
+from typing import NoReturn
+from typing import Sequence
+
+import _pytest._io
+from _pytest.config.exceptions import UsageError
+from _pytest.deprecated import check_ispytest
+
+
+FILE_OR_DIR = "file_or_dir"
+
+
+class NotSet:
+    def __repr__(self) -> str:
+        return "<NOTSET>"
+
+
+NOT_SET = NotSet()
+
+
+@final
+class Parser:
+    """Parser for command line arguments and ini-file values.
+
+    :ivar extra_info: Dict of generic param -> value to display in case
+        there's an error processing the command line arguments.
+    """
+
+    prog: str | None = None
+
+    def __init__(
+        self,
+        usage: str | None = None,
+        processopt: Callable[[Argument], None] | None = None,
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._anonymous = OptionGroup("Custom options", parser=self, _ispytest=True)
+        self._groups: list[OptionGroup] = []
+        self._processopt = processopt
+        self._usage = usage
+        self._inidict: dict[str, tuple[str, str | None, Any]] = {}
+        self._ininames: list[str] = []
+        self.extra_info: dict[str, Any] = {}
+
+    def processoption(self, option: Argument) -> None:
+        if self._processopt:
+            if option.dest:
+                self._processopt(option)
+
+    def getgroup(
+        self, name: str, description: str = "", after: str | None = None
+    ) -> OptionGroup:
+        """Get (or create) a named option Group.
+
+        :param name: Name of the option group.
+        :param description: Long description for --help output.
+        :param after: Name of another group, used for ordering --help output.
+        :returns: The option group.
+
+        The returned group object has an ``addoption`` method with the same
+        signature as :func:`parser.addoption <pytest.Parser.addoption>` but
+        will be shown in the respective group in the output of
+        ``pytest --help``.
+        """
+        for group in self._groups:
+            if group.name == name:
+                return group
+        group = OptionGroup(name, description, parser=self, _ispytest=True)
+        i = 0
+        for i, grp in enumerate(self._groups):
+            if grp.name == after:
+                break
+        self._groups.insert(i + 1, group)
+        return group
+
+    def addoption(self, *opts: str, **attrs: Any) -> None:
+        """Register a command line option.
+
+        :param opts:
+            Option names, can be short or long options.
+        :param attrs:
+            Same attributes as the argparse library's :meth:`add_argument()
+            <argparse.ArgumentParser.add_argument>` function accepts.
+
+        After command line parsing, options are available on the pytest config
+        object via ``config.option.NAME``, where ``NAME`` is usually set
+        by passing a ``dest`` attribute, for example
+        ``addoption("--long", dest="NAME", ...)``.
+        """
+        self._anonymous.addoption(*opts, **attrs)
+
+    def parse(
+        self,
+        args: Sequence[str | os.PathLike[str]],
+        namespace: argparse.Namespace | None = None,
+    ) -> argparse.Namespace:
+        from _pytest._argcomplete import try_argcomplete
+
+        self.optparser = self._getparser()
+        try_argcomplete(self.optparser)
+        strargs = [os.fspath(x) for x in args]
+        return self.optparser.parse_args(strargs, namespace=namespace)
+
+    def _getparser(self) -> MyOptionParser:
+        from _pytest._argcomplete import filescompleter
+
+        optparser = MyOptionParser(self, self.extra_info, prog=self.prog)
+        groups = [*self._groups, self._anonymous]
+        for group in groups:
+            if group.options:
+                desc = group.description or group.name
+                arggroup = optparser.add_argument_group(desc)
+                for option in group.options:
+                    n = option.names()
+                    a = option.attrs()
+                    arggroup.add_argument(*n, **a)
+        file_or_dir_arg = optparser.add_argument(FILE_OR_DIR, nargs="*")
+        # bash-like autocompletion for dirs (appending '/')
+        # Type ignored because typeshed doesn't know about argcomplete.
+        file_or_dir_arg.completer = filescompleter  # type: ignore
+        return optparser
+
+    def parse_setoption(
+        self,
+        args: Sequence[str | os.PathLike[str]],
+        option: argparse.Namespace,
+        namespace: argparse.Namespace | None = None,
+    ) -> list[str]:
+        parsedoption = self.parse(args, namespace=namespace)
+        for name, value in parsedoption.__dict__.items():
+            setattr(option, name, value)
+        return cast(List[str], getattr(parsedoption, FILE_OR_DIR))
+
+    def parse_known_args(
+        self,
+        args: Sequence[str | os.PathLike[str]],
+        namespace: argparse.Namespace | None = None,
+    ) -> argparse.Namespace:
+        """Parse the known arguments at this point.
+
+        :returns: An argparse namespace object.
+        """
+        return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
+
+    def parse_known_and_unknown_args(
+        self,
+        args: Sequence[str | os.PathLike[str]],
+        namespace: argparse.Namespace | None = None,
+    ) -> tuple[argparse.Namespace, list[str]]:
+        """Parse the known arguments at this point, and also return the
+        remaining unknown arguments.
+
+        :returns:
+            A tuple containing an argparse namespace object for the known
+            arguments, and a list of the unknown arguments.
+        """
+        optparser = self._getparser()
+        strargs = [os.fspath(x) for x in args]
+        return optparser.parse_known_args(strargs, namespace=namespace)
+
+    def addini(
+        self,
+        name: str,
+        help: str,
+        type: Literal["string", "paths", "pathlist", "args", "linelist", "bool"]
+        | None = None,
+        default: Any = NOT_SET,
+    ) -> None:
+        """Register an ini-file option.
+
+        :param name:
+            Name of the ini-variable.
+        :param type:
+            Type of the variable. Can be:
+
+                * ``string``: a string
+                * ``bool``: a boolean
+                * ``args``: a list of strings, separated as in a shell
+                * ``linelist``: a list of strings, separated by line breaks
+                * ``paths``: a list of :class:`pathlib.Path`, separated as in a shell
+                * ``pathlist``: a list of ``py.path``, separated as in a shell
+
+            For the ``paths`` and ``pathlist`` types, values are considered
+            relative to the ini-file. In case the execution is happening without
+            an ini-file defined, they will be considered relative to the current
+            working directory (for example with ``--override-ini``).
+
+            .. versionadded:: 7.0
+                The ``paths`` variable type.
+
+            .. versionadded:: 8.1
+                Use the current working directory to resolve ``paths`` and
+                ``pathlist`` in the absence of an ini-file.
+
+            Defaults to ``string`` if ``None`` or not passed.
+        :param default:
+            Default value if no ini-file option exists but is queried.
+
+        The value of ini-variables can be retrieved via a call to
+        :py:func:`config.getini(name) <pytest.Config.getini>`.
+        """
+        assert type in (None, "string", "paths", "pathlist", "args", "linelist", "bool")
+        if default is NOT_SET:
+            default = get_ini_default_for_type(type)
+
+        self._inidict[name] = (help, type, default)
+        self._ininames.append(name)
+
+
+def get_ini_default_for_type(
+    type: Literal["string", "paths", "pathlist", "args", "linelist", "bool"] | None,
+) -> Any:
+    """
+    Used by addini to get the default value for a given ini-option type, when
+    a default is not supplied.
+    """
+    if type is None:
+        return ""
+    elif type in ("paths", "pathlist", "args", "linelist"):
+        return []
+    elif type == "bool":
+        return False
+    else:
+        return ""
+
+
+class ArgumentError(Exception):
+    """Raised if an Argument instance is created with invalid or
+    inconsistent arguments."""

+    def __init__(self, msg: str, option: Argument | str) -> None:
+        self.msg = msg
+        self.option_id = str(option)
+
+    def __str__(self) -> str:
+        if self.option_id:
+            return f"option {self.option_id}: {self.msg}"
+        else:
+            return self.msg
+
+
+class Argument:
+    """Class that mimics the necessary behaviour of optparse.Option.
+
+    It's currently a least-effort implementation that ignores choices
+    and integer prefixes.
+
+    https://docs.python.org/3/library/optparse.html#optparse-standard-option-types
+    """
+
+    def __init__(self, *names: str, **attrs: Any) -> None:
+        """Store params in private vars for use in add_argument."""
+        self._attrs = attrs
+        self._short_opts: list[str] = []
+        self._long_opts: list[str] = []
+        try:
+            self.type = attrs["type"]
+        except KeyError:
+            pass
+        try:
+            # Attribute existence is tested in Config._processopt.
+            self.default = attrs["default"]
+        except KeyError:
+            pass
+        self._set_opt_strings(names)
+        dest: str | None = attrs.get("dest")
+        if dest:
+            self.dest = dest
+        elif self._long_opts:
+            self.dest = self._long_opts[0][2:].replace("-", "_")
+        else:
+            try:
+                self.dest = self._short_opts[0][1:]
+            except IndexError as e:
+                self.dest = "???"  # Needed for the error repr.
+                raise ArgumentError("need a long or short option", self) from e
+
+    def names(self) -> list[str]:
+        return self._short_opts + self._long_opts
+
+    def attrs(self) -> Mapping[str, Any]:
+        # Update any attributes set by processopt.
+        attrs = "default dest help".split()
+        attrs.append(self.dest)
+        for attr in attrs:
+            try:
+                self._attrs[attr] = getattr(self, attr)
+            except AttributeError:
+                pass
+        return self._attrs
+
+    def _set_opt_strings(self, opts: Sequence[str]) -> None:
+        """Directly from optparse.
+
+        Might not be necessary as this is passed to argparse later on.
+        """
+        for opt in opts:
+            if len(opt) < 2:
+                raise ArgumentError(
+                    f"invalid option string {opt!r}: "
+                    "must be at least two characters long",
+                    self,
+                )
+            elif len(opt) == 2:
+                if not (opt[0] == "-" and opt[1] != "-"):
+                    raise ArgumentError(
+                        f"invalid short option string {opt!r}: "
+                        "must be of the form -x (where x is any non-dash character)",
+                        self,
+                    )
+                self._short_opts.append(opt)
+            else:
+                if not (opt[0:2] == "--" and opt[2] != "-"):
+                    raise ArgumentError(
+                        f"invalid long option string {opt!r}: "
+                        "must start with --, followed by a non-dash",
+                        self,
+                    )
+                self._long_opts.append(opt)
+
+    def __repr__(self) -> str:
+        args: list[str] = []
+        if self._short_opts:
+            args += ["_short_opts: " + repr(self._short_opts)]
+        if self._long_opts:
+            args += ["_long_opts: " + repr(self._long_opts)]
+        args += ["dest: " + repr(self.dest)]
+        if hasattr(self, "type"):
+            args += ["type: " + repr(self.type)]
+        if hasattr(self, "default"):
+            args += ["default: " + repr(self.default)]
+        return "Argument({})".format(", ".join(args))
+
+
+class OptionGroup:
+    """A group of options shown in its own section."""
+
+    def __init__(
+        self,
+        name: str,
+        description: str = "",
+        parser: Parser | None = None,
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self.name = name
+        self.description = description
+        self.options: list[Argument] = []
+        self.parser = parser
+
+    def addoption(self, *opts: str, **attrs: Any) -> None:
+        """Add an option to this group.
+
+        If a shortened version of a long option is specified, it will
+        be suppressed in the help. ``addoption('--twowords', '--two-words')``
+        results in help showing ``--two-words`` only, but ``--twowords`` gets
+        accepted **and** the automatic destination is in ``args.twowords``.
+
+        :param opts:
+            Option names, can be short or long options.
+        :param attrs:
+            Same attributes as the argparse library's :meth:`add_argument()
+            <argparse.ArgumentParser.add_argument>` function accepts.
+        """
+        conflict = set(opts).intersection(
+            name for opt in self.options for name in opt.names()
+        )
+        if conflict:
+            raise ValueError(f"option names {conflict} already added")
+        option = Argument(*opts, **attrs)
+        self._addoption_instance(option, shortupper=False)
+
+    def _addoption(self, *opts: str, **attrs: Any) -> None:
+        option = Argument(*opts, **attrs)
+        self._addoption_instance(option, shortupper=True)
+
+    def _addoption_instance(self, option: Argument, shortupper: bool = False) -> None:
+        if not shortupper:
+            for opt in option._short_opts:
+                if opt[0] == "-" and opt[1].islower():
+                    raise ValueError("lowercase shortoptions reserved")
+        if self.parser:
+            self.parser.processoption(option)
+        self.options.append(option)
+
+
+class MyOptionParser(argparse.ArgumentParser):
+    def __init__(
+        self,
+        parser: Parser,
+        extra_info: dict[str, Any] | None = None,
+        prog: str | None = None,
+    ) -> None:
+        self._parser = parser
+        super().__init__(
+            prog=prog,
+            usage=parser._usage,
+            add_help=False,
+            formatter_class=DropShorterLongHelpFormatter,
+            allow_abbrev=False,
+            fromfile_prefix_chars="@",
+        )
+        # extra_info is a dict of (param -> value) to display if there's
+        # a usage error, to provide more contextual information to the user.
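+        # For example (grounded in Config._initini earlier in this diff):
+        # extra_info typically ends up holding the "rootdir" and "inifile"
+        # keys, which are appended to the "unrecognized arguments" errors
+        # produced in parse_args below.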
+ self.extra_info = extra_info if extra_info else {} + + def error(self, message: str) -> NoReturn: + """Transform argparse error message into UsageError.""" + msg = f"{self.prog}: error: {message}" + + if hasattr(self._parser, "_config_source_hint"): + msg = f"{msg} ({self._parser._config_source_hint})" + + raise UsageError(self.format_usage() + msg) + + # Type ignored because typeshed has a very complex type in the superclass. + def parse_args( # type: ignore + self, + args: Sequence[str] | None = None, + namespace: argparse.Namespace | None = None, + ) -> argparse.Namespace: + """Allow splitting of positional arguments.""" + parsed, unrecognized = self.parse_known_args(args, namespace) + if unrecognized: + for arg in unrecognized: + if arg and arg[0] == "-": + lines = [ + "unrecognized arguments: {}".format(" ".join(unrecognized)) + ] + for k, v in sorted(self.extra_info.items()): + lines.append(f" {k}: {v}") + self.error("\n".join(lines)) + getattr(parsed, FILE_OR_DIR).extend(unrecognized) + return parsed + + if sys.version_info < (3, 9): # pragma: no cover + # Backport of https://github.com/python/cpython/pull/14316 so we can + # disable long --argument abbreviations without breaking short flags. + def _parse_optional( + self, arg_string: str + ) -> tuple[argparse.Action | None, str, str | None] | None: + if not arg_string: + return None + if arg_string[0] not in self.prefix_chars: + return None + if arg_string in self._option_string_actions: + action = self._option_string_actions[arg_string] + return action, arg_string, None + if len(arg_string) == 1: + return None + if "=" in arg_string: + option_string, explicit_arg = arg_string.split("=", 1) + if option_string in self._option_string_actions: + action = self._option_string_actions[option_string] + return action, option_string, explicit_arg + if self.allow_abbrev or not arg_string.startswith("--"): + option_tuples = self._get_option_tuples(arg_string) + if len(option_tuples) > 1: + msg = gettext( + "ambiguous option: %(option)s could match %(matches)s" + ) + options = ", ".join(option for _, option, _ in option_tuples) + self.error(msg % {"option": arg_string, "matches": options}) + elif len(option_tuples) == 1: + (option_tuple,) = option_tuples + return option_tuple + if self._negative_number_matcher.match(arg_string): + if not self._has_negative_number_optionals: + return None + if " " in arg_string: + return None + return None, arg_string, None + + +class DropShorterLongHelpFormatter(argparse.HelpFormatter): + """Shorten help for long options that differ only in extra hyphens. + + - Collapse **long** options that are the same except for extra hyphens. + - Shortcut if there are only two options and one of them is a short one. + - Cache result on the action object as this is called at least 2 times. + """ + + def __init__(self, *args: Any, **kwargs: Any) -> None: + # Use more accurate terminal width. 
+        if "width" not in kwargs:
+            kwargs["width"] = _pytest._io.get_terminal_width()
+        super().__init__(*args, **kwargs)
+
+    def _format_action_invocation(self, action: argparse.Action) -> str:
+        orgstr = super()._format_action_invocation(action)
+        if orgstr and orgstr[0] != "-":  # only optional arguments
+            return orgstr
+        res: str | None = getattr(action, "_formatted_action_invocation", None)
+        if res:
+            return res
+        options = orgstr.split(", ")
+        if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
+            # a shortcut for '-h, --help' or '--abc', '-a'
+            action._formatted_action_invocation = orgstr  # type: ignore
+            return orgstr
+        return_list = []
+        short_long: dict[str, str] = {}
+        for option in options:
+            if len(option) == 2 or option[2] == " ":
+                continue
+            if not option.startswith("--"):
+                raise ArgumentError(
+                    f'long optional argument without "--": [{option}]', option
+                )
+            xxoption = option[2:]
+            shortened = xxoption.replace("-", "")
+            if shortened not in short_long or len(short_long[shortened]) < len(
+                xxoption
+            ):
+                short_long[shortened] = xxoption
+        # now short_long has been filled out to the longest with dashes
+        # **and** we keep the right option ordering from add_argument
+        for option in options:
+            if len(option) == 2 or option[2] == " ":
+                return_list.append(option)
+            if option[2:] == short_long.get(option.replace("-", "")):
+                return_list.append(option.replace(" ", "=", 1))
+        formatted_action_invocation = ", ".join(return_list)
+        action._formatted_action_invocation = formatted_action_invocation  # type: ignore
+        return formatted_action_invocation
+
+    def _split_lines(self, text, width):
+        """Wrap lines after splitting on original newlines.
+
+        This allows explicit line breaks in the help text.
+        """
+        import textwrap
+
+        lines = []
+        for line in text.splitlines():
+            lines.extend(textwrap.wrap(line.strip(), width))
+        return lines
diff --git a/venv/Lib/site-packages/_pytest/config/compat.py b/venv/Lib/site-packages/_pytest/config/compat.py
new file mode 100644
index 0000000..2856d85
--- /dev/null
+++ b/venv/Lib/site-packages/_pytest/config/compat.py
@@ -0,0 +1,85 @@
+from __future__ import annotations
+
+import functools
+from pathlib import Path
+from typing import Any
+from typing import Mapping
+import warnings
+
+import pluggy
+
+from ..compat import LEGACY_PATH
+from ..compat import legacy_path
+from ..deprecated import HOOK_LEGACY_PATH_ARG
+
+
+# hookname: (Path, LEGACY_PATH)
+imply_paths_hooks: Mapping[str, tuple[str, str]] = {
+    "pytest_ignore_collect": ("collection_path", "path"),
+    "pytest_collect_file": ("file_path", "path"),
+    "pytest_pycollect_makemodule": ("module_path", "path"),
+    "pytest_report_header": ("start_path", "startdir"),
+    "pytest_report_collectionfinish": ("start_path", "startdir"),
+}
+
+
+def _check_path(path: Path, fspath: LEGACY_PATH) -> None:
+    if Path(fspath) != path:
+        raise ValueError(
+            f"Path({fspath!r}) != {path!r}\n"
+            "if both path and fspath are given they need to be equal"
+        )
+
+
+class PathAwareHookProxy:
+    """
+    This helper wraps around hook callers; until pluggy supports fixing
+    hook calls, this one will do.
+
+    It currently doesn't return full hook caller proxies for fixed hooks;
+    this may have to be changed later depending on bugs.
+    """
+
+    def __init__(self, hook_relay: pluggy.HookRelay) -> None:
+        self._hook_relay = hook_relay
+
+    def __dir__(self) -> list[str]:
+        return dir(self._hook_relay)
+
+    def __getattr__(self, key: str) -> pluggy.HookCaller:
+        hook: pluggy.HookCaller = getattr(self._hook_relay, key)
+        if key not in imply_paths_hooks:
+            self.__dict__[key] = hook
+            return hook
+        else:
+            path_var, fspath_var = imply_paths_hooks[key]
+
+            @functools.wraps(hook)
+            def fixed_hook(**kw: Any) -> Any:
+                path_value: Path | None = kw.pop(path_var, None)
+                fspath_value: LEGACY_PATH | None = kw.pop(fspath_var, None)
+                if fspath_value is not None:
+                    warnings.warn(
+                        HOOK_LEGACY_PATH_ARG.format(
+                            pylib_path_arg=fspath_var, pathlib_path_arg=path_var
+                        ),
+                        stacklevel=2,
+                    )
+                if path_value is not None:
+                    if fspath_value is not None:
+                        _check_path(path_value, fspath_value)
+                    else:
+                        fspath_value = legacy_path(path_value)
+                else:
+                    assert fspath_value is not None
+                    path_value = Path(fspath_value)
+
+                kw[path_var] = path_value
+                kw[fspath_var] = fspath_value
+                return hook(**kw)
+
+            fixed_hook.name = hook.name  # type: ignore[attr-defined]
+            fixed_hook.spec = hook.spec  # type: ignore[attr-defined]
+            fixed_hook.__name__ = key
+            self.__dict__[key] = fixed_hook
+            return fixed_hook  # type: ignore[return-value]
diff --git a/venv/Lib/site-packages/_pytest/config/exceptions.py b/venv/Lib/site-packages/_pytest/config/exceptions.py
new file mode 100644
index 0000000..90108ec
--- /dev/null
+++ b/venv/Lib/site-packages/_pytest/config/exceptions.py
@@ -0,0 +1,13 @@
+from __future__ import annotations
+
+from typing import final
+
+
+@final
+class UsageError(Exception):
+    """Error in pytest usage or invocation."""
+
+
+class PrintHelp(Exception):
+    """Raised when pytest should print its help to skip the rest of the
+    argument parsing and validation."""
diff --git a/venv/Lib/site-packages/_pytest/config/findpaths.py b/venv/Lib/site-packages/_pytest/config/findpaths.py
new file mode 100644
index 0000000..ce4c990
--- /dev/null
+++ b/venv/Lib/site-packages/_pytest/config/findpaths.py
@@ -0,0 +1,228 @@
+from __future__ import annotations
+
+import os
+from pathlib import Path
+import sys
+from typing import Iterable
+from typing import Sequence
+
+import iniconfig
+
+from .exceptions import UsageError
+from _pytest.outcomes import fail
+from _pytest.pathlib import absolutepath
+from _pytest.pathlib import commonpath
+from _pytest.pathlib import safe_exists
+
+
+def _parse_ini_config(path: Path) -> iniconfig.IniConfig:
+    """Parse the given generic '.ini' file using the legacy IniConfig parser,
+    returning the parsed object.
+
+    Raise UsageError if the file cannot be parsed.
+    """
+    try:
+        return iniconfig.IniConfig(str(path))
+    except iniconfig.ParseError as exc:
+        raise UsageError(str(exc)) from exc
+
+
+def load_config_dict_from_file(
+    filepath: Path,
+) -> dict[str, str | list[str]] | None:
+    """Load pytest configuration from the given file path, if supported.
+
+    Return None if the file does not contain valid pytest configuration.
+    """
+    # Configuration from ini files is obtained from the [pytest] section, if present.
+    if filepath.suffix == ".ini":
+        iniconfig = _parse_ini_config(filepath)
+
+        if "pytest" in iniconfig:
+            return dict(iniconfig["pytest"].items())
+        else:
+            # "pytest.ini" files are always the source of configuration, even if empty.
+            if filepath.name == "pytest.ini":
+                return {}
+
+    # '.cfg' files are considered if they contain a "[tool:pytest]" section.
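+    # An illustrative setup.cfg (an assumption, not part of the original source):
+    #
+    #     [tool:pytest]
+    #     addopts = -ra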
+    elif filepath.suffix == ".cfg":
+        iniconfig = _parse_ini_config(filepath)
+
+        if "tool:pytest" in iniconfig.sections:
+            return dict(iniconfig["tool:pytest"].items())
+        elif "pytest" in iniconfig.sections:
+            # If a setup.cfg contains a "[pytest]" section, we raise a failure to
+            # indicate to users that plain "[pytest]" sections in setup.cfg files
+            # are no longer supported (#3086).
+            fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False)
+
+    # '.toml' files are considered if they contain a [tool.pytest.ini_options] table.
+    elif filepath.suffix == ".toml":
+        if sys.version_info >= (3, 11):
+            import tomllib
+        else:
+            import tomli as tomllib
+
+        toml_text = filepath.read_text(encoding="utf-8")
+        try:
+            config = tomllib.loads(toml_text)
+        except tomllib.TOMLDecodeError as exc:
+            raise UsageError(f"{filepath}: {exc}") from exc
+
+        result = config.get("tool", {}).get("pytest", {}).get("ini_options", None)
+        if result is not None:
+            # TOML supports richer data types than ini files (strings, arrays, floats, ints, etc),
+            # however we need to convert all scalar values to str for compatibility with the rest
+            # of the configuration system, which expects strings only.
+            def make_scalar(v: object) -> str | list[str]:
+                return v if isinstance(v, list) else str(v)
+
+            return {k: make_scalar(v) for k, v in result.items()}
+
+    return None
+
+
+def locate_config(
+    invocation_dir: Path,
+    args: Iterable[Path],
+) -> tuple[Path | None, Path | None, dict[str, str | list[str]]]:
+    """Search in the list of arguments for a valid ini-file for pytest,
+    and return a tuple of (rootdir, inifile, cfg-dict)."""
+    config_names = [
+        "pytest.ini",
+        ".pytest.ini",
+        "pyproject.toml",
+        "tox.ini",
+        "setup.cfg",
+    ]
+    args = [x for x in args if not str(x).startswith("-")]
+    if not args:
+        args = [invocation_dir]
+    found_pyproject_toml: Path | None = None
+    for arg in args:
+        argpath = absolutepath(arg)
+        for base in (argpath, *argpath.parents):
+            for config_name in config_names:
+                p = base / config_name
+                if p.is_file():
+                    if p.name == "pyproject.toml" and found_pyproject_toml is None:
+                        found_pyproject_toml = p
+                    ini_config = load_config_dict_from_file(p)
+                    if ini_config is not None:
+                        return base, p, ini_config
+    if found_pyproject_toml is not None:
+        return found_pyproject_toml.parent, found_pyproject_toml, {}
+    return None, None, {}
+
+
+def get_common_ancestor(
+    invocation_dir: Path,
+    paths: Iterable[Path],
+) -> Path:
+    common_ancestor: Path | None = None
+    for path in paths:
+        if not path.exists():
+            continue
+        if common_ancestor is None:
+            common_ancestor = path
+        else:
+            if common_ancestor in path.parents or path == common_ancestor:
+                continue
+            elif path in common_ancestor.parents:
+                common_ancestor = path
+            else:
+                shared = commonpath(path, common_ancestor)
+                if shared is not None:
+                    common_ancestor = shared
+    if common_ancestor is None:
+        common_ancestor = invocation_dir
+    elif common_ancestor.is_file():
+        common_ancestor = common_ancestor.parent
+    return common_ancestor
+
+
+def get_dirs_from_args(args: Iterable[str]) -> list[Path]:
+    def is_option(x: str) -> bool:
+        return x.startswith("-")
+
+    def get_file_part_from_node_id(x: str) -> str:
+        return x.split("::")[0]
+
+    def get_dir_from_path(path: Path) -> Path:
+        if path.is_dir():
+            return path
+        return path.parent
+
+    # These look like paths but may not exist
+    possible_paths = (
+        absolutepath(get_file_part_from_node_id(arg))
+        for arg in args
+        if not is_option(arg)
+    )
+
+    return [get_dir_from_path(path) for path in possible_paths if safe_exists(path)]
+
+
+CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead."
+
+
+def determine_setup(
+    *,
+    inifile: str | None,
+    args: Sequence[str],
+    rootdir_cmd_arg: str | None,
+    invocation_dir: Path,
+) -> tuple[Path, Path | None, dict[str, str | list[str]]]:
+    """Determine the rootdir, inifile and ini configuration values from the
+    command line arguments.
+
+    :param inifile:
+        The `--inifile` command line argument, if given.
+    :param args:
+        The free command line arguments.
+    :param rootdir_cmd_arg:
+        The `--rootdir` command line argument, if given.
+    :param invocation_dir:
+        The working directory when pytest was invoked.
+    """
+    rootdir = None
+    dirs = get_dirs_from_args(args)
+    if inifile:
+        inipath_ = absolutepath(inifile)
+        inipath: Path | None = inipath_
+        inicfg = load_config_dict_from_file(inipath_) or {}
+        if rootdir_cmd_arg is None:
+            rootdir = inipath_.parent
+    else:
+        ancestor = get_common_ancestor(invocation_dir, dirs)
+        rootdir, inipath, inicfg = locate_config(invocation_dir, [ancestor])
+        if rootdir is None and rootdir_cmd_arg is None:
+            for possible_rootdir in (ancestor, *ancestor.parents):
+                if (possible_rootdir / "setup.py").is_file():
+                    rootdir = possible_rootdir
+                    break
+            else:
+                if dirs != [ancestor]:
+                    rootdir, inipath, inicfg = locate_config(invocation_dir, dirs)
+                if rootdir is None:
+                    rootdir = get_common_ancestor(
+                        invocation_dir, [invocation_dir, ancestor]
+                    )
+                    if is_fs_root(rootdir):
+                        rootdir = ancestor
+    if rootdir_cmd_arg:
+        rootdir = absolutepath(os.path.expandvars(rootdir_cmd_arg))
+        if not rootdir.is_dir():
+            raise UsageError(
+                f"Directory '{rootdir}' not found. Check your '--rootdir' option."
+            )
+    assert rootdir is not None
+    return rootdir, inipath, inicfg or {}
+
+
+def is_fs_root(p: Path) -> bool:
+    r"""
+    Return True if the given path points to the root of the
+    file system ("/" on Unix and "C:\\" on Windows, for example).
+    """
+    return os.path.splitdrive(str(p))[1] == os.sep
diff --git a/venv/Lib/site-packages/_pytest/debugging.py b/venv/Lib/site-packages/_pytest/debugging.py
new file mode 100644
index 0000000..2dfe321
--- /dev/null
+++ b/venv/Lib/site-packages/_pytest/debugging.py
@@ -0,0 +1,385 @@
+# mypy: allow-untyped-defs
+# ruff: noqa: T100
+"""Interactive debugging with PDB, the Python Debugger."""
+
+from __future__ import annotations
+
+import argparse
+import functools
+import sys
+import types
+from typing import Any
+from typing import Callable
+from typing import Generator
+import unittest
+
+from _pytest import outcomes
+from _pytest._code import ExceptionInfo
+from _pytest.capture import CaptureManager
+from _pytest.config import Config
+from _pytest.config import ConftestImportFailure
+from _pytest.config import hookimpl
+from _pytest.config import PytestPluginManager
+from _pytest.config.argparsing import Parser
+from _pytest.config.exceptions import UsageError
+from _pytest.nodes import Node
+from _pytest.reports import BaseReport
+from _pytest.runner import CallInfo
+
+
+def _validate_usepdb_cls(value: str) -> tuple[str, str]:
+    """Validate the syntax of the --pdbcls option."""
+    try:
+        modname, classname = value.split(":")
+    except ValueError as e:
+        raise argparse.ArgumentTypeError(
+            f"{value!r} is not in the format 'modname:classname'"
+        ) from e
+    return (modname, classname)
+
+
+def pytest_addoption(parser: Parser) -> None:
+    group = parser.getgroup("general")
+    group._addoption(
+        "--pdb",
+        dest="usepdb",
+        action="store_true",
+        help="Start the interactive Python debugger on errors or KeyboardInterrupt",
+    )
+    group._addoption(
+        "--pdbcls",
+        dest="usepdb_cls",
+        metavar="modulename:classname",
+        type=_validate_usepdb_cls,
+        help="Specify a custom interactive Python debugger for use with --pdb. "
+        "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
+    )
+    group._addoption(
+        "--trace",
+        dest="trace",
+        action="store_true",
+        help="Immediately break when running each test",
+    )
+
+
+def pytest_configure(config: Config) -> None:
+    import pdb
+
+    if config.getvalue("trace"):
+        config.pluginmanager.register(PdbTrace(), "pdbtrace")
+    if config.getvalue("usepdb"):
+        config.pluginmanager.register(PdbInvoke(), "pdbinvoke")
+
+    pytestPDB._saved.append(
+        (pdb.set_trace, pytestPDB._pluginmanager, pytestPDB._config)
+    )
+    pdb.set_trace = pytestPDB.set_trace
+    pytestPDB._pluginmanager = config.pluginmanager
+    pytestPDB._config = config
+
+    # NOTE: not using pytest_unconfigure, since it might get called although
+    # pytest_configure was not (if another plugin raises UsageError).
+    def fin() -> None:
+        (
+            pdb.set_trace,
+            pytestPDB._pluginmanager,
+            pytestPDB._config,
+        ) = pytestPDB._saved.pop()
+
+    config.add_cleanup(fin)
+
+
+class pytestPDB:
+    """Pseudo PDB that defers to the real pdb."""
+
+    _pluginmanager: PytestPluginManager | None = None
+    _config: Config | None = None
+    _saved: list[
+        tuple[Callable[..., None], PytestPluginManager | None, Config | None]
+    ] = []
+    _recursive_debug = 0
+    _wrapped_pdb_cls: tuple[type[Any], type[Any]] | None = None
+
+    @classmethod
+    def _is_capturing(cls, capman: CaptureManager | None) -> str | bool:
+        if capman:
+            return capman.is_capturing()
+        return False
+
+    @classmethod
+    def _import_pdb_cls(cls, capman: CaptureManager | None):
+        if not cls._config:
+            import pdb
+
+            # Happens when using pytest.set_trace outside of a test.
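+            # Illustrative scenario (an assumption, not from the original
+            # source): a plain script run with ``python script.py`` that calls
+            # pytest.set_trace() has no Config, so the stdlib Pdb class is
+            # returned unwrapped.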
+ return pdb.Pdb + + usepdb_cls = cls._config.getvalue("usepdb_cls") + + if cls._wrapped_pdb_cls and cls._wrapped_pdb_cls[0] == usepdb_cls: + return cls._wrapped_pdb_cls[1] + + if usepdb_cls: + modname, classname = usepdb_cls + + try: + __import__(modname) + mod = sys.modules[modname] + + # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp). + parts = classname.split(".") + pdb_cls = getattr(mod, parts[0]) + for part in parts[1:]: + pdb_cls = getattr(pdb_cls, part) + except Exception as exc: + value = ":".join((modname, classname)) + raise UsageError( + f"--pdbcls: could not import {value!r}: {exc}" + ) from exc + else: + import pdb + + pdb_cls = pdb.Pdb + + wrapped_cls = cls._get_pdb_wrapper_class(pdb_cls, capman) + cls._wrapped_pdb_cls = (usepdb_cls, wrapped_cls) + return wrapped_cls + + @classmethod + def _get_pdb_wrapper_class(cls, pdb_cls, capman: CaptureManager | None): + import _pytest.config + + class PytestPdbWrapper(pdb_cls): + _pytest_capman = capman + _continued = False + + def do_debug(self, arg): + cls._recursive_debug += 1 + ret = super().do_debug(arg) + cls._recursive_debug -= 1 + return ret + + def do_continue(self, arg): + ret = super().do_continue(arg) + if cls._recursive_debug == 0: + assert cls._config is not None + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + capman = self._pytest_capman + capturing = pytestPDB._is_capturing(capman) + if capturing: + if capturing == "global": + tw.sep(">", "PDB continue (IO-capturing resumed)") + else: + tw.sep( + ">", + f"PDB continue (IO-capturing resumed for {capturing})", + ) + assert capman is not None + capman.resume() + else: + tw.sep(">", "PDB continue") + assert cls._pluginmanager is not None + cls._pluginmanager.hook.pytest_leave_pdb(config=cls._config, pdb=self) + self._continued = True + return ret + + do_c = do_cont = do_continue + + def do_quit(self, arg): + """Raise Exit outcome when quit command is used in pdb. + + This is a bit of a hack - it would be better if BdbQuit + could be handled, but this would require to wrap the + whole pytest run, and adjust the report etc. + """ + ret = super().do_quit(arg) + + if cls._recursive_debug == 0: + outcomes.exit("Quitting debugger") + + return ret + + do_q = do_quit + do_exit = do_quit + + def setup(self, f, tb): + """Suspend on setup(). + + Needed after do_continue resumed, and entering another + breakpoint again. + """ + ret = super().setup(f, tb) + if not ret and self._continued: + # pdb.setup() returns True if the command wants to exit + # from the interaction: do not suspend capturing then. + if self._pytest_capman: + self._pytest_capman.suspend_global_capture(in_=True) + return ret + + def get_stack(self, f, t): + stack, i = super().get_stack(f, t) + if f is None: + # Find last non-hidden frame. + i = max(0, len(stack) - 1) + while i and stack[i][0].f_locals.get("__tracebackhide__", False): + i -= 1 + return stack, i + + return PytestPdbWrapper + + @classmethod + def _init_pdb(cls, method, *args, **kwargs): + """Initialize PDB debugging, dropping any IO capturing.""" + import _pytest.config + + if cls._pluginmanager is None: + capman: CaptureManager | None = None + else: + capman = cls._pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend(in_=True) + + if cls._config: + tw = _pytest.config.create_terminal_writer(cls._config) + tw.line() + + if cls._recursive_debug == 0: + # Handle header similar to pdb.set_trace in py37+. 
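+                # pdb.set_trace() accepts a `header` keyword since Python 3.7;
+                # honor it here so a caller-supplied header replaces the usual
+                # capture-status banner.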
+ header = kwargs.pop("header", None) + if header is not None: + tw.sep(">", header) + else: + capturing = cls._is_capturing(capman) + if capturing == "global": + tw.sep(">", f"PDB {method} (IO-capturing turned off)") + elif capturing: + tw.sep( + ">", + f"PDB {method} (IO-capturing turned off for {capturing})", + ) + else: + tw.sep(">", f"PDB {method}") + + _pdb = cls._import_pdb_cls(capman)(**kwargs) + + if cls._pluginmanager: + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) + return _pdb + + @classmethod + def set_trace(cls, *args, **kwargs) -> None: + """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" + frame = sys._getframe().f_back + _pdb = cls._init_pdb("set_trace", *args, **kwargs) + _pdb.set_trace(frame) + + +class PdbInvoke: + def pytest_exception_interact( + self, node: Node, call: CallInfo[Any], report: BaseReport + ) -> None: + capman = node.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stdout.write(err) + assert call.excinfo is not None + + if not isinstance(call.excinfo.value, unittest.SkipTest): + _enter_pdb(node, call.excinfo, report) + + def pytest_internalerror(self, excinfo: ExceptionInfo[BaseException]) -> None: + tb = _postmortem_traceback(excinfo) + post_mortem(tb) + + +class PdbTrace: + @hookimpl(wrapper=True) + def pytest_pyfunc_call(self, pyfuncitem) -> Generator[None, object, object]: + wrap_pytest_function_for_tracing(pyfuncitem) + return (yield) + + +def wrap_pytest_function_for_tracing(pyfuncitem) -> None: + """Change the Python function object of the given Function item by a + wrapper which actually enters pdb before calling the python function + itself, effectively leaving the user in the pdb prompt in the first + statement of the function.""" + _pdb = pytestPDB._init_pdb("runcall") + testfunction = pyfuncitem.obj + + # we can't just return `partial(pdb.runcall, testfunction)` because (on + # python < 3.7.4) runcall's first param is `func`, which means we'd get + # an exception if one of the kwargs to testfunction was called `func`. + @functools.wraps(testfunction) + def wrapper(*args, **kwargs) -> None: + func = functools.partial(testfunction, *args, **kwargs) + _pdb.runcall(func) + + pyfuncitem.obj = wrapper + + +def maybe_wrap_pytest_function_for_tracing(pyfuncitem) -> None: + """Wrap the given pytestfunct item for tracing support if --trace was given in + the command line.""" + if pyfuncitem.config.getvalue("trace"): + wrap_pytest_function_for_tracing(pyfuncitem) + + +def _enter_pdb( + node: Node, excinfo: ExceptionInfo[BaseException], rep: BaseReport +) -> BaseReport: + # XXX we reuse the TerminalReporter's terminalwriter + # because this seems to avoid some encoding related troubles + # for not completely clear reasons. 
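+    # (It also keeps markup and terminal-width settings consistent with the
+    # rest of the session's output, since that writer is already configured.)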
+ tw = node.config.pluginmanager.getplugin("terminalreporter")._tw + tw.line() + + showcapture = node.config.option.showcapture + + for sectionname, content in ( + ("stdout", rep.capstdout), + ("stderr", rep.capstderr), + ("log", rep.caplog), + ): + if showcapture in (sectionname, "all") and content: + tw.sep(">", "captured " + sectionname) + if content[-1:] == "\n": + content = content[:-1] + tw.line(content) + + tw.sep(">", "traceback") + rep.toterminal(tw) + tw.sep(">", "entering PDB") + tb = _postmortem_traceback(excinfo) + rep._pdbshown = True # type: ignore[attr-defined] + post_mortem(tb) + return rep + + +def _postmortem_traceback(excinfo: ExceptionInfo[BaseException]) -> types.TracebackType: + from doctest import UnexpectedException + + if isinstance(excinfo.value, UnexpectedException): + # A doctest.UnexpectedException is not useful for post_mortem. + # Use the underlying exception instead: + return excinfo.value.exc_info[2] + elif isinstance(excinfo.value, ConftestImportFailure): + # A config.ConftestImportFailure is not useful for post_mortem. + # Use the underlying exception instead: + assert excinfo.value.cause.__traceback__ is not None + return excinfo.value.cause.__traceback__ + else: + assert excinfo._excinfo is not None + return excinfo._excinfo[2] + + +def post_mortem(t: types.TracebackType) -> None: + p = pytestPDB._init_pdb("post_mortem") + p.reset() + p.interaction(None, t) + if p.quitting: + outcomes.exit("Quitting debugger") diff --git a/venv/Lib/site-packages/_pytest/deprecated.py b/venv/Lib/site-packages/_pytest/deprecated.py new file mode 100644 index 0000000..a605c24 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/deprecated.py @@ -0,0 +1,91 @@ +"""Deprecation messages and bits of code used elsewhere in the codebase that +is planned to be removed in the next pytest release. + +Keeping it in a central location makes it easy to track what is deprecated and should +be removed when the time comes. + +All constants defined in this module should be either instances of +:class:`PytestWarning`, or :class:`UnformattedWarning` +in case of warnings which need to format their messages. +""" + +from __future__ import annotations + +from warnings import warn + +from _pytest.warning_types import PytestDeprecationWarning +from _pytest.warning_types import PytestRemovedIn9Warning +from _pytest.warning_types import UnformattedWarning + + +# set of plugins which have been integrated into the core; we use this list to ignore +# them during registration to avoid conflicts +DEPRECATED_EXTERNAL_PLUGINS = { + "pytest_catchlog", + "pytest_capturelog", + "pytest_faulthandler", +} + + +# This can be* removed pytest 8, but it's harmless and common, so no rush to remove. +# * If you're in the future: "could have been". +YIELD_FIXTURE = PytestDeprecationWarning( + "@pytest.yield_fixture is deprecated.\n" + "Use @pytest.fixture instead; they are the same." +) + +# This deprecation is never really meant to be removed. +PRIVATE = PytestDeprecationWarning("A private pytest class or function was used.") + + +HOOK_LEGACY_PATH_ARG = UnformattedWarning( + PytestRemovedIn9Warning, + "The ({pylib_path_arg}: py.path.local) argument is deprecated, please use ({pathlib_path_arg}: pathlib.Path)\n" + "see https://docs.pytest.org/en/latest/deprecations.html" + "#py-path-local-arguments-for-hooks-replaced-with-pathlib-path", +) + +NODE_CTOR_FSPATH_ARG = UnformattedWarning( + PytestRemovedIn9Warning, + "The (fspath: py.path.local) argument to {node_type_name} is deprecated. 
" + "Please use the (path: pathlib.Path) argument instead.\n" + "See https://docs.pytest.org/en/latest/deprecations.html" + "#fspath-argument-for-node-constructors-replaced-with-pathlib-path", +) + +HOOK_LEGACY_MARKING = UnformattedWarning( + PytestDeprecationWarning, + "The hook{type} {fullname} uses old-style configuration options (marks or attributes).\n" + "Please use the pytest.hook{type}({hook_opts}) decorator instead\n" + " to configure the hooks.\n" + " See https://docs.pytest.org/en/latest/deprecations.html" + "#configuring-hook-specs-impls-using-markers", +) + +MARKED_FIXTURE = PytestRemovedIn9Warning( + "Marks applied to fixtures have no effect\n" + "See docs: https://docs.pytest.org/en/stable/deprecations.html#applying-a-mark-to-a-fixture-function" +) + +# You want to make some `__init__` or function "private". +# +# def my_private_function(some, args): +# ... +# +# Do this: +# +# def my_private_function(some, args, *, _ispytest: bool = False): +# check_ispytest(_ispytest) +# ... +# +# Change all internal/allowed calls to +# +# my_private_function(some, args, _ispytest=True) +# +# All other calls will get the default _ispytest=False and trigger +# the warning (possibly error in the future). + + +def check_ispytest(ispytest: bool) -> None: + if not ispytest: + warn(PRIVATE, stacklevel=3) diff --git a/venv/Lib/site-packages/_pytest/doctest.py b/venv/Lib/site-packages/_pytest/doctest.py new file mode 100644 index 0000000..384dea9 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/doctest.py @@ -0,0 +1,755 @@ +# mypy: allow-untyped-defs +"""Discover and run doctests in modules and test files.""" + +from __future__ import annotations + +import bdb +from contextlib import contextmanager +import functools +import inspect +import os +from pathlib import Path +import platform +import sys +import traceback +import types +from typing import Any +from typing import Callable +from typing import Generator +from typing import Iterable +from typing import Pattern +from typing import Sequence +from typing import TYPE_CHECKING +import warnings + +from _pytest import outcomes +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import safe_getattr +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.fixtures import fixture +from _pytest.fixtures import TopRequest +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import OutcomeException +from _pytest.outcomes import skip +from _pytest.pathlib import fnmatch_ex +from _pytest.python import Module +from _pytest.python_api import approx +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + import doctest + + from typing_extensions import Self + +DOCTEST_REPORT_CHOICE_NONE = "none" +DOCTEST_REPORT_CHOICE_CDIFF = "cdiff" +DOCTEST_REPORT_CHOICE_NDIFF = "ndiff" +DOCTEST_REPORT_CHOICE_UDIFF = "udiff" +DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE = "only_first_failure" + +DOCTEST_REPORT_CHOICES = ( + DOCTEST_REPORT_CHOICE_NONE, + DOCTEST_REPORT_CHOICE_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF, + DOCTEST_REPORT_CHOICE_UDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, +) + +# Lazy definition of runner class +RUNNER_CLASS = None +# Lazy definition of output checker class +CHECKER_CLASS: type[doctest.OutputChecker] | None = None + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "doctest_optionflags", + 
"Option flags for doctests", + type="args", + default=["ELLIPSIS"], + ) + parser.addini( + "doctest_encoding", "Encoding used for doctest files", default="utf-8" + ) + group = parser.getgroup("collect") + group.addoption( + "--doctest-modules", + action="store_true", + default=False, + help="Run doctests in all .py modules", + dest="doctestmodules", + ) + group.addoption( + "--doctest-report", + type=str.lower, + default="udiff", + help="Choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport", + ) + group.addoption( + "--doctest-glob", + action="append", + default=[], + metavar="pat", + help="Doctests file matching pattern, default: test*.txt", + dest="doctestglob", + ) + group.addoption( + "--doctest-ignore-import-errors", + action="store_true", + default=False, + help="Ignore doctest collection errors", + dest="doctest_ignore_import_errors", + ) + group.addoption( + "--doctest-continue-on-failure", + action="store_true", + default=False, + help="For a given doctest, continue to run after the first failure", + dest="doctest_continue_on_failure", + ) + + +def pytest_unconfigure() -> None: + global RUNNER_CLASS + + RUNNER_CLASS = None + + +def pytest_collect_file( + file_path: Path, + parent: Collector, +) -> DoctestModule | DoctestTextfile | None: + config = parent.config + if file_path.suffix == ".py": + if config.option.doctestmodules and not any( + (_is_setup_py(file_path), _is_main_py(file_path)) + ): + return DoctestModule.from_parent(parent, path=file_path) + elif _is_doctest(config, file_path, parent): + return DoctestTextfile.from_parent(parent, path=file_path) + return None + + +def _is_setup_py(path: Path) -> bool: + if path.name != "setup.py": + return False + contents = path.read_bytes() + return b"setuptools" in contents or b"distutils" in contents + + +def _is_doctest(config: Config, path: Path, parent: Collector) -> bool: + if path.suffix in (".txt", ".rst") and parent.session.isinitpath(path): + return True + globs = config.getoption("doctestglob") or ["test*.txt"] + return any(fnmatch_ex(glob, path) for glob in globs) + + +def _is_main_py(path: Path) -> bool: + return path.name == "__main__.py" + + +class ReprFailDoctest(TerminalRepr): + def __init__( + self, reprlocation_lines: Sequence[tuple[ReprFileLocation, Sequence[str]]] + ) -> None: + self.reprlocation_lines = reprlocation_lines + + def toterminal(self, tw: TerminalWriter) -> None: + for reprlocation, lines in self.reprlocation_lines: + for line in lines: + tw.line(line) + reprlocation.toterminal(tw) + + +class MultipleDoctestFailures(Exception): + def __init__(self, failures: Sequence[doctest.DocTestFailure]) -> None: + super().__init__() + self.failures = failures + + +def _init_runner_class() -> type[doctest.DocTestRunner]: + import doctest + + class PytestDoctestRunner(doctest.DebugRunner): + """Runner to collect failures. + + Note that the out variable in this case is a list instead of a + stdout-like object. 
+ """ + + def __init__( + self, + checker: doctest.OutputChecker | None = None, + verbose: bool | None = None, + optionflags: int = 0, + continue_on_failure: bool = True, + ) -> None: + super().__init__(checker=checker, verbose=verbose, optionflags=optionflags) + self.continue_on_failure = continue_on_failure + + def report_failure( + self, + out, + test: doctest.DocTest, + example: doctest.Example, + got: str, + ) -> None: + failure = doctest.DocTestFailure(test, example, got) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + def report_unexpected_exception( + self, + out, + test: doctest.DocTest, + example: doctest.Example, + exc_info: tuple[type[BaseException], BaseException, types.TracebackType], + ) -> None: + if isinstance(exc_info[1], OutcomeException): + raise exc_info[1] + if isinstance(exc_info[1], bdb.BdbQuit): + outcomes.exit("Quitting debugger") + failure = doctest.UnexpectedException(test, example, exc_info) + if self.continue_on_failure: + out.append(failure) + else: + raise failure + + return PytestDoctestRunner + + +def _get_runner( + checker: doctest.OutputChecker | None = None, + verbose: bool | None = None, + optionflags: int = 0, + continue_on_failure: bool = True, +) -> doctest.DocTestRunner: + # We need this in order to do a lazy import on doctest + global RUNNER_CLASS + if RUNNER_CLASS is None: + RUNNER_CLASS = _init_runner_class() + # Type ignored because the continue_on_failure argument is only defined on + # PytestDoctestRunner, which is lazily defined so can't be used as a type. + return RUNNER_CLASS( # type: ignore + checker=checker, + verbose=verbose, + optionflags=optionflags, + continue_on_failure=continue_on_failure, + ) + + +class DoctestItem(Item): + def __init__( + self, + name: str, + parent: DoctestTextfile | DoctestModule, + runner: doctest.DocTestRunner, + dtest: doctest.DocTest, + ) -> None: + super().__init__(name, parent) + self.runner = runner + self.dtest = dtest + + # Stuff needed for fixture support. + self.obj = None + fm = self.session._fixturemanager + fixtureinfo = fm.getfixtureinfo(node=self, func=None, cls=None) + self._fixtureinfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + @classmethod + def from_parent( # type: ignore[override] + cls, + parent: DoctestTextfile | DoctestModule, + *, + name: str, + runner: doctest.DocTestRunner, + dtest: doctest.DocTest, + ) -> Self: + # incompatible signature due to imposed limits on subclass + """The public named constructor.""" + return super().from_parent(name=name, parent=parent, runner=runner, dtest=dtest) + + def _initrequest(self) -> None: + self.funcargs: dict[str, object] = {} + self._request = TopRequest(self, _ispytest=True) # type: ignore[arg-type] + + def setup(self) -> None: + self._request._fillfixtures() + globs = dict(getfixture=self._request.getfixturevalue) + for name, value in self._request.getfixturevalue("doctest_namespace").items(): + globs[name] = value + self.dtest.globs.update(globs) + + def runtest(self) -> None: + _check_all_skipped(self.dtest) + self._disable_output_capturing_for_darwin() + failures: list[doctest.DocTestFailure] = [] + # Type ignored because we change the type of `out` from what + # doctest expects. + self.runner.run(self.dtest, out=failures) # type: ignore[arg-type] + if failures: + raise MultipleDoctestFailures(failures) + + def _disable_output_capturing_for_darwin(self) -> None: + """Disable output capturing. 
Otherwise, stdout is lost to doctest (#985).""" + if platform.system() != "Darwin": + return + capman = self.config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture(in_=True) + out, err = capman.read_global_capture() + sys.stdout.write(out) + sys.stderr.write(err) + + # TODO: Type ignored -- breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, + excinfo: ExceptionInfo[BaseException], + ) -> str | TerminalRepr: + import doctest + + failures: ( + Sequence[doctest.DocTestFailure | doctest.UnexpectedException] | None + ) = None + if isinstance( + excinfo.value, (doctest.DocTestFailure, doctest.UnexpectedException) + ): + failures = [excinfo.value] + elif isinstance(excinfo.value, MultipleDoctestFailures): + failures = excinfo.value.failures + + if failures is None: + return super().repr_failure(excinfo) + + reprlocation_lines = [] + for failure in failures: + example = failure.example + test = failure.test + filename = test.filename + if test.lineno is None: + lineno = None + else: + lineno = test.lineno + example.lineno + 1 + message = type(failure).__name__ + # TODO: ReprFileLocation doesn't expect a None lineno. + reprlocation = ReprFileLocation(filename, lineno, message) # type: ignore[arg-type] + checker = _get_checker() + report_choice = _get_report_choice(self.config.getoption("doctestreport")) + if lineno is not None: + assert failure.test.docstring is not None + lines = failure.test.docstring.splitlines(False) + # add line numbers to the left of the error message + assert test.lineno is not None + lines = [ + "%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines) + ] + # trim docstring error lines to 10 + lines = lines[max(example.lineno - 9, 0) : example.lineno + 1] + else: + lines = [ + "EXAMPLE LOCATION UNKNOWN, not showing all tests of that example" + ] + indent = ">>>" + for line in example.source.splitlines(): + lines.append(f"??? {indent} {line}") + indent = "..." 
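+                # The loop above mimics doctest's own source rendering: ">>>"
+                # for the first line, "..." for continuations, with "???"
+                # standing in for the unknown line-number column.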
+ if isinstance(failure, doctest.DocTestFailure): + lines += checker.output_difference( + example, failure.got, report_choice + ).split("\n") + else: + inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info) + lines += [f"UNEXPECTED EXCEPTION: {inner_excinfo.value!r}"] + lines += [ + x.strip("\n") for x in traceback.format_exception(*failure.exc_info) + ] + reprlocation_lines.append((reprlocation, lines)) + return ReprFailDoctest(reprlocation_lines) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + return self.path, self.dtest.lineno, f"[doctest] {self.name}" + + +def _get_flag_lookup() -> dict[str, int]: + import doctest + + return dict( + DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1, + DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE, + NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE, + ELLIPSIS=doctest.ELLIPSIS, + IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL, + COMPARISON_FLAGS=doctest.COMPARISON_FLAGS, + ALLOW_UNICODE=_get_allow_unicode_flag(), + ALLOW_BYTES=_get_allow_bytes_flag(), + NUMBER=_get_number_flag(), + ) + + +def get_optionflags(config: Config) -> int: + optionflags_str = config.getini("doctest_optionflags") + flag_lookup_table = _get_flag_lookup() + flag_acc = 0 + for flag in optionflags_str: + flag_acc |= flag_lookup_table[flag] + return flag_acc + + +def _get_continue_on_failure(config: Config) -> bool: + continue_on_failure: bool = config.getvalue("doctest_continue_on_failure") + if continue_on_failure: + # We need to turn off this if we use pdb since we should stop at + # the first failure. + if config.getvalue("usepdb"): + continue_on_failure = False + return continue_on_failure + + +class DoctestTextfile(Module): + obj = None + + def collect(self) -> Iterable[DoctestItem]: + import doctest + + # Inspired by doctest.testfile; ideally we would use it directly, + # but it doesn't support passing a custom checker. 
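+        # A doctest text file is parsed into a single DocTest: all examples in
+        # the file share one `globs` namespace and are collected as one item.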
+ encoding = self.config.getini("doctest_encoding") + text = self.path.read_text(encoding) + filename = str(self.path) + name = self.path.name + globs = {"__name__": "__main__"} + + optionflags = get_optionflags(self.config) + + runner = _get_runner( + verbose=False, + optionflags=optionflags, + checker=_get_checker(), + continue_on_failure=_get_continue_on_failure(self.config), + ) + + parser = doctest.DocTestParser() + test = parser.get_doctest(text, globs, name, filename, 0) + if test.examples: + yield DoctestItem.from_parent( + self, name=test.name, runner=runner, dtest=test + ) + + +def _check_all_skipped(test: doctest.DocTest) -> None: + """Raise pytest.skip() if all examples in the given DocTest have the SKIP + option set.""" + import doctest + + all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples) + if all_skipped: + skip("all tests skipped by +SKIP option") + + +def _is_mocked(obj: object) -> bool: + """Return if an object is possibly a mock object by checking the + existence of a highly improbable attribute.""" + return ( + safe_getattr(obj, "pytest_mock_example_attribute_that_shouldnt_exist", None) + is not None + ) + + +@contextmanager +def _patch_unwrap_mock_aware() -> Generator[None]: + """Context manager which replaces ``inspect.unwrap`` with a version + that's aware of mock objects and doesn't recurse into them.""" + real_unwrap = inspect.unwrap + + def _mock_aware_unwrap( + func: Callable[..., Any], *, stop: Callable[[Any], Any] | None = None + ) -> Any: + try: + if stop is None or stop is _is_mocked: + return real_unwrap(func, stop=_is_mocked) + _stop = stop + return real_unwrap(func, stop=lambda obj: _is_mocked(obj) or _stop(func)) + except Exception as e: + warnings.warn( + f"Got {e!r} when unwrapping {func!r}. This is usually caused " + "by a violation of Python's object protocol; see e.g. " + "https://github.com/pytest-dev/pytest/issues/5080", + PytestWarning, + ) + raise + + inspect.unwrap = _mock_aware_unwrap + try: + yield + finally: + inspect.unwrap = real_unwrap + + +class DoctestModule(Module): + def collect(self) -> Iterable[DoctestItem]: + import doctest + + class MockAwareDocTestFinder(doctest.DocTestFinder): + py_ver_info_minor = sys.version_info[:2] + is_find_lineno_broken = ( + py_ver_info_minor < (3, 11) + or (py_ver_info_minor == (3, 11) and sys.version_info.micro < 9) + or (py_ver_info_minor == (3, 12) and sys.version_info.micro < 3) + ) + if is_find_lineno_broken: + + def _find_lineno(self, obj, source_lines): + """On older Pythons, doctest code does not take into account + `@property`. https://github.com/python/cpython/issues/61648 + + Moreover, wrapped Doctests need to be unwrapped so the correct + line number is returned. #8796 + """ + if isinstance(obj, property): + obj = getattr(obj, "fget", obj) + + if hasattr(obj, "__wrapped__"): + # Get the main obj in case of it being wrapped + obj = inspect.unwrap(obj) + + # Type ignored because this is a private function. + return super()._find_lineno( # type:ignore[misc] + obj, + source_lines, + ) + + if sys.version_info < (3, 10): + + def _find( + self, tests, obj, name, module, source_lines, globs, seen + ) -> None: + """Override _find to work around issue in stdlib. + + https://github.com/pytest-dev/pytest/issues/3456 + https://github.com/python/cpython/issues/69718 + """ + if _is_mocked(obj): + return # pragma: no cover + with _patch_unwrap_mock_aware(): + # Type ignored because this is a private function. 
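+                    # With inspect.unwrap patched by the context manager above,
+                    # doctest's search does not recurse into mock objects.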
+                super()._find(  # type:ignore[misc]
+                    tests, obj, name, module, source_lines, globs, seen
+                )
+
+        if sys.version_info < (3, 13):
+
+            def _from_module(self, module, object):
+                """`cached_property` objects are never considered a part
+                of the 'current module'. As such they are skipped by doctest.
+                Here we override `_from_module` to check the underlying
+                function instead. https://github.com/python/cpython/issues/107995
+                """
+                if isinstance(object, functools.cached_property):
+                    object = object.func
+
+                # Type ignored because this is a private function.
+                return super()._from_module(module, object)  # type: ignore[misc]
+
+        try:
+            module = self.obj
+        except Collector.CollectError:
+            if self.config.getvalue("doctest_ignore_import_errors"):
+                skip(f"unable to import module {self.path!r}")
+            else:
+                raise
+
+        # While doctests currently don't support fixtures directly, we still
+        # need to pick up autouse fixtures.
+        self.session._fixturemanager.parsefactories(self)
+
+        # Uses internal doctest module parsing mechanism.
+        finder = MockAwareDocTestFinder()
+        optionflags = get_optionflags(self.config)
+        runner = _get_runner(
+            verbose=False,
+            optionflags=optionflags,
+            checker=_get_checker(),
+            continue_on_failure=_get_continue_on_failure(self.config),
+        )
+
+        for test in finder.find(module, module.__name__):
+            if test.examples:  # skip empty doctests
+                yield DoctestItem.from_parent(
+                    self, name=test.name, runner=runner, dtest=test
+                )
+
+
+def _init_checker_class() -> type[doctest.OutputChecker]:
+    import doctest
+    import re
+
+    class LiteralsOutputChecker(doctest.OutputChecker):
+        # Based on doctest_nose_plugin.py from the nltk project
+        # (https://github.com/nltk/nltk) and on the "numtest" doctest extension
+        # by Sebastien Boisgerault (https://github.com/boisgera/numtest).
+
+        _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+        _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
+        _number_re = re.compile(
+            r"""
+            (?P<number>
+              (?P<mantissa>
+                (?P<integer1> [+-]?\d*)\.(?P<fraction>\d+)
+                |
+                (?P<integer2> [+-]?\d+)\.
+              )
+              (?:
+                [Ee]
+                (?P<exponent1> [+-]?\d+)
+              )?
+              |
+              (?P<integer3> [+-]?\d+)
+              (?:
+                [Ee]
+                (?P<exponent2> [+-]?\d+)
+              )
+            )
+            """,
+            re.VERBOSE,
+        )
+
+        def check_output(self, want: str, got: str, optionflags: int) -> bool:
+            if super().check_output(want, got, optionflags):
+                return True
+
+            allow_unicode = optionflags & _get_allow_unicode_flag()
+            allow_bytes = optionflags & _get_allow_bytes_flag()
+            allow_number = optionflags & _get_number_flag()
+
+            if not allow_unicode and not allow_bytes and not allow_number:
+                return False
+
+            def remove_prefixes(regex: Pattern[str], txt: str) -> str:
+                return re.sub(regex, r"\1\2", txt)
+
+            if allow_unicode:
+                want = remove_prefixes(self._unicode_literal_re, want)
+                got = remove_prefixes(self._unicode_literal_re, got)
+
+            if allow_bytes:
+                want = remove_prefixes(self._bytes_literal_re, want)
+                got = remove_prefixes(self._bytes_literal_re, got)
+
+            if allow_number:
+                got = self._remove_unwanted_precision(want, got)
+
+            return super().check_output(want, got, optionflags)
+
+        def _remove_unwanted_precision(self, want: str, got: str) -> str:
+            wants = list(self._number_re.finditer(want))
+            gots = list(self._number_re.finditer(got))
+            if len(wants) != len(gots):
+                return got
+            offset = 0
+            for w, g in zip(wants, gots):
+                fraction: str | None = w.group("fraction")
+                exponent: str | None = w.group("exponent1")
+                if exponent is None:
+                    exponent = w.group("exponent2")
+                precision = 0 if fraction is None else len(fraction)
+                if exponent is not None:
+                    precision -= int(exponent)
+                if float(w.group()) == approx(float(g.group()), abs=10**-precision):
+                    # They're close enough. Replace the text we actually
+                    # got with the text we want, so that it will match when we
+                    # check the string literally.
+                    got = (
+                        got[: g.start() + offset] + w.group() + got[g.end() + offset :]
+                    )
+                    offset += w.end() - w.start() - (g.end() - g.start())
+            return got
+
+    return LiteralsOutputChecker
+
+
+def _get_checker() -> doctest.OutputChecker:
+    """Return a doctest.OutputChecker subclass that supports some
+    additional options:
+
+    * ALLOW_UNICODE and ALLOW_BYTES options to ignore u'' and b''
+      prefixes (respectively) in string literals. Useful when the same
+      doctest should run in Python 2 and Python 3.
+
+    * NUMBER to ignore floating-point differences smaller than the
+      precision of the literal number in the doctest.
+
+    An inner class is used to avoid importing "doctest" at the module
+    level.
+    """
+    global CHECKER_CLASS
+    if CHECKER_CLASS is None:
+        CHECKER_CLASS = _init_checker_class()
+    return CHECKER_CLASS()
+
+
+def _get_allow_unicode_flag() -> int:
+    """Register and return the ALLOW_UNICODE flag."""
+    import doctest
+
+    return doctest.register_optionflag("ALLOW_UNICODE")
+
+
+def _get_allow_bytes_flag() -> int:
+    """Register and return the ALLOW_BYTES flag."""
+    import doctest
+
+    return doctest.register_optionflag("ALLOW_BYTES")
+
+
+def _get_number_flag() -> int:
+    """Register and return the NUMBER flag."""
+    import doctest
+
+    return doctest.register_optionflag("NUMBER")
+
+
+def _get_report_choice(key: str) -> int:
+    """Return the actual `doctest` module flag value.
+
+    We want to do it as late as possible to avoid importing `doctest` and all
+    its dependencies when parsing options, as it adds overhead and breaks tests.
+ """ + import doctest + + return { + DOCTEST_REPORT_CHOICE_UDIFF: doctest.REPORT_UDIFF, + DOCTEST_REPORT_CHOICE_CDIFF: doctest.REPORT_CDIFF, + DOCTEST_REPORT_CHOICE_NDIFF: doctest.REPORT_NDIFF, + DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE: doctest.REPORT_ONLY_FIRST_FAILURE, + DOCTEST_REPORT_CHOICE_NONE: 0, + }[key] + + +@fixture(scope="session") +def doctest_namespace() -> dict[str, Any]: + """Fixture that returns a :py:class:`dict` that will be injected into the + namespace of doctests. + + Usually this fixture is used in conjunction with another ``autouse`` fixture: + + .. code-block:: python + + @pytest.fixture(autouse=True) + def add_np(doctest_namespace): + doctest_namespace["np"] = numpy + + For more details: :ref:`doctest_namespace`. + """ + return dict() diff --git a/venv/Lib/site-packages/_pytest/faulthandler.py b/venv/Lib/site-packages/_pytest/faulthandler.py new file mode 100644 index 0000000..d16aea1 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/faulthandler.py @@ -0,0 +1,105 @@ +from __future__ import annotations + +import os +import sys +from typing import Generator + +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.stash import StashKey +import pytest + + +fault_handler_original_stderr_fd_key = StashKey[int]() +fault_handler_stderr_fd_key = StashKey[int]() + + +def pytest_addoption(parser: Parser) -> None: + help = ( + "Dump the traceback of all threads if a test takes " + "more than TIMEOUT seconds to finish" + ) + parser.addini("faulthandler_timeout", help, default=0.0) + + +def pytest_configure(config: Config) -> None: + import faulthandler + + # at teardown we want to restore the original faulthandler fileno + # but faulthandler has no api to return the original fileno + # so here we stash the stderr fileno to be used at teardown + # sys.stderr and sys.__stderr__ may be closed or patched during the session + # so we can't rely on their values being good at that point (#11572). + stderr_fileno = get_stderr_fileno() + if faulthandler.is_enabled(): + config.stash[fault_handler_original_stderr_fd_key] = stderr_fileno + config.stash[fault_handler_stderr_fd_key] = os.dup(stderr_fileno) + faulthandler.enable(file=config.stash[fault_handler_stderr_fd_key]) + + +def pytest_unconfigure(config: Config) -> None: + import faulthandler + + faulthandler.disable() + # Close the dup file installed during pytest_configure. + if fault_handler_stderr_fd_key in config.stash: + os.close(config.stash[fault_handler_stderr_fd_key]) + del config.stash[fault_handler_stderr_fd_key] + # Re-enable the faulthandler if it was originally enabled. + if fault_handler_original_stderr_fd_key in config.stash: + faulthandler.enable(config.stash[fault_handler_original_stderr_fd_key]) + del config.stash[fault_handler_original_stderr_fd_key] + + +def get_stderr_fileno() -> int: + try: + fileno = sys.stderr.fileno() + # The Twisted Logger will return an invalid file descriptor since it is not backed + # by an FD. So, let's also forward this to the same code path as with pytest-xdist. + if fileno == -1: + raise AttributeError() + return fileno + except (AttributeError, ValueError): + # pytest-xdist monkeypatches sys.stderr with an object that is not an actual file. + # https://docs.python.org/3/library/faulthandler.html#issue-with-file-descriptors + # This is potentially dangerous, but the best we can do. 
+ assert sys.__stderr__ is not None + return sys.__stderr__.fileno() + + +def get_timeout_config_value(config: Config) -> float: + return float(config.getini("faulthandler_timeout") or 0.0) + + +@pytest.hookimpl(wrapper=True, trylast=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: + timeout = get_timeout_config_value(item.config) + if timeout > 0: + import faulthandler + + stderr = item.config.stash[fault_handler_stderr_fd_key] + faulthandler.dump_traceback_later(timeout, file=stderr) + try: + return (yield) + finally: + faulthandler.cancel_dump_traceback_later() + else: + return (yield) + + +@pytest.hookimpl(tryfirst=True) +def pytest_enter_pdb() -> None: + """Cancel any traceback dumping due to timeout before entering pdb.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() + + +@pytest.hookimpl(tryfirst=True) +def pytest_exception_interact() -> None: + """Cancel any traceback dumping due to an interactive exception being + raised.""" + import faulthandler + + faulthandler.cancel_dump_traceback_later() diff --git a/venv/Lib/site-packages/_pytest/fixtures.py b/venv/Lib/site-packages/_pytest/fixtures.py new file mode 100644 index 0000000..6b882fa --- /dev/null +++ b/venv/Lib/site-packages/_pytest/fixtures.py @@ -0,0 +1,1932 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import abc +from collections import defaultdict +from collections import deque +import dataclasses +import functools +import inspect +import os +from pathlib import Path +import sys +import types +from typing import AbstractSet +from typing import Any +from typing import Callable +from typing import cast +from typing import Dict +from typing import Final +from typing import final +from typing import Generator +from typing import Generic +from typing import Iterable +from typing import Iterator +from typing import Mapping +from typing import MutableMapping +from typing import NoReturn +from typing import Optional +from typing import OrderedDict +from typing import overload +from typing import Sequence +from typing import Tuple +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import warnings + +import _pytest +from _pytest import nodes +from _pytest._code import getfslineno +from _pytest._code import Source +from _pytest._code.code import FormattedExcinfo +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.compat import _PytestWrapper +from _pytest.compat import assert_never +from _pytest.compat import get_real_func +from _pytest.compat import get_real_method +from _pytest.compat import getfuncargnames +from _pytest.compat import getimfunc +from _pytest.compat import getlocation +from _pytest.compat import is_generator +from _pytest.compat import NOTSET +from _pytest.compat import NotSetType +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import MARKED_FIXTURE +from _pytest.deprecated import YIELD_FIXTURE +from _pytest.main import Session +from _pytest.mark import Mark +from _pytest.mark import ParameterSet +from _pytest.mark.structures import MarkDecorator +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import TEST_OUTCOME +from _pytest.pathlib import 
absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.scope import _ScopeName +from _pytest.scope import HIGH_SCOPES +from _pytest.scope import Scope + + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + + +if TYPE_CHECKING: + from _pytest.python import CallSpec2 + from _pytest.python import Function + from _pytest.python import Metafunc + + +# The value of the fixture -- return/yield of the fixture function (type variable). +FixtureValue = TypeVar("FixtureValue") +# The type of the fixture function (type variable). +FixtureFunction = TypeVar("FixtureFunction", bound=Callable[..., object]) +# The type of a fixture function (type alias generic in fixture value). +_FixtureFunc = Union[ + Callable[..., FixtureValue], Callable[..., Generator[FixtureValue, None, None]] +] +# The type of FixtureDef.cached_result (type alias generic in fixture value). +_FixtureCachedResult = Union[ + Tuple[ + # The result. + FixtureValue, + # Cache key. + object, + None, + ], + Tuple[ + None, + # Cache key. + object, + # The exception and the original traceback. + Tuple[BaseException, Optional[types.TracebackType]], + ], +] + + +@dataclasses.dataclass(frozen=True) +class PseudoFixtureDef(Generic[FixtureValue]): + cached_result: _FixtureCachedResult[FixtureValue] + _scope: Scope + + +def pytest_sessionstart(session: Session) -> None: + session._fixturemanager = FixtureManager(session) + + +def get_scope_package( + node: nodes.Item, + fixturedef: FixtureDef[object], +) -> nodes.Node | None: + from _pytest.python import Package + + for parent in node.iter_parents(): + if isinstance(parent, Package) and parent.nodeid == fixturedef.baseid: + return parent + return node.session + + +def get_scope_node(node: nodes.Node, scope: Scope) -> nodes.Node | None: + import _pytest.python + + if scope is Scope.Function: + # Type ignored because this is actually safe, see: + # https://github.com/python/mypy/issues/4717 + return node.getparent(nodes.Item) # type: ignore[type-abstract] + elif scope is Scope.Class: + return node.getparent(_pytest.python.Class) + elif scope is Scope.Module: + return node.getparent(_pytest.python.Module) + elif scope is Scope.Package: + return node.getparent(_pytest.python.Package) + elif scope is Scope.Session: + return node.getparent(_pytest.main.Session) + else: + assert_never(scope) + + +def getfixturemarker(obj: object) -> FixtureFunctionMarker | None: + """Return fixturemarker or None if it doesn't exist or raised + exceptions.""" + return cast( + Optional[FixtureFunctionMarker], + safe_getattr(obj, "_pytestfixturefunction", None), + ) + + +# Algorithm for sorting on a per-parametrized resource setup basis. +# It is called for Session scope first and performs sorting +# down to the lower scopes such as to minimize number of "high scope" +# setups and teardowns. + + +@dataclasses.dataclass(frozen=True) +class FixtureArgKey: + argname: str + param_index: int + scoped_item_path: Path | None + item_cls: type | None + + +_V = TypeVar("_V") +OrderedSet = Dict[_V, None] + + +def get_parametrized_fixture_argkeys( + item: nodes.Item, scope: Scope +) -> Iterator[FixtureArgKey]: + """Return list of keys for all parametrized arguments which match + the specified scope.""" + assert scope is not Scope.Function + + try: + callspec: CallSpec2 = item.callspec # type: ignore[attr-defined] + except AttributeError: + return + + item_cls = None + if scope is Scope.Session: + scoped_item_path = None + elif scope is Scope.Package: + # Package key = module's directory. 
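+        # e.g. two parametrized items under tests/pkg/ (a hypothetical path)
+        # get the same package-scope key and can share the expensive setup.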
+ scoped_item_path = item.path.parent + elif scope is Scope.Module: + scoped_item_path = item.path + elif scope is Scope.Class: + scoped_item_path = item.path + item_cls = item.cls # type: ignore[attr-defined] + else: + assert_never(scope) + + for argname in callspec.indices: + if callspec._arg2scope[argname] != scope: + continue + param_index = callspec.indices[argname] + yield FixtureArgKey(argname, param_index, scoped_item_path, item_cls) + + +def reorder_items(items: Sequence[nodes.Item]) -> list[nodes.Item]: + argkeys_by_item: dict[Scope, dict[nodes.Item, OrderedSet[FixtureArgKey]]] = {} + items_by_argkey: dict[ + Scope, dict[FixtureArgKey, OrderedDict[nodes.Item, None]] + ] = {} + for scope in HIGH_SCOPES: + scoped_argkeys_by_item = argkeys_by_item[scope] = {} + scoped_items_by_argkey = items_by_argkey[scope] = defaultdict(OrderedDict) + for item in items: + argkeys = dict.fromkeys(get_parametrized_fixture_argkeys(item, scope)) + if argkeys: + scoped_argkeys_by_item[item] = argkeys + for argkey in argkeys: + scoped_items_by_argkey[argkey][item] = None + + items_set = dict.fromkeys(items) + return list( + reorder_items_atscope( + items_set, argkeys_by_item, items_by_argkey, Scope.Session + ) + ) + + +def reorder_items_atscope( + items: OrderedSet[nodes.Item], + argkeys_by_item: Mapping[Scope, Mapping[nodes.Item, OrderedSet[FixtureArgKey]]], + items_by_argkey: Mapping[ + Scope, Mapping[FixtureArgKey, OrderedDict[nodes.Item, None]] + ], + scope: Scope, +) -> OrderedSet[nodes.Item]: + if scope is Scope.Function or len(items) < 3: + return items + + scoped_items_by_argkey = items_by_argkey[scope] + scoped_argkeys_by_item = argkeys_by_item[scope] + + ignore: set[FixtureArgKey] = set() + items_deque = deque(items) + items_done: OrderedSet[nodes.Item] = {} + while items_deque: + no_argkey_items: OrderedSet[nodes.Item] = {} + slicing_argkey = None + while items_deque: + item = items_deque.popleft() + if item in items_done or item in no_argkey_items: + continue + argkeys = dict.fromkeys( + k for k in scoped_argkeys_by_item.get(item, ()) if k not in ignore + ) + if not argkeys: + no_argkey_items[item] = None + else: + slicing_argkey, _ = argkeys.popitem() + # We don't have to remove relevant items from later in the + # deque because they'll just be ignored. + matching_items = [ + i for i in scoped_items_by_argkey[slicing_argkey] if i in items + ] + for i in reversed(matching_items): + items_deque.appendleft(i) + # Fix items_by_argkey order. + for other_scope in HIGH_SCOPES: + other_scoped_items_by_argkey = items_by_argkey[other_scope] + for argkey in argkeys_by_item[other_scope].get(i, ()): + other_scoped_items_by_argkey[argkey][i] = None + other_scoped_items_by_argkey[argkey].move_to_end( + i, last=False + ) + break + if no_argkey_items: + reordered_no_argkey_items = reorder_items_atscope( + no_argkey_items, argkeys_by_item, items_by_argkey, scope.next_lower() + ) + items_done.update(reordered_no_argkey_items) + if slicing_argkey is not None: + ignore.add(slicing_argkey) + return items_done + + +@dataclasses.dataclass(frozen=True) +class FuncFixtureInfo: + """Fixture-related information for a fixture-requesting item (e.g. test + function). + + This is used to examine the fixtures which an item requests statically + (known during collection). This includes autouse fixtures, fixtures + requested by the `usefixtures` marker, fixtures requested in the function + parameters, and the transitive closure of these. 
+
+    An item may also request fixtures dynamically (using `request.getfixturevalue`);
+    these are not reflected here.
+    """
+
+    __slots__ = ("argnames", "initialnames", "names_closure", "name2fixturedefs")
+
+    # Fixture names that the item requests directly by function parameters.
+    argnames: tuple[str, ...]
+    # Fixture names that the item immediately requires. These include
+    # argnames + fixture names specified via usefixtures and via autouse=True in
+    # fixture definitions.
+    initialnames: tuple[str, ...]
+    # The transitive closure of the fixture names that the item requires.
+    # Note: can't include dynamic dependencies (`request.getfixturevalue` calls).
+    names_closure: list[str]
+    # A map from a fixture name in the transitive closure to the FixtureDefs
+    # matching the name which are applicable to this function.
+    # There may be multiple overriding fixtures with the same name. The
+    # sequence is ordered from furthest to closest to the function.
+    name2fixturedefs: dict[str, Sequence[FixtureDef[Any]]]
+
+    def prune_dependency_tree(self) -> None:
+        """Recompute names_closure from initialnames and name2fixturedefs.
+
+        Can only reduce names_closure, which means that the new closure will
+        always be a subset of the old one. The order is preserved.
+
+        This method is needed because direct parametrization may shadow some
+        of the fixtures that were included in the originally built dependency
+        tree. In this way the dependency tree can get pruned, and the closure
+        of argnames may get reduced.
+        """
+        closure: set[str] = set()
+        working_set = set(self.initialnames)
+        while working_set:
+            argname = working_set.pop()
+            # Argname may be something not included in the original names_closure,
+            # in which case we ignore it. This currently happens with pseudo
+            # FixtureDefs which wrap 'get_direct_param_fixture_func(request)'.
+            # So they introduce the new dependency 'request' which might have
+            # been missing in the original tree (closure).
+            if argname not in closure and argname in self.names_closure:
+                closure.add(argname)
+                if argname in self.name2fixturedefs:
+                    working_set.update(self.name2fixturedefs[argname][-1].argnames)
+
+        self.names_closure[:] = sorted(closure, key=self.names_closure.index)
+
+
+class FixtureRequest(abc.ABC):
+    """The type of the ``request`` fixture.
+
+    A request object gives access to the requesting test context and has a
+    ``param`` attribute in case the fixture is parametrized.
+    """
+
+    def __init__(
+        self,
+        pyfuncitem: Function,
+        fixturename: str | None,
+        arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]],
+        fixture_defs: dict[str, FixtureDef[Any]],
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        #: Fixture for which this request is being performed.
+        self.fixturename: Final = fixturename
+        self._pyfuncitem: Final = pyfuncitem
+        # The FixtureDefs for each fixture name requested by this item.
+        # Starts from the statically-known fixturedefs resolved during
+        # collection. Dynamically requested fixtures (using
+        # `request.getfixturevalue("foo")`) are added dynamically.
+        self._arg2fixturedefs: Final = arg2fixturedefs
+        # The evaluated argnames so far, mapping to the FixtureDef they resolved
+        # to.
+        self._fixture_defs: Final = fixture_defs
+        # Notes on the type of `param`:
+        # - `request.param` is only defined in parametrized fixtures, and will raise
+        #   AttributeError otherwise. Python typing has no notion of "undefined", so
+        #   this cannot be reflected in the type.
+ # - Technically `param` is only (possibly) defined on SubRequest, not + # FixtureRequest, but the typing of that is still in flux so this cheats. + # - In the future we might consider using a generic for the param type, but + # for now just using Any. + self.param: Any + + @property + def _fixturemanager(self) -> FixtureManager: + return self._pyfuncitem.session._fixturemanager + + @property + @abc.abstractmethod + def _scope(self) -> Scope: + raise NotImplementedError() + + @property + def scope(self) -> _ScopeName: + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value + + @abc.abstractmethod + def _check_scope( + self, + requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object], + requested_scope: Scope, + ) -> None: + raise NotImplementedError() + + @property + def fixturenames(self) -> list[str]: + """Names of all active fixtures in this request.""" + result = list(self._pyfuncitem.fixturenames) + result.extend(set(self._fixture_defs).difference(result)) + return result + + @property + @abc.abstractmethod + def node(self): + """Underlying collection node (depends on current request scope).""" + raise NotImplementedError() + + @property + def config(self) -> Config: + """The pytest config object associated with this request.""" + return self._pyfuncitem.config + + @property + def function(self): + """Test function object if the request has a per-function scope.""" + if self.scope != "function": + raise AttributeError( + f"function not available in {self.scope}-scoped context" + ) + return self._pyfuncitem.obj + + @property + def cls(self): + """Class (can be None) where the test function was collected.""" + if self.scope not in ("class", "function"): + raise AttributeError(f"cls not available in {self.scope}-scoped context") + clscol = self._pyfuncitem.getparent(_pytest.python.Class) + if clscol: + return clscol.obj + + @property + def instance(self): + """Instance (can be None) on which test function was collected.""" + if self.scope != "function": + return None + return getattr(self._pyfuncitem, "instance", None) + + @property + def module(self): + """Python module object where the test function was collected.""" + if self.scope not in ("function", "class", "module"): + raise AttributeError(f"module not available in {self.scope}-scoped context") + mod = self._pyfuncitem.getparent(_pytest.python.Module) + assert mod is not None + return mod.obj + + @property + def path(self) -> Path: + """Path where the test function was collected.""" + if self.scope not in ("function", "class", "module", "package"): + raise AttributeError(f"path not available in {self.scope}-scoped context") + return self._pyfuncitem.path + + @property + def keywords(self) -> MutableMapping[str, Any]: + """Keywords/markers dictionary for the underlying node.""" + node: nodes.Node = self.node + return node.keywords + + @property + def session(self) -> Session: + """Pytest session object.""" + return self._pyfuncitem.session + + @abc.abstractmethod + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + """Add finalizer/teardown function to be called without arguments after + the last test within the requesting test context finished execution.""" + raise NotImplementedError() + + def applymarker(self, marker: str | MarkDecorator) -> None: + """Apply a marker to a single test function invocation. + + This method is useful if you don't want to have a keyword/marker + on all function invocations. 
+
+        :param marker:
+            An object created by a call to ``pytest.mark.NAME(...)``.
+        """
+        self.node.add_marker(marker)
+
+    def raiseerror(self, msg: str | None) -> NoReturn:
+        """Raise a FixtureLookupError exception.
+
+        :param msg:
+            An optional custom error message.
+        """
+        raise FixtureLookupError(None, self, msg)
+
+    def getfixturevalue(self, argname: str) -> Any:
+        """Dynamically run a named fixture function.
+
+        Declaring fixtures via function argument is recommended where possible.
+        But if you can only decide whether to use another fixture at test
+        setup time, you may use this function to retrieve it inside a fixture
+        or test function body.
+
+        This method can be used during the test setup phase or the test run
+        phase, but during the test teardown phase a fixture's value may not
+        be available.
+
+        :param argname:
+            The fixture name.
+        :raises pytest.FixtureLookupError:
+            If the given fixture could not be found.
+        """
+        # Note that in addition to the use case described in the docstring,
+        # getfixturevalue() is also called by pytest itself during item and fixture
+        # setup to evaluate the fixtures that are requested statically
+        # (using function parameters, autouse, etc).
+
+        fixturedef = self._get_active_fixturedef(argname)
+        assert fixturedef.cached_result is not None, (
+            f'The fixture value for "{argname}" is not available. '
+            "This can happen when the fixture has already been torn down."
+        )
+        return fixturedef.cached_result[0]
+
+    def _iter_chain(self) -> Iterator[SubRequest]:
+        """Yield all SubRequests in the chain, from self up.
+
+        Note: does *not* yield the TopRequest.
+        """
+        current = self
+        while isinstance(current, SubRequest):
+            yield current
+            current = current._parent_request
+
+    def _get_active_fixturedef(
+        self, argname: str
+    ) -> FixtureDef[object] | PseudoFixtureDef[object]:
+        if argname == "request":
+            cached_result = (self, [0], None)
+            return PseudoFixtureDef(cached_result, Scope.Function)
+
+        # If we already finished computing a fixture by this name in this item,
+        # return it.
+        fixturedef = self._fixture_defs.get(argname)
+        if fixturedef is not None:
+            self._check_scope(fixturedef, fixturedef._scope)
+            return fixturedef
+
+        # Find the appropriate fixturedef.
+        fixturedefs = self._arg2fixturedefs.get(argname, None)
+        if fixturedefs is None:
+            # We arrive here because of a dynamic call to
+            # getfixturevalue(argname) which was naturally
+            # not known at parsing/collection time.
+            fixturedefs = self._fixturemanager.getfixturedefs(argname, self._pyfuncitem)
+            if fixturedefs is not None:
+                self._arg2fixturedefs[argname] = fixturedefs
+        # No fixtures defined with this name.
+        if fixturedefs is None:
+            raise FixtureLookupError(argname, self)
+        # There are no fixtures with this name applicable for the function.
+        if not fixturedefs:
+            raise FixtureLookupError(argname, self)
+        # A fixture may override another fixture with the same name, e.g. a
+        # fixture in a module can override a fixture in a conftest, a fixture in
+        # a class can override a fixture in the module, and so on.
+        # An overriding fixture can request its own name (possibly indirectly);
+        # in this case it gets the value of the fixture it overrides, one level
+        # up.
+        # Check how many `argname`s deep we are, and take the next one.
+        # `fixturedefs` is sorted from furthest to closest, so use negative
+        # indexing to go in reverse.
+        index = -1
+        for request in self._iter_chain():
+            if request.fixturename == argname:
+                index -= 1
+        # If already consumed all of the available levels, fail.
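+        # For example (hypothetical): a conftest fixture "db" overridden by a
+        # module fixture "db" that requests "db" gives index == -2, which is
+        # valid for len(fixturedefs) == 2 but would overrun a single level.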
+ if -index > len(fixturedefs): + raise FixtureLookupError(argname, self) + fixturedef = fixturedefs[index] + + # Prepare a SubRequest object for calling the fixture. + try: + callspec = self._pyfuncitem.callspec + except AttributeError: + callspec = None + if callspec is not None and argname in callspec.params: + param = callspec.params[argname] + param_index = callspec.indices[argname] + # The parametrize invocation scope overrides the fixture's scope. + scope = callspec._arg2scope[argname] + else: + param = NOTSET + param_index = 0 + scope = fixturedef._scope + self._check_fixturedef_without_param(fixturedef) + self._check_scope(fixturedef, scope) + subrequest = SubRequest( + self, scope, param, param_index, fixturedef, _ispytest=True + ) + + # Make sure the fixture value is cached, running it if it isn't + fixturedef.execute(request=subrequest) + + self._fixture_defs[argname] = fixturedef + return fixturedef + + def _check_fixturedef_without_param(self, fixturedef: FixtureDef[object]) -> None: + """Check that this request is allowed to execute this fixturedef without + a param.""" + funcitem = self._pyfuncitem + has_params = fixturedef.params is not None + fixtures_not_supported = getattr(funcitem, "nofuncargs", False) + if has_params and fixtures_not_supported: + msg = ( + f"{funcitem.name} does not support fixtures, maybe unittest.TestCase subclass?\n" + f"Node id: {funcitem.nodeid}\n" + f"Function type: {type(funcitem).__name__}" + ) + fail(msg, pytrace=False) + if has_params: + frame = inspect.stack()[3] + frameinfo = inspect.getframeinfo(frame[0]) + source_path = absolutepath(frameinfo.filename) + source_lineno = frameinfo.lineno + try: + source_path_str = str(source_path.relative_to(funcitem.config.rootpath)) + except ValueError: + source_path_str = str(source_path) + location = getlocation(fixturedef.func, funcitem.config.rootpath) + msg = ( + "The requested fixture has no parameter defined for test:\n" + f" {funcitem.nodeid}\n\n" + f"Requested fixture '{fixturedef.argname}' defined in:\n" + f"{location}\n\n" + f"Requested here:\n" + f"{source_path_str}:{source_lineno}" + ) + fail(msg, pytrace=False) + + def _get_fixturestack(self) -> list[FixtureDef[Any]]: + values = [request._fixturedef for request in self._iter_chain()] + values.reverse() + return values + + +@final +class TopRequest(FixtureRequest): + """The type of the ``request`` fixture in a test function.""" + + def __init__(self, pyfuncitem: Function, *, _ispytest: bool = False) -> None: + super().__init__( + fixturename=None, + pyfuncitem=pyfuncitem, + arg2fixturedefs=pyfuncitem._fixtureinfo.name2fixturedefs.copy(), + fixture_defs={}, + _ispytest=_ispytest, + ) + + @property + def _scope(self) -> Scope: + return Scope.Function + + def _check_scope( + self, + requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object], + requested_scope: Scope, + ) -> None: + # TopRequest always has function scope so always valid. 
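+        # Illustrative sketch, not part of pytest itself: the override chain
+        # resolved by _get_active_fixturedef() above lets a fixture request
+        # the same-named fixture it overrides ("app" is a hypothetical name):
+        #
+        #     # conftest.py
+        #     @pytest.fixture
+        #     def app():
+        #         return make_app()          # make_app() is hypothetical
+        #
+        #     # test_app.py
+        #     @pytest.fixture
+        #     def app(app):                  # receives the conftest "app"
+        #         app.debug = True
+        #         return app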
+        pass
+
+    @property
+    def node(self):
+        return self._pyfuncitem
+
+    def __repr__(self) -> str:
+        return f"<FixtureRequest for {self.node!r}>"
+
+    def _fillfixtures(self) -> None:
+        item = self._pyfuncitem
+        for argname in item.fixturenames:
+            if argname not in item.funcargs:
+                item.funcargs[argname] = self.getfixturevalue(argname)
+
+    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
+        self.node.addfinalizer(finalizer)
+
+
+@final
+class SubRequest(FixtureRequest):
+    """The type of the ``request`` fixture in a fixture function requested
+    (transitively) by a test function."""
+
+    def __init__(
+        self,
+        request: FixtureRequest,
+        scope: Scope,
+        param: Any,
+        param_index: int,
+        fixturedef: FixtureDef[object],
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        super().__init__(
+            pyfuncitem=request._pyfuncitem,
+            fixturename=fixturedef.argname,
+            fixture_defs=request._fixture_defs,
+            arg2fixturedefs=request._arg2fixturedefs,
+            _ispytest=_ispytest,
+        )
+        self._parent_request: Final[FixtureRequest] = request
+        self._scope_field: Final = scope
+        self._fixturedef: Final[FixtureDef[object]] = fixturedef
+        if param is not NOTSET:
+            self.param = param
+        self.param_index: Final = param_index
+
+    def __repr__(self) -> str:
+        return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>"
+
+    @property
+    def _scope(self) -> Scope:
+        return self._scope_field
+
+    @property
+    def node(self):
+        scope = self._scope
+        if scope is Scope.Function:
+            # This might also be a non-function Item despite its attribute name.
+            node: nodes.Node | None = self._pyfuncitem
+        elif scope is Scope.Package:
+            node = get_scope_package(self._pyfuncitem, self._fixturedef)
+        else:
+            node = get_scope_node(self._pyfuncitem, scope)
+        if node is None and scope is Scope.Class:
+            # Fallback to function item itself.
+            node = self._pyfuncitem
+        assert node, f'Could not obtain a node for scope "{scope}" for function {self._pyfuncitem!r}'
+        return node
+
+    def _check_scope(
+        self,
+        requested_fixturedef: FixtureDef[object] | PseudoFixtureDef[object],
+        requested_scope: Scope,
+    ) -> None:
+        if isinstance(requested_fixturedef, PseudoFixtureDef):
+            return
+        if self._scope > requested_scope:
+            # Try to report something helpful.
+            argname = requested_fixturedef.argname
+            fixture_stack = "\n".join(
+                self._format_fixturedef_line(fixturedef)
+                for fixturedef in self._get_fixturestack()
+            )
+            requested_fixture = self._format_fixturedef_line(requested_fixturedef)
+            fail(
+                f"ScopeMismatch: You tried to access the {requested_scope.value} scoped "
+                f"fixture {argname} with a {self._scope.value} scoped request object. "
+                f"Requesting fixture stack:\n{fixture_stack}\n"
+                f"Requested fixture:\n{requested_fixture}",
+                pytrace=False,
+            )
+
+    def _format_fixturedef_line(self, fixturedef: FixtureDef[object]) -> str:
+        factory = fixturedef.func
+        path, lineno = getfslineno(factory)
+        if isinstance(path, Path):
+            path = bestrelpath(self._pyfuncitem.session.path, path)
+        signature = inspect.signature(factory)
+        return f"{path}:{lineno + 1}: def {factory.__name__}{signature}"
+
+    def addfinalizer(self, finalizer: Callable[[], object]) -> None:
+        self._fixturedef.addfinalizer(finalizer)
+
+
+@final
+class FixtureLookupError(LookupError):
+    """Could not return a requested fixture (missing or invalid)."""
+
+    def __init__(
+        self, argname: str | None, request: FixtureRequest, msg: str | None = None
+    ) -> None:
+        self.argname = argname
+        self.request = request
+        self.fixturestack = request._get_fixturestack()
+        self.msg = msg
+
+    def formatrepr(self) -> FixtureLookupErrorRepr:
+        tblines: list[str] = []
+        addline = tblines.append
+        stack = [self.request._pyfuncitem.obj]
+        stack.extend(map(lambda x: x.func, self.fixturestack))
+        msg = self.msg
+        if msg is not None:
+            # The last fixture raised an error, let's present
+            # it at the requesting side.
+            stack = stack[:-1]
+        for function in stack:
+            fspath, lineno = getfslineno(function)
+            try:
+                lines, _ = inspect.getsourcelines(get_real_func(function))
+            except (OSError, IndexError, TypeError):
+                error_msg = "file %s, line %s: source code not available"
+                addline(error_msg % (fspath, lineno + 1))
+            else:
+                addline(f"file {fspath}, line {lineno + 1}")
+                for i, line in enumerate(lines):
+                    line = line.rstrip()
+                    addline("  " + line)
+                    if line.lstrip().startswith("def"):
+                        break
+
+        if msg is None:
+            fm = self.request._fixturemanager
+            available = set()
+            parent = self.request._pyfuncitem.parent
+            assert parent is not None
+            for name, fixturedefs in fm._arg2fixturedefs.items():
+                faclist = list(fm._matchfactories(fixturedefs, parent))
+                if faclist:
+                    available.add(name)
+            if self.argname in available:
+                msg = (
+                    f" recursive dependency involving fixture '{self.argname}' detected"
+                )
+            else:
+                msg = f"fixture '{self.argname}' not found"
+            msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
+            msg += "\n use 'pytest --fixtures [testpath]' for help on them."
+ + return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) + + +class FixtureLookupErrorRepr(TerminalRepr): + def __init__( + self, + filename: str | os.PathLike[str], + firstlineno: int, + tblines: Sequence[str], + errorstring: str, + argname: str | None, + ) -> None: + self.tblines = tblines + self.errorstring = errorstring + self.filename = filename + self.firstlineno = firstlineno + self.argname = argname + + def toterminal(self, tw: TerminalWriter) -> None: + # tw.line("FixtureLookupError: %s" %(self.argname), red=True) + for tbline in self.tblines: + tw.line(tbline.rstrip()) + lines = self.errorstring.split("\n") + if lines: + tw.line( + f"{FormattedExcinfo.fail_marker} {lines[0].strip()}", + red=True, + ) + for line in lines[1:]: + tw.line( + f"{FormattedExcinfo.flow_marker} {line.strip()}", + red=True, + ) + tw.line() + tw.line("%s:%d" % (os.fspath(self.filename), self.firstlineno + 1)) + + +def call_fixture_func( + fixturefunc: _FixtureFunc[FixtureValue], request: FixtureRequest, kwargs +) -> FixtureValue: + if is_generator(fixturefunc): + fixturefunc = cast( + Callable[..., Generator[FixtureValue, None, None]], fixturefunc + ) + generator = fixturefunc(**kwargs) + try: + fixture_result = next(generator) + except StopIteration: + raise ValueError(f"{request.fixturename} did not yield a value") from None + finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator) + request.addfinalizer(finalizer) + else: + fixturefunc = cast(Callable[..., FixtureValue], fixturefunc) + fixture_result = fixturefunc(**kwargs) + return fixture_result + + +def _teardown_yield_fixture(fixturefunc, it) -> None: + """Execute the teardown of a fixture function by advancing the iterator + after the yield and ensure the iteration ends (if not it means there is + more than one yield in the function).""" + try: + next(it) + except StopIteration: + pass + else: + fs, lineno = getfslineno(fixturefunc) + fail( + f"fixture function has more than one 'yield':\n\n" + f"{Source(fixturefunc).indent()}\n" + f"{fs}:{lineno + 1}", + pytrace=False, + ) + + +def _eval_scope_callable( + scope_callable: Callable[[str, Config], _ScopeName], + fixture_name: str, + config: Config, +) -> _ScopeName: + try: + # Type ignored because there is no typing mechanism to specify + # keyword arguments, currently. + result = scope_callable(fixture_name=fixture_name, config=config) # type: ignore[call-arg] + except Exception as e: + raise TypeError( + f"Error evaluating {scope_callable} while defining fixture '{fixture_name}'.\n" + "Expected a function with the signature (*, fixture_name, config)" + ) from e + if not isinstance(result, str): + fail( + f"Expected {scope_callable} to return a 'str' while defining fixture '{fixture_name}', but it returned:\n" + f"{result!r}", + pytrace=False, + ) + return result + + +@final +class FixtureDef(Generic[FixtureValue]): + """A container for a fixture definition. + + Note: At this time, only explicitly documented fields and methods are + considered public stable API. + """ + + def __init__( + self, + config: Config, + baseid: str | None, + argname: str, + func: _FixtureFunc[FixtureValue], + scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] | None, + params: Sequence[object] | None, + ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + # The "base" node ID for the fixture. + # + # This is a node ID prefix. 
A fixture is only available to a node (e.g. + # a `Function` item) if the fixture's baseid is a nodeid of a parent of + # node. + # + # For a fixture found in a Collector's object (e.g. a `Module`s module, + # a `Class`'s class), the baseid is the Collector's nodeid. + # + # For a fixture found in a conftest plugin, the baseid is the conftest's + # directory path relative to the rootdir. + # + # For other plugins, the baseid is the empty string (always matches). + self.baseid: Final = baseid or "" + # Whether the fixture was found from a node or a conftest in the + # collection tree. Will be false for fixtures defined in non-conftest + # plugins. + self.has_location: Final = baseid is not None + # The fixture factory function. + self.func: Final = func + # The name by which the fixture may be requested. + self.argname: Final = argname + if scope is None: + scope = Scope.Function + elif callable(scope): + scope = _eval_scope_callable(scope, argname, config) + if isinstance(scope, str): + scope = Scope.from_user( + scope, descr=f"Fixture '{func.__name__}'", where=baseid + ) + self._scope: Final = scope + # If the fixture is directly parametrized, the parameter values. + self.params: Final = params + # If the fixture is directly parametrized, a tuple of explicit IDs to + # assign to the parameter values, or a callable to generate an ID given + # a parameter value. + self.ids: Final = ids + # The names requested by the fixtures. + self.argnames: Final = getfuncargnames(func, name=argname) + # If the fixture was executed, the current value of the fixture. + # Can change if the fixture is executed with different parameters. + self.cached_result: _FixtureCachedResult[FixtureValue] | None = None + self._finalizers: Final[list[Callable[[], object]]] = [] + + @property + def scope(self) -> _ScopeName: + """Scope string, one of "function", "class", "module", "package", "session".""" + return self._scope.value + + def addfinalizer(self, finalizer: Callable[[], object]) -> None: + self._finalizers.append(finalizer) + + def finish(self, request: SubRequest) -> None: + exceptions: list[BaseException] = [] + while self._finalizers: + fin = self._finalizers.pop() + try: + fin() + except BaseException as e: + exceptions.append(e) + node = request.node + node.ihook.pytest_fixture_post_finalizer(fixturedef=self, request=request) + # Even if finalization fails, we invalidate the cached fixture + # value and remove all finalizers because they may be bound methods + # which will keep instances alive. + self.cached_result = None + self._finalizers.clear() + if len(exceptions) == 1: + raise exceptions[0] + elif len(exceptions) > 1: + msg = f'errors while tearing down fixture "{self.argname}" of {node}' + raise BaseExceptionGroup(msg, exceptions[::-1]) + + def execute(self, request: SubRequest) -> FixtureValue: + """Return the value of this fixture, executing it if not cached.""" + # Ensure that the dependent fixtures requested by this fixture are loaded. + # This needs to be done before checking if we have a cached value, since + # if a dependent fixture has their cache invalidated, e.g. due to + # parametrization, they finalize themselves and fixtures depending on it + # (which will likely include this fixture) setting `self.cached_result = None`. 
+        # See #4871
+        requested_fixtures_that_should_finalize_us = []
+        for argname in self.argnames:
+            fixturedef = request._get_active_fixturedef(argname)
+            # Saves requested fixtures in a list so we later can add our finalizer
+            # to them, ensuring that if a requested fixture gets torn down we get torn
+            # down first. This is generally handled by SetupState, but still currently
+            # needed when this fixture is not parametrized but depends on a parametrized
+            # fixture.
+            if not isinstance(fixturedef, PseudoFixtureDef):
+                requested_fixtures_that_should_finalize_us.append(fixturedef)
+
+        # Check for (and return) cached value/exception.
+        if self.cached_result is not None:
+            request_cache_key = self.cache_key(request)
+            cache_key = self.cached_result[1]
+            try:
+                # Attempt to make a normal == check: this might fail for objects
+                # which do not implement the standard comparison (like numpy arrays -- #6497).
+                cache_hit = bool(request_cache_key == cache_key)
+            except (ValueError, RuntimeError):
+                # If the comparison raises, use 'is' as fallback.
+                cache_hit = request_cache_key is cache_key
+
+            if cache_hit:
+                if self.cached_result[2] is not None:
+                    exc, exc_tb = self.cached_result[2]
+                    raise exc.with_traceback(exc_tb)
+                else:
+                    result = self.cached_result[0]
+                    return result
+            # We have a previous but differently parametrized fixture instance
+            # so we need to tear it down before creating a new one.
+            self.finish(request)
+            assert self.cached_result is None
+
+        # Add finalizer to requested fixtures we saved previously.
+        # We make sure to do this after checking for cached value to avoid
+        # adding our finalizer multiple times. (#12135)
+        finalizer = functools.partial(self.finish, request=request)
+        for parent_fixture in requested_fixtures_that_should_finalize_us:
+            parent_fixture.addfinalizer(finalizer)
+
+        ihook = request.node.ihook
+        try:
+            # Setup the fixture, run the code in it, and cache the value
+            # in self.cached_result
+            result = ihook.pytest_fixture_setup(fixturedef=self, request=request)
+        finally:
+            # schedule our finalizer, even if the setup failed
+            request.node.addfinalizer(finalizer)
+
+        return result
+
+    def cache_key(self, request: SubRequest) -> object:
+        return getattr(request, "param", None)
+
+    def __repr__(self) -> str:
+        return f"<FixtureDef argname={self.argname!r} scope={self.scope!r} baseid={self.baseid!r}>"
+
+
+def resolve_fixture_function(
+    fixturedef: FixtureDef[FixtureValue], request: FixtureRequest
+) -> _FixtureFunc[FixtureValue]:
+    """Get the actual callable that can be called to obtain the fixture
+    value."""
+    fixturefunc = fixturedef.func
+    # The fixture function needs to be bound to the actual
+    # request.instance so that code working with "fixturedef" behaves
+    # as expected.
+    instance = request.instance
+    if instance is not None:
+        # Handle the case where fixture is defined not in a test class, but some other class
+        # (for example a plugin class with a fixture), see #2270.
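+    # Illustrative sketch, not part of pytest itself: with cache_key() being
+    # request.param, a parametrized fixture like the one below is torn down
+    # and re-executed by FixtureDef.execute() whenever the param changes
+    # (connect() is a hypothetical helper):
+    #
+    #     @pytest.fixture(scope="module", params=["sqlite", "postgres"])
+    #     def db(request):
+    #         conn = connect(request.param)
+    #         yield conn
+    #         conn.close()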
+ if hasattr(fixturefunc, "__self__") and not isinstance( + instance, + fixturefunc.__self__.__class__, + ): + return fixturefunc + fixturefunc = getimfunc(fixturedef.func) + if fixturefunc != fixturedef.func: + fixturefunc = fixturefunc.__get__(instance) + return fixturefunc + + +def pytest_fixture_setup( + fixturedef: FixtureDef[FixtureValue], request: SubRequest +) -> FixtureValue: + """Execution of fixture setup.""" + kwargs = {} + for argname in fixturedef.argnames: + kwargs[argname] = request.getfixturevalue(argname) + + fixturefunc = resolve_fixture_function(fixturedef, request) + my_cache_key = fixturedef.cache_key(request) + try: + result = call_fixture_func(fixturefunc, request, kwargs) + except TEST_OUTCOME as e: + if isinstance(e, skip.Exception): + # The test requested a fixture which caused a skip. + # Don't show the fixture as the skip location, as then the user + # wouldn't know which test skipped. + e._use_item_location = True + fixturedef.cached_result = (None, my_cache_key, (e, e.__traceback__)) + raise + fixturedef.cached_result = (result, my_cache_key, None) + return result + + +def wrap_function_to_error_out_if_called_directly( + function: FixtureFunction, + fixture_marker: FixtureFunctionMarker, +) -> FixtureFunction: + """Wrap the given fixture function so we can raise an error about it being called directly, + instead of used as an argument in a test function.""" + name = fixture_marker.name or function.__name__ + message = ( + f'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n' + "but are created automatically when test functions request them as parameters.\n" + "See https://docs.pytest.org/en/stable/explanation/fixtures.html for more information about fixtures, and\n" + "https://docs.pytest.org/en/stable/deprecations.html#calling-fixtures-directly about how to update your code." + ) + + @functools.wraps(function) + def result(*args, **kwargs): + fail(message, pytrace=False) + + # Keep reference to the original function in our own custom attribute so we don't unwrap + # further than this point and lose useful wrappings like @mock.patch (#3774). + result.__pytest_wrapped__ = _PytestWrapper(function) # type: ignore[attr-defined] + + return cast(FixtureFunction, result) + + +@final +@dataclasses.dataclass(frozen=True) +class FixtureFunctionMarker: + scope: _ScopeName | Callable[[str, Config], _ScopeName] + params: tuple[object, ...] | None + autouse: bool = False + ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None + name: str | None = None + + _ispytest: dataclasses.InitVar[bool] = False + + def __post_init__(self, _ispytest: bool) -> None: + check_ispytest(_ispytest) + + def __call__(self, function: FixtureFunction) -> FixtureFunction: + if inspect.isclass(function): + raise ValueError("class fixtures not supported (maybe in the future)") + + if getattr(function, "_pytestfixturefunction", False): + raise ValueError( + f"@pytest.fixture is being applied more than once to the same function {function.__name__!r}" + ) + + if hasattr(function, "pytestmark"): + warnings.warn(MARKED_FIXTURE, stacklevel=2) + + function = wrap_function_to_error_out_if_called_directly(function, self) + + name = self.name or function.__name__ + if name == "request": + location = getlocation(function) + fail( + f"'request' is a reserved word for fixtures, use another name:\n {location}", + pytrace=False, + ) + + # Type ignored because https://github.com/python/mypy/issues/2087. 
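+        # Illustrative sketch, not part of pytest itself: what the wrapper
+        # installed above turns into a hard failure:
+        #
+        #     @pytest.fixture
+        #     def data():
+        #         return [1, 2, 3]
+        #
+        #     data()  # fails: fixtures must be requested, not called directly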
+        function._pytestfixturefunction = self  # type: ignore[attr-defined]
+        return function
+
+
+@overload
+def fixture(
+    fixture_function: FixtureFunction,
+    *,
+    scope: _ScopeName | Callable[[str, Config], _ScopeName] = ...,
+    params: Iterable[object] | None = ...,
+    autouse: bool = ...,
+    ids: Sequence[object | None] | Callable[[Any], object | None] | None = ...,
+    name: str | None = ...,
+) -> FixtureFunction: ...
+
+
+@overload
+def fixture(
+    fixture_function: None = ...,
+    *,
+    scope: _ScopeName | Callable[[str, Config], _ScopeName] = ...,
+    params: Iterable[object] | None = ...,
+    autouse: bool = ...,
+    ids: Sequence[object | None] | Callable[[Any], object | None] | None = ...,
+    name: str | None = None,
+) -> FixtureFunctionMarker: ...
+
+
+def fixture(
+    fixture_function: FixtureFunction | None = None,
+    *,
+    scope: _ScopeName | Callable[[str, Config], _ScopeName] = "function",
+    params: Iterable[object] | None = None,
+    autouse: bool = False,
+    ids: Sequence[object | None] | Callable[[Any], object | None] | None = None,
+    name: str | None = None,
+) -> FixtureFunctionMarker | FixtureFunction:
+    """Decorator to mark a fixture factory function.
+
+    This decorator can be used, with or without parameters, to define a
+    fixture function.
+
+    The name of the fixture function can later be referenced to cause its
+    invocation ahead of running tests: test modules or classes can use the
+    ``pytest.mark.usefixtures(fixturename)`` marker.
+
+    Test functions can directly use fixture names as input arguments in which
+    case the fixture instance returned from the fixture function will be
+    injected.
+
+    Fixtures can provide their values to test functions using ``return`` or
+    ``yield`` statements. When using ``yield`` the code block after the
+    ``yield`` statement is executed as teardown code regardless of the test
+    outcome, and must yield exactly once.
+
+    :param scope:
+        The scope for which this fixture is shared; one of ``"function"``
+        (default), ``"class"``, ``"module"``, ``"package"`` or ``"session"``.
+
+        This parameter may also be a callable which receives ``(fixture_name, config)``
+        as parameters, and must return a ``str`` with one of the values mentioned above.
+
+        See :ref:`dynamic scope` in the docs for more information.
+
+    :param params:
+        An optional list of parameters which will cause multiple invocations
+        of the fixture function and all of the tests using it. The current
+        parameter is available in ``request.param``.
+
+    :param autouse:
+        If True, the fixture func is activated for all tests that can see it.
+        If False (the default), an explicit reference is needed to activate
+        the fixture.
+
+    :param ids:
+        Sequence of ids each corresponding to the params so that they are
+        part of the test id. If no ids are provided they will be generated
+        automatically from the params.
+
+    :param name:
+        The name of the fixture. This defaults to the name of the decorated
+        function. If a fixture is used in the same module in which it is
+        defined, the function name of the fixture will be shadowed by the
+        function arg that requests the fixture; one way to resolve this is to
+        name the decorated function ``fixture_<fixturename>`` and then use
+        ``@pytest.fixture(name='<fixturename>')``.
+    """
+    fixture_marker = FixtureFunctionMarker(
+        scope=scope,
+        params=tuple(params) if params is not None else None,
+        autouse=autouse,
+        ids=None if ids is None else ids if callable(ids) else tuple(ids),
+        name=name,
+        _ispytest=True,
+    )
+
+    # Direct decoration.
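+    # Illustrative usage of the decorator documented above (hedged sketch,
+    # not part of this module):
+    #
+    #     @pytest.fixture
+    #     def simple():
+    #         return 42
+    #
+    #     @pytest.fixture(scope="module", params=[1, 2], ids=["one", "two"])
+    #     def parametrized(request):
+    #         return request.param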
+ if fixture_function: + return fixture_marker(fixture_function) + + return fixture_marker + + +def yield_fixture( + fixture_function=None, + *args, + scope="function", + params=None, + autouse=False, + ids=None, + name=None, +): + """(Return a) decorator to mark a yield-fixture factory function. + + .. deprecated:: 3.0 + Use :py:func:`pytest.fixture` directly instead. + """ + warnings.warn(YIELD_FIXTURE, stacklevel=2) + return fixture( + fixture_function, + *args, + scope=scope, + params=params, + autouse=autouse, + ids=ids, + name=name, + ) + + +@fixture(scope="session") +def pytestconfig(request: FixtureRequest) -> Config: + """Session-scoped fixture that returns the session's :class:`pytest.Config` + object. + + Example:: + + def test_foo(pytestconfig): + if pytestconfig.get_verbosity() > 0: + ... + + """ + return request.config + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "usefixtures", + type="args", + default=[], + help="List of default fixtures to be used with this project", + ) + group = parser.getgroup("general") + group.addoption( + "--fixtures", + "--funcargs", + action="store_true", + dest="showfixtures", + default=False, + help="Show available fixtures, sorted by plugin appearance " + "(fixtures with leading '_' are only shown with '-v')", + ) + group.addoption( + "--fixtures-per-test", + action="store_true", + dest="show_fixtures_per_test", + default=False, + help="Show fixtures per test", + ) + + +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.showfixtures: + showfixtures(config) + return 0 + if config.option.show_fixtures_per_test: + show_fixtures_per_test(config) + return 0 + return None + + +def _get_direct_parametrize_args(node: nodes.Node) -> set[str]: + """Return all direct parametrization arguments of a node, so we don't + mistake them for fixtures. + + Check https://github.com/pytest-dev/pytest/issues/5036. + + These things are done later as well when dealing with parametrization + so this could be improved. + """ + parametrize_argnames: set[str] = set() + for marker in node.iter_markers(name="parametrize"): + if not marker.kwargs.get("indirect", False): + p_argnames, _ = ParameterSet._parse_parametrize_args( + *marker.args, **marker.kwargs + ) + parametrize_argnames.update(p_argnames) + return parametrize_argnames + + +def deduplicate_names(*seqs: Iterable[str]) -> tuple[str, ...]: + """De-duplicate the sequence of names while keeping the original order.""" + # Ideally we would use a set, but it does not preserve insertion order. + return tuple(dict.fromkeys(name for seq in seqs for name in seq)) + + +class FixtureManager: + """pytest fixture definitions and information is stored and managed + from this class. + + During collection fm.parsefactories() is called multiple times to parse + fixture function definitions into FixtureDef objects and internal + data structures. + + During collection of test functions, metafunc-mechanics instantiate + a FuncFixtureInfo object which is cached per node/func-name. + This FuncFixtureInfo object is later retrieved by Function nodes + which themselves offer a fixturenames attribute. + + The FuncFixtureInfo object holds information about fixtures and FixtureDefs + relevant for a particular function. 
An initial list of fixtures is + assembled like this: + + - ini-defined usefixtures + - autouse-marked fixtures along the collection chain up from the function + - usefixtures markers at module/class/function level + - test function funcargs + + Subsequently the funcfixtureinfo.fixturenames attribute is computed + as the closure of the fixtures needed to setup the initial fixtures, + i.e. fixtures needed by fixture functions themselves are appended + to the fixturenames list. + + Upon the test-setup phases all fixturenames are instantiated, retrieved + by a lookup of their FuncFixtureInfo. + """ + + def __init__(self, session: Session) -> None: + self.session = session + self.config: Config = session.config + # Maps a fixture name (argname) to all of the FixtureDefs in the test + # suite/plugins defined with this name. Populated by parsefactories(). + # TODO: The order of the FixtureDefs list of each arg is significant, + # explain. + self._arg2fixturedefs: Final[dict[str, list[FixtureDef[Any]]]] = {} + self._holderobjseen: Final[set[object]] = set() + # A mapping from a nodeid to a list of autouse fixtures it defines. + self._nodeid_autousenames: Final[dict[str, list[str]]] = { + "": self.config.getini("usefixtures"), + } + session.config.pluginmanager.register(self, "funcmanage") + + def getfixtureinfo( + self, + node: nodes.Item, + func: Callable[..., object] | None, + cls: type | None, + ) -> FuncFixtureInfo: + """Calculate the :class:`FuncFixtureInfo` for an item. + + If ``func`` is None, or if the item sets an attribute + ``nofuncargs = True``, then ``func`` is not examined at all. + + :param node: + The item requesting the fixtures. + :param func: + The item's function. + :param cls: + If the function is a method, the method's class. + """ + if func is not None and not getattr(node, "nofuncargs", False): + argnames = getfuncargnames(func, name=node.name, cls=cls) + else: + argnames = () + usefixturesnames = self._getusefixturesnames(node) + autousenames = self._getautousenames(node) + initialnames = deduplicate_names(autousenames, usefixturesnames, argnames) + + direct_parametrize_args = _get_direct_parametrize_args(node) + + names_closure, arg2fixturedefs = self.getfixtureclosure( + parentnode=node, + initialnames=initialnames, + ignore_args=direct_parametrize_args, + ) + + return FuncFixtureInfo(argnames, initialnames, names_closure, arg2fixturedefs) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin, plugin_name: str) -> None: + # Fixtures defined in conftest plugins are only visible to within the + # conftest's directory. This is unlike fixtures in non-conftest plugins + # which have global visibility. So for conftests, construct the base + # nodeid from the plugin name (which is the conftest path). + if plugin_name and plugin_name.endswith("conftest.py"): + # Note: we explicitly do *not* use `plugin.__file__` here -- The + # difference is that plugin_name has the correct capitalization on + # case-insensitive systems (Windows) and other normalization issues + # (issue #11816). 
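+            # Illustrative sketch, not part of pytest itself: how the nodeid
+            # computed below scopes conftest fixtures (paths are hypothetical):
+            #
+            #     tests/sub/conftest.py  -> baseid "tests/sub"
+            #     tests/sub/test_a.py    -> sees its fixtures
+            #     tests/test_b.py        -> does not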
+ conftestpath = absolutepath(plugin_name) + try: + nodeid = str(conftestpath.parent.relative_to(self.config.rootpath)) + except ValueError: + nodeid = "" + if nodeid == ".": + nodeid = "" + if os.sep != nodes.SEP: + nodeid = nodeid.replace(os.sep, nodes.SEP) + else: + nodeid = None + + self.parsefactories(plugin, nodeid) + + def _getautousenames(self, node: nodes.Node) -> Iterator[str]: + """Return the names of autouse fixtures applicable to node.""" + for parentnode in node.listchain(): + basenames = self._nodeid_autousenames.get(parentnode.nodeid) + if basenames: + yield from basenames + + def _getusefixturesnames(self, node: nodes.Item) -> Iterator[str]: + """Return the names of usefixtures fixtures applicable to node.""" + for mark in node.iter_markers(name="usefixtures"): + yield from mark.args + + def getfixtureclosure( + self, + parentnode: nodes.Node, + initialnames: tuple[str, ...], + ignore_args: AbstractSet[str], + ) -> tuple[list[str], dict[str, Sequence[FixtureDef[Any]]]]: + # Collect the closure of all fixtures, starting with the given + # fixturenames as the initial set. As we have to visit all + # factory definitions anyway, we also return an arg2fixturedefs + # mapping so that the caller can reuse it and does not have + # to re-discover fixturedefs again for each fixturename + # (discovering matching fixtures for a given name/node is expensive). + + fixturenames_closure = list(initialnames) + + arg2fixturedefs: dict[str, Sequence[FixtureDef[Any]]] = {} + lastlen = -1 + while lastlen != len(fixturenames_closure): + lastlen = len(fixturenames_closure) + for argname in fixturenames_closure: + if argname in ignore_args: + continue + if argname in arg2fixturedefs: + continue + fixturedefs = self.getfixturedefs(argname, parentnode) + if fixturedefs: + arg2fixturedefs[argname] = fixturedefs + for arg in fixturedefs[-1].argnames: + if arg not in fixturenames_closure: + fixturenames_closure.append(arg) + + def sort_by_scope(arg_name: str) -> Scope: + try: + fixturedefs = arg2fixturedefs[arg_name] + except KeyError: + return Scope.Function + else: + return fixturedefs[-1]._scope + + fixturenames_closure.sort(key=sort_by_scope, reverse=True) + return fixturenames_closure, arg2fixturedefs + + def pytest_generate_tests(self, metafunc: Metafunc) -> None: + """Generate new tests based on parametrized fixtures used by the given metafunc""" + + def get_parametrize_mark_argnames(mark: Mark) -> Sequence[str]: + args, _ = ParameterSet._parse_parametrize_args(*mark.args, **mark.kwargs) + return args + + for argname in metafunc.fixturenames: + # Get the FixtureDefs for the argname. + fixture_defs = metafunc._arg2fixturedefs.get(argname) + if not fixture_defs: + # Will raise FixtureLookupError at setup time if not parametrized somewhere + # else (e.g @pytest.mark.parametrize) + continue + + # If the test itself parametrizes using this argname, give it + # precedence. + if any( + argname in get_parametrize_mark_argnames(mark) + for mark in metafunc.definition.iter_markers("parametrize") + ): + continue + + # In the common case we only look at the fixture def with the + # closest scope (last in the list). But if the fixture overrides + # another fixture, while requesting the super fixture, keep going + # in case the super fixture is parametrized (#1953). + for fixturedef in reversed(fixture_defs): + # Fixture is parametrized, apply it and stop. 
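+            # Illustrative sketch, not part of pytest itself: a parametrized
+            # fixture triggers the indirect parametrize call below, so every
+            # test using it is collected once per param:
+            #
+            #     @pytest.fixture(params=[0, 1])
+            #     def level(request):
+            #         return request.param
+            #
+            #     def test_level(level):     # collected as test_level[0], [1]
+            #         ...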
+ if fixturedef.params is not None: + metafunc.parametrize( + argname, + fixturedef.params, + indirect=True, + scope=fixturedef.scope, + ids=fixturedef.ids, + ) + break + + # Not requesting the overridden super fixture, stop. + if argname not in fixturedef.argnames: + break + + # Try next super fixture, if any. + + def pytest_collection_modifyitems(self, items: list[nodes.Item]) -> None: + # Separate parametrized setups. + items[:] = reorder_items(items) + + def _register_fixture( + self, + *, + name: str, + func: _FixtureFunc[object], + nodeid: str | None, + scope: Scope | _ScopeName | Callable[[str, Config], _ScopeName] = "function", + params: Sequence[object] | None = None, + ids: tuple[object | None, ...] | Callable[[Any], object | None] | None = None, + autouse: bool = False, + ) -> None: + """Register a fixture + + :param name: + The fixture's name. + :param func: + The fixture's implementation function. + :param nodeid: + The visibility of the fixture. The fixture will be available to the + node with this nodeid and its children in the collection tree. + None means that the fixture is visible to the entire collection tree, + e.g. a fixture defined for general use in a plugin. + :param scope: + The fixture's scope. + :param params: + The fixture's parametrization params. + :param ids: + The fixture's IDs. + :param autouse: + Whether this is an autouse fixture. + """ + fixture_def = FixtureDef( + config=self.config, + baseid=nodeid, + argname=name, + func=func, + scope=scope, + params=params, + ids=ids, + _ispytest=True, + ) + + faclist = self._arg2fixturedefs.setdefault(name, []) + if fixture_def.has_location: + faclist.append(fixture_def) + else: + # fixturedefs with no location are at the front + # so this inserts the current fixturedef after the + # existing fixturedefs from external plugins but + # before the fixturedefs provided in conftests. + i = len([f for f in faclist if not f.has_location]) + faclist.insert(i, fixture_def) + if autouse: + self._nodeid_autousenames.setdefault(nodeid or "", []).append(name) + + @overload + def parsefactories( + self, + node_or_obj: nodes.Node, + ) -> None: + raise NotImplementedError() + + @overload + def parsefactories( + self, + node_or_obj: object, + nodeid: str | None, + ) -> None: + raise NotImplementedError() + + def parsefactories( + self, + node_or_obj: nodes.Node | object, + nodeid: str | NotSetType | None = NOTSET, + ) -> None: + """Collect fixtures from a collection node or object. + + Found fixtures are parsed into `FixtureDef`s and saved. + + If `node_or_object` is a collection node (with an underlying Python + object), the node's object is traversed and the node's nodeid is used to + determine the fixtures' visibility. `nodeid` must not be specified in + this case. + + If `node_or_object` is an object (e.g. a plugin), the object is + traversed and the given `nodeid` is used to determine the fixtures' + visibility. `nodeid` must be specified in this case; None and "" mean + total visibility. + """ + if nodeid is not NOTSET: + holderobj = node_or_obj + else: + assert isinstance(node_or_obj, nodes.Node) + holderobj = cast(object, node_or_obj.obj) # type: ignore[attr-defined] + assert isinstance(node_or_obj.nodeid, str) + nodeid = node_or_obj.nodeid + if holderobj in self._holderobjseen: + return + + # Avoid accessing `@property` (and other descriptors) when iterating fixtures. 
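+        # Illustrative sketch, not part of pytest itself: marker.name, applied
+        # below, registers a fixture under a name other than its function name:
+        #
+        #     @pytest.fixture(name="db")
+        #     def fixture_db():
+        #         ...
+        #
+        #     def test_query(db):            # requested as "db"
+        #         ...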
+ if not safe_isclass(holderobj) and not isinstance(holderobj, types.ModuleType): + holderobj_tp: object = type(holderobj) + else: + holderobj_tp = holderobj + + self._holderobjseen.add(holderobj) + for name in dir(holderobj): + # The attribute can be an arbitrary descriptor, so the attribute + # access below can raise. safe_getattr() ignores such exceptions. + obj_ub = safe_getattr(holderobj_tp, name, None) + marker = getfixturemarker(obj_ub) + if not isinstance(marker, FixtureFunctionMarker): + # Magic globals with __getattr__ might have got us a wrong + # fixture attribute. + continue + + # OK we know it is a fixture -- now safe to look up on the _instance_. + obj = getattr(holderobj, name) + + if marker.name: + name = marker.name + + # During fixture definition we wrap the original fixture function + # to issue a warning if called directly, so here we unwrap it in + # order to not emit the warning when pytest itself calls the + # fixture function. + func = get_real_method(obj, holderobj) + + self._register_fixture( + name=name, + nodeid=nodeid, + func=func, + scope=marker.scope, + params=marker.params, + ids=marker.ids, + autouse=marker.autouse, + ) + + def getfixturedefs( + self, argname: str, node: nodes.Node + ) -> Sequence[FixtureDef[Any]] | None: + """Get FixtureDefs for a fixture name which are applicable + to a given node. + + Returns None if there are no fixtures at all defined with the given + name. (This is different from the case in which there are fixtures + with the given name, but none applicable to the node. In this case, + an empty result is returned). + + :param argname: Name of the fixture to search for. + :param node: The requesting Node. + """ + try: + fixturedefs = self._arg2fixturedefs[argname] + except KeyError: + return None + return tuple(self._matchfactories(fixturedefs, node)) + + def _matchfactories( + self, fixturedefs: Iterable[FixtureDef[Any]], node: nodes.Node + ) -> Iterator[FixtureDef[Any]]: + parentnodeids = {n.nodeid for n in node.iter_parents()} + for fixturedef in fixturedefs: + if fixturedef.baseid in parentnodeids: + yield fixturedef + + +def show_fixtures_per_test(config: Config) -> int | ExitCode: + from _pytest.main import wrap_session + + return wrap_session(config, _show_fixtures_per_test) + + +_PYTEST_DIR = Path(_pytest.__file__).parent + + +def _pretty_fixture_path(invocation_dir: Path, func) -> str: + loc = Path(getlocation(func, invocation_dir)) + prefix = Path("...", "_pytest") + try: + return str(prefix / loc.relative_to(_PYTEST_DIR)) + except ValueError: + return bestrelpath(invocation_dir, loc) + + +def _show_fixtures_per_test(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + invocation_dir = config.invocation_params.dir + tw = _pytest.config.create_terminal_writer(config) + verbose = config.get_verbosity() + + def get_best_relpath(func) -> str: + loc = getlocation(func, invocation_dir) + return bestrelpath(invocation_dir, Path(loc)) + + def write_fixture(fixture_def: FixtureDef[object]) -> None: + argname = fixture_def.argname + if verbose <= 0 and argname.startswith("_"): + return + prettypath = _pretty_fixture_path(invocation_dir, fixture_def.func) + tw.write(f"{argname}", green=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") + fixture_doc = inspect.getdoc(fixture_def.func) + if fixture_doc: + write_docstring( + tw, + fixture_doc.split("\n\n", maxsplit=1)[0] + if verbose <= 0 + else fixture_doc, + ) + else: + tw.line(" no docstring available", red=True) + + def 
write_item(item: nodes.Item) -> None: + # Not all items have _fixtureinfo attribute. + info: FuncFixtureInfo | None = getattr(item, "_fixtureinfo", None) + if info is None or not info.name2fixturedefs: + # This test item does not use any fixtures. + return + tw.line() + tw.sep("-", f"fixtures used by {item.name}") + # TODO: Fix this type ignore. + tw.sep("-", f"({get_best_relpath(item.function)})") # type: ignore[attr-defined] + # dict key not used in loop but needed for sorting. + for _, fixturedefs in sorted(info.name2fixturedefs.items()): + assert fixturedefs is not None + if not fixturedefs: + continue + # Last item is expected to be the one used by the test item. + write_fixture(fixturedefs[-1]) + + for session_item in session.items: + write_item(session_item) + + +def showfixtures(config: Config) -> int | ExitCode: + from _pytest.main import wrap_session + + return wrap_session(config, _showfixtures_main) + + +def _showfixtures_main(config: Config, session: Session) -> None: + import _pytest.config + + session.perform_collect() + invocation_dir = config.invocation_params.dir + tw = _pytest.config.create_terminal_writer(config) + verbose = config.get_verbosity() + + fm = session._fixturemanager + + available = [] + seen: set[tuple[str, str]] = set() + + for argname, fixturedefs in fm._arg2fixturedefs.items(): + assert fixturedefs is not None + if not fixturedefs: + continue + for fixturedef in fixturedefs: + loc = getlocation(fixturedef.func, invocation_dir) + if (fixturedef.argname, loc) in seen: + continue + seen.add((fixturedef.argname, loc)) + available.append( + ( + len(fixturedef.baseid), + fixturedef.func.__module__, + _pretty_fixture_path(invocation_dir, fixturedef.func), + fixturedef.argname, + fixturedef, + ) + ) + + available.sort() + currentmodule = None + for baseid, module, prettypath, argname, fixturedef in available: + if currentmodule != module: + if not module.startswith("_pytest."): + tw.line() + tw.sep("-", f"fixtures defined from {module}") + currentmodule = module + if verbose <= 0 and argname.startswith("_"): + continue + tw.write(f"{argname}", green=True) + if fixturedef.scope != "function": + tw.write(f" [{fixturedef.scope} scope]", cyan=True) + tw.write(f" -- {prettypath}", yellow=True) + tw.write("\n") + doc = inspect.getdoc(fixturedef.func) + if doc: + write_docstring( + tw, doc.split("\n\n", maxsplit=1)[0] if verbose <= 0 else doc + ) + else: + tw.line(" no docstring available", red=True) + tw.line() + + +def write_docstring(tw: TerminalWriter, doc: str, indent: str = " ") -> None: + for line in doc.split("\n"): + tw.line(indent + line) diff --git a/venv/Lib/site-packages/_pytest/freeze_support.py b/venv/Lib/site-packages/_pytest/freeze_support.py new file mode 100644 index 0000000..2ba6f9b --- /dev/null +++ b/venv/Lib/site-packages/_pytest/freeze_support.py @@ -0,0 +1,45 @@ +"""Provides a function to report all internal modules for using freezing +tools.""" + +from __future__ import annotations + +import types +from typing import Iterator + + +def freeze_includes() -> list[str]: + """Return a list of module names used by pytest that should be + included by cx_freeze.""" + import _pytest + + result = list(_iter_all_modules(_pytest)) + return result + + +def _iter_all_modules( + package: str | types.ModuleType, + prefix: str = "", +) -> Iterator[str]: + """Iterate over the names of all modules that can be found in the given + package, recursively. 
+ + >>> import _pytest + >>> list(_iter_all_modules(_pytest)) + ['_pytest._argcomplete', '_pytest._code.code', ...] + """ + import os + import pkgutil + + if isinstance(package, str): + path = package + else: + # Type ignored because typeshed doesn't define ModuleType.__path__ + # (only defined on packages). + package_path = package.__path__ + path, prefix = package_path[0], package.__name__ + "." + for _, name, is_package in pkgutil.iter_modules([path]): + if is_package: + for m in _iter_all_modules(os.path.join(path, name), prefix=name + "."): + yield prefix + m + else: + yield prefix + name diff --git a/venv/Lib/site-packages/_pytest/helpconfig.py b/venv/Lib/site-packages/_pytest/helpconfig.py new file mode 100644 index 0000000..1886d5c --- /dev/null +++ b/venv/Lib/site-packages/_pytest/helpconfig.py @@ -0,0 +1,276 @@ +# mypy: allow-untyped-defs +"""Version info, help messages, tracing configuration.""" + +from __future__ import annotations + +from argparse import Action +import os +import sys +from typing import Generator + +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import PrintHelp +from _pytest.config.argparsing import Parser +from _pytest.terminal import TerminalReporter +import pytest + + +class HelpAction(Action): + """An argparse Action that will raise an exception in order to skip the + rest of the argument parsing when --help is passed. + + This prevents argparse from quitting due to missing required arguments + when any are defined, for example by ``pytest_addoption``. + This is similar to the way that the builtin argparse --help option is + implemented by raising SystemExit. + """ + + def __init__(self, option_strings, dest=None, default=False, help=None): + super().__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + nargs=0, + help=help, + ) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + # We should only skip the rest of the parsing after preparse is done. + if getattr(parser._parser, "after_preparse", False): + raise PrintHelp + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--version", + "-V", + action="count", + default=0, + dest="version", + help="Display pytest version and information about plugins. " + "When given twice, also display information about plugins.", + ) + group._addoption( + "-h", + "--help", + action=HelpAction, + dest="help", + help="Show help message and configuration info", + ) + group._addoption( + "-p", + action="append", + dest="plugins", + default=[], + metavar="name", + help="Early-load given plugin module name or entry point (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`.", + ) + group.addoption( + "--traceconfig", + "--trace-config", + action="store_true", + default=False, + help="Trace considerations of conftest.py files", + ) + group.addoption( + "--debug", + action="store", + nargs="?", + const="pytestdebug.log", + dest="debug", + metavar="DEBUG_FILE_NAME", + help="Store internal tracing debug information in this log file. " + "This file is opened with 'w' and truncated as a result, care advised. " + "Default: pytestdebug.log.", + ) + group._addoption( + "-o", + "--override-ini", + dest="override_ini", + action="append", + help='Override ini option with "option=value" style, ' + "e.g. 
`-o xfail_strict=True -o cache_dir=cache`.", + ) + + +@pytest.hookimpl(wrapper=True) +def pytest_cmdline_parse() -> Generator[None, Config, Config]: + config = yield + + if config.option.debug: + # --debug | --debug was provided. + path = config.option.debug + debugfile = open(path, "w", encoding="utf-8") + debugfile.write( + "versions pytest-{}, " + "python-{}\ninvocation_dir={}\ncwd={}\nargs={}\n\n".format( + pytest.__version__, + ".".join(map(str, sys.version_info)), + config.invocation_params.dir, + os.getcwd(), + config.invocation_params.args, + ) + ) + config.trace.root.setwriter(debugfile.write) + undo_tracing = config.pluginmanager.enable_tracing() + sys.stderr.write(f"writing pytest debug information to {path}\n") + + def unset_tracing() -> None: + debugfile.close() + sys.stderr.write(f"wrote pytest debug information to {debugfile.name}\n") + config.trace.root.setwriter(None) + undo_tracing() + + config.add_cleanup(unset_tracing) + + return config + + +def showversion(config: Config) -> None: + if config.option.version > 1: + sys.stdout.write( + f"This is pytest version {pytest.__version__}, imported from {pytest.__file__}\n" + ) + plugininfo = getpluginversioninfo(config) + if plugininfo: + for line in plugininfo: + sys.stdout.write(line + "\n") + else: + sys.stdout.write(f"pytest {pytest.__version__}\n") + + +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.version > 0: + showversion(config) + return 0 + elif config.option.help: + config._do_configure() + showhelp(config) + config._ensure_unconfigure() + return 0 + return None + + +def showhelp(config: Config) -> None: + import textwrap + + reporter: TerminalReporter | None = config.pluginmanager.get_plugin( + "terminalreporter" + ) + assert reporter is not None + tw = reporter._tw + tw.write(config._parser.optparser.format_help()) + tw.line() + tw.line( + "[pytest] ini-options in the first " + "pytest.ini|tox.ini|setup.cfg|pyproject.toml file found:" + ) + tw.line() + + columns = tw.fullwidth # costly call + indent_len = 24 # based on argparse's max_help_position=24 + indent = " " * indent_len + for name in config._parser._ininames: + help, type, default = config._parser._inidict[name] + if type is None: + type = "string" + if help is None: + raise TypeError(f"help argument cannot be None for {name}") + spec = f"{name} ({type}):" + tw.write(f" {spec}") + spec_len = len(spec) + if spec_len > (indent_len - 3): + # Display help starting at a new line. + tw.line() + helplines = textwrap.wrap( + help, + columns, + initial_indent=indent, + subsequent_indent=indent, + break_on_hyphens=False, + ) + + for line in helplines: + tw.line(line) + else: + # Display help starting after the spec, following lines indented. 
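+            # Illustrative command lines for the debugconfig options defined
+            # above (hedged sketch):
+            #
+            #     pytest --debug                   # trace to pytestdebug.log
+            #     pytest --debug debug.log         # trace to a custom file
+            #     pytest -o xfail_strict=True      # override one ini value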
+ tw.write(" " * (indent_len - spec_len - 2)) + wrapped = textwrap.wrap(help, columns - indent_len, break_on_hyphens=False) + + if wrapped: + tw.line(wrapped[0]) + for line in wrapped[1:]: + tw.line(indent + line) + + tw.line() + tw.line("Environment variables:") + vars = [ + ( + "CI", + "When set (regardless of value), pytest knows it is running in a " + "CI process and does not truncate summary info", + ), + ("BUILD_NUMBER", "Equivalent to CI"), + ("PYTEST_ADDOPTS", "Extra command line options"), + ("PYTEST_PLUGINS", "Comma-separated plugins to load during startup"), + ("PYTEST_DISABLE_PLUGIN_AUTOLOAD", "Set to disable plugin auto-loading"), + ("PYTEST_DEBUG", "Set to enable debug tracing of pytest's internals"), + ] + for name, help in vars: + tw.line(f" {name:<24} {help}") + tw.line() + tw.line() + + tw.line("to see available markers type: pytest --markers") + tw.line("to see available fixtures type: pytest --fixtures") + tw.line( + "(shown according to specified file_or_dir or current dir " + "if not specified; fixtures with leading '_' are only shown " + "with the '-v' option" + ) + + for warningreport in reporter.stats.get("warnings", []): + tw.line("warning : " + warningreport.message, red=True) + + +conftest_options = [("pytest_plugins", "list of plugin names to load")] + + +def getpluginversioninfo(config: Config) -> list[str]: + lines = [] + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + lines.append("registered third-party plugins:") + for plugin, dist in plugininfo: + loc = getattr(plugin, "__file__", repr(plugin)) + content = f"{dist.project_name}-{dist.version} at {loc}" + lines.append(" " + content) + return lines + + +def pytest_report_header(config: Config) -> list[str]: + lines = [] + if config.option.debug or config.option.traceconfig: + lines.append(f"using: pytest-{pytest.__version__}") + + verinfo = getpluginversioninfo(config) + if verinfo: + lines.extend(verinfo) + + if config.option.traceconfig: + lines.append("active plugins:") + items = config.pluginmanager.list_name_plugin() + for name, plugin in items: + if hasattr(plugin, "__file__"): + r = plugin.__file__ + else: + r = repr(plugin) + lines.append(f" {name:<20}: {r}") + return lines diff --git a/venv/Lib/site-packages/_pytest/hookspec.py b/venv/Lib/site-packages/_pytest/hookspec.py new file mode 100644 index 0000000..0a41b0a --- /dev/null +++ b/venv/Lib/site-packages/_pytest/hookspec.py @@ -0,0 +1,1333 @@ +# mypy: allow-untyped-defs +# ruff: noqa: T100 +"""Hook specifications for pytest plugins which are invoked by pytest itself +and by builtin plugins.""" + +from __future__ import annotations + +from pathlib import Path +from typing import Any +from typing import Mapping +from typing import Sequence +from typing import TYPE_CHECKING + +from pluggy import HookspecMarker + +from .deprecated import HOOK_LEGACY_PATH_ARG + + +if TYPE_CHECKING: + import pdb + from typing import Literal + import warnings + + from _pytest._code.code import ExceptionInfo + from _pytest._code.code import ExceptionRepr + from _pytest.compat import LEGACY_PATH + from _pytest.config import _PluggyPlugin + from _pytest.config import Config + from _pytest.config import ExitCode + from _pytest.config import PytestPluginManager + from _pytest.config.argparsing import Parser + from _pytest.fixtures import FixtureDef + from _pytest.fixtures import SubRequest + from _pytest.main import Session + from _pytest.nodes import Collector + from _pytest.nodes import Item + from _pytest.outcomes import Exit + from 
_pytest.python import Class
+    from _pytest.python import Function
+    from _pytest.python import Metafunc
+    from _pytest.python import Module
+    from _pytest.reports import CollectReport
+    from _pytest.reports import TestReport
+    from _pytest.runner import CallInfo
+    from _pytest.terminal import TerminalReporter
+    from _pytest.terminal import TestShortLogReport
+
+
+hookspec = HookspecMarker("pytest")
+
+# -------------------------------------------------------------------------
+# Initialization hooks called for every plugin
+# -------------------------------------------------------------------------
+
+
+@hookspec(historic=True)
+def pytest_addhooks(pluginmanager: PytestPluginManager) -> None:
+    """Called at plugin registration time to allow adding new hooks via a call to
+    :func:`pluginmanager.add_hookspecs(module_or_class, prefix) <pluggy.PluginManager.add_hookspecs>`.
+
+    :param pluginmanager: The pytest plugin manager.
+
+    .. note::
+        This hook is incompatible with hook wrappers.
+
+    Use in conftest plugins
+    =======================
+
+    If a conftest plugin implements this hook, it will be called immediately
+    when the conftest is registered.
+    """
+
+
+@hookspec(historic=True)
+def pytest_plugin_registered(
+    plugin: _PluggyPlugin,
+    plugin_name: str,
+    manager: PytestPluginManager,
+) -> None:
+    """A new pytest plugin got registered.
+
+    :param plugin: The plugin module or instance.
+    :param plugin_name: The name by which the plugin is registered.
+    :param manager: The pytest plugin manager.
+
+    .. note::
+        This hook is incompatible with hook wrappers.
+
+    Use in conftest plugins
+    =======================
+
+    If a conftest plugin implements this hook, it will be called immediately
+    when the conftest is registered, once for each plugin registered thus far
+    (including itself!), and for all plugins thereafter when they are
+    registered.
+    """
+
+
+@hookspec(historic=True)
+def pytest_addoption(parser: Parser, pluginmanager: PytestPluginManager) -> None:
+    """Register argparse-style options and ini-style config values,
+    called once at the beginning of a test run.
+
+    :param parser:
+        To add command line options, call
+        :py:func:`parser.addoption(...) <pytest.Parser.addoption>`.
+        To add ini-file values call :py:func:`parser.addini(...)
+        <pytest.Parser.addini>`.
+
+    :param pluginmanager:
+        The pytest plugin manager, which can be used to install :py:func:`~pytest.hookspec`'s
+        or :py:func:`~pytest.hookimpl`'s and allow one plugin to call another plugin's hooks
+        to change how command line options are added.
+
+    Options can later be accessed through the
+    :py:class:`config <pytest.Config>` object, respectively:
+
+    - :py:func:`config.getoption(name) <pytest.Config.getoption>` to
+      retrieve the value of a command line option.
+
+    - :py:func:`config.getini(name) <pytest.Config.getini>` to retrieve
+      a value read from an ini-style file.
+
+    The config object is passed around on many internal objects via the ``.config``
+    attribute or can be retrieved as the ``pytestconfig`` fixture.
+
+    .. note::
+        This hook is incompatible with hook wrappers.
+
+    Use in conftest plugins
+    =======================
+
+    If a conftest plugin implements this hook, it will be called immediately
+    when the conftest is registered.
+
+    This hook is only called for :ref:`initial conftests <pluginorder>`.
+    """
+
+
+@hookspec(historic=True)
+def pytest_configure(config: Config) -> None:
+    """Allow plugins and conftest files to perform initial configuration.
+
+    .. note::
+        This hook is incompatible with hook wrappers.
+
+    :param config: The pytest config object.
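+
+    A minimal conftest sketch (illustrative, assuming only the documented
+    ``config.addinivalue_line`` API)::
+
+        def pytest_configure(config):
+            config.addinivalue_line("markers", "slow: marks tests as slow")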
+ + Use in conftest plugins + ======================= + + This hook is called for every :ref:`initial conftest ` file + after command line options have been parsed. After that, the hook is called + for other conftest files as they are registered. + """ + + +# ------------------------------------------------------------------------- +# Bootstrapping hooks called for plugins registered early enough: +# internal and 3rd party plugins. +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_cmdline_parse( + pluginmanager: PytestPluginManager, args: list[str] +) -> Config | None: + """Return an initialized :class:`~pytest.Config`, parsing the specified args. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + This hook is only called for plugin classes passed to the + ``plugins`` arg when using `pytest.main`_ to perform an in-process + test run. + + :param pluginmanager: The pytest plugin manager. + :param args: List of arguments passed on the command line. + :returns: A pytest config object. + + Use in conftest plugins + ======================= + + This hook is not called for conftest files. + """ + + +def pytest_load_initial_conftests( + early_config: Config, parser: Parser, args: list[str] +) -> None: + """Called to implement the loading of :ref:`initial conftest files + ` ahead of command line option parsing. + + :param early_config: The pytest config object. + :param args: Arguments passed on the command line. + :param parser: To add command line options. + + Use in conftest plugins + ======================= + + This hook is not called for conftest files. + """ + + +@hookspec(firstresult=True) +def pytest_cmdline_main(config: Config) -> ExitCode | int | None: + """Called for performing the main command line action. + + The default implementation will invoke the configure hooks and + :hook:`pytest_runtestloop`. + + Stops at first non-None result, see :ref:`firstresult`. + + :param config: The pytest config object. + :returns: The exit code. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +# ------------------------------------------------------------------------- +# collection hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_collection(session: Session) -> object | None: + """Perform the collection phase for the given session. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + The default collection phase is this (see individual hooks for full details): + + 1. Starting from ``session`` as the initial collector: + + 1. ``pytest_collectstart(collector)`` + 2. ``report = pytest_make_collect_report(collector)`` + 3. ``pytest_exception_interact(collector, call, report)`` if an interactive exception occurred + 4. For each collected node: + + 1. If an item, ``pytest_itemcollected(item)`` + 2. If a collector, recurse into it. + + 5. ``pytest_collectreport(report)`` + + 2. ``pytest_collection_modifyitems(session, config, items)`` + + 1. ``pytest_deselected(items)`` for any deselected items (may be called multiple times) + + 3. ``pytest_collection_finish(session)`` + 4. Set ``session.items`` to the list of collected items + 5. 
Set ``session.testscollected`` to the number of collected items + + You can implement this hook to only perform some action before collection, + for example the terminal plugin uses it to start displaying the collection + counter (and returns `None`). + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +def pytest_collection_modifyitems( + session: Session, config: Config, items: list[Item] +) -> None: + """Called after collection has been performed. May filter or re-order + the items in-place. + + When items are deselected (filtered out from ``items``), + the hook :hook:`pytest_deselected` must be called explicitly + with the deselected items to properly notify other plugins, + e.g. with ``config.hook.pytest_deselected(deselected_items)``. + + :param session: The pytest session object. + :param config: The pytest config object. + :param items: List of item objects. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_collection_finish(session: Session) -> None: + """Called after collection has been performed and modified. + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +@hookspec( + firstresult=True, + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="collection_path" + ), + }, +) +def pytest_ignore_collect( + collection_path: Path, path: LEGACY_PATH, config: Config +) -> bool | None: + """Return ``True`` to ignore this path for collection. + + Return ``None`` to let other plugins ignore the path for collection. + + Returning ``False`` will forcefully *not* ignore this path for collection, + without giving a chance for other plugins to ignore this path. + + This hook is consulted for all files and directories prior to calling + more specific hooks. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collection_path: The path to analyze. + :type collection_path: pathlib.Path + :param path: The path to analyze (deprecated). + :param config: The pytest config object. + + .. versionchanged:: 7.0.0 + The ``collection_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collection path, only + conftest files in parent directories of the collection path are consulted + (if the path is a directory, its own conftest file is *not* consulted - a + directory cannot ignore itself!). + """ + + +@hookspec(firstresult=True) +def pytest_collect_directory(path: Path, parent: Collector) -> Collector | None: + """Create a :class:`~pytest.Collector` for the given directory, or None if + not relevant. + + .. versionadded:: 8.0 + + For best results, the returned collector should be a subclass of + :class:`~pytest.Directory`, but this is not required. + + The new node needs to have the specified ``parent`` as a parent. + + Stops at first non-None result, see :ref:`firstresult`. + + :param path: The path to analyze. + :type path: pathlib.Path + + See :ref:`custom directory collectors` for a simple example of use of this + hook. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. 
For a given collection path, only + conftest files in parent directories of the collection path are consulted + (if the path is a directory, its own conftest file is *not* consulted - a + directory cannot collect itself!). + """ + + +@hookspec( + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="file_path" + ), + }, +) +def pytest_collect_file( + file_path: Path, path: LEGACY_PATH, parent: Collector +) -> Collector | None: + """Create a :class:`~pytest.Collector` for the given path, or None if not relevant. + + For best results, the returned collector should be a subclass of + :class:`~pytest.File`, but this is not required. + + The new node needs to have the specified ``parent`` as a parent. + + :param file_path: The path to analyze. + :type file_path: pathlib.Path + :param path: The path to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``file_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. The ``path`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given file path, only + conftest files in parent directories of the file path are consulted. + """ + + +# logging hooks for collection + + +def pytest_collectstart(collector: Collector) -> None: + """Collector starts collecting. + + :param collector: + The collector. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. + """ + + +def pytest_itemcollected(item: Item) -> None: + """We just collected a test item. + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_collectreport(report: CollectReport) -> None: + """Collector finished collecting. + + :param report: + The collect report. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. + """ + + +def pytest_deselected(items: Sequence[Item]) -> None: + """Called for deselected test items, e.g. by keyword. + + Note that this hook has two integration aspects for plugins: + + - it can be *implemented* to be notified of deselected items + - it must be *called* from :hook:`pytest_collection_modifyitems` + implementations when items are deselected (to properly notify other plugins). + + May be called multiple times. + + :param items: + The items. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_make_collect_report(collector: Collector) -> CollectReport | None: + """Perform :func:`collector.collect() ` and return + a :class:`~pytest.CollectReport`. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collector: + The collector. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories are + consulted. 
+ """ + + +# ------------------------------------------------------------------------- +# Python test function related hooks +# ------------------------------------------------------------------------- + + +@hookspec( + firstresult=True, + warn_on_impl_args={ + "path": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="path", pathlib_path_arg="module_path" + ), + }, +) +def pytest_pycollect_makemodule( + module_path: Path, path: LEGACY_PATH, parent +) -> Module | None: + """Return a :class:`pytest.Module` collector or None for the given path. + + This hook will be called for each matching test module path. + The :hook:`pytest_collect_file` hook needs to be used if you want to + create test modules for files that do not match as a test module. + + Stops at first non-None result, see :ref:`firstresult`. + + :param module_path: The path of the module to collect. + :type module_path: pathlib.Path + :param path: The path of the module to collect (deprecated). + + .. versionchanged:: 7.0.0 + The ``module_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``path`` parameter. + + The ``path`` parameter has been deprecated in favor of ``fspath``. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given parent collector, + only conftest files in the collector's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_pycollect_makeitem( + collector: Module | Class, name: str, obj: object +) -> None | Item | Collector | list[Item | Collector]: + """Return a custom item/collector for a Python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult`. + + :param collector: + The module/class collector. + :param name: + The name of the object in the module/class. + :param obj: + The object. + :returns: + The created items/collectors. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given collector, only + conftest files in the collector's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_pyfunc_call(pyfuncitem: Function) -> object | None: + """Call underlying test function. + + Stops at first non-None result, see :ref:`firstresult`. + + :param pyfuncitem: + The function item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only + conftest files in the item's directory and its parent directories + are consulted. + """ + + +def pytest_generate_tests(metafunc: Metafunc) -> None: + """Generate (multiple) parametrized calls to a test function. + + :param metafunc: + The :class:`~pytest.Metafunc` helper for the test function. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given function definition, + only conftest files in the functions's directory and its parent directories + are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_make_parametrize_id(config: Config, val: object, argname: str) -> str | None: + """Return a user-friendly string representation of the given ``val`` + that will be used by @pytest.mark.parametrize calls, or None if the hook + doesn't know about ``val``. + + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult`. + + :param config: The pytest config object. + :param val: The parametrized value. 
+ :param argname: The automatic parameter name produced by pytest. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +# ------------------------------------------------------------------------- +# runtest related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_runtestloop(session: Session) -> object | None: + """Perform the main runtest loop (after collection finished). + + The default hook implementation performs the runtest protocol for all items + collected in the session (``session.items``), unless the collection failed + or the ``collectonly`` pytest option is set. + + If at any point :py:func:`pytest.exit` is called, the loop is + terminated immediately. + + If at any point ``session.shouldfail`` or ``session.shouldstop`` are set, the + loop is terminated after the runtest protocol for the current item is finished. + + :param session: The pytest session object. + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_protocol(item: Item, nextitem: Item | None) -> object | None: + """Perform the runtest protocol for a single test item. + + The default runtest protocol is this (see individual hooks for full details): + + - ``pytest_runtest_logstart(nodeid, location)`` + + - Setup phase: + - ``call = pytest_runtest_setup(item)`` (wrapped in ``CallInfo(when="setup")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Call phase, if the setup passed and the ``setuponly`` pytest option is not set: + - ``call = pytest_runtest_call(item)`` (wrapped in ``CallInfo(when="call")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - Teardown phase: + - ``call = pytest_runtest_teardown(item, nextitem)`` (wrapped in ``CallInfo(when="teardown")``) + - ``report = pytest_runtest_makereport(item, call)`` + - ``pytest_runtest_logreport(report)`` + - ``pytest_exception_interact(call, report)`` if an interactive exception occurred + + - ``pytest_runtest_logfinish(nodeid, location)`` + + :param item: Test item for which the runtest protocol is performed. + :param nextitem: The scheduled-to-be-next test item (or None if this is the end my friend). + + Stops at first non-None result, see :ref:`firstresult`. + The return value is not used, but only stops further processing. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +def pytest_runtest_logstart(nodeid: str, location: tuple[str, int | None, str]) -> None: + """Called at the start of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)`` + where ``filename`` is a file path relative to ``config.rootpath`` + and ``lineno`` is 0-based. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. 
For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_logfinish( + nodeid: str, location: tuple[str, int | None, str] +) -> None: + """Called at the end of running the runtest protocol for a single item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param nodeid: Full node ID of the item. + :param location: A tuple of ``(filename, lineno, testname)`` + where ``filename`` is a file path relative to ``config.rootpath`` + and ``lineno`` is 0-based. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_setup(item: Item) -> None: + """Called to perform the setup phase for a test item. + + The default implementation runs ``setup()`` on ``item`` and all of its + parents (which haven't been setup yet). This includes obtaining the + values of fixtures required by the item (which haven't been obtained + yet). + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_call(item: Item) -> None: + """Called to run the test for test item (the call phase). + + The default implementation calls ``item.runtest()``. + + :param item: + The item. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_teardown(item: Item, nextitem: Item | None) -> None: + """Called to perform the teardown phase for a test item. + + The default implementation runs the finalizers and calls ``teardown()`` + on ``item`` and all of its parents (which need to be torn down). This + includes running the teardown phase of fixtures required by the item (if + they go out of scope). + + :param item: + The item. + :param nextitem: + The scheduled-to-be-next test item (None if no further test item is + scheduled). This argument is used to perform exact teardowns, i.e. + calling just enough finalizers so that nextitem only needs to call + setup functions. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport | None: + """Called to create a :class:`~pytest.TestReport` for each of + the setup, call and teardown runtest phases of a test item. + + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + :param item: The item. + :param call: The :class:`~pytest.CallInfo` for the phase. + + Stops at first non-None result, see :ref:`firstresult`. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_runtest_logreport(report: TestReport) -> None: + """Process the :class:`~pytest.TestReport` produced for each + of the setup, call and teardown runtest phases of an item. 
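+
+    For example (an illustrative sketch), a ``conftest.py`` could record each
+    failing phase::
+
+        def pytest_runtest_logreport(report):
+            if report.failed:
+                print(f"{report.nodeid} failed during its {report.when} phase")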
+ + See :hook:`pytest_runtest_protocol` for a description of the runtest protocol. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +@hookspec(firstresult=True) +def pytest_report_to_serializable( + config: Config, + report: CollectReport | TestReport, +) -> dict[str, Any] | None: + """Serialize the given report object into a data structure suitable for + sending over the wire, e.g. converted to JSON. + + :param config: The pytest config object. + :param report: The report. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. The exact details may depend + on the plugin which calls the hook. + """ + + +@hookspec(firstresult=True) +def pytest_report_from_serializable( + config: Config, + data: dict[str, Any], +) -> CollectReport | TestReport | None: + """Restore a report object previously serialized with + :hook:`pytest_report_to_serializable`. + + :param config: The pytest config object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. The exact details may depend + on the plugin which calls the hook. + """ + + +# ------------------------------------------------------------------------- +# Fixture related hooks +# ------------------------------------------------------------------------- + + +@hookspec(firstresult=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[Any], request: SubRequest +) -> object | None: + """Perform fixture setup execution. + + :param fixturedef: + The fixture definition object. + :param request: + The fixture request object. + :returns: + The return value of the call to the fixture function. + + Stops at first non-None result, see :ref:`firstresult`. + + .. note:: + If the fixture function returns None, other implementations of + this hook function will continue to be called, according to the + behavior of the :ref:`firstresult` option. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given fixture, only + conftest files in the fixture scope's directory and its parent directories + are consulted. + """ + + +def pytest_fixture_post_finalizer( + fixturedef: FixtureDef[Any], request: SubRequest +) -> None: + """Called after fixture teardown, but before the cache is cleared, so + the fixture result ``fixturedef.cached_result`` is still available (not + ``None``). + + :param fixturedef: + The fixture definition object. + :param request: + The fixture request object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given fixture, only + conftest files in the fixture scope's directory and its parent directories + are consulted. + """ + + +# ------------------------------------------------------------------------- +# test session related hooks +# ------------------------------------------------------------------------- + + +def pytest_sessionstart(session: Session) -> None: + """Called after the ``Session`` object has been created and before performing collection + and entering the run test loop. + + :param session: The pytest session object. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. 
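+
+    For example (an illustrative sketch; the attribute name is hypothetical),
+    a plugin could stamp the session with its start time::
+
+        import time
+
+        def pytest_sessionstart(session):
+            session.my_start_time = time.time()  # hypothetical attribute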
+ """ + + +def pytest_sessionfinish( + session: Session, + exitstatus: int | ExitCode, +) -> None: + """Called after whole test run finished, right before returning the exit status to the system. + + :param session: The pytest session object. + :param exitstatus: The status which pytest will return to the system. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +def pytest_unconfigure(config: Config) -> None: + """Called before test process is exited. + + :param config: The pytest config object. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. + """ + + +# ------------------------------------------------------------------------- +# hooks for customizing the assert methods +# ------------------------------------------------------------------------- + + +def pytest_assertrepr_compare( + config: Config, op: str, left: object, right: object +) -> list[str] | None: + """Return explanation for comparisons in failing assert expressions. + + Return None for no custom explanation, otherwise return a list + of strings. The strings will be joined by newlines but any newlines + *in* a string will be escaped. Note that all but the first line will + be indented slightly, the intention is for the first line to be a summary. + + :param config: The pytest config object. + :param op: The operator, e.g. `"=="`, `"!="`, `"not in"`. + :param left: The left operand. + :param right: The right operand. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +def pytest_assertion_pass(item: Item, lineno: int, orig: str, expl: str) -> None: + """Called whenever an assertion passes. + + .. versionadded:: 5.0 + + Use this hook to do some processing after a passing assertion. + The original assertion information is available in the `orig` string + and the pytest introspected assertion information is available in the + `expl` string. + + This hook must be explicitly enabled by the ``enable_assertion_pass_hook`` + ini-file option: + + .. code-block:: ini + + [pytest] + enable_assertion_pass_hook=true + + You need to **clean the .pyc** files in your project directory and interpreter libraries + when enabling this option, as assertions will require to be re-written. + + :param item: pytest item object of current test. + :param lineno: Line number of the assert statement. + :param orig: String with the original assertion. + :param expl: String with the assert explanation. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in the item's directory and its parent directories are consulted. + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing reporting (invoked from _pytest_terminal). +# ------------------------------------------------------------------------- + + +@hookspec( + warn_on_impl_args={ + "startdir": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="startdir", pathlib_path_arg="start_path" + ), + }, +) +def pytest_report_header( # type:ignore[empty-body] + config: Config, start_path: Path, startdir: LEGACY_PATH +) -> str | list[str]: + """Return a string or list of strings to be displayed as header info for terminal reporting. + + :param config: The pytest config object. 
+ :param start_path: The starting dir. + :type start_path: pathlib.Path + :param startdir: The starting dir (deprecated). + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + This hook is only called for :ref:`initial conftests `. + """ + + +@hookspec( + warn_on_impl_args={ + "startdir": HOOK_LEGACY_PATH_ARG.format( + pylib_path_arg="startdir", pathlib_path_arg="start_path" + ), + }, +) +def pytest_report_collectionfinish( # type:ignore[empty-body] + config: Config, + start_path: Path, + startdir: LEGACY_PATH, + items: Sequence[Item], +) -> str | list[str]: + """Return a string or list of strings to be displayed after collection + has finished successfully. + + These strings will be displayed after the standard "collected X items" message. + + .. versionadded:: 3.2 + + :param config: The pytest config object. + :param start_path: The starting dir. + :type start_path: pathlib.Path + :param startdir: The starting dir (deprecated). + :param items: List of pytest items that are going to be executed; this list should not be modified. + + .. note:: + + Lines returned by a plugin are displayed before those of plugins which + ran before it. + If you want to have your line(s) displayed first, use + :ref:`trylast=True `. + + .. versionchanged:: 7.0.0 + The ``start_path`` parameter was added as a :class:`pathlib.Path` + equivalent of the ``startdir`` parameter. The ``startdir`` parameter + has been deprecated. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +@hookspec(firstresult=True) +def pytest_report_teststatus( # type:ignore[empty-body] + report: CollectReport | TestReport, config: Config +) -> TestShortLogReport | tuple[str, str, str | tuple[str, Mapping[str, bool]]]: + """Return result-category, shortletter and verbose word for status + reporting. + + The result-category is a category in which to count the result, for + example "passed", "skipped", "error" or the empty string. + + The shortletter is shown as testing progresses, for example ".", "s", + "E" or the empty string. + + The verbose word is shown as testing progresses in verbose mode, for + example "PASSED", "SKIPPED", "ERROR" or the empty string. + + pytest may style these implicitly according to the report outcome. + To provide explicit styling, return a tuple for the verbose word, + for example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :param report: The report object whose status is to be returned. + :param config: The pytest config object. + :returns: The test status. + + Stops at first non-None result, see :ref:`firstresult`. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_terminal_summary( + terminalreporter: TerminalReporter, + exitstatus: ExitCode, + config: Config, +) -> None: + """Add a section to terminal summary reporting. + + :param terminalreporter: The internal terminal reporter object. + :param exitstatus: The exit status that will be reported back to the OS. + :param config: The pytest config object. + + .. versionadded:: 4.2 + The ``config`` parameter. 
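+
+    For example (an illustrative sketch), a ``conftest.py`` could append its
+    own section to the terminal summary::
+
+        def pytest_terminal_summary(terminalreporter, exitstatus, config):
+            terminalreporter.section("custom summary")
+            terminalreporter.line(f"finished with exit status {exitstatus!r}")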
+ + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +@hookspec(historic=True) +def pytest_warning_recorded( + warning_message: warnings.WarningMessage, + when: Literal["config", "collect", "runtest"], + nodeid: str, + location: tuple[str, int, str] | None, +) -> None: + """Process a warning captured by the internal pytest warnings plugin. + + :param warning_message: + The captured warning. This is the same object produced by :class:`warnings.catch_warnings`, + and contains the same attributes as the parameters of :py:func:`warnings.showwarning`. + + :param when: + Indicates when the warning was captured. Possible values: + + * ``"config"``: during pytest configuration/initialization stage. + * ``"collect"``: during test collection. + * ``"runtest"``: during test execution. + + :param nodeid: + Full id of the item. Empty string for warnings that are not specific to + a particular node. + + :param location: + When available, holds information about the execution context of the captured + warning (filename, linenumber, function). ``function`` evaluates to ``<module>`` + when the execution context is at the module level. + + .. versionadded:: 6.0 + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. If the warning is specific to a + particular node, only conftest files in parent directories of the node are + consulted. + """ + + +# ------------------------------------------------------------------------- +# Hooks for influencing skipping +# ------------------------------------------------------------------------- + + +def pytest_markeval_namespace( # type:ignore[empty-body] + config: Config, +) -> dict[str, Any]: + """Called when constructing the globals dictionary used for + evaluating string conditions in xfail/skipif markers. + + This is useful when the condition for a marker requires + objects that are expensive or impossible to obtain during + collection time, which is required by normal boolean + conditions. + + .. versionadded:: 6.2 + + :param config: The pytest config object. + :returns: A dictionary of additional globals to add. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given item, only conftest + files in parent directories of the item are consulted. + """ + + +# ------------------------------------------------------------------------- +# error handling and internal debugging hooks +# ------------------------------------------------------------------------- + + +def pytest_internalerror( + excrepr: ExceptionRepr, + excinfo: ExceptionInfo[BaseException], +) -> bool | None: + """Called for internal errors. + + Return True to suppress the fallback handling of printing an + INTERNALERROR message directly to sys.stderr. + + :param excrepr: The exception repr object. + :param excinfo: The exception info. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_keyboard_interrupt( + excinfo: ExceptionInfo[KeyboardInterrupt | Exit], +) -> None: + """Called for keyboard interrupt. + + :param excinfo: The exception info. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_exception_interact( + node: Item | Collector, + call: CallInfo[Any], + report: CollectReport | TestReport, +) -> None: + """Called when an exception was raised which can potentially be + interactively handled.
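+
+    For example (an illustrative sketch), a plugin could print extra
+    diagnostics for every such exception::
+
+        def pytest_exception_interact(node, call, report):
+            print(f"exception in {node.nodeid}: {call.excinfo.value!r}")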
+ + May be called during collection (see :hook:`pytest_make_collect_report`), + in which case ``report`` is a :class:`~pytest.CollectReport`. + + May be called during runtest of an item (see :hook:`pytest_runtest_protocol`), + in which case ``report`` is a :class:`~pytest.TestReport`. + + This hook is not called if the exception that was raised is an internal + exception like ``skip.Exception``. + + :param node: + The item or collector. + :param call: + The call information. Contains the exception. + :param report: + The collection or test report. + + Use in conftest plugins + ======================= + + Any conftest file can implement this hook. For a given node, only conftest + files in parent directories of the node are consulted. + """ + + +def pytest_enter_pdb(config: Config, pdb: pdb.Pdb) -> None: + """Called upon pdb.set_trace(). + + Can be used by plugins to take special action just before the python + debugger enters interactive mode. + + :param config: The pytest config object. + :param pdb: The Pdb instance. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ + + +def pytest_leave_pdb(config: Config, pdb: pdb.Pdb) -> None: + """Called when leaving pdb (e.g. with continue after pdb.set_trace()). + + Can be used by plugins to take special action just after the python + debugger leaves interactive mode. + + :param config: The pytest config object. + :param pdb: The Pdb instance. + + Use in conftest plugins + ======================= + + Any conftest plugin can implement this hook. + """ diff --git a/venv/Lib/site-packages/_pytest/junitxml.py b/venv/Lib/site-packages/_pytest/junitxml.py new file mode 100644 index 0000000..3a2cb59 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/junitxml.py @@ -0,0 +1,697 @@ +# mypy: allow-untyped-defs +"""Report test results in JUnit-XML format, for use with Jenkins and build +integration servers. + +Based on initial code from Ross Lawley. + +Output conforms to +https://github.com/jenkinsci/xunit-plugin/blob/master/src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd +""" + +from __future__ import annotations + +from datetime import datetime +from datetime import timezone +import functools +import os +import platform +import re +from typing import Callable +from typing import Match +import xml.etree.ElementTree as ET + +from _pytest import nodes +from _pytest import timing +from _pytest._code.code import ExceptionRepr +from _pytest._code.code import ReprFileLocation +from _pytest.config import Config +from _pytest.config import filename_arg +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureRequest +from _pytest.reports import TestReport +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter +import pytest + + +xml_key = StashKey["LogXML"]() + + +def bin_xml_escape(arg: object) -> str: + r"""Visually escape invalid XML characters. + + For example, transforms + 'hello\aworld\b' + into + 'hello#x07world#x08' + Note that the #xABs are *not* XML escapes - missing the ampersand ``&#``. + The idea is to escape visually for the user rather than for XML itself. + """ + + def repl(matchobj: Match[str]) -> str: + i = ord(matchobj.group()) + if i <= 0xFF: + return f"#x{i:02X}" + else: + return f"#x{i:04X}" + + # The spec range of valid chars is: + # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF] + # For an unknown(?) reason, we disallow #x7F (DEL) as well.
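+    # Illustrative example: ESC (0x1B) is <= 0xFF, so
+    # bin_xml_escape("fail\x1b!") returns "fail#x1B!".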
+ illegal_xml_re = ( + "[^\u0009\u000a\u000d\u0020-\u007e\u0080-\ud7ff\ue000-\ufffd\U00010000-\U0010ffff]" + ) + return re.sub(illegal_xml_re, repl, str(arg)) + + +def merge_family(left, right) -> None: + result = {} + for kl, vl in left.items(): + for kr, vr in right.items(): + if not isinstance(vl, list): + raise TypeError(type(vl)) + result[kl] = vl + vr + left.update(result) + + +families = {} +families["_base"] = {"testcase": ["classname", "name"]} +families["_base_legacy"] = {"testcase": ["file", "line", "url"]} + +# xUnit 1.x inherits legacy attributes. +families["xunit1"] = families["_base"].copy() +merge_family(families["xunit1"], families["_base_legacy"]) + +# xUnit 2.x uses strict base attributes. +families["xunit2"] = families["_base"] + + +class _NodeReporter: + def __init__(self, nodeid: str | TestReport, xml: LogXML) -> None: + self.id = nodeid + self.xml = xml + self.add_stats = self.xml.add_stats + self.family = self.xml.family + self.duration = 0.0 + self.properties: list[tuple[str, str]] = [] + self.nodes: list[ET.Element] = [] + self.attrs: dict[str, str] = {} + + def append(self, node: ET.Element) -> None: + self.xml.add_stats(node.tag) + self.nodes.append(node) + + def add_property(self, name: str, value: object) -> None: + self.properties.append((str(name), bin_xml_escape(value))) + + def add_attribute(self, name: str, value: object) -> None: + self.attrs[str(name)] = bin_xml_escape(value) + + def make_properties_node(self) -> ET.Element | None: + """Return a Junit node containing custom properties, if any.""" + if self.properties: + properties = ET.Element("properties") + for name, value in self.properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None + + def record_testreport(self, testreport: TestReport) -> None: + names = mangle_test_address(testreport.nodeid) + existing_attrs = self.attrs + classnames = names[:-1] + if self.xml.prefix: + classnames.insert(0, self.xml.prefix) + attrs: dict[str, str] = { + "classname": ".".join(classnames), + "name": bin_xml_escape(names[-1]), + "file": testreport.location[0], + } + if testreport.location[1] is not None: + attrs["line"] = str(testreport.location[1]) + if hasattr(testreport, "url"): + attrs["url"] = testreport.url + self.attrs = attrs + self.attrs.update(existing_attrs) # Restore any user-defined attributes. + + # Preserve legacy testcase behavior. + if self.family == "xunit1": + return + + # Filter out attributes not permitted by this test family. + # Including custom attributes because they are not valid here.
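+        # For example, with family "xunit2" only the strict base attributes
+        # "classname" and "name" survive; legacy "file"/"line"/"url" are dropped.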
+ temp_attrs = {} + for key in self.attrs: + if key in families[self.family]["testcase"]: + temp_attrs[key] = self.attrs[key] + self.attrs = temp_attrs + + def to_xml(self) -> ET.Element: + testcase = ET.Element("testcase", self.attrs, time=f"{self.duration:.3f}") + properties = self.make_properties_node() + if properties is not None: + testcase.append(properties) + testcase.extend(self.nodes) + return testcase + + def _add_simple(self, tag: str, message: str, data: str | None = None) -> None: + node = ET.Element(tag, message=message) + node.text = bin_xml_escape(data) + self.append(node) + + def write_captured_output(self, report: TestReport) -> None: + if not self.xml.log_passing_tests and report.passed: + return + + content_out = report.capstdout + content_log = report.caplog + content_err = report.capstderr + if self.xml.logging == "no": + return + content_all = "" + if self.xml.logging in ["log", "all"]: + content_all = self._prepare_content(content_log, " Captured Log ") + if self.xml.logging in ["system-out", "out-err", "all"]: + content_all += self._prepare_content(content_out, " Captured Out ") + self._write_content(report, content_all, "system-out") + content_all = "" + if self.xml.logging in ["system-err", "out-err", "all"]: + content_all += self._prepare_content(content_err, " Captured Err ") + self._write_content(report, content_all, "system-err") + content_all = "" + if content_all: + self._write_content(report, content_all, "system-out") + + def _prepare_content(self, content: str, header: str) -> str: + return "\n".join([header.center(80, "-"), content, ""]) + + def _write_content(self, report: TestReport, content: str, jheader: str) -> None: + tag = ET.Element(jheader) + tag.text = bin_xml_escape(content) + self.append(tag) + + def append_pass(self, report: TestReport) -> None: + self.add_stats("passed") + + def append_failure(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + if hasattr(report, "wasxfail"): + self._add_simple("skipped", "xfail-marked test passes unexpectedly") + else: + assert report.longrepr is not None + reprcrash: ReprFileLocation | None = getattr( + report.longrepr, "reprcrash", None + ) + if reprcrash is not None: + message = reprcrash.message + else: + message = str(report.longrepr) + message = bin_xml_escape(message) + self._add_simple("failure", message, str(report.longrepr)) + + def append_collect_error(self, report: TestReport) -> None: + # msg = str(report.longrepr.reprtraceback.extraline) + assert report.longrepr is not None + self._add_simple("error", "collection failure", str(report.longrepr)) + + def append_collect_skipped(self, report: TestReport) -> None: + self._add_simple("skipped", "collection skipped", str(report.longrepr)) + + def append_error(self, report: TestReport) -> None: + assert report.longrepr is not None + reprcrash: ReprFileLocation | None = getattr(report.longrepr, "reprcrash", None) + if reprcrash is not None: + reason = reprcrash.message + else: + reason = str(report.longrepr) + + if report.when == "teardown": + msg = f'failed on teardown with "{reason}"' + else: + msg = f'failed on setup with "{reason}"' + self._add_simple("error", bin_xml_escape(msg), str(report.longrepr)) + + def append_skipped(self, report: TestReport) -> None: + if hasattr(report, "wasxfail"): + xfailreason = report.wasxfail + if xfailreason.startswith("reason: "): + xfailreason = xfailreason[8:] + xfailreason = bin_xml_escape(xfailreason) + skipped = ET.Element("skipped", type="pytest.xfail", 
message=xfailreason) + self.append(skipped) + else: + assert isinstance(report.longrepr, tuple) + filename, lineno, skipreason = report.longrepr + if skipreason.startswith("Skipped: "): + skipreason = skipreason[9:] + details = f"{filename}:{lineno}: {skipreason}" + + skipped = ET.Element( + "skipped", type="pytest.skip", message=bin_xml_escape(skipreason) + ) + skipped.text = bin_xml_escape(details) + self.append(skipped) + self.write_captured_output(report) + + def finalize(self) -> None: + data = self.to_xml() + self.__dict__.clear() + # Type ignored because mypy doesn't like overriding a method. + # Also the return value doesn't match... + self.to_xml = lambda: data # type: ignore[method-assign] + + +def _warn_incompatibility_with_xunit2( + request: FixtureRequest, fixture_name: str +) -> None: + """Emit a PytestWarning about the given fixture being incompatible with newer xunit revisions.""" + from _pytest.warning_types import PytestWarning + + xml = request.config.stash.get(xml_key, None) + if xml is not None and xml.family not in ("xunit1", "legacy"): + request.node.warn( + PytestWarning( + f"{fixture_name} is incompatible with junit_family '{xml.family}' (use 'legacy' or 'xunit1')" + ) + ) + + +@pytest.fixture +def record_property(request: FixtureRequest) -> Callable[[str, object], None]: + """Add extra properties to the calling test. + + User properties become part of the test report and are available to the + configured reporters, like JUnit XML. + + The fixture is callable with ``name, value``. The value is automatically + XML-encoded. + + Example:: + + def test_function(record_property): + record_property("example_key", 1) + """ + _warn_incompatibility_with_xunit2(request, "record_property") + + def append_property(name: str, value: object) -> None: + request.node.user_properties.append((name, value)) + + return append_property + + +@pytest.fixture +def record_xml_attribute(request: FixtureRequest) -> Callable[[str, object], None]: + """Add extra xml attributes to the tag for the calling test. + + The fixture is callable with ``name, value``. The value is + automatically XML-encoded. + """ + from _pytest.warning_types import PytestExperimentalApiWarning + + request.node.warn( + PytestExperimentalApiWarning("record_xml_attribute is an experimental feature") + ) + + _warn_incompatibility_with_xunit2(request, "record_xml_attribute") + + # Declare noop + def add_attr_noop(name: str, value: object) -> None: + pass + + attr_func = add_attr_noop + + xml = request.config.stash.get(xml_key, None) + if xml is not None: + node_reporter = xml.node_reporter(request.node.nodeid) + attr_func = node_reporter.add_attribute + + return attr_func + + +def _check_record_param_type(param: str, v: str) -> None: + """Used by record_testsuite_property to check that the given parameter name is of the proper + type.""" + __tracebackhide__ = True + if not isinstance(v, str): + msg = "{param} parameter needs to be a string, but {g} given" # type: ignore[unreachable] + raise TypeError(msg.format(param=param, g=type(v).__name__)) + + +@pytest.fixture(scope="session") +def record_testsuite_property(request: FixtureRequest) -> Callable[[str, object], None]: + """Record a new ``<property>`` tag as child of the root ``<testsuite>``. + + This is suitable for writing global information regarding the entire test + suite, and is compatible with the ``xunit2`` JUnit family. + + This is a ``session``-scoped fixture which is called with ``(name, value)``. Example: + + ..
code-block:: python + + def test_foo(record_testsuite_property): + record_testsuite_property("ARCH", "PPC") + record_testsuite_property("STORAGE_TYPE", "CEPH") + + :param name: + The property name. + :param value: + The property value. Will be converted to a string. + + .. warning:: + + Currently this fixture **does not work** with the + `pytest-xdist `__ plugin. See + :issue:`7767` for details. + """ + __tracebackhide__ = True + + def record_func(name: str, value: object) -> None: + """No-op function in case --junit-xml was not passed in the command-line.""" + __tracebackhide__ = True + _check_record_param_type("name", name) + + xml = request.config.stash.get(xml_key, None) + if xml is not None: + record_func = xml.add_global_property + return record_func + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group.addoption( + "--junitxml", + "--junit-xml", + action="store", + dest="xmlpath", + metavar="path", + type=functools.partial(filename_arg, optname="--junitxml"), + default=None, + help="Create junit-xml style report file at given path", + ) + group.addoption( + "--junitprefix", + "--junit-prefix", + action="store", + metavar="str", + default=None, + help="Prepend prefix to classnames in junit-xml output", + ) + parser.addini( + "junit_suite_name", "Test suite name for JUnit report", default="pytest" + ) + parser.addini( + "junit_logging", + "Write captured log messages to JUnit report: " + "one of no|log|system-out|system-err|out-err|all", + default="no", + ) + parser.addini( + "junit_log_passing_tests", + "Capture log information for passing tests to JUnit report: ", + type="bool", + default=True, + ) + parser.addini( + "junit_duration_report", + "Duration time to report: one of total|call", + default="total", + ) # choices=['total', 'call']) + parser.addini( + "junit_family", + "Emit XML for schema: one of legacy|xunit1|xunit2", + default="xunit2", + ) + + +def pytest_configure(config: Config) -> None: + xmlpath = config.option.xmlpath + # Prevent opening xmllog on worker nodes (xdist). + if xmlpath and not hasattr(config, "workerinput"): + junit_family = config.getini("junit_family") + config.stash[xml_key] = LogXML( + xmlpath, + config.option.junitprefix, + config.getini("junit_suite_name"), + config.getini("junit_logging"), + config.getini("junit_duration_report"), + junit_family, + config.getini("junit_log_passing_tests"), + ) + config.pluginmanager.register(config.stash[xml_key]) + + +def pytest_unconfigure(config: Config) -> None: + xml = config.stash.get(xml_key, None) + if xml: + del config.stash[xml_key] + config.pluginmanager.unregister(xml) + + +def mangle_test_address(address: str) -> list[str]: + path, possible_open_bracket, params = address.partition("[") + names = path.split("::") + # Convert file path to dotted path. + names[0] = names[0].replace(nodes.SEP, ".") + names[0] = re.sub(r"\.py$", "", names[0]) + # Put any params back. 
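+    # Illustrative example: "tests/test_mod.py::TestCls::test_fn[a-1]"
+    # becomes ["tests.test_mod", "TestCls", "test_fn[a-1]"].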
+ names[-1] += possible_open_bracket + params + return names + + +class LogXML: + def __init__( + self, + logfile, + prefix: str | None, + suite_name: str = "pytest", + logging: str = "no", + report_duration: str = "total", + family="xunit1", + log_passing_tests: bool = True, + ) -> None: + logfile = os.path.expanduser(os.path.expandvars(logfile)) + self.logfile = os.path.normpath(os.path.abspath(logfile)) + self.prefix = prefix + self.suite_name = suite_name + self.logging = logging + self.log_passing_tests = log_passing_tests + self.report_duration = report_duration + self.family = family + self.stats: dict[str, int] = dict.fromkeys( + ["error", "passed", "failure", "skipped"], 0 + ) + self.node_reporters: dict[tuple[str | TestReport, object], _NodeReporter] = {} + self.node_reporters_ordered: list[_NodeReporter] = [] + self.global_properties: list[tuple[str, str]] = [] + + # List of reports that failed on call but teardown is pending. + self.open_reports: list[TestReport] = [] + self.cnt_double_fail_tests = 0 + + # Replaces convenience family with real family. + if self.family == "legacy": + self.family = "xunit1" + + def finalize(self, report: TestReport) -> None: + nodeid = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + reporter = self.node_reporters.pop((nodeid, workernode)) + + for propname, propvalue in report.user_properties: + reporter.add_property(propname, str(propvalue)) + + if reporter is not None: + reporter.finalize() + + def node_reporter(self, report: TestReport | str) -> _NodeReporter: + nodeid: str | TestReport = getattr(report, "nodeid", report) + # Local hack to handle xdist report order. + workernode = getattr(report, "node", None) + + key = nodeid, workernode + + if key in self.node_reporters: + # TODO: breaks for --dist=each + return self.node_reporters[key] + + reporter = _NodeReporter(nodeid, self) + + self.node_reporters[key] = reporter + self.node_reporters_ordered.append(reporter) + + return reporter + + def add_stats(self, key: str) -> None: + if key in self.stats: + self.stats[key] += 1 + + def _opentestcase(self, report: TestReport) -> _NodeReporter: + reporter = self.node_reporter(report) + reporter.record_testreport(report) + return reporter + + def pytest_runtest_logreport(self, report: TestReport) -> None: + """Handle a setup/call/teardown report, generating the appropriate + XML tags as necessary. + + Note: due to plugins like xdist, this hook may be called in interlaced + order with reports from other nodes. For example: + + Usual call order: + -> setup node1 + -> call node1 + -> teardown node1 + -> setup node2 + -> call node2 + -> teardown node2 + + Possible call order in xdist: + -> setup node1 + -> call node1 + -> setup node2 + -> call node2 + -> teardown node2 + -> teardown node1 + """ + close_report = None + if report.passed: + if report.when == "call": # ignore setup/teardown + reporter = self._opentestcase(report) + reporter.append_pass(report) + elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used. 
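+                # Under xdist, reports from different workers can interleave,
+                # so worker_id and item_index are used below to match this
+                # teardown report with the earlier failed call report of the
+                # same test.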
+ report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + # We need to open new testcase in case we have failure in + # call and error in teardown in order to follow junit + # schema. + self.finalize(close_report) + self.cnt_double_fail_tests += 1 + reporter = self._opentestcase(report) + if report.when == "call": + reporter.append_failure(report) + self.open_reports.append(report) + if not self.log_passing_tests: + reporter.write_captured_output(report) + else: + reporter.append_error(report) + elif report.skipped: + reporter = self._opentestcase(report) + reporter.append_skipped(report) + self.update_testcase_duration(report) + if report.when == "teardown": + reporter = self._opentestcase(report) + reporter.write_captured_output(report) + + self.finalize(report) + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + ( + rep + for rep in self.open_reports + if ( + rep.nodeid == report.nodeid + and getattr(rep, "item_index", None) == report_ii + and getattr(rep, "worker_id", None) == report_wid + ) + ), + None, + ) + if close_report: + self.open_reports.remove(close_report) + + def update_testcase_duration(self, report: TestReport) -> None: + """Accumulate total duration for nodeid from given report and update + the Junit.testcase with the new total if already created.""" + if self.report_duration in {"total", report.when}: + reporter = self.node_reporter(report) + reporter.duration += getattr(report, "duration", 0.0) + + def pytest_collectreport(self, report: TestReport) -> None: + if not report.passed: + reporter = self._opentestcase(report) + if report.failed: + reporter.append_collect_error(report) + else: + reporter.append_collect_skipped(report) + + def pytest_internalerror(self, excrepr: ExceptionRepr) -> None: + reporter = self.node_reporter("internal") + reporter.attrs.update(classname="pytest", name="internal") + reporter._add_simple("error", "internal error", str(excrepr)) + + def pytest_sessionstart(self) -> None: + self.suite_start_time = timing.time() + + def pytest_sessionfinish(self) -> None: + dirname = os.path.dirname(os.path.abspath(self.logfile)) + # exist_ok avoids filesystem race conditions between checking path existence and requesting creation + os.makedirs(dirname, exist_ok=True) + + with open(self.logfile, "w", encoding="utf-8") as logfile: + suite_stop_time = timing.time() + suite_time_delta = suite_stop_time - self.suite_start_time + + numtests = ( + self.stats["passed"] + + self.stats["failure"] + + self.stats["skipped"] + + self.stats["error"] + - self.cnt_double_fail_tests + ) + logfile.write('<?xml version="1.0" encoding="utf-8"?>') + + suite_node = ET.Element( + "testsuite", + name=self.suite_name, + errors=str(self.stats["error"]), + failures=str(self.stats["failure"]), + skipped=str(self.stats["skipped"]), + tests=str(numtests), + time=f"{suite_time_delta:.3f}", + timestamp=datetime.fromtimestamp(self.suite_start_time, timezone.utc) + .astimezone() + .isoformat(), + hostname=platform.node(), + ) + global_properties = self._get_global_properties_node() + if global_properties is not None: + suite_node.append(global_properties) + for node_reporter in self.node_reporters_ordered: + suite_node.append(node_reporter.to_xml()) + testsuites =
ET.Element("testsuites") + testsuites.append(suite_node) + logfile.write(ET.tostring(testsuites, encoding="unicode")) + + def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None: + terminalreporter.write_sep("-", f"generated xml file: {self.logfile}") + + def add_global_property(self, name: str, value: object) -> None: + __tracebackhide__ = True + _check_record_param_type("name", name) + self.global_properties.append((name, bin_xml_escape(value))) + + def _get_global_properties_node(self) -> ET.Element | None: + """Return a Junit node containing custom properties, if any.""" + if self.global_properties: + properties = ET.Element("properties") + for name, value in self.global_properties: + properties.append(ET.Element("property", name=name, value=value)) + return properties + return None diff --git a/venv/Lib/site-packages/_pytest/legacypath.py b/venv/Lib/site-packages/_pytest/legacypath.py new file mode 100644 index 0000000..61476d6 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/legacypath.py @@ -0,0 +1,473 @@ +# mypy: allow-untyped-defs +"""Add backward compatibility support for the legacy py path type.""" + +from __future__ import annotations + +import dataclasses +from pathlib import Path +import shlex +import subprocess +from typing import Final +from typing import final +from typing import TYPE_CHECKING + +from iniconfig import SectionWrapper + +from _pytest.cacheprovider import Cache +from _pytest.compat import LEGACY_PATH +from _pytest.compat import legacy_path +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pytester import HookRecorder +from _pytest.pytester import Pytester +from _pytest.pytester import RunResult +from _pytest.terminal import TerminalReporter +from _pytest.tmpdir import TempPathFactory + + +if TYPE_CHECKING: + import pexpect + + +@final +class Testdir: + """ + Similar to :class:`Pytester`, but this class works with legacy legacy_path objects instead. + + All methods just forward to an internal :class:`Pytester` instance, converting results + to `legacy_path` objects as necessary. 
+ """ + + __test__ = False + + CLOSE_STDIN: Final = Pytester.CLOSE_STDIN + TimeoutExpired: Final = Pytester.TimeoutExpired + + def __init__(self, pytester: Pytester, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._pytester = pytester + + @property + def tmpdir(self) -> LEGACY_PATH: + """Temporary directory where tests are executed.""" + return legacy_path(self._pytester.path) + + @property + def test_tmproot(self) -> LEGACY_PATH: + return legacy_path(self._pytester._test_tmproot) + + @property + def request(self): + return self._pytester._request + + @property + def plugins(self): + return self._pytester.plugins + + @plugins.setter + def plugins(self, plugins): + self._pytester.plugins = plugins + + @property + def monkeypatch(self) -> MonkeyPatch: + return self._pytester._monkeypatch + + def make_hook_recorder(self, pluginmanager) -> HookRecorder: + """See :meth:`Pytester.make_hook_recorder`.""" + return self._pytester.make_hook_recorder(pluginmanager) + + def chdir(self) -> None: + """See :meth:`Pytester.chdir`.""" + return self._pytester.chdir() + + def finalize(self) -> None: + return self._pytester._finalize() + + def makefile(self, ext, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makefile`.""" + if ext and not ext.startswith("."): + # pytester.makefile is going to throw a ValueError in a way that + # testdir.makefile did not, because + # pathlib.Path is stricter suffixes than py.path + # This ext arguments is likely user error, but since testdir has + # allowed this, we will prepend "." as a workaround to avoid breaking + # testdir usage that worked before + ext = "." + ext + return legacy_path(self._pytester.makefile(ext, *args, **kwargs)) + + def makeconftest(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeconftest`.""" + return legacy_path(self._pytester.makeconftest(source)) + + def makeini(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makeini`.""" + return legacy_path(self._pytester.makeini(source)) + + def getinicfg(self, source: str) -> SectionWrapper: + """See :meth:`Pytester.getinicfg`.""" + return self._pytester.getinicfg(source) + + def makepyprojecttoml(self, source) -> LEGACY_PATH: + """See :meth:`Pytester.makepyprojecttoml`.""" + return legacy_path(self._pytester.makepyprojecttoml(source)) + + def makepyfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.makepyfile`.""" + return legacy_path(self._pytester.makepyfile(*args, **kwargs)) + + def maketxtfile(self, *args, **kwargs) -> LEGACY_PATH: + """See :meth:`Pytester.maketxtfile`.""" + return legacy_path(self._pytester.maketxtfile(*args, **kwargs)) + + def syspathinsert(self, path=None) -> None: + """See :meth:`Pytester.syspathinsert`.""" + return self._pytester.syspathinsert(path) + + def mkdir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkdir`.""" + return legacy_path(self._pytester.mkdir(name)) + + def mkpydir(self, name) -> LEGACY_PATH: + """See :meth:`Pytester.mkpydir`.""" + return legacy_path(self._pytester.mkpydir(name)) + + def copy_example(self, name=None) -> LEGACY_PATH: + """See :meth:`Pytester.copy_example`.""" + return legacy_path(self._pytester.copy_example(name)) + + def getnode(self, config: Config, arg) -> Item | Collector | None: + """See :meth:`Pytester.getnode`.""" + return self._pytester.getnode(config, arg) + + def getpathnode(self, path): + """See :meth:`Pytester.getpathnode`.""" + return self._pytester.getpathnode(path) + + def genitems(self, colitems: list[Item | Collector]) -> list[Item]: + """See 
:meth:`Pytester.genitems`.""" + return self._pytester.genitems(colitems) + + def runitem(self, source): + """See :meth:`Pytester.runitem`.""" + return self._pytester.runitem(source) + + def inline_runsource(self, source, *cmdlineargs): + """See :meth:`Pytester.inline_runsource`.""" + return self._pytester.inline_runsource(source, *cmdlineargs) + + def inline_genitems(self, *args): + """See :meth:`Pytester.inline_genitems`.""" + return self._pytester.inline_genitems(*args) + + def inline_run(self, *args, plugins=(), no_reraise_ctrlc: bool = False): + """See :meth:`Pytester.inline_run`.""" + return self._pytester.inline_run( + *args, plugins=plugins, no_reraise_ctrlc=no_reraise_ctrlc + ) + + def runpytest_inprocess(self, *args, **kwargs) -> RunResult: + """See :meth:`Pytester.runpytest_inprocess`.""" + return self._pytester.runpytest_inprocess(*args, **kwargs) + + def runpytest(self, *args, **kwargs) -> RunResult: + """See :meth:`Pytester.runpytest`.""" + return self._pytester.runpytest(*args, **kwargs) + + def parseconfig(self, *args) -> Config: + """See :meth:`Pytester.parseconfig`.""" + return self._pytester.parseconfig(*args) + + def parseconfigure(self, *args) -> Config: + """See :meth:`Pytester.parseconfigure`.""" + return self._pytester.parseconfigure(*args) + + def getitem(self, source, funcname="test_func"): + """See :meth:`Pytester.getitem`.""" + return self._pytester.getitem(source, funcname) + + def getitems(self, source): + """See :meth:`Pytester.getitems`.""" + return self._pytester.getitems(source) + + def getmodulecol(self, source, configargs=(), withinit=False): + """See :meth:`Pytester.getmodulecol`.""" + return self._pytester.getmodulecol( + source, configargs=configargs, withinit=withinit + ) + + def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None: + """See :meth:`Pytester.collect_by_name`.""" + return self._pytester.collect_by_name(modcol, name) + + def popen( + self, + cmdargs, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=CLOSE_STDIN, + **kw, + ): + """See :meth:`Pytester.popen`.""" + return self._pytester.popen(cmdargs, stdout, stderr, stdin, **kw) + + def run(self, *cmdargs, timeout=None, stdin=CLOSE_STDIN) -> RunResult: + """See :meth:`Pytester.run`.""" + return self._pytester.run(*cmdargs, timeout=timeout, stdin=stdin) + + def runpython(self, script) -> RunResult: + """See :meth:`Pytester.runpython`.""" + return self._pytester.runpython(script) + + def runpython_c(self, command): + """See :meth:`Pytester.runpython_c`.""" + return self._pytester.runpython_c(command) + + def runpytest_subprocess(self, *args, timeout=None) -> RunResult: + """See :meth:`Pytester.runpytest_subprocess`.""" + return self._pytester.runpytest_subprocess(*args, timeout=timeout) + + def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """See :meth:`Pytester.spawn_pytest`.""" + return self._pytester.spawn_pytest(string, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """See :meth:`Pytester.spawn`.""" + return self._pytester.spawn(cmd, expect_timeout=expect_timeout) + + def __repr__(self) -> str: + return f"<Testdir {self.tmpdir!r}>" + + def __str__(self) -> str: + return str(self.tmpdir) + + +class LegacyTestdirPlugin: + @staticmethod + @fixture + def testdir(pytester: Pytester) -> Testdir: + """ + Identical to :fixture:`pytester`, and provides an instance whose methods return + legacy ``LEGACY_PATH`` objects instead when applicable.
+ + New code should avoid using :fixture:`testdir` in favor of :fixture:`pytester`. + """ + return Testdir(pytester, _ispytest=True) + + +@final +@dataclasses.dataclass +class TempdirFactory: + """Backward compatibility wrapper that implements ``py.path.local`` + for :class:`TempPathFactory`. + + .. note:: + These days, it is preferred to use ``tmp_path_factory``. + + :ref:`About the tmpdir and tmpdir_factory fixtures`. + + """ + + _tmppath_factory: TempPathFactory + + def __init__( + self, tmppath_factory: TempPathFactory, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + self._tmppath_factory = tmppath_factory + + def mktemp(self, basename: str, numbered: bool = True) -> LEGACY_PATH: + """Same as :meth:`TempPathFactory.mktemp`, but returns a ``py.path.local`` object.""" + return legacy_path(self._tmppath_factory.mktemp(basename, numbered).resolve()) + + def getbasetemp(self) -> LEGACY_PATH: + """Same as :meth:`TempPathFactory.getbasetemp`, but returns a ``py.path.local`` object.""" + return legacy_path(self._tmppath_factory.getbasetemp().resolve()) + + +class LegacyTmpdirPlugin: + @staticmethod + @fixture(scope="session") + def tmpdir_factory(request: FixtureRequest) -> TempdirFactory: + """Return a :class:`pytest.TempdirFactory` instance for the test session.""" + # Set dynamically by pytest_configure(). + return request.config._tmpdirhandler # type: ignore + + @staticmethod + @fixture + def tmpdir(tmp_path: Path) -> LEGACY_PATH: + """Return a temporary directory path object which is unique to each test + function invocation, created as a sub directory of the base temporary + directory. + + By default, a new base temporary directory is created each test session, + and old bases are removed after 3 sessions, to aid in debugging. If + ``--basetemp`` is used then it is cleared each session. See + :ref:`temporary directory location and retention`. + + The returned object is a `legacy_path`_ object. + + .. note:: + These days, it is preferred to use ``tmp_path``. + + :ref:`About the tmpdir and tmpdir_factory fixtures`. + + .. _legacy_path: https://py.readthedocs.io/en/latest/path.html + """ + return legacy_path(tmp_path) + + +def Cache_makedir(self: Cache, name: str) -> LEGACY_PATH: + """Return a directory path object with the given name. + + Same as :func:`mkdir`, but returns a legacy py path instance. + """ + return legacy_path(self.mkdir(name)) + + +def FixtureRequest_fspath(self: FixtureRequest) -> LEGACY_PATH: + """(deprecated) The file system path of the test module which collected this test.""" + return legacy_path(self.path) + + +def TerminalReporter_startdir(self: TerminalReporter) -> LEGACY_PATH: + """The directory from which pytest was invoked. + + Prefer to use ``startpath`` which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(self.startpath) + + +def Config_invocation_dir(self: Config) -> LEGACY_PATH: + """The directory from which pytest was invoked. + + Prefer to use :attr:`invocation_params.dir <InvocationParams.dir>`, + which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(str(self.invocation_params.dir)) + + +def Config_rootdir(self: Config) -> LEGACY_PATH: + """The path to the :ref:`rootdir <rootdir>`. + + Prefer to use :attr:`rootpath`, which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(str(self.rootpath)) + + +def Config_inifile(self: Config) -> LEGACY_PATH | None: + """The path to the :ref:`configfile <configfiles>`. + + Prefer to use :attr:`inipath`, which is a :class:`pathlib.Path`.
+ + :type: Optional[LEGACY_PATH] + """ + return legacy_path(str(self.inipath)) if self.inipath else None + + +def Session_startdir(self: Session) -> LEGACY_PATH: + """The path from which pytest was invoked. + + Prefer to use ``startpath`` which is a :class:`pathlib.Path`. + + :type: LEGACY_PATH + """ + return legacy_path(self.startpath) + + +def Config__getini_unknown_type(self, name: str, type: str, value: str | list[str]): + if type == "pathlist": + # TODO: This assert is probably not valid in all cases. + assert self.inipath is not None + dp = self.inipath.parent + input_values = shlex.split(value) if isinstance(value, str) else value + return [legacy_path(str(dp / x)) for x in input_values] + else: + raise ValueError(f"unknown configuration type: {type}", value) + + +def Node_fspath(self: Node) -> LEGACY_PATH: + """(deprecated) returns a legacy_path copy of self.path""" + return legacy_path(self.path) + + +def Node_fspath_set(self: Node, value: LEGACY_PATH) -> None: + self.path = Path(value) + + +@hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + """Monkeypatch legacy path attributes in several classes, as early as possible.""" + mp = MonkeyPatch() + early_config.add_cleanup(mp.undo) + + # Add Cache.makedir(). + mp.setattr(Cache, "makedir", Cache_makedir, raising=False) + + # Add FixtureRequest.fspath property. + mp.setattr(FixtureRequest, "fspath", property(FixtureRequest_fspath), raising=False) + + # Add TerminalReporter.startdir property. + mp.setattr( + TerminalReporter, "startdir", property(TerminalReporter_startdir), raising=False + ) + + # Add Config.{invocation_dir,rootdir,inifile} properties. + mp.setattr(Config, "invocation_dir", property(Config_invocation_dir), raising=False) + mp.setattr(Config, "rootdir", property(Config_rootdir), raising=False) + mp.setattr(Config, "inifile", property(Config_inifile), raising=False) + + # Add Session.startdir property. + mp.setattr(Session, "startdir", property(Session_startdir), raising=False) + + # Add pathlist configuration type. + mp.setattr(Config, "_getini_unknown_type", Config__getini_unknown_type) + + # Add Node.fspath property. + mp.setattr(Node, "fspath", property(Node_fspath, Node_fspath_set), raising=False) + + +@hookimpl +def pytest_configure(config: Config) -> None: + """Installs the LegacyTmpdirPlugin if the ``tmpdir`` plugin is also installed.""" + if config.pluginmanager.has_plugin("tmpdir"): + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + # Create TmpdirFactory and attach it to the config object. + # + # This is to comply with existing plugins which expect the handler to be + # available at pytest_configure time, but ideally should be moved entirely + # to the tmpdir_factory session fixture. + try: + tmp_path_factory = config._tmp_path_factory # type: ignore[attr-defined] + except AttributeError: + # tmpdir plugin is blocked. + pass + else: + _tmpdirhandler = TempdirFactory(tmp_path_factory, _ispytest=True) + mp.setattr(config, "_tmpdirhandler", _tmpdirhandler, raising=False) + + config.pluginmanager.register(LegacyTmpdirPlugin, "legacypath-tmpdir") + + +@hookimpl +def pytest_plugin_registered(plugin: object, manager: PytestPluginManager) -> None: + # pytester is not loaded by default and is commonly loaded from a conftest, + # so checking for it in `pytest_configure` is not enough. 
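+ # The is_registered guard below keeps the registration idempotent if this hook fires more than once.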
+ is_pytester = plugin is manager.get_plugin("pytester") + if is_pytester and not manager.is_registered(LegacyTestdirPlugin): + manager.register(LegacyTestdirPlugin, "legacypath-pytester") diff --git a/venv/Lib/site-packages/_pytest/logging.py b/venv/Lib/site-packages/_pytest/logging.py new file mode 100644 index 0000000..08c826f --- /dev/null +++ b/venv/Lib/site-packages/_pytest/logging.py @@ -0,0 +1,955 @@ +# mypy: allow-untyped-defs +"""Access and control log capturing.""" + +from __future__ import annotations + +from contextlib import contextmanager +from contextlib import nullcontext +from datetime import datetime +from datetime import timedelta +from datetime import timezone +import io +from io import StringIO +import logging +from logging import LogRecord +import os +from pathlib import Path +import re +from types import TracebackType +from typing import AbstractSet +from typing import Dict +from typing import final +from typing import Generator +from typing import Generic +from typing import List +from typing import Literal +from typing import Mapping +from typing import TYPE_CHECKING +from typing import TypeVar + +from _pytest import nodes +from _pytest._io import TerminalWriter +from _pytest.capture import CaptureManager +from _pytest.config import _strtobool +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter + + +if TYPE_CHECKING: + logging_StreamHandler = logging.StreamHandler[StringIO] +else: + logging_StreamHandler = logging.StreamHandler + +DEFAULT_LOG_FORMAT = "%(levelname)-8s %(name)s:%(filename)s:%(lineno)d %(message)s" +DEFAULT_LOG_DATE_FORMAT = "%H:%M:%S" +_ANSI_ESCAPE_SEQ = re.compile(r"\x1b\[[\d;]+m") +caplog_handler_key = StashKey["LogCaptureHandler"]() +caplog_records_key = StashKey[Dict[str, List[logging.LogRecord]]]() + + +def _remove_ansi_escape_sequences(text: str) -> str: + return _ANSI_ESCAPE_SEQ.sub("", text) + + +class DatetimeFormatter(logging.Formatter): + """A logging formatter which formats record with + :func:`datetime.datetime.strftime` formatter instead of + :func:`time.strftime` in case of microseconds in format string. + """ + + def formatTime(self, record: LogRecord, datefmt: str | None = None) -> str: + if datefmt and "%f" in datefmt: + ct = self.converter(record.created) + tz = timezone(timedelta(seconds=ct.tm_gmtoff), ct.tm_zone) + # Construct `datetime.datetime` object from `struct_time` + # and msecs information from `record` + # Using int() instead of round() to avoid it exceeding 1_000_000 and causing a ValueError (#11861). 
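+ # record.msecs is a float of milliseconds in [0, 1000); multiplying by 1000 yields microseconds.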
+ dt = datetime(*ct[0:6], microsecond=int(record.msecs * 1000), tzinfo=tz) + return dt.strftime(datefmt) + # Use `logging.Formatter` for non-microsecond formats + return super().formatTime(record, datefmt) + + +class ColoredLevelFormatter(DatetimeFormatter): + """A logging formatter which colorizes the %(levelname)..s part of the + log format passed to __init__.""" + + LOGLEVEL_COLOROPTS: Mapping[int, AbstractSet[str]] = { + logging.CRITICAL: {"red"}, + logging.ERROR: {"red", "bold"}, + logging.WARNING: {"yellow"}, + logging.WARN: {"yellow"}, + logging.INFO: {"green"}, + logging.DEBUG: {"purple"}, + logging.NOTSET: set(), + } + LEVELNAME_FMT_REGEX = re.compile(r"%\(levelname\)([+-.]?\d*(?:\.\d+)?s)") + + def __init__(self, terminalwriter: TerminalWriter, *args, **kwargs) -> None: + super().__init__(*args, **kwargs) + self._terminalwriter = terminalwriter + self._original_fmt = self._style._fmt + self._level_to_fmt_mapping: dict[int, str] = {} + + for level, color_opts in self.LOGLEVEL_COLOROPTS.items(): + self.add_color_level(level, *color_opts) + + def add_color_level(self, level: int, *color_opts: str) -> None: + """Add or update color opts for a log level. + + :param level: + Log level to apply a style to, e.g. ``logging.INFO``. + :param color_opts: + ANSI escape sequence color options. Capitalized colors indicates + background color, i.e. ``'green', 'Yellow', 'bold'`` will give bold + green text on yellow background. + + .. warning:: + This is an experimental API. + """ + assert self._fmt is not None + levelname_fmt_match = self.LEVELNAME_FMT_REGEX.search(self._fmt) + if not levelname_fmt_match: + return + levelname_fmt = levelname_fmt_match.group() + + formatted_levelname = levelname_fmt % {"levelname": logging.getLevelName(level)} + + # add ANSI escape sequences around the formatted levelname + color_kwargs = {name: True for name in color_opts} + colorized_formatted_levelname = self._terminalwriter.markup( + formatted_levelname, **color_kwargs + ) + self._level_to_fmt_mapping[level] = self.LEVELNAME_FMT_REGEX.sub( + colorized_formatted_levelname, self._fmt + ) + + def format(self, record: logging.LogRecord) -> str: + fmt = self._level_to_fmt_mapping.get(record.levelno, self._original_fmt) + self._style._fmt = fmt + return super().format(record) + + +class PercentStyleMultiline(logging.PercentStyle): + """A logging style with special support for multiline messages. + + If the message of a record consists of multiple lines, this style + formats the message as if each line were logged separately. + """ + + def __init__(self, fmt: str, auto_indent: int | str | bool | None) -> None: + super().__init__(fmt) + self._auto_indent = self._get_auto_indent(auto_indent) + + @staticmethod + def _get_auto_indent(auto_indent_option: int | str | bool | None) -> int: + """Determine the current auto indentation setting. + + Specify auto indent behavior (on/off/fixed) by passing in + extra={"auto_indent": [value]} to the call to logging.log() or + using a --log-auto-indent [value] command line or the + log_auto_indent [value] config option. + + Default behavior is auto-indent off. + + Using the string "True" or "on" or the boolean True as the value + turns auto indent on, using the string "False" or "off" or the + boolean False or the int 0 turns it off, and specifying a + positive integer fixes the indentation position to the value + specified. + + Any other values for the option are invalid, and will silently be + converted to the default. 
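+ + For example, ``logging.warning("a\nb", extra={"auto_indent": 4})`` renders the second line indented four spaces (illustrative).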
+ + :param None|bool|int|str auto_indent_option: + User specified option for indentation from command line, config + or extra kwarg. Accepts int, bool or str. str option accepts the + same range of values as boolean config options, as well as + positive integers represented in str form. + + :returns: + Indentation value, which can be + -1 (automatically determine indentation) or + 0 (auto-indent turned off) or + >0 (explicitly set indentation position). + """ + if auto_indent_option is None: + return 0 + elif isinstance(auto_indent_option, bool): + if auto_indent_option: + return -1 + else: + return 0 + elif isinstance(auto_indent_option, int): + return int(auto_indent_option) + elif isinstance(auto_indent_option, str): + try: + return int(auto_indent_option) + except ValueError: + pass + try: + if _strtobool(auto_indent_option): + return -1 + except ValueError: + return 0 + + return 0 + + def format(self, record: logging.LogRecord) -> str: + if "\n" in record.message: + if hasattr(record, "auto_indent"): + # Passed in from the "extra={}" kwarg on the call to logging.log(). + auto_indent = self._get_auto_indent(record.auto_indent) + else: + auto_indent = self._auto_indent + + if auto_indent: + lines = record.message.splitlines() + formatted = self._fmt % {**record.__dict__, "message": lines[0]} + + if auto_indent < 0: + indentation = _remove_ansi_escape_sequences(formatted).find( + lines[0] + ) + else: + # Optimizes logging by allowing a fixed indentation. + indentation = auto_indent + lines[0] = formatted + return ("\n" + " " * indentation).join(lines) + return self._fmt % record.__dict__ + + +def get_option_ini(config: Config, *names: str): + for name in names: + ret = config.getoption(name) # 'default' arg won't work as expected + if ret is None: + ret = config.getini(name) + if ret: + return ret + + +def pytest_addoption(parser: Parser) -> None: + """Add options to control log capturing.""" + group = parser.getgroup("logging") + + def add_option_ini(option, dest, default=None, type=None, **kwargs): + parser.addini( + dest, default=default, type=type, help="Default value for " + option + ) + group.addoption(option, dest=dest, **kwargs) + + add_option_ini( + "--log-level", + dest="log_level", + default=None, + metavar="LEVEL", + help=( + "Level of messages to catch/display." + " Not set by default, so it depends on the root/parent log handler's" + ' effective level, where it is "WARNING" by default.' 
+ ), + ) + add_option_ini( + "--log-format", + dest="log_format", + default=DEFAULT_LOG_FORMAT, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-date-format", + dest="log_date_format", + default=DEFAULT_LOG_DATE_FORMAT, + help="Log date format used by the logging module", + ) + parser.addini( + "log_cli", + default=False, + type="bool", + help='Enable log display during test run (also known as "live logging")', + ) + add_option_ini( + "--log-cli-level", dest="log_cli_level", default=None, help="CLI logging level" + ) + add_option_ini( + "--log-cli-format", + dest="log_cli_format", + default=None, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-cli-date-format", + dest="log_cli_date_format", + default=None, + help="Log date format used by the logging module", + ) + add_option_ini( + "--log-file", + dest="log_file", + default=None, + help="Path to a file where logging will be written to", + ) + add_option_ini( + "--log-file-mode", + dest="log_file_mode", + default="w", + choices=["w", "a"], + help="Log file open mode", + ) + add_option_ini( + "--log-file-level", + dest="log_file_level", + default=None, + help="Log file logging level", + ) + add_option_ini( + "--log-file-format", + dest="log_file_format", + default=None, + help="Log format used by the logging module", + ) + add_option_ini( + "--log-file-date-format", + dest="log_file_date_format", + default=None, + help="Log date format used by the logging module", + ) + add_option_ini( + "--log-auto-indent", + dest="log_auto_indent", + default=None, + help="Auto-indent multiline messages passed to the logging module. Accepts true|on, false|off or an integer.", + ) + group.addoption( + "--log-disable", + action="append", + default=[], + dest="logger_disable", + help="Disable a logger by name. Can be passed multiple times.", + ) + + +_HandlerType = TypeVar("_HandlerType", bound=logging.Handler) + + +# Not using @contextmanager for performance reasons.
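+# (catching_logs is entered for every test phase, so the per-enter overhead of a +# generator-based context manager would be paid repeatedly; plain __enter__/__exit__ avoids it.)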
+class catching_logs(Generic[_HandlerType]): + """Context manager that prepares the whole logging machinery properly.""" + + __slots__ = ("handler", "level", "orig_level") + + def __init__(self, handler: _HandlerType, level: int | None = None) -> None: + self.handler = handler + self.level = level + + def __enter__(self) -> _HandlerType: + root_logger = logging.getLogger() + if self.level is not None: + self.handler.setLevel(self.level) + root_logger.addHandler(self.handler) + if self.level is not None: + self.orig_level = root_logger.level + root_logger.setLevel(min(self.orig_level, self.level)) + return self.handler + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + root_logger = logging.getLogger() + if self.level is not None: + root_logger.setLevel(self.orig_level) + root_logger.removeHandler(self.handler) + + +class LogCaptureHandler(logging_StreamHandler): + """A logging handler that stores log records and the log text.""" + + def __init__(self) -> None: + """Create a new log handler.""" + super().__init__(StringIO()) + self.records: list[logging.LogRecord] = [] + + def emit(self, record: logging.LogRecord) -> None: + """Keep the log records in a list in addition to the log text.""" + self.records.append(record) + super().emit(record) + + def reset(self) -> None: + self.records = [] + self.stream = StringIO() + + def clear(self) -> None: + self.records.clear() + self.stream = StringIO() + + def handleError(self, record: logging.LogRecord) -> None: + if logging.raiseExceptions: + # Fail the test if the log message is bad (emit failed). + # The default behavior of logging is to print "Logging error" + # to stderr with the call stack and some extra details. + # pytest wants to make such mistakes visible during testing. + raise # noqa: PLE0704 + + +@final +class LogCaptureFixture: + """Provides access and control of log capturing.""" + + def __init__(self, item: nodes.Node, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._item = item + self._initial_handler_level: int | None = None + # Dict of log name -> log level. + self._initial_logger_levels: dict[str | None, int] = {} + self._initial_disabled_logging_level: int | None = None + + def _finalize(self) -> None: + """Finalize the fixture. + + This restores the log levels and the disabled logging levels changed by :meth:`set_level`. + """ + # Restore log levels. + if self._initial_handler_level is not None: + self.handler.setLevel(self._initial_handler_level) + for logger_name, level in self._initial_logger_levels.items(): + logger = logging.getLogger(logger_name) + logger.setLevel(level) + # Disable logging at the original disabled logging level. + if self._initial_disabled_logging_level is not None: + logging.disable(self._initial_disabled_logging_level) + self._initial_disabled_logging_level = None + + @property + def handler(self) -> LogCaptureHandler: + """Get the logging handler used by the fixture.""" + return self._item.stash[caplog_handler_key] + + def get_records( + self, when: Literal["setup", "call", "teardown"] + ) -> list[logging.LogRecord]: + """Get the logging records for one of the possible test phases. + + :param when: + Which test phase to obtain the records from. + Valid values are: "setup", "call" and "teardown". + + :returns: The list of captured records at the given stage. + + .. 
versionadded:: 3.4 + """ + return self._item.stash[caplog_records_key].get(when, []) + + @property + def text(self) -> str: + """The formatted log text.""" + return _remove_ansi_escape_sequences(self.handler.stream.getvalue()) + + @property + def records(self) -> list[logging.LogRecord]: + """The list of log records.""" + return self.handler.records + + @property + def record_tuples(self) -> list[tuple[str, int, str]]: + """A list of a stripped down version of log records intended + for use in assertion comparison. + + The format of the tuple is: + + (logger_name, log_level, message) + """ + return [(r.name, r.levelno, r.getMessage()) for r in self.records] + + @property + def messages(self) -> list[str]: + """A list of format-interpolated log messages. + + Unlike 'records', which contains the format string and parameters for + interpolation, log messages in this list are all interpolated. + + Unlike 'text', which contains the output from the handler, log + messages in this list are unadorned with levels, timestamps, etc, + making exact comparisons more reliable. + + Note that traceback or stack info (from :func:`logging.exception` or + the `exc_info` or `stack_info` arguments to the logging functions) is + not included, as this is added by the formatter in the handler. + + .. versionadded:: 3.7 + """ + return [r.getMessage() for r in self.records] + + def clear(self) -> None: + """Reset the list of log records and the captured log text.""" + self.handler.clear() + + def _force_enable_logging( + self, level: int | str, logger_obj: logging.Logger + ) -> int: + """Enable the desired logging level if the global level was disabled via ``logging.disabled``. + + Only enables logging levels greater than or equal to the requested ``level``. + + Does nothing if the desired ``level`` wasn't disabled. + + :param level: + The logger level caplog should capture. + All logging is enabled if a non-standard logging level string is supplied. + Valid level strings are in :data:`logging._nameToLevel`. + :param logger_obj: The logger object to check. + + :return: The original disabled logging level. + """ + original_disable_level: int = logger_obj.manager.disable + + if isinstance(level, str): + # Try to translate the level string to an int for `logging.disable()` + level = logging.getLevelName(level) + + if not isinstance(level, int): + # The level provided was not valid, so just un-disable all logging. + logging.disable(logging.NOTSET) + elif not logger_obj.isEnabledFor(level): + # Each level is `10` away from other levels. + # https://docs.python.org/3/library/logging.html#logging-levels + disable_level = max(level - 10, logging.NOTSET) + logging.disable(disable_level) + + return original_disable_level + + def set_level(self, level: int | str, logger: str | None = None) -> None: + """Set the threshold level of a logger for the duration of a test. + + Logging messages which are less severe than this level will not be captured. + + .. versionchanged:: 3.4 + The levels of the loggers changed by this function will be + restored to their initial values at the end of the test. + + Will enable the requested logging level if it was disabled via :func:`logging.disable`. + + :param level: The level. + :param logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + # Save the original log-level to restore it during teardown. 
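+ # setdefault means repeated set_level calls keep only the first, pre-test level.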
+ self._initial_logger_levels.setdefault(logger, logger_obj.level) + logger_obj.setLevel(level) + if self._initial_handler_level is None: + self._initial_handler_level = self.handler.level + self.handler.setLevel(level) + initial_disabled_logging_level = self._force_enable_logging(level, logger_obj) + if self._initial_disabled_logging_level is None: + self._initial_disabled_logging_level = initial_disabled_logging_level + + @contextmanager + def at_level(self, level: int | str, logger: str | None = None) -> Generator[None]: + """Context manager that sets the level for capturing of logs. After + the end of the 'with' statement the level is restored to its original + value. + + Will enable the requested logging level if it was disabled via :func:`logging.disable`. + + :param level: The level. + :param logger: The logger to update. If not given, the root logger. + """ + logger_obj = logging.getLogger(logger) + orig_level = logger_obj.level + logger_obj.setLevel(level) + handler_orig_level = self.handler.level + self.handler.setLevel(level) + original_disable_level = self._force_enable_logging(level, logger_obj) + try: + yield + finally: + logger_obj.setLevel(orig_level) + self.handler.setLevel(handler_orig_level) + logging.disable(original_disable_level) + + @contextmanager + def filtering(self, filter_: logging.Filter) -> Generator[None]: + """Context manager that temporarily adds the given filter to the caplog's + :meth:`handler` for the 'with' statement block, and removes that filter at the + end of the block. + + :param filter_: A custom :class:`logging.Filter` object. + + .. versionadded:: 7.5 + """ + self.handler.addFilter(filter_) + try: + yield + finally: + self.handler.removeFilter(filter_) + + +@fixture +def caplog(request: FixtureRequest) -> Generator[LogCaptureFixture]: + """Access and control log capturing. + + Captured logs are available through the following properties/methods:: + + * caplog.messages -> list of format-interpolated log messages + * caplog.text -> string containing formatted log output + * caplog.records -> list of logging.LogRecord instances + * caplog.record_tuples -> list of (logger_name, level, message) tuples + * caplog.clear() -> clear captured records and formatted log output string + """ + result = LogCaptureFixture(request.node, _ispytest=True) + yield result + result._finalize() + + +def get_log_level_for_setting(config: Config, *setting_names: str) -> int | None: + for setting_name in setting_names: + log_level = config.getoption(setting_name) + if log_level is None: + log_level = config.getini(setting_name) + if log_level: + break + else: + return None + + if isinstance(log_level, str): + log_level = log_level.upper() + try: + return int(getattr(logging, log_level, log_level)) + except ValueError as e: + # Python logging does not recognise this as a logging level + raise UsageError( + f"'{log_level}' is not recognized as a logging level name for " + f"'{setting_name}'. Please consider passing the " + "logging level num instead." + ) from e + + +# run after terminalreporter/capturemanager are configured +@hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + config.pluginmanager.register(LoggingPlugin(config), "logging-plugin") + + +class LoggingPlugin: + """Attaches to the logging module and captures log messages for each test.""" + + def __init__(self, config: Config) -> None: + """Create a new plugin to capture log messages. 
+ + The formatter can be safely shared across all handlers so + create a single one for the entire test session here. + """ + self._config = config + + # Report logging. + self.formatter = self._create_formatter( + get_option_ini(config, "log_format"), + get_option_ini(config, "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_level = get_log_level_for_setting(config, "log_level") + self.caplog_handler = LogCaptureHandler() + self.caplog_handler.setFormatter(self.formatter) + self.report_handler = LogCaptureHandler() + self.report_handler.setFormatter(self.formatter) + + # File logging. + self.log_file_level = get_log_level_for_setting( + config, "log_file_level", "log_level" + ) + log_file = get_option_ini(config, "log_file") or os.devnull + if log_file != os.devnull: + directory = os.path.dirname(os.path.abspath(log_file)) + if not os.path.isdir(directory): + os.makedirs(directory) + + self.log_file_mode = get_option_ini(config, "log_file_mode") or "w" + self.log_file_handler = _FileHandler( + log_file, mode=self.log_file_mode, encoding="UTF-8" + ) + log_file_format = get_option_ini(config, "log_file_format", "log_format") + log_file_date_format = get_option_ini( + config, "log_file_date_format", "log_date_format" + ) + + log_file_formatter = DatetimeFormatter( + log_file_format, datefmt=log_file_date_format + ) + self.log_file_handler.setFormatter(log_file_formatter) + + # CLI/live logging. + self.log_cli_level = get_log_level_for_setting( + config, "log_cli_level", "log_level" + ) + if self._log_cli_enabled(): + terminal_reporter = config.pluginmanager.get_plugin("terminalreporter") + # Guaranteed by `_log_cli_enabled()`. + assert terminal_reporter is not None + capture_manager = config.pluginmanager.get_plugin("capturemanager") + # if capturemanager plugin is disabled, live logging still works. + self.log_cli_handler: ( + _LiveLoggingStreamHandler | _LiveLoggingNullHandler + ) = _LiveLoggingStreamHandler(terminal_reporter, capture_manager) + else: + self.log_cli_handler = _LiveLoggingNullHandler() + log_cli_formatter = self._create_formatter( + get_option_ini(config, "log_cli_format", "log_format"), + get_option_ini(config, "log_cli_date_format", "log_date_format"), + get_option_ini(config, "log_auto_indent"), + ) + self.log_cli_handler.setFormatter(log_cli_formatter) + self._disable_loggers(loggers_to_disable=config.option.logger_disable) + + def _disable_loggers(self, loggers_to_disable: list[str]) -> None: + if not loggers_to_disable: + return + + for name in loggers_to_disable: + logger = logging.getLogger(name) + logger.disabled = True + + def _create_formatter(self, log_format, log_date_format, auto_indent): + # Color option doesn't exist if terminal plugin is disabled. + color = getattr(self._config.option, "color", "no") + if color != "no" and ColoredLevelFormatter.LEVELNAME_FMT_REGEX.search( + log_format + ): + formatter: logging.Formatter = ColoredLevelFormatter( + create_terminal_writer(self._config), log_format, log_date_format + ) + else: + formatter = DatetimeFormatter(log_format, log_date_format) + + formatter._style = PercentStyleMultiline( + formatter._style._fmt, auto_indent=auto_indent + ) + + return formatter + + def set_log_path(self, fname: str) -> None: + """Set the filename parameter for Logging.FileHandler(). + + Creates parent directory if it does not exist. + + .. warning:: + This is an experimental API. 
+ """ + fpath = Path(fname) + + if not fpath.is_absolute(): + fpath = self._config.rootpath / fpath + + if not fpath.parent.exists(): + fpath.parent.mkdir(exist_ok=True, parents=True) + + # https://github.com/python/mypy/issues/11193 + stream: io.TextIOWrapper = fpath.open(mode=self.log_file_mode, encoding="UTF-8") # type: ignore[assignment] + old_stream = self.log_file_handler.setStream(stream) + if old_stream: + old_stream.close() + + def _log_cli_enabled(self) -> bool: + """Return whether live logging is enabled.""" + enabled = self._config.getoption( + "--log-cli-level" + ) is not None or self._config.getini("log_cli") + if not enabled: + return False + + terminal_reporter = self._config.pluginmanager.get_plugin("terminalreporter") + if terminal_reporter is None: + # terminal reporter is disabled e.g. by pytest-xdist. + return False + + return True + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_sessionstart(self) -> Generator[None]: + self.log_cli_handler.set_when("sessionstart") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_collection(self) -> Generator[None]: + self.log_cli_handler.set_when("collection") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl(wrapper=True) + def pytest_runtestloop(self, session: Session) -> Generator[None, object, object]: + if session.config.option.collectonly: + return (yield) + + if self._log_cli_enabled() and self._config.get_verbosity() < 1: + # The verbose flag is needed to avoid messy test progress output. + self._config.option.verbose = 1 + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) # Run all the tests. 
+ + @hookimpl + def pytest_runtest_logstart(self) -> None: + self.log_cli_handler.reset() + self.log_cli_handler.set_when("start") + + @hookimpl + def pytest_runtest_logreport(self) -> None: + self.log_cli_handler.set_when("logreport") + + def _runtest_for(self, item: nodes.Item, when: str) -> Generator[None]: + """Implement the internals of the pytest_runtest_xxx() hooks.""" + with catching_logs( + self.caplog_handler, + level=self.log_level, + ) as caplog_handler, catching_logs( + self.report_handler, + level=self.log_level, + ) as report_handler: + caplog_handler.reset() + report_handler.reset() + item.stash[caplog_records_key][when] = caplog_handler.records + item.stash[caplog_handler_key] = caplog_handler + + try: + yield + finally: + log = report_handler.stream.getvalue().strip() + item.add_report_section(when, "log", log) + + @hookimpl(wrapper=True) + def pytest_runtest_setup(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("setup") + + empty: dict[str, list[logging.LogRecord]] = {} + item.stash[caplog_records_key] = empty + yield from self._runtest_for(item, "setup") + + @hookimpl(wrapper=True) + def pytest_runtest_call(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("call") + + yield from self._runtest_for(item, "call") + + @hookimpl(wrapper=True) + def pytest_runtest_teardown(self, item: nodes.Item) -> Generator[None]: + self.log_cli_handler.set_when("teardown") + + try: + yield from self._runtest_for(item, "teardown") + finally: + del item.stash[caplog_records_key] + del item.stash[caplog_handler_key] + + @hookimpl + def pytest_runtest_logfinish(self) -> None: + self.log_cli_handler.set_when("finish") + + @hookimpl(wrapper=True, tryfirst=True) + def pytest_sessionfinish(self) -> Generator[None]: + self.log_cli_handler.set_when("sessionfinish") + + with catching_logs(self.log_cli_handler, level=self.log_cli_level): + with catching_logs(self.log_file_handler, level=self.log_file_level): + return (yield) + + @hookimpl + def pytest_unconfigure(self) -> None: + # Close the FileHandler explicitly. + # (logging.shutdown might have lost the weakref?!) + self.log_file_handler.close() + + +class _FileHandler(logging.FileHandler): + """A logging FileHandler with pytest tweaks.""" + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingStreamHandler(logging_StreamHandler): + """A logging StreamHandler used by the live logging feature: it will + write a newline before the first log message in each test. + + During live logging we must also explicitly disable stdout/stderr + capturing otherwise it will get captured and won't appear in the + terminal. + """ + + # Officially stream needs to be a IO[str], but TerminalReporter + # isn't. So force it. 
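+ # (logging.StreamHandler needs little more than write(); the section() call in + # emit() below relies on TerminalReporter's richer API.)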
+ stream: TerminalReporter = None # type: ignore + + def __init__( + self, + terminal_reporter: TerminalReporter, + capture_manager: CaptureManager | None, + ) -> None: + super().__init__(stream=terminal_reporter) # type: ignore[arg-type] + self.capture_manager = capture_manager + self.reset() + self.set_when(None) + self._test_outcome_written = False + + def reset(self) -> None: + """Reset the handler; should be called before the start of each test.""" + self._first_record_emitted = False + + def set_when(self, when: str | None) -> None: + """Prepare for the given test phase (setup/call/teardown).""" + self._when = when + self._section_name_shown = False + if when == "start": + self._test_outcome_written = False + + def emit(self, record: logging.LogRecord) -> None: + ctx_manager = ( + self.capture_manager.global_and_fixture_disabled() + if self.capture_manager + else nullcontext() + ) + with ctx_manager: + if not self._first_record_emitted: + self.stream.write("\n") + self._first_record_emitted = True + elif self._when in ("teardown", "finish"): + if not self._test_outcome_written: + self._test_outcome_written = True + self.stream.write("\n") + if not self._section_name_shown and self._when: + self.stream.section("live log " + self._when, sep="-", bold=True) + self._section_name_shown = True + super().emit(record) + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass + + +class _LiveLoggingNullHandler(logging.NullHandler): + """A logging handler used when live logging is disabled.""" + + def reset(self) -> None: + pass + + def set_when(self, when: str) -> None: + pass + + def handleError(self, record: logging.LogRecord) -> None: + # Handled by LogCaptureHandler. + pass diff --git a/venv/Lib/site-packages/_pytest/main.py b/venv/Lib/site-packages/_pytest/main.py new file mode 100644 index 0000000..e5534e9 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/main.py @@ -0,0 +1,1072 @@ +"""Core implementation of the testing process: init, session, runtest loop.""" + +from __future__ import annotations + +import argparse +import dataclasses +import fnmatch +import functools +import importlib +import importlib.util +import os +from pathlib import Path +import sys +from typing import AbstractSet +from typing import Callable +from typing import Dict +from typing import final +from typing import Iterable +from typing import Iterator +from typing import Literal +from typing import overload +from typing import Sequence +from typing import TYPE_CHECKING +import warnings + +import pluggy + +from _pytest import nodes +import _pytest._code +from _pytest.config import Config +from _pytest.config import directory_arg +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import PytestPluginManager +from _pytest.config import UsageError +from _pytest.config.argparsing import Parser +from _pytest.config.compat import PathAwareHookProxy +from _pytest.outcomes import exit +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import safe_exists +from _pytest.pathlib import scandir +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.runner import collect_one_node +from _pytest.runner import SetupState +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + from _pytest.fixtures import FixtureManager + + +def pytest_addoption(parser: 
Parser) -> None: + parser.addini( + "norecursedirs", + "Directory patterns to avoid for recursion", + type="args", + default=[ + "*.egg", + ".*", + "_darcs", + "build", + "CVS", + "dist", + "node_modules", + "venv", + "{arch}", + ], + ) + parser.addini( + "testpaths", + "Directories to search for tests when no files or directories are given on the " + "command line", + type="args", + default=[], + ) + group = parser.getgroup("general", "Running and selection options") + group._addoption( + "-x", + "--exitfirst", + action="store_const", + dest="maxfail", + const=1, + help="Exit instantly on first error or failed test", + ) + group = parser.getgroup("pytest-warnings") + group.addoption( + "-W", + "--pythonwarnings", + action="append", + help="Set which warnings to report, see -W option of Python itself", + ) + parser.addini( + "filterwarnings", + type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W/--pythonwarnings.", + ) + group._addoption( + "--maxfail", + metavar="num", + action="store", + type=int, + dest="maxfail", + default=0, + help="Exit after first num failures or errors", + ) + group._addoption( + "--strict-config", + action="store_true", + help="Any warnings encountered while parsing the `pytest` section of the " + "configuration file raise errors", + ) + group._addoption( + "--strict-markers", + action="store_true", + help="Markers not registered in the `markers` section of the configuration " + "file raise errors", + ) + group._addoption( + "--strict", + action="store_true", + help="(Deprecated) alias to --strict-markers", + ) + group._addoption( + "-c", + "--config-file", + metavar="FILE", + type=str, + dest="inifilename", + help="Load configuration from `FILE` instead of trying to locate one of the " + "implicit configuration files.", + ) + group._addoption( + "--continue-on-collection-errors", + action="store_true", + default=False, + dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur", + ) + group._addoption( + "--rootdir", + action="store", + dest="rootdir", + help="Define root directory for tests. 
Can be relative path: 'root_dir', './root_dir', " + "'root_dir/another_dir/'; absolute path: '/home/user/root_dir'; path with variables: " + "'$HOME/root_dir'.", + ) + + group = parser.getgroup("collect", "collection") + group.addoption( + "--collectonly", + "--collect-only", + "--co", + action="store_true", + help="Only collect tests, don't execute them", + ) + group.addoption( + "--pyargs", + action="store_true", + help="Try to interpret all arguments as Python packages", + ) + group.addoption( + "--ignore", + action="append", + metavar="path", + help="Ignore path during collection (multi-allowed)", + ) + group.addoption( + "--ignore-glob", + action="append", + metavar="path", + help="Ignore path pattern during collection (multi-allowed)", + ) + group.addoption( + "--deselect", + action="append", + metavar="nodeid_prefix", + help="Deselect item (via node id prefix) during collection (multi-allowed)", + ) + group.addoption( + "--confcutdir", + dest="confcutdir", + default=None, + metavar="dir", + type=functools.partial(directory_arg, optname="--confcutdir"), + help="Only load conftest.py's relative to specified dir", + ) + group.addoption( + "--noconftest", + action="store_true", + dest="noconftest", + default=False, + help="Don't load any conftest.py files", + ) + group.addoption( + "--keepduplicates", + "--keep-duplicates", + action="store_true", + dest="keepduplicates", + default=False, + help="Keep duplicate tests", + ) + group.addoption( + "--collect-in-virtualenv", + action="store_true", + dest="collect_in_virtualenv", + default=False, + help="Don't ignore tests in a local virtualenv directory", + ) + group.addoption( + "--import-mode", + default="prepend", + choices=["prepend", "append", "importlib"], + dest="importmode", + help="Prepend/append to sys.path when importing test modules and conftest " + "files. Default: prepend.", + ) + parser.addini( + "consider_namespace_packages", + type="bool", + default=False, + help="Consider namespace packages when resolving module names during import", + ) + + group = parser.getgroup("debugconfig", "test session debugging and configuration") + group.addoption( + "--basetemp", + dest="basetemp", + default=None, + type=validate_basetemp, + metavar="dir", + help=( + "Base temporary directory for this test run. 
" + "(Warning: this directory is removed if it exists.)" + ), + ) + + +def validate_basetemp(path: str) -> str: + # GH 7119 + msg = "basetemp must not be empty, the current working directory or any parent directory of it" + + # empty path + if not path: + raise argparse.ArgumentTypeError(msg) + + def is_ancestor(base: Path, query: Path) -> bool: + """Return whether query is an ancestor of base.""" + if base == query: + return True + return query in base.parents + + # check if path is an ancestor of cwd + if is_ancestor(Path.cwd(), Path(path).absolute()): + raise argparse.ArgumentTypeError(msg) + + # check symlinks for ancestors + if is_ancestor(Path.cwd().resolve(), Path(path).resolve()): + raise argparse.ArgumentTypeError(msg) + + return path + + +def wrap_session( + config: Config, doit: Callable[[Config, Session], int | ExitCode | None] +) -> int | ExitCode: + """Skeleton command line program.""" + session = Session.from_config(config) + session.exitstatus = ExitCode.OK + initstate = 0 + try: + try: + config._do_configure() + initstate = 1 + config.hook.pytest_sessionstart(session=session) + initstate = 2 + session.exitstatus = doit(config, session) or 0 + except UsageError: + session.exitstatus = ExitCode.USAGE_ERROR + raise + except Failed: + session.exitstatus = ExitCode.TESTS_FAILED + except (KeyboardInterrupt, exit.Exception): + excinfo = _pytest._code.ExceptionInfo.from_current() + exitstatus: int | ExitCode = ExitCode.INTERRUPTED + if isinstance(excinfo.value, exit.Exception): + if excinfo.value.returncode is not None: + exitstatus = excinfo.value.returncode + if initstate < 2: + sys.stderr.write(f"{excinfo.typename}: {excinfo.value.msg}\n") + config.hook.pytest_keyboard_interrupt(excinfo=excinfo) + session.exitstatus = exitstatus + except BaseException: + session.exitstatus = ExitCode.INTERNAL_ERROR + excinfo = _pytest._code.ExceptionInfo.from_current() + try: + config.notify_exception(excinfo, config.option) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") + else: + if isinstance(excinfo.value, SystemExit): + sys.stderr.write("mainloop: caught unexpected SystemExit!\n") + + finally: + # Explicitly break reference cycle. 
+ excinfo = None # type: ignore + os.chdir(session.startpath) + if initstate >= 2: + try: + config.hook.pytest_sessionfinish( + session=session, exitstatus=session.exitstatus + ) + except exit.Exception as exc: + if exc.returncode is not None: + session.exitstatus = exc.returncode + sys.stderr.write(f"{type(exc).__name__}: {exc}\n") + config._ensure_unconfigure() + return session.exitstatus + + +def pytest_cmdline_main(config: Config) -> int | ExitCode: + return wrap_session(config, _main) + + +def _main(config: Config, session: Session) -> int | ExitCode | None: + """Default command line protocol for initialization, session, + running tests and reporting.""" + config.hook.pytest_collection(session=session) + config.hook.pytest_runtestloop(session=session) + + if session.testsfailed: + return ExitCode.TESTS_FAILED + elif session.testscollected == 0: + return ExitCode.NO_TESTS_COLLECTED + return None + + +def pytest_collection(session: Session) -> None: + session.perform_collect() + + +def pytest_runtestloop(session: Session) -> bool: + if session.testsfailed and not session.config.option.continue_on_collection_errors: + raise session.Interrupted( + "%d error%s during collection" + % (session.testsfailed, "s" if session.testsfailed != 1 else "") + ) + + if session.config.option.collectonly: + return True + + for i, item in enumerate(session.items): + nextitem = session.items[i + 1] if i + 1 < len(session.items) else None + item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) + if session.shouldfail: + raise session.Failed(session.shouldfail) + if session.shouldstop: + raise session.Interrupted(session.shouldstop) + return True + + +def _in_venv(path: Path) -> bool: + """Attempt to detect if ``path`` is the root of a Virtual Environment by + checking for the existence of the pyvenv.cfg file. + + [https://peps.python.org/pep-0405/] + + For regression protection we also check for conda environments that do not include pyvenv.cfg yet -- + https://github.com/conda/conda/issues/13337 is the conda issue tracking adding pyvenv.cfg. + + Checking for the `conda-meta/history` file per https://github.com/pytest-dev/pytest/issues/12652#issuecomment-2246336902.
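+ + For example, ``_in_venv(Path(sys.prefix))`` is typically true inside an active virtualenv (illustrative).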
+ + """ + try: + return ( + path.joinpath("pyvenv.cfg").is_file() + or path.joinpath("conda-meta", "history").is_file() + ) + except OSError: + return False + + +def pytest_ignore_collect(collection_path: Path, config: Config) -> bool | None: + if collection_path.name == "__pycache__": + return True + + ignore_paths = config._getconftest_pathlist( + "collect_ignore", path=collection_path.parent + ) + ignore_paths = ignore_paths or [] + excludeopt = config.getoption("ignore") + if excludeopt: + ignore_paths.extend(absolutepath(x) for x in excludeopt) + + if collection_path in ignore_paths: + return True + + ignore_globs = config._getconftest_pathlist( + "collect_ignore_glob", path=collection_path.parent + ) + ignore_globs = ignore_globs or [] + excludeglobopt = config.getoption("ignore_glob") + if excludeglobopt: + ignore_globs.extend(absolutepath(x) for x in excludeglobopt) + + if any(fnmatch.fnmatch(str(collection_path), str(glob)) for glob in ignore_globs): + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if not allow_in_venv and _in_venv(collection_path): + return True + + if collection_path.is_dir(): + norecursepatterns = config.getini("norecursedirs") + if any(fnmatch_ex(pat, collection_path) for pat in norecursepatterns): + return True + + return None + + +def pytest_collect_directory( + path: Path, parent: nodes.Collector +) -> nodes.Collector | None: + return Dir.from_parent(parent, path=path) + + +def pytest_collection_modifyitems(items: list[nodes.Item], config: Config) -> None: + deselect_prefixes = tuple(config.getoption("deselect") or []) + if not deselect_prefixes: + return + + remaining = [] + deselected = [] + for colitem in items: + if colitem.nodeid.startswith(deselect_prefixes): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +class FSHookProxy: + def __init__( + self, + pm: PytestPluginManager, + remove_mods: AbstractSet[object], + ) -> None: + self.pm = pm + self.remove_mods = remove_mods + + def __getattr__(self, name: str) -> pluggy.HookCaller: + x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods) + self.__dict__[name] = x + return x + + +class Interrupted(KeyboardInterrupt): + """Signals that the test run was interrupted.""" + + __module__ = "builtins" # For py3. + + +class Failed(Exception): + """Signals a stop as failed test run.""" + + +@dataclasses.dataclass +class _bestrelpath_cache(Dict[Path, str]): + __slots__ = ("path",) + + path: Path + + def __missing__(self, path: Path) -> str: + r = bestrelpath(self.path, path) + self[path] = r + return r + + +@final +class Dir(nodes.Directory): + """Collector of files in a file system directory. + + .. versionadded:: 8.0 + + .. note:: + + Python directories with an `__init__.py` file are instead collected by + :class:`~pytest.Package` by default. Both are :class:`~pytest.Directory` + collectors. + """ + + @classmethod + def from_parent( # type: ignore[override] + cls, + parent: nodes.Collector, + *, + path: Path, + ) -> Self: + """The public constructor. + + :param parent: The parent collector of this Dir. + :param path: The directory's path. 
+ :type path: pathlib.Path + """ + return super().from_parent(parent=parent, path=path) + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + config = self.config + col: nodes.Collector | None + cols: Sequence[nodes.Collector] + ihook = self.ihook + for direntry in scandir(self.path): + if direntry.is_dir(): + path = Path(direntry.path) + if not self.session.isinitpath(path, with_parents=True): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + col = ihook.pytest_collect_directory(path=path, parent=self) + if col is not None: + yield col + + elif direntry.is_file(): + path = Path(direntry.path) + if not self.session.isinitpath(path): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + cols = ihook.pytest_collect_file(file_path=path, parent=self) + yield from cols + + +@final +class Session(nodes.Collector): + """The root of the collection tree. + + ``Session`` collects the initial paths given as arguments to pytest. + """ + + Interrupted = Interrupted + Failed = Failed + # Set on the session by runner.pytest_sessionstart. + _setupstate: SetupState + # Set on the session by fixtures.pytest_sessionstart. + _fixturemanager: FixtureManager + exitstatus: int | ExitCode + + def __init__(self, config: Config) -> None: + super().__init__( + name="", + path=config.rootpath, + fspath=None, + parent=None, + config=config, + session=self, + nodeid="", + ) + self.testsfailed = 0 + self.testscollected = 0 + self._shouldstop: bool | str = False + self._shouldfail: bool | str = False + self.trace = config.trace.root.get("collection") + self._initialpaths: frozenset[Path] = frozenset() + self._initialpaths_with_parents: frozenset[Path] = frozenset() + self._notfound: list[tuple[str, Sequence[nodes.Collector]]] = [] + self._initial_parts: list[CollectionArgument] = [] + self._collection_cache: dict[nodes.Collector, CollectReport] = {} + self.items: list[nodes.Item] = [] + + self._bestrelpathcache: dict[Path, str] = _bestrelpath_cache(config.rootpath) + + self.config.pluginmanager.register(self, name="session") + + @classmethod + def from_config(cls, config: Config) -> Session: + session: Session = cls._create(config=config) + return session + + def __repr__(self) -> str: + return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % ( + self.__class__.__name__, + self.name, + getattr(self, "exitstatus", ""), + self.testsfailed, + self.testscollected, + ) + + @property + def shouldstop(self) -> bool | str: + return self._shouldstop + + @shouldstop.setter + def shouldstop(self, value: bool | str) -> None: + # The runner checks shouldfail and assumes that if it is set we are + # definitely stopping, so prevent unsetting it. + if value is False and self._shouldstop: + warnings.warn( + PytestWarning( + "session.shouldstop cannot be unset after it has been set; ignoring." + ), + stacklevel=2, + ) + return + self._shouldstop = value + + @property + def shouldfail(self) -> bool | str: + return self._shouldfail + + @shouldfail.setter + def shouldfail(self, value: bool | str) -> None: + # The runner checks shouldfail and assumes that if it is set we are + # definitely stopping, so prevent unsetting it. + if value is False and self._shouldfail: + warnings.warn( + PytestWarning( + "session.shouldfail cannot be unset after it has been set; ignoring." + ), + stacklevel=2, + ) + return + self._shouldfail = value + + @property + def startpath(self) -> Path: + """The path from which pytest was invoked. + + .. 
versionadded:: 7.0.0 + """ + return self.config.invocation_params.dir + + def _node_location_to_relpath(self, node_path: Path) -> str: + # bestrelpath is a quite slow function. + return self._bestrelpathcache[node_path] + + @hookimpl(tryfirst=True) + def pytest_collectstart(self) -> None: + if self.shouldfail: + raise self.Failed(self.shouldfail) + if self.shouldstop: + raise self.Interrupted(self.shouldstop) + + @hookimpl(tryfirst=True) + def pytest_runtest_logreport(self, report: TestReport | CollectReport) -> None: + if report.failed and not hasattr(report, "wasxfail"): + self.testsfailed += 1 + maxfail = self.config.getvalue("maxfail") + if maxfail and self.testsfailed >= maxfail: + self.shouldfail = "stopping after %d failures" % (self.testsfailed) + + pytest_collectreport = pytest_runtest_logreport + + def isinitpath( + self, + path: str | os.PathLike[str], + *, + with_parents: bool = False, + ) -> bool: + """Is path an initial path? + + An initial path is a path explicitly given to pytest on the command + line. + + :param with_parents: + If set, also return True if the path is a parent of an initial path. + + .. versionchanged:: 8.0 + Added the ``with_parents`` parameter. + """ + # Optimization: Path(Path(...)) is much slower than isinstance. + path_ = path if isinstance(path, Path) else Path(path) + if with_parents: + return path_ in self._initialpaths_with_parents + else: + return path_ in self._initialpaths + + def gethookproxy(self, fspath: os.PathLike[str]) -> pluggy.HookRelay: + # Optimization: Path(Path(...)) is much slower than isinstance. + path = fspath if isinstance(fspath, Path) else Path(fspath) + pm = self.config.pluginmanager + # Check if we have the common case of running + # hooks with all conftest.py files. + my_conftestmodules = pm._getconftestmodules(path) + remove_mods = pm._conftest_plugins.difference(my_conftestmodules) + proxy: pluggy.HookRelay + if remove_mods: + # One or more conftests are not in use at this path. + proxy = PathAwareHookProxy(FSHookProxy(pm, remove_mods)) # type: ignore[arg-type,assignment] + else: + # All plugins are active for this fspath. + proxy = self.config.hook + return proxy + + def _collect_path( + self, + path: Path, + path_cache: dict[Path, Sequence[nodes.Collector]], + ) -> Sequence[nodes.Collector]: + """Create a Collector for the given path. + + `path_cache` makes it so the same Collectors are returned for the same + path. + """ + if path in path_cache: + return path_cache[path] + + if path.is_dir(): + ihook = self.gethookproxy(path.parent) + col: nodes.Collector | None = ihook.pytest_collect_directory( + path=path, parent=self + ) + cols: Sequence[nodes.Collector] = (col,) if col is not None else () + + elif path.is_file(): + ihook = self.gethookproxy(path) + cols = ihook.pytest_collect_file(file_path=path, parent=self) + + else: + # Broken symlink or invalid/missing file. + cols = () + + path_cache[path] = cols + return cols + + @overload + def perform_collect( + self, args: Sequence[str] | None = ..., genitems: Literal[True] = ... + ) -> Sequence[nodes.Item]: ... + + @overload + def perform_collect( + self, args: Sequence[str] | None = ..., genitems: bool = ... + ) -> Sequence[nodes.Item | nodes.Collector]: ... + + def perform_collect( + self, args: Sequence[str] | None = None, genitems: bool = True + ) -> Sequence[nodes.Item | nodes.Collector]: + """Perform the collection phase for this session. 
+ + This is called by the default :hook:`pytest_collection` hook + implementation; see the documentation of this hook for more details. + For testing purposes, it may also be called directly on a fresh + ``Session``. + + This function normally recursively expands any collectors collected + from the session to their items, and only items are returned. For + testing purposes, this may be suppressed by passing ``genitems=False``, + in which case the return value contains these collectors unexpanded, + and ``session.items`` is empty. + """ + if args is None: + args = self.config.args + + self.trace("perform_collect", self, args) + self.trace.root.indent += 1 + + hook = self.config.hook + + self._notfound = [] + self._initial_parts = [] + self._collection_cache = {} + self.items = [] + items: Sequence[nodes.Item | nodes.Collector] = self.items + try: + initialpaths: list[Path] = [] + initialpaths_with_parents: list[Path] = [] + for arg in args: + collection_argument = resolve_collection_argument( + self.config.invocation_params.dir, + arg, + as_pypath=self.config.option.pyargs, + ) + self._initial_parts.append(collection_argument) + initialpaths.append(collection_argument.path) + initialpaths_with_parents.append(collection_argument.path) + initialpaths_with_parents.extend(collection_argument.path.parents) + self._initialpaths = frozenset(initialpaths) + self._initialpaths_with_parents = frozenset(initialpaths_with_parents) + + rep = collect_one_node(self) + self.ihook.pytest_collectreport(report=rep) + self.trace.root.indent -= 1 + if self._notfound: + errors = [] + for arg, collectors in self._notfound: + if collectors: + errors.append( + f"not found: {arg}\n(no match in any of {collectors!r})" + ) + else: + errors.append(f"found no collectors for {arg}") + + raise UsageError(*errors) + + if not genitems: + items = rep.result + else: + if rep.passed: + for node in rep.result: + self.items.extend(self.genitems(node)) + + self.config.pluginmanager.check_pending() + hook.pytest_collection_modifyitems( + session=self, config=self.config, items=items + ) + finally: + self._notfound = [] + self._initial_parts = [] + self._collection_cache = {} + hook.pytest_collection_finish(session=self) + + if genitems: + self.testscollected = len(items) + + return items + + def _collect_one_node( + self, + node: nodes.Collector, + handle_dupes: bool = True, + ) -> tuple[CollectReport, bool]: + if node in self._collection_cache and handle_dupes: + rep = self._collection_cache[node] + return rep, True + else: + rep = collect_one_node(node) + self._collection_cache[node] = rep + return rep, False + + def collect(self) -> Iterator[nodes.Item | nodes.Collector]: + # This is a cache for the root directories of the initial paths. + # We can't use collection_cache for Session because of its special + # role as the bootstrapping collector. + path_cache: dict[Path, Sequence[nodes.Collector]] = {} + + pm = self.config.pluginmanager + + for collection_argument in self._initial_parts: + self.trace("processing argument", collection_argument) + self.trace.root.indent += 1 + + argpath = collection_argument.path + names = collection_argument.parts + module_name = collection_argument.module_name + + # resolve_collection_argument() ensures this. + if argpath.is_dir(): + assert not names, f"invalid arg {(argpath, names)!r}" + + paths = [argpath] + # Add relevant parents of the path, from the root, e.g. + # /a/b/c.py -> [/, /a, /a/b, /a/b/c.py] + if module_name is None: + # Paths outside of the confcutdir should not be considered. 
+ for path in argpath.parents: + if not pm._is_in_confcutdir(path): + break + paths.insert(0, path) + else: + # For --pyargs arguments, only consider paths matching the module + # name. Paths beyond the package hierarchy are not included. + module_name_parts = module_name.split(".") + for i, path in enumerate(argpath.parents, 2): + if i > len(module_name_parts) or path.stem != module_name_parts[-i]: + break + paths.insert(0, path) + + # Start going over the parts from the root, collecting each level + # and discarding all nodes which don't match the level's part. + any_matched_in_initial_part = False + notfound_collectors = [] + work: list[tuple[nodes.Collector | nodes.Item, list[Path | str]]] = [ + (self, [*paths, *names]) + ] + while work: + matchnode, matchparts = work.pop() + + # Pop'd all of the parts, this is a match. + if not matchparts: + yield matchnode + any_matched_in_initial_part = True + continue + + # Should have been matched by now, discard. + if not isinstance(matchnode, nodes.Collector): + continue + + # Collect this level of matching. + # Collecting Session (self) is done directly to avoid endless + # recursion to this function. + subnodes: Sequence[nodes.Collector | nodes.Item] + if isinstance(matchnode, Session): + assert isinstance(matchparts[0], Path) + subnodes = matchnode._collect_path(matchparts[0], path_cache) + else: + # For backward compat, files given directly multiple + # times on the command line should not be deduplicated. + handle_dupes = not ( + len(matchparts) == 1 + and isinstance(matchparts[0], Path) + and matchparts[0].is_file() + ) + rep, duplicate = self._collect_one_node(matchnode, handle_dupes) + if not duplicate and not rep.passed: + # Report collection failures here to avoid failing to + # run some test specified in the command line because + # the module could not be imported (#134). + matchnode.ihook.pytest_collectreport(report=rep) + if not rep.passed: + continue + subnodes = rep.result + + # Prune this level. + any_matched_in_collector = False + for node in reversed(subnodes): + # Path part e.g. `/a/b/` in `/a/b/test_file.py::TestIt::test_it`. + if isinstance(matchparts[0], Path): + is_match = node.path == matchparts[0] + if sys.platform == "win32" and not is_match: + # In case the file paths do not match, fallback to samefile() to + # account for short-paths on Windows (#11895). + same_file = os.path.samefile(node.path, matchparts[0]) + # We don't want to match links to the current node, + # otherwise we would match the same file more than once (#12039). + is_match = same_file and ( + os.path.islink(node.path) + == os.path.islink(matchparts[0]) + ) + + # Name part e.g. `TestIt` in `/a/b/test_file.py::TestIt::test_it`. + else: + # TODO: Remove parametrized workaround once collection structure contains + # parametrization. 
+ is_match = ( + node.name == matchparts[0] + or node.name.split("[")[0] == matchparts[0] + ) + if is_match: + work.append((node, matchparts[1:])) + any_matched_in_collector = True + + if not any_matched_in_collector: + notfound_collectors.append(matchnode) + + if not any_matched_in_initial_part: + report_arg = "::".join((str(argpath), *names)) + self._notfound.append((report_arg, notfound_collectors)) + + self.trace.root.indent -= 1 + + def genitems(self, node: nodes.Item | nodes.Collector) -> Iterator[nodes.Item]: + self.trace("genitems", node) + if isinstance(node, nodes.Item): + node.ihook.pytest_itemcollected(item=node) + yield node + else: + assert isinstance(node, nodes.Collector) + keepduplicates = self.config.getoption("keepduplicates") + # For backward compat, dedup only applies to files. + handle_dupes = not (keepduplicates and isinstance(node, nodes.File)) + rep, duplicate = self._collect_one_node(node, handle_dupes) + if duplicate and not keepduplicates: + return + if rep.passed: + for subnode in rep.result: + yield from self.genitems(subnode) + if not duplicate: + node.ihook.pytest_collectreport(report=rep) + + +def search_pypath(module_name: str) -> str | None: + """Search sys.path for the given a dotted module name, and return its file + system path if found.""" + try: + spec = importlib.util.find_spec(module_name) + # AttributeError: looks like package module, but actually filename + # ImportError: module does not exist + # ValueError: not a module name + except (AttributeError, ImportError, ValueError): + return None + if spec is None or spec.origin is None or spec.origin == "namespace": + return None + elif spec.submodule_search_locations: + return os.path.dirname(spec.origin) + else: + return spec.origin + + +@dataclasses.dataclass(frozen=True) +class CollectionArgument: + """A resolved collection argument.""" + + path: Path + parts: Sequence[str] + module_name: str | None + + +def resolve_collection_argument( + invocation_path: Path, arg: str, *, as_pypath: bool = False +) -> CollectionArgument: + """Parse path arguments optionally containing selection parts and return (fspath, names). + + Command-line arguments can point to files and/or directories, and optionally contain + parts for specific tests selection, for example: + + "pkg/tests/test_foo.py::TestClass::test_foo" + + This function ensures the path exists, and returns a resolved `CollectionArgument`: + + CollectionArgument( + path=Path("/full/path/to/pkg/tests/test_foo.py"), + parts=["TestClass", "test_foo"], + module_name=None, + ) + + When as_pypath is True, expects that the command-line argument actually contains + module paths instead of file-system paths: + + "pkg.tests.test_foo::TestClass::test_foo" + + In which case we search sys.path for a matching module, and then return the *path* to the + found module, which may look like this: + + CollectionArgument( + path=Path("/home/u/myvenv/lib/site-packages/pkg/tests/test_foo.py"), + parts=["TestClass", "test_foo"], + module_name="pkg.tests.test_foo", + ) + + If the path doesn't exist, raise UsageError. + If the path is a directory and selection parts are present, raise UsageError. 
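+
+    Note (illustrative): a trailing parametrization suffix is re-attached to the
+    last selection part before any further splitting, so
+
+        "tests/test_foo.py::test_bar[x::y]"
+
+    resolves to parts=["test_bar[x::y]"] rather than being split on the "::"
+    inside the brackets.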
+ """ + base, squacket, rest = str(arg).partition("[") + strpath, *parts = base.split("::") + if parts: + parts[-1] = f"{parts[-1]}{squacket}{rest}" + module_name = None + if as_pypath: + pyarg_strpath = search_pypath(strpath) + if pyarg_strpath is not None: + module_name = strpath + strpath = pyarg_strpath + fspath = invocation_path / strpath + fspath = absolutepath(fspath) + if not safe_exists(fspath): + msg = ( + "module or package not found: {arg} (missing __init__.py?)" + if as_pypath + else "file or directory not found: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + if parts and fspath.is_dir(): + msg = ( + "package argument cannot contain :: selection parts: {arg}" + if as_pypath + else "directory argument cannot contain :: selection parts: {arg}" + ) + raise UsageError(msg.format(arg=arg)) + return CollectionArgument( + path=fspath, + parts=parts, + module_name=module_name, + ) diff --git a/venv/Lib/site-packages/_pytest/mark/__init__.py b/venv/Lib/site-packages/_pytest/mark/__init__.py new file mode 100644 index 0000000..a4f942c --- /dev/null +++ b/venv/Lib/site-packages/_pytest/mark/__init__.py @@ -0,0 +1,292 @@ +"""Generic mechanism for marking and selecting python functions.""" + +from __future__ import annotations + +import collections +import dataclasses +from typing import AbstractSet +from typing import Collection +from typing import Iterable +from typing import Optional +from typing import TYPE_CHECKING + +from .expression import Expression +from .expression import ParseError +from .structures import EMPTY_PARAMETERSET_OPTION +from .structures import get_empty_parameterset_mark +from .structures import Mark +from .structures import MARK_GEN +from .structures import MarkDecorator +from .structures import MarkGenerator +from .structures import ParameterSet +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import UsageError +from _pytest.config.argparsing import NOT_SET +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey + + +if TYPE_CHECKING: + from _pytest.nodes import Item + + +__all__ = [ + "MARK_GEN", + "Mark", + "MarkDecorator", + "MarkGenerator", + "ParameterSet", + "get_empty_parameterset_mark", +] + + +old_mark_config_key = StashKey[Optional[Config]]() + + +def param( + *values: object, + marks: MarkDecorator | Collection[MarkDecorator | Mark] = (), + id: str | None = None, +) -> ParameterSet: + """Specify a parameter in `pytest.mark.parametrize`_ calls or + :ref:`parametrized fixtures `. + + .. code-block:: python + + @pytest.mark.parametrize( + "test_input,expected", + [ + ("3+5", 8), + pytest.param("6*9", 42, marks=pytest.mark.xfail), + ], + ) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + :param values: Variable args of the values of the parameter set, in order. + :param marks: A single mark or a list of marks to be applied to this parameter set. + :param id: The id to attribute to this parameter set. + """ + return ParameterSet.param(*values, marks=marks, id=id) + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group._addoption( + "-k", + action="store", + dest="keyword", + default="", + metavar="EXPRESSION", + help="Only run tests which match the given substring expression. " + "An expression is a Python evaluable expression " + "where all names are substring-matched against test names " + "and their parent classes. 
Example: -k 'test_method or test_" + "other' matches all test functions and classes whose name " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. " + "-k 'not test_method and not test_other' will eliminate the matches. " + "Additionally keywords are matched to classes and functions " + "containing extra names in their 'extra_keyword_matches' set, " + "as well as functions which have names assigned directly to them. " + "The matching is case-insensitive.", + ) + + group._addoption( + "-m", + action="store", + dest="markexpr", + default="", + metavar="MARKEXPR", + help="Only run tests matching given mark expression. " + "For example: -m 'mark1 and not mark2'.", + ) + + group.addoption( + "--markers", + action="store_true", + help="show markers (builtin, plugin and per-project ones).", + ) + + parser.addini("markers", "Register new markers for test functions", "linelist") + parser.addini(EMPTY_PARAMETERSET_OPTION, "Default marker for empty parametersets") + + +@hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + import _pytest.config + + if config.option.markers: + config._do_configure() + tw = _pytest.config.create_terminal_writer(config) + for line in config.getini("markers"): + parts = line.split(":", 1) + name = parts[0] + rest = parts[1] if len(parts) == 2 else "" + tw.write(f"@pytest.mark.{name}:", bold=True) + tw.line(rest) + tw.line() + config._ensure_unconfigure() + return 0 + + return None + + +@dataclasses.dataclass +class KeywordMatcher: + """A matcher for keywords. + + Given a list of names, matches any substring of one of these names. The + string inclusion check is case-insensitive. + + Will match on the name of colitem, including the names of its parents. + Only matches names of items which are either a :class:`Class` or a + :class:`Function`. + + Additionally, matches on names in the 'extra_keyword_matches' set of + any item, as well as names directly assigned to test functions. + """ + + __slots__ = ("_names",) + + _names: AbstractSet[str] + + @classmethod + def from_item(cls, item: Item) -> KeywordMatcher: + mapped_names = set() + + # Add the names of the current item and any parent items, + # except the Session and root Directory's which are not + # interesting for matching. + import pytest + + for node in item.listchain(): + if isinstance(node, pytest.Session): + continue + if isinstance(node, pytest.Directory) and isinstance( + node.parent, pytest.Session + ): + continue + mapped_names.add(node.name) + + # Add the names added as extra keywords to current or parent items. + mapped_names.update(item.listextrakeywords()) + + # Add the names attached to the current function through direct assignment. + function_obj = getattr(item, "function", None) + if function_obj: + mapped_names.update(function_obj.__dict__) + + # Add the markers to the keywords as we no longer handle them correctly. 
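+        # (Illustrative: for a hypothetical item
+        # tests/test_mod.py::TestThing::test_it marked @pytest.mark.slow, the
+        # resulting set is roughly {"test_mod.py", "TestThing", "test_it",
+        # "slow"} -- the names that -k expressions substring-match against.)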
+ mapped_names.update(mark.name for mark in item.iter_markers()) + + return cls(mapped_names) + + def __call__(self, subname: str, /, **kwargs: str | int | bool | None) -> bool: + if kwargs: + raise UsageError("Keyword expressions do not support call parameters.") + subname = subname.lower() + names = (name.lower() for name in self._names) + + for name in names: + if subname in name: + return True + return False + + +def deselect_by_keyword(items: list[Item], config: Config) -> None: + keywordexpr = config.option.keyword.lstrip() + if not keywordexpr: + return + + expr = _parse_expression(keywordexpr, "Wrong expression passed to '-k'") + + remaining = [] + deselected = [] + for colitem in items: + if not expr.evaluate(KeywordMatcher.from_item(colitem)): + deselected.append(colitem) + else: + remaining.append(colitem) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +@dataclasses.dataclass +class MarkMatcher: + """A matcher for markers which are present. + + Tries to match on any marker names, attached to the given colitem. + """ + + __slots__ = ("own_mark_name_mapping",) + + own_mark_name_mapping: dict[str, list[Mark]] + + @classmethod + def from_markers(cls, markers: Iterable[Mark]) -> MarkMatcher: + mark_name_mapping = collections.defaultdict(list) + for mark in markers: + mark_name_mapping[mark.name].append(mark) + return cls(mark_name_mapping) + + def __call__(self, name: str, /, **kwargs: str | int | bool | None) -> bool: + if not (matches := self.own_mark_name_mapping.get(name, [])): + return False + + for mark in matches: + if all(mark.kwargs.get(k, NOT_SET) == v for k, v in kwargs.items()): + return True + + return False + + +def deselect_by_mark(items: list[Item], config: Config) -> None: + matchexpr = config.option.markexpr + if not matchexpr: + return + + expr = _parse_expression(matchexpr, "Wrong expression passed to '-m'") + remaining: list[Item] = [] + deselected: list[Item] = [] + for item in items: + if expr.evaluate(MarkMatcher.from_markers(item.iter_markers())): + remaining.append(item) + else: + deselected.append(item) + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = remaining + + +def _parse_expression(expr: str, exc_message: str) -> Expression: + try: + return Expression.compile(expr) + except ParseError as e: + raise UsageError(f"{exc_message}: {expr}: {e}") from None + + +def pytest_collection_modifyitems(items: list[Item], config: Config) -> None: + deselect_by_keyword(items, config) + deselect_by_mark(items, config) + + +def pytest_configure(config: Config) -> None: + config.stash[old_mark_config_key] = MARK_GEN._config + MARK_GEN._config = config + + empty_parameterset = config.getini(EMPTY_PARAMETERSET_OPTION) + + if empty_parameterset not in ("skip", "xfail", "fail_at_collect", None, ""): + raise UsageError( + f"{EMPTY_PARAMETERSET_OPTION!s} must be one of skip, xfail or fail_at_collect" + f" but it is {empty_parameterset!r}" + ) + + +def pytest_unconfigure(config: Config) -> None: + MARK_GEN._config = config.stash.get(old_mark_config_key, None) diff --git a/venv/Lib/site-packages/_pytest/mark/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/_pytest/mark/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..0a27b50 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/mark/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/mark/__pycache__/expression.cpython-311.pyc 
b/venv/Lib/site-packages/_pytest/mark/__pycache__/expression.cpython-311.pyc new file mode 100644 index 0000000..e6a3177 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/mark/__pycache__/expression.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/mark/__pycache__/structures.cpython-311.pyc b/venv/Lib/site-packages/_pytest/mark/__pycache__/structures.cpython-311.pyc new file mode 100644 index 0000000..39de401 Binary files /dev/null and b/venv/Lib/site-packages/_pytest/mark/__pycache__/structures.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/_pytest/mark/expression.py b/venv/Lib/site-packages/_pytest/mark/expression.py new file mode 100644 index 0000000..89cc0e9 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/mark/expression.py @@ -0,0 +1,333 @@ +r"""Evaluate match expressions, as used by `-k` and `-m`. + +The grammar is: + +expression: expr? EOF +expr: and_expr ('or' and_expr)* +and_expr: not_expr ('and' not_expr)* +not_expr: 'not' not_expr | '(' expr ')' | ident kwargs? + +ident: (\w|:|\+|-|\.|\[|\]|\\|/)+ +kwargs: ('(' name '=' value ( ', ' name '=' value )* ')') +name: a valid ident, but not a reserved keyword +value: (unescaped) string literal | (-)?[0-9]+ | 'False' | 'True' | 'None' + +The semantics are: + +- Empty expression evaluates to False. +- ident evaluates to True or False according to a provided matcher function. +- or/and/not evaluate according to the usual boolean semantics. +- ident with parentheses and keyword arguments evaluates to True or False according to a provided matcher function. +""" + +from __future__ import annotations + +import ast +import dataclasses +import enum +import keyword +import re +import types +from typing import Iterator +from typing import Literal +from typing import Mapping +from typing import NoReturn +from typing import overload +from typing import Protocol +from typing import Sequence + + +__all__ = [ + "Expression", + "ParseError", +] + + +class TokenType(enum.Enum): + LPAREN = "left parenthesis" + RPAREN = "right parenthesis" + OR = "or" + AND = "and" + NOT = "not" + IDENT = "identifier" + EOF = "end of input" + EQUAL = "=" + STRING = "string literal" + COMMA = "," + + +@dataclasses.dataclass(frozen=True) +class Token: + __slots__ = ("type", "value", "pos") + type: TokenType + value: str + pos: int + + +class ParseError(Exception): + """The expression contains invalid syntax. + + :param column: The column in the line where the error occurred (1-based). + :param message: A description of the error. 
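+
+    For example (illustrative; the wording mirrors ``Scanner.reject`` below):
+
+    .. code-block:: python
+
+        Expression.compile("slow or")
+        # ParseError: at column 8: expected not OR left parenthesis
+        # OR identifier; got end of input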
+ """ + + def __init__(self, column: int, message: str) -> None: + self.column = column + self.message = message + + def __str__(self) -> str: + return f"at column {self.column}: {self.message}" + + +class Scanner: + __slots__ = ("tokens", "current") + + def __init__(self, input: str) -> None: + self.tokens = self.lex(input) + self.current = next(self.tokens) + + def lex(self, input: str) -> Iterator[Token]: + pos = 0 + while pos < len(input): + if input[pos] in (" ", "\t"): + pos += 1 + elif input[pos] == "(": + yield Token(TokenType.LPAREN, "(", pos) + pos += 1 + elif input[pos] == ")": + yield Token(TokenType.RPAREN, ")", pos) + pos += 1 + elif input[pos] == "=": + yield Token(TokenType.EQUAL, "=", pos) + pos += 1 + elif input[pos] == ",": + yield Token(TokenType.COMMA, ",", pos) + pos += 1 + elif (quote_char := input[pos]) in ("'", '"'): + end_quote_pos = input.find(quote_char, pos + 1) + if end_quote_pos == -1: + raise ParseError( + pos + 1, + f'closing quote "{quote_char}" is missing', + ) + value = input[pos : end_quote_pos + 1] + if (backslash_pos := input.find("\\")) != -1: + raise ParseError( + backslash_pos + 1, + r'escaping with "\" not supported in marker expression', + ) + yield Token(TokenType.STRING, value, pos) + pos += len(value) + else: + match = re.match(r"(:?\w|:|\+|-|\.|\[|\]|\\|/)+", input[pos:]) + if match: + value = match.group(0) + if value == "or": + yield Token(TokenType.OR, value, pos) + elif value == "and": + yield Token(TokenType.AND, value, pos) + elif value == "not": + yield Token(TokenType.NOT, value, pos) + else: + yield Token(TokenType.IDENT, value, pos) + pos += len(value) + else: + raise ParseError( + pos + 1, + f'unexpected character "{input[pos]}"', + ) + yield Token(TokenType.EOF, "", pos) + + @overload + def accept(self, type: TokenType, *, reject: Literal[True]) -> Token: ... + + @overload + def accept( + self, type: TokenType, *, reject: Literal[False] = False + ) -> Token | None: ... + + def accept(self, type: TokenType, *, reject: bool = False) -> Token | None: + if self.current.type is type: + token = self.current + if token.type is not TokenType.EOF: + self.current = next(self.tokens) + return token + if reject: + self.reject((type,)) + return None + + def reject(self, expected: Sequence[TokenType]) -> NoReturn: + raise ParseError( + self.current.pos + 1, + "expected {}; got {}".format( + " OR ".join(type.value for type in expected), + self.current.type.value, + ), + ) + + +# True, False and None are legal match expression identifiers, +# but illegal as Python identifiers. To fix this, this prefix +# is added to identifiers in the conversion to Python AST. 
+IDENT_PREFIX = "$" + + +def expression(s: Scanner) -> ast.Expression: + if s.accept(TokenType.EOF): + ret: ast.expr = ast.Constant(False) + else: + ret = expr(s) + s.accept(TokenType.EOF, reject=True) + return ast.fix_missing_locations(ast.Expression(ret)) + + +def expr(s: Scanner) -> ast.expr: + ret = and_expr(s) + while s.accept(TokenType.OR): + rhs = and_expr(s) + ret = ast.BoolOp(ast.Or(), [ret, rhs]) + return ret + + +def and_expr(s: Scanner) -> ast.expr: + ret = not_expr(s) + while s.accept(TokenType.AND): + rhs = not_expr(s) + ret = ast.BoolOp(ast.And(), [ret, rhs]) + return ret + + +def not_expr(s: Scanner) -> ast.expr: + if s.accept(TokenType.NOT): + return ast.UnaryOp(ast.Not(), not_expr(s)) + if s.accept(TokenType.LPAREN): + ret = expr(s) + s.accept(TokenType.RPAREN, reject=True) + return ret + ident = s.accept(TokenType.IDENT) + if ident: + name = ast.Name(IDENT_PREFIX + ident.value, ast.Load()) + if s.accept(TokenType.LPAREN): + ret = ast.Call(func=name, args=[], keywords=all_kwargs(s)) + s.accept(TokenType.RPAREN, reject=True) + else: + ret = name + return ret + + s.reject((TokenType.NOT, TokenType.LPAREN, TokenType.IDENT)) + + +BUILTIN_MATCHERS = {"True": True, "False": False, "None": None} + + +def single_kwarg(s: Scanner) -> ast.keyword: + keyword_name = s.accept(TokenType.IDENT, reject=True) + if not keyword_name.value.isidentifier(): + raise ParseError( + keyword_name.pos + 1, + f"not a valid python identifier {keyword_name.value}", + ) + if keyword.iskeyword(keyword_name.value): + raise ParseError( + keyword_name.pos + 1, + f"unexpected reserved python keyword `{keyword_name.value}`", + ) + s.accept(TokenType.EQUAL, reject=True) + + if value_token := s.accept(TokenType.STRING): + value: str | int | bool | None = value_token.value[1:-1] # strip quotes + else: + value_token = s.accept(TokenType.IDENT, reject=True) + if ( + (number := value_token.value).isdigit() + or number.startswith("-") + and number[1:].isdigit() + ): + value = int(number) + elif value_token.value in BUILTIN_MATCHERS: + value = BUILTIN_MATCHERS[value_token.value] + else: + raise ParseError( + value_token.pos + 1, + f'unexpected character/s "{value_token.value}"', + ) + + ret = ast.keyword(keyword_name.value, ast.Constant(value)) + return ret + + +def all_kwargs(s: Scanner) -> list[ast.keyword]: + ret = [single_kwarg(s)] + while s.accept(TokenType.COMMA): + ret.append(single_kwarg(s)) + return ret + + +class MatcherCall(Protocol): + def __call__(self, name: str, /, **kwargs: str | int | bool | None) -> bool: ... + + +@dataclasses.dataclass +class MatcherNameAdapter: + matcher: MatcherCall + name: str + + def __bool__(self) -> bool: + return self.matcher(self.name) + + def __call__(self, **kwargs: str | int | bool | None) -> bool: + return self.matcher(self.name, **kwargs) + + +class MatcherAdapter(Mapping[str, MatcherNameAdapter]): + """Adapts a matcher function to a locals mapping as required by eval().""" + + def __init__(self, matcher: MatcherCall) -> None: + self.matcher = matcher + + def __getitem__(self, key: str) -> MatcherNameAdapter: + return MatcherNameAdapter(matcher=self.matcher, name=key[len(IDENT_PREFIX) :]) + + def __iter__(self) -> Iterator[str]: + raise NotImplementedError() + + def __len__(self) -> int: + raise NotImplementedError() + + +class Expression: + """A compiled match expression as used by -k and -m. + + The expression can be evaluated against different matchers. 
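+
+    A minimal usage sketch (illustrative):
+
+    .. code-block:: python
+
+        expr = Expression.compile("linux and not slow")
+        expr.evaluate(lambda name: name == "linux")  # True
+        expr.evaluate(lambda name: name in ("linux", "slow"))  # False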
+ """ + + __slots__ = ("code",) + + def __init__(self, code: types.CodeType) -> None: + self.code = code + + @classmethod + def compile(self, input: str) -> Expression: + """Compile a match expression. + + :param input: The input expression - one line. + """ + astexpr = expression(Scanner(input)) + code: types.CodeType = compile( + astexpr, + filename="", + mode="eval", + ) + return Expression(code) + + def evaluate(self, matcher: MatcherCall) -> bool: + """Evaluate the match expression. + + :param matcher: + Given an identifier, should return whether it matches or not. + Should be prepared to handle arbitrary strings as input. + + :returns: Whether the expression matches or not. + """ + ret: bool = bool(eval(self.code, {"__builtins__": {}}, MatcherAdapter(matcher))) + return ret diff --git a/venv/Lib/site-packages/_pytest/mark/structures.py b/venv/Lib/site-packages/_pytest/mark/structures.py new file mode 100644 index 0000000..92ade55 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/mark/structures.py @@ -0,0 +1,615 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import collections.abc +import dataclasses +import inspect +from typing import Any +from typing import Callable +from typing import Collection +from typing import final +from typing import Iterable +from typing import Iterator +from typing import Mapping +from typing import MutableMapping +from typing import NamedTuple +from typing import overload +from typing import Sequence +from typing import TYPE_CHECKING +from typing import TypeVar +from typing import Union +import warnings + +from .._code import getfslineno +from ..compat import ascii_escaped +from ..compat import NOTSET +from ..compat import NotSetType +from _pytest.config import Config +from _pytest.deprecated import check_ispytest +from _pytest.deprecated import MARKED_FIXTURE +from _pytest.outcomes import fail +from _pytest.scope import _ScopeName +from _pytest.warning_types import PytestUnknownMarkWarning + + +if TYPE_CHECKING: + from ..nodes import Node + + +EMPTY_PARAMETERSET_OPTION = "empty_parameter_set_mark" + + +def istestfunc(func) -> bool: + return callable(func) and getattr(func, "__name__", "") != "" + + +def get_empty_parameterset_mark( + config: Config, argnames: Sequence[str], func +) -> MarkDecorator: + from ..nodes import Collector + + fs, lineno = getfslineno(func) + reason = "got empty parameter set %r, function %s at %s:%d" % ( + argnames, + func.__name__, + fs, + lineno, + ) + + requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) + if requested_mark in ("", None, "skip"): + mark = MARK_GEN.skip(reason=reason) + elif requested_mark == "xfail": + mark = MARK_GEN.xfail(reason=reason, run=False) + elif requested_mark == "fail_at_collect": + f_name = func.__name__ + _, lineno = getfslineno(func) + raise Collector.CollectError( + "Empty parameter set in '%s' at line %d" % (f_name, lineno + 1) + ) + else: + raise LookupError(requested_mark) + return mark + + +class ParameterSet(NamedTuple): + values: Sequence[object | NotSetType] + marks: Collection[MarkDecorator | Mark] + id: str | None + + @classmethod + def param( + cls, + *values: object, + marks: MarkDecorator | Collection[MarkDecorator | Mark] = (), + id: str | None = None, + ) -> ParameterSet: + if isinstance(marks, MarkDecorator): + marks = (marks,) + else: + assert isinstance(marks, collections.abc.Collection) + + if id is not None: + if not isinstance(id, str): + raise TypeError(f"Expected id to be a string, got {type(id)}: {id!r}") + id = ascii_escaped(id) + return 
cls(values, marks, id) + + @classmethod + def extract_from( + cls, + parameterset: ParameterSet | Sequence[object] | object, + force_tuple: bool = False, + ) -> ParameterSet: + """Extract from an object or objects. + + :param parameterset: + A legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects. + + :param force_tuple: + Enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests. + """ + if isinstance(parameterset, cls): + return parameterset + if force_tuple: + return cls.param(parameterset) + else: + # TODO: Refactor to fix this type-ignore. Currently the following + # passes type-checking but crashes: + # + # @pytest.mark.parametrize(('x', 'y'), [1, 2]) + # def test_foo(x, y): pass + return cls(parameterset, marks=[], id=None) # type: ignore[arg-type] + + @staticmethod + def _parse_parametrize_args( + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + *args, + **kwargs, + ) -> tuple[Sequence[str], bool]: + if isinstance(argnames, str): + argnames = [x.strip() for x in argnames.split(",") if x.strip()] + force_tuple = len(argnames) == 1 + else: + force_tuple = False + return argnames, force_tuple + + @staticmethod + def _parse_parametrize_parameters( + argvalues: Iterable[ParameterSet | Sequence[object] | object], + force_tuple: bool, + ) -> list[ParameterSet]: + return [ + ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues + ] + + @classmethod + def _for_parametrize( + cls, + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + func, + config: Config, + nodeid: str, + ) -> tuple[Sequence[str], list[ParameterSet]]: + argnames, force_tuple = cls._parse_parametrize_args(argnames, argvalues) + parameters = cls._parse_parametrize_parameters(argvalues, force_tuple) + del argvalues + + if parameters: + # Check all parameter sets have the correct number of values. + for param in parameters: + if len(param.values) != len(argnames): + msg = ( + '{nodeid}: in "parametrize" the number of names ({names_len}):\n' + " {names}\n" + "must be equal to the number of values ({values_len}):\n" + " {values}" + ) + fail( + msg.format( + nodeid=nodeid, + values=param.values, + names=argnames, + names_len=len(argnames), + values_len=len(param.values), + ), + pytrace=False, + ) + else: + # Empty parameter set (likely computed at runtime): create a single + # parameter set with NOTSET values, with the "empty parameter set" mark applied to it. + mark = get_empty_parameterset_mark(config, argnames, func) + parameters.append( + ParameterSet(values=(NOTSET,) * len(argnames), marks=[mark], id=None) + ) + return argnames, parameters + + +@final +@dataclasses.dataclass(frozen=True) +class Mark: + """A pytest mark.""" + + #: Name of the mark. + name: str + #: Positional arguments of the mark decorator. + args: tuple[Any, ...] + #: Keyword arguments of the mark decorator. + kwargs: Mapping[str, Any] + + #: Source Mark for ids with parametrize Marks. + _param_ids_from: Mark | None = dataclasses.field(default=None, repr=False) + #: Resolved/generated ids with parametrize Marks. 
+ _param_ids_generated: Sequence[str] | None = dataclasses.field( + default=None, repr=False + ) + + def __init__( + self, + name: str, + args: tuple[Any, ...], + kwargs: Mapping[str, Any], + param_ids_from: Mark | None = None, + param_ids_generated: Sequence[str] | None = None, + *, + _ispytest: bool = False, + ) -> None: + """:meta private:""" + check_ispytest(_ispytest) + # Weirdness to bypass frozen=True. + object.__setattr__(self, "name", name) + object.__setattr__(self, "args", args) + object.__setattr__(self, "kwargs", kwargs) + object.__setattr__(self, "_param_ids_from", param_ids_from) + object.__setattr__(self, "_param_ids_generated", param_ids_generated) + + def _has_param_ids(self) -> bool: + return "ids" in self.kwargs or len(self.args) >= 4 + + def combined_with(self, other: Mark) -> Mark: + """Return a new Mark which is a combination of this + Mark and another Mark. + + Combines by appending args and merging kwargs. + + :param Mark other: The mark to combine with. + :rtype: Mark + """ + assert self.name == other.name + + # Remember source of ids with parametrize Marks. + param_ids_from: Mark | None = None + if self.name == "parametrize": + if other._has_param_ids(): + param_ids_from = other + elif self._has_param_ids(): + param_ids_from = self + + return Mark( + self.name, + self.args + other.args, + dict(self.kwargs, **other.kwargs), + param_ids_from=param_ids_from, + _ispytest=True, + ) + + +# A generic parameter designating an object to which a Mark may +# be applied -- a test function (callable) or class. +# Note: a lambda is not allowed, but this can't be represented. +Markable = TypeVar("Markable", bound=Union[Callable[..., object], type]) + + +@dataclasses.dataclass +class MarkDecorator: + """A decorator for applying a mark on test functions and classes. + + ``MarkDecorators`` are created with ``pytest.mark``:: + + mark1 = pytest.mark.NAME # Simple MarkDecorator + mark2 = pytest.mark.NAME(name1=value) # Parametrized MarkDecorator + + and can then be applied as decorators to test functions:: + + @mark2 + def test_function(): + pass + + When a ``MarkDecorator`` is called, it does the following: + + 1. If called with a single class as its only positional argument and no + additional keyword arguments, it attaches the mark to the class so it + gets applied automatically to all test cases found in that class. + + 2. If called with a single function as its only positional argument and + no additional keyword arguments, it attaches the mark to the function, + containing all the arguments already stored internally in the + ``MarkDecorator``. + + 3. When called in any other case, it returns a new ``MarkDecorator`` + instance with the original ``MarkDecorator``'s content updated with + the arguments passed to this call. + + Note: The rules above prevent a ``MarkDecorator`` from storing only a + single function or class reference as its positional argument with no + additional keyword or positional arguments. You can work around this by + using `with_args()`. 
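+
+    For example (illustrative; ``SomeClass`` is a stand-in name):
+
+    .. code-block:: python
+
+        # Calling the decorator would apply the mark to SomeClass (rule 1);
+        # with_args() stores the class as the mark's argument instead.
+        mark = pytest.mark.uses_class.with_args(SomeClass)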
+ """ + + mark: Mark + + def __init__(self, mark: Mark, *, _ispytest: bool = False) -> None: + """:meta private:""" + check_ispytest(_ispytest) + self.mark = mark + + @property + def name(self) -> str: + """Alias for mark.name.""" + return self.mark.name + + @property + def args(self) -> tuple[Any, ...]: + """Alias for mark.args.""" + return self.mark.args + + @property + def kwargs(self) -> Mapping[str, Any]: + """Alias for mark.kwargs.""" + return self.mark.kwargs + + @property + def markname(self) -> str: + """:meta private:""" + return self.name # for backward-compat (2.4.1 had this attr) + + def with_args(self, *args: object, **kwargs: object) -> MarkDecorator: + """Return a MarkDecorator with extra arguments added. + + Unlike calling the MarkDecorator, with_args() can be used even + if the sole argument is a callable/class. + """ + mark = Mark(self.name, args, kwargs, _ispytest=True) + return MarkDecorator(self.mark.combined_with(mark), _ispytest=True) + + # Type ignored because the overloads overlap with an incompatible + # return type. Not much we can do about that. Thankfully mypy picks + # the first match so it works out even if we break the rules. + @overload + def __call__(self, arg: Markable) -> Markable: # type: ignore[overload-overlap] + pass + + @overload + def __call__(self, *args: object, **kwargs: object) -> MarkDecorator: + pass + + def __call__(self, *args: object, **kwargs: object): + """Call the MarkDecorator.""" + if args and not kwargs: + func = args[0] + is_class = inspect.isclass(func) + if len(args) == 1 and (istestfunc(func) or is_class): + store_mark(func, self.mark, stacklevel=3) + return func + return self.with_args(*args, **kwargs) + + +def get_unpacked_marks( + obj: object | type, + *, + consider_mro: bool = True, +) -> list[Mark]: + """Obtain the unpacked marks that are stored on an object. + + If obj is a class and consider_mro is true, return marks applied to + this class and all of its super-classes in MRO order. If consider_mro + is false, only return marks applied directly to this class. + """ + if isinstance(obj, type): + if not consider_mro: + mark_lists = [obj.__dict__.get("pytestmark", [])] + else: + mark_lists = [ + x.__dict__.get("pytestmark", []) for x in reversed(obj.__mro__) + ] + mark_list = [] + for item in mark_lists: + if isinstance(item, list): + mark_list.extend(item) + else: + mark_list.append(item) + else: + mark_attribute = getattr(obj, "pytestmark", []) + if isinstance(mark_attribute, list): + mark_list = mark_attribute + else: + mark_list = [mark_attribute] + return list(normalize_mark_list(mark_list)) + + +def normalize_mark_list( + mark_list: Iterable[Mark | MarkDecorator], +) -> Iterable[Mark]: + """ + Normalize an iterable of Mark or MarkDecorator objects into a list of marks + by retrieving the `mark` attribute on MarkDecorator instances. + + :param mark_list: marks to normalize + :returns: A new list of the extracted Mark objects + """ + for mark in mark_list: + mark_obj = getattr(mark, "mark", mark) + if not isinstance(mark_obj, Mark): + raise TypeError(f"got {mark_obj!r} instead of Mark") + yield mark_obj + + +def store_mark(obj, mark: Mark, *, stacklevel: int = 2) -> None: + """Store a Mark on an object. + + This is used to implement the Mark declarations/decorators correctly. 
+ """ + assert isinstance(mark, Mark), mark + + from ..fixtures import getfixturemarker + + if getfixturemarker(obj) is not None: + warnings.warn(MARKED_FIXTURE, stacklevel=stacklevel) + + # Always reassign name to avoid updating pytestmark in a reference that + # was only borrowed. + obj.pytestmark = [*get_unpacked_marks(obj, consider_mro=False), mark] + + +# Typing for builtin pytest marks. This is cheating; it gives builtin marks +# special privilege, and breaks modularity. But practicality beats purity... +if TYPE_CHECKING: + + class _SkipMarkDecorator(MarkDecorator): + @overload # type: ignore[override,no-overload-impl] + def __call__(self, arg: Markable) -> Markable: ... + + @overload + def __call__(self, reason: str = ...) -> MarkDecorator: ... + + class _SkipifMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + condition: str | bool = ..., + *conditions: str | bool, + reason: str = ..., + ) -> MarkDecorator: ... + + class _XfailMarkDecorator(MarkDecorator): + @overload # type: ignore[override,no-overload-impl] + def __call__(self, arg: Markable) -> Markable: ... + + @overload + def __call__( + self, + condition: str | bool = False, + *conditions: str | bool, + reason: str = ..., + run: bool = ..., + raises: None | type[BaseException] | tuple[type[BaseException], ...] = ..., + strict: bool = ..., + ) -> MarkDecorator: ... + + class _ParametrizeMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + *, + indirect: bool | Sequence[str] = ..., + ids: Iterable[None | str | float | int | bool] + | Callable[[Any], object | None] + | None = ..., + scope: _ScopeName | None = ..., + ) -> MarkDecorator: ... + + class _UsefixturesMarkDecorator(MarkDecorator): + def __call__(self, *fixtures: str) -> MarkDecorator: # type: ignore[override] + ... + + class _FilterwarningsMarkDecorator(MarkDecorator): + def __call__(self, *filters: str) -> MarkDecorator: # type: ignore[override] + ... + + +@final +class MarkGenerator: + """Factory for :class:`MarkDecorator` objects - exposed as + a ``pytest.mark`` singleton instance. + + Example:: + + import pytest + + + @pytest.mark.slowtest + def test_function(): + pass + + applies a 'slowtest' :class:`Mark` on ``test_function``. + """ + + # See TYPE_CHECKING above. + if TYPE_CHECKING: + skip: _SkipMarkDecorator + skipif: _SkipifMarkDecorator + xfail: _XfailMarkDecorator + parametrize: _ParametrizeMarkDecorator + usefixtures: _UsefixturesMarkDecorator + filterwarnings: _FilterwarningsMarkDecorator + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + self._config: Config | None = None + self._markers: set[str] = set() + + def __getattr__(self, name: str) -> MarkDecorator: + """Generate a new :class:`MarkDecorator` with the given name.""" + if name[0] == "_": + raise AttributeError("Marker name must NOT start with underscore") + + if self._config is not None: + # We store a set of markers as a performance optimisation - if a mark + # name is in the set we definitely know it, but a mark may be known and + # not in the set. We therefore start by updating the set! + if name not in self._markers: + for line in self._config.getini("markers"): + # example lines: "skipif(condition): skip the given test if..." + # or "hypothesis: tests which use Hypothesis", so to get the + # marker name we split on both `:` and `(`. 
+ marker = line.split(":")[0].split("(")[0].strip() + self._markers.add(marker) + + # If the name is not in the set of known marks after updating, + # then it really is time to issue a warning or an error. + if name not in self._markers: + if self._config.option.strict_markers or self._config.option.strict: + fail( + f"{name!r} not found in `markers` configuration option", + pytrace=False, + ) + + # Raise a specific error for common misspellings of "parametrize". + if name in ["parameterize", "parametrise", "parameterise"]: + __tracebackhide__ = True + fail(f"Unknown '{name}' mark, did you mean 'parametrize'?") + + warnings.warn( + f"Unknown pytest.mark.{name} - is this a typo? You can register " + "custom marks to avoid this warning - for details, see " + "https://docs.pytest.org/en/stable/how-to/mark.html", + PytestUnknownMarkWarning, + 2, + ) + + return MarkDecorator(Mark(name, (), {}, _ispytest=True), _ispytest=True) + + +MARK_GEN = MarkGenerator(_ispytest=True) + + +@final +class NodeKeywords(MutableMapping[str, Any]): + __slots__ = ("node", "parent", "_markers") + + def __init__(self, node: Node) -> None: + self.node = node + self.parent = node.parent + self._markers = {node.name: True} + + def __getitem__(self, key: str) -> Any: + try: + return self._markers[key] + except KeyError: + if self.parent is None: + raise + return self.parent.keywords[key] + + def __setitem__(self, key: str, value: Any) -> None: + self._markers[key] = value + + # Note: we could've avoided explicitly implementing some of the methods + # below and use the collections.abc fallback, but that would be slow. + + def __contains__(self, key: object) -> bool: + return ( + key in self._markers + or self.parent is not None + and key in self.parent.keywords + ) + + def update( # type: ignore[override] + self, + other: Mapping[str, Any] | Iterable[tuple[str, Any]] = (), + **kwds: Any, + ) -> None: + self._markers.update(other) + self._markers.update(kwds) + + def __delitem__(self, key: str) -> None: + raise ValueError("cannot delete key in keywords dict") + + def __iter__(self) -> Iterator[str]: + # Doesn't need to be fast. + yield from self._markers + if self.parent is not None: + for keyword in self.parent.keywords: + # self._marks and self.parent.keywords can have duplicates. + if keyword not in self._markers: + yield keyword + + def __len__(self) -> int: + # Doesn't need to be fast. + return sum(1 for keyword in self) + + def __repr__(self) -> str: + return f"" diff --git a/venv/Lib/site-packages/_pytest/monkeypatch.py b/venv/Lib/site-packages/_pytest/monkeypatch.py new file mode 100644 index 0000000..46eb172 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/monkeypatch.py @@ -0,0 +1,415 @@ +# mypy: allow-untyped-defs +"""Monkeypatching and mocking functionality.""" + +from __future__ import annotations + +from contextlib import contextmanager +import os +import re +import sys +from typing import Any +from typing import final +from typing import Generator +from typing import Mapping +from typing import MutableMapping +from typing import overload +from typing import TypeVar +import warnings + +from _pytest.fixtures import fixture +from _pytest.warning_types import PytestWarning + + +RE_IMPORT_ERROR_NAME = re.compile(r"^No module named (.*)$") + + +K = TypeVar("K") +V = TypeVar("V") + + +@fixture +def monkeypatch() -> Generator[MonkeyPatch]: + """A convenient fixture for monkey-patching. 
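+
+    A minimal sketch (illustrative; assumes ``os`` is imported in the test module):
+
+    .. code-block:: python
+
+        def test_home(monkeypatch, tmp_path):
+            monkeypatch.setenv("HOME", str(tmp_path))
+            assert os.environ["HOME"] == str(tmp_path)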
+
+    The fixture provides these methods to modify objects, dictionaries, or
+    :data:`os.environ`:
+
+    * :meth:`monkeypatch.setattr(obj, name, value, raising=True) <pytest.MonkeyPatch.setattr>`
+    * :meth:`monkeypatch.delattr(obj, name, raising=True) <pytest.MonkeyPatch.delattr>`
+    * :meth:`monkeypatch.setitem(mapping, name, value) <pytest.MonkeyPatch.setitem>`
+    * :meth:`monkeypatch.delitem(obj, name, raising=True) <pytest.MonkeyPatch.delitem>`
+    * :meth:`monkeypatch.setenv(name, value, prepend=None) <pytest.MonkeyPatch.setenv>`
+    * :meth:`monkeypatch.delenv(name, raising=True) <pytest.MonkeyPatch.delenv>`
+    * :meth:`monkeypatch.syspath_prepend(path) <pytest.MonkeyPatch.syspath_prepend>`
+    * :meth:`monkeypatch.chdir(path) <pytest.MonkeyPatch.chdir>`
+    * :meth:`monkeypatch.context() <pytest.MonkeyPatch.context>`
+
+    All modifications will be undone after the requesting test function or
+    fixture has finished. The ``raising`` parameter determines if a :class:`KeyError`
+    or :class:`AttributeError` will be raised if the set/deletion operation does not have the
+    specified target.
+
+    To undo modifications done by the fixture in a contained scope,
+    use :meth:`context() <pytest.MonkeyPatch.context>`.
+    """
+    mpatch = MonkeyPatch()
+    yield mpatch
+    mpatch.undo()
+
+
+def resolve(name: str) -> object:
+    # Simplified from zope.dottedname.
+    parts = name.split(".")
+
+    used = parts.pop(0)
+    found: object = __import__(used)
+    for part in parts:
+        used += "." + part
+        try:
+            found = getattr(found, part)
+        except AttributeError:
+            pass
+        else:
+            continue
+        # We use explicit un-nesting of the handling block in order
+        # to avoid nested exceptions.
+        try:
+            __import__(used)
+        except ImportError as ex:
+            expected = str(ex).split()[-1]
+            if expected == used:
+                raise
+            else:
+                raise ImportError(f"import error in {used}: {ex}") from ex
+        found = annotated_getattr(found, part, used)
+    return found
+
+
+def annotated_getattr(obj: object, name: str, ann: str) -> object:
+    try:
+        obj = getattr(obj, name)
+    except AttributeError as e:
+        raise AttributeError(
+            f"{type(obj).__name__!r} object at {ann} has no attribute {name!r}"
+        ) from e
+    return obj
+
+
+def derive_importpath(import_path: str, raising: bool) -> tuple[str, object]:
+    if not isinstance(import_path, str) or "." not in import_path:
+        raise TypeError(f"must be absolute import path string, not {import_path!r}")
+    module, attr = import_path.rsplit(".", 1)
+    target = resolve(module)
+    if raising:
+        annotated_getattr(target, attr, ann=module)
+    return attr, target
+
+
+class Notset:
+    def __repr__(self) -> str:
+        return "<notset>"
+
+
+notset = Notset()
+
+
+@final
+class MonkeyPatch:
+    """Helper to conveniently monkeypatch attributes/items/environment
+    variables/syspath.
+
+    Returned by the :fixture:`monkeypatch` fixture.
+
+    .. versionchanged:: 6.2
+        Can now also be used directly as `pytest.MonkeyPatch()`, for when
+        the fixture is not available. In this case, use
+        :meth:`with MonkeyPatch.context() as mp: <pytest.MonkeyPatch.context>` or remember to call
+        :meth:`undo` explicitly.
+    """
+
+    def __init__(self) -> None:
+        self._setattr: list[tuple[object, str, object]] = []
+        self._setitem: list[tuple[Mapping[Any, Any], object, object]] = []
+        self._cwd: str | None = None
+        self._savesyspath: list[str] | None = None
+
+    @classmethod
+    @contextmanager
+    def context(cls) -> Generator[MonkeyPatch]:
+        """Context manager that returns a new :class:`MonkeyPatch` object
+        which undoes any patching done inside the ``with`` block upon exit.
+
+        Example:
+
+        .. code-block:: python
+
+            import functools
+
+
+            def test_partial(monkeypatch):
+                with monkeypatch.context() as m:
+                    m.setattr(functools, "partial", 3)
+
+        Useful in situations where it is desired to undo some patches before the test ends,
+        such as mocking ``stdlib`` functions that might break pytest itself if mocked (for examples
+        of this see :issue:`3290`).
+        """
+        m = cls()
+        try:
+            yield m
+        finally:
+            m.undo()
+
+    @overload
+    def setattr(
+        self,
+        target: str,
+        name: object,
+        value: Notset = ...,
+        raising: bool = ...,
+    ) -> None: ...
+
+    @overload
+    def setattr(
+        self,
+        target: object,
+        name: str,
+        value: object,
+        raising: bool = ...,
+    ) -> None: ...
+
+    def setattr(
+        self,
+        target: str | object,
+        name: object | str,
+        value: object = notset,
+        raising: bool = True,
+    ) -> None:
+        """
+        Set attribute value on target, memorizing the old value.
+
+        For example:
+
+        .. code-block:: python
+
+            import os
+
+            monkeypatch.setattr(os, "getcwd", lambda: "/")
+
+        The code above replaces the :func:`os.getcwd` function by a ``lambda`` which
+        always returns ``"/"``.
+
+        For convenience, you can specify a string as ``target`` which
+        will be interpreted as a dotted import path, with the last part
+        being the attribute name:
+
+        .. code-block:: python
+
+            monkeypatch.setattr("os.getcwd", lambda: "/")
+
+        Raises :class:`AttributeError` if the attribute does not exist, unless
+        ``raising`` is set to False.
+
+        **Where to patch**
+
+        ``monkeypatch.setattr`` works by (temporarily) changing the object that a name points to with another one.
+        There can be many names pointing to any individual object, so for patching to work you must ensure
+        that you patch the name used by the system under test.
+
+        See the section :ref:`Where to patch <where-to-patch>` in the :mod:`unittest.mock`
+        docs for a complete explanation, which is meant for :func:`unittest.mock.patch` but
+        applies to ``monkeypatch.setattr`` as well.
+        """
+        __tracebackhide__ = True
+        import inspect
+
+        if isinstance(value, Notset):
+            if not isinstance(target, str):
+                raise TypeError(
+                    "use setattr(target, name, value) or "
+                    "setattr(target, value) with target being a dotted "
+                    "import string"
+                )
+            value = name
+            name, target = derive_importpath(target, raising)
+        else:
+            if not isinstance(name, str):
+                raise TypeError(
+                    "use setattr(target, name, value) with name being a string or "
+                    "setattr(target, value) with target being a dotted "
+                    "import string"
+                )
+
+        oldval = getattr(target, name, notset)
+        if raising and oldval is notset:
+            raise AttributeError(f"{target!r} has no attribute {name!r}")
+
+        # avoid class descriptors like staticmethod/classmethod
+        if inspect.isclass(target):
+            oldval = target.__dict__.get(name, notset)
+        self._setattr.append((target, name, oldval))
+        setattr(target, name, value)
+
+    def delattr(
+        self,
+        target: object | str,
+        name: str | Notset = notset,
+        raising: bool = True,
+    ) -> None:
+        """Delete attribute ``name`` from ``target``.
+
+        If no ``name`` is specified and ``target`` is a string
+        it will be interpreted as a dotted import path with the
+        last part being the attribute name.
+
+        Raises AttributeError if the attribute does not exist, unless
+        ``raising`` is set to False.
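+
+        For example (an illustrative sketch)::
+
+            import os
+
+            monkeypatch.delattr(os, "getcwd")
+            # equivalently, via a dotted import string:
+            # monkeypatch.delattr("os.getcwd")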
+ """ + __tracebackhide__ = True + import inspect + + if isinstance(name, Notset): + if not isinstance(target, str): + raise TypeError( + "use delattr(target, name) or " + "delattr(target) with target being a dotted " + "import string" + ) + name, target = derive_importpath(target, raising) + + if not hasattr(target, name): + if raising: + raise AttributeError(name) + else: + oldval = getattr(target, name, notset) + # Avoid class descriptors like staticmethod/classmethod. + if inspect.isclass(target): + oldval = target.__dict__.get(name, notset) + self._setattr.append((target, name, oldval)) + delattr(target, name) + + def setitem(self, dic: Mapping[K, V], name: K, value: V) -> None: + """Set dictionary entry ``name`` to value.""" + self._setitem.append((dic, name, dic.get(name, notset))) + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + dic[name] = value # type: ignore[index] + + def delitem(self, dic: Mapping[K, V], name: K, raising: bool = True) -> None: + """Delete ``name`` from dict. + + Raises ``KeyError`` if it doesn't exist, unless ``raising`` is set to + False. + """ + if name not in dic: + if raising: + raise KeyError(name) + else: + self._setitem.append((dic, name, dic.get(name, notset))) + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + del dic[name] # type: ignore[attr-defined] + + def setenv(self, name: str, value: str, prepend: str | None = None) -> None: + """Set environment variable ``name`` to ``value``. + + If ``prepend`` is a character, read the current environment variable + value and prepend the ``value`` adjoined with the ``prepend`` + character. + """ + if not isinstance(value, str): + warnings.warn( # type: ignore[unreachable] + PytestWarning( + f"Value of environment variable {name} type should be str, but got " + f"{value!r} (type: {type(value).__name__}); converted to str implicitly" + ), + stacklevel=2, + ) + value = str(value) + if prepend and name in os.environ: + value = value + prepend + os.environ[name] + self.setitem(os.environ, name, value) + + def delenv(self, name: str, raising: bool = True) -> None: + """Delete ``name`` from the environment. + + Raises ``KeyError`` if it does not exist, unless ``raising`` is set to + False. + """ + environ: MutableMapping[str, str] = os.environ + self.delitem(environ, name, raising=raising) + + def syspath_prepend(self, path) -> None: + """Prepend ``path`` to ``sys.path`` list of import locations.""" + if self._savesyspath is None: + self._savesyspath = sys.path[:] + sys.path.insert(0, str(path)) + + # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171 + # this is only needed when pkg_resources was already loaded by the namespace package + if "pkg_resources" in sys.modules: + from pkg_resources import fixup_namespace_packages + + fixup_namespace_packages(str(path)) + + # A call to syspathinsert() usually means that the caller wants to + # import some dynamically created files, thus with python3 we + # invalidate its import caches. + # This is especially important when any namespace package is in use, + # since then the mtime based FileFinder cache (that gets created in + # this case already) gets not invalidated when writing the new files + # quickly afterwards. + from importlib import invalidate_caches + + invalidate_caches() + + def chdir(self, path: str | os.PathLike[str]) -> None: + """Change the current working directory to the specified path. + + :param path: + The path to change into. 
+ """ + if self._cwd is None: + self._cwd = os.getcwd() + os.chdir(path) + + def undo(self) -> None: + """Undo previous changes. + + This call consumes the undo stack. Calling it a second time has no + effect unless you do more monkeypatching after the undo call. + + There is generally no need to call `undo()`, since it is + called automatically during tear-down. + + .. note:: + The same `monkeypatch` fixture is used across a + single test function invocation. If `monkeypatch` is used both by + the test function itself and one of the test fixtures, + calling `undo()` will undo all of the changes made in + both functions. + + Prefer to use :meth:`context() ` instead. + """ + for obj, name, value in reversed(self._setattr): + if value is not notset: + setattr(obj, name, value) + else: + delattr(obj, name) + self._setattr[:] = [] + for dictionary, key, value in reversed(self._setitem): + if value is notset: + try: + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + del dictionary[key] # type: ignore[attr-defined] + except KeyError: + pass # Was already deleted, so we have the desired state. + else: + # Not all Mapping types support indexing, but MutableMapping doesn't support TypedDict + dictionary[key] = value # type: ignore[index] + self._setitem[:] = [] + if self._savesyspath is not None: + sys.path[:] = self._savesyspath + self._savesyspath = None + + if self._cwd is not None: + os.chdir(self._cwd) + self._cwd = None diff --git a/venv/Lib/site-packages/_pytest/nodes.py b/venv/Lib/site-packages/_pytest/nodes.py new file mode 100644 index 0000000..51bc517 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/nodes.py @@ -0,0 +1,766 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import abc +from functools import cached_property +from inspect import signature +import os +import pathlib +from pathlib import Path +from typing import Any +from typing import Callable +from typing import cast +from typing import Iterable +from typing import Iterator +from typing import MutableMapping +from typing import NoReturn +from typing import overload +from typing import TYPE_CHECKING +from typing import TypeVar +import warnings + +import pluggy + +import _pytest._code +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._code.code import Traceback +from _pytest._code.code import TracebackStyle +from _pytest.compat import LEGACY_PATH +from _pytest.config import Config +from _pytest.config import ConftestImportFailure +from _pytest.config.compat import _check_path +from _pytest.deprecated import NODE_CTOR_FSPATH_ARG +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import NodeKeywords +from _pytest.outcomes import fail +from _pytest.pathlib import absolutepath +from _pytest.pathlib import commonpath +from _pytest.stash import Stash +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + # Imported here due to circular import. 
+ from _pytest.main import Session + + +SEP = "/" + +tracebackcutdir = Path(_pytest.__file__).parent + + +_T = TypeVar("_T") + + +def _imply_path( + node_type: type[Node], + path: Path | None, + fspath: LEGACY_PATH | None, +) -> Path: + if fspath is not None: + warnings.warn( + NODE_CTOR_FSPATH_ARG.format( + node_type_name=node_type.__name__, + ), + stacklevel=6, + ) + if path is not None: + if fspath is not None: + _check_path(path, fspath) + return path + else: + assert fspath is not None + return Path(fspath) + + +_NodeType = TypeVar("_NodeType", bound="Node") + + +class NodeMeta(abc.ABCMeta): + """Metaclass used by :class:`Node` to enforce that direct construction raises + :class:`Failed`. + + This behaviour supports the indirection introduced with :meth:`Node.from_parent`, + the named constructor to be used instead of direct construction. The design + decision to enforce indirection with :class:`NodeMeta` was made as a + temporary aid for refactoring the collection tree, which was diagnosed to + have :class:`Node` objects whose creational patterns were overly entangled. + Once the refactoring is complete, this metaclass can be removed. + + See https://github.com/pytest-dev/pytest/projects/3 for an overview of the + progress on detangling the :class:`Node` classes. + """ + + def __call__(cls, *k, **kw) -> NoReturn: + msg = ( + "Direct construction of {name} has been deprecated, please use {name}.from_parent.\n" + "See " + "https://docs.pytest.org/en/stable/deprecations.html#node-construction-changed-to-node-from-parent" + " for more details." + ).format(name=f"{cls.__module__}.{cls.__name__}") + fail(msg, pytrace=False) + + def _create(cls: type[_T], *k, **kw) -> _T: + try: + return super().__call__(*k, **kw) # type: ignore[no-any-return,misc] + except TypeError: + sig = signature(getattr(cls, "__init__")) + known_kw = {k: v for k, v in kw.items() if k in sig.parameters} + from .warning_types import PytestDeprecationWarning + + warnings.warn( + PytestDeprecationWarning( + f"{cls} is not using a cooperative constructor and only takes {set(known_kw)}.\n" + "See https://docs.pytest.org/en/stable/deprecations.html" + "#constructors-of-custom-pytest-node-subclasses-should-take-kwargs " + "for more details." + ) + ) + + return super().__call__(*k, **known_kw) # type: ignore[no-any-return,misc] + + +class Node(abc.ABC, metaclass=NodeMeta): + r"""Base class of :class:`Collector` and :class:`Item`, the components of + the test collection tree. + + ``Collector``\'s are the internal nodes of the tree, and ``Item``\'s are the + leaf nodes. + """ + + # Implemented in the legacypath plugin. + #: A ``LEGACY_PATH`` copy of the :attr:`path` attribute. Intended for usage + #: for methods not migrated to ``pathlib.Path`` yet, such as + #: :meth:`Item.reportinfo `. Will be deprecated in + #: a future release, prefer using :attr:`path` instead. + fspath: LEGACY_PATH + + # Use __slots__ to make attribute access faster. + # Note that __dict__ is still available. + __slots__ = ( + "name", + "parent", + "config", + "session", + "path", + "_nodeid", + "_store", + "__dict__", + ) + + def __init__( + self, + name: str, + parent: Node | None = None, + config: Config | None = None, + session: Session | None = None, + fspath: LEGACY_PATH | None = None, + path: Path | None = None, + nodeid: str | None = None, + ) -> None: + #: A unique name within the scope of the parent node. + self.name: str = name + + #: The parent collector node. + self.parent = parent + + if config: + #: The pytest config object. 
+ self.config: Config = config + else: + if not parent: + raise TypeError("config or parent must be provided") + self.config = parent.config + + if session: + #: The pytest session this node is part of. + self.session: Session = session + else: + if not parent: + raise TypeError("session or parent must be provided") + self.session = parent.session + + if path is None and fspath is None: + path = getattr(parent, "path", None) + #: Filesystem path where this node was collected from (can be None). + self.path: pathlib.Path = _imply_path(type(self), path, fspath=fspath) + + # The explicit annotation is to avoid publicly exposing NodeKeywords. + #: Keywords/markers collected from all scopes. + self.keywords: MutableMapping[str, Any] = NodeKeywords(self) + + #: The marker objects belonging to this node. + self.own_markers: list[Mark] = [] + + #: Allow adding of extra keywords to use for matching. + self.extra_keyword_matches: set[str] = set() + + if nodeid is not None: + assert "::()" not in nodeid + self._nodeid = nodeid + else: + if not self.parent: + raise TypeError("nodeid or parent must be provided") + self._nodeid = self.parent.nodeid + "::" + self.name + + #: A place where plugins can store information on the node for their + #: own use. + self.stash: Stash = Stash() + # Deprecated alias. Was never public. Can be removed in a few releases. + self._store = self.stash + + @classmethod + def from_parent(cls, parent: Node, **kw) -> Self: + """Public constructor for Nodes. + + This indirection got introduced in order to enable removing + the fragile logic from the node constructors. + + Subclasses can use ``super().from_parent(...)`` when overriding the + construction. + + :param parent: The parent node of this Node. + """ + if "config" in kw: + raise TypeError("config is not a valid argument for from_parent") + if "session" in kw: + raise TypeError("session is not a valid argument for from_parent") + return cls._create(parent=parent, **kw) + + @property + def ihook(self) -> pluggy.HookRelay: + """fspath-sensitive hook proxy used to call pytest hooks.""" + return self.session.gethookproxy(self.path) + + def __repr__(self) -> str: + return "<{} {}>".format(self.__class__.__name__, getattr(self, "name", None)) + + def warn(self, warning: Warning) -> None: + """Issue a warning for this Node. + + Warnings will be displayed after the test session, unless explicitly suppressed. + + :param Warning warning: + The warning instance to issue. + + :raises ValueError: If ``warning`` instance is not a subclass of Warning. + + Example usage: + + .. code-block:: python + + node.warn(PytestWarning("some message")) + node.warn(UserWarning("some message")) + + .. versionchanged:: 6.2 + Any subclass of :class:`Warning` is now accepted, rather than only + :class:`PytestWarning ` subclasses. + """ + # enforce type checks here to avoid getting a generic type error later otherwise. + if not isinstance(warning, Warning): + raise ValueError( + f"warning must be an instance of Warning or subclass, got {warning!r}" + ) + path, lineno = get_fslocation_from_item(self) + assert lineno is not None + warnings.warn_explicit( + warning, + category=None, + filename=str(path), + lineno=lineno + 1, + ) + + # Methods for ordering nodes. 
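+    # (For example, a hypothetical ID such as
+    # "tests/test_sample.py::TestGroup::test_one" is the ::-separated address
+    # that `nodeid` below returns and that the command line accepts.)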
+ + @property + def nodeid(self) -> str: + """A ::-separated string denoting its collection tree address.""" + return self._nodeid + + def __hash__(self) -> int: + return hash(self._nodeid) + + def setup(self) -> None: + pass + + def teardown(self) -> None: + pass + + def iter_parents(self) -> Iterator[Node]: + """Iterate over all parent collectors starting from and including self + up to the root of the collection tree. + + .. versionadded:: 8.1 + """ + parent: Node | None = self + while parent is not None: + yield parent + parent = parent.parent + + def listchain(self) -> list[Node]: + """Return a list of all parent collectors starting from the root of the + collection tree down to and including self.""" + chain = [] + item: Node | None = self + while item is not None: + chain.append(item) + item = item.parent + chain.reverse() + return chain + + def add_marker(self, marker: str | MarkDecorator, append: bool = True) -> None: + """Dynamically add a marker object to the node. + + :param marker: + The marker. + :param append: + Whether to append the marker, or prepend it. + """ + from _pytest.mark import MARK_GEN + + if isinstance(marker, MarkDecorator): + marker_ = marker + elif isinstance(marker, str): + marker_ = getattr(MARK_GEN, marker) + else: + raise ValueError("is not a string or pytest.mark.* Marker") + self.keywords[marker_.name] = marker_ + if append: + self.own_markers.append(marker_.mark) + else: + self.own_markers.insert(0, marker_.mark) + + def iter_markers(self, name: str | None = None) -> Iterator[Mark]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of the markers of the node. + """ + return (x[1] for x in self.iter_markers_with_node(name=name)) + + def iter_markers_with_node( + self, name: str | None = None + ) -> Iterator[tuple[Node, Mark]]: + """Iterate over all markers of the node. + + :param name: If given, filter the results by the name attribute. + :returns: An iterator of (node, mark) tuples. + """ + for node in self.iter_parents(): + for mark in node.own_markers: + if name is None or getattr(mark, "name", None) == name: + yield node, mark + + @overload + def get_closest_marker(self, name: str) -> Mark | None: ... + + @overload + def get_closest_marker(self, name: str, default: Mark) -> Mark: ... + + def get_closest_marker(self, name: str, default: Mark | None = None) -> Mark | None: + """Return the first marker matching the name, from closest (for + example function) to farther level (for example module level). + + :param default: Fallback return value if no marker was found. + :param name: Name to filter by. + """ + return next(self.iter_markers(name=name), default) + + def listextrakeywords(self) -> set[str]: + """Return a set of all extra keywords in self and any parents.""" + extra_keywords: set[str] = set() + for item in self.listchain(): + extra_keywords.update(item.extra_keyword_matches) + return extra_keywords + + def listnames(self) -> list[str]: + return [x.name for x in self.listchain()] + + def addfinalizer(self, fin: Callable[[], object]) -> None: + """Register a function to be called without arguments when this node is + finalized. + + This method can only be called when this node is active + in a setup chain, for example during self.setup(). + """ + self.session._setupstate.addfinalizer(fin, self) + + def getparent(self, cls: type[_NodeType]) -> _NodeType | None: + """Get the closest parent node (including self) which is an instance of + the given class. 
+ + :param cls: The node class to search for. + :returns: The node, if found. + """ + for node in self.iter_parents(): + if isinstance(node, cls): + return node + return None + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + return excinfo.traceback + + def _repr_failure_py( + self, + excinfo: ExceptionInfo[BaseException], + style: TracebackStyle | None = None, + ) -> TerminalRepr: + from _pytest.fixtures import FixtureLookupError + + if isinstance(excinfo.value, ConftestImportFailure): + excinfo = ExceptionInfo.from_exception(excinfo.value.cause) + if isinstance(excinfo.value, fail.Exception): + if not excinfo.value.pytrace: + style = "value" + if isinstance(excinfo.value, FixtureLookupError): + return excinfo.value.formatrepr() + + tbfilter: bool | Callable[[ExceptionInfo[BaseException]], Traceback] + if self.config.getoption("fulltrace", False): + style = "long" + tbfilter = False + else: + tbfilter = self._traceback_filter + if style == "auto": + style = "long" + # XXX should excinfo.getrepr record all data and toterminal() process it? + if style is None: + if self.config.getoption("tbstyle", "auto") == "short": + style = "short" + else: + style = "long" + + if self.config.get_verbosity() > 1: + truncate_locals = False + else: + truncate_locals = True + + truncate_args = False if self.config.get_verbosity() > 2 else True + + # excinfo.getrepr() formats paths relative to the CWD if `abspath` is False. + # It is possible for a fixture/test to change the CWD while this code runs, which + # would then result in the user seeing confusing paths in the failure message. + # To fix this, if the CWD changed, always display the full absolute path. + # It will be better to just always display paths relative to invocation_dir, but + # this requires a lot of plumbing (#6428). + try: + abspath = Path(os.getcwd()) != self.config.invocation_params.dir + except OSError: + abspath = True + + return excinfo.getrepr( + funcargs=True, + abspath=abspath, + showlocals=self.config.getoption("showlocals", False), + style=style, + tbfilter=tbfilter, + truncate_locals=truncate_locals, + truncate_args=truncate_args, + ) + + def repr_failure( + self, + excinfo: ExceptionInfo[BaseException], + style: TracebackStyle | None = None, + ) -> str | TerminalRepr: + """Return a representation of a collection or test failure. + + .. seealso:: :ref:`non-python tests` + + :param excinfo: Exception information for the failure. + """ + return self._repr_failure_py(excinfo, style) + + +def get_fslocation_from_item(node: Node) -> tuple[str | Path, int | None]: + """Try to extract the actual location from a node, depending on available attributes: + + * "location": a pair (path, lineno) + * "obj": a Python object that the node wraps. + * "path": just a path + + :rtype: A tuple of (str|Path, int) with filename and 0-based line number. + """ + # See Item.location. + location: tuple[str, int | None, str] | None = getattr(node, "location", None) + if location is not None: + return location[:2] + obj = getattr(node, "obj", None) + if obj is not None: + return getfslineno(obj) + return getattr(node, "path", "unknown location"), -1 + + +class Collector(Node, abc.ABC): + """Base class of all collectors. + + Collector create children through `collect()` and thus iteratively build + the collection tree. 
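+
+    A minimal custom collector, sketched after the non-python-tests example in
+    the docs (``YamlItem`` and the ``yaml`` package are assumed to exist)::
+
+        class YamlFile(pytest.File):
+            def collect(self):
+                import yaml
+
+                raw = yaml.safe_load(self.path.open(encoding="utf-8"))
+                for name, spec in sorted(raw.items()):
+                    yield YamlItem.from_parent(self, name=name, spec=spec)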
+ """ + + class CollectError(Exception): + """An error during collection, contains a custom message.""" + + @abc.abstractmethod + def collect(self) -> Iterable[Item | Collector]: + """Collect children (items and collectors) for this collector.""" + raise NotImplementedError("abstract") + + # TODO: This omits the style= parameter which breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, excinfo: ExceptionInfo[BaseException] + ) -> str | TerminalRepr: + """Return a representation of a collection failure. + + :param excinfo: Exception information for the failure. + """ + if isinstance(excinfo.value, self.CollectError) and not self.config.getoption( + "fulltrace", False + ): + exc = excinfo.value + return str(exc.args[0]) + + # Respect explicit tbstyle option, but default to "short" + # (_repr_failure_py uses "long" with "fulltrace" option always). + tbstyle = self.config.getoption("tbstyle", "auto") + if tbstyle == "auto": + tbstyle = "short" + + return self._repr_failure_py(excinfo, style=tbstyle) + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + if hasattr(self, "path"): + traceback = excinfo.traceback + ntraceback = traceback.cut(path=self.path) + if ntraceback == traceback: + ntraceback = ntraceback.cut(excludepath=tracebackcutdir) + return ntraceback.filter(excinfo) + return excinfo.traceback + + +def _check_initialpaths_for_relpath(session: Session, path: Path) -> str | None: + for initial_path in session._initialpaths: + if commonpath(path, initial_path) == initial_path: + rel = str(path.relative_to(initial_path)) + return "" if rel == "." else rel + return None + + +class FSCollector(Collector, abc.ABC): + """Base class for filesystem collectors.""" + + def __init__( + self, + fspath: LEGACY_PATH | None = None, + path_or_parent: Path | Node | None = None, + path: Path | None = None, + name: str | None = None, + parent: Node | None = None, + config: Config | None = None, + session: Session | None = None, + nodeid: str | None = None, + ) -> None: + if path_or_parent: + if isinstance(path_or_parent, Node): + assert parent is None + parent = cast(FSCollector, path_or_parent) + elif isinstance(path_or_parent, Path): + assert path is None + path = path_or_parent + + path = _imply_path(type(self), path, fspath=fspath) + if name is None: + name = path.name + if parent is not None and parent.path != path: + try: + rel = path.relative_to(parent.path) + except ValueError: + pass + else: + name = str(rel) + name = name.replace(os.sep, SEP) + self.path = path + + if session is None: + assert parent is not None + session = parent.session + + if nodeid is None: + try: + nodeid = str(self.path.relative_to(session.config.rootpath)) + except ValueError: + nodeid = _check_initialpaths_for_relpath(session, path) + + if nodeid and os.sep != SEP: + nodeid = nodeid.replace(os.sep, SEP) + + super().__init__( + name=name, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + path=path, + ) + + @classmethod + def from_parent( + cls, + parent, + *, + fspath: LEGACY_PATH | None = None, + path: Path | None = None, + **kw, + ) -> Self: + """The public constructor.""" + return super().from_parent(parent=parent, fspath=fspath, path=path, **kw) + + +class File(FSCollector, abc.ABC): + """Base class for collecting tests from a file. + + :ref:`non-python tests`. + """ + + +class Directory(FSCollector, abc.ABC): + """Base class for collecting files from a directory. 
+ + A basic directory collector does the following: goes over the files and + sub-directories in the directory and creates collectors for them by calling + the hooks :hook:`pytest_collect_directory` and :hook:`pytest_collect_file`, + after checking that they are not ignored using + :hook:`pytest_ignore_collect`. + + The default directory collectors are :class:`~pytest.Dir` and + :class:`~pytest.Package`. + + .. versionadded:: 8.0 + + :ref:`custom directory collectors`. + """ + + +class Item(Node, abc.ABC): + """Base class of all test invocation items. + + Note that for a single function there might be multiple test invocation items. + """ + + nextitem = None + + def __init__( + self, + name, + parent=None, + config: Config | None = None, + session: Session | None = None, + nodeid: str | None = None, + **kw, + ) -> None: + # The first two arguments are intentionally passed positionally, + # to keep plugins who define a node type which inherits from + # (pytest.Item, pytest.File) working (see issue #8435). + # They can be made kwargs when the deprecation above is done. + super().__init__( + name, + parent, + config=config, + session=session, + nodeid=nodeid, + **kw, + ) + self._report_sections: list[tuple[str, str, str]] = [] + + #: A list of tuples (name, value) that holds user defined properties + #: for this test. + self.user_properties: list[tuple[str, object]] = [] + + self._check_item_and_collector_diamond_inheritance() + + def _check_item_and_collector_diamond_inheritance(self) -> None: + """ + Check if the current type inherits from both File and Collector + at the same time, emitting a warning accordingly (#8447). + """ + cls = type(self) + + # We inject an attribute in the type to avoid issuing this warning + # for the same class more than once, which is not helpful. + # It is a hack, but was deemed acceptable in order to avoid + # flooding the user in the common case. + attr_name = "_pytest_diamond_inheritance_warning_shown" + if getattr(cls, attr_name, False): + return + setattr(cls, attr_name, True) + + problems = ", ".join( + base.__name__ for base in cls.__bases__ if issubclass(base, Collector) + ) + if problems: + warnings.warn( + f"{cls.__name__} is an Item subclass and should not be a collector, " + f"however its bases {problems} are collectors.\n" + "Please split the Collectors and the Item into separate node types.\n" + "Pytest Doc example: https://docs.pytest.org/en/latest/example/nonpython.html\n" + "example pull request on a plugin: https://github.com/asmeurer/pytest-flakes/pull/40/", + PytestWarning, + ) + + @abc.abstractmethod + def runtest(self) -> None: + """Run the test case for this item. + + Must be implemented by subclasses. + + .. seealso:: :ref:`non-python tests` + """ + raise NotImplementedError("runtest must be implemented by Item subclass") + + def add_report_section(self, when: str, key: str, content: str) -> None: + """Add a new report section, similar to what's done internally to add + stdout and stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + :param str content: + The full contents as a string. 
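+
+        A hypothetical hook-based use (illustrative sketch)::
+
+            def pytest_runtest_teardown(item):
+                item.add_report_section("teardown", "notes", "cleanup finished")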
+ """ + if content: + self._report_sections.append((when, key, content)) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + """Get location information for this item for test reports. + + Returns a tuple with three elements: + + - The path of the test (default ``self.path``) + - The 0-based line number of the test (default ``None``) + - A name of the test to be shown (default ``""``) + + .. seealso:: :ref:`non-python tests` + """ + return self.path, None, "" + + @cached_property + def location(self) -> tuple[str, int | None, str]: + """ + Returns a tuple of ``(relfspath, lineno, testname)`` for this item + where ``relfspath`` is file path relative to ``config.rootpath`` + and lineno is a 0-based line number. + """ + location = self.reportinfo() + path = absolutepath(location[0]) + relfspath = self.session._node_location_to_relpath(path) + assert type(location[2]) is str + return (relfspath, location[1], location[2]) diff --git a/venv/Lib/site-packages/_pytest/outcomes.py b/venv/Lib/site-packages/_pytest/outcomes.py new file mode 100644 index 0000000..5b20803 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/outcomes.py @@ -0,0 +1,318 @@ +"""Exception classes and constants handling test outcomes as well as +functions creating them.""" + +from __future__ import annotations + +import sys +from typing import Any +from typing import Callable +from typing import cast +from typing import NoReturn +from typing import Protocol +from typing import Type +from typing import TypeVar + +from .warning_types import PytestDeprecationWarning + + +class OutcomeException(BaseException): + """OutcomeException and its subclass instances indicate and contain info + about test and collection outcomes.""" + + def __init__(self, msg: str | None = None, pytrace: bool = True) -> None: + if msg is not None and not isinstance(msg, str): + error_msg = ( # type: ignore[unreachable] + "{} expected string as 'msg' parameter, got '{}' instead.\n" + "Perhaps you meant to use a mark?" + ) + raise TypeError(error_msg.format(type(self).__name__, type(msg).__name__)) + super().__init__(msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self) -> str: + if self.msg is not None: + return self.msg + return f"<{self.__class__.__name__} instance>" + + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = "builtins" + + def __init__( + self, + msg: str | None = None, + pytrace: bool = True, + allow_module_level: bool = False, + *, + _use_item_location: bool = False, + ) -> None: + super().__init__(msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + # If true, the skip location is reported as the item's location, + # instead of the place that raises the exception/calls skip(). + self._use_item_location = _use_item_location + + +class Failed(OutcomeException): + """Raised from an explicit call to pytest.fail().""" + + __module__ = "builtins" + + +class Exit(Exception): + """Raised for immediate program exits (no tracebacks/summaries).""" + + def __init__( + self, msg: str = "unknown reason", returncode: int | None = None + ) -> None: + self.msg = msg + self.returncode = returncode + super().__init__(msg) + + +# Elaborate hack to work around https://github.com/python/mypy/issues/2087. +# Ideally would just be `exit.Exception = Exit` etc. 
+ +_F = TypeVar("_F", bound=Callable[..., object]) +_ET = TypeVar("_ET", bound=Type[BaseException]) + + +class _WithException(Protocol[_F, _ET]): + Exception: _ET + __call__: _F + + +def _with_exception(exception_type: _ET) -> Callable[[_F], _WithException[_F, _ET]]: + def decorate(func: _F) -> _WithException[_F, _ET]: + func_with_exception = cast(_WithException[_F, _ET], func) + func_with_exception.Exception = exception_type + return func_with_exception + + return decorate + + +# Exposed helper methods. + + +@_with_exception(Exit) +def exit( + reason: str = "", + returncode: int | None = None, +) -> NoReturn: + """Exit testing process. + + :param reason: + The message to show as the reason for exiting pytest. reason has a default value + only because `msg` is deprecated. + + :param returncode: + Return code to be used when exiting pytest. None means the same as ``0`` (no error), same as :func:`sys.exit`. + + :raises pytest.exit.Exception: + The exception that is raised. + """ + __tracebackhide__ = True + raise Exit(reason, returncode) + + +@_with_exception(Skipped) +def skip( + reason: str = "", + *, + allow_module_level: bool = False, +) -> NoReturn: + """Skip an executing test with the given message. + + This function should be called only during testing (setup, call or teardown) or + during collection by using the ``allow_module_level`` flag. This function can + be called in doctests as well. + + :param reason: + The message to show the user as reason for the skip. + + :param allow_module_level: + Allows this function to be called at module level. + Raising the skip exception at module level will stop + the execution of the module and prevent the collection of all tests in the module, + even those defined before the `skip` call. + + Defaults to False. + + :raises pytest.skip.Exception: + The exception that is raised. + + .. note:: + It is better to use the :ref:`pytest.mark.skipif ref` marker when + possible to declare a test to be skipped under certain conditions + like mismatching platforms or dependencies. + Similarly, use the ``# doctest: +SKIP`` directive (see :py:data:`doctest.SKIP`) + to skip a doctest statically. + """ + __tracebackhide__ = True + raise Skipped(msg=reason, allow_module_level=allow_module_level) + + +@_with_exception(Failed) +def fail(reason: str = "", pytrace: bool = True) -> NoReturn: + """Explicitly fail an executing test with the given message. + + :param reason: + The message to show the user as reason for the failure. + + :param pytrace: + If False, msg represents the full failure information and no + python traceback will be reported. + + :raises pytest.fail.Exception: + The exception that is raised. + """ + __tracebackhide__ = True + raise Failed(msg=reason, pytrace=pytrace) + + +class XFailed(Failed): + """Raised from an explicit call to pytest.xfail().""" + + +@_with_exception(XFailed) +def xfail(reason: str = "") -> NoReturn: + """Imperatively xfail an executing test or setup function with the given reason. + + This function should be called only during testing (setup, call or teardown). + + No other code is executed after using ``xfail()`` (it is implemented + internally by raising an exception). + + :param reason: + The message to show the user as reason for the xfail. + + .. note:: + It is better to use the :ref:`pytest.mark.xfail ref` marker when + possible to declare a test to be xfailed under certain conditions + like known bugs or missing features. + + :raises pytest.xfail.Exception: + The exception that is raised. 
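+
+    For example (an illustrative sketch; ``backend`` is hypothetical)::
+
+        def test_optional_backend():
+            if backend is None:
+                pytest.xfail("backend not compiled in")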
+ """ + __tracebackhide__ = True + raise XFailed(reason) + + +def importorskip( + modname: str, + minversion: str | None = None, + reason: str | None = None, + *, + exc_type: type[ImportError] | None = None, +) -> Any: + """Import and return the requested module ``modname``, or skip the + current test if the module cannot be imported. + + :param modname: + The name of the module to import. + :param minversion: + If given, the imported module's ``__version__`` attribute must be at + least this minimal version, otherwise the test is still skipped. + :param reason: + If given, this reason is shown as the message when the module cannot + be imported. + :param exc_type: + The exception that should be captured in order to skip modules. + Must be :py:class:`ImportError` or a subclass. + + If the module can be imported but raises :class:`ImportError`, pytest will + issue a warning to the user, as often users expect the module not to be + found (which would raise :class:`ModuleNotFoundError` instead). + + This warning can be suppressed by passing ``exc_type=ImportError`` explicitly. + + See :ref:`import-or-skip-import-error` for details. + + + :returns: + The imported module. This should be assigned to its canonical name. + + :raises pytest.skip.Exception: + If the module cannot be imported. + + Example:: + + docutils = pytest.importorskip("docutils") + + .. versionadded:: 8.2 + + The ``exc_type`` parameter. + """ + import warnings + + __tracebackhide__ = True + compile(modname, "", "eval") # to catch syntaxerrors + + # Until pytest 9.1, we will warn the user if we catch ImportError (instead of ModuleNotFoundError), + # as this might be hiding an installation/environment problem, which is not usually what is intended + # when using importorskip() (#11523). + # In 9.1, to keep the function signature compatible, we just change the code below to: + # 1. Use `exc_type = ModuleNotFoundError` if `exc_type` is not given. + # 2. Remove `warn_on_import` and the warning handling. + if exc_type is None: + exc_type = ImportError + warn_on_import_error = True + else: + warn_on_import_error = False + + skipped: Skipped | None = None + warning: Warning | None = None + + with warnings.catch_warnings(): + # Make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file. + warnings.simplefilter("ignore") + + try: + __import__(modname) + except exc_type as exc: + # Do not raise or issue warnings inside the catch_warnings() block. + if reason is None: + reason = f"could not import {modname!r}: {exc}" + skipped = Skipped(reason, allow_module_level=True) + + if warn_on_import_error and not isinstance(exc, ModuleNotFoundError): + lines = [ + "", + f"Module '{modname}' was found, but when imported by pytest it raised:", + f" {exc!r}", + "In pytest 9.1 this warning will become an error by default.", + "You can fix the underlying problem, or alternatively overwrite this behavior and silence this " + "warning by passing exc_type=ImportError explicitly.", + "See https://docs.pytest.org/en/stable/deprecations.html#pytest-importorskip-default-behavior-regarding-importerror", + ] + warning = PytestDeprecationWarning("\n".join(lines)) + + if warning: + warnings.warn(warning, stacklevel=2) + if skipped: + raise skipped + + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, "__version__", None) + if minversion is not None: + # Imported lazily to improve start-up time. 
+ from packaging.version import Version + + if verattr is None or Version(verattr) < Version(minversion): + raise Skipped( + f"module {modname!r} has __version__ {verattr!r}, required is: {minversion!r}", + allow_module_level=True, + ) + return mod diff --git a/venv/Lib/site-packages/_pytest/pastebin.py b/venv/Lib/site-packages/_pytest/pastebin.py new file mode 100644 index 0000000..69c011e --- /dev/null +++ b/venv/Lib/site-packages/_pytest/pastebin.py @@ -0,0 +1,113 @@ +# mypy: allow-untyped-defs +"""Submit failure or test session information to a pastebin service.""" + +from __future__ import annotations + +from io import StringIO +import tempfile +from typing import IO + +from _pytest.config import Config +from _pytest.config import create_terminal_writer +from _pytest.config.argparsing import Parser +from _pytest.stash import StashKey +from _pytest.terminal import TerminalReporter +import pytest + + +pastebinfile_key = StashKey[IO[bytes]]() + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting") + group._addoption( + "--pastebin", + metavar="mode", + action="store", + dest="pastebin", + default=None, + choices=["failed", "all"], + help="Send failed|all info to bpaste.net pastebin service", + ) + + +@pytest.hookimpl(trylast=True) +def pytest_configure(config: Config) -> None: + if config.option.pastebin == "all": + tr = config.pluginmanager.getplugin("terminalreporter") + # If no terminal reporter plugin is present, nothing we can do here; + # this can happen when this function executes in a worker node + # when using pytest-xdist, for example. + if tr is not None: + # pastebin file will be UTF-8 encoded binary file. + config.stash[pastebinfile_key] = tempfile.TemporaryFile("w+b") + oldwrite = tr._tw.write + + def tee_write(s, **kwargs): + oldwrite(s, **kwargs) + if isinstance(s, str): + s = s.encode("utf-8") + config.stash[pastebinfile_key].write(s) + + tr._tw.write = tee_write + + +def pytest_unconfigure(config: Config) -> None: + if pastebinfile_key in config.stash: + pastebinfile = config.stash[pastebinfile_key] + # Get terminal contents and delete file. + pastebinfile.seek(0) + sessionlog = pastebinfile.read() + pastebinfile.close() + del config.stash[pastebinfile_key] + # Undo our patching in the terminal reporter. + tr = config.pluginmanager.getplugin("terminalreporter") + del tr._tw.__dict__["write"] + # Write summary. + tr.write_sep("=", "Sending information to Paste Service") + pastebinurl = create_new_paste(sessionlog) + tr.write_line(f"pastebin session-log: {pastebinurl}\n") + + +def create_new_paste(contents: str | bytes) -> str: + """Create a new paste using the bpaste.net service. + + :contents: Paste contents string. + :returns: URL to the pasted contents, or an error message. 
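+
+    Reached via the command-line option registered above (illustrative)::
+
+        pytest --pastebin=failed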
+ """ + import re + from urllib.parse import urlencode + from urllib.request import urlopen + + params = {"code": contents, "lexer": "text", "expiry": "1week"} + url = "https://bpa.st" + try: + response: str = ( + urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8") + ) + except OSError as exc_info: # urllib errors + return f"bad response: {exc_info}" + m = re.search(r'href="/raw/(\w+)"', response) + if m: + return f"{url}/show/{m.group(1)}" + else: + return "bad response: invalid format ('" + response + "')" + + +def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: + if terminalreporter.config.option.pastebin != "failed": + return + if "failed" in terminalreporter.stats: + terminalreporter.write_sep("=", "Sending information to Paste Service") + for rep in terminalreporter.stats["failed"]: + try: + msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc + except AttributeError: + msg = terminalreporter._getfailureheadline(rep) + file = StringIO() + tw = create_terminal_writer(terminalreporter.config, file) + rep.toterminal(tw) + s = file.getvalue() + assert len(s) + pastebinurl = create_new_paste(s) + terminalreporter.write_line(f"{msg} --> {pastebinurl}") diff --git a/venv/Lib/site-packages/_pytest/pathlib.py b/venv/Lib/site-packages/_pytest/pathlib.py new file mode 100644 index 0000000..81e52ea --- /dev/null +++ b/venv/Lib/site-packages/_pytest/pathlib.py @@ -0,0 +1,975 @@ +from __future__ import annotations + +import atexit +import contextlib +from enum import Enum +from errno import EBADF +from errno import ELOOP +from errno import ENOENT +from errno import ENOTDIR +import fnmatch +from functools import partial +from importlib.machinery import ModuleSpec +import importlib.util +import itertools +import os +from os.path import expanduser +from os.path import expandvars +from os.path import isabs +from os.path import sep +from pathlib import Path +from pathlib import PurePath +from posixpath import sep as posix_sep +import shutil +import sys +import types +from types import ModuleType +from typing import Any +from typing import Callable +from typing import Iterable +from typing import Iterator +from typing import TypeVar +import uuid +import warnings + +from _pytest.compat import assert_never +from _pytest.outcomes import skip +from _pytest.warning_types import PytestWarning + + +LOCK_TIMEOUT = 60 * 60 * 24 * 3 + + +_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath) + +# The following function, variables and comments were +# copied from cpython 3.9 Lib/pathlib.py file. + +# EBADF - guard against macOS `stat` throwing EBADF +_IGNORED_ERRORS = (ENOENT, ENOTDIR, EBADF, ELOOP) + +_IGNORED_WINERRORS = ( + 21, # ERROR_NOT_READY - drive exists but is not accessible + 1921, # ERROR_CANT_RESOLVE_FILENAME - fix for broken symlink pointing to itself +) + + +def _ignore_error(exception: Exception) -> bool: + return ( + getattr(exception, "errno", None) in _IGNORED_ERRORS + or getattr(exception, "winerror", None) in _IGNORED_WINERRORS + ) + + +def get_lock_path(path: _AnyPurePath) -> _AnyPurePath: + return path.joinpath(".lock") + + +def on_rm_rf_error( + func: Callable[..., Any] | None, + path: str, + excinfo: BaseException + | tuple[type[BaseException], BaseException, types.TracebackType | None], + *, + start_path: Path, +) -> bool: + """Handle known read-only errors during rmtree. + + The returned value is used only by our own tests. 
+ """ + if isinstance(excinfo, BaseException): + exc = excinfo + else: + exc = excinfo[1] + + # Another process removed the file in the middle of the "rm_rf" (xdist for example). + # More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018 + if isinstance(exc, FileNotFoundError): + return False + + if not isinstance(exc, PermissionError): + warnings.warn( + PytestWarning(f"(rm_rf) error removing {path}\n{type(exc)}: {exc}") + ) + return False + + if func not in (os.rmdir, os.remove, os.unlink): + if func not in (os.open,): + warnings.warn( + PytestWarning( + f"(rm_rf) unknown function {func} when removing {path}:\n{type(exc)}: {exc}" + ) + ) + return False + + # Chmod + retry. + import stat + + def chmod_rw(p: str) -> None: + mode = os.stat(p).st_mode + os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR) + + # For files, we need to recursively go upwards in the directories to + # ensure they all are also writable. + p = Path(path) + if p.is_file(): + for parent in p.parents: + chmod_rw(str(parent)) + # Stop when we reach the original path passed to rm_rf. + if parent == start_path: + break + chmod_rw(str(path)) + + func(path) + return True + + +def ensure_extended_length_path(path: Path) -> Path: + """Get the extended-length version of a path (Windows). + + On Windows, by default, the maximum length of a path (MAX_PATH) is 260 + characters, and operations on paths longer than that fail. But it is possible + to overcome this by converting the path to "extended-length" form before + performing the operation: + https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation + + On Windows, this function returns the extended-length absolute version of path. + On other platforms it returns path unchanged. + """ + if sys.platform.startswith("win32"): + path = path.resolve() + path = Path(get_extended_length_path_str(str(path))) + return path + + +def get_extended_length_path_str(path: str) -> str: + """Convert a path to a Windows extended length path.""" + long_path_prefix = "\\\\?\\" + unc_long_path_prefix = "\\\\?\\UNC\\" + if path.startswith((long_path_prefix, unc_long_path_prefix)): + return path + # UNC + if path.startswith("\\\\"): + return unc_long_path_prefix + path[2:] + return long_path_prefix + path + + +def rm_rf(path: Path) -> None: + """Remove the path contents recursively, even if some elements + are read-only.""" + path = ensure_extended_length_path(path) + onerror = partial(on_rm_rf_error, start_path=path) + if sys.version_info >= (3, 12): + shutil.rmtree(str(path), onexc=onerror) + else: + shutil.rmtree(str(path), onerror=onerror) + + +def find_prefixed(root: Path, prefix: str) -> Iterator[os.DirEntry[str]]: + """Find all elements in root that begin with the prefix, case-insensitive.""" + l_prefix = prefix.lower() + for x in os.scandir(root): + if x.name.lower().startswith(l_prefix): + yield x + + +def extract_suffixes(iter: Iterable[os.DirEntry[str]], prefix: str) -> Iterator[str]: + """Return the parts of the paths following the prefix. + + :param iter: Iterator over path names. + :param prefix: Expected prefix of the path names. 
+ """ + p_len = len(prefix) + for entry in iter: + yield entry.name[p_len:] + + +def find_suffixes(root: Path, prefix: str) -> Iterator[str]: + """Combine find_prefixes and extract_suffixes.""" + return extract_suffixes(find_prefixed(root, prefix), prefix) + + +def parse_num(maybe_num: str) -> int: + """Parse number path suffixes, returns -1 on error.""" + try: + return int(maybe_num) + except ValueError: + return -1 + + +def _force_symlink(root: Path, target: str | PurePath, link_to: str | Path) -> None: + """Helper to create the current symlink. + + It's full of race conditions that are reasonably OK to ignore + for the context of best effort linking to the latest test run. + + The presumption being that in case of much parallelism + the inaccuracy is going to be acceptable. + """ + current_symlink = root.joinpath(target) + try: + current_symlink.unlink() + except OSError: + pass + try: + current_symlink.symlink_to(link_to) + except Exception: + pass + + +def make_numbered_dir(root: Path, prefix: str, mode: int = 0o700) -> Path: + """Create a directory with an increased number as suffix for the given prefix.""" + for i in range(10): + # try up to 10 times to create the folder + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + new_number = max_existing + 1 + new_path = root.joinpath(f"{prefix}{new_number}") + try: + new_path.mkdir(mode=mode) + except Exception: + pass + else: + _force_symlink(root, prefix + "current", new_path) + return new_path + else: + raise OSError( + "could not create numbered dir with prefix " + f"{prefix} in {root} after 10 tries" + ) + + +def create_cleanup_lock(p: Path) -> Path: + """Create a lock to prevent premature folder cleanup.""" + lock_path = get_lock_path(p) + try: + fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) + except FileExistsError as e: + raise OSError(f"cannot create lockfile in {p}") from e + else: + pid = os.getpid() + spid = str(pid).encode() + os.write(fd, spid) + os.close(fd) + if not lock_path.is_file(): + raise OSError("lock path got renamed after successful creation") + return lock_path + + +def register_cleanup_lock_removal( + lock_path: Path, register: Any = atexit.register +) -> Any: + """Register a cleanup function for removing a lock, by default on atexit.""" + pid = os.getpid() + + def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None: + current_pid = os.getpid() + if current_pid != original_pid: + # fork + return + try: + lock_path.unlink() + except OSError: + pass + + return register(cleanup_on_exit) + + +def maybe_delete_a_numbered_dir(path: Path) -> None: + """Remove a numbered directory if its lock can be obtained and it does + not seem to be in use.""" + path = ensure_extended_length_path(path) + lock_path = None + try: + lock_path = create_cleanup_lock(path) + parent = path.parent + + garbage = parent.joinpath(f"garbage-{uuid.uuid4()}") + path.rename(garbage) + rm_rf(garbage) + except OSError: + # known races: + # * other process did a cleanup at the same time + # * deletable folder was found + # * process cwd (Windows) + return + finally: + # If we created the lock, ensure we remove it even if we failed + # to properly remove the numbered dir. 
+ if lock_path is not None: + try: + lock_path.unlink() + except OSError: + pass + + +def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool: + """Check if `path` is deletable based on whether the lock file is expired.""" + if path.is_symlink(): + return False + lock = get_lock_path(path) + try: + if not lock.is_file(): + return True + except OSError: + # we might not have access to the lock file at all, in this case assume + # we don't have access to the entire directory (#7491). + return False + try: + lock_time = lock.stat().st_mtime + except Exception: + return False + else: + if lock_time < consider_lock_dead_if_created_before: + # We want to ignore any errors while trying to remove the lock such as: + # - PermissionDenied, like the file permissions have changed since the lock creation; + # - FileNotFoundError, in case another pytest process got here first; + # and any other cause of failure. + with contextlib.suppress(OSError): + lock.unlink() + return True + return False + + +def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None: + """Try to cleanup a folder if we can ensure it's deletable.""" + if ensure_deletable(path, consider_lock_dead_if_created_before): + maybe_delete_a_numbered_dir(path) + + +def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]: + """List candidates for numbered directories to be removed - follows py.path.""" + max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1) + max_delete = max_existing - keep + entries = find_prefixed(root, prefix) + entries, entries2 = itertools.tee(entries) + numbers = map(parse_num, extract_suffixes(entries2, prefix)) + for entry, number in zip(entries, numbers): + if number <= max_delete: + yield Path(entry) + + +def cleanup_dead_symlinks(root: Path) -> None: + for left_dir in root.iterdir(): + if left_dir.is_symlink(): + if not left_dir.resolve().exists(): + left_dir.unlink() + + +def cleanup_numbered_dir( + root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float +) -> None: + """Cleanup for lock driven numbered directories.""" + if not root.exists(): + return + for path in cleanup_candidates(root, prefix, keep): + try_cleanup(path, consider_lock_dead_if_created_before) + for path in root.glob("garbage-*"): + try_cleanup(path, consider_lock_dead_if_created_before) + + cleanup_dead_symlinks(root) + + +def make_numbered_dir_with_cleanup( + root: Path, + prefix: str, + keep: int, + lock_timeout: float, + mode: int, +) -> Path: + """Create a numbered dir with a cleanup lock and remove old ones.""" + e = None + for i in range(10): + try: + p = make_numbered_dir(root, prefix, mode) + # Only lock the current dir when keep is not 0 + if keep != 0: + lock_path = create_cleanup_lock(p) + register_cleanup_lock_removal(lock_path) + except Exception as exc: + e = exc + else: + consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout + # Register a cleanup for program exit + atexit.register( + cleanup_numbered_dir, + root, + prefix, + keep, + consider_lock_dead_if_created_before, + ) + return p + assert e is not None + raise e + + +def resolve_from_str(input: str, rootpath: Path) -> Path: + input = expanduser(input) + input = expandvars(input) + if isabs(input): + return Path(input) + else: + return rootpath.joinpath(input) + + +def fnmatch_ex(pattern: str, path: str | os.PathLike[str]) -> bool: + """A port of FNMatcher from py.path.common which works with PurePath() instances. 
+
+    The difference between this algorithm and PurePath.match() is that the
+    latter matches "**" glob expressions for each part of the path, while
+    this algorithm uses the whole path instead.
+
+    For example:
+        "tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py"
+        with this algorithm, but not with PurePath.match().
+
+    This algorithm was ported to keep backward-compatibility with existing
+    settings which assume paths match according to this logic.
+
+    References:
+    * https://bugs.python.org/issue29249
+    * https://bugs.python.org/issue34731
+    """
+    path = PurePath(path)
+    iswin32 = sys.platform.startswith("win")
+
+    if iswin32 and sep not in pattern and posix_sep in pattern:
+        # Running on Windows, the pattern has no Windows path separators,
+        # and the pattern has one or more Posix path separators. Replace
+        # the Posix path separators with the Windows path separator.
+        pattern = pattern.replace(posix_sep, sep)
+
+    if sep not in pattern:
+        name = path.name
+    else:
+        name = str(path)
+        if path.is_absolute() and not os.path.isabs(pattern):
+            pattern = f"*{os.sep}{pattern}"
+    return fnmatch.fnmatch(name, pattern)
+
+
+def parts(s: str) -> set[str]:
+    parts = s.split(sep)
+    return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))}
+
+
+def symlink_or_skip(
+    src: os.PathLike[str] | str,
+    dst: os.PathLike[str] | str,
+    **kwargs: Any,
+) -> None:
+    """Make a symlink, or skip the test in case symlinks are not supported."""
+    try:
+        os.symlink(src, dst, **kwargs)
+    except OSError as e:
+        skip(f"symlinks not supported: {e}")
+
+
+class ImportMode(Enum):
+    """Possible values for `mode` parameter of `import_path`."""
+
+    prepend = "prepend"
+    append = "append"
+    importlib = "importlib"
+
+
+class ImportPathMismatchError(ImportError):
+    """Raised on import_path() if there is a mismatch of __file__'s.
+
+    This can happen when `import_path` is called multiple times with different filenames that have
+    the same basename but reside in packages
+    (for example "/tests1/test_foo.py" and "/tests2/test_foo.py").
+    """
+
+
+def import_path(
+    path: str | os.PathLike[str],
+    *,
+    mode: str | ImportMode = ImportMode.prepend,
+    root: Path,
+    consider_namespace_packages: bool,
+) -> ModuleType:
+    """
+    Import and return a module from the given path, which can be a file (a module) or
+    a directory (a package).
+
+    :param path:
+        Path to the file to import.
+
+    :param mode:
+        Controls the underlying import mechanism that will be used:
+
+        * ImportMode.prepend: the directory containing the module (or package, taking
+          `__init__.py` files into account) will be put at the *start* of `sys.path` before
+          being imported with `importlib.import_module`.
+
+        * ImportMode.append: same as `prepend`, but the directory will be appended
+          to the end of `sys.path`, if not already in `sys.path`.
+
+        * ImportMode.importlib: uses more fine control mechanisms provided by `importlib`
+          to import the module, which avoids having to muck with `sys.path` at all. It effectively
+          allows having same-named test modules in different places.
+
+    :param root:
+        Used as an anchor when mode == ImportMode.importlib to obtain
+        a unique name for the module being imported so it can safely be stored
+        into ``sys.modules``.
+
+    :param consider_namespace_packages:
+        If True, consider namespace packages when resolving module names.
+
+    :raises ImportPathMismatchError:
+        If after importing the given `path` and the module `__file__`
+        are different. Only raised in `prepend` and `append` modes.
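+
+    For example, a minimal sketch (the ``tests/test_foo.py`` path and the
+    ``root`` value are illustrative assumptions, not fixed by this module):
+
+    .. code-block:: python
+
+        mod = import_path(
+            "tests/test_foo.py",
+            mode=ImportMode.importlib,
+            root=Path.cwd(),
+            consider_namespace_packages=False,
+        )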
+ """ + path = Path(path) + mode = ImportMode(mode) + + if not path.exists(): + raise ImportError(path) + + if mode is ImportMode.importlib: + # Try to import this module using the standard import mechanisms, but + # without touching sys.path. + try: + pkg_root, module_name = resolve_pkg_root_and_module_name( + path, consider_namespace_packages=consider_namespace_packages + ) + except CouldNotResolvePathError: + pass + else: + # If the given module name is already in sys.modules, do not import it again. + with contextlib.suppress(KeyError): + return sys.modules[module_name] + + mod = _import_module_using_spec( + module_name, path, pkg_root, insert_modules=False + ) + if mod is not None: + return mod + + # Could not import the module with the current sys.path, so we fall back + # to importing the file as a single module, not being a part of a package. + module_name = module_name_from_path(path, root) + with contextlib.suppress(KeyError): + return sys.modules[module_name] + + mod = _import_module_using_spec( + module_name, path, path.parent, insert_modules=True + ) + if mod is None: + raise ImportError(f"Can't find module {module_name} at location {path}") + return mod + + try: + pkg_root, module_name = resolve_pkg_root_and_module_name( + path, consider_namespace_packages=consider_namespace_packages + ) + except CouldNotResolvePathError: + pkg_root, module_name = path.parent, path.stem + + # Change sys.path permanently: restoring it at the end of this function would cause surprising + # problems because of delayed imports: for example, a conftest.py file imported by this function + # might have local imports, which would fail at runtime if we restored sys.path. + if mode is ImportMode.append: + if str(pkg_root) not in sys.path: + sys.path.append(str(pkg_root)) + elif mode is ImportMode.prepend: + if str(pkg_root) != sys.path[0]: + sys.path.insert(0, str(pkg_root)) + else: + assert_never(mode) + + importlib.import_module(module_name) + + mod = sys.modules[module_name] + if path.name == "__init__.py": + return mod + + ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "") + if ignore != "1": + module_file = mod.__file__ + if module_file is None: + raise ImportPathMismatchError(module_name, module_file, path) + + if module_file.endswith((".pyc", ".pyo")): + module_file = module_file[:-1] + if module_file.endswith(os.sep + "__init__.py"): + module_file = module_file[: -(len(os.sep + "__init__.py"))] + + try: + is_same = _is_same(str(path), module_file) + except FileNotFoundError: + is_same = False + + if not is_same: + raise ImportPathMismatchError(module_name, module_file, path) + + return mod + + +def _import_module_using_spec( + module_name: str, module_path: Path, module_location: Path, *, insert_modules: bool +) -> ModuleType | None: + """ + Tries to import a module by its canonical name, path to the .py file, and its + parent location. + + :param insert_modules: + If True, will call insert_missing_modules to create empty intermediate modules + for made-up module names (when importing test files not reachable from sys.path). + """ + # Checking with sys.meta_path first in case one of its hooks can import this module, + # such as our own assertion-rewrite hook. 
+    for meta_importer in sys.meta_path:
+        spec = meta_importer.find_spec(
+            module_name, [str(module_location), str(module_path)]
+        )
+        if spec_matches_module_path(spec, module_path):
+            break
+    else:
+        spec = importlib.util.spec_from_file_location(module_name, str(module_path))
+
+    if spec_matches_module_path(spec, module_path):
+        assert spec is not None
+        # Attempt to import the parent module, which seems to be our responsibility:
+        # https://github.com/python/cpython/blob/73906d5c908c1e0b73c5436faeff7d93698fc074/Lib/importlib/_bootstrap.py#L1308-L1311
+        parent_module_name, _, name = module_name.rpartition(".")
+        parent_module: ModuleType | None = None
+        if parent_module_name:
+            parent_module = sys.modules.get(parent_module_name)
+            if parent_module is None:
+                # Find the directory of this module's parent.
+                parent_dir = (
+                    module_path.parent.parent
+                    if module_path.name == "__init__.py"
+                    else module_path.parent
+                )
+                # Consider the parent module path as its __init__.py file, if it has one.
+                parent_module_path = (
+                    parent_dir / "__init__.py"
+                    if (parent_dir / "__init__.py").is_file()
+                    else parent_dir
+                )
+                parent_module = _import_module_using_spec(
+                    parent_module_name,
+                    parent_module_path,
+                    parent_dir,
+                    insert_modules=insert_modules,
+                )
+
+        # Find spec and import this module.
+        mod = importlib.util.module_from_spec(spec)
+        sys.modules[module_name] = mod
+        spec.loader.exec_module(mod)  # type: ignore[union-attr]
+
+        # Set this module as an attribute of the parent module (#12194).
+        if parent_module is not None:
+            setattr(parent_module, name, mod)
+
+        if insert_modules:
+            insert_missing_modules(sys.modules, module_name)
+        return mod
+
+    return None
+
+
+def spec_matches_module_path(module_spec: ModuleSpec | None, module_path: Path) -> bool:
+    """Return true if the given ModuleSpec can be used to import the given module path."""
+    if module_spec is None or module_spec.origin is None:
+        return False
+
+    return Path(module_spec.origin) == module_path
+
+
+# Implement a special _is_same function on Windows which returns True if the two filenames
+# compare equal, to circumvent os.path.samefile returning False for mounts in UNC (#7678).
+if sys.platform.startswith("win"):
+
+    def _is_same(f1: str, f2: str) -> bool:
+        return Path(f1) == Path(f2) or os.path.samefile(f1, f2)
+
+else:
+
+    def _is_same(f1: str, f2: str) -> bool:
+        return os.path.samefile(f1, f2)
+
+
+def module_name_from_path(path: Path, root: Path) -> str:
+    """
+    Return a dotted module name based on the given path, anchored on root.
+
+    For example: path="/projects/src/tests/test_foo.py" and root="/projects", the
+    resulting module name will be "src.tests.test_foo".
+    """
+    path = path.with_suffix("")
+    try:
+        relative_path = path.relative_to(root)
+    except ValueError:
+        # If we can't get a relative path to root, use the full path, except
+        # for the first part ("d:\\" or "/" depending on the platform, for example).
+        path_parts = path.parts[1:]
+    else:
+        # Use the parts for the relative path to the root path.
+        path_parts = relative_path.parts
+
+    # Module names for packages do not contain the __init__ file, unless
+    # the `__init__.py` file is at the root.
+    if len(path_parts) >= 2 and path_parts[-1] == "__init__":
+        path_parts = path_parts[:-1]
+
+    # Module names cannot contain ".", so normalize it to "_". This prevents
+    # a directory with a "." in its name (".env.310" for example) from causing extra intermediate modules.
+    # It is also important to replace "." at the start of paths, as those are considered relative imports.
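+    # For example, "src/.env.310/tests/test_foo.py" maps to
+    # "src._env_310.tests.test_foo" rather than splitting ".env.310"
+    # into bogus intermediate module names.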
+ path_parts = tuple(x.replace(".", "_") for x in path_parts) + + return ".".join(path_parts) + + +def insert_missing_modules(modules: dict[str, ModuleType], module_name: str) -> None: + """ + Used by ``import_path`` to create intermediate modules when using mode=importlib. + + When we want to import a module as "src.tests.test_foo" for example, we need + to create empty modules "src" and "src.tests" after inserting "src.tests.test_foo", + otherwise "src.tests.test_foo" is not importable by ``__import__``. + """ + module_parts = module_name.split(".") + while module_name: + parent_module_name, _, child_name = module_name.rpartition(".") + if parent_module_name: + parent_module = modules.get(parent_module_name) + if parent_module is None: + try: + # If sys.meta_path is empty, calling import_module will issue + # a warning and raise ModuleNotFoundError. To avoid the + # warning, we check sys.meta_path explicitly and raise the error + # ourselves to fall back to creating a dummy module. + if not sys.meta_path: + raise ModuleNotFoundError + parent_module = importlib.import_module(parent_module_name) + except ModuleNotFoundError: + parent_module = ModuleType( + module_name, + doc="Empty module created by pytest's importmode=importlib.", + ) + modules[parent_module_name] = parent_module + + # Add child attribute to the parent that can reference the child + # modules. + if not hasattr(parent_module, child_name): + setattr(parent_module, child_name, modules[module_name]) + + module_parts.pop(-1) + module_name = ".".join(module_parts) + + +def resolve_package_path(path: Path) -> Path | None: + """Return the Python package path by looking for the last + directory upwards which still contains an __init__.py. + + Returns None if it cannot be determined. + """ + result = None + for parent in itertools.chain((path,), path.parents): + if parent.is_dir(): + if not (parent / "__init__.py").is_file(): + break + if not parent.name.isidentifier(): + break + result = parent + return result + + +def resolve_pkg_root_and_module_name( + path: Path, *, consider_namespace_packages: bool = False +) -> tuple[Path, str]: + """ + Return the path to the directory of the root package that contains the + given Python file, and its module name: + + src/ + app/ + __init__.py + core/ + __init__.py + models.py + + Passing the full path to `models.py` will yield Path("src") and "app.core.models". + + If consider_namespace_packages is True, then we additionally check upwards in the hierarchy + for namespace packages: + + https://packaging.python.org/en/latest/guides/packaging-namespace-packages + + Raises CouldNotResolvePathError if the given path does not belong to a package (missing any __init__.py files). + """ + pkg_root: Path | None = None + pkg_path = resolve_package_path(path) + if pkg_path is not None: + pkg_root = pkg_path.parent + if consider_namespace_packages: + start = pkg_root if pkg_root is not None else path.parent + for candidate in (start, *start.parents): + module_name = compute_module_name(candidate, path) + if module_name and is_importable(module_name, path): + # Point the pkg_root to the root of the namespace package. 
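+                # For example (illustrative), with "src" on sys.path and no
+                # __init__.py files, src/app/core/models.py resolves here to
+                # pkg_root=src and module_name="app.core.models".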
+                pkg_root = candidate
+                break
+
+    if pkg_root is not None:
+        module_name = compute_module_name(pkg_root, path)
+        if module_name:
+            return pkg_root, module_name
+
+    raise CouldNotResolvePathError(f"Could not resolve for {path}")
+
+
+def is_importable(module_name: str, module_path: Path) -> bool:
+    """
+    Return True if the given module path could be imported normally by Python
+    (as if the user had entered the REPL and imported the corresponding module
+    name directly), and it resolves to the specified module_path.
+
+    :param module_name:
+        Full module name to check for importability.
+        For example, "app.models".
+
+    :param module_path:
+        Full path to the Python module/package to check for importability.
+        For example, "/projects/src/app/models.py".
+    """
+    try:
+        # Note this is different from what we do in ``_import_module_using_spec``, where we explicitly search through
+        # sys.meta_path to be able to pass the path of the module that we want to import (``meta_importer.find_spec``).
+        # Using importlib.util.find_spec() is different: it gives the same results as trying to import
+        # the module normally in the REPL.
+        spec = importlib.util.find_spec(module_name)
+    except (ImportError, ValueError, ImportWarning):
+        return False
+    else:
+        return spec_matches_module_path(spec, module_path)
+
+
+def compute_module_name(root: Path, module_path: Path) -> str | None:
+    """Compute a module name based on a path and a root anchor."""
+    try:
+        path_without_suffix = module_path.with_suffix("")
+    except ValueError:
+        # Empty paths (such as Path.cwd()) might break meta_path hooks (like our own assertion rewriter).
+        return None
+
+    try:
+        relative = path_without_suffix.relative_to(root)
+    except ValueError:  # pragma: no cover
+        return None
+    names = list(relative.parts)
+    if not names:
+        return None
+    if names[-1] == "__init__":
+        names.pop()
+    return ".".join(names)
+
+
+class CouldNotResolvePathError(Exception):
+    """Custom exception raised by resolve_pkg_root_and_module_name."""
+
+
+def scandir(
+    path: str | os.PathLike[str],
+    sort_key: Callable[[os.DirEntry[str]], object] = lambda entry: entry.name,
+) -> list[os.DirEntry[str]]:
+    """Scan a directory, returning its entries sorted according to the given key.
+
+    The default is to sort by name.
+    """
+    entries = []
+    with os.scandir(path) as s:
+        # Skip entries with symlink loops and other brokenness, so the caller
+        # doesn't have to deal with it.
+        for entry in s:
+            try:
+                entry.is_file()
+            except OSError as err:
+                if _ignore_error(err):
+                    continue
+                raise
+            entries.append(entry)
+    entries.sort(key=sort_key)  # type: ignore[arg-type]
+    return entries
+
+
+def visit(
+    path: str | os.PathLike[str], recurse: Callable[[os.DirEntry[str]], bool]
+) -> Iterator[os.DirEntry[str]]:
+    """Walk a directory recursively, in breadth-first order.
+
+    The `recurse` predicate determines whether a directory is recursed.
+
+    Entries at each directory level are sorted.
+    """
+    entries = scandir(path)
+    yield from entries
+    for entry in entries:
+        if entry.is_dir() and recurse(entry):
+            yield from visit(entry.path, recurse)
+
+
+def absolutepath(path: str | os.PathLike[str]) -> Path:
+    """Convert a path to an absolute path using os.path.abspath.
+
+    Prefer this over Path.resolve() (see #6523).
+    Prefer this over Path.absolute() (not public, doesn't normalize).
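+
+    For example (illustrative):
+
+    .. code-block:: python
+
+        absolutepath("a/../b")  # Path(os.getcwd()) / "b"; ".." is normalized away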
+ """ + return Path(os.path.abspath(path)) + + +def commonpath(path1: Path, path2: Path) -> Path | None: + """Return the common part shared with the other path, or None if there is + no common part. + + If one path is relative and one is absolute, returns None. + """ + try: + return Path(os.path.commonpath((str(path1), str(path2)))) + except ValueError: + return None + + +def bestrelpath(directory: Path, dest: Path) -> str: + """Return a string which is a relative path from directory to dest such + that directory/bestrelpath == dest. + + The paths must be either both absolute or both relative. + + If no such path can be determined, returns dest. + """ + assert isinstance(directory, Path) + assert isinstance(dest, Path) + if dest == directory: + return os.curdir + # Find the longest common directory. + base = commonpath(directory, dest) + # Can be the case on Windows for two absolute paths on different drives. + # Can be the case for two relative paths without common prefix. + # Can be the case for a relative path and an absolute path. + if not base: + return str(dest) + reldirectory = directory.relative_to(base) + reldest = dest.relative_to(base) + return os.path.join( + # Back from directory to base. + *([os.pardir] * len(reldirectory.parts)), + # Forward from base to dest. + *reldest.parts, + ) + + +def safe_exists(p: Path) -> bool: + """Like Path.exists(), but account for input arguments that might be too long (#11394).""" + try: + return p.exists() + except (ValueError, OSError): + # ValueError: stat: path too long for Windows + # OSError: [WinError 123] The filename, directory name, or volume label syntax is incorrect + return False diff --git a/venv/Lib/site-packages/_pytest/py.typed b/venv/Lib/site-packages/_pytest/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/venv/Lib/site-packages/_pytest/pytester.py b/venv/Lib/site-packages/_pytest/pytester.py new file mode 100644 index 0000000..3f7520e --- /dev/null +++ b/venv/Lib/site-packages/_pytest/pytester.py @@ -0,0 +1,1766 @@ +# mypy: allow-untyped-defs +"""(Disabled by default) support for testing pytest and pytest plugins. 
+ +PYTEST_DONT_REWRITE +""" + +from __future__ import annotations + +import collections.abc +import contextlib +from fnmatch import fnmatch +import gc +import importlib +from io import StringIO +import locale +import os +from pathlib import Path +import platform +import re +import shutil +import subprocess +import sys +import traceback +from typing import Any +from typing import Callable +from typing import Final +from typing import final +from typing import Generator +from typing import IO +from typing import Iterable +from typing import Literal +from typing import overload +from typing import Sequence +from typing import TextIO +from typing import TYPE_CHECKING +from weakref import WeakKeyDictionary + +from iniconfig import IniConfig +from iniconfig import SectionWrapper + +from _pytest import timing +from _pytest._code import Source +from _pytest.capture import _get_multicapture +from _pytest.compat import NOTSET +from _pytest.compat import NotSetType +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config import main +from _pytest.config import PytestPluginManager +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.main import Session +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import importorskip +from _pytest.outcomes import skip +from _pytest.pathlib import bestrelpath +from _pytest.pathlib import make_numbered_dir +from _pytest.reports import CollectReport +from _pytest.reports import TestReport +from _pytest.tmpdir import TempPathFactory +from _pytest.warning_types import PytestWarning + + +if TYPE_CHECKING: + import pexpect + + +pytest_plugins = ["pytester_assertions"] + + +IGNORE_PAM = [ # filenames added when obtaining details about the current user + "/var/lib/sss/mc/passwd" +] + + +def pytest_addoption(parser: Parser) -> None: + parser.addoption( + "--lsof", + action="store_true", + dest="lsof", + default=False, + help="Run FD checks if lsof is available", + ) + + parser.addoption( + "--runpytest", + default="inprocess", + dest="runpytest", + choices=("inprocess", "subprocess"), + help=( + "Run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method" + ), + ) + + parser.addini( + "pytester_example_dir", help="Directory to take the pytester example files from" + ) + + +def pytest_configure(config: Config) -> None: + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + config.addinivalue_line( + "markers", + "pytester_example_path(*path_segments): join the given path " + "segments to `pytester_example_dir` for this test.", + ) + + +class LsofFdLeakChecker: + def get_open_files(self) -> list[tuple[str, str]]: + if sys.version_info >= (3, 11): + # New in Python 3.11, ignores utf-8 mode + encoding = locale.getencoding() + else: + encoding = locale.getpreferredencoding(False) + out = subprocess.run( + ("lsof", "-Ffn0", "-p", str(os.getpid())), + stdout=subprocess.PIPE, + stderr=subprocess.DEVNULL, + check=True, + text=True, + encoding=encoding, + ).stdout + + def isopen(line: str) -> bool: + return line.startswith("f") and ( + "deleted" not in line + and "mem" not in line + and "txt" not 
in line
+                and "cwd" not in line
+            )
+
+        open_files = []
+
+        for line in out.split("\n"):
+            if isopen(line):
+                fields = line.split("\0")
+                fd = fields[0][1:]
+                filename = fields[1][1:]
+                if filename in IGNORE_PAM:
+                    continue
+                if filename.startswith("/"):
+                    open_files.append((fd, filename))
+
+        return open_files
+
+    def matching_platform(self) -> bool:
+        try:
+            subprocess.run(("lsof", "-v"), check=True)
+        except (OSError, subprocess.CalledProcessError):
+            return False
+        else:
+            return True
+
+    @hookimpl(wrapper=True, tryfirst=True)
+    def pytest_runtest_protocol(self, item: Item) -> Generator[None, object, object]:
+        lines1 = self.get_open_files()
+        try:
+            return (yield)
+        finally:
+            if hasattr(sys, "pypy_version_info"):
+                gc.collect()
+            lines2 = self.get_open_files()
+
+            new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
+            leaked_files = [t for t in lines2 if t[0] in new_fds]
+            if leaked_files:
+                error = [
+                    f"***** {len(leaked_files)} FD leakage detected",
+                    *(str(f) for f in leaked_files),
+                    "*** Before:",
+                    *(str(f) for f in lines1),
+                    "*** After:",
+                    *(str(f) for f in lines2),
+                    f"***** {len(leaked_files)} FD leakage detected",
+                    "*** function {}:{}: {} ".format(*item.location),
+                    "See issue #2366",
+                ]
+                item.warn(PytestWarning("\n".join(error)))
+
+
+# used at least by pytest-xdist plugin
+
+
+@fixture
+def _pytest(request: FixtureRequest) -> PytestArg:
+    """Return a helper which offers a gethookrecorder(hook) method which
+    returns a HookRecorder instance which helps to make assertions about called
+    hooks."""
+    return PytestArg(request)
+
+
+class PytestArg:
+    def __init__(self, request: FixtureRequest) -> None:
+        self._request = request
+
+    def gethookrecorder(self, hook) -> HookRecorder:
+        hookrecorder = HookRecorder(hook._pm)
+        self._request.addfinalizer(hookrecorder.finish_recording)
+        return hookrecorder
+
+
+def get_public_names(values: Iterable[str]) -> list[str]:
+    """Only return names from iterator values without a leading underscore."""
+    return [x for x in values if x[0] != "_"]
+
+
+@final
+class RecordedHookCall:
+    """A recorded call to a hook.
+
+    The arguments to the hook call are set as attributes.
+    For example:
+
+    .. code-block:: python
+
+        calls = hook_recorder.getcalls("pytest_runtest_setup")
+        # Suppose pytest_runtest_setup was called once with `item=an_item`.
+        assert calls[0].item is an_item
+    """
+
+    def __init__(self, name: str, kwargs) -> None:
+        self.__dict__.update(kwargs)
+        self._name = name
+
+    def __repr__(self) -> str:
+        d = self.__dict__.copy()
+        del d["_name"]
+        return f"<RecordedHookCall {self._name!r}(**{d!r})>"
+
+    if TYPE_CHECKING:
+        # The class has undetermined attributes; this tells mypy about it.
+        def __getattr__(self, key: str): ...
+
+
+@final
+class HookRecorder:
+    """Record all hooks called in a plugin manager.
+
+    Hook recorders are created by :class:`Pytester`.
+
+    This wraps all the hook calls in the plugin manager, recording each call
+    before propagating the normal calls.
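+
+    For example, a minimal sketch (``recorder`` is assumed to come from
+    :py:meth:`Pytester.make_hook_recorder`):
+
+    .. code-block:: python
+
+        calls = recorder.getcalls("pytest_runtest_logreport")
+        failed = [call.report for call in calls if call.report.failed]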
+ """ + + def __init__( + self, pluginmanager: PytestPluginManager, *, _ispytest: bool = False + ) -> None: + check_ispytest(_ispytest) + + self._pluginmanager = pluginmanager + self.calls: list[RecordedHookCall] = [] + self.ret: int | ExitCode | None = None + + def before(hook_name: str, hook_impls, kwargs) -> None: + self.calls.append(RecordedHookCall(hook_name, kwargs)) + + def after(outcome, hook_name: str, hook_impls, kwargs) -> None: + pass + + self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after) + + def finish_recording(self) -> None: + self._undo_wrapping() + + def getcalls(self, names: str | Iterable[str]) -> list[RecordedHookCall]: + """Get all recorded calls to hooks with the given names (or name).""" + if isinstance(names, str): + names = names.split() + return [call for call in self.calls if call._name in names] + + def assert_contains(self, entries: Sequence[tuple[str, str]]) -> None: + __tracebackhide__ = True + i = 0 + entries = list(entries) + # Since Python 3.13, f_locals is not a dict, but eval requires a dict. + backlocals = dict(sys._getframe(1).f_locals) + while entries: + name, check = entries.pop(0) + for ind, call in enumerate(self.calls[i:]): + if call._name == name: + print("NAMEMATCH", name, call) + if eval(check, backlocals, call.__dict__): + print("CHECKERMATCH", repr(check), "->", call) + else: + print("NOCHECKERMATCH", repr(check), "-", call) + continue + i += ind + 1 + break + print("NONAMEMATCH", name, "with", call) + else: + fail(f"could not find {name!r} check {check!r}") + + def popcall(self, name: str) -> RecordedHookCall: + __tracebackhide__ = True + for i, call in enumerate(self.calls): + if call._name == name: + del self.calls[i] + return call + lines = [f"could not find call {name!r}, in:"] + lines.extend([f" {x}" for x in self.calls]) + fail("\n".join(lines)) + + def getcall(self, name: str) -> RecordedHookCall: + values = self.getcalls(name) + assert len(values) == 1, (name, values) + return values[0] + + # functionality for test reports + + @overload + def getreports( + self, + names: Literal["pytest_collectreport"], + ) -> Sequence[CollectReport]: ... + + @overload + def getreports( + self, + names: Literal["pytest_runtest_logreport"], + ) -> Sequence[TestReport]: ... + + @overload + def getreports( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: ... + + def getreports( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: + return [x.report for x in self.getcalls(names)] + + def matchreport( + self, + inamepart: str = "", + names: str | Iterable[str] = ( + "pytest_runtest_logreport", + "pytest_collectreport", + ), + when: str | None = None, + ) -> CollectReport | TestReport: + """Return a testreport whose dotted import path matches.""" + values = [] + for rep in self.getreports(names=names): + if not when and rep.when != "call" and rep.passed: + # setup/teardown passing reports - let's ignore those + continue + if when and rep.when != when: + continue + if not inamepart or inamepart in rep.nodeid.split("::"): + values.append(rep) + if not values: + raise ValueError( + f"could not find test report matching {inamepart!r}: " + "no test reports at all!" 
+ ) + if len(values) > 1: + raise ValueError( + f"found 2 or more testreports matching {inamepart!r}: {values}" + ) + return values[0] + + @overload + def getfailures( + self, + names: Literal["pytest_collectreport"], + ) -> Sequence[CollectReport]: ... + + @overload + def getfailures( + self, + names: Literal["pytest_runtest_logreport"], + ) -> Sequence[TestReport]: ... + + @overload + def getfailures( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: ... + + def getfailures( + self, + names: str | Iterable[str] = ( + "pytest_collectreport", + "pytest_runtest_logreport", + ), + ) -> Sequence[CollectReport | TestReport]: + return [rep for rep in self.getreports(names) if rep.failed] + + def getfailedcollections(self) -> Sequence[CollectReport]: + return self.getfailures("pytest_collectreport") + + def listoutcomes( + self, + ) -> tuple[ + Sequence[TestReport], + Sequence[CollectReport | TestReport], + Sequence[CollectReport | TestReport], + ]: + passed = [] + skipped = [] + failed = [] + for rep in self.getreports( + ("pytest_collectreport", "pytest_runtest_logreport") + ): + if rep.passed: + if rep.when == "call": + assert isinstance(rep, TestReport) + passed.append(rep) + elif rep.skipped: + skipped.append(rep) + else: + assert rep.failed, f"Unexpected outcome: {rep!r}" + failed.append(rep) + return passed, skipped, failed + + def countoutcomes(self) -> list[int]: + return [len(x) for x in self.listoutcomes()] + + def assertoutcome(self, passed: int = 0, skipped: int = 0, failed: int = 0) -> None: + __tracebackhide__ = True + from _pytest.pytester_assertions import assertoutcome + + outcomes = self.listoutcomes() + assertoutcome( + outcomes, + passed=passed, + skipped=skipped, + failed=failed, + ) + + def clear(self) -> None: + self.calls[:] = [] + + +@fixture +def linecomp() -> LineComp: + """A :class: `LineComp` instance for checking that an input linearly + contains a sequence of strings.""" + return LineComp() + + +@fixture(name="LineMatcher") +def LineMatcher_fixture(request: FixtureRequest) -> type[LineMatcher]: + """A reference to the :class: `LineMatcher`. + + This is instantiable with a list of lines (without their trailing newlines). + This is useful for testing large texts, such as the output of commands. + """ + return LineMatcher + + +@fixture +def pytester( + request: FixtureRequest, tmp_path_factory: TempPathFactory, monkeypatch: MonkeyPatch +) -> Pytester: + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to ``path`` and environment variables during initialization. + + It is particularly useful for testing plugins. It is similar to the :fixture:`tmp_path` + fixture but provides methods which aid in testing pytest itself. + """ + return Pytester(request, tmp_path_factory, monkeypatch, _ispytest=True) + + +@fixture +def _sys_snapshot() -> Generator[None]: + snappaths = SysPathsSnapshot() + snapmods = SysModulesSnapshot() + yield + snapmods.restore() + snappaths.restore() + + +@fixture +def _config_for_test() -> Generator[Config]: + from _pytest.config import get_config + + config = get_config() + yield config + config._ensure_unconfigure() # cleanup, e.g. capman closing tmpfiles. 
+
+
+# Regex to match the session duration string in the summary: "74.34s".
+rex_session_duration = re.compile(r"\d+\.\d\ds")
+# Regex to match all the counts and phrases in the summary line: "34 passed, 111 skipped".
+rex_outcome = re.compile(r"(\d+) (\w+)")
+
+
+@final
+class RunResult:
+    """The result of running a command from :class:`~pytest.Pytester`."""
+
+    def __init__(
+        self,
+        ret: int | ExitCode,
+        outlines: list[str],
+        errlines: list[str],
+        duration: float,
+    ) -> None:
+        try:
+            self.ret: int | ExitCode = ExitCode(ret)
+            """The return value."""
+        except ValueError:
+            self.ret = ret
+        self.outlines = outlines
+        """List of lines captured from stdout."""
+        self.errlines = errlines
+        """List of lines captured from stderr."""
+        self.stdout = LineMatcher(outlines)
+        """:class:`~pytest.LineMatcher` of stdout.
+
+        Use e.g. :func:`str(stdout) <LineMatcher.__str__()>` to reconstruct stdout, or the commonly used
+        :func:`stdout.fnmatch_lines() <LineMatcher.fnmatch_lines()>` method.
+        """
+        self.stderr = LineMatcher(errlines)
+        """:class:`~pytest.LineMatcher` of stderr."""
+        self.duration = duration
+        """Duration in seconds."""
+
+    def __repr__(self) -> str:
+        return (
+            "<RunResult ret=%s len(stdout.lines)=%d len(stderr.lines)=%d duration=%.2fs>"
+            % (self.ret, len(self.stdout.lines), len(self.stderr.lines), self.duration)
+        )
+
+    def parseoutcomes(self) -> dict[str, int]:
+        """Return a dictionary of outcome noun -> count from parsing the terminal
+        output that the test process produced.
+
+        The returned nouns will always be in plural form::
+
+            ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
+
+        Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
+        """
+        return self.parse_summary_nouns(self.outlines)
+
+    @classmethod
+    def parse_summary_nouns(cls, lines) -> dict[str, int]:
+        """Extract the nouns from a pytest terminal summary line.
+
+        It always returns the plural noun for consistency::
+
+            ======= 1 failed, 1 passed, 1 warning, 1 error in 0.13s ====
+
+        Will return ``{"failed": 1, "passed": 1, "warnings": 1, "errors": 1}``.
+        """
+        for line in reversed(lines):
+            if rex_session_duration.search(line):
+                outcomes = rex_outcome.findall(line)
+                ret = {noun: int(count) for (count, noun) in outcomes}
+                break
+        else:
+            raise ValueError("Pytest terminal summary report not found")
+
+        to_plural = {
+            "warning": "warnings",
+            "error": "errors",
+        }
+        return {to_plural.get(k, k): v for k, v in ret.items()}
+
+    def assert_outcomes(
+        self,
+        passed: int = 0,
+        skipped: int = 0,
+        failed: int = 0,
+        errors: int = 0,
+        xpassed: int = 0,
+        xfailed: int = 0,
+        warnings: int | None = None,
+        deselected: int | None = None,
+    ) -> None:
+        """
+        Assert that the specified outcomes appear with the respective
+        numbers (0 means it didn't occur) in the text output from a test run.
+
+        ``warnings`` and ``deselected`` are only checked if not None.
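+
+        For example (illustrative; assumes a ``pytester`` fixture is in scope):
+
+        .. code-block:: python
+
+            result = pytester.runpytest()
+            result.assert_outcomes(passed=2, warnings=1)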
+ """ + __tracebackhide__ = True + from _pytest.pytester_assertions import assert_outcomes + + outcomes = self.parseoutcomes() + assert_outcomes( + outcomes, + passed=passed, + skipped=skipped, + failed=failed, + errors=errors, + xpassed=xpassed, + xfailed=xfailed, + warnings=warnings, + deselected=deselected, + ) + + +class SysModulesSnapshot: + def __init__(self, preserve: Callable[[str], bool] | None = None) -> None: + self.__preserve = preserve + self.__saved = dict(sys.modules) + + def restore(self) -> None: + if self.__preserve: + self.__saved.update( + (k, m) for k, m in sys.modules.items() if self.__preserve(k) + ) + sys.modules.clear() + sys.modules.update(self.__saved) + + +class SysPathsSnapshot: + def __init__(self) -> None: + self.__saved = list(sys.path), list(sys.meta_path) + + def restore(self) -> None: + sys.path[:], sys.meta_path[:] = self.__saved + + +@final +class Pytester: + """ + Facilities to write tests/configuration files, execute pytest in isolation, and match + against expected output, perfect for black-box testing of pytest plugins. + + It attempts to isolate the test run from external factors as much as possible, modifying + the current working directory to :attr:`path` and environment variables during initialization. + """ + + __test__ = False + + CLOSE_STDIN: Final = NOTSET + + class TimeoutExpired(Exception): + pass + + def __init__( + self, + request: FixtureRequest, + tmp_path_factory: TempPathFactory, + monkeypatch: MonkeyPatch, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + self._request = request + self._mod_collections: WeakKeyDictionary[Collector, list[Item | Collector]] = ( + WeakKeyDictionary() + ) + if request.function: + name: str = request.function.__name__ + else: + name = request.node.name + self._name = name + self._path: Path = tmp_path_factory.mktemp(name, numbered=True) + #: A list of plugins to use with :py:meth:`parseconfig` and + #: :py:meth:`runpytest`. Initially this is an empty list but plugins can + #: be added to the list. The type of items to add to the list depends on + #: the method using them so refer to them for details. + self.plugins: list[str | _PluggyPlugin] = [] + self._sys_path_snapshot = SysPathsSnapshot() + self._sys_modules_snapshot = self.__take_sys_modules_snapshot() + self._request.addfinalizer(self._finalize) + self._method = self._request.config.getoption("--runpytest") + self._test_tmproot = tmp_path_factory.mktemp(f"tmp-{name}", numbered=True) + + self._monkeypatch = mp = monkeypatch + self.chdir() + mp.setenv("PYTEST_DEBUG_TEMPROOT", str(self._test_tmproot)) + # Ensure no unexpected caching via tox. + mp.delenv("TOX_ENV_DIR", raising=False) + # Discard outer pytest options. + mp.delenv("PYTEST_ADDOPTS", raising=False) + # Ensure no user config is used. + tmphome = str(self.path) + mp.setenv("HOME", tmphome) + mp.setenv("USERPROFILE", tmphome) + # Do not use colors for inner runs by default. + mp.setenv("PY_COLORS", "0") + + @property + def path(self) -> Path: + """Temporary directory path used to create files/run tests from, etc.""" + return self._path + + def __repr__(self) -> str: + return f"" + + def _finalize(self) -> None: + """ + Clean up global state artifacts. + + Some methods modify the global interpreter state and this tries to + clean this up. It does not remove the temporary directory however so + it can be looked at after the test run has finished. 
+ """ + self._sys_modules_snapshot.restore() + self._sys_path_snapshot.restore() + + def __take_sys_modules_snapshot(self) -> SysModulesSnapshot: + # Some zope modules used by twisted-related tests keep internal state + # and can't be deleted; we had some trouble in the past with + # `zope.interface` for example. + # + # Preserve readline due to https://bugs.python.org/issue41033. + # pexpect issues a SIGWINCH. + def preserve_module(name): + return name.startswith(("zope", "readline")) + + return SysModulesSnapshot(preserve=preserve_module) + + def make_hook_recorder(self, pluginmanager: PytestPluginManager) -> HookRecorder: + """Create a new :class:`HookRecorder` for a :class:`PytestPluginManager`.""" + pluginmanager.reprec = reprec = HookRecorder(pluginmanager, _ispytest=True) # type: ignore[attr-defined] + self._request.addfinalizer(reprec.finish_recording) + return reprec + + def chdir(self) -> None: + """Cd into the temporary directory. + + This is done automatically upon instantiation. + """ + self._monkeypatch.chdir(self.path) + + def _makefile( + self, + ext: str, + lines: Sequence[Any | bytes], + files: dict[str, str], + encoding: str = "utf-8", + ) -> Path: + items = list(files.items()) + + if ext is None: + raise TypeError("ext must not be None") + + if ext and not ext.startswith("."): + raise ValueError( + f"pytester.makefile expects a file extension, try .{ext} instead of {ext}" + ) + + def to_text(s: Any | bytes) -> str: + return s.decode(encoding) if isinstance(s, bytes) else str(s) + + if lines: + source = "\n".join(to_text(x) for x in lines) + basename = self._name + items.insert(0, (basename, source)) + + ret = None + for basename, value in items: + p = self.path.joinpath(basename).with_suffix(ext) + p.parent.mkdir(parents=True, exist_ok=True) + source_ = Source(value) + source = "\n".join(to_text(line) for line in source_.lines) + p.write_text(source.strip(), encoding=encoding) + if ret is None: + ret = p + assert ret is not None + return ret + + def makefile(self, ext: str, *args: str, **kwargs: str) -> Path: + r"""Create new text file(s) in the test directory. + + :param ext: + The extension the file(s) should use, including the dot, e.g. `.py`. + :param args: + All args are treated as strings and joined using newlines. + The result is written as contents to the file. The name of the + file is based on the test function requesting this fixture. + :param kwargs: + Each keyword is the name of a file, while the value of it will + be written as contents of the file. + :returns: + The first created file. + + Examples: + + .. code-block:: python + + pytester.makefile(".txt", "line1", "line2") + + pytester.makefile(".ini", pytest="[pytest]\naddopts=-rs\n") + + To create binary files, use :meth:`pathlib.Path.write_bytes` directly: + + .. code-block:: python + + filename = pytester.path.joinpath("foo.bin") + filename.write_bytes(b"...") + """ + return self._makefile(ext, args, kwargs) + + def makeconftest(self, source: str) -> Path: + """Write a conftest.py file. + + :param source: The contents. + :returns: The conftest.py file. + """ + return self.makepyfile(conftest=source) + + def makeini(self, source: str) -> Path: + """Write a tox.ini file. + + :param source: The contents. + :returns: The tox.ini file. 
+ """ + return self.makefile(".ini", tox=source) + + def getinicfg(self, source: str) -> SectionWrapper: + """Return the pytest section from the tox.ini config file.""" + p = self.makeini(source) + return IniConfig(str(p))["pytest"] + + def makepyprojecttoml(self, source: str) -> Path: + """Write a pyproject.toml file. + + :param source: The contents. + :returns: The pyproject.ini file. + + .. versionadded:: 6.0 + """ + return self.makefile(".toml", pyproject=source) + + def makepyfile(self, *args, **kwargs) -> Path: + r"""Shortcut for .makefile() with a .py extension. + + Defaults to the test name with a '.py' extension, e.g test_foobar.py, overwriting + existing files. + + Examples: + + .. code-block:: python + + def test_something(pytester): + # Initial file is created test_something.py. + pytester.makepyfile("foobar") + # To create multiple files, pass kwargs accordingly. + pytester.makepyfile(custom="foobar") + # At this point, both 'test_something.py' & 'custom.py' exist in the test directory. + + """ + return self._makefile(".py", args, kwargs) + + def maketxtfile(self, *args, **kwargs) -> Path: + r"""Shortcut for .makefile() with a .txt extension. + + Defaults to the test name with a '.txt' extension, e.g test_foobar.txt, overwriting + existing files. + + Examples: + + .. code-block:: python + + def test_something(pytester): + # Initial file is created test_something.txt. + pytester.maketxtfile("foobar") + # To create multiple files, pass kwargs accordingly. + pytester.maketxtfile(custom="foobar") + # At this point, both 'test_something.txt' & 'custom.txt' exist in the test directory. + + """ + return self._makefile(".txt", args, kwargs) + + def syspathinsert(self, path: str | os.PathLike[str] | None = None) -> None: + """Prepend a directory to sys.path, defaults to :attr:`path`. + + This is undone automatically when this object dies at the end of each + test. + + :param path: + The path. + """ + if path is None: + path = self.path + + self._monkeypatch.syspath_prepend(str(path)) + + def mkdir(self, name: str | os.PathLike[str]) -> Path: + """Create a new (sub)directory. + + :param name: + The name of the directory, relative to the pytester path. + :returns: + The created directory. + :rtype: pathlib.Path + """ + p = self.path / name + p.mkdir() + return p + + def mkpydir(self, name: str | os.PathLike[str]) -> Path: + """Create a new python package. + + This creates a (sub)directory with an empty ``__init__.py`` file so it + gets recognised as a Python package. + """ + p = self.path / name + p.mkdir() + p.joinpath("__init__.py").touch() + return p + + def copy_example(self, name: str | None = None) -> Path: + """Copy file from project's directory into the testdir. + + :param name: + The name of the file to copy. + :return: + Path to the copied directory (inside ``self.path``). 
+ :rtype: pathlib.Path + """ + example_dir_ = self._request.config.getini("pytester_example_dir") + if example_dir_ is None: + raise ValueError("pytester_example_dir is unset, can't copy examples") + example_dir: Path = self._request.config.rootpath / example_dir_ + + for extra_element in self._request.node.iter_markers("pytester_example_path"): + assert extra_element.args + example_dir = example_dir.joinpath(*extra_element.args) + + if name is None: + func_name = self._name + maybe_dir = example_dir / func_name + maybe_file = example_dir / (func_name + ".py") + + if maybe_dir.is_dir(): + example_path = maybe_dir + elif maybe_file.is_file(): + example_path = maybe_file + else: + raise LookupError( + f"{func_name} can't be found as module or package in {example_dir}" + ) + else: + example_path = example_dir.joinpath(name) + + if example_path.is_dir() and not example_path.joinpath("__init__.py").is_file(): + shutil.copytree(example_path, self.path, symlinks=True, dirs_exist_ok=True) + return self.path + elif example_path.is_file(): + result = self.path.joinpath(example_path.name) + shutil.copy(example_path, result) + return result + else: + raise LookupError( + f'example "{example_path}" is not found as a file or directory' + ) + + def getnode(self, config: Config, arg: str | os.PathLike[str]) -> Collector | Item: + """Get the collection node of a file. + + :param config: + A pytest config. + See :py:meth:`parseconfig` and :py:meth:`parseconfigure` for creating it. + :param arg: + Path to the file. + :returns: + The node. + """ + session = Session.from_config(config) + assert "::" not in str(arg) + p = Path(os.path.abspath(arg)) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([str(p)], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def getpathnode(self, path: str | os.PathLike[str]) -> Collector | Item: + """Return the collection node of a file. + + This is like :py:meth:`getnode` but uses :py:meth:`parseconfigure` to + create the (configured) pytest Config instance. + + :param path: + Path to the file. + :returns: + The node. + """ + path = Path(path) + config = self.parseconfigure(path) + session = Session.from_config(config) + x = bestrelpath(session.path, path) + config.hook.pytest_sessionstart(session=session) + res = session.perform_collect([x], genitems=False)[0] + config.hook.pytest_sessionfinish(session=session, exitstatus=ExitCode.OK) + return res + + def genitems(self, colitems: Sequence[Item | Collector]) -> list[Item]: + """Generate all test items from a collection node. + + This recurses into the collection node and returns a list of all the + test items contained within. + + :param colitems: + The collection nodes. + :returns: + The collected items. + """ + session = colitems[0].session + result: list[Item] = [] + for colitem in colitems: + result.extend(session.genitems(colitem)) + return result + + def runitem(self, source: str) -> Any: + """Run the "test_func" Item. + + The calling test instance (class containing the test method) must + provide a ``.getrunner()`` method which should return a runner which + can run the test protocol for a single item, e.g. + ``_pytest.runner.runtestprotocol``. 
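+
+        A minimal sketch (the class and runner choice are illustrative):
+
+        .. code-block:: python
+
+            class TestMyRun:
+                def getrunner(self):
+                    from _pytest.runner import runtestprotocol
+
+                    return runtestprotocol
+
+                def test_it(self, pytester):
+                    reports = pytester.runitem("def test_func(): pass")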
+ """ + # used from runner functional tests + item = self.getitem(source) + # the test class where we are called from wants to provide the runner + testclassinstance = self._request.instance + runner = testclassinstance.getrunner() + return runner(item) + + def inline_runsource(self, source: str, *cmdlineargs) -> HookRecorder: + """Run a test module in process using ``pytest.main()``. + + This run writes "source" into a temporary file and runs + ``pytest.main()`` on it, returning a :py:class:`HookRecorder` instance + for the result. + + :param source: The source code of the test module. + :param cmdlineargs: Any extra command line arguments to use. + """ + p = self.makepyfile(source) + values = [*list(cmdlineargs), p] + return self.inline_run(*values) + + def inline_genitems(self, *args) -> tuple[list[Item], HookRecorder]: + """Run ``pytest.main(['--collect-only'])`` in-process. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself like :py:meth:`inline_run`, but returns a + tuple of the collected items and a :py:class:`HookRecorder` instance. + """ + rec = self.inline_run("--collect-only", *args) + items = [x.item for x in rec.getcalls("pytest_itemcollected")] + return items, rec + + def inline_run( + self, + *args: str | os.PathLike[str], + plugins=(), + no_reraise_ctrlc: bool = False, + ) -> HookRecorder: + """Run ``pytest.main()`` in-process, returning a HookRecorder. + + Runs the :py:func:`pytest.main` function to run all of pytest inside + the test process itself. This means it can return a + :py:class:`HookRecorder` instance which gives more detailed results + from that run than can be done by matching stdout/stderr from + :py:meth:`runpytest`. + + :param args: + Command line arguments to pass to :py:func:`pytest.main`. + :param plugins: + Extra plugin instances the ``pytest.main()`` instance should use. + :param no_reraise_ctrlc: + Typically we reraise keyboard interrupts from the child run. If + True, the KeyboardInterrupt exception is captured. + """ + # (maybe a cpython bug?) the importlib cache sometimes isn't updated + # properly between file creation and inline_run (especially if imports + # are interspersed with file creation) + importlib.invalidate_caches() + + plugins = list(plugins) + finalizers = [] + try: + # Any sys.module or sys.path changes done while running pytest + # inline should be reverted after the test run completes to avoid + # clashing with later inline tests run within the same pytest test, + # e.g. just because they use matching test module names. + finalizers.append(self.__take_sys_modules_snapshot().restore) + finalizers.append(SysPathsSnapshot().restore) + + # Important note: + # - our tests should not leave any other references/registrations + # laying around other than possibly loaded test modules + # referenced from sys.modules, as nothing will clean those up + # automatically + + rec = [] + + class Collect: + def pytest_configure(x, config: Config) -> None: + rec.append(self.make_hook_recorder(config.pluginmanager)) + + plugins.append(Collect()) + ret = main([str(x) for x in args], plugins=plugins) + if len(rec) == 1: + reprec = rec.pop() + else: + + class reprec: # type: ignore + pass + + reprec.ret = ret + + # Typically we reraise keyboard interrupts from the child run + # because it's our user requesting interruption of the testing. 
+ if ret == ExitCode.INTERRUPTED and not no_reraise_ctrlc: + calls = reprec.getcalls("pytest_keyboard_interrupt") + if calls and calls[-1].excinfo.type == KeyboardInterrupt: + raise KeyboardInterrupt() + return reprec + finally: + for finalizer in finalizers: + finalizer() + + def runpytest_inprocess( + self, *args: str | os.PathLike[str], **kwargs: Any + ) -> RunResult: + """Return result of running pytest in-process, providing a similar + interface to what self.runpytest() provides.""" + syspathinsert = kwargs.pop("syspathinsert", False) + + if syspathinsert: + self.syspathinsert() + now = timing.time() + capture = _get_multicapture("sys") + capture.start_capturing() + try: + try: + reprec = self.inline_run(*args, **kwargs) + except SystemExit as e: + ret = e.args[0] + try: + ret = ExitCode(e.args[0]) + except ValueError: + pass + + class reprec: # type: ignore + ret = ret + + except Exception: + traceback.print_exc() + + class reprec: # type: ignore + ret = ExitCode(3) + + finally: + out, err = capture.readouterr() + capture.stop_capturing() + sys.stdout.write(out) + sys.stderr.write(err) + + assert reprec.ret is not None + res = RunResult( + reprec.ret, out.splitlines(), err.splitlines(), timing.time() - now + ) + res.reprec = reprec # type: ignore + return res + + def runpytest(self, *args: str | os.PathLike[str], **kwargs: Any) -> RunResult: + """Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`~pytest.RunResult`.""" + new_args = self._ensure_basetemp(args) + if self._method == "inprocess": + return self.runpytest_inprocess(*new_args, **kwargs) + elif self._method == "subprocess": + return self.runpytest_subprocess(*new_args, **kwargs) + raise RuntimeError(f"Unrecognized runpytest option: {self._method}") + + def _ensure_basetemp( + self, args: Sequence[str | os.PathLike[str]] + ) -> list[str | os.PathLike[str]]: + new_args = list(args) + for x in new_args: + if str(x).startswith("--basetemp"): + break + else: + new_args.append( + "--basetemp={}".format(self.path.parent.joinpath("basetemp")) + ) + return new_args + + def parseconfig(self, *args: str | os.PathLike[str]) -> Config: + """Return a new pytest :class:`pytest.Config` instance from given + commandline args. + + This invokes the pytest bootstrapping code in _pytest.config to create a + new :py:class:`pytest.PytestPluginManager` and call the + :hook:`pytest_cmdline_parse` hook to create a new :class:`pytest.Config` + instance. + + If :attr:`plugins` has been populated they should be plugin modules + to be registered with the plugin manager. + """ + import _pytest.config + + new_args = self._ensure_basetemp(args) + new_args = [str(x) for x in new_args] + + config = _pytest.config._prepareconfig(new_args, self.plugins) # type: ignore[arg-type] + # we don't know what the test will do with this half-setup config + # object and thus we make sure it gets unconfigured properly in any + # case (otherwise capturing could still be active, for example) + self._request.addfinalizer(config._ensure_unconfigure) + return config + + def parseconfigure(self, *args: str | os.PathLike[str]) -> Config: + """Return a new pytest configured Config instance. + + Returns a new :py:class:`pytest.Config` instance like + :py:meth:`parseconfig`, but also calls the :hook:`pytest_configure` + hook. 
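+
+        For example (illustrative):
+
+        .. code-block:: python
+
+            config = pytester.parseconfigure("-v")
+            assert config.getoption("verbose") == 1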
+ """ + config = self.parseconfig(*args) + config._do_configure() + return config + + def getitem( + self, source: str | os.PathLike[str], funcname: str = "test_func" + ) -> Item: + """Return the test item for a test function. + + Writes the source to a python file and runs pytest's collection on + the resulting module, returning the test item for the requested + function name. + + :param source: + The module source. + :param funcname: + The name of the test function for which to return a test item. + :returns: + The test item. + """ + items = self.getitems(source) + for item in items: + if item.name == funcname: + return item + assert 0, f"{funcname!r} item not found in module:\n{source}\nitems: {items}" + + def getitems(self, source: str | os.PathLike[str]) -> list[Item]: + """Return all test items collected from the module. + + Writes the source to a Python file and runs pytest's collection on + the resulting module, returning all test items contained within. + """ + modcol = self.getmodulecol(source) + return self.genitems([modcol]) + + def getmodulecol( + self, + source: str | os.PathLike[str], + configargs=(), + *, + withinit: bool = False, + ): + """Return the module collection node for ``source``. + + Writes ``source`` to a file using :py:meth:`makepyfile` and then + runs the pytest collection on it, returning the collection node for the + test module. + + :param source: + The source code of the module to collect. + + :param configargs: + Any extra arguments to pass to :py:meth:`parseconfigure`. + + :param withinit: + Whether to also write an ``__init__.py`` file to the same + directory to ensure it is a package. + """ + if isinstance(source, os.PathLike): + path = self.path.joinpath(source) + assert not withinit, "not supported for paths" + else: + kw = {self._name: str(source)} + path = self.makepyfile(**kw) + if withinit: + self.makepyfile(__init__="#") + self.config = config = self.parseconfigure(path, *configargs) + return self.getnode(config, path) + + def collect_by_name(self, modcol: Collector, name: str) -> Item | Collector | None: + """Return the collection node for name from the module collection. + + Searches a module collection node for a collection node matching the + given name. + + :param modcol: A module collection node; see :py:meth:`getmodulecol`. + :param name: The name of the node to return. + """ + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: + if colitem.name == name: + return colitem + return None + + def popen( + self, + cmdargs: Sequence[str | os.PathLike[str]], + stdout: int | TextIO = subprocess.PIPE, + stderr: int | TextIO = subprocess.PIPE, + stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN, + **kw, + ): + """Invoke :py:class:`subprocess.Popen`. + + Calls :py:class:`subprocess.Popen` making sure the current working + directory is in ``PYTHONPATH``. + + You probably want to use :py:meth:`run` instead. 
+ """ + env = os.environ.copy() + env["PYTHONPATH"] = os.pathsep.join( + filter(None, [os.getcwd(), env.get("PYTHONPATH", "")]) + ) + kw["env"] = env + + if stdin is self.CLOSE_STDIN: + kw["stdin"] = subprocess.PIPE + elif isinstance(stdin, bytes): + kw["stdin"] = subprocess.PIPE + else: + kw["stdin"] = stdin + + popen = subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw) + if stdin is self.CLOSE_STDIN: + assert popen.stdin is not None + popen.stdin.close() + elif isinstance(stdin, bytes): + assert popen.stdin is not None + popen.stdin.write(stdin) + + return popen + + def run( + self, + *cmdargs: str | os.PathLike[str], + timeout: float | None = None, + stdin: NotSetType | bytes | IO[Any] | int = CLOSE_STDIN, + ) -> RunResult: + """Run a command with arguments. + + Run a process using :py:class:`subprocess.Popen` saving the stdout and + stderr. + + :param cmdargs: + The sequence of arguments to pass to :py:class:`subprocess.Popen`, + with path-like objects being converted to :py:class:`str` + automatically. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :param stdin: + Optional standard input. + + - If it is ``CLOSE_STDIN`` (Default), then this method calls + :py:class:`subprocess.Popen` with ``stdin=subprocess.PIPE``, and + the standard input is closed immediately after the new command is + started. + + - If it is of type :py:class:`bytes`, these bytes are sent to the + standard input of the command. + + - Otherwise, it is passed through to :py:class:`subprocess.Popen`. + For further information in this case, consult the document of the + ``stdin`` parameter in :py:class:`subprocess.Popen`. + :type stdin: _pytest.compat.NotSetType | bytes | IO[Any] | int + :returns: + The result. 
+ + """ + __tracebackhide__ = True + + cmdargs = tuple(os.fspath(arg) for arg in cmdargs) + p1 = self.path.joinpath("stdout") + p2 = self.path.joinpath("stderr") + print("running:", *cmdargs) + print(" in:", Path.cwd()) + + with p1.open("w", encoding="utf8") as f1, p2.open("w", encoding="utf8") as f2: + now = timing.time() + popen = self.popen( + cmdargs, + stdin=stdin, + stdout=f1, + stderr=f2, + close_fds=(sys.platform != "win32"), + ) + if popen.stdin is not None: + popen.stdin.close() + + def handle_timeout() -> None: + __tracebackhide__ = True + + timeout_message = f"{timeout} second timeout expired running: {cmdargs}" + + popen.kill() + popen.wait() + raise self.TimeoutExpired(timeout_message) + + if timeout is None: + ret = popen.wait() + else: + try: + ret = popen.wait(timeout) + except subprocess.TimeoutExpired: + handle_timeout() + + with p1.open(encoding="utf8") as f1, p2.open(encoding="utf8") as f2: + out = f1.read().splitlines() + err = f2.read().splitlines() + + self._dump_lines(out, sys.stdout) + self._dump_lines(err, sys.stderr) + + with contextlib.suppress(ValueError): + ret = ExitCode(ret) + return RunResult(ret, out, err, timing.time() - now) + + def _dump_lines(self, lines, fp): + try: + for line in lines: + print(line, file=fp) + except UnicodeEncodeError: + print(f"couldn't print to {fp} because of encoding") + + def _getpytestargs(self) -> tuple[str, ...]: + return sys.executable, "-mpytest" + + def runpython(self, script: os.PathLike[str]) -> RunResult: + """Run a python script using sys.executable as interpreter.""" + return self.run(sys.executable, script) + + def runpython_c(self, command: str) -> RunResult: + """Run ``python -c "command"``.""" + return self.run(sys.executable, "-c", command) + + def runpytest_subprocess( + self, *args: str | os.PathLike[str], timeout: float | None = None + ) -> RunResult: + """Run pytest as a subprocess with given arguments. + + Any plugins added to the :py:attr:`plugins` list will be added using the + ``-p`` command line option. Additionally ``--basetemp`` is used to put + any temporary files and directories in a numbered directory prefixed + with "runpytest-" to not conflict with the normal numbered pytest + location for temporary files and directories. + + :param args: + The sequence of arguments to pass to the pytest subprocess. + :param timeout: + The period in seconds after which to timeout and raise + :py:class:`Pytester.TimeoutExpired`. + :returns: + The result. + """ + __tracebackhide__ = True + p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700) + args = (f"--basetemp={p}", *args) + plugins = [x for x in self.plugins if isinstance(x, str)] + if plugins: + args = ("-p", plugins[0], *args) + args = self._getpytestargs() + args + return self.run(*args, timeout=timeout) + + def spawn_pytest(self, string: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """Run pytest using pexpect. + + This makes sure to use the right pytest and sets up the temporary + directory locations. + + The pexpect child is returned. + """ + basetemp = self.path / "temp-pexpect" + basetemp.mkdir(mode=0o700) + invoke = " ".join(map(str, self._getpytestargs())) + cmd = f"{invoke} --basetemp={basetemp} {string}" + return self.spawn(cmd, expect_timeout=expect_timeout) + + def spawn(self, cmd: str, expect_timeout: float = 10.0) -> pexpect.spawn: + """Run a command using pexpect. + + The pexpect child is returned. 
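+
+        A usage sketch (assumes ``pexpect`` is installed; illustrative only)::
+
+            child = pytester.spawn(f"{sys.executable} --version")
+            child.expect("Python")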
+ """ + pexpect = importorskip("pexpect", "3.0") + if hasattr(sys, "pypy_version_info") and "64" in platform.machine(): + skip("pypy-64 bit not supported") + if not hasattr(pexpect, "spawn"): + skip("pexpect.spawn not available") + logfile = self.path.joinpath("spawn.out").open("wb") + + child = pexpect.spawn(cmd, logfile=logfile, timeout=expect_timeout) + self._request.addfinalizer(logfile.close) + return child + + +class LineComp: + def __init__(self) -> None: + self.stringio = StringIO() + """:class:`python:io.StringIO()` instance used for input.""" + + def assert_contains_lines(self, lines2: Sequence[str]) -> None: + """Assert that ``lines2`` are contained (linearly) in :attr:`stringio`'s value. + + Lines are matched using :func:`LineMatcher.fnmatch_lines `. + """ + __tracebackhide__ = True + val = self.stringio.getvalue() + self.stringio.truncate(0) + self.stringio.seek(0) + lines1 = val.split("\n") + LineMatcher(lines1).fnmatch_lines(lines2) + + +class LineMatcher: + """Flexible matching of text. + + This is a convenience class to test large texts like the output of + commands. + + The constructor takes a list of lines without their trailing newlines, i.e. + ``text.splitlines()``. + """ + + def __init__(self, lines: list[str]) -> None: + self.lines = lines + self._log_output: list[str] = [] + + def __str__(self) -> str: + """Return the entire original text. + + .. versionadded:: 6.2 + You can use :meth:`str` in older versions. + """ + return "\n".join(self.lines) + + def _getlines(self, lines2: str | Sequence[str] | Source) -> Sequence[str]: + if isinstance(lines2, str): + lines2 = Source(lines2) + if isinstance(lines2, Source): + lines2 = lines2.strip().lines + return lines2 + + def fnmatch_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:fnmatch.fnmatch`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, fnmatch) + + def re_match_lines_random(self, lines2: Sequence[str]) -> None: + """Check lines exist in the output in any order (using :func:`python:re.match`).""" + __tracebackhide__ = True + self._match_lines_random(lines2, lambda name, pat: bool(re.match(pat, name))) + + def _match_lines_random( + self, lines2: Sequence[str], match_func: Callable[[str, str], bool] + ) -> None: + __tracebackhide__ = True + lines2 = self._getlines(lines2) + for line in lines2: + for x in self.lines: + if line == x or match_func(x, line): + self._log("matched: ", repr(line)) + break + else: + msg = f"line {line!r} not found in output" + self._log(msg) + self._fail(msg) + + def get_lines_after(self, fnline: str) -> Sequence[str]: + """Return all lines following the given line in the text. + + The given line can contain glob wildcards. + """ + for i, line in enumerate(self.lines): + if fnline == line or fnmatch(line, fnline): + return self.lines[i + 1 :] + raise ValueError(f"line {fnline!r} not found in output") + + def _log(self, *args) -> None: + self._log_output.append(" ".join(str(x) for x in args)) + + @property + def _log_text(self) -> str: + return "\n".join(self._log_output) + + def fnmatch_lines( + self, lines2: Sequence[str], *, consecutive: bool = False + ) -> None: + """Check lines exist in the output (using :func:`python:fnmatch.fnmatch`). + + The argument is a list of lines which have to match and can use glob + wildcards. If they do not match a pytest.fail() is called. The + matches and non-matches are also shown as part of the error message. + + :param lines2: String patterns to match. 
+        :param consecutive: Match lines consecutively?
+        """
+        __tracebackhide__ = True
+        self._match_lines(lines2, fnmatch, "fnmatch", consecutive=consecutive)
+
+    def re_match_lines(
+        self, lines2: Sequence[str], *, consecutive: bool = False
+    ) -> None:
+        """Check lines exist in the output (using :func:`python:re.match`).
+
+        The argument is a list of lines which have to match using ``re.match``.
+        If they do not match a pytest.fail() is called.
+
+        The matches and non-matches are also shown as part of the error message.
+
+        :param lines2: String patterns to match.
+        :param consecutive: Match lines consecutively?
+        """
+        __tracebackhide__ = True
+        self._match_lines(
+            lines2,
+            lambda name, pat: bool(re.match(pat, name)),
+            "re.match",
+            consecutive=consecutive,
+        )
+
+    def _match_lines(
+        self,
+        lines2: Sequence[str],
+        match_func: Callable[[str, str], bool],
+        match_nickname: str,
+        *,
+        consecutive: bool = False,
+    ) -> None:
+        """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
+
+        :param Sequence[str] lines2:
+            List of string patterns to match. The actual format depends on
+            ``match_func``.
+        :param match_func:
+            A callable ``match_func(line, pattern)`` where line is the
+            captured line from stdout/stderr and pattern is the matching
+            pattern.
+        :param str match_nickname:
+            The nickname for the match function that will be logged to stdout
+            when a match occurs.
+        :param consecutive:
+            Match lines consecutively?
+        """
+        if not isinstance(lines2, collections.abc.Sequence):
+            raise TypeError(f"invalid type for lines2: {type(lines2).__name__}")
+        lines2 = self._getlines(lines2)
+        lines1 = self.lines[:]
+        extralines = []
+        __tracebackhide__ = True
+        wnick = len(match_nickname) + 1
+        started = False
+        for line in lines2:
+            nomatchprinted = False
+            while lines1:
+                nextline = lines1.pop(0)
+                if line == nextline:
+                    self._log("exact match:", repr(line))
+                    started = True
+                    break
+                elif match_func(nextline, line):
+                    self._log(f"{match_nickname}:", repr(line))
+                    self._log(
+                        "{:>{width}}".format("with:", width=wnick), repr(nextline)
+                    )
+                    started = True
+                    break
+                else:
+                    if consecutive and started:
+                        msg = f"no consecutive match: {line!r}"
+                        self._log(msg)
+                        self._log(
+                            "{:>{width}}".format("with:", width=wnick), repr(nextline)
+                        )
+                        self._fail(msg)
+                    if not nomatchprinted:
+                        self._log(
+                            "{:>{width}}".format("nomatch:", width=wnick), repr(line)
+                        )
+                        nomatchprinted = True
+                    self._log("{:>{width}}".format("and:", width=wnick), repr(nextline))
+                    extralines.append(nextline)
+            else:
+                msg = f"remains unmatched: {line!r}"
+                self._log(msg)
+                self._fail(msg)
+        self._log_output = []
+
+    def no_fnmatch_line(self, pat: str) -> None:
+        """Ensure captured lines do not match the given pattern, using ``fnmatch.fnmatch``.
+
+        :param str pat: The pattern to match lines.
+        """
+        __tracebackhide__ = True
+        self._no_match_line(pat, fnmatch, "fnmatch")
+
+    def no_re_match_line(self, pat: str) -> None:
+        """Ensure captured lines do not match the given pattern, using ``re.match``.
+
+        :param str pat: The regular expression to match lines.
+        """
+        __tracebackhide__ = True
+        self._no_match_line(
+            pat, lambda name, pat: bool(re.match(pat, name)), "re.match"
+        )
+
+    def _no_match_line(
+        self, pat: str, match_func: Callable[[str, str], bool], match_nickname: str
+    ) -> None:
+        """Ensure captured lines do not match the given pattern, using ``match_func``.
+
+        :param str pat: The pattern to match lines.
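+
+        A usage sketch via the public wrappers (hedged illustration)::
+
+            matcher = LineMatcher(["all tests passed"])
+            matcher.no_fnmatch_line("*error*")  # does not raise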
+ """ + __tracebackhide__ = True + nomatch_printed = False + wnick = len(match_nickname) + 1 + for line in self.lines: + if match_func(line, pat): + msg = f"{match_nickname}: {pat!r}" + self._log(msg) + self._log("{:>{width}}".format("with:", width=wnick), repr(line)) + self._fail(msg) + else: + if not nomatch_printed: + self._log("{:>{width}}".format("nomatch:", width=wnick), repr(pat)) + nomatch_printed = True + self._log("{:>{width}}".format("and:", width=wnick), repr(line)) + self._log_output = [] + + def _fail(self, msg: str) -> None: + __tracebackhide__ = True + log_text = self._log_text + self._log_output = [] + fail(log_text) + + def str(self) -> str: + """Return the entire original text.""" + return str(self) diff --git a/venv/Lib/site-packages/_pytest/pytester_assertions.py b/venv/Lib/site-packages/_pytest/pytester_assertions.py new file mode 100644 index 0000000..d543798 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/pytester_assertions.py @@ -0,0 +1,74 @@ +"""Helper plugin for pytester; should not be loaded on its own.""" + +# This plugin contains assertions used by pytester. pytester cannot +# contain them itself, since it is imported by the `pytest` module, +# hence cannot be subject to assertion rewriting, which requires a +# module to not be already imported. +from __future__ import annotations + +from typing import Sequence + +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + + +def assertoutcome( + outcomes: tuple[ + Sequence[TestReport], + Sequence[CollectReport | TestReport], + Sequence[CollectReport | TestReport], + ], + passed: int = 0, + skipped: int = 0, + failed: int = 0, +) -> None: + __tracebackhide__ = True + + realpassed, realskipped, realfailed = outcomes + obtained = { + "passed": len(realpassed), + "skipped": len(realskipped), + "failed": len(realfailed), + } + expected = {"passed": passed, "skipped": skipped, "failed": failed} + assert obtained == expected, outcomes + + +def assert_outcomes( + outcomes: dict[str, int], + passed: int = 0, + skipped: int = 0, + failed: int = 0, + errors: int = 0, + xpassed: int = 0, + xfailed: int = 0, + warnings: int | None = None, + deselected: int | None = None, +) -> None: + """Assert that the specified outcomes appear with the respective + numbers (0 means it didn't occur) in the text output from a test run.""" + __tracebackhide__ = True + + obtained = { + "passed": outcomes.get("passed", 0), + "skipped": outcomes.get("skipped", 0), + "failed": outcomes.get("failed", 0), + "errors": outcomes.get("errors", 0), + "xpassed": outcomes.get("xpassed", 0), + "xfailed": outcomes.get("xfailed", 0), + } + expected = { + "passed": passed, + "skipped": skipped, + "failed": failed, + "errors": errors, + "xpassed": xpassed, + "xfailed": xfailed, + } + if warnings is not None: + obtained["warnings"] = outcomes.get("warnings", 0) + expected["warnings"] = warnings + if deselected is not None: + obtained["deselected"] = outcomes.get("deselected", 0) + expected["deselected"] = deselected + assert obtained == expected diff --git a/venv/Lib/site-packages/_pytest/python.py b/venv/Lib/site-packages/_pytest/python.py new file mode 100644 index 0000000..3478c34 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/python.py @@ -0,0 +1,1679 @@ +# mypy: allow-untyped-defs +"""Python test discovery, setup and run of test functions.""" + +from __future__ import annotations + +import abc +from collections import Counter +from collections import defaultdict +import dataclasses +import enum +import fnmatch +from 
functools import partial +import inspect +import itertools +import os +from pathlib import Path +import types +from typing import Any +from typing import Callable +from typing import Dict +from typing import final +from typing import Generator +from typing import Iterable +from typing import Iterator +from typing import Literal +from typing import Mapping +from typing import Pattern +from typing import Sequence +from typing import TYPE_CHECKING +import warnings + +import _pytest +from _pytest import fixtures +from _pytest import nodes +from _pytest._code import filter_traceback +from _pytest._code import getfslineno +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest._code.code import Traceback +from _pytest._io.saferepr import saferepr +from _pytest.compat import ascii_escaped +from _pytest.compat import get_default_arg_names +from _pytest.compat import get_real_func +from _pytest.compat import getimfunc +from _pytest.compat import is_async_function +from _pytest.compat import is_generator +from _pytest.compat import LEGACY_PATH +from _pytest.compat import NOTSET +from _pytest.compat import safe_getattr +from _pytest.compat import safe_isclass +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import FixtureRequest +from _pytest.fixtures import FuncFixtureInfo +from _pytest.fixtures import get_scope_node +from _pytest.main import Session +from _pytest.mark import MARK_GEN +from _pytest.mark import ParameterSet +from _pytest.mark.structures import get_unpacked_marks +from _pytest.mark.structures import Mark +from _pytest.mark.structures import MarkDecorator +from _pytest.mark.structures import normalize_mark_list +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.pathlib import fnmatch_ex +from _pytest.pathlib import import_path +from _pytest.pathlib import ImportPathMismatchError +from _pytest.pathlib import scandir +from _pytest.scope import _ScopeName +from _pytest.scope import Scope +from _pytest.stash import StashKey +from _pytest.warning_types import PytestCollectionWarning +from _pytest.warning_types import PytestReturnNotNoneWarning +from _pytest.warning_types import PytestUnhandledCoroutineWarning + + +if TYPE_CHECKING: + from typing_extensions import Self + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "python_files", + type="args", + # NOTE: default is also used in AssertionRewritingHook. 
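+        # An equivalent ini-file override might look like (illustrative only):
+        #   [pytest]
+        #   python_files = test_*.py check_*.py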
+ default=["test_*.py", "*_test.py"], + help="Glob-style file patterns for Python test module discovery", + ) + parser.addini( + "python_classes", + type="args", + default=["Test"], + help="Prefixes or glob names for Python test class discovery", + ) + parser.addini( + "python_functions", + type="args", + default=["test"], + help="Prefixes or glob names for Python test function and method discovery", + ) + parser.addini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support", + type="bool", + default=False, + help="Disable string escape non-ASCII characters, might cause unwanted " + "side effects(use at your own risk)", + ) + + +def pytest_generate_tests(metafunc: Metafunc) -> None: + for marker in metafunc.definition.iter_markers(name="parametrize"): + metafunc.parametrize(*marker.args, **marker.kwargs, _param_mark=marker) + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see https://docs.pytest.org/en/stable/how-to/parametrize.html for more info " + "and examples.", + ) + config.addinivalue_line( + "markers", + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. see " + "https://docs.pytest.org/en/stable/explanation/fixtures.html#usefixtures ", + ) + + +def async_warn_and_skip(nodeid: str) -> None: + msg = "async def functions are not natively supported and have been skipped.\n" + msg += ( + "You need to install a suitable plugin for your async framework, for example:\n" + ) + msg += " - anyio\n" + msg += " - pytest-asyncio\n" + msg += " - pytest-tornasync\n" + msg += " - pytest-trio\n" + msg += " - pytest-twisted" + warnings.warn(PytestUnhandledCoroutineWarning(msg.format(nodeid))) + skip(reason="async def function and no async plugin installed (see warnings)") + + +@hookimpl(trylast=True) +def pytest_pyfunc_call(pyfuncitem: Function) -> object | None: + testfunction = pyfuncitem.obj + if is_async_function(testfunction): + async_warn_and_skip(pyfuncitem.nodeid) + funcargs = pyfuncitem.funcargs + testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames} + result = testfunction(**testargs) + if hasattr(result, "__await__") or hasattr(result, "__aiter__"): + async_warn_and_skip(pyfuncitem.nodeid) + elif result is not None: + warnings.warn( + PytestReturnNotNoneWarning( + f"Expected None, but {pyfuncitem.nodeid} returned {result!r}, which will be an error in a " + "future version of pytest. Did you mean to use `assert` instead of `return`?" + ) + ) + return True + + +def pytest_collect_directory( + path: Path, parent: nodes.Collector +) -> nodes.Collector | None: + pkginit = path / "__init__.py" + try: + has_pkginit = pkginit.is_file() + except PermissionError: + # See https://github.com/pytest-dev/pytest/issues/12120#issuecomment-2106349096. 
+        return None
+    if has_pkginit:
+        return Package.from_parent(parent, path=path)
+    return None
+
+
+def pytest_collect_file(file_path: Path, parent: nodes.Collector) -> Module | None:
+    if file_path.suffix == ".py":
+        if not parent.session.isinitpath(file_path):
+            if not path_matches_patterns(
+                file_path, parent.config.getini("python_files")
+            ):
+                return None
+        ihook = parent.session.gethookproxy(file_path)
+        module: Module = ihook.pytest_pycollect_makemodule(
+            module_path=file_path, parent=parent
+        )
+        return module
+    return None
+
+
+def path_matches_patterns(path: Path, patterns: Iterable[str]) -> bool:
+    """Return whether path matches any of the patterns in the list of globs given."""
+    return any(fnmatch_ex(pattern, path) for pattern in patterns)
+
+
+def pytest_pycollect_makemodule(module_path: Path, parent) -> Module:
+    return Module.from_parent(parent, path=module_path)
+
+
+@hookimpl(trylast=True)
+def pytest_pycollect_makeitem(
+    collector: Module | Class, name: str, obj: object
+) -> None | nodes.Item | nodes.Collector | list[nodes.Item | nodes.Collector]:
+    assert isinstance(collector, (Class, Module)), type(collector)
+    # Nothing was collected elsewhere, let's do it here.
+    if safe_isclass(obj):
+        if collector.istestclass(obj, name):
+            return Class.from_parent(collector, name=name, obj=obj)
+    elif collector.istestfunction(obj, name):
+        # mock seems to store unbound methods (issue473), normalize it.
+        obj = getattr(obj, "__func__", obj)
+        # We need to try and unwrap the function if it's a functools.partial
+        # or a functools.wrapped.
+        # We mustn't if it's been wrapped with mock.patch (python 2 only).
+        if not (inspect.isfunction(obj) or inspect.isfunction(get_real_func(obj))):
+            filename, lineno = getfslineno(obj)
+            warnings.warn_explicit(
+                message=PytestCollectionWarning(
+                    f"cannot collect {name!r} because it is not a function."
+                ),
+                category=None,
+                filename=str(filename),
+                lineno=lineno + 1,
+            )
+        elif getattr(obj, "__test__", True):
+            if is_generator(obj):
+                res = Function.from_parent(collector, name=name)
+                reason = (
+                    f"yield tests were removed in pytest 4.0 - {name} will be ignored"
+                )
+                res.add_marker(MARK_GEN.xfail(run=False, reason=reason))
+                res.warn(PytestCollectionWarning(reason))
+                return res
+            else:
+                return list(collector._genfunctions(name, obj))
+    return None
+
+
+class PyobjMixin(nodes.Node):
+    """This mix-in inherits from Node to carry over the typing information;
+    as it's intended to always be mixed in before a Node, its position in
+    the MRO is unaffected."""
+
+    _ALLOW_MARKERS = True
+
+    @property
+    def module(self):
+        """Python module object this node was collected from (can be None)."""
+        node = self.getparent(Module)
+        return node.obj if node is not None else None
+
+    @property
+    def cls(self):
+        """Python class object this node was collected from (can be None)."""
+        node = self.getparent(Class)
+        return node.obj if node is not None else None
+
+    @property
+    def instance(self):
+        """Python instance object the function is bound to.
+
+        Returns None if not a test method, e.g. for a standalone test function,
+        a class or a module.
+        """
+        # Overridden by Function.
+ return None + + @property + def obj(self): + """Underlying Python object.""" + obj = getattr(self, "_obj", None) + if obj is None: + self._obj = obj = self._getobj() + # XXX evil hack + # used to avoid Function marker duplication + if self._ALLOW_MARKERS: + self.own_markers.extend(get_unpacked_marks(self.obj)) + # This assumes that `obj` is called before there is a chance + # to add custom keys to `self.keywords`, so no fear of overriding. + self.keywords.update((mark.name, mark) for mark in self.own_markers) + return obj + + @obj.setter + def obj(self, value): + self._obj = value + + def _getobj(self): + """Get the underlying Python object. May be overwritten by subclasses.""" + # TODO: Improve the type of `parent` such that assert/ignore aren't needed. + assert self.parent is not None + obj = self.parent.obj # type: ignore[attr-defined] + return getattr(obj, self.name) + + def getmodpath(self, stopatmodule: bool = True, includemodule: bool = False) -> str: + """Return Python path relative to the containing module.""" + parts = [] + for node in self.iter_parents(): + name = node.name + if isinstance(node, Module): + name = os.path.splitext(name)[0] + if stopatmodule: + if includemodule: + parts.append(name) + break + parts.append(name) + parts.reverse() + return ".".join(parts) + + def reportinfo(self) -> tuple[os.PathLike[str] | str, int | None, str]: + # XXX caching? + path, lineno = getfslineno(self.obj) + modpath = self.getmodpath() + return path, lineno, modpath + + +# As an optimization, these builtin attribute names are pre-ignored when +# iterating over an object during collection -- the pytest_pycollect_makeitem +# hook is not called for them. +# fmt: off +class _EmptyClass: pass # noqa: E701 +IGNORED_ATTRIBUTES = frozenset.union( + frozenset(), + # Module. + dir(types.ModuleType("empty_module")), + # Some extra module attributes the above doesn't catch. + {"__builtins__", "__file__", "__cached__"}, + # Class. + dir(_EmptyClass), + # Instance. + dir(_EmptyClass()), +) +del _EmptyClass +# fmt: on + + +class PyCollector(PyobjMixin, nodes.Collector, abc.ABC): + def funcnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_functions", name) + + def isnosetest(self, obj: object) -> bool: + """Look for the __test__ attribute, which is applied by the + @nose.tools.istest decorator. + """ + # We explicitly check for "is True" here to not mistakenly treat + # classes with a custom __getattr__ returning something truthy (like a + # function) as test classes. + return safe_getattr(obj, "__test__", False) is True + + def classnamefilter(self, name: str) -> bool: + return self._matches_prefix_or_glob_option("python_classes", name) + + def istestfunction(self, obj: object, name: str) -> bool: + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, (staticmethod, classmethod)): + # staticmethods and classmethods need to be unwrapped. 
+ obj = safe_getattr(obj, "__func__", False) + return callable(obj) and fixtures.getfixturemarker(obj) is None + else: + return False + + def istestclass(self, obj: object, name: str) -> bool: + if not (self.classnamefilter(name) or self.isnosetest(obj)): + return False + if inspect.isabstract(obj): + return False + return True + + def _matches_prefix_or_glob_option(self, option_name: str, name: str) -> bool: + """Check if the given name matches the prefix or glob-pattern defined + in ini configuration.""" + for option in self.config.getini(option_name): + if name.startswith(option): + return True + # Check that name looks like a glob-string before calling fnmatch + # because this is called for every name in each collected module, + # and fnmatch is somewhat expensive to call. + elif ("*" in option or "?" in option or "[" in option) and fnmatch.fnmatch( + name, option + ): + return True + return False + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + if not getattr(self.obj, "__test__", True): + return [] + + # Avoid random getattrs and peek in the __dict__ instead. + dicts = [getattr(self.obj, "__dict__", {})] + if isinstance(self.obj, type): + for basecls in self.obj.__mro__: + dicts.append(basecls.__dict__) + + # In each class, nodes should be definition ordered. + # __dict__ is definition ordered. + seen: set[str] = set() + dict_values: list[list[nodes.Item | nodes.Collector]] = [] + ihook = self.ihook + for dic in dicts: + values: list[nodes.Item | nodes.Collector] = [] + # Note: seems like the dict can change during iteration - + # be careful not to remove the list() without consideration. + for name, obj in list(dic.items()): + if name in IGNORED_ATTRIBUTES: + continue + if name in seen: + continue + seen.add(name) + res = ihook.pytest_pycollect_makeitem( + collector=self, name=name, obj=obj + ) + if res is None: + continue + elif isinstance(res, list): + values.extend(res) + else: + values.append(res) + dict_values.append(values) + + # Between classes in the class hierarchy, reverse-MRO order -- nodes + # inherited from base classes should come before subclasses. + result = [] + for values in reversed(dict_values): + result.extend(values) + return result + + def _genfunctions(self, name: str, funcobj) -> Iterator[Function]: + modulecol = self.getparent(Module) + assert modulecol is not None + module = modulecol.obj + clscol = self.getparent(Class) + cls = clscol and clscol.obj or None + + definition = FunctionDefinition.from_parent(self, name=name, callobj=funcobj) + fixtureinfo = definition._fixtureinfo + + # pytest_generate_tests impls call metafunc.parametrize() which fills + # metafunc._calls, the outcome of the hook. + metafunc = Metafunc( + definition=definition, + fixtureinfo=fixtureinfo, + config=self.config, + cls=cls, + module=module, + _ispytest=True, + ) + methods = [] + if hasattr(module, "pytest_generate_tests"): + methods.append(module.pytest_generate_tests) + if cls is not None and hasattr(cls, "pytest_generate_tests"): + methods.append(cls().pytest_generate_tests) + self.ihook.pytest_generate_tests.call_extra(methods, dict(metafunc=metafunc)) + + if not metafunc._calls: + yield Function.from_parent(self, name=name, fixtureinfo=fixtureinfo) + else: + # Direct parametrizations taking place in module/class-specific + # `metafunc.parametrize` calls may have shadowed some fixtures, so make sure + # we update what the function really needs a.k.a its fixture closure. 
Note that
+            # direct parametrizations using `@pytest.mark.parametrize` have already
+            # been taken into account when building the closure, via the
+            # `ignore_args` argument to `getfixtureclosure`.
+            fixtureinfo.prune_dependency_tree()
+
+            for callspec in metafunc._calls:
+                subname = f"{name}[{callspec.id}]"
+                yield Function.from_parent(
+                    self,
+                    name=subname,
+                    callspec=callspec,
+                    fixtureinfo=fixtureinfo,
+                    keywords={callspec.id: True},
+                    originalname=name,
+                )
+
+
+def importtestmodule(
+    path: Path,
+    config: Config,
+):
+    # We assume we are only called once per module.
+    importmode = config.getoption("--import-mode")
+    try:
+        mod = import_path(
+            path,
+            mode=importmode,
+            root=config.rootpath,
+            consider_namespace_packages=config.getini("consider_namespace_packages"),
+        )
+    except SyntaxError as e:
+        raise nodes.Collector.CollectError(
+            ExceptionInfo.from_current().getrepr(style="short")
+        ) from e
+    except ImportPathMismatchError as e:
+        raise nodes.Collector.CollectError(
+            "import file mismatch:\n"
+            "imported module {!r} has this __file__ attribute:\n"
+            "  {}\n"
+            "which is not the same as the test file we want to collect:\n"
+            "  {}\n"
+            "HINT: remove __pycache__ / .pyc files and/or use a "
+            "unique basename for your test file modules".format(*e.args)
+        ) from e
+    except ImportError as e:
+        exc_info = ExceptionInfo.from_current()
+        if config.get_verbosity() < 2:
+            exc_info.traceback = exc_info.traceback.filter(filter_traceback)
+        exc_repr = (
+            exc_info.getrepr(style="short")
+            if exc_info.traceback
+            else exc_info.exconly()
+        )
+        formatted_tb = str(exc_repr)
+        raise nodes.Collector.CollectError(
+            f"ImportError while importing test module '{path}'.\n"
+            "Hint: make sure your test modules/packages have valid Python names.\n"
+            "Traceback:\n"
+            f"{formatted_tb}"
+        ) from e
+    except skip.Exception as e:
+        if e.allow_module_level:
+            raise
+        raise nodes.Collector.CollectError(
+            "Using pytest.skip outside of a test will skip the entire module. "
+            "If that's your intention, pass `allow_module_level=True`. "
+            "If you want to skip a specific test or an entire class, "
+            "use the @pytest.mark.skip or @pytest.mark.skipif decorators."
+        ) from e
+    config.pluginmanager.consider_module(mod)
+    return mod
+
+
+class Module(nodes.File, PyCollector):
+    """Collector for test classes and functions in a Python module."""
+
+    def _getobj(self):
+        return importtestmodule(self.path, self.config)
+
+    def collect(self) -> Iterable[nodes.Item | nodes.Collector]:
+        self._register_setup_module_fixture()
+        self._register_setup_function_fixture()
+        self.session._fixturemanager.parsefactories(self)
+        return super().collect()
+
+    def _register_setup_module_fixture(self) -> None:
+        """Register an autouse, module-scoped fixture for the collected module object
+        that invokes setUpModule/tearDownModule if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
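+
+        For example, a test module defining either hook (illustrative sketch)::
+
+            def setup_module(module):
+                ...
+
+            def teardown_module(module):
+                ...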
+ """ + setup_module = _get_first_non_fixture_func( + self.obj, ("setUpModule", "setup_module") + ) + teardown_module = _get_first_non_fixture_func( + self.obj, ("tearDownModule", "teardown_module") + ) + + if setup_module is None and teardown_module is None: + return + + def xunit_setup_module_fixture(request) -> Generator[None]: + module = request.module + if setup_module is not None: + _call_with_optional_argument(setup_module, module) + yield + if teardown_module is not None: + _call_with_optional_argument(teardown_module, module) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_xunit_setup_module_fixture_{self.obj.__name__}", + func=xunit_setup_module_fixture, + nodeid=self.nodeid, + scope="module", + autouse=True, + ) + + def _register_setup_function_fixture(self) -> None: + """Register an autouse, function-scoped fixture for the collected module object + that invokes setup_function/teardown_function if either or both are available. + + Using a fixture to invoke this methods ensures we play nicely and unsurprisingly with + other fixtures (#517). + """ + setup_function = _get_first_non_fixture_func(self.obj, ("setup_function",)) + teardown_function = _get_first_non_fixture_func( + self.obj, ("teardown_function",) + ) + if setup_function is None and teardown_function is None: + return + + def xunit_setup_function_fixture(request) -> Generator[None]: + if request.instance is not None: + # in this case we are bound to an instance, so we need to let + # setup_method handle this + yield + return + function = request.function + if setup_function is not None: + _call_with_optional_argument(setup_function, function) + yield + if teardown_function is not None: + _call_with_optional_argument(teardown_function, function) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_xunit_setup_function_fixture_{self.obj.__name__}", + func=xunit_setup_function_fixture, + nodeid=self.nodeid, + scope="function", + autouse=True, + ) + + +class Package(nodes.Directory): + """Collector for files and directories in a Python packages -- directories + with an `__init__.py` file. + + .. note:: + + Directories without an `__init__.py` file are instead collected by + :class:`~pytest.Dir` by default. Both are :class:`~pytest.Directory` + collectors. + + .. versionchanged:: 8.0 + + Now inherits from :class:`~pytest.Directory`. + """ + + def __init__( + self, + fspath: LEGACY_PATH | None, + parent: nodes.Collector, + # NOTE: following args are unused: + config=None, + session=None, + nodeid=None, + path: Path | None = None, + ) -> None: + # NOTE: Could be just the following, but kept as-is for compat. + # super().__init__(self, fspath, parent=parent) + session = parent.session + super().__init__( + fspath=fspath, + path=path, + parent=parent, + config=config, + session=session, + nodeid=nodeid, + ) + + def setup(self) -> None: + init_mod = importtestmodule(self.path / "__init__.py", self.config) + + # Not using fixtures to call setup_module here because autouse fixtures + # from packages are not called automatically (#4085). 
+ setup_module = _get_first_non_fixture_func( + init_mod, ("setUpModule", "setup_module") + ) + if setup_module is not None: + _call_with_optional_argument(setup_module, init_mod) + + teardown_module = _get_first_non_fixture_func( + init_mod, ("tearDownModule", "teardown_module") + ) + if teardown_module is not None: + func = partial(_call_with_optional_argument, teardown_module, init_mod) + self.addfinalizer(func) + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + # Always collect __init__.py first. + def sort_key(entry: os.DirEntry[str]) -> object: + return (entry.name != "__init__.py", entry.name) + + config = self.config + col: nodes.Collector | None + cols: Sequence[nodes.Collector] + ihook = self.ihook + for direntry in scandir(self.path, sort_key): + if direntry.is_dir(): + path = Path(direntry.path) + if not self.session.isinitpath(path, with_parents=True): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + col = ihook.pytest_collect_directory(path=path, parent=self) + if col is not None: + yield col + + elif direntry.is_file(): + path = Path(direntry.path) + if not self.session.isinitpath(path): + if ihook.pytest_ignore_collect(collection_path=path, config=config): + continue + cols = ihook.pytest_collect_file(file_path=path, parent=self) + yield from cols + + +def _call_with_optional_argument(func, arg) -> None: + """Call the given function with the given argument if func accepts one argument, otherwise + calls func without arguments.""" + arg_count = func.__code__.co_argcount + if inspect.ismethod(func): + arg_count -= 1 + if arg_count: + func(arg) + else: + func() + + +def _get_first_non_fixture_func(obj: object, names: Iterable[str]) -> object | None: + """Return the attribute from the given object to be used as a setup/teardown + xunit-style function, but only if not marked as a fixture to avoid calling it twice. + """ + for name in names: + meth: object | None = getattr(obj, name, None) + if meth is not None and fixtures.getfixturemarker(meth) is None: + return meth + return None + + +class Class(PyCollector): + """Collector for test methods (and nested classes) in a Python class.""" + + @classmethod + def from_parent(cls, parent, *, name, obj=None, **kw) -> Self: # type: ignore[override] + """The public constructor.""" + return super().from_parent(name=name, parent=parent, **kw) + + def newinstance(self): + return self.obj() + + def collect(self) -> Iterable[nodes.Item | nodes.Collector]: + if not safe_getattr(self.obj, "__test__", True): + return [] + if hasinit(self.obj): + assert self.parent is not None + self.warn( + PytestCollectionWarning( + f"cannot collect test class {self.obj.__name__!r} because it has a " + f"__init__ constructor (from: {self.parent.nodeid})" + ) + ) + return [] + elif hasnew(self.obj): + assert self.parent is not None + self.warn( + PytestCollectionWarning( + f"cannot collect test class {self.obj.__name__!r} because it has a " + f"__new__ constructor (from: {self.parent.nodeid})" + ) + ) + return [] + + self._register_setup_class_fixture() + self._register_setup_method_fixture() + + self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid) + + return super().collect() + + def _register_setup_class_fixture(self) -> None: + """Register an autouse, class scoped fixture into the collected class object + that invokes setup_class/teardown_class if either or both are available. 
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        setup_class = _get_first_non_fixture_func(self.obj, ("setup_class",))
+        teardown_class = _get_first_non_fixture_func(self.obj, ("teardown_class",))
+        if setup_class is None and teardown_class is None:
+            return
+
+        def xunit_setup_class_fixture(request) -> Generator[None]:
+            cls = request.cls
+            if setup_class is not None:
+                func = getimfunc(setup_class)
+                _call_with_optional_argument(func, cls)
+            yield
+            if teardown_class is not None:
+                func = getimfunc(teardown_class)
+                _call_with_optional_argument(func, cls)
+
+        self.session._fixturemanager._register_fixture(
+            # Use a unique name to speed up lookup.
+            name=f"_xunit_setup_class_fixture_{self.obj.__qualname__}",
+            func=xunit_setup_class_fixture,
+            nodeid=self.nodeid,
+            scope="class",
+            autouse=True,
+        )
+
+    def _register_setup_method_fixture(self) -> None:
+        """Register an autouse, function scoped fixture into the collected class object
+        that invokes setup_method/teardown_method if either or both are available.
+
+        Using a fixture to invoke these methods ensures we play nicely and unsurprisingly with
+        other fixtures (#517).
+        """
+        setup_name = "setup_method"
+        setup_method = _get_first_non_fixture_func(self.obj, (setup_name,))
+        teardown_name = "teardown_method"
+        teardown_method = _get_first_non_fixture_func(self.obj, (teardown_name,))
+        if setup_method is None and teardown_method is None:
+            return
+
+        def xunit_setup_method_fixture(request) -> Generator[None]:
+            instance = request.instance
+            method = request.function
+            if setup_method is not None:
+                func = getattr(instance, setup_name)
+                _call_with_optional_argument(func, method)
+            yield
+            if teardown_method is not None:
+                func = getattr(instance, teardown_name)
+                _call_with_optional_argument(func, method)
+
+        self.session._fixturemanager._register_fixture(
+            # Use a unique name to speed up lookup.
+            name=f"_xunit_setup_method_fixture_{self.obj.__qualname__}",
+            func=xunit_setup_method_fixture,
+            nodeid=self.nodeid,
+            scope="function",
+            autouse=True,
+        )
+
+
+def hasinit(obj: object) -> bool:
+    init: object = getattr(obj, "__init__", None)
+    if init:
+        return init != object.__init__
+    return False
+
+
+def hasnew(obj: object) -> bool:
+    new: object = getattr(obj, "__new__", None)
+    if new:
+        return new != object.__new__
+    return False
+
+
+@final
+@dataclasses.dataclass(frozen=True)
+class IdMaker:
+    """Make IDs for a parametrization."""
+
+    __slots__ = (
+        "argnames",
+        "parametersets",
+        "idfn",
+        "ids",
+        "config",
+        "nodeid",
+        "func_name",
+    )
+
+    # The argnames of the parametrization.
+    argnames: Sequence[str]
+    # The ParameterSets of the parametrization.
+    parametersets: Sequence[ParameterSet]
+    # Optionally, a user-provided callable to make IDs for parameters in a
+    # ParameterSet.
+    idfn: Callable[[Any], object | None] | None
+    # Optionally, explicit IDs for ParameterSets by index.
+    ids: Sequence[object | None] | None
+    # Optionally, the pytest config.
+    # Used for controlling ASCII escaping, and for calling the
+    # :hook:`pytest_make_parametrize_id` hook.
+    config: Config | None
+    # Optionally, the ID of the node being parametrized.
+    # Used only for clearer error messages.
+    nodeid: str | None
+    # Optionally, the ID of the function being parametrized.
+    # Used only for clearer error messages.
+ func_name: str | None + + def make_unique_parameterset_ids(self) -> list[str]: + """Make a unique identifier for each ParameterSet, that may be used to + identify the parametrization in a node ID. + + Format is -...-[counter], where prm_x_token is + - user-provided id, if given + - else an id derived from the value, applicable for certain types + - else + The counter suffix is appended only in case a string wouldn't be unique + otherwise. + """ + resolved_ids = list(self._resolve_ids()) + # All IDs must be unique! + if len(resolved_ids) != len(set(resolved_ids)): + # Record the number of occurrences of each ID. + id_counts = Counter(resolved_ids) + # Map the ID to its next suffix. + id_suffixes: dict[str, int] = defaultdict(int) + # Suffix non-unique IDs to make them unique. + for index, id in enumerate(resolved_ids): + if id_counts[id] > 1: + suffix = "" + if id and id[-1].isdigit(): + suffix = "_" + new_id = f"{id}{suffix}{id_suffixes[id]}" + while new_id in set(resolved_ids): + id_suffixes[id] += 1 + new_id = f"{id}{suffix}{id_suffixes[id]}" + resolved_ids[index] = new_id + id_suffixes[id] += 1 + assert len(resolved_ids) == len( + set(resolved_ids) + ), f"Internal error: {resolved_ids=}" + return resolved_ids + + def _resolve_ids(self) -> Iterable[str]: + """Resolve IDs for all ParameterSets (may contain duplicates).""" + for idx, parameterset in enumerate(self.parametersets): + if parameterset.id is not None: + # ID provided directly - pytest.param(..., id="...") + yield parameterset.id + elif self.ids and idx < len(self.ids) and self.ids[idx] is not None: + # ID provided in the IDs list - parametrize(..., ids=[...]). + yield self._idval_from_value_required(self.ids[idx], idx) + else: + # ID not provided - generate it. + yield "-".join( + self._idval(val, argname, idx) + for val, argname in zip(parameterset.values, self.argnames) + ) + + def _idval(self, val: object, argname: str, idx: int) -> str: + """Make an ID for a parameter in a ParameterSet.""" + idval = self._idval_from_function(val, argname, idx) + if idval is not None: + return idval + idval = self._idval_from_hook(val, argname) + if idval is not None: + return idval + idval = self._idval_from_value(val) + if idval is not None: + return idval + return self._idval_from_argname(argname, idx) + + def _idval_from_function(self, val: object, argname: str, idx: int) -> str | None: + """Try to make an ID for a parameter in a ParameterSet using the + user-provided id callable, if given.""" + if self.idfn is None: + return None + try: + id = self.idfn(val) + except Exception as e: + prefix = f"{self.nodeid}: " if self.nodeid is not None else "" + msg = "error raised while trying to determine id of parameter '{}' at position {}" + msg = prefix + msg.format(argname, idx) + raise ValueError(msg) from e + if id is None: + return None + return self._idval_from_value(id) + + def _idval_from_hook(self, val: object, argname: str) -> str | None: + """Try to make an ID for a parameter in a ParameterSet by calling the + :hook:`pytest_make_parametrize_id` hook.""" + if self.config: + id: str | None = self.config.hook.pytest_make_parametrize_id( + config=self.config, val=val, argname=argname + ) + return id + return None + + def _idval_from_value(self, val: object) -> str | None: + """Try to make an ID for a parameter in a ParameterSet from its value, + if the value type is supported.""" + if isinstance(val, (str, bytes)): + return _ascii_escaped_by_config(val, self.config) + elif val is None or isinstance(val, (float, int, bool, complex)): + 
return str(val) + elif isinstance(val, Pattern): + return ascii_escaped(val.pattern) + elif val is NOTSET: + # Fallback to default. Note that NOTSET is an enum.Enum. + pass + elif isinstance(val, enum.Enum): + return str(val) + elif isinstance(getattr(val, "__name__", None), str): + # Name of a class, function, module, etc. + name: str = getattr(val, "__name__") + return name + return None + + def _idval_from_value_required(self, val: object, idx: int) -> str: + """Like _idval_from_value(), but fails if the type is not supported.""" + id = self._idval_from_value(val) + if id is not None: + return id + + # Fail. + if self.func_name is not None: + prefix = f"In {self.func_name}: " + elif self.nodeid is not None: + prefix = f"In {self.nodeid}: " + else: + prefix = "" + msg = ( + f"{prefix}ids contains unsupported value {saferepr(val)} (type: {type(val)!r}) at index {idx}. " + "Supported types are: str, bytes, int, float, complex, bool, enum, regex or anything with a __name__." + ) + fail(msg, pytrace=False) + + @staticmethod + def _idval_from_argname(argname: str, idx: int) -> str: + """Make an ID for a parameter in a ParameterSet from the argument name + and the index of the ParameterSet.""" + return str(argname) + str(idx) + + +@final +@dataclasses.dataclass(frozen=True) +class CallSpec2: + """A planned parameterized invocation of a test function. + + Calculated during collection for a given test function's Metafunc. + Once collection is over, each callspec is turned into a single Item + and stored in item.callspec. + """ + + # arg name -> arg value which will be passed to a fixture or pseudo-fixture + # of the same name. (indirect or direct parametrization respectively) + params: dict[str, object] = dataclasses.field(default_factory=dict) + # arg name -> arg index. + indices: dict[str, int] = dataclasses.field(default_factory=dict) + # Used for sorting parametrized resources. + _arg2scope: Mapping[str, Scope] = dataclasses.field(default_factory=dict) + # Parts which will be added to the item's name in `[..]` separated by "-". + _idlist: Sequence[str] = dataclasses.field(default_factory=tuple) + # Marks which will be applied to the item. + marks: list[Mark] = dataclasses.field(default_factory=list) + + def setmulti( + self, + *, + argnames: Iterable[str], + valset: Iterable[object], + id: str, + marks: Iterable[Mark | MarkDecorator], + scope: Scope, + param_index: int, + ) -> CallSpec2: + params = self.params.copy() + indices = self.indices.copy() + arg2scope = dict(self._arg2scope) + for arg, val in zip(argnames, valset): + if arg in params: + raise ValueError(f"duplicate parametrization of {arg!r}") + params[arg] = val + indices[arg] = param_index + arg2scope[arg] = scope + return CallSpec2( + params=params, + indices=indices, + _arg2scope=arg2scope, + _idlist=[*self._idlist, id], + marks=[*self.marks, *normalize_mark_list(marks)], + ) + + def getparam(self, name: str) -> object: + try: + return self.params[name] + except KeyError as e: + raise ValueError(name) from e + + @property + def id(self) -> str: + return "-".join(self._idlist) + + +def get_direct_param_fixture_func(request: FixtureRequest) -> Any: + return request.param + + +# Used for storing pseudo fixturedefs for direct parametrization. +name2pseudofixturedef_key = StashKey[Dict[str, FixtureDef[Any]]]() + + +@final +class Metafunc: + """Objects passed to the :hook:`pytest_generate_tests` hook. 
+ + They help to inspect a test function and to generate tests according to + test configuration or values specified in the class or module where a + test function is defined. + """ + + def __init__( + self, + definition: FunctionDefinition, + fixtureinfo: fixtures.FuncFixtureInfo, + config: Config, + cls=None, + module=None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + + #: Access to the underlying :class:`_pytest.python.FunctionDefinition`. + self.definition = definition + + #: Access to the :class:`pytest.Config` object for the test session. + self.config = config + + #: The module object where the test function is defined in. + self.module = module + + #: Underlying Python test function. + self.function = definition.obj + + #: Set of fixture names required by the test function. + self.fixturenames = fixtureinfo.names_closure + + #: Class object where the test function is defined in or ``None``. + self.cls = cls + + self._arg2fixturedefs = fixtureinfo.name2fixturedefs + + # Result of parametrize(). + self._calls: list[CallSpec2] = [] + + def parametrize( + self, + argnames: str | Sequence[str], + argvalues: Iterable[ParameterSet | Sequence[object] | object], + indirect: bool | Sequence[str] = False, + ids: Iterable[object | None] | Callable[[Any], object | None] | None = None, + scope: _ScopeName | None = None, + *, + _param_mark: Mark | None = None, + ) -> None: + """Add new invocations to the underlying test function using the list + of argvalues for the given argnames. Parametrization is performed + during the collection phase. If you need to setup expensive resources + see about setting indirect to do it rather than at test setup time. + + Can be called multiple times per test function (but only on different + argument names), in which case each call parametrizes all previous + parametrizations, e.g. + + :: + + unparametrized: t + parametrize ["x", "y"]: t[x], t[y] + parametrize [1, 2]: t[x-1], t[x-2], t[y-1], t[y-2] + + :param argnames: + A comma-separated string denoting one or more argument names, or + a list/tuple of argument strings. + + :param argvalues: + The list of argvalues determines how often a test is invoked with + different argument values. + + If only one argname was specified argvalues is a list of values. + If N argnames were specified, argvalues must be a list of + N-tuples, where each tuple-element specifies a value for its + respective argname. + :type argvalues: Iterable[_pytest.mark.structures.ParameterSet | Sequence[object] | object] + :param indirect: + A list of arguments' names (subset of argnames) or a boolean. + If True the list contains all names from the argnames. Each + argvalue corresponding to an argname in this list will + be passed as request.param to its respective argname fixture + function so that it can perform more expensive setups during the + setup phase of a test rather than at collection time. + + :param ids: + Sequence of (or generator for) ids for ``argvalues``, + or a callable to return part of the id for each argvalue. + + With sequences (and generators like ``itertools.count()``) the + returned ids should be of type ``string``, ``int``, ``float``, + ``bool``, or ``None``. + They are mapped to the corresponding index in ``argvalues``. + ``None`` means to use the auto-generated id. + + If it is a callable it will be called for each entry in + ``argvalues``, and the return value is used as part of the + auto-generated id for the whole set (where parts are joined with + dashes ("-")). 
+ This is useful to provide more specific ids for certain items, e.g. + dates. Returning ``None`` will use an auto-generated id. + + If no ids are provided they will be generated automatically from + the argvalues. + + :param scope: + If specified it denotes the scope of the parameters. + The scope is used for grouping tests by parameter instances. + It will also override any fixture-function defined scope, allowing + to set a dynamic scope using test context or configuration. + """ + argnames, parametersets = ParameterSet._for_parametrize( + argnames, + argvalues, + self.function, + self.config, + nodeid=self.definition.nodeid, + ) + del argvalues + + if "request" in argnames: + fail( + "'request' is a reserved name and cannot be used in @pytest.mark.parametrize", + pytrace=False, + ) + + if scope is not None: + scope_ = Scope.from_user( + scope, descr=f"parametrize() call in {self.function.__name__}" + ) + else: + scope_ = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) + + self._validate_if_using_arg_names(argnames, indirect) + + # Use any already (possibly) generated ids with parametrize Marks. + if _param_mark and _param_mark._param_ids_from: + generated_ids = _param_mark._param_ids_from._param_ids_generated + if generated_ids is not None: + ids = generated_ids + + ids = self._resolve_parameter_set_ids( + argnames, ids, parametersets, nodeid=self.definition.nodeid + ) + + # Store used (possibly generated) ids with parametrize Marks. + if _param_mark and _param_mark._param_ids_from and generated_ids is None: + object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids) + + # Add funcargs as fixturedefs to fixtureinfo.arg2fixturedefs by registering + # artificial "pseudo" FixtureDef's so that later at test execution time we can + # rely on a proper FixtureDef to exist for fixture setup. + node = None + # If we have a scope that is higher than function, we need + # to make sure we only ever create an according fixturedef on + # a per-scope basis. We thus store and cache the fixturedef on the + # node related to the scope. + if scope_ is not Scope.Function: + collector = self.definition.parent + assert collector is not None + node = get_scope_node(collector, scope_) + if node is None: + # If used class scope and there is no class, use module-level + # collector (for now). + if scope_ is Scope.Class: + assert isinstance(collector, Module) + node = collector + # If used package scope and there is no package, use session + # (for now). 
+ elif scope_ is Scope.Package: + node = collector.session + else: + assert False, f"Unhandled missing scope: {scope}" + if node is None: + name2pseudofixturedef = None + else: + default: dict[str, FixtureDef[Any]] = {} + name2pseudofixturedef = node.stash.setdefault( + name2pseudofixturedef_key, default + ) + arg_directness = self._resolve_args_directness(argnames, indirect) + for argname in argnames: + if arg_directness[argname] == "indirect": + continue + if name2pseudofixturedef is not None and argname in name2pseudofixturedef: + fixturedef = name2pseudofixturedef[argname] + else: + fixturedef = FixtureDef( + config=self.config, + baseid="", + argname=argname, + func=get_direct_param_fixture_func, + scope=scope_, + params=None, + ids=None, + _ispytest=True, + ) + if name2pseudofixturedef is not None: + name2pseudofixturedef[argname] = fixturedef + self._arg2fixturedefs[argname] = [fixturedef] + + # Create the new calls: if we are parametrize() multiple times (by applying the decorator + # more than once) then we accumulate those calls generating the cartesian product + # of all calls. + newcalls = [] + for callspec in self._calls or [CallSpec2()]: + for param_index, (param_id, param_set) in enumerate( + zip(ids, parametersets) + ): + newcallspec = callspec.setmulti( + argnames=argnames, + valset=param_set.values, + id=param_id, + marks=param_set.marks, + scope=scope_, + param_index=param_index, + ) + newcalls.append(newcallspec) + self._calls = newcalls + + def _resolve_parameter_set_ids( + self, + argnames: Sequence[str], + ids: Iterable[object | None] | Callable[[Any], object | None] | None, + parametersets: Sequence[ParameterSet], + nodeid: str, + ) -> list[str]: + """Resolve the actual ids for the given parameter sets. + + :param argnames: + Argument names passed to ``parametrize()``. + :param ids: + The `ids` parameter of the ``parametrize()`` call (see docs). + :param parametersets: + The parameter sets, each containing a set of values corresponding + to ``argnames``. + :param nodeid str: + The nodeid of the definition item that generated this + parametrization. + :returns: + List with ids for each parameter set given. 
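+
+        For example (an illustrative sketch of the resolution)::
+
+            parametrize("x", [0, 1])                  # ids -> ["0", "1"]
+            parametrize("x", [0, 1], ids=["a", "b"])  # ids -> ["a", "b"]
+            parametrize("x", [2, 3], ids=["a", "a"])  # ids -> ["a0", "a1"] (made unique)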
+ """ + if ids is None: + idfn = None + ids_ = None + elif callable(ids): + idfn = ids + ids_ = None + else: + idfn = None + ids_ = self._validate_ids(ids, parametersets, self.function.__name__) + id_maker = IdMaker( + argnames, + parametersets, + idfn, + ids_, + self.config, + nodeid=nodeid, + func_name=self.function.__name__, + ) + return id_maker.make_unique_parameterset_ids() + + def _validate_ids( + self, + ids: Iterable[object | None], + parametersets: Sequence[ParameterSet], + func_name: str, + ) -> list[object | None]: + try: + num_ids = len(ids) # type: ignore[arg-type] + except TypeError: + try: + iter(ids) + except TypeError as e: + raise TypeError("ids must be a callable or an iterable") from e + num_ids = len(parametersets) + + # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849 + if num_ids != len(parametersets) and num_ids != 0: + msg = "In {}: {} parameter sets specified, with different number of ids: {}" + fail(msg.format(func_name, len(parametersets), num_ids), pytrace=False) + + return list(itertools.islice(ids, num_ids)) + + def _resolve_args_directness( + self, + argnames: Sequence[str], + indirect: bool | Sequence[str], + ) -> dict[str, Literal["indirect", "direct"]]: + """Resolve if each parametrized argument must be considered an indirect + parameter to a fixture of the same name, or a direct parameter to the + parametrized function, based on the ``indirect`` parameter of the + parametrized() call. + + :param argnames: + List of argument names passed to ``parametrize()``. + :param indirect: + Same as the ``indirect`` parameter of ``parametrize()``. + :returns + A dict mapping each arg name to either "indirect" or "direct". + """ + arg_directness: dict[str, Literal["indirect", "direct"]] + if isinstance(indirect, bool): + arg_directness = dict.fromkeys( + argnames, "indirect" if indirect else "direct" + ) + elif isinstance(indirect, Sequence): + arg_directness = dict.fromkeys(argnames, "direct") + for arg in indirect: + if arg not in argnames: + fail( + f"In {self.function.__name__}: indirect fixture '{arg}' doesn't exist", + pytrace=False, + ) + arg_directness[arg] = "indirect" + else: + fail( + f"In {self.function.__name__}: expected Sequence or boolean" + f" for indirect, got {type(indirect).__name__}", + pytrace=False, + ) + return arg_directness + + def _validate_if_using_arg_names( + self, + argnames: Sequence[str], + indirect: bool | Sequence[str], + ) -> None: + """Check if all argnames are being used, by default values, or directly/indirectly. + + :param List[str] argnames: List of argument names passed to ``parametrize()``. + :param indirect: Same as the ``indirect`` parameter of ``parametrize()``. + :raises ValueError: If validation fails. 
+ """ + default_arg_names = set(get_default_arg_names(self.function)) + func_name = self.function.__name__ + for arg in argnames: + if arg not in self.fixturenames: + if arg in default_arg_names: + fail( + f"In {func_name}: function already takes an argument '{arg}' with a default value", + pytrace=False, + ) + else: + if isinstance(indirect, Sequence): + name = "fixture" if arg in indirect else "argument" + else: + name = "fixture" if indirect else "argument" + fail( + f"In {func_name}: function uses no {name} '{arg}'", + pytrace=False, + ) + + +def _find_parametrized_scope( + argnames: Sequence[str], + arg2fixturedefs: Mapping[str, Sequence[fixtures.FixtureDef[object]]], + indirect: bool | Sequence[str], +) -> Scope: + """Find the most appropriate scope for a parametrized call based on its arguments. + + When there's at least one direct argument, always use "function" scope. + + When a test function is parametrized and all its arguments are indirect + (e.g. fixtures), return the most narrow scope based on the fixtures used. + + Related to issue #1832, based on code posted by @Kingdread. + """ + if isinstance(indirect, Sequence): + all_arguments_are_fixtures = len(indirect) == len(argnames) + else: + all_arguments_are_fixtures = bool(indirect) + + if all_arguments_are_fixtures: + fixturedefs = arg2fixturedefs or {} + used_scopes = [ + fixturedef[-1]._scope + for name, fixturedef in fixturedefs.items() + if name in argnames + ] + # Takes the most narrow scope from used fixtures. + return min(used_scopes, default=Scope.Function) + + return Scope.Function + + +def _ascii_escaped_by_config(val: str | bytes, config: Config | None) -> str: + if config is None: + escape_option = False + else: + escape_option = config.getini( + "disable_test_id_escaping_and_forfeit_all_rights_to_community_support" + ) + # TODO: If escaping is turned off and the user passes bytes, + # will return a bytes. For now we ignore this but the + # code *probably* doesn't handle this case. + return val if escape_option else ascii_escaped(val) # type: ignore + + +class Function(PyobjMixin, nodes.Item): + """Item responsible for setting up and executing a Python test function. + + :param name: + The full function name, including any decorations like those + added by parametrization (``my_func[my_param]``). + :param parent: + The parent Node. + :param config: + The pytest Config object. + :param callspec: + If given, this function has been parametrized and the callspec contains + meta information about the parametrization. + :param callobj: + If given, the object which will be called when the Function is invoked, + otherwise the callobj will be obtained from ``parent`` using ``originalname``. + :param keywords: + Keywords bound to the function object for "-k" matching. + :param session: + The pytest Session object. + :param fixtureinfo: + Fixture information already resolved at this fixture node.. + :param originalname: + The attribute name to use for accessing the underlying function object. + Defaults to ``name``. Set this if name is different from the original name, + for example when it contains decorations like those added by parametrization + (``my_func[my_param]``). + """ + + # Disable since functions handle it themselves. 
+ _ALLOW_MARKERS = False + + def __init__( + self, + name: str, + parent, + config: Config | None = None, + callspec: CallSpec2 | None = None, + callobj=NOTSET, + keywords: Mapping[str, Any] | None = None, + session: Session | None = None, + fixtureinfo: FuncFixtureInfo | None = None, + originalname: str | None = None, + ) -> None: + super().__init__(name, parent, config=config, session=session) + + if callobj is not NOTSET: + self._obj = callobj + self._instance = getattr(callobj, "__self__", None) + + #: Original function name, without any decorations (for example + #: parametrization adds a ``"[...]"`` suffix to function names), used to access + #: the underlying function object from ``parent`` (in case ``callobj`` is not given + #: explicitly). + #: + #: .. versionadded:: 3.0 + self.originalname = originalname or name + + # Note: when FunctionDefinition is introduced, we should change ``originalname`` + # to a readonly property that returns FunctionDefinition.name. + + self.own_markers.extend(get_unpacked_marks(self.obj)) + if callspec: + self.callspec = callspec + self.own_markers.extend(callspec.marks) + + # todo: this is a hell of a hack + # https://github.com/pytest-dev/pytest/issues/4569 + # Note: the order of the updates is important here; indicates what + # takes priority (ctor argument over function attributes over markers). + # Take own_markers only; NodeKeywords handles parent traversal on its own. + self.keywords.update((mark.name, mark) for mark in self.own_markers) + self.keywords.update(self.obj.__dict__) + if keywords: + self.keywords.update(keywords) + + if fixtureinfo is None: + fm = self.session._fixturemanager + fixtureinfo = fm.getfixtureinfo(self, self.obj, self.cls) + self._fixtureinfo: FuncFixtureInfo = fixtureinfo + self.fixturenames = fixtureinfo.names_closure + self._initrequest() + + # todo: determine sound type limitations + @classmethod + def from_parent(cls, parent, **kw) -> Self: + """The public constructor.""" + return super().from_parent(parent=parent, **kw) + + def _initrequest(self) -> None: + self.funcargs: dict[str, object] = {} + self._request = fixtures.TopRequest(self, _ispytest=True) + + @property + def function(self): + """Underlying python 'function' object.""" + return getimfunc(self.obj) + + @property + def instance(self): + try: + return self._instance + except AttributeError: + if isinstance(self.parent, Class): + # Each Function gets a fresh class instance. + self._instance = self._getinstance() + else: + self._instance = None + return self._instance + + def _getinstance(self): + if isinstance(self.parent, Class): + # Each Function gets a fresh class instance. 
+ return self.parent.newinstance() + else: + return None + + def _getobj(self): + instance = self.instance + if instance is not None: + parent_obj = instance + else: + assert self.parent is not None + parent_obj = self.parent.obj # type: ignore[attr-defined] + return getattr(parent_obj, self.originalname) + + @property + def _pyfuncitem(self): + """(compatonly) for code expecting pytest-2.2 style request objects.""" + return self + + def runtest(self) -> None: + """Execute the underlying test function.""" + self.ihook.pytest_pyfunc_call(pyfuncitem=self) + + def setup(self) -> None: + self._request._fillfixtures() + + def _traceback_filter(self, excinfo: ExceptionInfo[BaseException]) -> Traceback: + if hasattr(self, "_obj") and not self.config.getoption("fulltrace", False): + code = _pytest._code.Code.from_function(get_real_func(self.obj)) + path, firstlineno = code.path, code.firstlineno + traceback = excinfo.traceback + ntraceback = traceback.cut(path=path, firstlineno=firstlineno) + if ntraceback == traceback: + ntraceback = ntraceback.cut(path=path) + if ntraceback == traceback: + ntraceback = ntraceback.filter(filter_traceback) + if not ntraceback: + ntraceback = traceback + ntraceback = ntraceback.filter(excinfo) + + # issue364: mark all but first and last frames to + # only show a single-line message for each frame. + if self.config.getoption("tbstyle", "auto") == "auto": + if len(ntraceback) > 2: + ntraceback = Traceback( + ( + ntraceback[0], + *(t.with_repr_style("short") for t in ntraceback[1:-1]), + ntraceback[-1], + ) + ) + + return ntraceback + return excinfo.traceback + + # TODO: Type ignored -- breaks Liskov Substitution. + def repr_failure( # type: ignore[override] + self, + excinfo: ExceptionInfo[BaseException], + ) -> str | TerminalRepr: + style = self.config.getoption("tbstyle", "auto") + if style == "auto": + style = "long" + return self._repr_failure_py(excinfo, style=style) + + +class FunctionDefinition(Function): + """This class is a stop gap solution until we evolve to have actual function + definition nodes and manage to get rid of ``metafunc``.""" + + def runtest(self) -> None: + raise RuntimeError("function definitions are not supposed to be run as tests") + + setup = runtest diff --git a/venv/Lib/site-packages/_pytest/python_api.py b/venv/Lib/site-packages/_pytest/python_api.py new file mode 100644 index 0000000..4174a55 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/python_api.py @@ -0,0 +1,1020 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from collections.abc import Collection +from collections.abc import Sized +from decimal import Decimal +import math +from numbers import Complex +import pprint +import re +from types import TracebackType +from typing import Any +from typing import Callable +from typing import cast +from typing import ContextManager +from typing import final +from typing import Mapping +from typing import overload +from typing import Pattern +from typing import Sequence +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import TypeVar + +import _pytest._code +from _pytest.outcomes import fail + + +if TYPE_CHECKING: + from numpy import ndarray + + +def _compare_approx( + full_object: object, + message_data: Sequence[tuple[str, str, str]], + number_of_elements: int, + different_ids: Sequence[object], + max_abs_diff: float, + max_rel_diff: float, +) -> list[str]: + message_list = list(message_data) + message_list.insert(0, ("Index", "Obtained", "Expected")) + max_sizes = 
[0, 0, 0] + for index, obtained, expected in message_list: + max_sizes[0] = max(max_sizes[0], len(index)) + max_sizes[1] = max(max_sizes[1], len(obtained)) + max_sizes[2] = max(max_sizes[2], len(expected)) + explanation = [ + f"comparison failed. Mismatched elements: {len(different_ids)} / {number_of_elements}:", + f"Max absolute difference: {max_abs_diff}", + f"Max relative difference: {max_rel_diff}", + ] + [ + f"{indexes:<{max_sizes[0]}} | {obtained:<{max_sizes[1]}} | {expected:<{max_sizes[2]}}" + for indexes, obtained, expected in message_list + ] + return explanation + + +# builtin pytest.approx helper + + +class ApproxBase: + """Provide shared utilities for making approximate comparisons between + numbers or sequences of numbers.""" + + # Tell numpy to use our `__eq__` operator instead of its. + __array_ufunc__ = None + __array_priority__ = 100 + + def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None: + __tracebackhide__ = True + self.expected = expected + self.abs = abs + self.rel = rel + self.nan_ok = nan_ok + self._check_type() + + def __repr__(self) -> str: + raise NotImplementedError + + def _repr_compare(self, other_side: Any) -> list[str]: + return [ + "comparison failed", + f"Obtained: {other_side}", + f"Expected: {self}", + ] + + def __eq__(self, actual) -> bool: + return all( + a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual) + ) + + def __bool__(self): + __tracebackhide__ = True + raise AssertionError( + "approx() is not supported in a boolean context.\nDid you mean: `assert a == approx(b)`?" + ) + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + def __ne__(self, actual) -> bool: + return not (actual == self) + + def _approx_scalar(self, x) -> ApproxScalar: + if isinstance(x, Decimal): + return ApproxDecimal(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok) + + def _yield_comparisons(self, actual): + """Yield all the pairs of numbers to be compared. + + This is used to implement the `__eq__` method. + """ + raise NotImplementedError + + def _check_type(self) -> None: + """Raise a TypeError if the expected value is not a valid type.""" + # This is only a concern if the expected value is a sequence. In every + # other case, the approx() function ensures that the expected value has + # a numeric type. For this reason, the default is to do nothing. The + # classes that deal with sequences should reimplement this method to + # raise if there are any non-numeric elements in the sequence. + + +def _recursive_sequence_map(f, x): + """Recursively map a function over a sequence of arbitrary depth""" + if isinstance(x, (list, tuple)): + seq_type = type(x) + return seq_type(_recursive_sequence_map(f, xi) for xi in x) + elif _is_sequence_like(x): + return [_recursive_sequence_map(f, xi) for xi in x] + else: + return f(x) + + +class ApproxNumpy(ApproxBase): + """Perform approximate comparisons where the expected value is numpy array.""" + + def __repr__(self) -> str: + list_scalars = _recursive_sequence_map( + self._approx_scalar, self.expected.tolist() + ) + return f"approx({list_scalars!r})" + + def _repr_compare(self, other_side: ndarray | list[Any]) -> list[str]: + import itertools + import math + + def get_value_from_nested_list( + nested_list: list[Any], nd_index: tuple[Any, ...] + ) -> Any: + """ + Helper function to get the value out of a nested list, given an n-dimensional index. 
+ This mimics numpy's indexing, but for raw nested python lists. + """ + value: Any = nested_list + for i in nd_index: + value = value[i] + return value + + np_array_shape = self.expected.shape + approx_side_as_seq = _recursive_sequence_map( + self._approx_scalar, self.expected.tolist() + ) + + # convert other_side to numpy array to ensure shape attribute is available + other_side_as_array = _as_numpy_array(other_side) + assert other_side_as_array is not None + + if np_array_shape != other_side_as_array.shape: + return [ + "Impossible to compare arrays with different shapes.", + f"Shapes: {np_array_shape} and {other_side_as_array.shape}", + ] + + number_of_elements = self.expected.size + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for index in itertools.product(*(range(i) for i in np_array_shape)): + approx_value = get_value_from_nested_list(approx_side_as_seq, index) + other_value = get_value_from_nested_list(other_side_as_array, index) + if approx_value != other_value: + abs_diff = abs(approx_value.expected - other_value) + max_abs_diff = max(max_abs_diff, abs_diff) + if other_value == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value)) + different_ids.append(index) + + message_data = [ + ( + str(index), + str(get_value_from_nested_list(other_side_as_array, index)), + str(get_value_from_nested_list(approx_side_as_seq, index)), + ) + for index in different_ids + ] + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + import numpy as np + + # self.expected is supposed to always be an array here. + + if not np.isscalar(actual): + try: + actual = np.asarray(actual) + except Exception as e: + raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e + + if not np.isscalar(actual) and actual.shape != self.expected.shape: + return False + + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + import numpy as np + + # `actual` can either be a numpy array or a scalar, it is treated in + # `__eq__` before being passed to `ApproxBase.__eq__`, which is the + # only method that calls this one. 
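+        # For example, ``3.0 == approx(np.array([3.0, 3.0]))`` pairs the
+        # scalar 3.0 with each element of the expected array, while an array
+        # ``actual`` is paired with the expected array element-by-element.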
+ + if np.isscalar(actual): + for i in np.ndindex(self.expected.shape): + yield actual, self.expected[i].item() + else: + for i in np.ndindex(self.expected.shape): + yield actual[i].item(), self.expected[i].item() + + +class ApproxMapping(ApproxBase): + """Perform approximate comparisons where the expected value is a mapping + with numeric values (the keys can be anything).""" + + def __repr__(self) -> str: + return f"approx({({k: self._approx_scalar(v) for k, v in self.expected.items()})!r})" + + def _repr_compare(self, other_side: Mapping[object, float]) -> list[str]: + import math + + approx_side_as_map = { + k: self._approx_scalar(v) for k, v in self.expected.items() + } + + number_of_elements = len(approx_side_as_map) + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for (approx_key, approx_value), other_value in zip( + approx_side_as_map.items(), other_side.values() + ): + if approx_value != other_value: + if approx_value.expected is not None and other_value is not None: + max_abs_diff = max( + max_abs_diff, abs(approx_value.expected - other_value) + ) + if approx_value.expected == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max( + max_rel_diff, + abs( + (approx_value.expected - other_value) + / approx_value.expected + ), + ) + different_ids.append(approx_key) + + message_data = [ + (str(key), str(other_side[key]), str(approx_side_as_map[key])) + for key in different_ids + ] + + return _compare_approx( + self.expected, + message_data, + number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + try: + if set(actual.keys()) != set(self.expected.keys()): + return False + except AttributeError: + return False + + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + for k in self.expected.keys(): + yield actual[k], self.expected[k] + + def _check_type(self) -> None: + __tracebackhide__ = True + for key, value in self.expected.items(): + if isinstance(value, type(self.expected)): + msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}" + raise TypeError(msg.format(key, value, pprint.pformat(self.expected))) + + +class ApproxSequenceLike(ApproxBase): + """Perform approximate comparisons where the expected value is a sequence of numbers.""" + + def __repr__(self) -> str: + seq_type = type(self.expected) + if seq_type not in (tuple, list): + seq_type = list + return f"approx({seq_type(self._approx_scalar(x) for x in self.expected)!r})" + + def _repr_compare(self, other_side: Sequence[float]) -> list[str]: + import math + + if len(self.expected) != len(other_side): + return [ + "Impossible to compare lists with different sizes.", + f"Lengths: {len(self.expected)} and {len(other_side)}", + ] + + approx_side_as_map = _recursive_sequence_map(self._approx_scalar, self.expected) + + number_of_elements = len(approx_side_as_map) + max_abs_diff = -math.inf + max_rel_diff = -math.inf + different_ids = [] + for i, (approx_value, other_value) in enumerate( + zip(approx_side_as_map, other_side) + ): + if approx_value != other_value: + abs_diff = abs(approx_value.expected - other_value) + max_abs_diff = max(max_abs_diff, abs_diff) + if other_value == 0.0: + max_rel_diff = math.inf + else: + max_rel_diff = max(max_rel_diff, abs_diff / abs(other_value)) + different_ids.append(i) + + message_data = [ + (str(i), str(other_side[i]), str(approx_side_as_map[i])) + for i in different_ids + ] + + return _compare_approx( + self.expected, + message_data, + 
number_of_elements, + different_ids, + max_abs_diff, + max_rel_diff, + ) + + def __eq__(self, actual) -> bool: + try: + if len(actual) != len(self.expected): + return False + except TypeError: + return False + return super().__eq__(actual) + + def _yield_comparisons(self, actual): + return zip(actual, self.expected) + + def _check_type(self) -> None: + __tracebackhide__ = True + for index, x in enumerate(self.expected): + if isinstance(x, type(self.expected)): + msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}" + raise TypeError(msg.format(x, index, pprint.pformat(self.expected))) + + +class ApproxScalar(ApproxBase): + """Perform approximate comparisons where the expected value is a single number.""" + + # Using Real should be better than this Union, but not possible yet: + # https://github.com/python/typeshed/pull/3108 + DEFAULT_ABSOLUTE_TOLERANCE: float | Decimal = 1e-12 + DEFAULT_RELATIVE_TOLERANCE: float | Decimal = 1e-6 + + def __repr__(self) -> str: + """Return a string communicating both the expected value and the + tolerance for the comparison being made. + + For example, ``1.0 ± 1e-6``, ``(3+4j) ± 5e-6 ∠ ±180°``. + """ + # Don't show a tolerance for values that aren't compared using + # tolerances, i.e. non-numerics and infinities. Need to call abs to + # handle complex numbers, e.g. (inf + 1j). + if (not isinstance(self.expected, (Complex, Decimal))) or math.isinf( + abs(self.expected) + ): + return str(self.expected) + + # If a sensible tolerance can't be calculated, self.tolerance will + # raise a ValueError. In this case, display '???'. + try: + vetted_tolerance = f"{self.tolerance:.1e}" + if ( + isinstance(self.expected, Complex) + and self.expected.imag + and not math.isinf(self.tolerance) + ): + vetted_tolerance += " ∠ ±180°" + except ValueError: + vetted_tolerance = "???" + + return f"{self.expected} ± {vetted_tolerance}" + + def __eq__(self, actual) -> bool: + """Return whether the given value is equal to the expected value + within the pre-specified tolerance.""" + asarray = _as_numpy_array(actual) + if asarray is not None: + # Call ``__eq__()`` manually to prevent infinite-recursion with + # numpy<1.13. See #3748. + return all(self.__eq__(a) for a in asarray.flat) + + # Short-circuit exact equality. + if actual == self.expected: + return True + + # If either type is non-numeric, fall back to strict equality. + # NB: we need Complex, rather than just Number, to ensure that __abs__, + # __sub__, and __float__ are defined. + if not ( + isinstance(self.expected, (Complex, Decimal)) + and isinstance(actual, (Complex, Decimal)) + ): + return False + + # Allow the user to control whether NaNs are considered equal to each + # other or not. The abs() calls are for compatibility with complex + # numbers. + if math.isnan(abs(self.expected)): + return self.nan_ok and math.isnan(abs(actual)) + + # Infinity shouldn't be approximately equal to anything but itself, but + # if there's a relative tolerance, it will be infinite and infinity + # will seem approximately equal to everything. The equal-to-itself + # case would have been short circuited above, so here we can just + # return false if the expected value is infinite. The abs() call is + # for compatibility with complex numbers. + if math.isinf(abs(self.expected)): + return False + + # Return true if the two numbers are within the tolerance. 
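+        # For example, ``approx(100, rel=1e-3)`` has a tolerance of 0.1 here
+        # (the default absolute tolerance of 1e-12 is negligible), so any
+        # actual value within [99.9, 100.1] compares equal.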
+ result: bool = abs(self.expected - actual) <= self.tolerance + return result + + # Ignore type because of https://github.com/python/mypy/issues/4266. + __hash__ = None # type: ignore + + @property + def tolerance(self): + """Return the tolerance for the comparison. + + This could be either an absolute tolerance or a relative tolerance, + depending on what the user specified or which would be larger. + """ + + def set_default(x, default): + return x if x is not None else default + + # Figure out what the absolute tolerance should be. ``self.abs`` is + # either None or a value specified by the user. + absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE) + + if absolute_tolerance < 0: + raise ValueError( + f"absolute tolerance can't be negative: {absolute_tolerance}" + ) + if math.isnan(absolute_tolerance): + raise ValueError("absolute tolerance can't be NaN.") + + # If the user specified an absolute tolerance but not a relative one, + # just return the absolute tolerance. + if self.rel is None: + if self.abs is not None: + return absolute_tolerance + + # Figure out what the relative tolerance should be. ``self.rel`` is + # either None or a value specified by the user. This is done after + # we've made sure the user didn't ask for an absolute tolerance only, + # because we don't want to raise errors about the relative tolerance if + # we aren't even going to use it. + relative_tolerance = set_default( + self.rel, self.DEFAULT_RELATIVE_TOLERANCE + ) * abs(self.expected) + + if relative_tolerance < 0: + raise ValueError( + f"relative tolerance can't be negative: {relative_tolerance}" + ) + if math.isnan(relative_tolerance): + raise ValueError("relative tolerance can't be NaN.") + + # Return the larger of the relative and absolute tolerances. + return max(relative_tolerance, absolute_tolerance) + + +class ApproxDecimal(ApproxScalar): + """Perform approximate comparisons where the expected value is a Decimal.""" + + DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12") + DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6") + + +def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase: + """Assert that two numbers (or two ordered sequences of numbers) are equal to each other + within some tolerance. + + Due to the :doc:`python:tutorial/floatingpoint`, numbers that we + would intuitively expect to be equal are not always so:: + + >>> 0.1 + 0.2 == 0.3 + False + + This problem is commonly encountered when writing tests, e.g. when making + sure that floating-point values are what you expect them to be. One way to + deal with this problem is to assert that two floating-point numbers are + equal to within some appropriate tolerance:: + + >>> abs((0.1 + 0.2) - 0.3) < 1e-6 + True + + However, comparisons like this are tedious to write and difficult to + understand. Furthermore, absolute comparisons like the one above are + usually discouraged because there's no tolerance that works well for all + situations. ``1e-6`` is good for numbers around ``1``, but too small for + very big numbers and too big for very small ones. It's better to express + the tolerance as a fraction of the expected value, but relative comparisons + like that are even more difficult to write correctly and concisely. 
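+
+    For instance, a hand-rolled relative comparison looks like::
+
+        >>> abs((0.1 + 0.2) - 0.3) <= 1e-6 * abs(0.3)
+        True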
+ + The ``approx`` class performs floating-point comparisons using a syntax + that's as intuitive as possible:: + + >>> from pytest import approx + >>> 0.1 + 0.2 == approx(0.3) + True + + The same syntax also works for ordered sequences of numbers:: + + >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) + True + + ``numpy`` arrays:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP + True + + And for a ``numpy`` array against a scalar:: + + >>> import numpy as np # doctest: +SKIP + >>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP + True + + Only ordered sequences are supported, because ``approx`` needs + to infer the relative position of the sequences without ambiguity. This means + ``sets`` and other unordered sequences are not supported. + + Finally, dictionary *values* can also be compared:: + + >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6}) + True + + The comparison will be true if both mappings have the same keys and their + respective values match the expected tolerances. + + **Tolerances** + + By default, ``approx`` considers numbers within a relative tolerance of + ``1e-6`` (i.e. one part in a million) of its expected value to be equal. + This treatment would lead to surprising results if the expected value was + ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. + To handle this case less surprisingly, ``approx`` also considers numbers + within an absolute tolerance of ``1e-12`` of its expected value to be + equal. Infinity and NaN are special cases. Infinity is only considered + equal to itself, regardless of the relative tolerance. NaN is not + considered equal to anything by default, but you can make it be equal to + itself by setting the ``nan_ok`` argument to True. (This is meant to + facilitate comparing arrays that use NaN to mean "no data".) + + Both the relative and absolute tolerances can be changed by passing + arguments to the ``approx`` constructor:: + + >>> 1.0001 == approx(1) + False + >>> 1.0001 == approx(1, rel=1e-3) + True + >>> 1.0001 == approx(1, abs=1e-3) + True + + If you specify ``abs`` but not ``rel``, the comparison will not consider + the relative tolerance at all. In other words, two numbers that are within + the default relative tolerance of ``1e-6`` will still be considered unequal + if they exceed the specified absolute tolerance. If you specify both + ``abs`` and ``rel``, the numbers will be considered equal if either + tolerance is met:: + + >>> 1 + 1e-8 == approx(1) + True + >>> 1 + 1e-8 == approx(1, abs=1e-12) + False + >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) + True + + You can also use ``approx`` to compare nonnumeric types, or dicts and + sequences containing nonnumeric types, in which case it falls back to + strict equality. This can be useful for comparing dicts and sequences that + can contain optional values:: + + >>> {"required": 1.0000005, "optional": None} == approx({"required": 1, "optional": None}) + True + >>> [None, 1.0000005] == approx([None,1]) + True + >>> ["foo", 1.0000005] == approx([None,1]) + False + + If you're thinking about using ``approx``, then you might want to know how + it compares to other good ways of comparing floating-point numbers. 
All of these algorithms are based on relative and absolute tolerances and
+    should agree for the most part, but they do have meaningful differences:
+
+    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
+      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
+      tolerance is met. Because the relative tolerance is calculated w.r.t.
+      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
+      ``b`` is a "reference value"). You have to specify an absolute tolerance
+      if you want to compare to ``0.0`` because there is no tolerance by
+      default. More information: :py:func:`math.isclose`.
+
+    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
+      between ``a`` and ``b`` is less than the sum of the relative tolerance
+      w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
+      is only calculated w.r.t. ``b``, this test is asymmetric and you can
+      think of ``b`` as the reference value. Support for comparing sequences
+      is provided by :py:func:`numpy.allclose`. More information:
+      :std:doc:`numpy:reference/generated/numpy.isclose`.
+
+    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
+      are within an absolute tolerance of ``1e-7``. No relative tolerance is
+      considered, so this function is not appropriate for very large or very
+      small numbers. Also, it's only available in subclasses of ``unittest.TestCase``
+      and it's ugly because it doesn't follow PEP8. More information:
+      :py:meth:`unittest.TestCase.assertAlmostEqual`.
+
+    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
+      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
+      Because the relative tolerance is only calculated w.r.t. ``b``, this test
+      is asymmetric and you can think of ``b`` as the reference value. In the
+      special case that you explicitly specify an absolute tolerance but not a
+      relative tolerance, only the absolute tolerance is considered.
+
+    .. note::
+
+        ``approx`` can handle numpy arrays, but we recommend the
+        specialised test helpers in :std:doc:`numpy:reference/routines.testing`
+        if you need support for comparisons, NaNs, or ULP-based tolerances.
+
+        To match strings using regex, you can use ``Matches`` from the
+        `re_assert package <https://github.com/asottile/re-assert>`_.
+
+    .. warning::
+
+        .. versionchanged:: 3.2
+
+        In order to avoid inconsistent behavior, :py:exc:`TypeError` is
+        raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
+        The example below illustrates the problem::
+
+            assert approx(0.1) > 0.1 + 1e-10  # calls approx(0.1).__gt__(0.1 + 1e-10)
+            assert 0.1 + 1e-10 > approx(0.1)  # calls approx(0.1).__lt__(0.1 + 1e-10)
+
+        In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
+        to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used
+        for the comparison. This is because the call hierarchy of rich
+        comparisons follows a fixed behavior. More information:
+        :py:meth:`object.__ge__`
+
+    .. versionchanged:: 3.7.1
+        ``approx`` raises ``TypeError`` when it encounters a dict value or
+        sequence element of nonnumeric type.
+
+    .. versionchanged:: 6.1.0
+        ``approx`` falls back to strict equality for nonnumeric types instead
+        of raising ``TypeError``.
+    """
+    # Delegate the comparison to a class that knows how to deal with the type
+    # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
+    #
+    # The primary responsibility of these classes is to implement ``__eq__()``
+    # and ``__repr__()``.
+    # The former is used to actually check if some "actual" value is
+    # equivalent to the given expected value within the allowed tolerance.
+    # The latter is used to show the user the expected value and tolerance,
+    # in the case that a test failed.
+    #
+    # The actual logic for making approximate comparisons can be found in
+    # ApproxScalar, which is used to compare individual numbers. All of the
+    # other Approx classes eventually delegate to this class. The ApproxBase
+    # class provides some convenient methods and overloads, but isn't really
+    # essential.
+
+    __tracebackhide__ = True
+
+    if isinstance(expected, Decimal):
+        cls: type[ApproxBase] = ApproxDecimal
+    elif isinstance(expected, Mapping):
+        cls = ApproxMapping
+    elif _is_numpy_array(expected):
+        expected = _as_numpy_array(expected)
+        cls = ApproxNumpy
+    elif _is_sequence_like(expected):
+        cls = ApproxSequenceLike
+    elif isinstance(expected, Collection) and not isinstance(expected, (str, bytes)):
+        msg = f"pytest.approx() only supports ordered sequences, but got: {expected!r}"
+        raise TypeError(msg)
+    else:
+        cls = ApproxScalar
+
+    return cls(expected, rel, abs, nan_ok)
+
+
+def _is_sequence_like(expected: object) -> bool:
+    return (
+        hasattr(expected, "__getitem__")
+        and isinstance(expected, Sized)
+        and not isinstance(expected, (str, bytes))
+    )
+
+
+def _is_numpy_array(obj: object) -> bool:
+    """
+    Return true if the given object is implicitly convertible to ndarray,
+    and numpy is already imported.
+    """
+    return _as_numpy_array(obj) is not None
+
+
+def _as_numpy_array(obj: object) -> ndarray | None:
+    """
+    Return an ndarray if the given object is implicitly convertible to ndarray,
+    and numpy is already imported, otherwise None.
+    """
+    import sys
+
+    np: Any = sys.modules.get("numpy")
+    if np is not None:
+        # avoid infinite recursion on numpy scalars, which have __array__
+        if np.isscalar(obj):
+            return None
+        elif isinstance(obj, np.ndarray):
+            return obj
+        elif hasattr(obj, "__array__") or hasattr(obj, "__array_interface__"):
+            return np.asarray(obj)
+    return None
+
+
+# builtin pytest.raises helper
+
+E = TypeVar("E", bound=BaseException)
+
+
+@overload
+def raises(
+    expected_exception: type[E] | tuple[type[E], ...],
+    *,
+    match: str | Pattern[str] | None = ...,
+) -> RaisesContext[E]: ...
+
+
+@overload
+def raises(
+    expected_exception: type[E] | tuple[type[E], ...],
+    func: Callable[..., Any],
+    *args: Any,
+    **kwargs: Any,
+) -> _pytest._code.ExceptionInfo[E]: ...
+
+
+def raises(
+    expected_exception: type[E] | tuple[type[E], ...], *args: Any, **kwargs: Any
+) -> RaisesContext[E] | _pytest._code.ExceptionInfo[E]:
+    r"""Assert that a code block/function call raises an exception type, or one of its subclasses.
+
+    :param expected_exception:
+        The expected exception type, or a tuple if one of multiple possible
+        exception types are expected. Note that subclasses of the passed exceptions
+        will also match.
+
+    :kwparam str | re.Pattern[str] | None match:
+        If specified, a string containing a regular expression,
+        or a regular expression object, that is tested against the string
+        representation of the exception and its :pep:`678` `__notes__`
+        using :func:`re.search`.
+
+        To match a literal string that may contain :ref:`special characters
+        <re-syntax>`, the pattern can first be escaped with :func:`re.escape`.
+
+        (This is only used when ``pytest.raises`` is used as a context manager,
+        and passed through to the function otherwise.
+ When using ``pytest.raises`` as a function, you can use: + ``pytest.raises(Exc, func, match="passed on").match("my pattern")``.) + + Use ``pytest.raises`` as a context manager, which will capture the exception of the given + type, or any of its subclasses:: + + >>> import pytest + >>> with pytest.raises(ZeroDivisionError): + ... 1/0 + + If the code block does not raise the expected exception (:class:`ZeroDivisionError` in the example + above), or no exception at all, the check will fail instead. + + You can also use the keyword argument ``match`` to assert that the + exception matches a text or regex:: + + >>> with pytest.raises(ValueError, match='must be 0 or None'): + ... raise ValueError("value must be 0 or None") + + >>> with pytest.raises(ValueError, match=r'must be \d+$'): + ... raise ValueError("value must be 42") + + The ``match`` argument searches the formatted exception string, which includes any + `PEP-678 `__ ``__notes__``: + + >>> with pytest.raises(ValueError, match=r"had a note added"): # doctest: +SKIP + ... e = ValueError("value must be 42") + ... e.add_note("had a note added") + ... raise e + + The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the + details of the captured exception:: + + >>> with pytest.raises(ValueError) as exc_info: + ... raise ValueError("value must be 42") + >>> assert exc_info.type is ValueError + >>> assert exc_info.value.args[0] == "value must be 42" + + .. warning:: + + Given that ``pytest.raises`` matches subclasses, be wary of using it to match :class:`Exception` like this:: + + with pytest.raises(Exception): # Careful, this will catch ANY exception raised. + some_function() + + Because :class:`Exception` is the base class of almost all exceptions, it is easy for this to hide + real bugs, where the user wrote this expecting a specific exception, but some other exception is being + raised due to a bug introduced during a refactoring. + + Avoid using ``pytest.raises`` to catch :class:`Exception` unless certain that you really want to catch + **any** exception raised. + + .. note:: + + When using ``pytest.raises`` as a context manager, it's worthwhile to + note that normal context manager rules apply and that the exception + raised *must* be the final line in the scope of the context manager. + Lines of code after that, within the scope of the context manager will + not be executed. For example:: + + >>> value = 15 + >>> with pytest.raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... assert exc_info.type is ValueError # This will not execute. + + Instead, the following approach must be taken (note the difference in + scope):: + + >>> with pytest.raises(ValueError) as exc_info: + ... if value > 10: + ... raise ValueError("value must be <= 10") + ... + >>> assert exc_info.type is ValueError + + **Using with** ``pytest.mark.parametrize`` + + When using :ref:`pytest.mark.parametrize ref` + it is possible to parametrize tests such that + some runs raise an exception and others do not. + + See :ref:`parametrizing_conditional_raising` for an example. + + .. seealso:: + + :ref:`assertraises` for more examples and detailed discussion. + + **Legacy form** + + It is possible to specify a callable by passing a to-be-called lambda:: + + >>> raises(ZeroDivisionError, lambda: 1/0) + + + or you can specify an arbitrary callable with arguments:: + + >>> def f(x): return 1/x + ... 
+ >>> raises(ZeroDivisionError, f, 0) + + >>> raises(ZeroDivisionError, f, x=0) + + + The form above is fully supported but discouraged for new code because the + context manager form is regarded as more readable and less error-prone. + + .. note:: + Similar to caught exception objects in Python, explicitly clearing + local references to returned ``ExceptionInfo`` objects can + help the Python interpreter speed up its garbage collection. + + Clearing those references breaks a reference cycle + (``ExceptionInfo`` --> caught exception --> frame stack raising + the exception --> current frame stack --> local variables --> + ``ExceptionInfo``) which makes Python keep all objects referenced + from that cycle (including all local variables in the current + frame) alive until the next cyclic garbage collection run. + More detailed information can be found in the official Python + documentation for :ref:`the try statement `. + """ + __tracebackhide__ = True + + if not expected_exception: + raise ValueError( + f"Expected an exception type or a tuple of exception types, but got `{expected_exception!r}`. " + f"Raising exceptions is already understood as failing the test, so you don't need " + f"any special code to say 'this should never raise an exception'." + ) + if isinstance(expected_exception, type): + expected_exceptions: tuple[type[E], ...] = (expected_exception,) + else: + expected_exceptions = expected_exception + for exc in expected_exceptions: + if not isinstance(exc, type) or not issubclass(exc, BaseException): + msg = "expected exception must be a BaseException type, not {}" # type: ignore[unreachable] + not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__ + raise TypeError(msg.format(not_a)) + + message = f"DID NOT RAISE {expected_exception}" + + if not args: + match: str | Pattern[str] | None = kwargs.pop("match", None) + if kwargs: + msg = "Unexpected keyword arguments passed to pytest.raises: " + msg += ", ".join(sorted(kwargs)) + msg += "\nUse context-manager form instead?" + raise TypeError(msg) + return RaisesContext(expected_exception, message, match) + else: + func = args[0] + if not callable(func): + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") + try: + func(*args[1:], **kwargs) + except expected_exception as e: + return _pytest._code.ExceptionInfo.from_exception(e) + fail(message) + + +# This doesn't work with mypy for now. Use fail.Exception instead. 
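+# (``fail.Exception`` is the ``Failed`` outcome exception, so code can catch
+# ``raises.Exception`` when a ``pytest.raises`` check does not pass.)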
+raises.Exception = fail.Exception # type: ignore + + +@final +class RaisesContext(ContextManager[_pytest._code.ExceptionInfo[E]]): + def __init__( + self, + expected_exception: type[E] | tuple[type[E], ...], + message: str, + match_expr: str | Pattern[str] | None = None, + ) -> None: + self.expected_exception = expected_exception + self.message = message + self.match_expr = match_expr + self.excinfo: _pytest._code.ExceptionInfo[E] | None = None + if self.match_expr is not None: + re_error = None + try: + re.compile(self.match_expr) + except re.error as e: + re_error = e + if re_error is not None: + fail(f"Invalid regex pattern provided to 'match': {re_error}") + + def __enter__(self) -> _pytest._code.ExceptionInfo[E]: + self.excinfo = _pytest._code.ExceptionInfo.for_later() + return self.excinfo + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> bool: + __tracebackhide__ = True + if exc_type is None: + fail(self.message) + assert self.excinfo is not None + if not issubclass(exc_type, self.expected_exception): + return False + # Cast to narrow the exception type now that it's verified. + exc_info = cast(Tuple[Type[E], E, TracebackType], (exc_type, exc_val, exc_tb)) + self.excinfo.fill_unfilled(exc_info) + if self.match_expr is not None: + self.excinfo.match(self.match_expr) + return True diff --git a/venv/Lib/site-packages/_pytest/python_path.py b/venv/Lib/site-packages/_pytest/python_path.py new file mode 100644 index 0000000..6e33c8a --- /dev/null +++ b/venv/Lib/site-packages/_pytest/python_path.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +import sys + +import pytest +from pytest import Config +from pytest import Parser + + +def pytest_addoption(parser: Parser) -> None: + parser.addini("pythonpath", type="paths", help="Add paths to sys.path", default=[]) + + +@pytest.hookimpl(tryfirst=True) +def pytest_load_initial_conftests(early_config: Config) -> None: + # `pythonpath = a b` will set `sys.path` to `[a, b, x, y, z, ...]` + for path in reversed(early_config.getini("pythonpath")): + sys.path.insert(0, str(path)) + + +@pytest.hookimpl(trylast=True) +def pytest_unconfigure(config: Config) -> None: + for path in config.getini("pythonpath"): + path_str = str(path) + if path_str in sys.path: + sys.path.remove(path_str) diff --git a/venv/Lib/site-packages/_pytest/recwarn.py b/venv/Lib/site-packages/_pytest/recwarn.py new file mode 100644 index 0000000..85d8de8 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/recwarn.py @@ -0,0 +1,365 @@ +# mypy: allow-untyped-defs +"""Record warnings during test function execution.""" + +from __future__ import annotations + +from pprint import pformat +import re +from types import TracebackType +from typing import Any +from typing import Callable +from typing import final +from typing import Generator +from typing import Iterator +from typing import overload +from typing import Pattern +from typing import TYPE_CHECKING +from typing import TypeVar + + +if TYPE_CHECKING: + from typing_extensions import Self + +import warnings + +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.outcomes import Exit +from _pytest.outcomes import fail + + +T = TypeVar("T") + + +@fixture +def recwarn() -> Generator[WarningsRecorder]: + """Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. 
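+
+    A typical use, sketched from the how-to linked below::
+
+        def test_hello(recwarn):
+            warnings.warn("hello", UserWarning)
+            assert len(recwarn) == 1
+            w = recwarn.pop(UserWarning)
+            assert issubclass(w.category, UserWarning)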
+ + See https://docs.pytest.org/en/latest/how-to/capture-warnings.html for information + on warning categories. + """ + wrec = WarningsRecorder(_ispytest=True) + with wrec: + warnings.simplefilter("default") + yield wrec + + +@overload +def deprecated_call(*, match: str | Pattern[str] | None = ...) -> WarningsRecorder: ... + + +@overload +def deprecated_call(func: Callable[..., T], *args: Any, **kwargs: Any) -> T: ... + + +def deprecated_call( + func: Callable[..., Any] | None = None, *args: Any, **kwargs: Any +) -> WarningsRecorder | Any: + """Assert that code produces a ``DeprecationWarning`` or ``PendingDeprecationWarning`` or ``FutureWarning``. + + This function can be used as a context manager:: + + >>> import warnings + >>> def api_call_v2(): + ... warnings.warn('use v3 of this api', DeprecationWarning) + ... return 200 + + >>> import pytest + >>> with pytest.deprecated_call(): + ... assert api_call_v2() == 200 + + It can also be used by passing a function and ``*args`` and ``**kwargs``, + in which case it will ensure calling ``func(*args, **kwargs)`` produces one of + the warnings types above. The return value is the return value of the function. + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex. + + The context manager produces a list of :class:`warnings.WarningMessage` objects, + one for each warning raised. + """ + __tracebackhide__ = True + if func is not None: + args = (func, *args) + return warns( + (DeprecationWarning, PendingDeprecationWarning, FutureWarning), *args, **kwargs + ) + + +@overload +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...] = ..., + *, + match: str | Pattern[str] | None = ..., +) -> WarningsChecker: ... + + +@overload +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...], + func: Callable[..., T], + *args: Any, + **kwargs: Any, +) -> T: ... + + +def warns( + expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning, + *args: Any, + match: str | Pattern[str] | None = None, + **kwargs: Any, +) -> WarningsChecker | Any: + r"""Assert that code raises a particular class of warning. + + Specifically, the parameter ``expected_warning`` can be a warning class or tuple + of warning classes, and the code inside the ``with`` block must issue at least one + warning of that class or classes. + + This helper produces a list of :class:`warnings.WarningMessage` objects, one for + each warning emitted (regardless of whether it is an ``expected_warning`` or not). + Since pytest 8.0, unmatched warnings are also re-emitted when the context closes. + + This function can be used as a context manager:: + + >>> import pytest + >>> with pytest.warns(RuntimeWarning): + ... warnings.warn("my warning", RuntimeWarning) + + In the context manager form you may use the keyword argument ``match`` to assert + that the warning matches a text or regex:: + + >>> with pytest.warns(UserWarning, match='must be 0 or None'): + ... warnings.warn("value must be 0 or None", UserWarning) + + >>> with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("value must be 42", UserWarning) + + >>> with pytest.warns(UserWarning): # catch re-emitted warning + ... with pytest.warns(UserWarning, match=r'must be \d+$'): + ... warnings.warn("this is not here", UserWarning) + Traceback (most recent call last): + ... + Failed: DID NOT WARN. No warnings of type ...UserWarning... were emitted... 
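+
+    Like ``raises``, ``warns`` may also be given a callable with arguments, in
+    which case it calls it and returns the callable's return value (a brief
+    sketch)::
+
+        >>> pytest.warns(UserWarning, warnings.warn, "legacy form", UserWarning)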
+ + **Using with** ``pytest.mark.parametrize`` + + When using :ref:`pytest.mark.parametrize ref` it is possible to parametrize tests + such that some runs raise a warning and others do not. + + This could be achieved in the same way as with exceptions, see + :ref:`parametrizing_conditional_raising` for an example. + + """ + __tracebackhide__ = True + if not args: + if kwargs: + argnames = ", ".join(sorted(kwargs)) + raise TypeError( + f"Unexpected keyword arguments passed to pytest.warns: {argnames}" + "\nUse context-manager form instead?" + ) + return WarningsChecker(expected_warning, match_expr=match, _ispytest=True) + else: + func = args[0] + if not callable(func): + raise TypeError(f"{func!r} object (type: {type(func)}) must be callable") + with WarningsChecker(expected_warning, _ispytest=True): + return func(*args[1:], **kwargs) + + +class WarningsRecorder(warnings.catch_warnings): # type:ignore[type-arg] + """A context manager to record raised warnings. + + Each recorded warning is an instance of :class:`warnings.WarningMessage`. + + Adapted from `warnings.catch_warnings`. + + .. note:: + ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated + differently; see :ref:`ensuring_function_triggers`. + + """ + + def __init__(self, *, _ispytest: bool = False) -> None: + check_ispytest(_ispytest) + super().__init__(record=True) + self._entered = False + self._list: list[warnings.WarningMessage] = [] + + @property + def list(self) -> list[warnings.WarningMessage]: + """The list of recorded warnings.""" + return self._list + + def __getitem__(self, i: int) -> warnings.WarningMessage: + """Get a recorded warning by index.""" + return self._list[i] + + def __iter__(self) -> Iterator[warnings.WarningMessage]: + """Iterate through the recorded warnings.""" + return iter(self._list) + + def __len__(self) -> int: + """The number of recorded warnings.""" + return len(self._list) + + def pop(self, cls: type[Warning] = Warning) -> warnings.WarningMessage: + """Pop the first recorded warning which is an instance of ``cls``, + but not an instance of a child class of any other match. + Raises ``AssertionError`` if there is no match. + """ + best_idx: int | None = None + for i, w in enumerate(self._list): + if w.category == cls: + return self._list.pop(i) # exact match, stop looking + if issubclass(w.category, cls) and ( + best_idx is None + or not issubclass(w.category, self._list[best_idx].category) + ): + best_idx = i + if best_idx is not None: + return self._list.pop(best_idx) + __tracebackhide__ = True + raise AssertionError(f"{cls!r} not found in warning list") + + def clear(self) -> None: + """Clear the list of recorded warnings.""" + self._list[:] = [] + + def __enter__(self) -> Self: + if self._entered: + __tracebackhide__ = True + raise RuntimeError(f"Cannot enter {self!r} twice") + _list = super().__enter__() + # record=True means it's None. + assert _list is not None + self._list = _list + warnings.simplefilter("always") + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + if not self._entered: + __tracebackhide__ = True + raise RuntimeError(f"Cannot exit {self!r} without entering first") + + super().__exit__(exc_type, exc_val, exc_tb) + + # Built-in catch_warnings does not reset entered state so we do it + # manually here for this context manager to become reusable. 
+ self._entered = False + + +@final +class WarningsChecker(WarningsRecorder): + def __init__( + self, + expected_warning: type[Warning] | tuple[type[Warning], ...] = Warning, + match_expr: str | Pattern[str] | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + super().__init__(_ispytest=True) + + msg = "exceptions must be derived from Warning, not %s" + if isinstance(expected_warning, tuple): + for exc in expected_warning: + if not issubclass(exc, Warning): + raise TypeError(msg % type(exc)) + expected_warning_tup = expected_warning + elif isinstance(expected_warning, type) and issubclass( + expected_warning, Warning + ): + expected_warning_tup = (expected_warning,) + else: + raise TypeError(msg % type(expected_warning)) + + self.expected_warning = expected_warning_tup + self.match_expr = match_expr + + def matches(self, warning: warnings.WarningMessage) -> bool: + assert self.expected_warning is not None + return issubclass(warning.category, self.expected_warning) and bool( + self.match_expr is None or re.search(self.match_expr, str(warning.message)) + ) + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + super().__exit__(exc_type, exc_val, exc_tb) + + __tracebackhide__ = True + + # BaseExceptions like pytest.{skip,fail,xfail,exit} or Ctrl-C within + # pytest.warns should *not* trigger "DID NOT WARN" and get suppressed + # when the warning doesn't happen. Control-flow exceptions should always + # propagate. + if exc_val is not None and ( + not isinstance(exc_val, Exception) + # Exit is an Exception, not a BaseException, for some reason. + or isinstance(exc_val, Exit) + ): + return + + def found_str() -> str: + return pformat([record.message for record in self], indent=2) + + try: + if not any(issubclass(w.category, self.expected_warning) for w in self): + fail( + f"DID NOT WARN. No warnings of type {self.expected_warning} were emitted.\n" + f" Emitted warnings: {found_str()}." + ) + elif not any(self.matches(w) for w in self): + fail( + f"DID NOT WARN. No warnings of type {self.expected_warning} matching the regex were emitted.\n" + f" Regex: {self.match_expr}\n" + f" Emitted warnings: {found_str()}." + ) + finally: + # Whether or not any warnings matched, we want to re-emit all unmatched warnings. + for w in self: + if not self.matches(w): + warnings.warn_explicit( + message=w.message, + category=w.category, + filename=w.filename, + lineno=w.lineno, + module=w.__module__, + source=w.source, + ) + + # Currently in Python it is possible to pass other types than an + # `str` message when creating `Warning` instances, however this + # causes an exception when :func:`warnings.filterwarnings` is used + # to filter those warnings. See + # https://github.com/python/cpython/issues/103577 for a discussion. + # While this can be considered a bug in CPython, we put guards in + # pytest as the error message produced without this check in place + # is confusing (#10865). + for w in self: + if type(w.message) is not UserWarning: + # If the warning was of an incorrect type then `warnings.warn()` + # creates a UserWarning. Any other warning must have been specified + # explicitly. + continue + if not w.message.args: + # UserWarning() without arguments must have been specified explicitly. + continue + msg = w.message.args[0] + if isinstance(msg, str): + continue + # It's possible that UserWarning was explicitly specified, and + # its first argument was not a string. 
But that case can't be + # distinguished from an invalid type. + raise TypeError( + f"Warning must be str or Warning, got {msg!r} (type {type(msg).__name__})" + ) diff --git a/venv/Lib/site-packages/_pytest/reports.py b/venv/Lib/site-packages/_pytest/reports.py new file mode 100644 index 0000000..77cbf77 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/reports.py @@ -0,0 +1,636 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +import dataclasses +from io import StringIO +import os +from pprint import pprint +from typing import Any +from typing import cast +from typing import final +from typing import Iterable +from typing import Iterator +from typing import Literal +from typing import Mapping +from typing import NoReturn +from typing import Sequence +from typing import TYPE_CHECKING + +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._code.code import ReprEntry +from _pytest._code.code import ReprEntryNative +from _pytest._code.code import ReprExceptionInfo +from _pytest._code.code import ReprFileLocation +from _pytest._code.code import ReprFuncArgs +from _pytest._code.code import ReprLocals +from _pytest._code.code import ReprTraceback +from _pytest._code.code import TerminalRepr +from _pytest._io import TerminalWriter +from _pytest.config import Config +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import skip + + +if TYPE_CHECKING: + from typing_extensions import Self + + from _pytest.runner import CallInfo + + +def getworkerinfoline(node): + try: + return node._workerinfocache + except AttributeError: + d = node.workerinfo + ver = "{}.{}.{}".format(*d["version_info"][:3]) + node._workerinfocache = s = "[{}] {} -- Python {} {}".format( + d["id"], d["sysplatform"], ver, d["executable"] + ) + return s + + +class BaseReport: + when: str | None + location: tuple[str, int | None, str] | None + longrepr: ( + None | ExceptionInfo[BaseException] | tuple[str, int, str] | str | TerminalRepr + ) + sections: list[tuple[str, str]] + nodeid: str + outcome: Literal["passed", "failed", "skipped"] + + def __init__(self, **kw: Any) -> None: + self.__dict__.update(kw) + + if TYPE_CHECKING: + # Can have arbitrary fields given to __init__(). + def __getattr__(self, key: str) -> Any: ... + + def toterminal(self, out: TerminalWriter) -> None: + if hasattr(self, "node"): + worker_info = getworkerinfoline(self.node) + if worker_info: + out.line(worker_info) + + longrepr = self.longrepr + if longrepr is None: + return + + if hasattr(longrepr, "toterminal"): + longrepr_terminal = cast(TerminalRepr, longrepr) + longrepr_terminal.toterminal(out) + else: + try: + s = str(longrepr) + except UnicodeEncodeError: + s = "" + out.line(s) + + def get_sections(self, prefix: str) -> Iterator[tuple[str, str]]: + for name, content in self.sections: + if name.startswith(prefix): + yield prefix, content + + @property + def longreprtext(self) -> str: + """Read-only property that returns the full string representation of + ``longrepr``. + + .. versionadded:: 3.0 + """ + file = StringIO() + tw = TerminalWriter(file) + tw.hasmarkup = False + self.toterminal(tw) + exc = file.getvalue() + return exc.strip() + + @property + def caplog(self) -> str: + """Return captured log lines, if log capturing is enabled. + + .. 
versionadded:: 3.5 + """ + return "\n".join( + content for (prefix, content) in self.get_sections("Captured log") + ) + + @property + def capstdout(self) -> str: + """Return captured text from stdout, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stdout") + ) + + @property + def capstderr(self) -> str: + """Return captured text from stderr, if capturing is enabled. + + .. versionadded:: 3.0 + """ + return "".join( + content for (prefix, content) in self.get_sections("Captured stderr") + ) + + @property + def passed(self) -> bool: + """Whether the outcome is passed.""" + return self.outcome == "passed" + + @property + def failed(self) -> bool: + """Whether the outcome is failed.""" + return self.outcome == "failed" + + @property + def skipped(self) -> bool: + """Whether the outcome is skipped.""" + return self.outcome == "skipped" + + @property + def fspath(self) -> str: + """The path portion of the reported node, as a string.""" + return self.nodeid.split("::")[0] + + @property + def count_towards_summary(self) -> bool: + """**Experimental** Whether this report should be counted towards the + totals shown at the end of the test session: "1 passed, 1 failure, etc". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + return True + + @property + def head_line(self) -> str | None: + """**Experimental** The head line shown with longrepr output for this + report, more commonly during traceback representation during + failures:: + + ________ Test.foo ________ + + + In the example above, the head_line is "Test.foo". + + .. note:: + + This function is considered **experimental**, so beware that it is subject to changes + even in patch releases. + """ + if self.location is not None: + fspath, lineno, domain = self.location + return domain + return None + + def _get_verbose_word_with_markup( + self, config: Config, default_markup: Mapping[str, bool] + ) -> tuple[str, Mapping[str, bool]]: + _category, _short, verbose = config.hook.pytest_report_teststatus( + report=self, config=config + ) + + if isinstance(verbose, str): + return verbose, default_markup + + if isinstance(verbose, Sequence) and len(verbose) == 2: + word, markup = verbose + if isinstance(word, str) and isinstance(markup, Mapping): + return word, markup + + fail( # pragma: no cover + "pytest_report_teststatus() hook (from a plugin) returned " + f"an invalid verbose value: {verbose!r}.\nExpected either a string " + "or a tuple of (word, markup)." + ) + + def _to_json(self) -> dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. + """ + return _report_to_json(self) + + @classmethod + def _from_json(cls, reportdict: dict[str, object]) -> Self: + """Create either a TestReport or CollectReport, depending on the calling class. + + It is the callers responsibility to know which class to pass here. + + This was originally the serialize_report() function from xdist (ca03269). + + Experimental method. 
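+
+        For example, a report can be rebuilt from its serialized dict form
+        (a sketch; ``report`` stands in for an existing ``TestReport``)::
+
+            data = report._to_json()
+            restored = TestReport._from_json(data)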
+ """ + kwargs = _report_kwargs_from_json(reportdict) + return cls(**kwargs) + + +def _report_unserialization_failure( + type_name: str, report_class: type[BaseReport], reportdict +) -> NoReturn: + url = "https://github.com/pytest-dev/pytest/issues" + stream = StringIO() + pprint("-" * 100, stream=stream) + pprint(f"INTERNALERROR: Unknown entry type returned: {type_name}", stream=stream) + pprint(f"report_name: {report_class}", stream=stream) + pprint(reportdict, stream=stream) + pprint(f"Please report this bug at {url}", stream=stream) + pprint("-" * 100, stream=stream) + raise RuntimeError(stream.getvalue()) + + +@final +class TestReport(BaseReport): + """Basic test report object (also used for setup and teardown calls if + they fail). + + Reports can contain arbitrary extra attributes. + """ + + __test__ = False + # Defined by skipping plugin. + # xfail reason if xfailed, otherwise not defined. Use hasattr to distinguish. + wasxfail: str + + def __init__( + self, + nodeid: str, + location: tuple[str, int | None, str], + keywords: Mapping[str, Any], + outcome: Literal["passed", "failed", "skipped"], + longrepr: None + | ExceptionInfo[BaseException] + | tuple[str, int, str] + | str + | TerminalRepr, + when: Literal["setup", "call", "teardown"], + sections: Iterable[tuple[str, str]] = (), + duration: float = 0, + start: float = 0, + stop: float = 0, + user_properties: Iterable[tuple[str, object]] | None = None, + **extra, + ) -> None: + #: Normalized collection nodeid. + self.nodeid = nodeid + + #: A (filesystempath, lineno, domaininfo) tuple indicating the + #: actual location of a test item - it might be different from the + #: collected one e.g. if a method is inherited from a different module. + #: The filesystempath may be relative to ``config.rootdir``. + #: The line number is 0-based. + self.location: tuple[str, int | None, str] = location + + #: A name -> value dictionary containing all keywords and + #: markers associated with a test invocation. + self.keywords: Mapping[str, Any] = keywords + + #: Test outcome, always one of "passed", "failed", "skipped". + self.outcome = outcome + + #: None or a failure representation. + self.longrepr = longrepr + + #: One of 'setup', 'call', 'teardown' to indicate runtest phase. + self.when = when + + #: User properties is a list of tuples (name, value) that holds user + #: defined properties of the test. + self.user_properties = list(user_properties or []) + + #: Tuples of str ``(heading, content)`` with extra information + #: for the test report. Used by pytest to add text captured + #: from ``stdout``, ``stderr``, and intercepted logging events. May + #: be used by other plugins to add arbitrary information to reports. + self.sections = list(sections) + + #: Time it took to run just the test. + self.duration: float = duration + + #: The system time when the call started, in seconds since the epoch. + self.start: float = start + #: The system time when the call ended, in seconds since the epoch. + self.stop: float = stop + + self.__dict__.update(extra) + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {self.nodeid!r} when={self.when!r} outcome={self.outcome!r}>" + + @classmethod + def from_item_and_call(cls, item: Item, call: CallInfo[None]) -> TestReport: + """Create and fill a TestReport with standard item and call info. + + :param item: The item. + :param call: The call info. + """ + when = call.when + # Remove "collect" from the Literal type -- only for collection calls. 
+        assert when != "collect"
+        duration = call.duration
+        start = call.start
+        stop = call.stop
+        keywords = {x: 1 for x in item.keywords}
+        excinfo = call.excinfo
+        sections = []
+        if not call.excinfo:
+            outcome: Literal["passed", "failed", "skipped"] = "passed"
+            longrepr: (
+                None
+                | ExceptionInfo[BaseException]
+                | tuple[str, int, str]
+                | str
+                | TerminalRepr
+            ) = None
+        else:
+            if not isinstance(excinfo, ExceptionInfo):
+                outcome = "failed"
+                longrepr = excinfo
+            elif isinstance(excinfo.value, skip.Exception):
+                outcome = "skipped"
+                r = excinfo._getreprcrash()
+                assert (
+                    r is not None
+                ), "There should always be a traceback entry for skipping a test."
+                if excinfo.value._use_item_location:
+                    path, line = item.reportinfo()[:2]
+                    assert line is not None
+                    longrepr = os.fspath(path), line + 1, r.message
+                else:
+                    longrepr = (str(r.path), r.lineno, r.message)
+            else:
+                outcome = "failed"
+                if call.when == "call":
+                    longrepr = item.repr_failure(excinfo)
+                else:  # exception in setup or teardown
+                    longrepr = item._repr_failure_py(
+                        excinfo, style=item.config.getoption("tbstyle", "auto")
+                    )
+        for rwhen, key, content in item._report_sections:
+            sections.append((f"Captured {key} {rwhen}", content))
+        return cls(
+            item.nodeid,
+            item.location,
+            keywords,
+            outcome,
+            longrepr,
+            when,
+            sections,
+            duration,
+            start,
+            stop,
+            user_properties=item.user_properties,
+        )
+
+
+@final
+class CollectReport(BaseReport):
+    """Collection report object.
+
+    Reports can contain arbitrary extra attributes.
+    """
+
+    when = "collect"
+
+    def __init__(
+        self,
+        nodeid: str,
+        outcome: Literal["passed", "failed", "skipped"],
+        longrepr: None
+        | ExceptionInfo[BaseException]
+        | tuple[str, int, str]
+        | str
+        | TerminalRepr,
+        result: list[Item | Collector] | None,
+        sections: Iterable[tuple[str, str]] = (),
+        **extra,
+    ) -> None:
+        #: Normalized collection nodeid.
+        self.nodeid = nodeid
+
+        #: Test outcome, always one of "passed", "failed", "skipped".
+        self.outcome = outcome
+
+        #: None or a failure representation.
+        self.longrepr = longrepr
+
+        #: The collected items and collection nodes.
+        self.result = result or []
+
+        #: Tuples of str ``(heading, content)`` with extra information
+        #: for the test report. Used by pytest to add text captured
+        #: from ``stdout``, ``stderr``, and intercepted logging events. May
+        #: be used by other plugins to add arbitrary information to reports.
+        self.sections = list(sections)
+
+        self.__dict__.update(extra)
+
+    @property
+    def location(  # type:ignore[override]
+        self,
+    ) -> tuple[str, int | None, str] | None:
+        return (self.fspath, None, self.fspath)
+
+    def __repr__(self) -> str:
+        return f"<CollectReport {self.nodeid!r} lenresult={len(self.result)} outcome={self.outcome!r}>"
+
+
+class CollectErrorRepr(TerminalRepr):
+    def __init__(self, msg: str) -> None:
+        self.longrepr = msg
+
+    def toterminal(self, out: TerminalWriter) -> None:
+        out.line(self.longrepr, red=True)
+
+
+def pytest_report_to_serializable(
+    report: CollectReport | TestReport,
+) -> dict[str, Any] | None:
+    if isinstance(report, (TestReport, CollectReport)):
+        data = report._to_json()
+        data["$report_type"] = report.__class__.__name__
+        return data
+    # TODO: Check if this is actually reachable.
+ return None # type: ignore[unreachable] + + +def pytest_report_from_serializable( + data: dict[str, Any], +) -> CollectReport | TestReport | None: + if "$report_type" in data: + if data["$report_type"] == "TestReport": + return TestReport._from_json(data) + elif data["$report_type"] == "CollectReport": + return CollectReport._from_json(data) + assert False, "Unknown report_type unserialize data: {}".format( + data["$report_type"] + ) + return None + + +def _report_to_json(report: BaseReport) -> dict[str, Any]: + """Return the contents of this report as a dict of builtin entries, + suitable for serialization. + + This was originally the serialize_report() function from xdist (ca03269). + """ + + def serialize_repr_entry( + entry: ReprEntry | ReprEntryNative, + ) -> dict[str, Any]: + data = dataclasses.asdict(entry) + for key, value in data.items(): + if hasattr(value, "__dict__"): + data[key] = dataclasses.asdict(value) + entry_data = {"type": type(entry).__name__, "data": data} + return entry_data + + def serialize_repr_traceback(reprtraceback: ReprTraceback) -> dict[str, Any]: + result = dataclasses.asdict(reprtraceback) + result["reprentries"] = [ + serialize_repr_entry(x) for x in reprtraceback.reprentries + ] + return result + + def serialize_repr_crash( + reprcrash: ReprFileLocation | None, + ) -> dict[str, Any] | None: + if reprcrash is not None: + return dataclasses.asdict(reprcrash) + else: + return None + + def serialize_exception_longrepr(rep: BaseReport) -> dict[str, Any]: + assert rep.longrepr is not None + # TODO: Investigate whether the duck typing is really necessary here. + longrepr = cast(ExceptionRepr, rep.longrepr) + result: dict[str, Any] = { + "reprcrash": serialize_repr_crash(longrepr.reprcrash), + "reprtraceback": serialize_repr_traceback(longrepr.reprtraceback), + "sections": longrepr.sections, + } + if isinstance(longrepr, ExceptionChainRepr): + result["chain"] = [] + for repr_traceback, repr_crash, description in longrepr.chain: + result["chain"].append( + ( + serialize_repr_traceback(repr_traceback), + serialize_repr_crash(repr_crash), + description, + ) + ) + else: + result["chain"] = None + return result + + d = report.__dict__.copy() + if hasattr(report.longrepr, "toterminal"): + if hasattr(report.longrepr, "reprtraceback") and hasattr( + report.longrepr, "reprcrash" + ): + d["longrepr"] = serialize_exception_longrepr(report) + else: + d["longrepr"] = str(report.longrepr) + else: + d["longrepr"] = report.longrepr + for name in d: + if isinstance(d[name], os.PathLike): + d[name] = os.fspath(d[name]) + elif name == "result": + d[name] = None # for now + return d + + +def _report_kwargs_from_json(reportdict: dict[str, Any]) -> dict[str, Any]: + """Return **kwargs that can be used to construct a TestReport or + CollectReport instance. + + This was originally the serialize_report() function from xdist (ca03269). 
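+
+    For example, mirroring how ``BaseReport._from_json`` uses this helper
+    (a sketch; ``reportdict`` is a dict produced by ``_report_to_json``)::
+
+        kwargs = _report_kwargs_from_json(reportdict)
+        report = TestReport(**kwargs)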
+ """ + + def deserialize_repr_entry(entry_data): + data = entry_data["data"] + entry_type = entry_data["type"] + if entry_type == "ReprEntry": + reprfuncargs = None + reprfileloc = None + reprlocals = None + if data["reprfuncargs"]: + reprfuncargs = ReprFuncArgs(**data["reprfuncargs"]) + if data["reprfileloc"]: + reprfileloc = ReprFileLocation(**data["reprfileloc"]) + if data["reprlocals"]: + reprlocals = ReprLocals(data["reprlocals"]["lines"]) + + reprentry: ReprEntry | ReprEntryNative = ReprEntry( + lines=data["lines"], + reprfuncargs=reprfuncargs, + reprlocals=reprlocals, + reprfileloc=reprfileloc, + style=data["style"], + ) + elif entry_type == "ReprEntryNative": + reprentry = ReprEntryNative(data["lines"]) + else: + _report_unserialization_failure(entry_type, TestReport, reportdict) + return reprentry + + def deserialize_repr_traceback(repr_traceback_dict): + repr_traceback_dict["reprentries"] = [ + deserialize_repr_entry(x) for x in repr_traceback_dict["reprentries"] + ] + return ReprTraceback(**repr_traceback_dict) + + def deserialize_repr_crash(repr_crash_dict: dict[str, Any] | None): + if repr_crash_dict is not None: + return ReprFileLocation(**repr_crash_dict) + else: + return None + + if ( + reportdict["longrepr"] + and "reprcrash" in reportdict["longrepr"] + and "reprtraceback" in reportdict["longrepr"] + ): + reprtraceback = deserialize_repr_traceback( + reportdict["longrepr"]["reprtraceback"] + ) + reprcrash = deserialize_repr_crash(reportdict["longrepr"]["reprcrash"]) + if reportdict["longrepr"]["chain"]: + chain = [] + for repr_traceback_data, repr_crash_data, description in reportdict[ + "longrepr" + ]["chain"]: + chain.append( + ( + deserialize_repr_traceback(repr_traceback_data), + deserialize_repr_crash(repr_crash_data), + description, + ) + ) + exception_info: ExceptionChainRepr | ReprExceptionInfo = ExceptionChainRepr( + chain + ) + else: + exception_info = ReprExceptionInfo( + reprtraceback=reprtraceback, + reprcrash=reprcrash, + ) + + for section in reportdict["longrepr"]["sections"]: + exception_info.addsection(*section) + reportdict["longrepr"] = exception_info + + return reportdict diff --git a/venv/Lib/site-packages/_pytest/runner.py b/venv/Lib/site-packages/_pytest/runner.py new file mode 100644 index 0000000..0b60301 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/runner.py @@ -0,0 +1,571 @@ +# mypy: allow-untyped-defs +"""Basic collect and runtest protocol implementations.""" + +from __future__ import annotations + +import bdb +import dataclasses +import os +import sys +import types +from typing import Callable +from typing import cast +from typing import final +from typing import Generic +from typing import Literal +from typing import TYPE_CHECKING +from typing import TypeVar + +from .reports import BaseReport +from .reports import CollectErrorRepr +from .reports import CollectReport +from .reports import TestReport +from _pytest import timing +from _pytest._code.code import ExceptionChainRepr +from _pytest._code.code import ExceptionInfo +from _pytest._code.code import TerminalRepr +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.nodes import Collector +from _pytest.nodes import Directory +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.outcomes import Exit +from _pytest.outcomes import OutcomeException +from _pytest.outcomes import Skipped +from _pytest.outcomes import TEST_OUTCOME + + +if sys.version_info < (3, 11): + from exceptiongroup import BaseExceptionGroup + 
+if TYPE_CHECKING: + from _pytest.main import Session + from _pytest.terminal import TerminalReporter + +# +# pytest plugin hooks. + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "Reporting", after="general") + group.addoption( + "--durations", + action="store", + type=int, + default=None, + metavar="N", + help="Show N slowest setup/test durations (N=0 for all)", + ) + group.addoption( + "--durations-min", + action="store", + type=float, + default=0.005, + metavar="N", + help="Minimal duration in seconds for inclusion in slowest list. " + "Default: 0.005.", + ) + + +def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None: + durations = terminalreporter.config.option.durations + durations_min = terminalreporter.config.option.durations_min + verbose = terminalreporter.config.get_verbosity() + if durations is None: + return + tr = terminalreporter + dlist = [] + for replist in tr.stats.values(): + for rep in replist: + if hasattr(rep, "duration"): + dlist.append(rep) + if not dlist: + return + dlist.sort(key=lambda x: x.duration, reverse=True) + if not durations: + tr.write_sep("=", "slowest durations") + else: + tr.write_sep("=", f"slowest {durations} durations") + dlist = dlist[:durations] + + for i, rep in enumerate(dlist): + if verbose < 2 and rep.duration < durations_min: + tr.write_line("") + tr.write_line( + f"({len(dlist) - i} durations < {durations_min:g}s hidden. Use -vv to show these durations.)" + ) + break + tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}") + + +def pytest_sessionstart(session: Session) -> None: + session._setupstate = SetupState() + + +def pytest_sessionfinish(session: Session) -> None: + session._setupstate.teardown_exact(None) + + +def pytest_runtest_protocol(item: Item, nextitem: Item | None) -> bool: + ihook = item.ihook + ihook.pytest_runtest_logstart(nodeid=item.nodeid, location=item.location) + runtestprotocol(item, nextitem=nextitem) + ihook.pytest_runtest_logfinish(nodeid=item.nodeid, location=item.location) + return True + + +def runtestprotocol( + item: Item, log: bool = True, nextitem: Item | None = None +) -> list[TestReport]: + hasrequest = hasattr(item, "_request") + if hasrequest and not item._request: # type: ignore[attr-defined] + # This only happens if the item is re-run, as is done by + # pytest-rerunfailures. + item._initrequest() # type: ignore[attr-defined] + rep = call_and_report(item, "setup", log) + reports = [rep] + if rep.passed: + if item.config.getoption("setupshow", False): + show_test_item(item) + if not item.config.getoption("setuponly", False): + reports.append(call_and_report(item, "call", log)) + # If the session is about to fail or stop, teardown everything - this is + # necessary to correctly report fixture teardown errors (see #11706) + if item.session.shouldfail or item.session.shouldstop: + nextitem = None + reports.append(call_and_report(item, "teardown", log, nextitem=nextitem)) + # After all teardown hooks have been called + # want funcargs and request info to go away. 
+ if hasrequest: + item._request = False # type: ignore[attr-defined] + item.funcargs = None # type: ignore[attr-defined] + return reports + + +def show_test_item(item: Item) -> None: + """Show test function, parameters and the fixtures of the test item.""" + tw = item.config.get_terminal_writer() + tw.line() + tw.write(" " * 8) + tw.write(item.nodeid) + used_fixtures = sorted(getattr(item, "fixturenames", [])) + if used_fixtures: + tw.write(" (fixtures used: {})".format(", ".join(used_fixtures))) + tw.flush() + + +def pytest_runtest_setup(item: Item) -> None: + _update_current_test_var(item, "setup") + item.session._setupstate.setup(item) + + +def pytest_runtest_call(item: Item) -> None: + _update_current_test_var(item, "call") + try: + del sys.last_type + del sys.last_value + del sys.last_traceback + if sys.version_info >= (3, 12, 0): + del sys.last_exc # type:ignore[attr-defined] + except AttributeError: + pass + try: + item.runtest() + except Exception as e: + # Store trace info to allow postmortem debugging + sys.last_type = type(e) + sys.last_value = e + if sys.version_info >= (3, 12, 0): + sys.last_exc = e # type:ignore[attr-defined] + assert e.__traceback__ is not None + # Skip *this* frame + sys.last_traceback = e.__traceback__.tb_next + raise + + +def pytest_runtest_teardown(item: Item, nextitem: Item | None) -> None: + _update_current_test_var(item, "teardown") + item.session._setupstate.teardown_exact(nextitem) + _update_current_test_var(item, None) + + +def _update_current_test_var( + item: Item, when: Literal["setup", "call", "teardown"] | None +) -> None: + """Update :envvar:`PYTEST_CURRENT_TEST` to reflect the current item and stage. + + If ``when`` is None, delete ``PYTEST_CURRENT_TEST`` from the environment. + """ + var_name = "PYTEST_CURRENT_TEST" + if when: + value = f"{item.nodeid} ({when})" + # don't allow null bytes on environment variables (see #2644, #2957) + value = value.replace("\x00", "(null)") + os.environ[var_name] = value + else: + os.environ.pop(var_name) + + +def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None: + if report.when in ("setup", "teardown"): + if report.failed: + # category, shortletter, verbose-word + return "error", "E", "ERROR" + elif report.skipped: + return "skipped", "s", "SKIPPED" + else: + return "", "", "" + return None + + +# +# Implementation + + +def call_and_report( + item: Item, when: Literal["setup", "call", "teardown"], log: bool = True, **kwds +) -> TestReport: + ihook = item.ihook + if when == "setup": + runtest_hook: Callable[..., None] = ihook.pytest_runtest_setup + elif when == "call": + runtest_hook = ihook.pytest_runtest_call + elif when == "teardown": + runtest_hook = ihook.pytest_runtest_teardown + else: + assert False, f"Unhandled runtest hook case: {when}" + reraise: tuple[type[BaseException], ...] = (Exit,) + if not item.config.getoption("usepdb", False): + reraise += (KeyboardInterrupt,) + call = CallInfo.from_call( + lambda: runtest_hook(item=item, **kwds), when=when, reraise=reraise + ) + report: TestReport = ihook.pytest_runtest_makereport(item=item, call=call) + if log: + ihook.pytest_runtest_logreport(report=report) + if check_interactive_exception(call, report): + ihook.pytest_exception_interact(node=item, call=call, report=report) + return report + + +def check_interactive_exception(call: CallInfo[object], report: BaseReport) -> bool: + """Check whether the call raised an exception that should be reported as + interactive.""" + if call.excinfo is None: + # Didn't raise. 
+        return False
+    if hasattr(report, "wasxfail"):
+        # Exception was expected.
+        return False
+    if isinstance(call.excinfo.value, (Skipped, bdb.BdbQuit)):
+        # Special control flow exception.
+        return False
+    return True
+
+
+TResult = TypeVar("TResult", covariant=True)
+
+
+@final
+@dataclasses.dataclass
+class CallInfo(Generic[TResult]):
+    """Result/Exception info of a function invocation."""
+
+    _result: TResult | None
+    #: The captured exception of the call, if it raised.
+    excinfo: ExceptionInfo[BaseException] | None
+    #: The system time when the call started, in seconds since the epoch.
+    start: float
+    #: The system time when the call ended, in seconds since the epoch.
+    stop: float
+    #: The call duration, in seconds.
+    duration: float
+    #: The context of invocation: "collect", "setup", "call" or "teardown".
+    when: Literal["collect", "setup", "call", "teardown"]
+
+    def __init__(
+        self,
+        result: TResult | None,
+        excinfo: ExceptionInfo[BaseException] | None,
+        start: float,
+        stop: float,
+        duration: float,
+        when: Literal["collect", "setup", "call", "teardown"],
+        *,
+        _ispytest: bool = False,
+    ) -> None:
+        check_ispytest(_ispytest)
+        self._result = result
+        self.excinfo = excinfo
+        self.start = start
+        self.stop = stop
+        self.duration = duration
+        self.when = when
+
+    @property
+    def result(self) -> TResult:
+        """The return value of the call, if it didn't raise.
+
+        Can only be accessed if excinfo is None.
+        """
+        if self.excinfo is not None:
+            raise AttributeError(f"{self!r} has no valid result")
+        # The cast is safe because an exception wasn't raised, hence
+        # _result has the expected function return type (which may be
+        # None, that's why a cast and not an assert).
+        return cast(TResult, self._result)
+
+    @classmethod
+    def from_call(
+        cls,
+        func: Callable[[], TResult],
+        when: Literal["collect", "setup", "call", "teardown"],
+        reraise: type[BaseException] | tuple[type[BaseException], ...] | None = None,
+    ) -> CallInfo[TResult]:
+        """Call func, wrapping the result in a CallInfo.
+
+        :param func:
+            The function to call. Called without arguments.
+        :type func: Callable[[], _pytest.runner.TResult]
+        :param when:
+            The phase in which the function is called.
+        :param reraise:
+            Exception or exceptions that shall propagate if raised by the
+            function, instead of being wrapped in the CallInfo.
+        """
+        excinfo = None
+        start = timing.time()
+        precise_start = timing.perf_counter()
+        try:
+            result: TResult | None = func()
+        except BaseException:
+            excinfo = ExceptionInfo.from_current()
+            if reraise is not None and isinstance(excinfo.value, reraise):
+                raise
+            result = None
+        # Use the perf counter for the duration, wall-clock time for start/stop.
+        precise_stop = timing.perf_counter()
+        duration = precise_stop - precise_start
+        stop = timing.time()
+        return cls(
+            start=start,
+            stop=stop,
+            duration=duration,
+            when=when,
+            result=result,
+            excinfo=excinfo,
+            _ispytest=True,
+        )
+
+    def __repr__(self) -> str:
+        if self.excinfo is None:
+            return f"<CallInfo when={self.when!r} result: {self._result!r}>"
+        return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"
+
+
+def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
+    return TestReport.from_item_and_call(item, call)
+
+
+def pytest_make_collect_report(collector: Collector) -> CollectReport:
+    def collect() -> list[Item | Collector]:
+        # Before collecting, if this is a Directory, load the conftests.
+        # If a conftest import fails to load, it is considered a collection
+        # error of the Directory collector. This is why it's done inside of the
+        # CallInfo wrapper.
+        #
+        # Note: initial conftests are loaded early, not here.
+        if isinstance(collector, Directory):
+            collector.config.pluginmanager._loadconftestmodules(
+                collector.path,
+                collector.config.getoption("importmode"),
+                rootpath=collector.config.rootpath,
+                consider_namespace_packages=collector.config.getini(
+                    "consider_namespace_packages"
+                ),
+            )
+
+        return list(collector.collect())
+
+    call = CallInfo.from_call(
+        collect, "collect", reraise=(KeyboardInterrupt, SystemExit)
+    )
+    longrepr: None | tuple[str, int, str] | str | TerminalRepr = None
+    if not call.excinfo:
+        outcome: Literal["passed", "skipped", "failed"] = "passed"
+    else:
+        skip_exceptions = [Skipped]
+        unittest = sys.modules.get("unittest")
+        if unittest is not None:
+            skip_exceptions.append(unittest.SkipTest)
+        if isinstance(call.excinfo.value, tuple(skip_exceptions)):
+            outcome = "skipped"
+            r_ = collector._repr_failure_py(call.excinfo, "line")
+            assert isinstance(r_, ExceptionChainRepr), repr(r_)
+            r = r_.reprcrash
+            assert r
+            longrepr = (str(r.path), r.lineno, r.message)
+        else:
+            outcome = "failed"
+            errorinfo = collector.repr_failure(call.excinfo)
+            if not hasattr(errorinfo, "toterminal"):
+                assert isinstance(errorinfo, str)
+                errorinfo = CollectErrorRepr(errorinfo)
+            longrepr = errorinfo
+    result = call.result if not call.excinfo else None
+    rep = CollectReport(collector.nodeid, outcome, longrepr, result)
+    rep.call = call  # type: ignore # see collect_one_node
+    return rep
+
+
+class SetupState:
+    """Shared state for setting up/tearing down test items or collectors
+    in a session.
+
+    Suppose we have a collection tree as follows:
+
+    <Session session>
+        <Module mod1>
+            <Function item1>
+        <Module mod2>
+            <Function item2>
+
+    The SetupState maintains a stack. The stack starts out empty:
+
+        []
+
+    During the setup phase of item1, setup(item1) is called. What it does
+    is:
+
+        push session to stack, run session.setup()
+        push mod1 to stack, run mod1.setup()
+        push item1 to stack, run item1.setup()
+
+    The stack is:
+
+        [session, mod1, item1]
+
+    While the stack is in this shape, it is allowed to add finalizers to
+    each of session, mod1, item1 using addfinalizer().
+
+    During the teardown phase of item1, teardown_exact(item2) is called,
+    where item2 is the next item to item1. What it does is:
+
+        pop item1 from stack, run its teardowns
+        pop mod1 from stack, run its teardowns
+
+    mod1 was popped because it ended its purpose with item1. The stack is:
+
+        [session]
+
+    During the setup phase of item2, setup(item2) is called. What it does
+    is:
+
+        push mod2 to stack, run mod2.setup()
+        push item2 to stack, run item2.setup()
+
+    Stack:
+
+        [session, mod2, item2]
+
+    During the teardown phase of item2, teardown_exact(None) is called,
+    because item2 is the last item. What it does is:
+
+        pop item2 from stack, run its teardowns
+        pop mod2 from stack, run its teardowns
+        pop session from stack, run its teardowns
+
+    Stack:
+
+        []
+
+    The end!
+    """
+
+    def __init__(self) -> None:
+        # The stack is in the dict insertion order.
+        self.stack: dict[
+            Node,
+            tuple[
+                # Node's finalizers.
+                list[Callable[[], object]],
+                # Node's exception and original traceback, if its setup raised.
+                tuple[OutcomeException | Exception, types.TracebackType | None] | None,
+            ],
+        ] = {}
+
+    def setup(self, item: Item) -> None:
+        """Setup objects along the collector chain to the item."""
+        needed_collectors = item.listchain()
+
+        # If a collector fails its setup, fail its entire subtree of items.
+        # The setup is not retried for each item - the same exception is used.
+ for col, (finalizers, exc) in self.stack.items(): + assert col in needed_collectors, "previous item was not torn down properly" + if exc: + raise exc[0].with_traceback(exc[1]) + + for col in needed_collectors[len(self.stack) :]: + assert col not in self.stack + # Push onto the stack. + self.stack[col] = ([col.teardown], None) + try: + col.setup() + except TEST_OUTCOME as exc: + self.stack[col] = (self.stack[col][0], (exc, exc.__traceback__)) + raise + + def addfinalizer(self, finalizer: Callable[[], object], node: Node) -> None: + """Attach a finalizer to the given node. + + The node must be currently active in the stack. + """ + assert node and not isinstance(node, tuple) + assert callable(finalizer) + assert node in self.stack, (node, self.stack) + self.stack[node][0].append(finalizer) + + def teardown_exact(self, nextitem: Item | None) -> None: + """Teardown the current stack up until reaching nodes that nextitem + also descends from. + + When nextitem is None (meaning we're at the last item), the entire + stack is torn down. + """ + needed_collectors = nextitem and nextitem.listchain() or [] + exceptions: list[BaseException] = [] + while self.stack: + if list(self.stack.keys()) == needed_collectors[: len(self.stack)]: + break + node, (finalizers, _) = self.stack.popitem() + these_exceptions = [] + while finalizers: + fin = finalizers.pop() + try: + fin() + except TEST_OUTCOME as e: + these_exceptions.append(e) + + if len(these_exceptions) == 1: + exceptions.extend(these_exceptions) + elif these_exceptions: + msg = f"errors while tearing down {node!r}" + exceptions.append(BaseExceptionGroup(msg, these_exceptions[::-1])) + + if len(exceptions) == 1: + raise exceptions[0] + elif exceptions: + raise BaseExceptionGroup("errors during test teardown", exceptions[::-1]) + if nextitem is None: + assert not self.stack + + +def collect_one_node(collector: Collector) -> CollectReport: + ihook = collector.ihook + ihook.pytest_collectstart(collector=collector) + rep: CollectReport = ihook.pytest_make_collect_report(collector=collector) + call = rep.__dict__.pop("call", None) + if call and check_interactive_exception(call, rep): + ihook.pytest_exception_interact(node=collector, call=call, report=rep) + return rep diff --git a/venv/Lib/site-packages/_pytest/scope.py b/venv/Lib/site-packages/_pytest/scope.py new file mode 100644 index 0000000..976a3ba --- /dev/null +++ b/venv/Lib/site-packages/_pytest/scope.py @@ -0,0 +1,91 @@ +""" +Scope definition and related utilities. + +Those are defined here, instead of in the 'fixtures' module because +their use is spread across many other pytest modules, and centralizing it in 'fixtures' +would cause circular references. + +Also this makes the module light to import, as it should. +""" + +from __future__ import annotations + +from enum import Enum +from functools import total_ordering +from typing import Literal + + +_ScopeName = Literal["session", "package", "module", "class", "function"] + + +@total_ordering +class Scope(Enum): + """ + Represents one of the possible fixture scopes in pytest. + + Scopes are ordered from lower to higher, that is: + + ->>> higher ->>> + + Function < Class < Module < Package < Session + + <<<- lower <<<- + """ + + # Scopes need to be listed from lower to higher. 
+ Function: _ScopeName = "function" + Class: _ScopeName = "class" + Module: _ScopeName = "module" + Package: _ScopeName = "package" + Session: _ScopeName = "session" + + def next_lower(self) -> Scope: + """Return the next lower scope.""" + index = _SCOPE_INDICES[self] + if index == 0: + raise ValueError(f"{self} is the lower-most scope") + return _ALL_SCOPES[index - 1] + + def next_higher(self) -> Scope: + """Return the next higher scope.""" + index = _SCOPE_INDICES[self] + if index == len(_SCOPE_INDICES) - 1: + raise ValueError(f"{self} is the upper-most scope") + return _ALL_SCOPES[index + 1] + + def __lt__(self, other: Scope) -> bool: + self_index = _SCOPE_INDICES[self] + other_index = _SCOPE_INDICES[other] + return self_index < other_index + + @classmethod + def from_user( + cls, scope_name: _ScopeName, descr: str, where: str | None = None + ) -> Scope: + """ + Given a scope name from the user, return the equivalent Scope enum. Should be used + whenever we want to convert a user provided scope name to its enum object. + + If the scope name is invalid, construct a user friendly message and call pytest.fail. + """ + from _pytest.outcomes import fail + + try: + # Holding this reference is necessary for mypy at the moment. + scope = Scope(scope_name) + except ValueError: + fail( + "{} {}got an unexpected scope value '{}'".format( + descr, f"from {where} " if where else "", scope_name + ), + pytrace=False, + ) + return scope + + +_ALL_SCOPES = list(Scope) +_SCOPE_INDICES = {scope: index for index, scope in enumerate(_ALL_SCOPES)} + + +# Ordered list of scopes which can contain many tests (in practice all except Function). +HIGH_SCOPES = [x for x in Scope if x is not Scope.Function] diff --git a/venv/Lib/site-packages/_pytest/setuponly.py b/venv/Lib/site-packages/_pytest/setuponly.py new file mode 100644 index 0000000..de297f4 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/setuponly.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +from typing import Generator + +from _pytest._io.saferepr import saferepr +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest +from _pytest.scope import Scope +import pytest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setuponly", + "--setup-only", + action="store_true", + help="Only setup fixtures, do not execute tests", + ) + group.addoption( + "--setupshow", + "--setup-show", + action="store_true", + help="Show setup of fixtures while executing tests", + ) + + +@pytest.hookimpl(wrapper=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> Generator[None, object, object]: + try: + return (yield) + finally: + if request.config.option.setupshow: + if hasattr(request, "param"): + # Save the fixture parameter so ._show_fixture_action() can + # display it now and during the teardown (in .finish()). 
+ if fixturedef.ids: + if callable(fixturedef.ids): + param = fixturedef.ids(request.param) + else: + param = fixturedef.ids[request.param_index] + else: + param = request.param + fixturedef.cached_param = param # type: ignore[attr-defined] + _show_fixture_action(fixturedef, request.config, "SETUP") + + +def pytest_fixture_post_finalizer( + fixturedef: FixtureDef[object], request: SubRequest +) -> None: + if fixturedef.cached_result is not None: + config = request.config + if config.option.setupshow: + _show_fixture_action(fixturedef, request.config, "TEARDOWN") + if hasattr(fixturedef, "cached_param"): + del fixturedef.cached_param + + +def _show_fixture_action( + fixturedef: FixtureDef[object], config: Config, msg: str +) -> None: + capman = config.pluginmanager.getplugin("capturemanager") + if capman: + capman.suspend_global_capture() + + tw = config.get_terminal_writer() + tw.line() + # Use smaller indentation the higher the scope: Session = 0, Package = 1, etc. + scope_indent = list(reversed(Scope)).index(fixturedef._scope) + tw.write(" " * 2 * scope_indent) + tw.write( + "{step} {scope} {fixture}".format( # noqa: UP032 (Readability) + step=msg.ljust(8), # align the output to TEARDOWN + scope=fixturedef.scope[0].upper(), + fixture=fixturedef.argname, + ) + ) + + if msg == "SETUP": + deps = sorted(arg for arg in fixturedef.argnames if arg != "request") + if deps: + tw.write(" (fixtures used: {})".format(", ".join(deps))) + + if hasattr(fixturedef, "cached_param"): + tw.write(f"[{saferepr(fixturedef.cached_param, maxsize=42)}]") + + tw.flush() + + if capman: + capman.resume_global_capture() + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.setuponly: + config.option.setupshow = True + return None diff --git a/venv/Lib/site-packages/_pytest/setupplan.py b/venv/Lib/site-packages/_pytest/setupplan.py new file mode 100644 index 0000000..4e124cc --- /dev/null +++ b/venv/Lib/site-packages/_pytest/setupplan.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config.argparsing import Parser +from _pytest.fixtures import FixtureDef +from _pytest.fixtures import SubRequest +import pytest + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("debugconfig") + group.addoption( + "--setupplan", + "--setup-plan", + action="store_true", + help="Show what fixtures and tests would be executed but " + "don't execute anything", + ) + + +@pytest.hookimpl(tryfirst=True) +def pytest_fixture_setup( + fixturedef: FixtureDef[object], request: SubRequest +) -> object | None: + # Will return a dummy fixture if the setuponly option is provided. 
+ if request.config.option.setupplan: + my_cache_key = fixturedef.cache_key(request) + fixturedef.cached_result = (None, my_cache_key, None) + return fixturedef.cached_result + return None + + +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config: Config) -> int | ExitCode | None: + if config.option.setupplan: + config.option.setuponly = True + config.option.setupshow = True + return None diff --git a/venv/Lib/site-packages/_pytest/skipping.py b/venv/Lib/site-packages/_pytest/skipping.py new file mode 100644 index 0000000..9818be2 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/skipping.py @@ -0,0 +1,301 @@ +# mypy: allow-untyped-defs +"""Support for skip/xfail functions and markers.""" + +from __future__ import annotations + +from collections.abc import Mapping +import dataclasses +import os +import platform +import sys +import traceback +from typing import Generator +from typing import Optional + +from _pytest.config import Config +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.mark.structures import Mark +from _pytest.nodes import Item +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.reports import BaseReport +from _pytest.reports import TestReport +from _pytest.runner import CallInfo +from _pytest.stash import StashKey + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--runxfail", + action="store_true", + dest="runxfail", + default=False, + help="Report the results of xfail tests as if they were not marked", + ) + + parser.addini( + "xfail_strict", + "Default for the strict parameter of xfail " + "markers when not given explicitly (default: False)", + default=False, + type="bool", + ) + + +def pytest_configure(config: Config) -> None: + if config.option.runxfail: + # yay a hack + import pytest + + old = pytest.xfail + config.add_cleanup(lambda: setattr(pytest, "xfail", old)) + + def nop(*args, **kwargs): + pass + + nop.Exception = xfail.Exception # type: ignore[attr-defined] + setattr(pytest, "xfail", nop) + + config.addinivalue_line( + "markers", + "skip(reason=None): skip the given test function with an optional reason. " + 'Example: skip(reason="no way of currently testing this") skips the ' + "test.", + ) + config.addinivalue_line( + "markers", + "skipif(condition, ..., *, reason=...): " + "skip the given test function if any of the conditions evaluate to True. " + "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. " + "See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-skipif", + ) + config.addinivalue_line( + "markers", + "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): " + "mark the test function as an expected failure if any of the conditions " + "evaluate to True. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. See https://docs.pytest.org/en/stable/reference/reference.html#pytest-mark-xfail", + ) + + +def evaluate_condition(item: Item, mark: Mark, condition: object) -> tuple[bool, str]: + """Evaluate a single skipif/xfail condition. + + If an old-style string condition is given, it is eval()'d, otherwise the + condition is bool()'d. 
If this fails, an appropriately formatted pytest.fail + is raised. + + Returns (result, reason). The reason is only relevant if the result is True. + """ + # String condition. + if isinstance(condition, str): + globals_ = { + "os": os, + "sys": sys, + "platform": platform, + "config": item.config, + } + for dictionary in reversed( + item.ihook.pytest_markeval_namespace(config=item.config) + ): + if not isinstance(dictionary, Mapping): + raise ValueError( + f"pytest_markeval_namespace() needs to return a dict, got {dictionary!r}" + ) + globals_.update(dictionary) + if hasattr(item, "obj"): + globals_.update(item.obj.__globals__) + try: + filename = f"<{mark.name} condition>" + condition_code = compile(condition, filename, "eval") + result = eval(condition_code, globals_) + except SyntaxError as exc: + msglines = [ + f"Error evaluating {mark.name!r} condition", + " " + condition, + " " + " " * (exc.offset or 0) + "^", + "SyntaxError: invalid syntax", + ] + fail("\n".join(msglines), pytrace=False) + except Exception as exc: + msglines = [ + f"Error evaluating {mark.name!r} condition", + " " + condition, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + # Boolean condition. + else: + try: + result = bool(condition) + except Exception as exc: + msglines = [ + f"Error evaluating {mark.name!r} condition as a boolean", + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + reason = mark.kwargs.get("reason", None) + if reason is None: + if isinstance(condition, str): + reason = "condition: " + condition + else: + # XXX better be checked at collection time + msg = ( + f"Error evaluating {mark.name!r}: " + + "you need to specify reason=STRING when using booleans as conditions." + ) + fail(msg, pytrace=False) + + return result, reason + + +@dataclasses.dataclass(frozen=True) +class Skip: + """The result of evaluate_skip_marks().""" + + reason: str = "unconditional skip" + + +def evaluate_skip_marks(item: Item) -> Skip | None: + """Evaluate skip and skipif marks on item, returning Skip if triggered.""" + for mark in item.iter_markers(name="skipif"): + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. + if not conditions: + reason = mark.kwargs.get("reason", "") + return Skip(reason) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Skip(reason) + + for mark in item.iter_markers(name="skip"): + try: + return Skip(*mark.args, **mark.kwargs) + except TypeError as e: + raise TypeError(str(e) + " - maybe you meant pytest.mark.skipif?") from None + + return None + + +@dataclasses.dataclass(frozen=True) +class Xfail: + """The result of evaluate_xfail_marks().""" + + __slots__ = ("reason", "run", "strict", "raises") + + reason: str + run: bool + strict: bool + raises: tuple[type[BaseException], ...] | None + + +def evaluate_xfail_marks(item: Item) -> Xfail | None: + """Evaluate xfail marks on item, returning Xfail if triggered.""" + for mark in item.iter_markers(name="xfail"): + run = mark.kwargs.get("run", True) + strict = mark.kwargs.get("strict", item.config.getini("xfail_strict")) + raises = mark.kwargs.get("raises", None) + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. 
+ if not conditions: + reason = mark.kwargs.get("reason", "") + return Xfail(reason, run, strict, raises) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Xfail(reason, run, strict, raises) + + return None + + +# Saves the xfail mark evaluation. Can be refreshed during call if None. +xfailed_key = StashKey[Optional[Xfail]]() + + +@hookimpl(tryfirst=True) +def pytest_runtest_setup(item: Item) -> None: + skipped = evaluate_skip_marks(item) + if skipped: + raise skip.Exception(skipped.reason, _use_item_location=True) + + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + +@hookimpl(wrapper=True) +def pytest_runtest_call(item: Item) -> Generator[None]: + xfailed = item.stash.get(xfailed_key, None) + if xfailed is None: + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + if xfailed and not item.config.option.runxfail and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + + try: + return (yield) + finally: + # The test run may have added an xfail mark dynamically. + xfailed = item.stash.get(xfailed_key, None) + if xfailed is None: + item.stash[xfailed_key] = xfailed = evaluate_xfail_marks(item) + + +@hookimpl(wrapper=True) +def pytest_runtest_makereport( + item: Item, call: CallInfo[None] +) -> Generator[None, TestReport, TestReport]: + rep = yield + xfailed = item.stash.get(xfailed_key, None) + if item.config.option.runxfail: + pass # don't interfere + elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception): + assert call.excinfo.value.msg is not None + rep.wasxfail = "reason: " + call.excinfo.value.msg + rep.outcome = "skipped" + elif not rep.skipped and xfailed: + if call.excinfo: + raises = xfailed.raises + if raises is not None and not isinstance(call.excinfo.value, raises): + rep.outcome = "failed" + else: + rep.outcome = "skipped" + rep.wasxfail = xfailed.reason + elif call.when == "call": + if xfailed.strict: + rep.outcome = "failed" + rep.longrepr = "[XPASS(strict)] " + xfailed.reason + else: + rep.outcome = "passed" + rep.wasxfail = xfailed.reason + return rep + + +def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str] | None: + if hasattr(report, "wasxfail"): + if report.skipped: + return "xfailed", "x", "XFAIL" + elif report.passed: + return "xpassed", "X", "XPASS" + return None diff --git a/venv/Lib/site-packages/_pytest/stash.py b/venv/Lib/site-packages/_pytest/stash.py new file mode 100644 index 0000000..6a9ff88 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/stash.py @@ -0,0 +1,116 @@ +from __future__ import annotations + +from typing import Any +from typing import cast +from typing import Generic +from typing import TypeVar + + +__all__ = ["Stash", "StashKey"] + + +T = TypeVar("T") +D = TypeVar("D") + + +class StashKey(Generic[T]): + """``StashKey`` is an object used as a key to a :class:`Stash`. + + A ``StashKey`` is associated with the type ``T`` of the value of the key. + + A ``StashKey`` is unique and cannot conflict with another key. + + .. versionadded:: 7.0 + """ + + __slots__ = () + + +class Stash: + r"""``Stash`` is a type-safe heterogeneous mutable mapping that + allows keys and value types to be defined separately from + where it (the ``Stash``) is created. 
+ + Usually you will be given an object which has a ``Stash``, for example + :class:`~pytest.Config` or a :class:`~_pytest.nodes.Node`: + + .. code-block:: python + + stash: Stash = some_object.stash + + If a module or plugin wants to store data in this ``Stash``, it creates + :class:`StashKey`\s for its keys (at the module level): + + .. code-block:: python + + # At the top-level of the module + some_str_key = StashKey[str]() + some_bool_key = StashKey[bool]() + + To store information: + + .. code-block:: python + + # Value type must match the key. + stash[some_str_key] = "value" + stash[some_bool_key] = True + + To retrieve the information: + + .. code-block:: python + + # The static type of some_str is str. + some_str = stash[some_str_key] + # The static type of some_bool is bool. + some_bool = stash[some_bool_key] + + .. versionadded:: 7.0 + """ + + __slots__ = ("_storage",) + + def __init__(self) -> None: + self._storage: dict[StashKey[Any], object] = {} + + def __setitem__(self, key: StashKey[T], value: T) -> None: + """Set a value for key.""" + self._storage[key] = value + + def __getitem__(self, key: StashKey[T]) -> T: + """Get the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + return cast(T, self._storage[key]) + + def get(self, key: StashKey[T], default: D) -> T | D: + """Get the value for key, or return default if the key wasn't set + before.""" + try: + return self[key] + except KeyError: + return default + + def setdefault(self, key: StashKey[T], default: T) -> T: + """Return the value of key if already set, otherwise set the value + of key to default and return default.""" + try: + return self[key] + except KeyError: + self[key] = default + return default + + def __delitem__(self, key: StashKey[T]) -> None: + """Delete the value for key. + + Raises ``KeyError`` if the key wasn't set before. + """ + del self._storage[key] + + def __contains__(self, key: StashKey[T]) -> bool: + """Return whether key was set.""" + return key in self._storage + + def __len__(self) -> int: + """Return how many items exist in the stash.""" + return len(self._storage) diff --git a/venv/Lib/site-packages/_pytest/stepwise.py b/venv/Lib/site-packages/_pytest/stepwise.py new file mode 100644 index 0000000..c786080 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/stepwise.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +from _pytest import nodes +from _pytest.cacheprovider import Cache +from _pytest.config import Config +from _pytest.config.argparsing import Parser +from _pytest.main import Session +from _pytest.reports import TestReport + + +STEPWISE_CACHE_DIR = "cache/stepwise" + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("general") + group.addoption( + "--sw", + "--stepwise", + action="store_true", + default=False, + dest="stepwise", + help="Exit on test failure and continue from last failing test next time", + ) + group.addoption( + "--sw-skip", + "--stepwise-skip", + action="store_true", + default=False, + dest="stepwise_skip", + help="Ignore the first failing test but stop on the next failing test. " + "Implicitly enables --stepwise.", + ) + + +def pytest_configure(config: Config) -> None: + if config.option.stepwise_skip: + # allow --stepwise-skip to work on its own merits. 
+ config.option.stepwise = True + if config.getoption("stepwise"): + config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin") + + +def pytest_sessionfinish(session: Session) -> None: + if not session.config.getoption("stepwise"): + assert session.config.cache is not None + if hasattr(session.config, "workerinput"): + # Do not update cache if this process is a xdist worker to prevent + # race conditions (#10641). + return + # Clear the list of failing tests if the plugin is not active. + session.config.cache.set(STEPWISE_CACHE_DIR, []) + + +class StepwisePlugin: + def __init__(self, config: Config) -> None: + self.config = config + self.session: Session | None = None + self.report_status = "" + assert config.cache is not None + self.cache: Cache = config.cache + self.lastfailed: str | None = self.cache.get(STEPWISE_CACHE_DIR, None) + self.skip: bool = config.getoption("stepwise_skip") + + def pytest_sessionstart(self, session: Session) -> None: + self.session = session + + def pytest_collection_modifyitems( + self, config: Config, items: list[nodes.Item] + ) -> None: + if not self.lastfailed: + self.report_status = "no previously failed tests, not skipping." + return + + # check all item nodes until we find a match on last failed + failed_index = None + for index, item in enumerate(items): + if item.nodeid == self.lastfailed: + failed_index = index + break + + # If the previously failed test was not found among the test items, + # do not skip any tests. + if failed_index is None: + self.report_status = "previously failed test not found, not skipping." + else: + self.report_status = f"skipping {failed_index} already passed items." + deselected = items[:failed_index] + del items[:failed_index] + config.hook.pytest_deselected(items=deselected) + + def pytest_runtest_logreport(self, report: TestReport) -> None: + if report.failed: + if self.skip: + # Remove test from the failed ones (if it exists) and unset the skip option + # to make sure the following tests will not be skipped. + if report.nodeid == self.lastfailed: + self.lastfailed = None + + self.skip = False + else: + # Mark test as the last failing and interrupt the test session. + self.lastfailed = report.nodeid + assert self.session is not None + self.session.shouldstop = ( + "Test failed, continuing from this test next run." + ) + + else: + # If the test was actually run and did pass. + if report.when == "call": + # Remove test from the failed ones, if exists. + if report.nodeid == self.lastfailed: + self.lastfailed = None + + def pytest_report_collectionfinish(self) -> str | None: + if self.config.get_verbosity() >= 0 and self.report_status: + return f"stepwise: {self.report_status}" + return None + + def pytest_sessionfinish(self) -> None: + if hasattr(self.config, "workerinput"): + # Do not update cache if this process is a xdist worker to prevent + # race conditions (#10641). + return + self.cache.set(STEPWISE_CACHE_DIR, self.lastfailed) diff --git a/venv/Lib/site-packages/_pytest/terminal.py b/venv/Lib/site-packages/_pytest/terminal.py new file mode 100644 index 0000000..ed267bf --- /dev/null +++ b/venv/Lib/site-packages/_pytest/terminal.py @@ -0,0 +1,1577 @@ +# mypy: allow-untyped-defs +"""Terminal reporting of the full testing process. + +This is a good source for looking at the various reporting hooks. 
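+
+For example, ``pytest_report_teststatus`` below maps each test report to the
+short status letter (".", "s", "F", "E") shown while a session runs.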
+""" + +from __future__ import annotations + +import argparse +from collections import Counter +import dataclasses +import datetime +from functools import partial +import inspect +from pathlib import Path +import platform +import sys +import textwrap +from typing import Any +from typing import Callable +from typing import ClassVar +from typing import final +from typing import Generator +from typing import Literal +from typing import Mapping +from typing import NamedTuple +from typing import Sequence +from typing import TextIO +from typing import TYPE_CHECKING +import warnings + +import pluggy + +from _pytest import nodes +from _pytest import timing +from _pytest._code import ExceptionInfo +from _pytest._code.code import ExceptionRepr +from _pytest._io import TerminalWriter +from _pytest._io.wcwidth import wcswidth +import _pytest._version +from _pytest.assertion.util import running_on_ci +from _pytest.config import _PluggyPlugin +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.nodes import Item +from _pytest.nodes import Node +from _pytest.pathlib import absolutepath +from _pytest.pathlib import bestrelpath +from _pytest.reports import BaseReport +from _pytest.reports import CollectReport +from _pytest.reports import TestReport + + +if TYPE_CHECKING: + from _pytest.main import Session + + +REPORT_COLLECTING_RESOLUTION = 0.5 + +KNOWN_TYPES = ( + "failed", + "passed", + "skipped", + "deselected", + "xfailed", + "xpassed", + "warnings", + "error", +) + +_REPORTCHARS_DEFAULT = "fE" + + +class MoreQuietAction(argparse.Action): + """A modified copy of the argparse count action which counts down and updates + the legacy quiet attribute at the same time. + + Used to unify verbosity handling. + """ + + def __init__( + self, + option_strings: Sequence[str], + dest: str, + default: object = None, + required: bool = False, + help: str | None = None, + ) -> None: + super().__init__( + option_strings=option_strings, + dest=dest, + nargs=0, + default=default, + required=required, + help=help, + ) + + def __call__( + self, + parser: argparse.ArgumentParser, + namespace: argparse.Namespace, + values: str | Sequence[object] | None, + option_string: str | None = None, + ) -> None: + new_count = getattr(namespace, self.dest, 0) - 1 + setattr(namespace, self.dest, new_count) + # todo Deprecate config.quiet + namespace.quiet = getattr(namespace, "quiet", 0) + 1 + + +class TestShortLogReport(NamedTuple): + """Used to store the test status result category, shortletter and verbose word. + For example ``"rerun", "R", ("RERUN", {"yellow": True})``. + + :ivar category: + The class of result, for example ``“passed”``, ``“skipped”``, ``“error”``, or the empty string. + + :ivar letter: + The short letter shown as testing progresses, for example ``"."``, ``"s"``, ``"E"``, or the empty string. + + :ivar word: + Verbose word is shown as testing progresses in verbose mode, for example ``"PASSED"``, ``"SKIPPED"``, + ``"ERROR"``, or the empty string. 
+ """ + + category: str + letter: str + word: str | tuple[str, Mapping[str, bool]] + + +def pytest_addoption(parser: Parser) -> None: + group = parser.getgroup("terminal reporting", "Reporting", after="general") + group._addoption( + "-v", + "--verbose", + action="count", + default=0, + dest="verbose", + help="Increase verbosity", + ) + group._addoption( + "--no-header", + action="store_true", + default=False, + dest="no_header", + help="Disable header", + ) + group._addoption( + "--no-summary", + action="store_true", + default=False, + dest="no_summary", + help="Disable summary", + ) + group._addoption( + "--no-fold-skipped", + action="store_false", + dest="fold_skipped", + default=True, + help="Do not fold skipped tests in short summary.", + ) + group._addoption( + "-q", + "--quiet", + action=MoreQuietAction, + default=0, + dest="verbose", + help="Decrease verbosity", + ) + group._addoption( + "--verbosity", + dest="verbose", + type=int, + default=0, + help="Set verbosity. Default: 0.", + ) + group._addoption( + "-r", + action="store", + dest="reportchars", + default=_REPORTCHARS_DEFAULT, + metavar="chars", + help="Show extra test summary info as specified by chars: (f)ailed, " + "(E)rror, (s)kipped, (x)failed, (X)passed, " + "(p)assed, (P)assed with output, (a)ll except passed (p/P), or (A)ll. " + "(w)arnings are enabled by default (see --disable-warnings), " + "'N' can be used to reset the list. (default: 'fE').", + ) + group._addoption( + "--disable-warnings", + "--disable-pytest-warnings", + default=False, + dest="disable_warnings", + action="store_true", + help="Disable warnings summary", + ) + group._addoption( + "-l", + "--showlocals", + action="store_true", + dest="showlocals", + default=False, + help="Show locals in tracebacks (disabled by default)", + ) + group._addoption( + "--no-showlocals", + action="store_false", + dest="showlocals", + help="Hide locals in tracebacks (negate --showlocals passed through addopts)", + ) + group._addoption( + "--tb", + metavar="style", + action="store", + dest="tbstyle", + default="auto", + choices=["auto", "long", "short", "no", "line", "native"], + help="Traceback print mode (auto/long/short/line/native/no)", + ) + group._addoption( + "--xfail-tb", + action="store_true", + dest="xfail_tb", + default=False, + help="Show tracebacks for xfail (as long as --tb != no)", + ) + group._addoption( + "--show-capture", + action="store", + dest="showcapture", + choices=["no", "stdout", "stderr", "log", "all"], + default="all", + help="Controls how captured stdout/stderr/log is shown on failed tests. " + "Default: all.", + ) + group._addoption( + "--fulltrace", + "--full-trace", + action="store_true", + default=False, + help="Don't cut any tracebacks (default is to cut)", + ) + group._addoption( + "--color", + metavar="color", + action="store", + dest="color", + default="auto", + choices=["yes", "no", "auto"], + help="Color terminal output (yes/no/auto)", + ) + group._addoption( + "--code-highlight", + default="yes", + choices=["yes", "no"], + help="Whether code should be highlighted (only if --color is also enabled). 
" + "Default: yes.", + ) + + parser.addini( + "console_output_style", + help='Console output: "classic", or with additional progress information ' + '("progress" (percentage) | "count" | "progress-even-when-capture-no" (forces ' + "progress even when capture=no)", + default="progress", + ) + Config._add_verbosity_ini( + parser, + Config.VERBOSITY_TEST_CASES, + help=( + "Specify a verbosity level for test case execution, overriding the main level. " + "Higher levels will provide more detailed information about each test case executed." + ), + ) + + +def pytest_configure(config: Config) -> None: + reporter = TerminalReporter(config, sys.stdout) + config.pluginmanager.register(reporter, "terminalreporter") + if config.option.debug or config.option.traceconfig: + + def mywriter(tags, args): + msg = " ".join(map(str, args)) + reporter.write_line("[traceconfig] " + msg) + + config.trace.root.setprocessor("pytest:config", mywriter) + + +def getreportopt(config: Config) -> str: + reportchars: str = config.option.reportchars + + old_aliases = {"F", "S"} + reportopts = "" + for char in reportchars: + if char in old_aliases: + char = char.lower() + if char == "a": + reportopts = "sxXEf" + elif char == "A": + reportopts = "PpsxXEf" + elif char == "N": + reportopts = "" + elif char not in reportopts: + reportopts += char + + if not config.option.disable_warnings and "w" not in reportopts: + reportopts = "w" + reportopts + elif config.option.disable_warnings and "w" in reportopts: + reportopts = reportopts.replace("w", "") + + return reportopts + + +@hookimpl(trylast=True) # after _pytest.runner +def pytest_report_teststatus(report: BaseReport) -> tuple[str, str, str]: + letter = "F" + if report.passed: + letter = "." + elif report.skipped: + letter = "s" + + outcome: str = report.outcome + if report.when in ("collect", "setup", "teardown") and outcome == "failed": + outcome = "error" + letter = "E" + + return outcome, letter, outcome.upper() + + +@dataclasses.dataclass +class WarningReport: + """Simple structure to hold warnings information captured by ``pytest_warning_recorded``. + + :ivar str message: + User friendly message about the warning. + :ivar str|None nodeid: + nodeid that generated the warning (see ``get_location``). + :ivar tuple fslocation: + File system location of the source of the warning (see ``get_location``). 
+ """ + + message: str + nodeid: str | None = None + fslocation: tuple[str, int] | None = None + + count_towards_summary: ClassVar = True + + def get_location(self, config: Config) -> str | None: + """Return the more user-friendly information about the location of a warning, or None.""" + if self.nodeid: + return self.nodeid + if self.fslocation: + filename, linenum = self.fslocation + relpath = bestrelpath(config.invocation_params.dir, absolutepath(filename)) + return f"{relpath}:{linenum}" + return None + + +@final +class TerminalReporter: + def __init__(self, config: Config, file: TextIO | None = None) -> None: + import _pytest.config + + self.config = config + self._numcollected = 0 + self._session: Session | None = None + self._showfspath: bool | None = None + + self.stats: dict[str, list[Any]] = {} + self._main_color: str | None = None + self._known_types: list[str] | None = None + self.startpath = config.invocation_params.dir + if file is None: + file = sys.stdout + self._tw = _pytest.config.create_terminal_writer(config, file) + self._screen_width = self._tw.fullwidth + self.currentfspath: None | Path | str | int = None + self.reportchars = getreportopt(config) + self.foldskipped = config.option.fold_skipped + self.hasmarkup = self._tw.hasmarkup + self.isatty = file.isatty() + self._progress_nodeids_reported: set[str] = set() + self._show_progress_info = self._determine_show_progress_info() + self._collect_report_last_write: float | None = None + self._already_displayed_warnings: int | None = None + self._keyboardinterrupt_memo: ExceptionRepr | None = None + + def _determine_show_progress_info(self) -> Literal["progress", "count", False]: + """Return whether we should display progress information based on the current config.""" + # do not show progress if we are not capturing output (#3038) unless explicitly + # overridden by progress-even-when-capture-no + if ( + self.config.getoption("capture", "no") == "no" + and self.config.getini("console_output_style") + != "progress-even-when-capture-no" + ): + return False + # do not show progress if we are showing fixture setup/teardown + if self.config.getoption("setupshow", False): + return False + cfg: str = self.config.getini("console_output_style") + if cfg in {"progress", "progress-even-when-capture-no"}: + return "progress" + elif cfg == "count": + return "count" + else: + return False + + @property + def verbosity(self) -> int: + verbosity: int = self.config.option.verbose + return verbosity + + @property + def showheader(self) -> bool: + return self.verbosity >= 0 + + @property + def no_header(self) -> bool: + return bool(self.config.option.no_header) + + @property + def no_summary(self) -> bool: + return bool(self.config.option.no_summary) + + @property + def showfspath(self) -> bool: + if self._showfspath is None: + return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) >= 0 + return self._showfspath + + @showfspath.setter + def showfspath(self, value: bool | None) -> None: + self._showfspath = value + + @property + def showlongtestinfo(self) -> bool: + return self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) > 0 + + def hasopt(self, char: str) -> bool: + char = {"xfailed": "x", "skipped": "s"}.get(char, char) + return char in self.reportchars + + def write_fspath_result(self, nodeid: str, res: str, **markup: bool) -> None: + fspath = self.config.rootpath / nodeid.split("::")[0] + if self.currentfspath is None or fspath != self.currentfspath: + if self.currentfspath is not None and self._show_progress_info: + 
self._write_progress_information_filling_space() + self.currentfspath = fspath + relfspath = bestrelpath(self.startpath, fspath) + self._tw.line() + self._tw.write(relfspath + " ") + self._tw.write(res, flush=True, **markup) + + def write_ensure_prefix(self, prefix: str, extra: str = "", **kwargs) -> None: + if self.currentfspath != prefix: + self._tw.line() + self.currentfspath = prefix + self._tw.write(prefix) + if extra: + self._tw.write(extra, **kwargs) + self.currentfspath = -2 + + def ensure_newline(self) -> None: + if self.currentfspath: + self._tw.line() + self.currentfspath = None + + def wrap_write( + self, + content: str, + *, + flush: bool = False, + margin: int = 8, + line_sep: str = "\n", + **markup: bool, + ) -> None: + """Wrap message with margin for progress info.""" + width_of_current_line = self._tw.width_of_current_line + wrapped = line_sep.join( + textwrap.wrap( + " " * width_of_current_line + content, + width=self._screen_width - margin, + drop_whitespace=True, + replace_whitespace=False, + ), + ) + wrapped = wrapped[width_of_current_line:] + self._tw.write(wrapped, flush=flush, **markup) + + def write(self, content: str, *, flush: bool = False, **markup: bool) -> None: + self._tw.write(content, flush=flush, **markup) + + def flush(self) -> None: + self._tw.flush() + + def write_line(self, line: str | bytes, **markup: bool) -> None: + if not isinstance(line, str): + line = str(line, errors="replace") + self.ensure_newline() + self._tw.line(line, **markup) + + def rewrite(self, line: str, **markup: bool) -> None: + """Rewinds the terminal cursor to the beginning and writes the given line. + + :param erase: + If True, will also add spaces until the full terminal width to ensure + previous lines are properly erased. + + The rest of the keyword arguments are markup instructions. + """ + erase = markup.pop("erase", False) + if erase: + fill_count = self._tw.fullwidth - len(line) - 1 + fill = " " * fill_count + else: + fill = "" + line = str(line) + self._tw.write("\r" + line + fill, **markup) + + def write_sep( + self, + sep: str, + title: str | None = None, + fullwidth: int | None = None, + **markup: bool, + ) -> None: + self.ensure_newline() + self._tw.sep(sep, title, fullwidth, **markup) + + def section(self, title: str, sep: str = "=", **kw: bool) -> None: + self._tw.sep(sep, title, **kw) + + def line(self, msg: str, **kw: bool) -> None: + self._tw.line(msg, **kw) + + def _add_stats(self, category: str, items: Sequence[Any]) -> None: + set_main_color = category not in self.stats + self.stats.setdefault(category, []).extend(items) + if set_main_color: + self._set_main_color() + + def pytest_internalerror(self, excrepr: ExceptionRepr) -> bool: + for line in str(excrepr).split("\n"): + self.write_line("INTERNALERROR> " + line) + return True + + def pytest_warning_recorded( + self, + warning_message: warnings.WarningMessage, + nodeid: str, + ) -> None: + from _pytest.warnings import warning_record_to_str + + fslocation = warning_message.filename, warning_message.lineno + message = warning_record_to_str(warning_message) + + warning_report = WarningReport( + fslocation=fslocation, message=message, nodeid=nodeid + ) + self._add_stats("warnings", [warning_report]) + + def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None: + if self.config.option.traceconfig: + msg = f"PLUGIN registered: {plugin}" + # XXX This event may happen during setup/teardown time + # which unfortunately captures our output here + # which garbles our output if we use self.write_line. 
+ self.write_line(msg) + + def pytest_deselected(self, items: Sequence[Item]) -> None: + self._add_stats("deselected", items) + + def pytest_runtest_logstart( + self, nodeid: str, location: tuple[str, int | None, str] + ) -> None: + fspath, lineno, domain = location + # Ensure that the path is printed before the + # 1st test of a module starts running. + if self.showlongtestinfo: + line = self._locationline(nodeid, fspath, lineno, domain) + self.write_ensure_prefix(line, "") + self.flush() + elif self.showfspath: + self.write_fspath_result(nodeid, "") + self.flush() + + def pytest_runtest_logreport(self, report: TestReport) -> None: + self._tests_ran = True + rep = report + + res = TestShortLogReport( + *self.config.hook.pytest_report_teststatus(report=rep, config=self.config) + ) + category, letter, word = res.category, res.letter, res.word + if not isinstance(word, tuple): + markup = None + else: + word, markup = word + self._add_stats(category, [rep]) + if not letter and not word: + # Probably passed setup/teardown. + return + if markup is None: + was_xfail = hasattr(report, "wasxfail") + if rep.passed and not was_xfail: + markup = {"green": True} + elif rep.passed and was_xfail: + markup = {"yellow": True} + elif rep.failed: + markup = {"red": True} + elif rep.skipped: + markup = {"yellow": True} + else: + markup = {} + self._progress_nodeids_reported.add(rep.nodeid) + if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0: + self._tw.write(letter, **markup) + # When running in xdist, the logreport and logfinish of multiple + # items are interspersed, e.g. `logreport`, `logreport`, + # `logfinish`, `logfinish`. To avoid the "past edge" calculation + # from getting confused and overflowing (#7166), do the past edge + # printing here and not in logfinish, except for the 100% which + # should only be printed after all teardowns are finished. + if self._show_progress_info and not self._is_last_item: + self._write_progress_information_if_past_edge() + else: + line = self._locationline(rep.nodeid, *rep.location) + running_xdist = hasattr(rep, "node") + if not running_xdist: + self.write_ensure_prefix(line, word, **markup) + if rep.skipped or hasattr(report, "wasxfail"): + reason = _get_raw_skip_reason(rep) + if self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) < 2: + available_width = ( + (self._tw.fullwidth - self._tw.width_of_current_line) + - len(" [100%]") + - 1 + ) + formatted_reason = _format_trimmed( + " ({})", reason, available_width + ) + else: + formatted_reason = f" ({reason})" + + if reason and formatted_reason is not None: + self.wrap_write(formatted_reason) + if self._show_progress_info: + self._write_progress_information_filling_space() + else: + self.ensure_newline() + self._tw.write(f"[{rep.node.gateway.id}]") + if self._show_progress_info: + self._tw.write( + self._get_progress_information_message() + " ", cyan=True + ) + else: + self._tw.write(" ") + self._tw.write(word, **markup) + self._tw.write(" " + line) + self.currentfspath = -2 + self.flush() + + @property + def _is_last_item(self) -> bool: + assert self._session is not None + return len(self._progress_nodeids_reported) == self._session.testscollected + + @hookimpl(wrapper=True) + def pytest_runtestloop(self) -> Generator[None, object, object]: + result = yield + + # Write the final/100% progress -- deferred until the loop is complete. 
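+        # Per-test progress is written by pytest_runtest_logreport as results
+        # arrive; only the final 100% marker is deferred to this point.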
+ if ( + self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) <= 0 + and self._show_progress_info + and self._progress_nodeids_reported + ): + self._write_progress_information_filling_space() + + return result + + def _get_progress_information_message(self) -> str: + assert self._session + collected = self._session.testscollected + if self._show_progress_info == "count": + if collected: + progress = len(self._progress_nodeids_reported) + counter_format = f"{{:{len(str(collected))}d}}" + format_string = f" [{counter_format}/{{}}]" + return format_string.format(progress, collected) + return f" [ {collected} / {collected} ]" + else: + if collected: + return ( + f" [{len(self._progress_nodeids_reported) * 100 // collected:3d}%]" + ) + return " [100%]" + + def _write_progress_information_if_past_edge(self) -> None: + w = self._width_of_current_line + if self._show_progress_info == "count": + assert self._session + num_tests = self._session.testscollected + progress_length = len(f" [{num_tests}/{num_tests}]") + else: + progress_length = len(" [100%]") + past_edge = w + progress_length + 1 >= self._screen_width + if past_edge: + main_color, _ = self._get_main_color() + msg = self._get_progress_information_message() + self._tw.write(msg + "\n", **{main_color: True}) + + def _write_progress_information_filling_space(self) -> None: + color, _ = self._get_main_color() + msg = self._get_progress_information_message() + w = self._width_of_current_line + fill = self._tw.fullwidth - w - 1 + self.write(msg.rjust(fill), flush=True, **{color: True}) + + @property + def _width_of_current_line(self) -> int: + """Return the width of the current line.""" + return self._tw.width_of_current_line + + def pytest_collection(self) -> None: + if self.isatty: + if self.config.option.verbose >= 0: + self.write("collecting ... ", flush=True, bold=True) + self._collect_report_last_write = timing.time() + elif self.config.option.verbose >= 1: + self.write("collecting ... ", flush=True, bold=True) + + def pytest_collectreport(self, report: CollectReport) -> None: + if report.failed: + self._add_stats("error", [report]) + elif report.skipped: + self._add_stats("skipped", [report]) + items = [x for x in report.result if isinstance(x, Item)] + self._numcollected += len(items) + if self.isatty: + self.report_collect() + + def report_collect(self, final: bool = False) -> None: + if self.config.option.verbose < 0: + return + + if not final: + # Only write "collecting" report every 0.5s. 
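+            # REPORT_COLLECTING_RESOLUTION (0.5s, defined above) is the refresh
+            # throttle, so large collections don't flood the terminal.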
+ t = timing.time() + if ( + self._collect_report_last_write is not None + and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION + ): + return + self._collect_report_last_write = t + + errors = len(self.stats.get("error", [])) + skipped = len(self.stats.get("skipped", [])) + deselected = len(self.stats.get("deselected", [])) + selected = self._numcollected - deselected + line = "collected " if final else "collecting " + line += ( + str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s") + ) + if errors: + line += " / %d error%s" % (errors, "s" if errors != 1 else "") + if deselected: + line += " / %d deselected" % deselected + if skipped: + line += " / %d skipped" % skipped + if self._numcollected > selected: + line += " / %d selected" % selected + if self.isatty: + self.rewrite(line, bold=True, erase=True) + if final: + self.write("\n") + else: + self.write_line(line) + + @hookimpl(trylast=True) + def pytest_sessionstart(self, session: Session) -> None: + self._session = session + self._sessionstarttime = timing.time() + if not self.showheader: + return + self.write_sep("=", "test session starts", bold=True) + verinfo = platform.python_version() + if not self.no_header: + msg = f"platform {sys.platform} -- Python {verinfo}" + pypy_version_info = getattr(sys, "pypy_version_info", None) + if pypy_version_info: + verinfo = ".".join(map(str, pypy_version_info[:3])) + msg += f"[pypy-{verinfo}-{pypy_version_info[3]}]" + msg += f", pytest-{_pytest._version.version}, pluggy-{pluggy.__version__}" + if ( + self.verbosity > 0 + or self.config.option.debug + or getattr(self.config.option, "pastebin", None) + ): + msg += " -- " + str(sys.executable) + self.write_line(msg) + lines = self.config.hook.pytest_report_header( + config=self.config, start_path=self.startpath + ) + self._write_report_lines_from_hooks(lines) + + def _write_report_lines_from_hooks( + self, lines: Sequence[str | Sequence[str]] + ) -> None: + for line_or_lines in reversed(lines): + if isinstance(line_or_lines, str): + self.write_line(line_or_lines) + else: + for line in line_or_lines: + self.write_line(line) + + def pytest_report_header(self, config: Config) -> list[str]: + result = [f"rootdir: {config.rootpath}"] + + if config.inipath: + result.append("configfile: " + bestrelpath(config.rootpath, config.inipath)) + + if config.args_source == Config.ArgsSource.TESTPATHS: + testpaths: list[str] = config.getini("testpaths") + result.append("testpaths: {}".format(", ".join(testpaths))) + + plugininfo = config.pluginmanager.list_plugin_distinfo() + if plugininfo: + result.append( + "plugins: {}".format(", ".join(_plugin_nameversions(plugininfo))) + ) + return result + + def pytest_collection_finish(self, session: Session) -> None: + self.report_collect(True) + + lines = self.config.hook.pytest_report_collectionfinish( + config=self.config, + start_path=self.startpath, + items=session.items, + ) + self._write_report_lines_from_hooks(lines) + + if self.config.getoption("collectonly"): + if session.items: + if self.config.option.verbose > -1: + self._tw.line("") + self._printcollecteditems(session.items) + + failed = self.stats.get("failed") + if failed: + self._tw.sep("!", "collection failures") + for rep in failed: + rep.toterminal(self._tw) + + def _printcollecteditems(self, items: Sequence[Item]) -> None: + test_cases_verbosity = self.config.get_verbosity(Config.VERBOSITY_TEST_CASES) + if test_cases_verbosity < 0: + if test_cases_verbosity < -1: + counts = Counter(item.nodeid.split("::", 1)[0] 
for item in items) + for name, count in sorted(counts.items()): + self._tw.line("%s: %d" % (name, count)) + else: + for item in items: + self._tw.line(item.nodeid) + return + stack: list[Node] = [] + indent = "" + for item in items: + needed_collectors = item.listchain()[1:] # strip root node + while stack: + if stack == needed_collectors[: len(stack)]: + break + stack.pop() + for col in needed_collectors[len(stack) :]: + stack.append(col) + indent = (len(stack) - 1) * " " + self._tw.line(f"{indent}{col}") + if test_cases_verbosity >= 1: + obj = getattr(col, "obj", None) + doc = inspect.getdoc(obj) if obj else None + if doc: + for line in doc.splitlines(): + self._tw.line("{}{}".format(indent + " ", line)) + + @hookimpl(wrapper=True) + def pytest_sessionfinish( + self, session: Session, exitstatus: int | ExitCode + ) -> Generator[None]: + result = yield + self._tw.line("") + summary_exit_codes = ( + ExitCode.OK, + ExitCode.TESTS_FAILED, + ExitCode.INTERRUPTED, + ExitCode.USAGE_ERROR, + ExitCode.NO_TESTS_COLLECTED, + ) + if exitstatus in summary_exit_codes and not self.no_summary: + self.config.hook.pytest_terminal_summary( + terminalreporter=self, exitstatus=exitstatus, config=self.config + ) + if session.shouldfail: + self.write_sep("!", str(session.shouldfail), red=True) + if exitstatus == ExitCode.INTERRUPTED: + self._report_keyboardinterrupt() + self._keyboardinterrupt_memo = None + elif session.shouldstop: + self.write_sep("!", str(session.shouldstop), red=True) + self.summary_stats() + return result + + @hookimpl(wrapper=True) + def pytest_terminal_summary(self) -> Generator[None]: + self.summary_errors() + self.summary_failures() + self.summary_xfailures() + self.summary_warnings() + self.summary_passes() + self.summary_xpasses() + try: + return (yield) + finally: + self.short_test_summary() + # Display any extra warnings from teardown here (if any). + self.summary_warnings() + + def pytest_keyboard_interrupt(self, excinfo: ExceptionInfo[BaseException]) -> None: + self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True) + + def pytest_unconfigure(self) -> None: + if self._keyboardinterrupt_memo is not None: + self._report_keyboardinterrupt() + + def _report_keyboardinterrupt(self) -> None: + excrepr = self._keyboardinterrupt_memo + assert excrepr is not None + assert excrepr.reprcrash is not None + msg = excrepr.reprcrash.message + self.write_sep("!", msg) + if "KeyboardInterrupt" in msg: + if self.config.option.fulltrace: + excrepr.toterminal(self._tw) + else: + excrepr.reprcrash.toterminal(self._tw) + self._tw.line( + "(to show a full traceback on KeyboardInterrupt use --full-trace)", + yellow=True, + ) + + def _locationline( + self, nodeid: str, fspath: str, lineno: int | None, domain: str + ) -> str: + def mkrel(nodeid: str) -> str: + line = self.config.cwd_relative_nodeid(nodeid) + if domain and line.endswith(domain): + line = line[: -len(domain)] + values = domain.split("[") + values[0] = values[0].replace(".", "::") # don't replace '.' in params + line += "[".join(values) + return line + + # fspath comes from testid which has a "/"-normalized path. + if fspath: + res = mkrel(nodeid) + if self.verbosity >= 2 and nodeid.split("::")[0] != fspath.replace( + "\\", nodes.SEP + ): + res += " <- " + bestrelpath(self.startpath, Path(fspath)) + else: + res = "[location]" + return res + " " + + def _getfailureheadline(self, rep): + head_line = rep.head_line + if head_line: + return head_line + return "test session" # XXX? 
+ + def _getcrashline(self, rep): + try: + return str(rep.longrepr.reprcrash) + except AttributeError: + try: + return str(rep.longrepr)[:50] + except AttributeError: + return "" + + # + # Summaries for sessionfinish. + # + def getreports(self, name: str): + return [x for x in self.stats.get(name, ()) if not hasattr(x, "_pdbshown")] + + def summary_warnings(self) -> None: + if self.hasopt("w"): + all_warnings: list[WarningReport] | None = self.stats.get("warnings") + if not all_warnings: + return + + final = self._already_displayed_warnings is not None + if final: + warning_reports = all_warnings[self._already_displayed_warnings :] + else: + warning_reports = all_warnings + self._already_displayed_warnings = len(warning_reports) + if not warning_reports: + return + + reports_grouped_by_message: dict[str, list[WarningReport]] = {} + for wr in warning_reports: + reports_grouped_by_message.setdefault(wr.message, []).append(wr) + + def collapsed_location_report(reports: list[WarningReport]) -> str: + locations = [] + for w in reports: + location = w.get_location(self.config) + if location: + locations.append(location) + + if len(locations) < 10: + return "\n".join(map(str, locations)) + + counts_by_filename = Counter( + str(loc).split("::", 1)[0] for loc in locations + ) + return "\n".join( + "{}: {} warning{}".format(k, v, "s" if v > 1 else "") + for k, v in counts_by_filename.items() + ) + + title = "warnings summary (final)" if final else "warnings summary" + self.write_sep("=", title, yellow=True, bold=False) + for message, message_reports in reports_grouped_by_message.items(): + maybe_location = collapsed_location_report(message_reports) + if maybe_location: + self._tw.line(maybe_location) + lines = message.splitlines() + indented = "\n".join(" " + x for x in lines) + message = indented.rstrip() + else: + message = message.rstrip() + self._tw.line(message) + self._tw.line() + self._tw.line( + "-- Docs: https://docs.pytest.org/en/stable/how-to/capture-warnings.html" + ) + + def summary_passes(self) -> None: + self.summary_passes_combined("passed", "PASSES", "P") + + def summary_xpasses(self) -> None: + self.summary_passes_combined("xpassed", "XPASSES", "X") + + def summary_passes_combined( + self, which_reports: str, sep_title: str, needed_opt: str + ) -> None: + if self.config.option.tbstyle != "no": + if self.hasopt(needed_opt): + reports: list[TestReport] = self.getreports(which_reports) + if not reports: + return + self.write_sep("=", sep_title) + for rep in reports: + if rep.sections: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, green=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def _get_teardown_reports(self, nodeid: str) -> list[TestReport]: + reports = self.getreports("") + return [ + report + for report in reports + if report.when == "teardown" and report.nodeid == nodeid + ] + + def _handle_teardown_sections(self, nodeid: str) -> None: + for report in self._get_teardown_reports(nodeid): + self.print_teardown_sections(report) + + def print_teardown_sections(self, rep: TestReport) -> None: + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + if "teardown" in secname: + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_failures(self) -> None: + style = self.config.option.tbstyle + 
self.summary_failures_combined("failed", "FAILURES", style=style) + + def summary_xfailures(self) -> None: + show_tb = self.config.option.xfail_tb + style = self.config.option.tbstyle if show_tb else "no" + self.summary_failures_combined("xfailed", "XFAILURES", style=style) + + def summary_failures_combined( + self, + which_reports: str, + sep_title: str, + *, + style: str, + needed_opt: str | None = None, + ) -> None: + if style != "no": + if not needed_opt or self.hasopt(needed_opt): + reports: list[BaseReport] = self.getreports(which_reports) + if not reports: + return + self.write_sep("=", sep_title) + if style == "line": + for rep in reports: + line = self._getcrashline(rep) + self.write_line(line) + else: + for rep in reports: + msg = self._getfailureheadline(rep) + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + self._handle_teardown_sections(rep.nodeid) + + def summary_errors(self) -> None: + if self.config.option.tbstyle != "no": + reports: list[BaseReport] = self.getreports("error") + if not reports: + return + self.write_sep("=", "ERRORS") + for rep in self.stats["error"]: + msg = self._getfailureheadline(rep) + if rep.when == "collect": + msg = "ERROR collecting " + msg + else: + msg = f"ERROR at {rep.when} of {msg}" + self.write_sep("_", msg, red=True, bold=True) + self._outrep_summary(rep) + + def _outrep_summary(self, rep: BaseReport) -> None: + rep.toterminal(self._tw) + showcapture = self.config.option.showcapture + if showcapture == "no": + return + for secname, content in rep.sections: + if showcapture != "all" and showcapture not in secname: + continue + self._tw.sep("-", secname) + if content[-1:] == "\n": + content = content[:-1] + self._tw.line(content) + + def summary_stats(self) -> None: + if self.verbosity < -1: + return + + session_duration = timing.time() - self._sessionstarttime + (parts, main_color) = self.build_summary_stats_line() + line_parts = [] + + display_sep = self.verbosity >= 0 + if display_sep: + fullwidth = self._tw.fullwidth + for text, markup in parts: + with_markup = self._tw.markup(text, **markup) + if display_sep: + fullwidth += len(with_markup) - len(text) + line_parts.append(with_markup) + msg = ", ".join(line_parts) + + main_markup = {main_color: True} + duration = f" in {format_session_duration(session_duration)}" + duration_with_markup = self._tw.markup(duration, **main_markup) + if display_sep: + fullwidth += len(duration_with_markup) - len(duration) + msg += duration_with_markup + + if display_sep: + markup_for_end_sep = self._tw.markup("", **main_markup) + if markup_for_end_sep.endswith("\x1b[0m"): + markup_for_end_sep = markup_for_end_sep[:-4] + fullwidth += len(markup_for_end_sep) + msg += markup_for_end_sep + + if display_sep: + self.write_sep("=", msg, fullwidth=fullwidth, **main_markup) + else: + self.write_line(msg, **main_markup) + + def short_test_summary(self) -> None: + if not self.reportchars: + return + + def show_simple(lines: list[str], *, stat: str) -> None: + failed = self.stats.get(stat, []) + if not failed: + return + config = self.config + for rep in failed: + color = _color_for_type.get(stat, _color_for_type_default) + line = _get_line_with_reprcrash_message( + config, rep, self._tw, {color: True} + ) + lines.append(line) + + def show_xfailed(lines: list[str]) -> None: + xfailed = self.stats.get("xfailed", []) + for rep in xfailed: + verbose_word, verbose_markup = rep._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = 
self._tw.markup(verbose_word, **verbose_markup) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" + reason = rep.wasxfail + if reason: + line += " - " + str(reason) + + lines.append(line) + + def show_xpassed(lines: list[str]) -> None: + xpassed = self.stats.get("xpassed", []) + for rep in xpassed: + verbose_word, verbose_markup = rep._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = self._tw.markup(verbose_word, **verbose_markup) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" + reason = rep.wasxfail + if reason: + line += " - " + str(reason) + lines.append(line) + + def show_skipped_folded(lines: list[str]) -> None: + skipped: list[CollectReport] = self.stats.get("skipped", []) + fskips = _folded_skips(self.startpath, skipped) if skipped else [] + if not fskips: + return + verbose_word, verbose_markup = skipped[0]._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = self._tw.markup(verbose_word, **verbose_markup) + prefix = "Skipped: " + for num, fspath, lineno, reason in fskips: + if reason.startswith(prefix): + reason = reason[len(prefix) :] + if lineno is not None: + lines.append( + "%s [%d] %s:%d: %s" % (markup_word, num, fspath, lineno, reason) + ) + else: + lines.append("%s [%d] %s: %s" % (markup_word, num, fspath, reason)) + + def show_skipped_unfolded(lines: list[str]) -> None: + skipped: list[CollectReport] = self.stats.get("skipped", []) + + for rep in skipped: + assert rep.longrepr is not None + assert isinstance(rep.longrepr, tuple), (rep, rep.longrepr) + assert len(rep.longrepr) == 3, (rep, rep.longrepr) + + verbose_word, verbose_markup = rep._get_verbose_word_with_markup( + self.config, {_color_for_type["warnings"]: True} + ) + markup_word = self._tw.markup(verbose_word, **verbose_markup) + nodeid = _get_node_id_with_markup(self._tw, self.config, rep) + line = f"{markup_word} {nodeid}" + reason = rep.longrepr[2] + if reason: + line += " - " + str(reason) + lines.append(line) + + def show_skipped(lines: list[str]) -> None: + if self.foldskipped: + show_skipped_folded(lines) + else: + show_skipped_unfolded(lines) + + REPORTCHAR_ACTIONS: Mapping[str, Callable[[list[str]], None]] = { + "x": show_xfailed, + "X": show_xpassed, + "f": partial(show_simple, stat="failed"), + "s": show_skipped, + "p": partial(show_simple, stat="passed"), + "E": partial(show_simple, stat="error"), + } + + lines: list[str] = [] + for char in self.reportchars: + action = REPORTCHAR_ACTIONS.get(char) + if action: # skipping e.g. "P" (passed with output) here. 
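+                # Note "P" and "w" have no action here: passed-with-output and
+                # warnings are rendered by their own summary sections above.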
+                action(lines)
+
+        if lines:
+            self.write_sep("=", "short test summary info", cyan=True, bold=True)
+            for line in lines:
+                self.write_line(line)
+
+    def _get_main_color(self) -> tuple[str, list[str]]:
+        if self._main_color is None or self._known_types is None or self._is_last_item:
+            self._set_main_color()
+            assert self._main_color
+            assert self._known_types
+        return self._main_color, self._known_types
+
+    def _determine_main_color(self, unknown_type_seen: bool) -> str:
+        stats = self.stats
+        if "failed" in stats or "error" in stats:
+            main_color = "red"
+        elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
+            main_color = "yellow"
+        elif "passed" in stats or not self._is_last_item:
+            main_color = "green"
+        else:
+            main_color = "yellow"
+        return main_color
+
+    def _set_main_color(self) -> None:
+        unknown_types: list[str] = []
+        for found_type in self.stats:
+            if found_type:  # setup/teardown reports have an empty key, ignore them
+                if found_type not in KNOWN_TYPES and found_type not in unknown_types:
+                    unknown_types.append(found_type)
+        self._known_types = list(KNOWN_TYPES) + unknown_types
+        self._main_color = self._determine_main_color(bool(unknown_types))
+
+    def build_summary_stats_line(self) -> tuple[list[tuple[str, dict[str, bool]]], str]:
+        """
+        Build the parts used in the last summary stats line.
+
+        The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs ===".
+
+        This function builds a list of the "parts" that make up the text in that line, in
+        the example above it would be:
+
+            [
+                ("12 passed", {"green": True}),
+                ("2 errors", {"red": True}),
+            ]
+
+        That last dict for each line is a "markup dictionary", used by TerminalWriter to
+        color output.
+
+        The final color of the line is also determined by this function, and is the second
+        element of the returned tuple.
+ """ + if self.config.getoption("collectonly"): + return self._build_collect_only_summary_stats_line() + else: + return self._build_normal_summary_stats_line() + + def _get_reports_to_display(self, key: str) -> list[Any]: + """Get test/collection reports for the given status key, such as `passed` or `error`.""" + reports = self.stats.get(key, []) + return [x for x in reports if getattr(x, "count_towards_summary", True)] + + def _build_normal_summary_stats_line( + self, + ) -> tuple[list[tuple[str, dict[str, bool]]], str]: + main_color, known_types = self._get_main_color() + parts = [] + + for key in known_types: + reports = self._get_reports_to_display(key) + if reports: + count = len(reports) + color = _color_for_type.get(key, _color_for_type_default) + markup = {color: True, "bold": color == main_color} + parts.append(("%d %s" % pluralize(count, key), markup)) + + if not parts: + parts = [("no tests ran", {_color_for_type_default: True})] + + return parts, main_color + + def _build_collect_only_summary_stats_line( + self, + ) -> tuple[list[tuple[str, dict[str, bool]]], str]: + deselected = len(self._get_reports_to_display("deselected")) + errors = len(self._get_reports_to_display("error")) + + if self._numcollected == 0: + parts = [("no tests collected", {"yellow": True})] + main_color = "yellow" + + elif deselected == 0: + main_color = "green" + collected_output = "%d %s collected" % pluralize(self._numcollected, "test") + parts = [(collected_output, {main_color: True})] + else: + all_tests_were_deselected = self._numcollected == deselected + if all_tests_were_deselected: + main_color = "yellow" + collected_output = f"no tests collected ({deselected} deselected)" + else: + main_color = "green" + selected = self._numcollected - deselected + collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)" + + parts = [(collected_output, {main_color: True})] + + if errors: + main_color = _color_for_type["error"] + parts += [("%d %s" % pluralize(errors, "error"), {main_color: True})] + + return parts, main_color + + +def _get_node_id_with_markup(tw: TerminalWriter, config: Config, rep: BaseReport): + nodeid = config.cwd_relative_nodeid(rep.nodeid) + path, *parts = nodeid.split("::") + if parts: + parts_markup = tw.markup("::".join(parts), bold=True) + return path + "::" + parts_markup + else: + return path + + +def _format_trimmed(format: str, msg: str, available_width: int) -> str | None: + """Format msg into format, ellipsizing it if doesn't fit in available_width. + + Returns None if even the ellipsis can't fit. + """ + # Only use the first line. + i = msg.find("\n") + if i != -1: + msg = msg[:i] + + ellipsis = "..." 
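+    # Width contributed by the format string alone, measured with an empty
+    # message; wcswidth is used so wide characters count correctly.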
+    format_width = wcswidth(format.format(""))
+    if format_width + len(ellipsis) > available_width:
+        return None
+
+    if format_width + wcswidth(msg) > available_width:
+        available_width -= len(ellipsis)
+        msg = msg[:available_width]
+        while format_width + wcswidth(msg) > available_width:
+            msg = msg[:-1]
+        msg += ellipsis
+
+    return format.format(msg)
+
+
+def _get_line_with_reprcrash_message(
+    config: Config, rep: BaseReport, tw: TerminalWriter, word_markup: dict[str, bool]
+) -> str:
+    """Get summary line for a report, trying to add reprcrash message."""
+    verbose_word, verbose_markup = rep._get_verbose_word_with_markup(
+        config, word_markup
+    )
+    word = tw.markup(verbose_word, **verbose_markup)
+    node = _get_node_id_with_markup(tw, config, rep)
+
+    line = f"{word} {node}"
+    line_width = wcswidth(line)
+
+    try:
+        # Type ignored intentionally -- possible AttributeError expected.
+        msg = rep.longrepr.reprcrash.message  # type: ignore[union-attr]
+    except AttributeError:
+        pass
+    else:
+        if running_on_ci() or config.option.verbose >= 2:
+            msg = f" - {msg}"
+        else:
+            available_width = tw.fullwidth - line_width
+            msg = _format_trimmed(" - {}", msg, available_width)
+        if msg is not None:
+            line += msg
+
+    return line
+
+
+def _folded_skips(
+    startpath: Path,
+    skipped: Sequence[CollectReport],
+) -> list[tuple[int, str, int | None, str]]:
+    d: dict[tuple[str, int | None, str], list[CollectReport]] = {}
+    for event in skipped:
+        assert event.longrepr is not None
+        assert isinstance(event.longrepr, tuple), (event, event.longrepr)
+        assert len(event.longrepr) == 3, (event, event.longrepr)
+        fspath, lineno, reason = event.longrepr
+        # For consistency, report all fspaths in relative form.
+        fspath = bestrelpath(startpath, Path(fspath))
+        keywords = getattr(event, "keywords", {})
+        # Folding reports with global pytestmark variable.
+        # This is a workaround, because for now we cannot identify the scope of a skip marker.
+        # TODO: Revisit once marker scope is fixed.
+        if (
+            event.when == "setup"
+            and "skip" in keywords
+            and "pytestmark" not in keywords
+        ):
+            key: tuple[str, int | None, str] = (fspath, None, reason)
+        else:
+            key = (fspath, lineno, reason)
+        d.setdefault(key, []).append(event)
+    values: list[tuple[int, str, int | None, str]] = []
+    for key, events in d.items():
+        values.append((len(events), *key))
+    return values
+
+
+_color_for_type = {
+    "failed": "red",
+    "error": "red",
+    "warnings": "yellow",
+    "passed": "green",
+}
+_color_for_type_default = "yellow"
+
+
+def pluralize(count: int, noun: str) -> tuple[int, str]:
+    # No need to pluralize words such as `failed` or `passed`.
+    if noun not in ["error", "warnings", "test"]:
+        return count, noun
+
+    # The `warnings` key is plural. To avoid API breakage, we keep it that way but
+    # set it to singular here so we can determine plurality in the same way as we do
+    # for `error`.
+    noun = noun.replace("warnings", "warning")
+
+    return count, noun + "s" if count != 1 else noun
+
+
+def _plugin_nameversions(plugininfo) -> list[str]:
+    values: list[str] = []
+    for plugin, dist in plugininfo:
+        # Gets us name and version!
+        name = f"{dist.project_name}-{dist.version}"
+        # Questionable convenience, but it keeps things short.
+        if name.startswith("pytest-"):
+            name = name[7:]
+        # We decided to print python package names since they can have more than one plugin.
+ if name not in values: + values.append(name) + return values + + +def format_session_duration(seconds: float) -> str: + """Format the given seconds in a human readable manner to show in the final summary.""" + if seconds < 60: + return f"{seconds:.2f}s" + else: + dt = datetime.timedelta(seconds=int(seconds)) + return f"{seconds:.2f}s ({dt})" + + +def _get_raw_skip_reason(report: TestReport) -> str: + """Get the reason string of a skip/xfail/xpass test report. + + The string is just the part given by the user. + """ + if hasattr(report, "wasxfail"): + reason = report.wasxfail + if reason.startswith("reason: "): + reason = reason[len("reason: ") :] + return reason + else: + assert report.skipped + assert isinstance(report.longrepr, tuple) + _, _, reason = report.longrepr + if reason.startswith("Skipped: "): + reason = reason[len("Skipped: ") :] + elif reason == "Skipped": + reason = "" + return reason diff --git a/venv/Lib/site-packages/_pytest/threadexception.py b/venv/Lib/site-packages/_pytest/threadexception.py new file mode 100644 index 0000000..c1ed803 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/threadexception.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +import threading +import traceback +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import TYPE_CHECKING +import warnings + +import pytest + + +if TYPE_CHECKING: + from typing_extensions import Self + + +# Copied from cpython/Lib/test/support/threading_helper.py, with modifications. +class catch_threading_exception: + """Context manager catching threading.Thread exception using + threading.excepthook. + + Storing exc_value using a custom hook can create a reference cycle. The + reference cycle is broken explicitly when the context manager exits. + + Storing thread using a custom hook can resurrect it if it is set to an + object which is being finalized. Exiting the context manager clears the + stored object. + + Usage: + with threading_helper.catch_threading_exception() as cm: + # code spawning a thread which raises an exception + ... + # check the thread exception: use cm.args + ... 
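+           # e.g. inspect cm.args.exc_type / cm.args.exc_value / cm.args.exc_traceback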
+ # cm.args attribute no longer exists at this point + # (to break a reference cycle) + """ + + def __init__(self) -> None: + self.args: threading.ExceptHookArgs | None = None + self._old_hook: Callable[[threading.ExceptHookArgs], Any] | None = None + + def _hook(self, args: threading.ExceptHookArgs) -> None: + self.args = args + + def __enter__(self) -> Self: + self._old_hook = threading.excepthook + threading.excepthook = self._hook + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + assert self._old_hook is not None + threading.excepthook = self._old_hook + self._old_hook = None + del self.args + + +def thread_exception_runtest_hook() -> Generator[None]: + with catch_threading_exception() as cm: + try: + yield + finally: + if cm.args: + thread_name = ( + "" if cm.args.thread is None else cm.args.thread.name + ) + msg = f"Exception in thread {thread_name}\n\n" + msg += "".join( + traceback.format_exception( + cm.args.exc_type, + cm.args.exc_value, + cm.args.exc_traceback, + ) + ) + warnings.warn(pytest.PytestUnhandledThreadExceptionWarning(msg)) + + +@pytest.hookimpl(wrapper=True, trylast=True) +def pytest_runtest_setup() -> Generator[None]: + yield from thread_exception_runtest_hook() + + +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_call() -> Generator[None]: + yield from thread_exception_runtest_hook() + + +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_teardown() -> Generator[None]: + yield from thread_exception_runtest_hook() diff --git a/venv/Lib/site-packages/_pytest/timing.py b/venv/Lib/site-packages/_pytest/timing.py new file mode 100644 index 0000000..b23c7f6 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/timing.py @@ -0,0 +1,16 @@ +"""Indirection for time functions. + +We intentionally grab some "time" functions internally to avoid tests mocking "time" to affect +pytest runtime information (issue #185). + +Fixture "mock_timing" also interacts with this module for pytest's own tests. 
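+
+For example, ``_pytest.terminal`` computes the session duration from
+``timing.time()`` rather than ``time.time()``, so a test suite that mocks
+``time`` does not distort the reported duration.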
+""" + +from __future__ import annotations + +from time import perf_counter +from time import sleep +from time import time + + +__all__ = ["perf_counter", "sleep", "time"] diff --git a/venv/Lib/site-packages/_pytest/tmpdir.py b/venv/Lib/site-packages/_pytest/tmpdir.py new file mode 100644 index 0000000..de0cbcf --- /dev/null +++ b/venv/Lib/site-packages/_pytest/tmpdir.py @@ -0,0 +1,322 @@ +# mypy: allow-untyped-defs +"""Support for providing temporary directories to test functions.""" + +from __future__ import annotations + +import dataclasses +import os +from pathlib import Path +import re +from shutil import rmtree +import tempfile +from typing import Any +from typing import Dict +from typing import final +from typing import Generator +from typing import Literal + +from .pathlib import cleanup_dead_symlinks +from .pathlib import LOCK_TIMEOUT +from .pathlib import make_numbered_dir +from .pathlib import make_numbered_dir_with_cleanup +from .pathlib import rm_rf +from _pytest.compat import get_user_id +from _pytest.config import Config +from _pytest.config import ExitCode +from _pytest.config import hookimpl +from _pytest.config.argparsing import Parser +from _pytest.deprecated import check_ispytest +from _pytest.fixtures import fixture +from _pytest.fixtures import FixtureRequest +from _pytest.monkeypatch import MonkeyPatch +from _pytest.nodes import Item +from _pytest.reports import TestReport +from _pytest.stash import StashKey + + +tmppath_result_key = StashKey[Dict[str, bool]]() +RetentionType = Literal["all", "failed", "none"] + + +@final +@dataclasses.dataclass +class TempPathFactory: + """Factory for temporary directories under the common base temp directory. + + The base directory can be configured using the ``--basetemp`` option. + """ + + _given_basetemp: Path | None + # pluggy TagTracerSub, not currently exposed, so Any. + _trace: Any + _basetemp: Path | None + _retention_count: int + _retention_policy: RetentionType + + def __init__( + self, + given_basetemp: Path | None, + retention_count: int, + retention_policy: RetentionType, + trace, + basetemp: Path | None = None, + *, + _ispytest: bool = False, + ) -> None: + check_ispytest(_ispytest) + if given_basetemp is None: + self._given_basetemp = None + else: + # Use os.path.abspath() to get absolute path instead of resolve() as it + # does not work the same in all platforms (see #4427). + # Path.absolute() exists, but it is not public (see https://bugs.python.org/issue25012). + self._given_basetemp = Path(os.path.abspath(str(given_basetemp))) + self._trace = trace + self._retention_count = retention_count + self._retention_policy = retention_policy + self._basetemp = basetemp + + @classmethod + def from_config( + cls, + config: Config, + *, + _ispytest: bool = False, + ) -> TempPathFactory: + """Create a factory according to pytest configuration. + + :meta private: + """ + check_ispytest(_ispytest) + count = int(config.getini("tmp_path_retention_count")) + if count < 0: + raise ValueError( + f"tmp_path_retention_count must be >= 0. Current input: {count}." + ) + + policy = config.getini("tmp_path_retention_policy") + if policy not in ("all", "failed", "none"): + raise ValueError( + f"tmp_path_retention_policy must be either all, failed, none. Current input: {policy}." 
+ ) + + return cls( + given_basetemp=config.option.basetemp, + trace=config.trace.get("tmpdir"), + retention_count=count, + retention_policy=policy, + _ispytest=True, + ) + + def _ensure_relative_to_basetemp(self, basename: str) -> str: + basename = os.path.normpath(basename) + if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp(): + raise ValueError(f"{basename} is not a normalized and relative path") + return basename + + def mktemp(self, basename: str, numbered: bool = True) -> Path: + """Create a new temporary directory managed by the factory. + + :param basename: + Directory base name, must be a relative path. + + :param numbered: + If ``True``, ensure the directory is unique by adding a numbered + suffix greater than any existing one: ``basename="foo-"`` and ``numbered=True`` + means that this function will create directories named ``"foo-0"``, + ``"foo-1"``, ``"foo-2"`` and so on. + + :returns: + The path to the new directory. + """ + basename = self._ensure_relative_to_basetemp(basename) + if not numbered: + p = self.getbasetemp().joinpath(basename) + p.mkdir(mode=0o700) + else: + p = make_numbered_dir(root=self.getbasetemp(), prefix=basename, mode=0o700) + self._trace("mktemp", p) + return p + + def getbasetemp(self) -> Path: + """Return the base temporary directory, creating it if needed. + + :returns: + The base temporary directory. + """ + if self._basetemp is not None: + return self._basetemp + + if self._given_basetemp is not None: + basetemp = self._given_basetemp + if basetemp.exists(): + rm_rf(basetemp) + basetemp.mkdir(mode=0o700) + basetemp = basetemp.resolve() + else: + from_env = os.environ.get("PYTEST_DEBUG_TEMPROOT") + temproot = Path(from_env or tempfile.gettempdir()).resolve() + user = get_user() or "unknown" + # use a sub-directory in the temproot to speed-up + # make_numbered_dir() call + rootdir = temproot.joinpath(f"pytest-of-{user}") + try: + rootdir.mkdir(mode=0o700, exist_ok=True) + except OSError: + # getuser() likely returned illegal characters for the platform, use unknown back off mechanism + rootdir = temproot.joinpath("pytest-of-unknown") + rootdir.mkdir(mode=0o700, exist_ok=True) + # Because we use exist_ok=True with a predictable name, make sure + # we are the owners, to prevent any funny business (on unix, where + # temproot is usually shared). + # Also, to keep things private, fixup any world-readable temp + # rootdir's permissions. Historically 0o755 was used, so we can't + # just error out on this, at least for a while. + uid = get_user_id() + if uid is not None: + rootdir_stat = rootdir.stat() + if rootdir_stat.st_uid != uid: + raise OSError( + f"The temporary directory {rootdir} is not owned by the current user. " + "Fix this and try again." + ) + if (rootdir_stat.st_mode & 0o077) != 0: + os.chmod(rootdir, rootdir_stat.st_mode & ~0o077) + keep = self._retention_count + if self._retention_policy == "none": + keep = 0 + basetemp = make_numbered_dir_with_cleanup( + prefix="pytest-", + root=rootdir, + keep=keep, + lock_timeout=LOCK_TIMEOUT, + mode=0o700, + ) + assert basetemp is not None, basetemp + self._basetemp = basetemp + self._trace("new basetemp", basetemp) + return basetemp + + +def get_user() -> str | None: + """Return the current user name, or None if getuser() does not work + in the current environment (see #1010).""" + try: + # In some exotic environments, getpass may not be importable. 
+ import getpass + + return getpass.getuser() + except (ImportError, OSError, KeyError): + return None + + +def pytest_configure(config: Config) -> None: + """Create a TempPathFactory and attach it to the config object. + + This is to comply with existing plugins which expect the handler to be + available at pytest_configure time, but ideally should be moved entirely + to the tmp_path_factory session fixture. + """ + mp = MonkeyPatch() + config.add_cleanup(mp.undo) + _tmp_path_factory = TempPathFactory.from_config(config, _ispytest=True) + mp.setattr(config, "_tmp_path_factory", _tmp_path_factory, raising=False) + + +def pytest_addoption(parser: Parser) -> None: + parser.addini( + "tmp_path_retention_count", + help="How many sessions should we keep the `tmp_path` directories, according to `tmp_path_retention_policy`.", + default=3, + ) + + parser.addini( + "tmp_path_retention_policy", + help="Controls which directories created by the `tmp_path` fixture are kept around, based on test outcome. " + "(all/failed/none)", + default="all", + ) + + +@fixture(scope="session") +def tmp_path_factory(request: FixtureRequest) -> TempPathFactory: + """Return a :class:`pytest.TempPathFactory` instance for the test session.""" + # Set dynamically by pytest_configure() above. + return request.config._tmp_path_factory # type: ignore + + +def _mk_tmp(request: FixtureRequest, factory: TempPathFactory) -> Path: + name = request.node.name + name = re.sub(r"[\W]", "_", name) + MAXVAL = 30 + name = name[:MAXVAL] + return factory.mktemp(name, numbered=True) + + +@fixture +def tmp_path( + request: FixtureRequest, tmp_path_factory: TempPathFactory +) -> Generator[Path]: + """Return a temporary directory path object which is unique to each test + function invocation, created as a sub directory of the base temporary + directory. + + By default, a new base temporary directory is created each test session, + and old bases are removed after 3 sessions, to aid in debugging. + This behavior can be configured with :confval:`tmp_path_retention_count` and + :confval:`tmp_path_retention_policy`. + If ``--basetemp`` is used then it is cleared each session. See + :ref:`temporary directory location and retention`. + + The returned object is a :class:`pathlib.Path` object. + """ + path = _mk_tmp(request, tmp_path_factory) + yield path + + # Remove the tmpdir if the policy is "failed" and the test passed. + tmp_path_factory: TempPathFactory = request.session.config._tmp_path_factory # type: ignore + policy = tmp_path_factory._retention_policy + result_dict = request.node.stash[tmppath_result_key] + + if policy == "failed" and result_dict.get("call", True): + # We do a "best effort" to remove files, but it might not be possible due to some leaked resource, + # permissions, etc, in which case we ignore it. + rmtree(path, ignore_errors=True) + + del request.node.stash[tmppath_result_key] + + +def pytest_sessionfinish(session, exitstatus: int | ExitCode): + """After each session, remove base directory if all the tests passed, + the policy is "failed", and the basetemp is not specified by a user. 
+ """ + tmp_path_factory: TempPathFactory = session.config._tmp_path_factory + basetemp = tmp_path_factory._basetemp + if basetemp is None: + return + + policy = tmp_path_factory._retention_policy + if ( + exitstatus == 0 + and policy == "failed" + and tmp_path_factory._given_basetemp is None + ): + if basetemp.is_dir(): + # We do a "best effort" to remove files, but it might not be possible due to some leaked resource, + # permissions, etc, in which case we ignore it. + rmtree(basetemp, ignore_errors=True) + + # Remove dead symlinks. + if basetemp.is_dir(): + cleanup_dead_symlinks(basetemp) + + +@hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_makereport( + item: Item, call +) -> Generator[None, TestReport, TestReport]: + rep = yield + assert rep.when is not None + empty: dict[str, bool] = {} + item.stash.setdefault(tmppath_result_key, empty)[rep.when] = rep.passed + return rep diff --git a/venv/Lib/site-packages/_pytest/unittest.py b/venv/Lib/site-packages/_pytest/unittest.py new file mode 100644 index 0000000..8cecd4f --- /dev/null +++ b/venv/Lib/site-packages/_pytest/unittest.py @@ -0,0 +1,435 @@ +# mypy: allow-untyped-defs +"""Discover and run std-library "unittest" style tests.""" + +from __future__ import annotations + +import inspect +import sys +import traceback +import types +from typing import Any +from typing import Callable +from typing import Generator +from typing import Iterable +from typing import Tuple +from typing import Type +from typing import TYPE_CHECKING +from typing import Union + +import _pytest._code +from _pytest.compat import is_async_function +from _pytest.config import hookimpl +from _pytest.fixtures import FixtureRequest +from _pytest.nodes import Collector +from _pytest.nodes import Item +from _pytest.outcomes import exit +from _pytest.outcomes import fail +from _pytest.outcomes import skip +from _pytest.outcomes import xfail +from _pytest.python import Class +from _pytest.python import Function +from _pytest.python import Module +from _pytest.runner import CallInfo +import pytest + + +if sys.version_info[:2] < (3, 11): + from exceptiongroup import ExceptionGroup + +if TYPE_CHECKING: + import unittest + + import twisted.trial.unittest + + +_SysExcInfoType = Union[ + Tuple[Type[BaseException], BaseException, types.TracebackType], + Tuple[None, None, None], +] + + +def pytest_pycollect_makeitem( + collector: Module | Class, name: str, obj: object +) -> UnitTestCase | None: + try: + # Has unittest been imported? + ut = sys.modules["unittest"] + # Is obj a subclass of unittest.TestCase? + # Type ignored because `ut` is an opaque module. + if not issubclass(obj, ut.TestCase): # type: ignore + return None + except Exception: + return None + # Is obj a concrete class? + # Abstract classes can't be instantiated so no point collecting them. + if inspect.isabstract(obj): + return None + # Yes, so let's collect it. + return UnitTestCase.from_parent(collector, name=name, obj=obj) + + +class UnitTestCase(Class): + # Marker for fixturemanger.getfixtureinfo() + # to declare that our children do not support funcargs. + nofuncargs = True + + def newinstance(self): + # TestCase __init__ takes the method (test) name. The TestCase + # constructor treats the name "runTest" as a special no-op, so it can be + # used when a dummy instance is needed. While unittest.TestCase has a + # default, some subclasses omit the default (#9610), so always supply + # it. 
+ return self.obj("runTest") + + def collect(self) -> Iterable[Item | Collector]: + from unittest import TestLoader + + cls = self.obj + if not getattr(cls, "__test__", True): + return + + skipped = _is_skipped(cls) + if not skipped: + self._register_unittest_setup_method_fixture(cls) + self._register_unittest_setup_class_fixture(cls) + self._register_setup_class_fixture() + + self.session._fixturemanager.parsefactories(self.newinstance(), self.nodeid) + + loader = TestLoader() + foundsomething = False + for name in loader.getTestCaseNames(self.obj): + x = getattr(self.obj, name) + if not getattr(x, "__test__", True): + continue + yield TestCaseFunction.from_parent(self, name=name) + foundsomething = True + + if not foundsomething: + runtest = getattr(self.obj, "runTest", None) + if runtest is not None: + ut = sys.modules.get("twisted.trial.unittest", None) + if ut is None or runtest != ut.TestCase.runTest: + yield TestCaseFunction.from_parent(self, name="runTest") + + def _register_unittest_setup_class_fixture(self, cls: type) -> None: + """Register an auto-use fixture to invoke setUpClass and + tearDownClass (#517).""" + setup = getattr(cls, "setUpClass", None) + teardown = getattr(cls, "tearDownClass", None) + if setup is None and teardown is None: + return None + cleanup = getattr(cls, "doClassCleanups", lambda: None) + + def process_teardown_exceptions() -> None: + # tearDown_exceptions is a list set in the class containing exc_infos for errors during + # teardown for the class. + exc_infos = getattr(cls, "tearDown_exceptions", None) + if not exc_infos: + return + exceptions = [exc for (_, exc, _) in exc_infos] + # If a single exception, raise it directly as this provides a more readable + # error (hopefully this will improve in #12255). + if len(exceptions) == 1: + raise exceptions[0] + else: + raise ExceptionGroup("Unittest class cleanup errors", exceptions) + + def unittest_setup_class_fixture( + request: FixtureRequest, + ) -> Generator[None]: + cls = request.cls + if _is_skipped(cls): + reason = cls.__unittest_skip_why__ + raise pytest.skip.Exception(reason, _use_item_location=True) + if setup is not None: + try: + setup() + # unittest does not call the cleanup function for every BaseException, so we + # follow this here. + except Exception: + cleanup() + process_teardown_exceptions() + raise + yield + try: + if teardown is not None: + teardown() + finally: + cleanup() + process_teardown_exceptions() + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. + name=f"_unittest_setUpClass_fixture_{cls.__qualname__}", + func=unittest_setup_class_fixture, + nodeid=self.nodeid, + scope="class", + autouse=True, + ) + + def _register_unittest_setup_method_fixture(self, cls: type) -> None: + """Register an auto-use fixture to invoke setup_method and + teardown_method (#517).""" + setup = getattr(cls, "setup_method", None) + teardown = getattr(cls, "teardown_method", None) + if setup is None and teardown is None: + return None + + def unittest_setup_method_fixture( + request: FixtureRequest, + ) -> Generator[None]: + self = request.instance + if _is_skipped(self): + reason = self.__unittest_skip_why__ + raise pytest.skip.Exception(reason, _use_item_location=True) + if setup is not None: + setup(self, request.function) + yield + if teardown is not None: + teardown(self, request.function) + + self.session._fixturemanager._register_fixture( + # Use a unique name to speed up lookup. 
+ name=f"_unittest_setup_method_fixture_{cls.__qualname__}", + func=unittest_setup_method_fixture, + nodeid=self.nodeid, + scope="function", + autouse=True, + ) + + +class TestCaseFunction(Function): + nofuncargs = True + _excinfo: list[_pytest._code.ExceptionInfo[BaseException]] | None = None + + def _getinstance(self): + assert isinstance(self.parent, UnitTestCase) + return self.parent.obj(self.name) + + # Backward compat for pytest-django; can be removed after pytest-django + # updates + some slack. + @property + def _testcase(self): + return self.instance + + def setup(self) -> None: + # A bound method to be called during teardown() if set (see 'runtest()'). + self._explicit_tearDown: Callable[[], None] | None = None + super().setup() + + def teardown(self) -> None: + if self._explicit_tearDown is not None: + self._explicit_tearDown() + self._explicit_tearDown = None + self._obj = None + del self._instance + super().teardown() + + def startTest(self, testcase: unittest.TestCase) -> None: + pass + + def _addexcinfo(self, rawexcinfo: _SysExcInfoType) -> None: + # Unwrap potential exception info (see twisted trial support below). + rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) + try: + excinfo = _pytest._code.ExceptionInfo[BaseException].from_exc_info( + rawexcinfo # type: ignore[arg-type] + ) + # Invoke the attributes to trigger storing the traceback + # trial causes some issue there. + _ = excinfo.value + _ = excinfo.traceback + except TypeError: + try: + try: + values = traceback.format_exception(*rawexcinfo) + values.insert( + 0, + "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n", + ) + fail("".join(values), pytrace=False) + except (fail.Exception, KeyboardInterrupt): + raise + except BaseException: + fail( + "ERROR: Unknown Incompatible Exception " + f"representation:\n{rawexcinfo!r}", + pytrace=False, + ) + except KeyboardInterrupt: + raise + except fail.Exception: + excinfo = _pytest._code.ExceptionInfo.from_current() + self.__dict__.setdefault("_excinfo", []).append(excinfo) + + def addError( + self, testcase: unittest.TestCase, rawexcinfo: _SysExcInfoType + ) -> None: + try: + if isinstance(rawexcinfo[1], exit.Exception): + exit(rawexcinfo[1].msg) + except TypeError: + pass + self._addexcinfo(rawexcinfo) + + def addFailure( + self, testcase: unittest.TestCase, rawexcinfo: _SysExcInfoType + ) -> None: + self._addexcinfo(rawexcinfo) + + def addSkip(self, testcase: unittest.TestCase, reason: str) -> None: + try: + raise pytest.skip.Exception(reason, _use_item_location=True) + except skip.Exception: + self._addexcinfo(sys.exc_info()) + + def addExpectedFailure( + self, + testcase: unittest.TestCase, + rawexcinfo: _SysExcInfoType, + reason: str = "", + ) -> None: + try: + xfail(str(reason)) + except xfail.Exception: + self._addexcinfo(sys.exc_info()) + + def addUnexpectedSuccess( + self, + testcase: unittest.TestCase, + reason: twisted.trial.unittest.Todo | None = None, + ) -> None: + msg = "Unexpected success" + if reason: + msg += f": {reason.reason}" + # Preserve unittest behaviour - fail the test. Explicitly not an XPASS. 
+ try: + fail(msg, pytrace=False) + except fail.Exception: + self._addexcinfo(sys.exc_info()) + + def addSuccess(self, testcase: unittest.TestCase) -> None: + pass + + def stopTest(self, testcase: unittest.TestCase) -> None: + pass + + def addDuration(self, testcase: unittest.TestCase, elapsed: float) -> None: + pass + + def runtest(self) -> None: + from _pytest.debugging import maybe_wrap_pytest_function_for_tracing + + testcase = self.instance + assert testcase is not None + + maybe_wrap_pytest_function_for_tracing(self) + + # Let the unittest framework handle async functions. + if is_async_function(self.obj): + testcase(result=self) + else: + # When --pdb is given, we want to postpone calling tearDown() otherwise + # when entering the pdb prompt, tearDown() would have probably cleaned up + # instance variables, which makes it difficult to debug. + # Arguably we could always postpone tearDown(), but this changes the moment where the + # TestCase instance interacts with the results object, so better to only do it + # when absolutely needed. + # We need to consider if the test itself is skipped, or the whole class. + assert isinstance(self.parent, UnitTestCase) + skipped = _is_skipped(self.obj) or _is_skipped(self.parent.obj) + if self.config.getoption("usepdb") and not skipped: + self._explicit_tearDown = testcase.tearDown + setattr(testcase, "tearDown", lambda *args: None) + + # We need to update the actual bound method with self.obj, because + # wrap_pytest_function_for_tracing replaces self.obj by a wrapper. + setattr(testcase, self.name, self.obj) + try: + testcase(result=self) + finally: + delattr(testcase, self.name) + + def _traceback_filter( + self, excinfo: _pytest._code.ExceptionInfo[BaseException] + ) -> _pytest._code.Traceback: + traceback = super()._traceback_filter(excinfo) + ntraceback = traceback.filter( + lambda x: not x.frame.f_globals.get("__unittest"), + ) + if not ntraceback: + ntraceback = traceback + return ntraceback + + +@hookimpl(tryfirst=True) +def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> None: + if isinstance(item, TestCaseFunction): + if item._excinfo: + call.excinfo = item._excinfo.pop(0) + try: + del call.result + except AttributeError: + pass + + # Convert unittest.SkipTest to pytest.skip. + # This is actually only needed for nose, which reuses unittest.SkipTest for + # its own nose.SkipTest. For unittest TestCases, SkipTest is already + # handled internally, and doesn't reach here. + unittest = sys.modules.get("unittest") + if unittest and call.excinfo and isinstance(call.excinfo.value, unittest.SkipTest): + excinfo = call.excinfo + call2 = CallInfo[None].from_call( + lambda: pytest.skip(str(excinfo.value)), call.when + ) + call.excinfo = call2.excinfo + + +# Twisted trial support. 
+classImplements_has_run = False + + +@hookimpl(wrapper=True) +def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]: + if isinstance(item, TestCaseFunction) and "twisted.trial.unittest" in sys.modules: + ut: Any = sys.modules["twisted.python.failure"] + global classImplements_has_run + Failure__init__ = ut.Failure.__init__ + if not classImplements_has_run: + from twisted.trial.itrial import IReporter + from zope.interface import classImplements + + classImplements(TestCaseFunction, IReporter) + classImplements_has_run = True + + def excstore( + self, exc_value=None, exc_type=None, exc_tb=None, captureVars=None + ): + if exc_value is None: + self._rawexcinfo = sys.exc_info() + else: + if exc_type is None: + exc_type = type(exc_value) + self._rawexcinfo = (exc_type, exc_value, exc_tb) + try: + Failure__init__( + self, exc_value, exc_type, exc_tb, captureVars=captureVars + ) + except TypeError: + Failure__init__(self, exc_value, exc_type, exc_tb) + + ut.Failure.__init__ = excstore + try: + res = yield + finally: + ut.Failure.__init__ = Failure__init__ + else: + res = yield + return res + + +def _is_skipped(obj) -> bool: + """Return True if the given object has been marked with @unittest.skip.""" + return bool(getattr(obj, "__unittest_skip__", False)) diff --git a/venv/Lib/site-packages/_pytest/unraisableexception.py b/venv/Lib/site-packages/_pytest/unraisableexception.py new file mode 100644 index 0000000..77a2de2 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/unraisableexception.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +import sys +import traceback +from types import TracebackType +from typing import Any +from typing import Callable +from typing import Generator +from typing import TYPE_CHECKING +import warnings + +import pytest + + +if TYPE_CHECKING: + from typing_extensions import Self + + +# Copied from cpython/Lib/test/support/__init__.py, with modifications. +class catch_unraisable_exception: + """Context manager catching unraisable exception using sys.unraisablehook. + + Storing the exception value (cm.unraisable.exc_value) creates a reference + cycle. The reference cycle is broken explicitly when the context manager + exits. + + Storing the object (cm.unraisable.object) can resurrect it if it is set to + an object which is being finalized. Exiting the context manager clears the + stored object. + + Usage: + with catch_unraisable_exception() as cm: + # code creating an "unraisable exception" + ... + # check the unraisable exception: use cm.unraisable + ... + # cm.unraisable attribute no longer exists at this point + # (to break a reference cycle) + """ + + def __init__(self) -> None: + self.unraisable: sys.UnraisableHookArgs | None = None + self._old_hook: Callable[[sys.UnraisableHookArgs], Any] | None = None + + def _hook(self, unraisable: sys.UnraisableHookArgs) -> None: + # Storing unraisable.object can resurrect an object which is being + # finalized. Storing unraisable.exc_value creates a reference cycle. 
+ self.unraisable = unraisable + + def __enter__(self) -> Self: + self._old_hook = sys.unraisablehook + sys.unraisablehook = self._hook + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_val: BaseException | None, + exc_tb: TracebackType | None, + ) -> None: + assert self._old_hook is not None + sys.unraisablehook = self._old_hook + self._old_hook = None + del self.unraisable + + +def unraisable_exception_runtest_hook() -> Generator[None]: + with catch_unraisable_exception() as cm: + try: + yield + finally: + if cm.unraisable: + if cm.unraisable.err_msg is not None: + err_msg = cm.unraisable.err_msg + else: + err_msg = "Exception ignored in" + msg = f"{err_msg}: {cm.unraisable.object!r}\n\n" + msg += "".join( + traceback.format_exception( + cm.unraisable.exc_type, + cm.unraisable.exc_value, + cm.unraisable.exc_traceback, + ) + ) + warnings.warn(pytest.PytestUnraisableExceptionWarning(msg)) + + +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_setup() -> Generator[None]: + yield from unraisable_exception_runtest_hook() + + +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_call() -> Generator[None]: + yield from unraisable_exception_runtest_hook() + + +@pytest.hookimpl(wrapper=True, tryfirst=True) +def pytest_runtest_teardown() -> Generator[None]: + yield from unraisable_exception_runtest_hook() diff --git a/venv/Lib/site-packages/_pytest/warning_types.py b/venv/Lib/site-packages/_pytest/warning_types.py new file mode 100644 index 0000000..4ab14e4 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/warning_types.py @@ -0,0 +1,166 @@ +from __future__ import annotations + +import dataclasses +import inspect +from types import FunctionType +from typing import Any +from typing import final +from typing import Generic +from typing import TypeVar +import warnings + + +class PytestWarning(UserWarning): + """Base class for all warnings emitted by pytest.""" + + __module__ = "pytest" + + +@final +class PytestAssertRewriteWarning(PytestWarning): + """Warning emitted by the pytest assert rewrite module.""" + + __module__ = "pytest" + + +@final +class PytestCacheWarning(PytestWarning): + """Warning emitted by the cache plugin in various situations.""" + + __module__ = "pytest" + + +@final +class PytestConfigWarning(PytestWarning): + """Warning emitted for configuration issues.""" + + __module__ = "pytest" + + +@final +class PytestCollectionWarning(PytestWarning): + """Warning emitted when pytest is not able to collect a file or symbol in a module.""" + + __module__ = "pytest" + + +class PytestDeprecationWarning(PytestWarning, DeprecationWarning): + """Warning class for features that will be removed in a future version.""" + + __module__ = "pytest" + + +class PytestRemovedIn9Warning(PytestDeprecationWarning): + """Warning class for features that will be removed in pytest 9.""" + + __module__ = "pytest" + + +class PytestReturnNotNoneWarning(PytestWarning): + """Warning emitted when a test function is returning value other than None.""" + + __module__ = "pytest" + + +@final +class PytestExperimentalApiWarning(PytestWarning, FutureWarning): + """Warning category used to denote experiments in pytest. + + Use sparingly as the API might change or even be removed completely in a + future version. 
+ """ + + __module__ = "pytest" + + @classmethod + def simple(cls, apiname: str) -> PytestExperimentalApiWarning: + return cls(f"{apiname} is an experimental api that may change over time") + + +@final +class PytestUnhandledCoroutineWarning(PytestReturnNotNoneWarning): + """Warning emitted for an unhandled coroutine. + + A coroutine was encountered when collecting test functions, but was not + handled by any async-aware plugin. + Coroutine test functions are not natively supported. + """ + + __module__ = "pytest" + + +@final +class PytestUnknownMarkWarning(PytestWarning): + """Warning emitted on use of unknown markers. + + See :ref:`mark` for details. + """ + + __module__ = "pytest" + + +@final +class PytestUnraisableExceptionWarning(PytestWarning): + """An unraisable exception was reported. + + Unraisable exceptions are exceptions raised in :meth:`__del__ ` + implementations and similar situations when the exception cannot be raised + as normal. + """ + + __module__ = "pytest" + + +@final +class PytestUnhandledThreadExceptionWarning(PytestWarning): + """An unhandled exception occurred in a :class:`~threading.Thread`. + + Such exceptions don't propagate normally. + """ + + __module__ = "pytest" + + +_W = TypeVar("_W", bound=PytestWarning) + + +@final +@dataclasses.dataclass +class UnformattedWarning(Generic[_W]): + """A warning meant to be formatted during runtime. + + This is used to hold warnings that need to format their message at runtime, + as opposed to a direct message. + """ + + category: type[_W] + template: str + + def format(self, **kwargs: Any) -> _W: + """Return an instance of the warning category, formatted with given kwargs.""" + return self.category(self.template.format(**kwargs)) + + +def warn_explicit_for(method: FunctionType, message: PytestWarning) -> None: + """ + Issue the warning :param:`message` for the definition of the given :param:`method` + + this helps to log warnings for functions defined prior to finding an issue with them + (like hook wrappers being marked in a legacy mechanism) + """ + lineno = method.__code__.co_firstlineno + filename = inspect.getfile(method) + module = method.__module__ + mod_globals = method.__globals__ + try: + warnings.warn_explicit( + message, + type(message), + filename=filename, + module=module, + registry=mod_globals.setdefault("__warningregistry__", {}), + lineno=lineno, + ) + except Warning as w: + # If warnings are errors (e.g. -Werror), location information gets lost, so we add it to the message. + raise type(w)(f"{w}\n at {filename}:{lineno}") from None diff --git a/venv/Lib/site-packages/_pytest/warnings.py b/venv/Lib/site-packages/_pytest/warnings.py new file mode 100644 index 0000000..eeb4772 --- /dev/null +++ b/venv/Lib/site-packages/_pytest/warnings.py @@ -0,0 +1,151 @@ +# mypy: allow-untyped-defs +from __future__ import annotations + +from contextlib import contextmanager +import sys +from typing import Generator +from typing import Literal +import warnings + +from _pytest.config import apply_warning_filters +from _pytest.config import Config +from _pytest.config import parse_warning_filter +from _pytest.main import Session +from _pytest.nodes import Item +from _pytest.terminal import TerminalReporter +import pytest + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line( + "markers", + "filterwarnings(warning): add a warning filter to the given test. 
" + "see https://docs.pytest.org/en/stable/how-to/capture-warnings.html#pytest-mark-filterwarnings ", + ) + + +@contextmanager +def catch_warnings_for_item( + config: Config, + ihook, + when: Literal["config", "collect", "runtest"], + item: Item | None, +) -> Generator[None]: + """Context manager that catches warnings generated in the contained execution block. + + ``item`` can be None if we are not in the context of an item execution. + + Each warning captured triggers the ``pytest_warning_recorded`` hook. + """ + config_filters = config.getini("filterwarnings") + cmdline_filters = config.known_args_namespace.pythonwarnings or [] + with warnings.catch_warnings(record=True) as log: + # mypy can't infer that record=True means log is not None; help it. + assert log is not None + + if not sys.warnoptions: + # If user is not explicitly configuring warning filters, show deprecation warnings by default (#2908). + warnings.filterwarnings("always", category=DeprecationWarning) + warnings.filterwarnings("always", category=PendingDeprecationWarning) + + # To be enabled in pytest 9.0.0. + # warnings.filterwarnings("error", category=pytest.PytestRemovedIn9Warning) + + apply_warning_filters(config_filters, cmdline_filters) + + # apply filters from "filterwarnings" marks + nodeid = "" if item is None else item.nodeid + if item is not None: + for mark in item.iter_markers(name="filterwarnings"): + for arg in mark.args: + warnings.filterwarnings(*parse_warning_filter(arg, escape=False)) + + try: + yield + finally: + for warning_message in log: + ihook.pytest_warning_recorded.call_historic( + kwargs=dict( + warning_message=warning_message, + nodeid=nodeid, + when=when, + location=None, + ) + ) + + +def warning_record_to_str(warning_message: warnings.WarningMessage) -> str: + """Convert a warnings.WarningMessage to a string.""" + warn_msg = warning_message.message + msg = warnings.formatwarning( + str(warn_msg), + warning_message.category, + warning_message.filename, + warning_message.lineno, + warning_message.line, + ) + if warning_message.source is not None: + try: + import tracemalloc + except ImportError: + pass + else: + tb = tracemalloc.get_object_traceback(warning_message.source) + if tb is not None: + formatted_tb = "\n".join(tb.format()) + # Use a leading new line to better separate the (large) output + # from the traceback to the previous warning text. + msg += f"\nObject allocated at:\n{formatted_tb}" + else: + # No need for a leading new line. + url = "https://docs.pytest.org/en/stable/how-to/capture-warnings.html#resource-warnings" + msg += "Enable tracemalloc to get traceback where the object was allocated.\n" + msg += f"See {url} for more info." 
+    return msg
+
+
+@pytest.hookimpl(wrapper=True, tryfirst=True)
+def pytest_runtest_protocol(item: Item) -> Generator[None, object, object]:
+    with catch_warnings_for_item(
+        config=item.config, ihook=item.ihook, when="runtest", item=item
+    ):
+        return (yield)
+
+
+@pytest.hookimpl(wrapper=True, tryfirst=True)
+def pytest_collection(session: Session) -> Generator[None, object, object]:
+    config = session.config
+    with catch_warnings_for_item(
+        config=config, ihook=config.hook, when="collect", item=None
+    ):
+        return (yield)
+
+
+@pytest.hookimpl(wrapper=True)
+def pytest_terminal_summary(
+    terminalreporter: TerminalReporter,
+) -> Generator[None]:
+    config = terminalreporter.config
+    with catch_warnings_for_item(
+        config=config, ihook=config.hook, when="config", item=None
+    ):
+        return (yield)
+
+
+@pytest.hookimpl(wrapper=True)
+def pytest_sessionfinish(session: Session) -> Generator[None]:
+    config = session.config
+    with catch_warnings_for_item(
+        config=config, ihook=config.hook, when="config", item=None
+    ):
+        return (yield)
+
+
+@pytest.hookimpl(wrapper=True)
+def pytest_load_initial_conftests(
+    early_config: Config,
+) -> Generator[None]:
+    with catch_warnings_for_item(
+        config=early_config, ihook=early_config.hook, when="config", item=None
+    ):
+        return (yield)
diff --git a/venv/Lib/site-packages/_yaml/__init__.py b/venv/Lib/site-packages/_yaml/__init__.py
new file mode 100644
index 0000000..7baa8c4
--- /dev/null
+++ b/venv/Lib/site-packages/_yaml/__init__.py
@@ -0,0 +1,33 @@
+# This is a stub package designed to roughly emulate the _yaml
+# extension module, which previously existed as a standalone module
+# and has been moved into the `yaml` package namespace.
+# It does not perfectly mimic its old counterpart, but should get
+# close enough for anyone who's relying on it even when they shouldn't.
+import yaml
+
+# in some circumstances, the yaml module we imported may be from a different version, so we need
+# to tread carefully when poking at it here (it may not have the attributes we expect)
+if not getattr(yaml, '__with_libyaml__', False):
+    from sys import version_info
+
+    exc = ModuleNotFoundError if version_info >= (3, 6) else ImportError
+    raise exc("No module named '_yaml'")
+else:
+    from yaml._yaml import *
+    import warnings
+    warnings.warn(
+        'The _yaml extension module is now located at yaml._yaml'
+        ' and its location is subject to change. To use the'
+        ' LibYAML-based parser and emitter, import from `yaml`:'
+        ' `from yaml import CLoader as Loader, CDumper as Dumper`.',
+        DeprecationWarning
+    )
+    del warnings
+    # Don't `del yaml` here because yaml is actually an existing
+    # namespace member of _yaml.
+
+__name__ = '_yaml'
+# If the module is top-level (i.e. not a part of any specific package)
+# then the attribute should be set to ''.
+# https://docs.python.org/3.8/library/types.html
+__package__ = ''
diff --git a/venv/Lib/site-packages/attr/__init__.py b/venv/Lib/site-packages/attr/__init__.py
new file mode 100644
index 0000000..51b1c25
--- /dev/null
+++ b/venv/Lib/site-packages/attr/__init__.py
@@ -0,0 +1,103 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Classes Without Boilerplate
+"""
+
+from functools import partial
+from typing import Callable
+
+from .
import converters, exceptions, filters, setters, validators +from ._cmp import cmp_using +from ._compat import Protocol +from ._config import get_run_validators, set_run_validators +from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types +from ._make import ( + NOTHING, + Attribute, + Converter, + Factory, + attrib, + attrs, + fields, + fields_dict, + make_class, + validate, +) +from ._next_gen import define, field, frozen, mutable +from ._version_info import VersionInfo + + +s = attributes = attrs +ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + + +class AttrsInstance(Protocol): + pass + + +__all__ = [ + "Attribute", + "AttrsInstance", + "Converter", + "Factory", + "NOTHING", + "asdict", + "assoc", + "astuple", + "attr", + "attrib", + "attributes", + "attrs", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "field", + "fields", + "fields_dict", + "filters", + "frozen", + "get_run_validators", + "has", + "ib", + "make_class", + "mutable", + "resolve_types", + "s", + "set_run_validators", + "setters", + "validate", + "validators", +] + + +def _make_getattr(mod_name: str) -> Callable: + """ + Create a metadata proxy for packaging information that uses *mod_name* in + its warnings and errors. + """ + + def __getattr__(name: str) -> str: + if name not in ("__version__", "__version_info__"): + msg = f"module {mod_name} has no attribute {name}" + raise AttributeError(msg) + + try: + from importlib.metadata import metadata + except ImportError: + from importlib_metadata import metadata + + meta = metadata("attrs") + + if name == "__version_info__": + return VersionInfo._from_version_string(meta["version"]) + + return meta["version"] + + return __getattr__ + + +__getattr__ = _make_getattr(__name__) diff --git a/venv/Lib/site-packages/attr/__init__.pyi b/venv/Lib/site-packages/attr/__init__.pyi new file mode 100644 index 0000000..6ae0a83 --- /dev/null +++ b/venv/Lib/site-packages/attr/__init__.pyi @@ -0,0 +1,388 @@ +import enum +import sys + +from typing import ( + Any, + Callable, + Generic, + Mapping, + Protocol, + Sequence, + TypeVar, + overload, +) + +# `import X as X` is required to make these public +from . import converters as converters +from . import exceptions as exceptions +from . import filters as filters +from . import setters as setters +from . import validators as validators +from ._cmp import cmp_using as cmp_using +from ._typing_compat import AttrsInstance_ +from ._version_info import VersionInfo +from attrs import ( + define as define, + field as field, + mutable as mutable, + frozen as frozen, + _EqOrderType, + _ValidatorType, + _ConverterType, + _ReprArgType, + _OnSetAttrType, + _OnSetAttrArgType, + _FieldTransformer, + _ValidatorArgType, +) + +if sys.version_info >= (3, 10): + from typing import TypeGuard +else: + from typing_extensions import TypeGuard + +if sys.version_info >= (3, 11): + from typing import dataclass_transform +else: + from typing_extensions import dataclass_transform + +__version__: str +__version_info__: VersionInfo +__title__: str +__description__: str +__url__: str +__uri__: str +__author__: str +__email__: str +__license__: str +__copyright__: str + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_FilterType = Callable[["Attribute[_T]", _T], bool] + +# We subclass this here to keep the protocol's qualified name clean. 
+class AttrsInstance(AttrsInstance_, Protocol): + pass + +_A = TypeVar("_A", bound=type[AttrsInstance]) + +class _Nothing(enum.Enum): + NOTHING = enum.auto() + +NOTHING = _Nothing.NOTHING + +# NOTE: Factory lies about its return type to make this possible: +# `x: List[int] # = Factory(list)` +# Work around mypy issue #4554 in the common case by using an overload. +if sys.version_info >= (3, 8): + from typing import Literal + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Callable[[Any], _T], + takes_self: Literal[True], + ) -> _T: ... + @overload + def Factory( + factory: Callable[[], _T], + takes_self: Literal[False], + ) -> _T: ... + +else: + @overload + def Factory(factory: Callable[[], _T]) -> _T: ... + @overload + def Factory( + factory: Union[Callable[[Any], _T], Callable[[], _T]], + takes_self: bool = ..., + ) -> _T: ... + +In = TypeVar("In") +Out = TypeVar("Out") + +class Converter(Generic[In, Out]): + @overload + def __init__(self, converter: Callable[[In], Out]) -> None: ... + @overload + def __init__( + self, + converter: Callable[[In, AttrsInstance, Attribute], Out], + *, + takes_self: Literal[True], + takes_field: Literal[True], + ) -> None: ... + @overload + def __init__( + self, + converter: Callable[[In, Attribute], Out], + *, + takes_field: Literal[True], + ) -> None: ... + @overload + def __init__( + self, + converter: Callable[[In, AttrsInstance], Out], + *, + takes_self: Literal[True], + ) -> None: ... + +class Attribute(Generic[_T]): + name: str + default: _T | None + validator: _ValidatorType[_T] | None + repr: _ReprArgType + cmp: _EqOrderType + eq: _EqOrderType + order: _EqOrderType + hash: bool | None + init: bool + converter: _ConverterType | Converter[Any, _T] | None + metadata: dict[Any, Any] + type: type[_T] | None + kw_only: bool + on_setattr: _OnSetAttrType + alias: str | None + + def evolve(self, **changes: Any) -> "Attribute[Any]": ... + +# NOTE: We had several choices for the annotation to use for type arg: +# 1) Type[_T] +# - Pros: Handles simple cases correctly +# - Cons: Might produce less informative errors in the case of conflicting +# TypeVars e.g. `attr.ib(default='bad', type=int)` +# 2) Callable[..., _T] +# - Pros: Better error messages than #1 for conflicting TypeVars +# - Cons: Terrible error messages for validator checks. +# e.g. attr.ib(type=int, validator=validate_str) +# -> error: Cannot infer function type argument +# 3) type (and do all of the work in the mypy plugin) +# - Pros: Simple here, and we could customize the plugin with our own errors. +# - Cons: Would need to write mypy plugin code to handle all the cases. +# We chose option #1. + +# `attr` lies about its return type to make the following possible: +# attr() -> Any +# attr(8) -> int +# attr(validator=) -> Whatever the callable expects. +# This makes this type of assignments possible: +# x: int = attr(8) +# +# This form catches explicit None or no default but with no other arguments +# returns Any. +@overload +def attrib( + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> Any: ... 
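Editor's note: the overload above returns Any when attrib() is called with no default and no validator; the overloads that follow progressively narrow the return type so that assignments like `x: int = attr.ib(8)` type-check. A minimal sketch of the inference behavior these stubs describe, assuming a standard attrs install (class and field names here are illustrative only):

import attr

@attr.s
class Point:
    x = attr.ib(default=0)        # default overload: inferred as int
    y = attr.ib(default=0.0)      # inferred as float
    name = attr.ib(default="", validator=attr.validators.instance_of(str))
    extra = attr.ib(default=None)  # no usable type info: falls back to Any

print(Point(x=1, y=2.5, name="origin"))
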
+ +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def attrib( + default: None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: type[_T] | None = ..., + converter: _ConverterType | Converter[Any, _T] | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def attrib( + default: _T, + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: type[_T] | None = ..., + converter: _ConverterType | Converter[Any, _T] | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def attrib( + default: _T | None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + type: object = ..., + converter: _ConverterType | Converter[Any, _T] | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., +) -> Any: ... +@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: _C, + these: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., + unsafe_hash: bool | None = ..., +) -> _C: ... 
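Editor's note: the overload above models the bare `@attr.s` form, where the class itself is passed as maybe_cls; the overload that follows models the parameterized `@attr.s(...)` call, which returns a decorator. A brief sketch of both call forms, assuming a standard attrs install:

import attr

@attr.s  # bare form: the class itself is passed as maybe_cls
class Point:
    x = attr.ib(default=0)

@attr.s(frozen=True, slots=True)  # parameterized form: returns a decorator
class FrozenPoint:
    x = attr.ib(default=0)

fp = FrozenPoint(x=1)
try:
    fp.x = 2
except attr.exceptions.FrozenInstanceError:
    print("frozen instances reject attribute assignment")
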
+@overload +@dataclass_transform(order_default=True, field_specifiers=(attrib, field)) +def attrs( + maybe_cls: None = ..., + these: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + auto_detect: bool = ..., + collect_by_mro: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., + unsafe_hash: bool | None = ..., +) -> Callable[[_C], _C]: ... +def fields(cls: type[AttrsInstance]) -> Any: ... +def fields_dict(cls: type[AttrsInstance]) -> dict[str, Attribute[Any]]: ... +def validate(inst: AttrsInstance) -> None: ... +def resolve_types( + cls: _A, + globalns: dict[str, Any] | None = ..., + localns: dict[str, Any] | None = ..., + attribs: list[Attribute[Any]] | None = ..., + include_extras: bool = ..., +) -> _A: ... + +# TODO: add support for returning a proper attrs class from the mypy plugin +# we use Any instead of _CountingAttr so that e.g. `make_class('Foo', +# [attr.ib()])` is valid +def make_class( + name: str, + attrs: list[str] | tuple[str, ...] | dict[str, Any], + bases: tuple[type, ...] = ..., + class_body: dict[str, Any] | None = ..., + repr_ns: str | None = ..., + repr: bool = ..., + cmp: _EqOrderType | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + collect_by_mro: bool = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., +) -> type: ... + +# _funcs -- + +# TODO: add support for returning TypedDict from the mypy plugin +# FIXME: asdict/astuple do not honor their factory args. Waiting on one of +# these: +# https://github.com/python/mypy/issues/4236 +# https://github.com/python/typing/issues/253 +# XXX: remember to fix attrs.asdict/astuple too! +def asdict( + inst: AttrsInstance, + recurse: bool = ..., + filter: _FilterType[Any] | None = ..., + dict_factory: type[Mapping[Any, Any]] = ..., + retain_collection_types: bool = ..., + value_serializer: Callable[[type, Attribute[Any], Any], Any] | None = ..., + tuple_keys: bool | None = ..., +) -> dict[str, Any]: ... + +# TODO: add support for returning NamedTuple from the mypy plugin +def astuple( + inst: AttrsInstance, + recurse: bool = ..., + filter: _FilterType[Any] | None = ..., + tuple_factory: type[Sequence[Any]] = ..., + retain_collection_types: bool = ..., +) -> tuple[Any, ...]: ... +def has(cls: type) -> TypeGuard[type[AttrsInstance]]: ... +def assoc(inst: _T, **changes: Any) -> _T: ... +def evolve(inst: _T, **changes: Any) -> _T: ... + +# _config -- + +def set_run_validators(run: bool) -> None: ... +def get_run_validators() -> bool: ... 
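Editor's note: the declarations stubbed above (fields, fields_dict, make_class, asdict, astuple, has, evolve) cover attrs' runtime introspection and dynamic class creation. A short usage sketch, assuming a standard attrs install; the class name "C" is illustrative only:

import attr

C = attr.make_class("C", ["x", "y"])      # dynamic counterpart of an @attr.s class
c = C(1, 2)

print(attr.has(C))                        # True: C is an attrs class
print([a.name for a in attr.fields(C)])   # ['x', 'y']
print(attr.asdict(c))                     # {'x': 1, 'y': 2}
print(attr.astuple(c))                    # (1, 2)
print(attr.evolve(c, y=3))                # C(x=1, y=3)
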
+ +# aliases -- + +s = attributes = attrs +ib = attr = attrib +dataclass = attrs # Technically, partial(attrs, auto_attribs=True) ;) diff --git a/venv/Lib/site-packages/attr/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..53008c7 Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attr/__pycache__/_cmp.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/_cmp.cpython-311.pyc new file mode 100644 index 0000000..97324ca Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/_cmp.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attr/__pycache__/_compat.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/_compat.cpython-311.pyc new file mode 100644 index 0000000..9fd72c2 Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/_compat.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attr/__pycache__/_config.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/_config.cpython-311.pyc new file mode 100644 index 0000000..0c85879 Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/_config.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attr/__pycache__/_funcs.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/_funcs.cpython-311.pyc new file mode 100644 index 0000000..87d87fa Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/_funcs.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attr/__pycache__/_make.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/_make.cpython-311.pyc new file mode 100644 index 0000000..7cec52b Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/_make.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attr/__pycache__/_next_gen.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/_next_gen.cpython-311.pyc new file mode 100644 index 0000000..13411b2 Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/_next_gen.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attr/__pycache__/_version_info.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/_version_info.cpython-311.pyc new file mode 100644 index 0000000..1619539 Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/_version_info.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attr/__pycache__/converters.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/converters.cpython-311.pyc new file mode 100644 index 0000000..7d76573 Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/converters.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attr/__pycache__/exceptions.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/exceptions.cpython-311.pyc new file mode 100644 index 0000000..13750b8 Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/exceptions.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attr/__pycache__/filters.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/filters.cpython-311.pyc new file mode 100644 index 0000000..255566a Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/filters.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attr/__pycache__/setters.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/setters.cpython-311.pyc new file mode 100644 index 0000000..df0c453 Binary files /dev/null and 
b/venv/Lib/site-packages/attr/__pycache__/setters.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/attr/__pycache__/validators.cpython-311.pyc b/venv/Lib/site-packages/attr/__pycache__/validators.cpython-311.pyc
new file mode 100644
index 0000000..5ba3cb2
Binary files /dev/null and b/venv/Lib/site-packages/attr/__pycache__/validators.cpython-311.pyc differ
diff --git a/venv/Lib/site-packages/attr/_cmp.py b/venv/Lib/site-packages/attr/_cmp.py
new file mode 100644
index 0000000..f367bb3
--- /dev/null
+++ b/venv/Lib/site-packages/attr/_cmp.py
@@ -0,0 +1,160 @@
+# SPDX-License-Identifier: MIT
+
+
+import functools
+import types
+
+from ._make import _make_ne
+
+
+_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="}
+
+
+def cmp_using(
+    eq=None,
+    lt=None,
+    le=None,
+    gt=None,
+    ge=None,
+    require_same_type=True,
+    class_name="Comparable",
+):
+    """
+    Create a class that can be passed into `attrs.field`'s ``eq``, ``order``,
+    and ``cmp`` arguments to customize field comparison.
+
+    The resulting class will have a full set of ordering methods if at least
+    one of ``{lt, le, gt, ge}`` and ``eq`` are provided.
+
+    Args:
+        eq (typing.Callable | None):
+            Callable used to evaluate equality of two objects.
+
+        lt (typing.Callable | None):
+            Callable used to evaluate whether one object is less than another
+            object.
+
+        le (typing.Callable | None):
+            Callable used to evaluate whether one object is less than or equal
+            to another object.
+
+        gt (typing.Callable | None):
+            Callable used to evaluate whether one object is greater than
+            another object.
+
+        ge (typing.Callable | None):
+            Callable used to evaluate whether one object is greater than or
+            equal to another object.
+
+        require_same_type (bool):
+            When `True`, equality and ordering methods will return
+            `NotImplemented` if objects are not of the same type.
+
+        class_name (str | None): Name of class. Defaults to "Comparable".
+
+    See `comparison` for more details.
+
+    .. versionadded:: 21.1.0
+    """
+
+    body = {
+        "__slots__": ["value"],
+        "__init__": _make_init(),
+        "_requirements": [],
+        "_is_comparable_to": _is_comparable_to,
+    }
+
+    # Add operations.
+    num_order_functions = 0
+    has_eq_function = False
+
+    if eq is not None:
+        has_eq_function = True
+        body["__eq__"] = _make_operator("eq", eq)
+        body["__ne__"] = _make_ne()
+
+    if lt is not None:
+        num_order_functions += 1
+        body["__lt__"] = _make_operator("lt", lt)
+
+    if le is not None:
+        num_order_functions += 1
+        body["__le__"] = _make_operator("le", le)
+
+    if gt is not None:
+        num_order_functions += 1
+        body["__gt__"] = _make_operator("gt", gt)
+
+    if ge is not None:
+        num_order_functions += 1
+        body["__ge__"] = _make_operator("ge", ge)
+
+    type_ = types.new_class(
+        class_name, (object,), {}, lambda ns: ns.update(body)
+    )
+
+    # Add same type requirement.
+    if require_same_type:
+        type_._requirements.append(_check_same_type)
+
+    # Add total ordering if at least one operation was defined.
+    if 0 < num_order_functions < 4:
+        if not has_eq_function:
+            # functools.total_ordering requires __eq__ to be defined,
+            # so raise early error here to keep a nice stack.
+            msg = "eq must be defined in order to complete ordering from lt, le, gt, ge."
+            raise ValueError(msg)
+        type_ = functools.total_ordering(type_)
+
+    return type_
+
+
+def _make_init():
+    """
+    Create __init__ method.
+    """
+
+    def __init__(self, value):
+        """
+        Initialize object with *value*.
+ """ + self.value = value + + return __init__ + + +def _make_operator(name, func): + """ + Create operator method. + """ + + def method(self, other): + if not self._is_comparable_to(other): + return NotImplemented + + result = func(self.value, other.value) + if result is NotImplemented: + return NotImplemented + + return result + + method.__name__ = f"__{name}__" + method.__doc__ = ( + f"Return a {_operation_names[name]} b. Computed by attrs." + ) + + return method + + +def _is_comparable_to(self, other): + """ + Check whether `other` is comparable to `self`. + """ + return all(func(self, other) for func in self._requirements) + + +def _check_same_type(self, other): + """ + Return True if *self* and *other* are of the same type, False otherwise. + """ + return other.value.__class__ is self.value.__class__ diff --git a/venv/Lib/site-packages/attr/_cmp.pyi b/venv/Lib/site-packages/attr/_cmp.pyi new file mode 100644 index 0000000..cc7893b --- /dev/null +++ b/venv/Lib/site-packages/attr/_cmp.pyi @@ -0,0 +1,13 @@ +from typing import Any, Callable + +_CompareWithType = Callable[[Any, Any], bool] + +def cmp_using( + eq: _CompareWithType | None = ..., + lt: _CompareWithType | None = ..., + le: _CompareWithType | None = ..., + gt: _CompareWithType | None = ..., + ge: _CompareWithType | None = ..., + require_same_type: bool = ..., + class_name: str = ..., +) -> type: ... diff --git a/venv/Lib/site-packages/attr/_compat.py b/venv/Lib/site-packages/attr/_compat.py new file mode 100644 index 0000000..104eeb0 --- /dev/null +++ b/venv/Lib/site-packages/attr/_compat.py @@ -0,0 +1,103 @@ +# SPDX-License-Identifier: MIT + +import inspect +import platform +import sys +import threading + +from collections.abc import Mapping, Sequence # noqa: F401 +from typing import _GenericAlias + + +PYPY = platform.python_implementation() == "PyPy" +PY_3_8_PLUS = sys.version_info[:2] >= (3, 8) +PY_3_9_PLUS = sys.version_info[:2] >= (3, 9) +PY_3_10_PLUS = sys.version_info[:2] >= (3, 10) +PY_3_11_PLUS = sys.version_info[:2] >= (3, 11) +PY_3_12_PLUS = sys.version_info[:2] >= (3, 12) +PY_3_13_PLUS = sys.version_info[:2] >= (3, 13) +PY_3_14_PLUS = sys.version_info[:2] >= (3, 14) + + +if sys.version_info < (3, 8): + try: + from typing_extensions import Protocol + except ImportError: # pragma: no cover + Protocol = object +else: + from typing import Protocol # noqa: F401 + +if PY_3_14_PLUS: # pragma: no cover + import annotationlib + + _get_annotations = annotationlib.get_annotations + +else: + + def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + return cls.__dict__.get("__annotations__", {}) + + +class _AnnotationExtractor: + """ + Extract type annotations from a callable, returning None whenever there + is none. + """ + + __slots__ = ["sig"] + + def __init__(self, callable): + try: + self.sig = inspect.signature(callable) + except (ValueError, TypeError): # inspect failed + self.sig = None + + def get_first_param_type(self): + """ + Return the type annotation of the first argument if it's not empty. + """ + if not self.sig: + return None + + params = list(self.sig.parameters.values()) + if params and params[0].annotation is not inspect.Parameter.empty: + return params[0].annotation + + return None + + def get_return_type(self): + """ + Return the return type if it's not empty. + """ + if ( + self.sig + and self.sig.return_annotation is not inspect.Signature.empty + ): + return self.sig.return_annotation + + return None + + +# Thread-local global to track attrs instances which are already being repr'd. 
+# This is needed because there is no other (thread-safe) way to pass info +# about the instances that are already being repr'd through the call stack +# in order to ensure we don't perform infinite recursion. +# +# For instance, if an instance contains a dict which contains that instance, +# we need to know that we're already repr'ing the outside instance from within +# the dict's repr() call. +# +# This lives here rather than in _make.py so that the functions in _make.py +# don't have a direct reference to the thread-local in their globals dict. +# If they have such a reference, it breaks cloudpickle. +repr_context = threading.local() + + +def get_generic_base(cl): + """If this is a generic class (A[str]), return the generic base for it.""" + if cl.__class__ is _GenericAlias: + return cl.__origin__ + return None diff --git a/venv/Lib/site-packages/attr/_config.py b/venv/Lib/site-packages/attr/_config.py new file mode 100644 index 0000000..9c245b1 --- /dev/null +++ b/venv/Lib/site-packages/attr/_config.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: MIT + +__all__ = ["set_run_validators", "get_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()` + instead. + """ + if not isinstance(run, bool): + msg = "'run' must be bool." + raise TypeError(msg) + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()` + instead. + """ + return _run_validators diff --git a/venv/Lib/site-packages/attr/_funcs.py b/venv/Lib/site-packages/attr/_funcs.py new file mode 100644 index 0000000..355cef4 --- /dev/null +++ b/venv/Lib/site-packages/attr/_funcs.py @@ -0,0 +1,522 @@ +# SPDX-License-Identifier: MIT + + +import copy + +from ._compat import PY_3_9_PLUS, get_generic_base +from ._make import _OBJ_SETATTR, NOTHING, fields +from .exceptions import AttrsAttributeNotFoundError + + +def asdict( + inst, + recurse=True, + filter=None, + dict_factory=dict, + retain_collection_types=False, + value_serializer=None, +): + """ + Return the *attrs* attribute values of *inst* as a dict. + + Optionally recurse into other *attrs*-decorated classes. + + Args: + inst: Instance of an *attrs*-decorated class. + + recurse (bool): Recurse into classes that are also *attrs*-decorated. + + filter (~typing.Callable): + A callable whose return code determines whether an attribute or + element is included (`True`) or dropped (`False`). Is called with + the `attrs.Attribute` as the first argument and the value as the + second argument. + + dict_factory (~typing.Callable): + A callable to produce dictionaries from. For example, to produce + ordered dictionaries instead of normal Python dictionaries, pass in + ``collections.OrderedDict``. + + retain_collection_types (bool): + Do not convert to `list` when encountering an attribute whose type + is `tuple` or `set`. Only meaningful if *recurse* is `True`. + + value_serializer (typing.Callable | None): + A hook that is called for every attribute or dict key/value. It + receives the current instance, field and value and must return the + (updated) value. The hook is run *after* the optional *filter* has + been applied. 
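To illustrate `asdict` and the *value_serializer* hook documented above, a minimal sketch (the `Event` class and `serialize` helper are hypothetical):

    import datetime
    import attr

    @attr.s
    class Event:
        name = attr.ib()
        when = attr.ib()

    def serialize(inst, field, value):
        # Runs after the optional *filter*; may rewrite any value.
        if isinstance(value, datetime.datetime):
            return value.isoformat()
        return value

    d = attr.asdict(Event("deploy", datetime.datetime(2024, 1, 1)),
                    value_serializer=serialize)
    assert d == {"name": "deploy", "when": "2024-01-01T00:00:00"}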
+ + Returns: + Return type of *dict_factory*. + + Raises: + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + .. versionadded:: 16.0.0 *dict_factory* + .. versionadded:: 16.1.0 *retain_collection_types* + .. versionadded:: 20.3.0 *value_serializer* + .. versionadded:: 21.3.0 + If a dict has a collection for a key, it is serialized as a tuple. + """ + attrs = fields(inst.__class__) + rv = dict_factory() + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + + if value_serializer is not None: + v = value_serializer(inst, a, v) + + if recurse is True: + if has(v.__class__): + rv[a.name] = asdict( + v, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(v, (tuple, list, set, frozenset)): + cf = v.__class__ if retain_collection_types is True else list + items = [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in v + ] + try: + rv[a.name] = cf(items) + except TypeError: + if not issubclass(cf, tuple): + raise + # Workaround for TypeError: cf.__new__() missing 1 required + # positional argument (which happens, for example, for a namedtuple) + rv[a.name] = cf(*items) + elif isinstance(v, dict): + df = dict_factory + rv[a.name] = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in v.items() + ) + else: + rv[a.name] = v + else: + rv[a.name] = v + return rv + + +def _asdict_anything( + val, + is_key, + filter, + dict_factory, + retain_collection_types, + value_serializer, +): + """ + ``asdict`` only works on attrs instances; this works on anything. + """ + if getattr(val.__class__, "__attrs_attrs__", None) is not None: + # Attrs class. + rv = asdict( + val, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(val, (tuple, list, set, frozenset)): + if retain_collection_types is True: + cf = val.__class__ + elif is_key: + cf = tuple + else: + cf = list + + rv = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in val + ] + ) + elif isinstance(val, dict): + df = dict_factory + rv = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in val.items() + ) + else: + rv = val + if value_serializer is not None: + rv = value_serializer(None, None, rv) + + return rv + + +def astuple( + inst, + recurse=True, + filter=None, + tuple_factory=tuple, + retain_collection_types=False, +): + """ + Return the *attrs* attribute values of *inst* as a tuple. + + Optionally recurse into other *attrs*-decorated classes.
+ + Args: + inst: Instance of an *attrs*-decorated class. + + recurse (bool): + Recurse into classes that are also *attrs*-decorated. + + filter (~typing.Callable): + A callable whose return code determines whether an attribute or + element is included (`True`) or dropped (`False`). Is called with + the `attrs.Attribute` as the first argument and the value as the + second argument. + + tuple_factory (~typing.Callable): + A callable to produce tuples from. For example, to produce lists + instead of tuples. + + retain_collection_types (bool): + Do not convert to `list` or `dict` when encountering an attribute + whose type is `tuple`, `dict` or `set`. Only meaningful if + *recurse* is `True`. + + Returns: + Return type of *tuple_factory* + + Raises: + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + .. versionadded:: 16.2.0 + """ + attrs = fields(inst.__class__) + rv = [] + retain = retain_collection_types # Very long. :/ + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + if recurse is True: + if has(v.__class__): + rv.append( + astuple( + v, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + ) + elif isinstance(v, (tuple, list, set, frozenset)): + cf = v.__class__ if retain is True else list + items = [ + ( + astuple( + j, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(j.__class__) + else j + ) + for j in v + ] + try: + rv.append(cf(items)) + except TypeError: + if not issubclass(cf, tuple): + raise + # Workaround for TypeError: cf.__new__() missing 1 required + # positional argument (which happens, for example, for a namedtuple) + rv.append(cf(*items)) + elif isinstance(v, dict): + df = v.__class__ if retain is True else dict + rv.append( + df( + ( + ( + astuple( + kk, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(kk.__class__) + else kk + ), + ( + astuple( + vv, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(vv.__class__) + else vv + ), + ) + for kk, vv in v.items() + ) + ) + else: + rv.append(v) + else: + rv.append(v) + + return rv if tuple_factory is list else tuple_factory(rv) + + +def has(cls): + """ + Check whether *cls* is a class with *attrs* attributes. + + Args: + cls (type): Class to introspect. + + Raises: + TypeError: If *cls* is not a class. + + Returns: + bool: + """ + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is not None: + return True + + # No attrs, maybe it's a specialized generic (A[str])? + generic_base = get_generic_base(cls) + if generic_base is not None: + generic_attrs = getattr(generic_base, "__attrs_attrs__", None) + if generic_attrs is not None: + # Stick it on here for speed next time. + cls.__attrs_attrs__ = generic_attrs + return generic_attrs is not None + return False + + +def assoc(inst, **changes): + """ + Copy *inst* and apply *changes*. + + This is different from `evolve`, which applies the changes to the + arguments that create the new instance. + + `evolve`'s behavior is preferable, but there are `edge cases`_ where it + doesn't work. Therefore `assoc` is deprecated, but will not be removed. + + .. _`edge cases`: https://github.com/python-attrs/attrs/issues/251 + + Args: + inst: Instance of a class with *attrs* attributes. + + changes: Keyword changes in the new copy. + + Returns: + A copy of inst with *changes* incorporated.
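`has` above is a cheap introspection predicate; a minimal sketch (class `A` is hypothetical):

    import attr

    @attr.s
    class A:
        x = attr.ib()

    assert attr.has(A)          # attrs-decorated classes carry __attrs_attrs__
    assert not attr.has(dict)   # plain classes do not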
+ + Raises: + attrs.exceptions.AttrsAttributeNotFoundError: + If *attr_name* couldn't be found on *cls*. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + .. deprecated:: 17.1.0 + Use `attrs.evolve` instead if you can. This function will not be + removed due to the slightly different approach compared to + `attrs.evolve`, though. + """ + new = copy.copy(inst) + attrs = fields(inst.__class__) + for k, v in changes.items(): + a = getattr(attrs, k, NOTHING) + if a is NOTHING: + msg = f"{k} is not an attrs attribute on {new.__class__}." + raise AttrsAttributeNotFoundError(msg) + _OBJ_SETATTR(new, k, v) + return new + + +def evolve(*args, **changes): + """ + Create a new instance, based on the first positional argument with + *changes* applied. + + Args: + + inst: + Instance of a class with *attrs* attributes. *inst* must be passed + as a positional argument. + + changes: + Keyword changes in the new copy. + + Returns: + A copy of inst with *changes* incorporated. + + Raises: + TypeError: + If *attr_name* couldn't be found in the class ``__init__``. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + .. versionadded:: 17.1.0 + .. deprecated:: 23.1.0 + It is now deprecated to pass the instance using the keyword argument + *inst*. It will raise a warning until at least April 2024, after which + it will become an error. Always pass the instance as a positional + argument. + .. versionchanged:: 24.1.0 + *inst* can't be passed as a keyword argument anymore. + """ + try: + (inst,) = args + except ValueError: + msg = ( + f"evolve() takes 1 positional argument, but {len(args)} were given" + ) + raise TypeError(msg) from None + + cls = inst.__class__ + attrs = fields(cls) + for a in attrs: + if not a.init: + continue + attr_name = a.name # To deal with private attributes. + init_name = a.alias + if init_name not in changes: + changes[init_name] = getattr(inst, attr_name) + + return cls(**changes) + + +def resolve_types( + cls, globalns=None, localns=None, attribs=None, include_extras=True +): + """ + Resolve any strings and forward annotations in type annotations. + + This is only required if you need concrete types in :class:`Attribute`'s + *type* field. In other words, you don't need to resolve your types if you + only use them for static type checking. + + With no arguments, names will be looked up in the module in which the class + was created. If this is not what you want, for example, if the name only + exists inside a method, you may pass *globalns* or *localns* to specify + other dictionaries in which to look up these names. See the docs of + `typing.get_type_hints` for more details. + + Args: + cls (type): Class to resolve. + + globalns (dict | None): Dictionary containing global variables. + + localns (dict | None): Dictionary containing local variables. + + attribs (list | None): + List of attribs for the given class. This is necessary when calling + from inside a ``field_transformer`` since *cls* is not an *attrs* + class yet. + + include_extras (bool): + Resolve more accurately, if possible. Pass ``include_extras`` to + ``typing.get_type_hints``, if supported by the typing module. On + supported Python versions (3.9+), this resolves the types more + accurately. + + Raises: + TypeError: If *cls* is not a class. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class and you didn't pass any attribs. + + NameError: If types cannot be resolved because of missing variables.
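`evolve` above is the supported way to derive modified copies, including from frozen instances; a minimal sketch with a hypothetical `Config` class:

    import attr

    @attr.s(frozen=True)
    class Config:
        host = attr.ib()
        port = attr.ib(default=8080)

    c = attr.evolve(Config("localhost"), port=9090)
    assert (c.host, c.port) == ("localhost", 9090)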
+ + Returns: + *cls* so you can use this function also as a class decorator. Please + note that you have to apply it **after** `attrs.define`. That means the + decorator has to come in the line **before** `attrs.define`. + + .. versionadded:: 20.1.0 + .. versionadded:: 21.1.0 *attribs* + .. versionadded:: 23.1.0 *include_extras* + """ + # Since calling get_type_hints is expensive we cache whether we've + # done it already. + if getattr(cls, "__attrs_types_resolved__", None) != cls: + import typing + + kwargs = {"globalns": globalns, "localns": localns} + + if PY_3_9_PLUS: + kwargs["include_extras"] = include_extras + + hints = typing.get_type_hints(cls, **kwargs) + for field in fields(cls) if attribs is None else attribs: + if field.name in hints: + # Since fields have been frozen we must work around it. + _OBJ_SETATTR(field, "type", hints[field.name]) + # We store the class we resolved so that subclasses know they haven't + # been resolved. + cls.__attrs_types_resolved__ = cls + + # Return the class so you can use it as a decorator too. + return cls diff --git a/venv/Lib/site-packages/attr/_make.py b/venv/Lib/site-packages/attr/_make.py new file mode 100644 index 0000000..bf00c5f --- /dev/null +++ b/venv/Lib/site-packages/attr/_make.py @@ -0,0 +1,2960 @@ +# SPDX-License-Identifier: MIT + +from __future__ import annotations + +import abc +import contextlib +import copy +import enum +import functools +import inspect +import itertools +import linecache +import sys +import types +import typing + +from operator import itemgetter + +# We need to import _compat itself in addition to the _compat members to avoid +# having the thread-local in the globals here. +from . import _compat, _config, setters +from ._compat import ( + PY_3_8_PLUS, + PY_3_10_PLUS, + PY_3_11_PLUS, + _AnnotationExtractor, + _get_annotations, + get_generic_base, +) +from .exceptions import ( + DefaultAlreadySetError, + FrozenInstanceError, + NotAnAttrsClassError, + UnannotatedAttributeError, +) + + +# This is used at least twice, so cache it here. +_OBJ_SETATTR = object.__setattr__ +_INIT_FACTORY_PAT = "__attr_factory_%s" +_CLASSVAR_PREFIXES = ( + "typing.ClassVar", + "t.ClassVar", + "ClassVar", + "typing_extensions.ClassVar", +) +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_HASH_CACHE_FIELD = "_attrs_cached_hash" + +_EMPTY_METADATA_SINGLETON = types.MappingProxyType({}) + +# Unique object for unequivocal getattr() defaults. +_SENTINEL = object() + +_DEFAULT_ON_SETATTR = setters.pipe(setters.convert, setters.validate) + + +class _Nothing(enum.Enum): + """ + Sentinel to indicate the lack of a value when `None` is ambiguous. + + If extending attrs, you can use ``typing.Literal[NOTHING]`` to show + that a value may be ``NOTHING``. + + .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + .. versionchanged:: 22.2.0 ``NOTHING`` is now an ``enum.Enum`` variant. + """ + + NOTHING = enum.auto() + + def __repr__(self): + return "NOTHING" + + def __bool__(self): + return False + + +NOTHING = _Nothing.NOTHING +""" +Sentinel to indicate the lack of a value when `None` is ambiguous. +""" + + +class _CacheHashWrapper(int): + """ + An integer subclass that pickles / copies as None + + This is used for non-slots classes with ``cache_hash=True``, to avoid + serializing a potentially (even likely) invalid hash value. 
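For `resolve_types` above, a minimal sketch of the decorator form (the `Node` class is hypothetical); note that it must be applied after the attrs decorator, so it is written above it:

    from typing import Optional
    import attr

    @attr.resolve_types          # applied after attr.s, hence the line above it
    @attr.s(auto_attribs=True)
    class Node:
        value: int
        next: Optional["Node"] = None

    # The stringified forward reference is now a concrete type on the Attribute:
    assert attr.fields(Node).next.type == Optional[Node]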
Since `None` + is the default value for uncalculated hashes, whenever this is copied, + the copy's value for the hash should automatically reset. + + See GH #613 for more details. + """ + + def __reduce__(self, _none_constructor=type(None), _args=()): # noqa: B008 + return _none_constructor, _args + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Create a new field / attribute on a class. + + Identical to `attrs.field`, except it's not keyword-only. + + Consider using `attrs.field` in new code (``attr.ib`` will *never* go away, + though). + + .. warning:: + + Does **nothing** unless the class is also decorated with + `attr.s` (or similar)! + + + .. versionadded:: 15.2.0 *convert* + .. versionadded:: 16.3.0 *metadata* + .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. + .. versionchanged:: 17.1.0 + *hash* is `None` and therefore mirrors *eq* by default. + .. versionadded:: 17.3.0 *type* + .. deprecated:: 17.4.0 *convert* + .. versionadded:: 17.4.0 + *converter* as a replacement for the deprecated *convert* to achieve + consistency with other noun-based arguments. + .. versionadded:: 18.1.0 + ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. + .. versionadded:: 18.2.0 *kw_only* + .. versionchanged:: 19.2.0 *convert* keyword argument removed. + .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.3.0 *kw_only* backported to Python 2 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 22.2.0 *alias* + """ + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq, order, True + ) + + if hash is not None and hash is not True and hash is not False: + msg = "Invalid value for hash. Must be True, False, or None." + raise TypeError(msg) + + if factory is not None: + if default is not NOTHING: + msg = ( + "The `default` and `factory` arguments are mutually exclusive." + ) + raise ValueError(msg) + if not callable(factory): + msg = "The `factory` argument must be a callable." + raise ValueError(msg) + default = Factory(factory) + + if metadata is None: + metadata = {} + + # Apply syntactic sugar by auto-wrapping. + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + if validator and isinstance(validator, (list, tuple)): + validator = and_(*validator) + + if converter and isinstance(converter, (list, tuple)): + converter = pipe(*converter) + + return _CountingAttr( + default=default, + validator=validator, + repr=repr, + cmp=None, + hash=hash, + init=init, + converter=converter, + metadata=metadata, + type=type, + kw_only=kw_only, + eq=eq, + eq_key=eq_key, + order=order, + order_key=order_key, + on_setattr=on_setattr, + alias=alias, + ) + + +def _compile_and_eval(script, globs, locs=None, filename=""): + """ + Evaluate the script with the given global (globs) and local (locs) + variables. + """ + bytecode = compile(script, filename, "exec") + eval(bytecode, globs, locs) + + +def _make_method(name, script, filename, globs, locals=None): + """ + Create the method with the script given and return the method object. 
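`attrib` above is the original field-definition API; a minimal sketch combining *converter*, *validator*, and *default* (the `Server` class is hypothetical):

    import attr

    @attr.s
    class Server:
        port = attr.ib(
            converter=int,                               # runs first
            validator=attr.validators.instance_of(int),  # sees the converted value
            default=8080,
        )

    assert Server("9090").port == 9090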
+ """ + locs = {} if locals is None else locals + + # In order of debuggers like PDB being able to step through the code, + # we add a fake linecache entry. + count = 1 + base_filename = filename + while True: + linecache_tuple = ( + len(script), + None, + script.splitlines(True), + filename, + ) + old_val = linecache.cache.setdefault(filename, linecache_tuple) + if old_val == linecache_tuple: + break + + filename = f"{base_filename[:-1]}-{count}>" + count += 1 + + _compile_and_eval(script, globs, locs, filename) + + return locs[name] + + +def _make_attr_tuple_class(cls_name, attr_names): + """ + Create a tuple subclass to hold `Attribute`s for an `attrs` class. + + The subclass is a bare tuple with properties for names. + + class MyClassAttributes(tuple): + __slots__ = () + x = property(itemgetter(0)) + """ + attr_class_name = f"{cls_name}Attributes" + attr_class_template = [ + f"class {attr_class_name}(tuple):", + " __slots__ = ()", + ] + if attr_names: + for i, attr_name in enumerate(attr_names): + attr_class_template.append( + f" {attr_name} = _attrs_property(_attrs_itemgetter({i}))" + ) + else: + attr_class_template.append(" pass") + globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} + _compile_and_eval("\n".join(attr_class_template), globs) + return globs[attr_class_name] + + +# Tuple class for extracted attributes from a class definition. +# `base_attrs` is a subset of `attrs`. +_Attributes = _make_attr_tuple_class( + "_Attributes", + [ + # all attributes to build dunder methods for + "attrs", + # attributes that have been inherited + "base_attrs", + # map inherited attributes to their originating classes + "base_attrs_map", + ], +) + + +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The string comparison hack is used to avoid evaluating all string + annotations which would put attrs-based classes at a performance + disadvantage compared to plain old classes. + """ + annot = str(annot) + + # Annotation can be quoted. + if annot.startswith(("'", '"')) and annot.endswith(("'", '"')): + annot = annot[1:-1] + + return annot.startswith(_CLASSVAR_PREFIXES) + + +def _has_own_attribute(cls, attrib_name): + """ + Check whether *cls* defines *attrib_name* (and doesn't just inherit it). + """ + return attrib_name in cls.__dict__ + + +def _collect_base_attrs(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in reversed(cls.__mro__[1:-1]): + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.inherited or a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) # noqa: PLW2901 + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + # For each name, only keep the freshest definition i.e. the furthest at the + # back. base_attr_map is fine because it gets overwritten with every new + # instance. + filtered = [] + seen = set() + for a in reversed(base_attrs): + if a.name in seen: + continue + filtered.insert(0, a) + seen.add(a.name) + + return filtered, base_attr_map + + +def _collect_base_attrs_broken(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + + N.B. *taken_attr_names* will be mutated. + + Adhere to the old incorrect behavior. 
+ + Notably it collects from the front and considers inherited attributes which + leads to the buggy behavior reported in #428. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in cls.__mro__[1:-1]: + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) # noqa: PLW2901 + taken_attr_names.add(a.name) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + return base_attrs, base_attr_map + + +def _transform_attrs( + cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer +): + """ + Transform all `_CountingAttr`s on a class into `Attribute`s. + + If *these* is passed, use that and don't look for them on the class. + + If *collect_by_mro* is True, collect them in the correct MRO order, + otherwise use the old -- incorrect -- order. See #428. + + Return an `_Attributes`. + """ + cd = cls.__dict__ + anns = _get_annotations(cls) + + if these is not None: + ca_list = list(these.items()) + elif auto_attribs is True: + ca_names = { + name + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + + if not isinstance(a, _CountingAttr): + a = attrib() if a is NOTHING else attrib(default=a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if len(unannotated) > 0: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join( + sorted(unannotated, key=lambda n: cd.get(n).counter) + ) + + "." + ) + else: + ca_list = sorted( + ( + (name, attr) + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + ), + key=lambda e: e[1].counter, + ) + + own_attrs = [ + Attribute.from_counting_attr( + name=attr_name, ca=ca, type=anns.get(attr_name) + ) + for attr_name, ca in ca_list + ] + + if collect_by_mro: + base_attrs, base_attr_map = _collect_base_attrs( + cls, {a.name for a in own_attrs} + ) + else: + base_attrs, base_attr_map = _collect_base_attrs_broken( + cls, {a.name for a in own_attrs} + ) + + if kw_only: + own_attrs = [a.evolve(kw_only=True) for a in own_attrs] + base_attrs = [a.evolve(kw_only=True) for a in base_attrs] + + attrs = base_attrs + own_attrs + + # Mandatory vs non-mandatory attr order only matters when they are part of + # the __init__ signature and when they aren't kw_only (which are moved to + # the end and can be mandatory or non-mandatory in any order, as they will + # be specified as keyword args anyway). Check the order of those attrs: + had_default = False + for a in (a for a in attrs if a.init is not False and a.kw_only is False): + if had_default is True and a.default is NOTHING: + msg = f"No mandatory attributes allowed after an attribute with a default value or factory. Attribute in question: {a!r}" + raise ValueError(msg) + + if had_default is False and a.default is not NOTHING: + had_default = True + + if field_transformer is not None: + attrs = field_transformer(cls, attrs) + + # Resolve default field alias after executing field_transformer. + # This allows field_transformer to differentiate between explicit vs + # default aliases and supply their own defaults. 
+ attrs = [ + a.evolve(alias=_default_init_alias_for(a.name)) if not a.alias else a + for a in attrs + ] + + # Create AttrsClass *after* applying the field_transformer since it may + # add or remove attributes! + attr_names = [a.name for a in attrs] + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map)) + + +def _make_cached_property_getattr(cached_properties, original_getattr, cls): + lines = [ + # Wrapped to get `__class__` into closure cell for super() + # (It will be replaced with the newly constructed class after construction). + "def wrapper(_cls):", + " __class__ = _cls", + " def __getattr__(self, item, cached_properties=cached_properties, original_getattr=original_getattr, _cached_setattr_get=_cached_setattr_get):", + " func = cached_properties.get(item)", + " if func is not None:", + " result = func(self)", + " _setter = _cached_setattr_get(self)", + " _setter(item, result)", + " return result", + ] + if original_getattr is not None: + lines.append( + " return original_getattr(self, item)", + ) + else: + lines.extend( + [ + " try:", + " return super().__getattribute__(item)", + " except AttributeError:", + " if not hasattr(super(), '__getattr__'):", + " raise", + " return super().__getattr__(item)", + " original_error = f\"'{self.__class__.__name__}' object has no attribute '{item}'\"", + " raise AttributeError(original_error)", + ] + ) + + lines.extend( + [ + " return __getattr__", + "__getattr__ = wrapper(_cls)", + ] + ) + + unique_filename = _generate_unique_filename(cls, "getattr") + + glob = { + "cached_properties": cached_properties, + "_cached_setattr_get": _OBJ_SETATTR.__get__, + "original_getattr": original_getattr, + } + + return _make_method( + "__getattr__", + "\n".join(lines), + unique_filename, + glob, + locals={ + "_cls": cls, + }, + ) + + +def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + "__traceback__", + ): + BaseException.__setattr__(self, name, value) + return + + raise FrozenInstanceError() + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + raise FrozenInstanceError() + + +class _ClassBuilder: + """ + Iteratively build *one* class. 
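`_frozen_setattrs`/`_frozen_delattrs` above are what frozen classes install as `__setattr__`/`__delattr__`; from the user's side, a minimal sketch (the `Locked` class is hypothetical):

    import attr
    from attr.exceptions import FrozenInstanceError

    @attr.s(frozen=True)
    class Locked:
        x = attr.ib()

    obj = Locked(1)
    try:
        obj.x = 2
    except FrozenInstanceError:
        pass   # every assignment or deletion raises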
+ """ + + __slots__ = ( + "_attr_names", + "_attrs", + "_base_attr_map", + "_base_names", + "_cache_hash", + "_cls", + "_cls_dict", + "_delete_attribs", + "_frozen", + "_has_pre_init", + "_pre_init_has_args", + "_has_post_init", + "_is_exc", + "_on_setattr", + "_slots", + "_weakref_slot", + "_wrote_own_setattr", + "_has_custom_setattr", + ) + + def __init__( + self, + cls, + these, + slots, + frozen, + weakref_slot, + getstate_setstate, + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_custom_setattr, + field_transformer, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, + these, + auto_attribs, + kw_only, + collect_by_mro, + field_transformer, + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if slots else {} + self._attrs = attrs + self._base_names = {a.name for a in base_attrs} + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = slots + self._frozen = frozen + self._weakref_slot = weakref_slot + self._cache_hash = cache_hash + self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) + self._pre_init_has_args = False + if self._has_pre_init: + # Check if the pre init method has more arguments than just `self` + # We want to pass arguments if pre init expects arguments + pre_init_func = cls.__attrs_pre_init__ + pre_init_signature = inspect.signature(pre_init_func) + self._pre_init_has_args = len(pre_init_signature.parameters) > 1 + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + self._is_exc = is_exc + self._on_setattr = on_setattr + + self._has_custom_setattr = has_custom_setattr + self._wrote_own_setattr = False + + self._cls_dict["__attrs_attrs__"] = self._attrs + + if frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + self._wrote_own_setattr = True + elif on_setattr in ( + _DEFAULT_ON_SETATTR, + setters.validate, + setters.convert, + ): + has_validator = has_converter = False + for a in attrs: + if a.validator is not None: + has_validator = True + if a.converter is not None: + has_converter = True + + if has_validator and has_converter: + break + if ( + ( + on_setattr == _DEFAULT_ON_SETATTR + and not (has_validator or has_converter) + ) + or (on_setattr == setters.validate and not has_validator) + or (on_setattr == setters.convert and not has_converter) + ): + # If class-level on_setattr is set to convert + validate, but + # there's no field to convert or validate, pretend like there's + # no on_setattr. + self._on_setattr = None + + if getstate_setstate: + ( + self._cls_dict["__getstate__"], + self._cls_dict["__setstate__"], + ) = self._make_getstate_setstate() + + def __repr__(self): + return f"<_ClassBuilder(cls={self._cls.__name__})>" + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + if self._slots is True: + cls = self._create_slots_class() + else: + cls = self._patch_original_class() + if PY_3_10_PLUS: + cls = abc.update_abstractmethods(cls) + + # The method gets only called if it's not inherited from a base class. + # _has_own_attribute does NOT work properly for classmethods. + if ( + getattr(cls, "__attrs_init_subclass__", None) + and "__attrs_init_subclass__" not in cls.__dict__ + ): + cls.__attrs_init_subclass__() + + return cls + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. 
+ """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). + if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, _SENTINEL) is not _SENTINEL + ): + # An AttributeError can happen if a base class defines a + # class variable and we want to set an attribute with the + # same name by using only a type annotation. + with contextlib.suppress(AttributeError): + delattr(cls, name) + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + # If we've inherited an attrs __setattr__ and don't write our own, + # reset it to object's. + if not self._wrote_own_setattr and getattr( + cls, "__attrs_own_setattr__", False + ): + cls.__attrs_own_setattr__ = False + + if not self._has_custom_setattr: + cls.__setattr__ = _OBJ_SETATTR + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + cd = { + k: v + for k, v in self._cls_dict.items() + if k not in (*tuple(self._attr_names), "__dict__", "__weakref__") + } + + # If our class doesn't have its own implementation of __setattr__ + # (either from the user or by us), check the bases, if one of them has + # an attrs-made __setattr__, that needs to be reset. We don't walk the + # MRO because we only care about our immediate base classes. + # XXX: This can be confused by subclassing a slotted attrs class with + # XXX: a non-attrs class and subclass the resulting class with an attrs + # XXX: class. See `test_slotted_confused` for details. For now that's + # XXX: OK with us. + if not self._wrote_own_setattr: + cd["__attrs_own_setattr__"] = False + + if not self._has_custom_setattr: + for base_cls in self._cls.__bases__: + if base_cls.__dict__.get("__attrs_own_setattr__", False): + cd["__setattr__"] = _OBJ_SETATTR + break + + # Traverse the MRO to collect existing slots + # and check for an existing __weakref__. + existing_slots = {} + weakref_inherited = False + for base_cls in self._cls.__mro__[1:-1]: + if base_cls.__dict__.get("__weakref__", None) is not None: + weakref_inherited = True + existing_slots.update( + { + name: getattr(base_cls, name) + for name in getattr(base_cls, "__slots__", []) + } + ) + + base_names = set(self._base_names) + + names = self._attr_names + if ( + self._weakref_slot + and "__weakref__" not in getattr(self._cls, "__slots__", ()) + and "__weakref__" not in names + and not weakref_inherited + ): + names += ("__weakref__",) + + if PY_3_8_PLUS: + cached_properties = { + name: cached_property.func + for name, cached_property in cd.items() + if isinstance(cached_property, functools.cached_property) + } + else: + # `functools.cached_property` was introduced in 3.8. + # So can't be used before this. + cached_properties = {} + + # Collect methods with a `__class__` reference that are shadowed in the new class. + # To know to update them. + additional_closure_functions_to_update = [] + if cached_properties: + class_annotations = _get_annotations(self._cls) + for name, func in cached_properties.items(): + # Add cached properties to names for slotting. + names += (name,) + # Clear out function from class to avoid clashing. 
+ del cd[name] + additional_closure_functions_to_update.append(func) + annotation = inspect.signature(func).return_annotation + if annotation is not inspect.Parameter.empty: + class_annotations[name] = annotation + + original_getattr = cd.get("__getattr__") + if original_getattr is not None: + additional_closure_functions_to_update.append(original_getattr) + + cd["__getattr__"] = _make_cached_property_getattr( + cached_properties, original_getattr, self._cls + ) + + # We only add the names of attributes that aren't inherited. + # Setting __slots__ to inherited attributes wastes memory. + slot_names = [name for name in names if name not in base_names] + + # There are slots for attributes from current class + # that are defined in parent classes. + # As their descriptors may be overridden by a child class, + # we collect them here and update the class dict + reused_slots = { + slot: slot_descriptor + for slot, slot_descriptor in existing_slots.items() + if slot in slot_names + } + slot_names = [name for name in slot_names if name not in reused_slots] + cd.update(reused_slots) + if self._cache_hash: + slot_names.append(_HASH_CACHE_FIELD) + + cd["__slots__"] = tuple(slot_names) + + cd["__qualname__"] = self._cls.__qualname__ + + # Create new class based on old class and our methods. + cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) + + # The following is a fix for + # . + # If a method mentions `__class__` or uses the no-arg super(), the + # compiler will bake a reference to the class in the method itself + # as `method.__closure__`. Since we replace the class with a + # clone, we rewrite these references so it keeps working. + for item in itertools.chain( + cls.__dict__.values(), additional_closure_functions_to_update + ): + if isinstance(item, (classmethod, staticmethod)): + # Class- and staticmethods hide their functions inside. + # These might need to be rewritten as well. + closure_cells = getattr(item.__func__, "__closure__", None) + elif isinstance(item, property): + # Workaround for property `super()` shortcut (PY3-only). + # There is no universal way for other descriptors. + closure_cells = getattr(item.fget, "__closure__", None) + else: + closure_cells = getattr(item, "__closure__", None) + + if not closure_cells: # Catch None or the empty list. + continue + for cell in closure_cells: + try: + match = cell.cell_contents is self._cls + except ValueError: # noqa: PERF203 + # ValueError: Cell is empty + pass + else: + if match: + cell.cell_contents = cls + return cls + + def add_repr(self, ns): + self._cls_dict["__repr__"] = self._add_method_dunders( + _make_repr(self._attrs, ns, self._cls) + ) + return self + + def add_str(self): + repr = self._cls_dict.get("__repr__") + if repr is None: + msg = "__str__ can only be generated if a __repr__ exists." + raise ValueError(msg) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def _make_getstate_setstate(self): + """ + Create custom __setstate__ and __getstate__ methods. + """ + # __weakref__ is not writable. + state_attr_names = tuple( + an for an in self._attr_names if an != "__weakref__" + ) + + def slots_getstate(self): + """ + Automatically created by attrs. + """ + return {name: getattr(self, name) for name in state_attr_names} + + hash_caching_enabled = self._cache_hash + + def slots_setstate(self, state): + """ + Automatically created by attrs. 
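`slots_getstate`/`slots_setstate` above are what make slotted instances picklable despite lacking a `__dict__`; the observable behavior, as a minimal sketch (class `P` is hypothetical):

    import pickle
    import attr

    @attr.s(slots=True)
    class P:
        x = attr.ib()

    # attrs supplies dict-based __getstate__/__setstate__ for slotted classes:
    assert pickle.loads(pickle.dumps(P(1))) == P(1)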
+ """ + __bound_setattr = _OBJ_SETATTR.__get__(self) + if isinstance(state, tuple): + # Backward compatibility with attrs instances pickled with + # attrs versions before v22.2.0 which stored tuples. + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + else: + for name in state_attr_names: + if name in state: + __bound_setattr(name, state[name]) + + # The hash code cache is not included when the object is + # serialized, but it still needs to be initialized to None to + # indicate that the first call to __hash__ should be a cache + # miss. + if hash_caching_enabled: + __bound_setattr(_HASH_CACHE_FIELD, None) + + return slots_getstate, slots_setstate + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + self._cls_dict["__hash__"] = self._add_method_dunders( + _make_hash( + self._cls, + self._attrs, + frozen=self._frozen, + cache_hash=self._cache_hash, + ) + ) + + return self + + def add_init(self): + self._cls_dict["__init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=False, + ) + ) + + return self + + def add_match_args(self): + self._cls_dict["__match_args__"] = tuple( + field.name + for field in self._attrs + if field.init and not field.kw_only + ) + + def add_attrs_init(self): + self._cls_dict["__attrs_init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._pre_init_has_args, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=True, + ) + ) + + return self + + def add_eq(self): + cd = self._cls_dict + + cd["__eq__"] = self._add_method_dunders( + _make_eq(self._cls, self._attrs) + ) + cd["__ne__"] = self._add_method_dunders(_make_ne()) + + return self + + def add_order(self): + cd = self._cls_dict + + cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_order(self._cls, self._attrs) + ) + + return self + + def add_setattr(self): + if self._frozen: + return self + + sa_attrs = {} + for a in self._attrs: + on_setattr = a.on_setattr or self._on_setattr + if on_setattr and on_setattr is not setters.NO_OP: + sa_attrs[a.name] = a, on_setattr + + if not sa_attrs: + return self + + if self._has_custom_setattr: + # We need to write a __setattr__ but there already is one! + msg = "Can't combine custom __setattr__ with on_setattr hooks." + raise ValueError(msg) + + # docstring comes from _add_method_dunders + def __setattr__(self, name, val): + try: + a, hook = sa_attrs[name] + except KeyError: + nval = val + else: + nval = hook(self, a, val) + + _OBJ_SETATTR(self, name, nval) + + self._cls_dict["__attrs_own_setattr__"] = True + self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__) + self._wrote_own_setattr = True + + return self + + def _add_method_dunders(self, method): + """ + Add __module__ and __qualname__ to a *method* if possible. + """ + with contextlib.suppress(AttributeError): + method.__module__ = self._cls.__module__ + + with contextlib.suppress(AttributeError): + method.__qualname__ = f"{self._cls.__qualname__}.{method.__name__}" + + with contextlib.suppress(AttributeError): + method.__doc__ = ( + "Method generated by attrs for class " + f"{self._cls.__qualname__}." 
+ ) + + return method + + +def _determine_attrs_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + msg = "Don't mix `cmp` with `eq` and `order`." + raise ValueError(msg) + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + return cmp, cmp + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq = default_eq + + if order is None: + order = eq + + if eq is False and order is True: + msg = "`order` can only be True if `eq` is True too." + raise ValueError(msg) + + return eq, order + + +def _determine_attrib_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + msg = "Don't mix `cmp` with `eq` and `order`." + raise ValueError(msg) + + def decide_callable_or_boolean(value): + """ + Decide whether a key function is used. + """ + if callable(value): + value, key = True, value + else: + key = None + return value, key + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + cmp, cmp_key = decide_callable_or_boolean(cmp) + return cmp, cmp_key, cmp, cmp_key + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq, eq_key = default_eq, None + else: + eq, eq_key = decide_callable_or_boolean(eq) + + if order is None: + order, order_key = eq, eq_key + else: + order, order_key = decide_callable_or_boolean(order) + + if eq is False and order is True: + msg = "`order` can only be True if `eq` is True too." + raise ValueError(msg) + + return eq, eq_key, order, order_key + + +def _determine_whether_to_implement( + cls, flag, auto_detect, dunders, default=True +): + """ + Check whether we should implement a set of methods for *cls*. + + *flag* is the argument passed into @attr.s like 'init', *auto_detect* the + same as passed into @attr.s and *dunders* is a tuple of attribute names + whose presence signals that the user has implemented it themselves. + + Return *default* if no reason for or against is found. + """ + if flag is True or flag is False: + return flag + + if flag is None and auto_detect is False: + return default + + # Logically, flag is None and auto_detect is True here. + for dunder in dunders: + if _has_own_attribute(cls, dunder): + return False + + return default + + +def attrs( + maybe_cls=None, + these=None, + repr_ns=None, + repr=None, + cmp=None, + hash=None, + init=None, + slots=False, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=False, + kw_only=False, + cache_hash=False, + auto_exc=False, + eq=None, + order=None, + auto_detect=False, + collect_by_mro=False, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, + unsafe_hash=None, +): + r""" + A class decorator that adds :term:`dunder methods` according to the + specified attributes using `attr.ib` or the *these* argument. + + Consider using `attrs.define` / `attrs.frozen` in new code (``attr.s`` will + *never* go away, though). + + Args: + repr_ns (str): + When using nested classes, there was no way in Python 2 to + automatically detect that.
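`decide_callable_or_boolean` above is why *eq* and *order* also accept key functions: a callable is split into ``True`` plus an ``eq_key``/``order_key`` that both sides of a comparison are passed through. A minimal sketch (the `Tag` class is hypothetical):

    import attr

    @attr.s
    class Tag:
        # eq=str.lower becomes eq=True with eq_key=str.lower internally.
        name = attr.ib(eq=str.lower)

    assert Tag("API") == Tag("api")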
This argument allows setting a custom + name for a more meaningful ``repr`` output. This argument is + pointless in Python 3 and is therefore deprecated. + + .. caution:: + Refer to `attrs.define` for the rest of the parameters, but note that they + can have different defaults. + + Notably, leaving *on_setattr* as `None` will **not** add any hooks. + + .. versionadded:: 16.0.0 *slots* + .. versionadded:: 16.1.0 *frozen* + .. versionadded:: 16.3.0 *str* + .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. + .. versionchanged:: 17.1.0 + *hash* supports `None` as value which is also the default now. + .. versionadded:: 17.3.0 *auto_attribs* + .. versionchanged:: 18.1.0 + If *these* is passed, no attributes are deleted from the class body. + .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. + .. versionadded:: 18.2.0 *weakref_slot* + .. deprecated:: 18.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a + `DeprecationWarning` if the classes compared are subclasses of + each other. ``__eq__`` and ``__ne__`` never tried to compare subclasses + to each other. + .. versionchanged:: 19.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider + subclasses comparable anymore. + .. versionadded:: 18.2.0 *kw_only* + .. versionadded:: 18.2.0 *cache_hash* + .. versionadded:: 19.1.0 *auto_exc* + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *auto_detect* + .. versionadded:: 20.1.0 *collect_by_mro* + .. versionadded:: 20.1.0 *getstate_setstate* + .. versionadded:: 20.1.0 *on_setattr* + .. versionadded:: 20.3.0 *field_transformer* + .. versionchanged:: 21.1.0 + ``init=False`` injects ``__attrs_init__`` + .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__`` + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 21.3.0 *match_args* + .. versionadded:: 22.2.0 + *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance). + .. deprecated:: 24.1.0 *repr_ns* + .. versionchanged:: 24.1.0 + Instances are not compared as tuples of attributes anymore, but using a + big ``and`` condition. This is faster and has more correct behavior for + uncomparable values like `math.nan`. + .. versionadded:: 24.1.0 + If a class has an *inherited* classmethod called + ``__attrs_init_subclass__``, it is executed after the class is created. + .. deprecated:: 24.1.0 *hash* is deprecated in favor of *unsafe_hash*. + """ + if repr_ns is not None: + import warnings + + warnings.warn( + DeprecationWarning( + "The `repr_ns` argument is deprecated and will be removed in or after August 2025." + ), + stacklevel=2, + ) + + eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) + + # unsafe_hash takes precedence due to PEP 681. + if unsafe_hash is not None: + hash = unsafe_hash + + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + def wrap(cls): + is_frozen = frozen or _has_frozen_base_class(cls) + is_exc = auto_exc is True and issubclass(cls, BaseException) + has_own_setattr = auto_detect and _has_own_attribute( + cls, "__setattr__" + ) + + if has_own_setattr and is_frozen: + msg = "Can't freeze a class with a custom __setattr__."
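For the `attr.s` decorator documented above, a minimal sketch of a common flag combination (the `Point` class is hypothetical):

    import attr

    @attr.s(slots=True, auto_attribs=True)
    class Point:
        x: int
        y: int = 0

    p = Point(1)
    assert repr(p) == "Point(x=1, y=0)"   # generated __repr__, __eq__, __init__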
+ raise ValueError(msg) + + builder = _ClassBuilder( + cls, + these, + slots, + is_frozen, + weakref_slot, + _determine_whether_to_implement( + cls, + getstate_setstate, + auto_detect, + ("__getstate__", "__setstate__"), + default=slots, + ), + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_own_setattr, + field_transformer, + ) + if _determine_whether_to_implement( + cls, repr, auto_detect, ("__repr__",) + ): + builder.add_repr(repr_ns) + if str is True: + builder.add_str() + + eq = _determine_whether_to_implement( + cls, eq_, auto_detect, ("__eq__", "__ne__") + ) + if not is_exc and eq is True: + builder.add_eq() + if not is_exc and _determine_whether_to_implement( + cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__") + ): + builder.add_order() + + builder.add_setattr() + + nonlocal hash + if ( + hash is None + and auto_detect is True + and _has_own_attribute(cls, "__hash__") + ): + hash = False + + if hash is not True and hash is not False and hash is not None: + # Can't use `hash in` because 1 == True for example. + msg = "Invalid value for hash. Must be True, False, or None." + raise TypeError(msg) + + if hash is False or (hash is None and eq is False) or is_exc: + # Don't do anything. Should fall back to __object__'s __hash__ + # which is by id. + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, hashing must be either explicitly or implicitly enabled." + raise TypeError(msg) + elif hash is True or ( + hash is None and eq is True and is_frozen is True + ): + # Build a __hash__ if told so, or if it's safe. + builder.add_hash() + else: + # Raise TypeError on attempts to hash. + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, hashing must be either explicitly or implicitly enabled." + raise TypeError(msg) + builder.make_unhashable() + + if _determine_whether_to_implement( + cls, init, auto_detect, ("__init__",) + ): + builder.add_init() + else: + builder.add_attrs_init() + if cache_hash: + msg = "Invalid value for cache_hash. To use hash caching, init must be True." + raise TypeError(msg) + + if ( + PY_3_10_PLUS + and match_args + and not _has_own_attribute(cls, "__match_args__") + ): + builder.add_match_args() + + return builder.build_class() + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but `None` if used as `@attrs()`. + if maybe_cls is None: + return wrap + + return wrap(maybe_cls) + + +_attrs = attrs +""" +Internal alias so we can use it in functions that take an argument called +*attrs*. +""" + + +def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return cls.__setattr__ is _frozen_setattrs + + +def _generate_unique_filename(cls, func_name): + """ + Create a "filename" suitable for a function being generated. 
+ """ + return ( + f"" + ) + + +def _make_hash(cls, attrs, frozen, cache_hash): + attrs = tuple( + a for a in attrs if a.hash is True or (a.hash is None and a.eq is True) + ) + + tab = " " + + unique_filename = _generate_unique_filename(cls, "hash") + type_hash = hash(unique_filename) + # If eq is custom generated, we need to include the functions in globs + globs = {} + + hash_def = "def __hash__(self" + hash_func = "hash((" + closing_braces = "))" + if not cache_hash: + hash_def += "):" + else: + hash_def += ", *" + + hash_def += ", _cache_wrapper=__import__('attr._make')._make._CacheHashWrapper):" + hash_func = "_cache_wrapper(" + hash_func + closing_braces += ")" + + method_lines = [hash_def] + + def append_hash_computation_lines(prefix, indent): + """ + Generate the code for actually computing the hash code. + Below this will either be returned directly or used to compute + a value which is then cached, depending on the value of cache_hash + """ + + method_lines.extend( + [ + indent + prefix + hash_func, + indent + f" {type_hash},", + ] + ) + + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + globs[cmp_name] = a.eq_key + method_lines.append( + indent + f" {cmp_name}(self.{a.name})," + ) + else: + method_lines.append(indent + f" self.{a.name},") + + method_lines.append(indent + " " + closing_braces) + + if cache_hash: + method_lines.append(tab + f"if self.{_HASH_CACHE_FIELD} is None:") + if frozen: + append_hash_computation_lines( + f"object.__setattr__(self, '{_HASH_CACHE_FIELD}', ", tab * 2 + ) + method_lines.append(tab * 2 + ")") # close __setattr__ + else: + append_hash_computation_lines( + f"self.{_HASH_CACHE_FIELD} = ", tab * 2 + ) + method_lines.append(tab + f"return self.{_HASH_CACHE_FIELD}") + else: + append_hash_computation_lines("return ", tab) + + script = "\n".join(method_lines) + return _make_method("__hash__", script, unique_filename, globs) + + +def _add_hash(cls, attrs): + """ + Add a hash method to *cls*. + """ + cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False) + return cls + + +def _make_ne(): + """ + Create __ne__ method. + """ + + def __ne__(self, other): + """ + Check equality and either forward a NotImplemented or + return the result negated. + """ + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + + return not result + + return __ne__ + + +def _make_eq(cls, attrs): + """ + Create __eq__ method for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.eq] + + unique_filename = _generate_unique_filename(cls, "eq") + lines = [ + "def __eq__(self, other):", + " if other.__class__ is not self.__class__:", + " return NotImplemented", + ] + + # We can't just do a big self.x = other.x and... clause due to + # irregularities like nan == nan is false but (nan,) == (nan,) is true. + globs = {} + if attrs: + lines.append(" return (") + for a in attrs: + if a.eq_key: + cmp_name = f"_{a.name}_key" + # Add the key function to the global namespace + # of the evaluated function. + globs[cmp_name] = a.eq_key + lines.append( + f" {cmp_name}(self.{a.name}) == {cmp_name}(other.{a.name})" + ) + else: + lines.append(f" self.{a.name} == other.{a.name}") + if a is not attrs[-1]: + lines[-1] = f"{lines[-1]} and" + lines.append(" )") + else: + lines.append(" return True") + + script = "\n".join(lines) + + return _make_method("__eq__", script, unique_filename, globs) + + +def _make_order(cls, attrs): + """ + Create ordering methods for *cls* with *attrs*. 
+ """ + attrs = [a for a in attrs if a.order] + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return tuple( + key(value) if key else value + for value, key in ( + (getattr(obj, a.name), a.order_key) for a in attrs + ) + ) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) < attrs_to_tuple(other) + + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) <= attrs_to_tuple(other) + + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) > attrs_to_tuple(other) + + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) >= attrs_to_tuple(other) + + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ + + +def _add_eq(cls, attrs=None): + """ + Add equality methods to *cls* with *attrs*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__eq__ = _make_eq(cls, attrs) + cls.__ne__ = _make_ne() + + return cls + + +def _make_repr(attrs, ns, cls): + unique_filename = _generate_unique_filename(cls, "repr") + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r for name, r, _ in attr_names_with_reprs if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." + name if i else 'getattr(self, "' + name + '", NOTHING)' + ) + fragment = ( + "%s={%s!r}" % (name, accessor) + if r == repr + else "%s={%s_repr(%s)}" % (name, name, accessor) + ) + attribute_fragments.append(fragment) + repr_fragment = ", ".join(attribute_fragments) + + if ns is None: + cls_name_fragment = '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' + else: + cls_name_fragment = ns + ".{self.__class__.__name__}" + + lines = [ + "def __repr__(self):", + " try:", + " already_repring = _compat.repr_context.already_repring", + " except AttributeError:", + " already_repring = {id(self),}", + " _compat.repr_context.already_repring = already_repring", + " else:", + " if id(self) in already_repring:", + " return '...'", + " else:", + " already_repring.add(id(self))", + " try:", + f" return f'{cls_name_fragment}({repr_fragment})'", + " finally:", + " already_repring.remove(id(self))", + ] + + return _make_method( + "__repr__", "\n".join(lines), unique_filename, globs=globs + ) + + +def _add_repr(cls, ns=None, attrs=None): + """ + Add a repr method to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__repr__ = _make_repr(attrs, ns, cls) + return cls + + +def fields(cls): + """ + Return the tuple of *attrs* attributes for a class. + + The tuple also allows accessing the fields by their names (see below for + examples). + + Args: + cls (type): Class to introspect. + + Raises: + TypeError: If *cls* is not a class. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. 
+ + Returns: + tuple (with name accessors) of `attrs.Attribute` + + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. + .. versionchanged:: 23.1.0 Add support for generic classes. + """ + generic_base = get_generic_base(cls) + + if generic_base is None and not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + + attrs = getattr(cls, "__attrs_attrs__", None) + + if attrs is None: + if generic_base is not None: + attrs = getattr(generic_base, "__attrs_attrs__", None) + if attrs is not None: + # Even though this is global state, stick it on here to speed + # it up. We rely on `cls` being cached for this to be + # efficient. + cls.__attrs_attrs__ = attrs + return attrs + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + + return attrs + + +def fields_dict(cls): + """ + Return an ordered dictionary of *attrs* attributes for a class, whose keys + are the attribute names. + + Args: + cls (type): Class to introspect. + + Raises: + TypeError: If *cls* is not a class. + + attrs.exceptions.NotAnAttrsClassError: + If *cls* is not an *attrs* class. + + Returns: + dict[str, attrs.Attribute]: Dict of attribute name to definition + + .. versionadded:: 18.1.0 + """ + if not isinstance(cls, type): + msg = "Passed object must be a class." + raise TypeError(msg) + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + msg = f"{cls!r} is not an attrs-decorated class." + raise NotAnAttrsClassError(msg) + return {a.name: a for a in attrs} + + +def validate(inst): + """ + Validate all attributes on *inst* that have a validator. + + Leaves all exceptions through. + + Args: + inst: Instance of a class with *attrs* attributes. + """ + if _config._run_validators is False: + return + + for a in fields(inst.__class__): + v = a.validator + if v is not None: + v(inst, a, getattr(inst, a.name)) + + +def _is_slot_attr(a_name, base_attr_map): + """ + Check if the attribute name comes from a slot class. + """ + cls = base_attr_map.get(a_name) + return cls and "__slots__" in cls.__dict__ + + +def _make_init( + cls, + attrs, + pre_init, + pre_init_has_args, + post_init, + frozen, + slots, + cache_hash, + base_attr_map, + is_exc, + cls_on_setattr, + attrs_init, +): + has_cls_on_setattr = ( + cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP + ) + + if frozen and has_cls_on_setattr: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = cache_hash or frozen + filtered_attrs = [] + attr_dict = {} + for a in attrs: + if not a.init and a.default is NOTHING: + continue + + filtered_attrs.append(a) + attr_dict[a.name] = a + + if a.on_setattr is not None: + if frozen is True: + msg = "Frozen classes can't use on_setattr." + raise ValueError(msg) + + needs_cached_setattr = True + elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: + needs_cached_setattr = True + + unique_filename = _generate_unique_filename(cls, "init") + + script, globs, annotations = _attrs_to_init_script( + filtered_attrs, + frozen, + slots, + pre_init, + pre_init_has_args, + post_init, + cache_hash, + base_attr_map, + is_exc, + needs_cached_setattr, + has_cls_on_setattr, + "__attrs_init__" if attrs_init else "__init__", + ) + if cls.__module__ in sys.modules: + # This makes typing.get_type_hints(CLS.__init__) resolve string types. 
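+        # Illustration (an assumption for exposition, not upstream text): if
+        # the module defines ``Point`` and a field is annotated ``x: "Point"``,
+        # copying the module's globals into the generated method's namespace is
+        # what lets typing.get_type_hints() resolve that string later.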
+ globs.update(sys.modules[cls.__module__].__dict__) + + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + + if needs_cached_setattr: + # Save the lookup overhead in __init__ if we need to circumvent + # setattr hooks. + globs["_cached_setattr_get"] = _OBJ_SETATTR.__get__ + + init = _make_method( + "__attrs_init__" if attrs_init else "__init__", + script, + unique_filename, + globs, + ) + init.__annotations__ = annotations + + return init + + +def _setattr(attr_name: str, value_var: str, has_on_setattr: bool) -> str: + """ + Use the cached object.setattr to set *attr_name* to *value_var*. + """ + return f"_setattr('{attr_name}', {value_var})" + + +def _setattr_with_converter( + attr_name: str, value_var: str, has_on_setattr: bool, converter: Converter +) -> str: + """ + Use the cached object.setattr to set *attr_name* to *value_var*, but run + its converter first. + """ + return f"_setattr('{attr_name}', {converter._fmt_converter_call(attr_name, value_var)})" + + +def _assign(attr_name: str, value: str, has_on_setattr: bool) -> str: + """ + Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise + relegate to _setattr. + """ + if has_on_setattr: + return _setattr(attr_name, value, True) + + return f"self.{attr_name} = {value}" + + +def _assign_with_converter( + attr_name: str, value_var: str, has_on_setattr: bool, converter: Converter +) -> str: + """ + Unless *attr_name* has an on_setattr hook, use normal assignment after + conversion. Otherwise relegate to _setattr_with_converter. + """ + if has_on_setattr: + return _setattr_with_converter(attr_name, value_var, True, converter) + + return f"self.{attr_name} = {converter._fmt_converter_call(attr_name, value_var)}" + + +def _determine_setters( + frozen: bool, slots: bool, base_attr_map: dict[str, type] +): + """ + Determine the correct setter functions based on whether a class is frozen + and/or slotted. + """ + if frozen is True: + if slots is True: + return (), _setattr, _setattr_with_converter + + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. + # Note _inst_dict will be used again below if cache_hash is True + + def fmt_setter( + attr_name: str, value_var: str, has_on_setattr: bool + ) -> str: + if _is_slot_attr(attr_name, base_attr_map): + return _setattr(attr_name, value_var, has_on_setattr) + + return f"_inst_dict['{attr_name}'] = {value_var}" + + def fmt_setter_with_converter( + attr_name: str, + value_var: str, + has_on_setattr: bool, + converter: Converter, + ) -> str: + if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): + return _setattr_with_converter( + attr_name, value_var, has_on_setattr, converter + ) + + return f"_inst_dict['{attr_name}'] = {converter._fmt_converter_call(attr_name, value_var)}" + + return ( + ("_inst_dict = self.__dict__",), + fmt_setter, + fmt_setter_with_converter, + ) + + # Not frozen -- we can just assign directly. + return (), _assign, _assign_with_converter + + +def _attrs_to_init_script( + attrs: list[Attribute], + is_frozen: bool, + is_slotted: bool, + call_pre_init: bool, + pre_init_has_args: bool, + call_post_init: bool, + does_cache_hash: bool, + base_attr_map: dict[str, type], + is_exc: bool, + needs_cached_setattr: bool, + has_cls_on_setattr: bool, + method_name: str, +) -> tuple[str, dict, dict]: + """ + Return a script of an initializer for *attrs*, a dict of globals, and + annotations for the initializer. + + The globals are required by the generated script. 
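+
+    A hedged illustration: for a plain class with one annotated field ``x``
+    (no converters, validators, or setattr hooks), the returned script boils
+    down to::
+
+        def __init__(self, x):
+            self.x = x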
+ """ + lines = ["self.__attrs_pre_init__()"] if call_pre_init else [] + + if needs_cached_setattr: + lines.append( + # Circumvent the __setattr__ descriptor to save one lookup per + # assignment. Note _setattr will be used again below if + # does_cache_hash is True. + "_setattr = _cached_setattr_get(self)" + ) + + extra_lines, fmt_setter, fmt_setter_with_converter = _determine_setters( + is_frozen, is_slotted, base_attr_map + ) + lines.extend(extra_lines) + + args = [] + kw_only_args = [] + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. + # Injecting this into __init__ globals lets us avoid lookups. + names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + + attr_name = a.name + has_on_setattr = a.on_setattr is not None or ( + a.on_setattr is not setters.NO_OP and has_cls_on_setattr + ) + # a.alias is set to maybe-mangled attr_name in _ClassBuilder if not + # explicitly provided + arg_name = a.alias + + has_factory = isinstance(a.default, Factory) + maybe_self = "self" if has_factory and a.default.takes_self else "" + + if a.converter and not isinstance(a.converter, Converter): + converter = Converter(a.converter) + else: + converter = a.converter + + if a.init is False: + if has_factory: + init_factory_name = _INIT_FACTORY_PAT % (a.name,) + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + f"({maybe_self})", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + elif converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + fmt_setter( + attr_name, + f"attr_dict['{attr_name}'].default", + has_on_setattr, + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = f"{arg_name}=attr_dict['{attr_name}'].default" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + elif has_factory: + arg = f"{arg_name}=NOTHING" + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + lines.append(f"if {arg_name} is not NOTHING:") + + init_factory_name = _INIT_FACTORY_PAT % (a.name,) + if converter is not None: + lines.append( + " " + + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter_with_converter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + converter, + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append( + " " + fmt_setter(attr_name, arg_name, has_on_setattr) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = 
a.default.factory + else: + if a.kw_only: + kw_only_args.append(arg_name) + else: + args.append(arg_name) + + if converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr, converter + ) + ) + names_for_globals[converter._get_global_name(a.name)] = ( + converter.converter + ) + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + if a.init is True: + if a.type is not None and converter is None: + annotations[arg_name] = a.type + elif converter is not None and converter._first_param_type: + # Use the type from the converter if present. + annotations[arg_name] = converter._first_param_type + + if attrs_to_validate: # we can skip this if there are no validators. + names_for_globals["_config"] = _config + lines.append("if _config._run_validators is True:") + for a in attrs_to_validate: + val_name = "__attr_validator_" + a.name + attr_name = "__attr_" + a.name + lines.append(f" {val_name}(self, {attr_name}, self.{a.name})") + names_for_globals[val_name] = a.validator + names_for_globals[attr_name] = a + + if call_post_init: + lines.append("self.__attrs_post_init__()") + + # Because this is set only after __attrs_post_init__ is called, a crash + # will result if post-init tries to access the hash code. This seemed + # preferable to setting this beforehand, in which case alteration to field + # values during post-init combined with post-init accessing the hash code + # would result in silent bugs. + if does_cache_hash: + if is_frozen: + if is_slotted: + init_hash_cache = f"_setattr('{_HASH_CACHE_FIELD}', None)" + else: + init_hash_cache = f"_inst_dict['{_HASH_CACHE_FIELD}'] = None" + else: + init_hash_cache = f"self.{_HASH_CACHE_FIELD} = None" + lines.append(init_hash_cache) + + # For exceptions we rely on BaseException.__init__ for proper + # initialization. + if is_exc: + vals = ",".join(f"self.{a.name}" for a in attrs if a.init) + + lines.append(f"BaseException.__init__(self, {vals})") + + args = ", ".join(args) + pre_init_args = args + if kw_only_args: + # leading comma & kw_only args + args += f"{', ' if args else ''}*, {', '.join(kw_only_args)}" + pre_init_kw_only_args = ", ".join( + [ + f"{kw_arg_name}={kw_arg_name}" + # We need to remove the defaults from the kw_only_args. + for kw_arg_name in (kwa.split("=")[0] for kwa in kw_only_args) + ] + ) + pre_init_args += ", " if pre_init_args else "" + pre_init_args += pre_init_kw_only_args + + if call_pre_init and pre_init_has_args: + # If pre init method has arguments, pass same arguments as `__init__`. + lines[0] = f"self.__attrs_pre_init__({pre_init_args})" + + # Python 3.7 doesn't allow backslashes in f strings. + NL = "\n " + return ( + f"""def {method_name}(self, {args}): + {NL.join(lines) if lines else 'pass'} +""", + names_for_globals, + annotations, + ) + + +def _default_init_alias_for(name: str) -> str: + """ + The default __init__ parameter name for a field. + + This performs private-name adjustment via leading-unscore stripping, + and is the default value of Attribute.alias if not provided. + """ + + return name.lstrip("_") + + +class Attribute: + """ + *Read-only* representation of an attribute. + + .. warning:: + + You should never instantiate this class yourself. + + The class has *all* arguments of `attr.ib` (except for ``factory`` which is + only syntactic sugar for ``default=Factory(...)`` plus the following: + + - ``name`` (`str`): The name of the attribute. 
+    - ``alias`` (`str`): The __init__ parameter name of the attribute, after
+      any explicit overrides and default private-attribute-name handling.
+    - ``inherited`` (`bool`): Whether or not that attribute has been inherited
+      from a base class.
+    - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The
+      callables that are used for comparing and ordering objects by this
+      attribute, respectively. These are set by passing a callable to
+      `attr.ib`'s ``eq``, ``order``, or ``cmp`` arguments. See also
+      :ref:`comparison customization <custom-comparison>`.
+
+    Instances of this class are frequently used for introspection purposes
+    like:
+
+    - `fields` returns a tuple of them.
+    - Validators get them passed as the first argument.
+    - The :ref:`field transformer <transform-fields>` hook receives a list of
+      them.
+    - The ``alias`` property exposes the __init__ parameter name of the field,
+      with any overrides and default private-attribute handling applied.
+
+
+    .. versionadded:: 20.1.0 *inherited*
+    .. versionadded:: 20.1.0 *on_setattr*
+    .. versionchanged:: 20.2.0 *inherited* is not taken into account for
+        equality checks and hashing anymore.
+    .. versionadded:: 21.1.0 *eq_key* and *order_key*
+    .. versionadded:: 22.2.0 *alias*
+
+    For the full version history of the fields, see `attr.ib`.
+    """
+
+    __slots__ = (
+        "name",
+        "default",
+        "validator",
+        "repr",
+        "eq",
+        "eq_key",
+        "order",
+        "order_key",
+        "hash",
+        "init",
+        "metadata",
+        "type",
+        "converter",
+        "kw_only",
+        "inherited",
+        "on_setattr",
+        "alias",
+    )
+
+    def __init__(
+        self,
+        name,
+        default,
+        validator,
+        repr,
+        cmp,  # XXX: unused, remove along with other cmp code.
+        hash,
+        init,
+        inherited,
+        metadata=None,
+        type=None,
+        converter=None,
+        kw_only=False,
+        eq=None,
+        eq_key=None,
+        order=None,
+        order_key=None,
+        on_setattr=None,
+        alias=None,
+    ):
+        eq, eq_key, order, order_key = _determine_attrib_eq_order(
+            cmp, eq_key or eq, order_key or order, True
+        )
+
+        # Cache this descriptor here to speed things up later.
+        bound_setattr = _OBJ_SETATTR.__get__(self)
+
+        # Despite the big red warning, people *do* instantiate `Attribute`
+        # themselves.
+        bound_setattr("name", name)
+        bound_setattr("default", default)
+        bound_setattr("validator", validator)
+        bound_setattr("repr", repr)
+        bound_setattr("eq", eq)
+        bound_setattr("eq_key", eq_key)
+        bound_setattr("order", order)
+        bound_setattr("order_key", order_key)
+        bound_setattr("hash", hash)
+        bound_setattr("init", init)
+        bound_setattr("converter", converter)
+        bound_setattr(
+            "metadata",
+            (
+                types.MappingProxyType(dict(metadata))  # Shallow copy
+                if metadata
+                else _EMPTY_METADATA_SINGLETON
+            ),
+        )
+        bound_setattr("type", type)
+        bound_setattr("kw_only", kw_only)
+        bound_setattr("inherited", inherited)
+        bound_setattr("on_setattr", on_setattr)
+        bound_setattr("alias", alias)
+
+    def __setattr__(self, name, value):
+        raise FrozenInstanceError()
+
+    @classmethod
+    def from_counting_attr(cls, name, ca, type=None):
+        # type holds the annotated value.
deal with conflicts: + if type is None: + type = ca.type + elif ca.type is not None: + msg = "Type annotation and type argument cannot both be present" + raise ValueError(msg) + inst_dict = { + k: getattr(ca, k) + for k in Attribute.__slots__ + if k + not in ( + "name", + "validator", + "default", + "type", + "inherited", + ) # exclude methods and deprecated alias + } + return cls( + name=name, + validator=ca._validator, + default=ca._default, + type=type, + cmp=None, + inherited=False, + **inst_dict, + ) + + # Don't use attrs.evolve since fields(Attribute) doesn't work + def evolve(self, **changes): + """ + Copy *self* and apply *changes*. + + This works similarly to `attrs.evolve` but that function does not work + with {class}`Attribute`. + + It is mainly meant to be used for `transform-fields`. + + .. versionadded:: 20.3.0 + """ + new = copy.copy(self) + + new._setattrs(changes.items()) + + return new + + # Don't use _add_pickle since fields(Attribute) doesn't work + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple( + getattr(self, name) if name != "metadata" else dict(self.metadata) + for name in self.__slots__ + ) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + self._setattrs(zip(self.__slots__, state)) + + def _setattrs(self, name_values_pairs): + bound_setattr = _OBJ_SETATTR.__get__(self) + for name, value in name_values_pairs: + if name != "metadata": + bound_setattr(name, value) + else: + bound_setattr( + name, + ( + types.MappingProxyType(dict(value)) + if value + else _EMPTY_METADATA_SINGLETON + ), + ) + + +_a = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=(name != "metadata"), + init=True, + inherited=False, + alias=_default_init_alias_for(name), + ) + for name in Attribute.__slots__ +] + +Attribute = _add_hash( + _add_eq( + _add_repr(Attribute, attrs=_a), + attrs=[a for a in _a if a.name != "inherited"], + ), + attrs=[a for a in _a if a.hash and a.name != "inherited"], +) + + +class _CountingAttr: + """ + Intermediate representation of attributes that uses a counter to preserve + the order in which the attributes have been defined. + + *Internal* data structure of the attrs library. Running into is most + likely the result of a bug like a forgotten `@attr.s` decorator. 
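+
+    A hedged sketch of how one surfaces (the `@attr.s` decorator is
+    deliberately missing):
+
+    >>> import attr
+    >>> class D:
+    ...     x = attr.ib()
+    >>> type(D.x).__name__
+    '_CountingAttr'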
+ """ + + __slots__ = ( + "counter", + "_default", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "_validator", + "converter", + "type", + "kw_only", + "on_setattr", + "alias", + ) + __attrs_attrs__ = ( + *tuple( + Attribute( + name=name, + alias=_default_init_alias_for(name), + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=True, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", + "on_setattr", + "alias", + ) + ), + Attribute( + name="metadata", + alias="metadata", + default=None, + validator=None, + repr=True, + cmp=None, + hash=False, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + eq, + eq_key, + order, + order_key, + on_setattr, + alias, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + self._validator = validator + self.converter = converter + self.repr = repr + self.eq = eq + self.eq_key = eq_key + self.order = order + self.order_key = order_key + self.hash = hash + self.init = init + self.metadata = metadata + self.type = type + self.kw_only = kw_only + self.on_setattr = on_setattr + self.alias = alias + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows to set the default for an attribute. + + Returns *meth* unchanged. + + Raises: + DefaultAlreadySetError: If default has been set before. + + .. versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError() + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) + + +class Factory: + """ + Stores a factory callable. + + If passed as the default value to `attrs.field`, the factory is used to + generate a new value. + + Args: + factory (typing.Callable): + A callable that takes either none or exactly one mandatory + positional argument depending on *takes_self*. + + takes_self (bool): + Pass the partially initialized instance that is being initialized + as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + __slots__ = ("factory", "takes_self") + + def __init__(self, factory, takes_self=False): + self.factory = factory + self.takes_self = takes_self + + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + for name, value in zip(self.__slots__, state): + setattr(self, name, value) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in Factory.__slots__ +] + +Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) + + +class Converter: + """ + Stores a converter callable. 
+ + Allows for the wrapped converter to take additional arguments. The + arguments are passed in the order they are documented. + + Args: + converter (Callable): A callable that converts the passed value. + + takes_self (bool): + Pass the partially initialized instance that is being initialized + as a positional argument. (default: `False`) + + takes_field (bool): + Pass the field definition (an :class:`Attribute`) into the + converter as a positional argument. (default: `False`) + + .. versionadded:: 24.1.0 + """ + + __slots__ = ( + "converter", + "takes_self", + "takes_field", + "_first_param_type", + "_global_name", + "__call__", + ) + + def __init__(self, converter, *, takes_self=False, takes_field=False): + self.converter = converter + self.takes_self = takes_self + self.takes_field = takes_field + + ex = _AnnotationExtractor(converter) + self._first_param_type = ex.get_first_param_type() + + if not (self.takes_self or self.takes_field): + self.__call__ = lambda value, _, __: self.converter(value) + elif self.takes_self and not self.takes_field: + self.__call__ = lambda value, instance, __: self.converter( + value, instance + ) + elif not self.takes_self and self.takes_field: + self.__call__ = lambda value, __, field: self.converter( + value, field + ) + else: + self.__call__ = lambda value, instance, field: self.converter( + value, instance, field + ) + + rt = ex.get_return_type() + if rt is not None: + self.__call__.__annotations__["return"] = rt + + @staticmethod + def _get_global_name(attr_name: str) -> str: + """ + Return the name that a converter for an attribute name *attr_name* + would have. + """ + return f"__attr_converter_{attr_name}" + + def _fmt_converter_call(self, attr_name: str, value_var: str) -> str: + """ + Return a string that calls the converter for an attribute name + *attr_name* and the value in variable named *value_var* according to + `self.takes_self` and `self.takes_field`. + """ + if not (self.takes_self or self.takes_field): + return f"{self._get_global_name(attr_name)}({value_var})" + + if self.takes_self and self.takes_field: + return f"{self._get_global_name(attr_name)}({value_var}, self, attr_dict['{attr_name}'])" + + if self.takes_self: + return f"{self._get_global_name(attr_name)}({value_var}, self)" + + return f"{self._get_global_name(attr_name)}({value_var}, attr_dict['{attr_name}'])" + + def __getstate__(self): + """ + Return a dict containing only converter and takes_self -- the rest gets + computed when loading. + """ + return { + "converter": self.converter, + "takes_self": self.takes_self, + "takes_field": self.takes_field, + } + + def __setstate__(self, state): + """ + Load instance from state. + """ + self.__init__(**state) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in ("converter", "takes_self", "takes_field") +] + +Converter = _add_hash( + _add_eq(_add_repr(Converter, attrs=_f), attrs=_f), attrs=_f +) + + +def make_class( + name, attrs, bases=(object,), class_body=None, **attributes_arguments +): + r""" + A quick way to create a new class called *name* with *attrs*. + + Args: + name (str): The name for the new class. + + attrs( list | dict): + A list of names or a dictionary of mappings of names to `attr.ib`\ + s / `attrs.field`\ s. + + The order is deduced from the order of the names or attributes + inside *attrs*. Otherwise the order of the definition of the + attributes is used. 
+ + bases (tuple[type, ...]): Classes that the new class will subclass. + + class_body (dict): + An optional dictionary of class attributes for the new class. + + attributes_arguments: Passed unmodified to `attr.s`. + + Returns: + type: A new class with *attrs*. + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + .. versionchanged:: 23.2.0 *class_body* + """ + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = {a: attrib() for a in attrs} + else: + msg = "attrs argument must be a dict or a list." + raise TypeError(msg) + + pre_init = cls_dict.pop("__attrs_pre_init__", None) + post_init = cls_dict.pop("__attrs_post_init__", None) + user_init = cls_dict.pop("__init__", None) + + body = {} + if class_body is not None: + body.update(class_body) + if pre_init is not None: + body["__attrs_pre_init__"] = pre_init + if post_init is not None: + body["__attrs_post_init__"] = post_init + if user_init is not None: + body["__init__"] = user_init + + type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body)) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + with contextlib.suppress(AttributeError, ValueError): + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + + # We do it here for proper warnings with meaningful stacklevel. + cmp = attributes_arguments.pop("cmp", None) + ( + attributes_arguments["eq"], + attributes_arguments["order"], + ) = _determine_attrs_eq_order( + cmp, + attributes_arguments.get("eq"), + attributes_arguments.get("order"), + True, + ) + + cls = _attrs(these=cls_dict, **attributes_arguments)(type_) + # Only add type annotations now or "_attrs()" will complain: + cls.__annotations__ = { + k: v.type for k, v in cls_dict.items() if v.type is not None + } + return cls + + +# These are required by within this module so we define them here and merely +# import into .validators / .converters. + + +@attrs(slots=True, unsafe_hash=True) +class _AndValidator: + """ + Compose many validators to a single one. + """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + Args: + validators (~collections.abc.Iterable[typing.Callable]): + Arbitrary number of validators. + + .. versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) + + +def pipe(*converters): + """ + A converter that composes multiple converters into one. + + When called on a value, it runs all wrapped converters, returning the + *last* value. + + Type annotations will be inferred from the wrapped converters', if they + have any. + + converters (~collections.abc.Iterable[typing.Callable]): + Arbitrary number of converters. + + .. 
versionadded:: 20.1.0 + """ + + def pipe_converter(val, inst, field): + for c in converters: + val = c(val, inst, field) if isinstance(c, Converter) else c(val) + + return val + + if not converters: + # If the converter list is empty, pipe_converter is the identity. + A = typing.TypeVar("A") + pipe_converter.__annotations__.update({"val": A, "return": A}) + else: + # Get parameter type from first converter. + t = _AnnotationExtractor(converters[0]).get_first_param_type() + if t: + pipe_converter.__annotations__["val"] = t + + last = converters[-1] + if not PY_3_11_PLUS and isinstance(last, Converter): + last = last.__call__ + + # Get return type from last converter. + rt = _AnnotationExtractor(last).get_return_type() + if rt: + pipe_converter.__annotations__["return"] = rt + + return Converter(pipe_converter, takes_self=True, takes_field=True) diff --git a/venv/Lib/site-packages/attr/_next_gen.py b/venv/Lib/site-packages/attr/_next_gen.py new file mode 100644 index 0000000..dbb65cc --- /dev/null +++ b/venv/Lib/site-packages/attr/_next_gen.py @@ -0,0 +1,631 @@ +# SPDX-License-Identifier: MIT + +""" +These are keyword-only APIs that call `attr.s` and `attr.ib` with different +default values. +""" + + +from functools import partial + +from . import setters +from ._funcs import asdict as _asdict +from ._funcs import astuple as _astuple +from ._make import ( + _DEFAULT_ON_SETATTR, + NOTHING, + _frozen_setattrs, + attrib, + attrs, +) +from .exceptions import UnannotatedAttributeError + + +def define( + maybe_cls=None, + *, + these=None, + repr=None, + unsafe_hash=None, + hash=None, + init=None, + slots=True, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=None, + kw_only=False, + cache_hash=False, + auto_exc=True, + eq=None, + order=False, + auto_detect=True, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, +): + r""" + A class decorator that adds :term:`dunder methods` according to + :term:`fields ` specified using :doc:`type annotations `, + `field()` calls, or the *these* argument. + + Since *attrs* patches or replaces an existing class, you cannot use + `object.__init_subclass__` with *attrs* classes, because it runs too early. + As a replacement, you can define ``__attrs_init_subclass__`` on your class. + It will be called by *attrs* classes that subclass it after they're + created. See also :ref:`init-subclass`. + + Args: + slots (bool): + Create a :term:`slotted class ` that's more + memory-efficient. Slotted classes are generally superior to the + default dict classes, but have some gotchas you should know about, + so we encourage you to read the :term:`glossary entry `. + + auto_detect (bool): + Instead of setting the *init*, *repr*, *eq*, and *hash* arguments + explicitly, assume they are set to True **unless any** of the + involved methods for one of the arguments is implemented in the + *current* class (meaning, it is *not* inherited from some base + class). + + So, for example by implementing ``__eq__`` on a class yourself, + *attrs* will deduce ``eq=False`` and will create *neither* + ``__eq__`` *nor* ``__ne__`` (but Python classes come with a + sensible ``__ne__`` by default, so it *should* be enough to only + implement ``__eq__`` in most cases). + + Passing True or False` to *init*, *repr*, *eq*, *cmp*, or *hash* + overrides whatever *auto_detect* would determine. 
+ + auto_exc (bool): + If the class subclasses `BaseException` (which implicitly includes + any subclass of any exception), the following happens to behave + like a well-behaved Python exception class: + + - the values for *eq*, *order*, and *hash* are ignored and the + instances compare and hash by the instance's ids [#]_ , + - all attributes that are either passed into ``__init__`` or have a + default value are additionally available as a tuple in the + ``args`` attribute, + - the value of *str* is ignored leaving ``__str__`` to base + classes. + + .. [#] + Note that *attrs* will *not* remove existing implementations of + ``__hash__`` or the equality methods. It just won't add own + ones. + + on_setattr (~typing.Callable | list[~typing.Callable] | None | ~typing.Literal[attrs.setters.NO_OP]): + A callable that is run whenever the user attempts to set an + attribute (either by assignment like ``i.x = 42`` or by using + `setattr` like ``setattr(i, "x", 42)``). It receives the same + arguments as validators: the instance, the attribute that is being + modified, and the new value. + + If no exception is raised, the attribute is set to the return value + of the callable. + + If a list of callables is passed, they're automatically wrapped in + an `attrs.setters.pipe`. + + If left None, the default behavior is to run converters and + validators whenever an attribute is set. + + init (bool): + Create a ``__init__`` method that initializes the *attrs* + attributes. Leading underscores are stripped for the argument name, + unless an alias is set on the attribute. + + .. seealso:: + `init` shows advanced ways to customize the generated + ``__init__`` method, including executing code before and after. + + repr(bool): + Create a ``__repr__`` method with a human readable representation + of *attrs* attributes. + + str (bool): + Create a ``__str__`` method that is identical to ``__repr__``. This + is usually not necessary except for `Exception`\ s. + + eq (bool | None): + If True or None (default), add ``__eq__`` and ``__ne__`` methods + that check two instances for equality. + + .. seealso:: + `comparison` describes how to customize the comparison behavior + going as far comparing NumPy arrays. + + order (bool | None): + If True, add ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` + methods that behave like *eq* above and allow instances to be + ordered. + + They compare the instances as if they were tuples of their *attrs* + attributes if and only if the types of both classes are + *identical*. + + If `None` mirror value of *eq*. + + .. seealso:: `comparison` + + cmp (bool | None): + Setting *cmp* is equivalent to setting *eq* and *order* to the same + value. Must not be mixed with *eq* or *order*. + + unsafe_hash (bool | None): + If None (default), the ``__hash__`` method is generated according + how *eq* and *frozen* are set. + + 1. If *both* are True, *attrs* will generate a ``__hash__`` for + you. + 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set + to None, marking it unhashable (which it is). + 3. If *eq* is False, ``__hash__`` will be left untouched meaning + the ``__hash__`` method of the base class will be used. If the + base class is `object`, this means it will fall back to id-based + hashing. + + Although not recommended, you can decide for yourself and force + *attrs* to create one (for example, if the class is immutable even + though you didn't freeze it programmatically) by passing True or + not. 
Both of these cases are rather special and should be used
+            carefully.
+
+            .. seealso::
+
+                - Our documentation on `hashing`,
+                - Python's documentation on `object.__hash__`,
+                - and the `GitHub issue that led to the default \ behavior
+                  <https://github.com/python-attrs/attrs/issues/136>`_ for more
+                  details.
+
+        hash (bool | None):
+            Deprecated alias for *unsafe_hash*. *unsafe_hash* takes precedence.
+
+        cache_hash (bool):
+            Ensure that the object's hash code is computed only once and stored
+            on the object. If this is set to True, hashing must be either
+            explicitly or implicitly enabled for this class. If the hash code
+            is cached, avoid any reassignments of fields involved in hash code
+            computation or mutations of the objects those fields point to after
+            object creation. If such changes occur, the behavior of the
+            object's hash code is undefined.
+
+        frozen (bool):
+            Make instances immutable after initialization. If someone attempts
+            to modify a frozen instance, `attrs.exceptions.FrozenInstanceError`
+            is raised.
+
+            .. note::
+
+                1. This is achieved by installing a custom ``__setattr__``
+                   method on your class, so you can't implement your own.
+
+                2. True immutability is impossible in Python.
+
+                3. This *does* have a minor runtime performance `impact
+                   <how-frozen>` when initializing new instances. In other
+                   words: ``__init__`` is slightly slower with ``frozen=True``.
+
+                4. If a class is frozen, you cannot modify ``self`` in
+                   ``__attrs_post_init__`` or a self-written ``__init__``. You
+                   can circumvent that limitation by using
+                   ``object.__setattr__(self, "attribute_name", value)``.
+
+                5. Subclasses of a frozen class are frozen too.
+
+        kw_only (bool):
+            Make all attributes keyword-only in the generated ``__init__`` (if
+            *init* is False, this parameter is ignored).
+
+        weakref_slot (bool):
+            Make instances weak-referenceable. This has no effect unless
+            *slots* is True.
+
+        field_transformer (~typing.Callable | None):
+            A function that is called with the original class object and all
+            fields right before *attrs* finalizes the class. You can use this,
+            for example, to automatically add converters or validators to
+            fields based on their types.
+
+            .. seealso:: `transform-fields`
+
+        match_args (bool):
+            If True (default), set ``__match_args__`` on the class to support
+            :pep:`634` (*Structural Pattern Matching*). It is a tuple of all
+            non-keyword-only ``__init__`` parameter names on Python 3.10 and
+            later. Ignored on older Python versions.
+
+        collect_by_mro (bool):
+            If True, *attrs* collects attributes from base classes correctly
+            according to the `method resolution order
+            <https://www.python.org/download/releases/2.3/mro/>`_. If False,
+            *attrs* will mimic the (wrong) behavior of `dataclasses` and
+            :pep:`681`.
+
+            See also `issue #428
+            <https://github.com/python-attrs/attrs/issues/428>`_.
+
+        getstate_setstate (bool | None):
+            .. note::
+
+                This is usually only interesting for slotted classes and you
+                should probably just set *auto_detect* to True.
+
+            If True, ``__getstate__`` and ``__setstate__`` are generated and
+            attached to the class. This is necessary for slotted classes to be
+            pickleable. If left None, it's True by default for slotted classes
+            and False for dict classes.
+
+            If *auto_detect* is True, and *getstate_setstate* is left None, and
+            **either** ``__getstate__`` or ``__setstate__`` is detected
+            directly on the class (meaning: not inherited), it is set to False
+            (this is usually what you want).
+
+        auto_attribs (bool | None):
+            If True, look at type annotations to determine which attributes to
+            use, like `dataclasses`.
If False, it will only look for explicit + :func:`field` class attributes, like classic *attrs*. + + If left None, it will guess: + + 1. If any attributes are annotated and no unannotated + `attrs.field`\ s are found, it assumes *auto_attribs=True*. + 2. Otherwise it assumes *auto_attribs=False* and tries to collect + `attrs.field`\ s. + + If *attrs* decides to look at type annotations, **all** fields + **must** be annotated. If *attrs* encounters a field that is set to + a :func:`field` / `attr.ib` but lacks a type annotation, an + `attrs.exceptions.UnannotatedAttributeError` is raised. Use + ``field_name: typing.Any = field(...)`` if you don't want to set a + type. + + .. warning:: + + For features that use the attribute name to create decorators + (for example, :ref:`validators `), you still *must* + assign :func:`field` / `attr.ib` to them. Otherwise Python will + either not find the name or try to use the default value to + call, for example, ``validator`` on it. + + Attributes annotated as `typing.ClassVar`, and attributes that are + neither annotated nor set to an `field()` are **ignored**. + + these (dict[str, object]): + A dictionary of name to the (private) return value of `field()` + mappings. This is useful to avoid the definition of your attributes + within the class body because you can't (for example, if you want + to add ``__repr__`` methods to Django models) or don't want to. + + If *these* is not `None`, *attrs* will *not* search the class body + for attributes and will *not* remove any attributes from it. + + The order is deduced from the order of the attributes inside + *these*. + + Arguably, this is a rather obscure feature. + + .. versionadded:: 20.1.0 + .. versionchanged:: 21.3.0 Converters are also run ``on_setattr``. + .. versionadded:: 22.2.0 + *unsafe_hash* as an alias for *hash* (for :pep:`681` compliance). + .. versionchanged:: 24.1.0 + Instances are not compared as tuples of attributes anymore, but using a + big ``and`` condition. This is faster and has more correct behavior for + uncomparable values like `math.nan`. + .. versionadded:: 24.1.0 + If a class has an *inherited* classmethod called + ``__attrs_init_subclass__``, it is executed after the class is created. + .. deprecated:: 24.1.0 *hash* is deprecated in favor of *unsafe_hash*. + + .. note:: + + The main differences to the classic `attr.s` are: + + - Automatically detect whether or not *auto_attribs* should be `True` + (c.f. *auto_attribs* parameter). + - Converters and validators run when attributes are set by default -- + if *frozen* is `False`. + - *slots=True* + + Usually, this has only upsides and few visible effects in everyday + programming. But it *can* lead to some surprising behaviors, so + please make sure to read :term:`slotted classes`. + + - *auto_exc=True* + - *auto_detect=True* + - *order=False* + - Some options that were only relevant on Python 2 or were kept around + for backwards-compatibility have been removed. 
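+
+    A minimal usage sketch:
+
+    >>> from attrs import define
+    >>> @define
+    ... class Point:
+    ...     x: int
+    ...     y: int = 0
+    >>> Point(1)
+    Point(x=1, y=0)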
+ + """ + + def do_it(cls, auto_attribs): + return attrs( + maybe_cls=cls, + these=these, + repr=repr, + hash=hash, + unsafe_hash=unsafe_hash, + init=init, + slots=slots, + frozen=frozen, + weakref_slot=weakref_slot, + str=str, + auto_attribs=auto_attribs, + kw_only=kw_only, + cache_hash=cache_hash, + auto_exc=auto_exc, + eq=eq, + order=order, + auto_detect=auto_detect, + collect_by_mro=True, + getstate_setstate=getstate_setstate, + on_setattr=on_setattr, + field_transformer=field_transformer, + match_args=match_args, + ) + + def wrap(cls): + """ + Making this a wrapper ensures this code runs during class creation. + + We also ensure that frozen-ness of classes is inherited. + """ + nonlocal frozen, on_setattr + + had_on_setattr = on_setattr not in (None, setters.NO_OP) + + # By default, mutable classes convert & validate on setattr. + if frozen is False and on_setattr is None: + on_setattr = _DEFAULT_ON_SETATTR + + # However, if we subclass a frozen class, we inherit the immutability + # and disable on_setattr. + for base_cls in cls.__bases__: + if base_cls.__setattr__ is _frozen_setattrs: + if had_on_setattr: + msg = "Frozen classes can't use on_setattr (frozen-ness was inherited)." + raise ValueError(msg) + + on_setattr = setters.NO_OP + break + + if auto_attribs is not None: + return do_it(cls, auto_attribs) + + try: + return do_it(cls, True) + except UnannotatedAttributeError: + return do_it(cls, False) + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but `None` if used as `@attrs()`. + if maybe_cls is None: + return wrap + + return wrap(maybe_cls) + + +mutable = define +frozen = partial(define, frozen=True, on_setattr=None) + + +def field( + *, + default=NOTHING, + validator=None, + repr=True, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, + alias=None, +): + """ + Create a new :term:`field` / :term:`attribute` on a class. + + .. warning:: + + Does **nothing** unless the class is also decorated with + `attrs.define` (or similar)! + + Args: + default: + A value that is used if an *attrs*-generated ``__init__`` is used + and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of `attrs.Factory`, its callable will + be used to construct a new value (useful for mutable data types + like lists or dicts). + + If a default is not set (or set manually to `attrs.NOTHING`), a + value *must* be supplied when instantiating; otherwise a + `TypeError` will be raised. + + .. seealso:: `defaults` + + factory (~typing.Callable): + Syntactic sugar for ``default=attr.Factory(factory)``. + + validator (~typing.Callable | list[~typing.Callable]): + Callable that is called by *attrs*-generated ``__init__`` methods + after the instance has been initialized. They receive the + initialized instance, the :func:`~attrs.Attribute`, and the passed + value. + + The return value is *not* inspected so the validator has to throw + an exception itself. + + If a `list` is passed, its items are treated as validators and must + all pass. + + Validators can be globally disabled and re-enabled using + `attrs.validators.get_disabled` / `attrs.validators.set_disabled`. + + The validator can also be set using decorator notation as shown + below. + + .. seealso:: :ref:`validators` + + repr (bool | ~typing.Callable): + Include this attribute in the generated ``__repr__`` method. 
If
+            True, include the attribute; if False, omit it. By default, the
+            built-in ``repr()`` function is used. To override how the attribute
+            value is formatted, pass a ``callable`` that takes a single value
+            and returns a string. Note that the resulting string is used as-is,
+            which means it will be used directly *instead* of calling
+            ``repr()`` (the default).
+
+        eq (bool | ~typing.Callable):
+            If True (default), include this attribute in the generated
+            ``__eq__`` and ``__ne__`` methods that check two instances for
+            equality. To override how the attribute value is compared, pass a
+            callable that takes a single value and returns the value to be
+            compared.
+
+            .. seealso:: `comparison`
+
+        order (bool | ~typing.Callable):
+            If True (default), include this attribute in the generated
+            ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. To
+            override how the attribute value is ordered, pass a callable that
+            takes a single value and returns the value to be ordered.
+
+            .. seealso:: `comparison`
+
+        cmp (bool | ~typing.Callable):
+            Setting *cmp* is equivalent to setting *eq* and *order* to the same
+            value. Must not be mixed with *eq* or *order*.
+
+            .. seealso:: `comparison`
+
+        hash (bool | None):
+            Include this attribute in the generated ``__hash__`` method. If
+            None (default), mirror *eq*'s value. This is the correct behavior
+            according to the Python spec. Setting this value to anything other
+            than None is *discouraged*.
+
+            .. seealso:: `hashing`
+
+        init (bool):
+            Include this attribute in the generated ``__init__`` method.
+
+            It is possible to set this to False and set a default value. In
+            that case this attribute is unconditionally initialized with the
+            specified default value or factory.
+
+            .. seealso:: `init`
+
+        converter (typing.Callable | Converter):
+            A callable that is called by *attrs*-generated ``__init__`` methods
+            to convert the attribute's value to the desired format.
+
+            If a vanilla callable is passed, it is given the passed-in value as
+            the only positional argument. It is possible to receive additional
+            arguments by wrapping the callable in a `Converter`.
+
+            Either way, the returned value will be used as the new value of the
+            attribute. The value is converted before being passed to the
+            validator, if any.
+
+            .. seealso:: :ref:`converters`
+
+        metadata (dict | None):
+            An arbitrary mapping, to be used by third-party code.
+
+            .. seealso:: `extending-metadata`.
+
+        type (type):
+            The type of the attribute. Nowadays, the preferred method to
+            specify the type is using a variable annotation (see :pep:`526`).
+            This argument is provided for backwards-compatibility and for usage
+            with `make_class`. Regardless of the approach used, the type will
+            be stored on ``Attribute.type``.
+
+            Please note that *attrs* doesn't do anything with this metadata by
+            itself. You can use it as part of your own code or for `static type
+            checking <types>`.
+
+        kw_only (bool):
+            Make this attribute keyword-only in the generated ``__init__`` (if
+            ``init`` is False, this parameter is ignored).
+
+        on_setattr (~typing.Callable | list[~typing.Callable] | None | ~typing.Literal[attrs.setters.NO_OP]):
+            Allows overriding the *on_setattr* setting from `attr.s`. If left
+            None, the *on_setattr* value from `attr.s` is used. Set to
+            `attrs.setters.NO_OP` to run **no** `setattr` hooks for this
+            attribute -- regardless of the setting in `define()`.
+
+        alias (str | None):
+            Override this attribute's parameter name in the generated
+            ``__init__`` method. If left None, defaults to ``name`` stripped
If left None, default to ``name`` stripped + of leading underscores. See `private-attributes`. + + .. versionadded:: 20.1.0 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionadded:: 22.2.0 *alias* + .. versionadded:: 23.1.0 + The *type* parameter has been re-added; mostly for `attrs.make_class`. + Please note that type checkers ignore this metadata. + + .. seealso:: + + `attr.ib` + """ + return attrib( + default=default, + validator=validator, + repr=repr, + hash=hash, + init=init, + metadata=metadata, + type=type, + converter=converter, + factory=factory, + kw_only=kw_only, + eq=eq, + order=order, + on_setattr=on_setattr, + alias=alias, + ) + + +def asdict(inst, *, recurse=True, filter=None, value_serializer=None): + """ + Same as `attr.asdict`, except that collections types are always retained + and dict is always used as *dict_factory*. + + .. versionadded:: 21.3.0 + """ + return _asdict( + inst=inst, + recurse=recurse, + filter=filter, + value_serializer=value_serializer, + retain_collection_types=True, + ) + + +def astuple(inst, *, recurse=True, filter=None): + """ + Same as `attr.astuple`, except that collections types are always retained + and `tuple` is always used as the *tuple_factory*. + + .. versionadded:: 21.3.0 + """ + return _astuple( + inst=inst, recurse=recurse, filter=filter, retain_collection_types=True + ) diff --git a/venv/Lib/site-packages/attr/_typing_compat.pyi b/venv/Lib/site-packages/attr/_typing_compat.pyi new file mode 100644 index 0000000..ca7b71e --- /dev/null +++ b/venv/Lib/site-packages/attr/_typing_compat.pyi @@ -0,0 +1,15 @@ +from typing import Any, ClassVar, Protocol + +# MYPY is a special constant in mypy which works the same way as `TYPE_CHECKING`. +MYPY = False + +if MYPY: + # A protocol to be able to statically accept an attrs class. + class AttrsInstance_(Protocol): + __attrs_attrs__: ClassVar[Any] + +else: + # For type checkers without plug-in support use an empty protocol that + # will (hopefully) be combined into a union. + class AttrsInstance_(Protocol): + pass diff --git a/venv/Lib/site-packages/attr/_version_info.py b/venv/Lib/site-packages/attr/_version_info.py new file mode 100644 index 0000000..51a1312 --- /dev/null +++ b/venv/Lib/site-packages/attr/_version_info.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: MIT + + +from functools import total_ordering + +from ._funcs import astuple +from ._make import attrib, attrs + + +@total_ordering +@attrs(eq=False, order=False, slots=True, frozen=True) +class VersionInfo: + """ + A version object that can be compared to tuple of length 1--4: + + >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) + True + >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) + True + >>> vi = attr.VersionInfo(19, 2, 0, "final") + >>> vi < (19, 1, 1) + False + >>> vi < (19,) + False + >>> vi == (19, 2,) + True + >>> vi == (19, 2, 1) + False + + .. versionadded:: 19.2 + """ + + year = attrib(type=int) + minor = attrib(type=int) + micro = attrib(type=int) + releaselevel = attrib(type=str) + + @classmethod + def _from_version_string(cls, s): + """ + Parse *s* and return a _VersionInfo. + """ + v = s.split(".") + if len(v) == 3: + v.append("final") + + return cls( + year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] + ) + + def _ensure_tuple(self, other): + """ + Ensure *other* is a tuple of a valid length. + + Returns a possibly transformed *other* and ourselves as a tuple of + the same length as *other*. 
+        """
+
+        if self.__class__ is other.__class__:
+            other = astuple(other)
+
+        if not isinstance(other, tuple):
+            raise NotImplementedError
+
+        if not (1 <= len(other) <= 4):
+            raise NotImplementedError
+
+        return astuple(self)[: len(other)], other
+
+    def __eq__(self, other):
+        try:
+            us, them = self._ensure_tuple(other)
+        except NotImplementedError:
+            return NotImplemented
+
+        return us == them
+
+    def __lt__(self, other):
+        try:
+            us, them = self._ensure_tuple(other)
+        except NotImplementedError:
+            return NotImplemented
+
+        # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't
+        # have to do anything special with releaselevel for now.
+        return us < them
diff --git a/venv/Lib/site-packages/attr/_version_info.pyi b/venv/Lib/site-packages/attr/_version_info.pyi
new file mode 100644
index 0000000..45ced08
--- /dev/null
+++ b/venv/Lib/site-packages/attr/_version_info.pyi
@@ -0,0 +1,9 @@
+class VersionInfo:
+    @property
+    def year(self) -> int: ...
+    @property
+    def minor(self) -> int: ...
+    @property
+    def micro(self) -> int: ...
+    @property
+    def releaselevel(self) -> str: ...
diff --git a/venv/Lib/site-packages/attr/converters.py b/venv/Lib/site-packages/attr/converters.py
new file mode 100644
index 0000000..9238311
--- /dev/null
+++ b/venv/Lib/site-packages/attr/converters.py
@@ -0,0 +1,151 @@
+# SPDX-License-Identifier: MIT
+
+"""
+Commonly useful converters.
+"""
+
+
+import typing
+
+from ._compat import _AnnotationExtractor
+from ._make import NOTHING, Factory, pipe
+
+
+__all__ = [
+    "default_if_none",
+    "optional",
+    "pipe",
+    "to_bool",
+]
+
+
+def optional(converter):
+    """
+    A converter that allows an attribute to be optional. An optional attribute
+    is one which can be set to `None`.
+
+    Type annotations will be inferred from the wrapped converter's, if it has
+    any.
+
+    Args:
+        converter (typing.Callable):
+            The converter that is used for non-`None` values.
+
+    .. versionadded:: 17.1.0
+    """
+
+    def optional_converter(val):
+        if val is None:
+            return None
+        return converter(val)
+
+    xtr = _AnnotationExtractor(converter)
+
+    t = xtr.get_first_param_type()
+    if t:
+        optional_converter.__annotations__["val"] = typing.Optional[t]
+
+    rt = xtr.get_return_type()
+    if rt:
+        optional_converter.__annotations__["return"] = typing.Optional[rt]
+
+    return optional_converter
+
+
+def default_if_none(default=NOTHING, factory=None):
+    """
+    A converter that replaces `None` values with *default* or the result of
+    *factory*.
+
+    Args:
+        default:
+            Value to be used if `None` is passed. Passing an instance of
+            `attrs.Factory` is supported, however the ``takes_self`` option is
+            *not*.
+
+        factory (typing.Callable):
+            A callable that takes no parameters whose result is used if `None`
+            is passed.
+
+    Raises:
+        TypeError: If **neither** *default* nor *factory* is passed.
+
+        TypeError: If **both** *default* and *factory* are passed.
+
+        ValueError:
+            If an instance of `attrs.Factory` is passed with
+            ``takes_self=True``.
+
+    .. versionadded:: 18.2.0
+    """
+    if default is NOTHING and factory is None:
+        msg = "Must pass either `default` or `factory`."
+        raise TypeError(msg)
+
+    if default is not NOTHING and factory is not None:
+        msg = "Must pass either `default` or `factory` but not both."
+        raise TypeError(msg)
+
+    if factory is not None:
+        default = Factory(factory)
+
+    if isinstance(default, Factory):
+        if default.takes_self:
+            msg = "`takes_self` is not supported by default_if_none."
+ raise ValueError(msg) + + def default_if_none_converter(val): + if val is not None: + return val + + return default.factory() + + else: + + def default_if_none_converter(val): + if val is not None: + return val + + return default + + return default_if_none_converter + + +def to_bool(val): + """ + Convert "boolean" strings (for example, from environment variables) to real + booleans. + + Values mapping to `True`: + + - ``True`` + - ``"true"`` / ``"t"`` + - ``"yes"`` / ``"y"`` + - ``"on"`` + - ``"1"`` + - ``1`` + + Values mapping to `False`: + + - ``False`` + - ``"false"`` / ``"f"`` + - ``"no"`` / ``"n"`` + - ``"off"`` + - ``"0"`` + - ``0`` + + Raises: + ValueError: For any other value. + + .. versionadded:: 21.3.0 + """ + if isinstance(val, str): + val = val.lower() + + if val in (True, "true", "t", "yes", "y", "on", "1", 1): + return True + if val in (False, "false", "f", "no", "n", "off", "0", 0): + return False + + msg = f"Cannot convert value to bool: {val!r}" + raise ValueError(msg) diff --git a/venv/Lib/site-packages/attr/converters.pyi b/venv/Lib/site-packages/attr/converters.pyi new file mode 100644 index 0000000..9ef478f --- /dev/null +++ b/venv/Lib/site-packages/attr/converters.pyi @@ -0,0 +1,13 @@ +from typing import Callable, TypeVar, overload + +from attrs import _ConverterType + +_T = TypeVar("_T") + +def pipe(*validators: _ConverterType) -> _ConverterType: ... +def optional(converter: _ConverterType) -> _ConverterType: ... +@overload +def default_if_none(default: _T) -> _ConverterType: ... +@overload +def default_if_none(*, factory: Callable[[], _T]) -> _ConverterType: ... +def to_bool(val: str) -> bool: ... diff --git a/venv/Lib/site-packages/attr/exceptions.py b/venv/Lib/site-packages/attr/exceptions.py new file mode 100644 index 0000000..3b7abb8 --- /dev/null +++ b/venv/Lib/site-packages/attr/exceptions.py @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: MIT + +from __future__ import annotations + +from typing import ClassVar + + +class FrozenError(AttributeError): + """ + A frozen/immutable instance or attribute have been attempted to be + modified. + + It mirrors the behavior of ``namedtuples`` by using the same error message + and subclassing `AttributeError`. + + .. versionadded:: 20.1.0 + """ + + msg = "can't set attribute" + args: ClassVar[tuple[str]] = [msg] + + +class FrozenInstanceError(FrozenError): + """ + A frozen instance has been attempted to be modified. + + .. versionadded:: 16.1.0 + """ + + +class FrozenAttributeError(FrozenError): + """ + A frozen attribute has been attempted to be modified. + + .. versionadded:: 20.1.0 + """ + + +class AttrsAttributeNotFoundError(ValueError): + """ + An *attrs* function couldn't find an attribute that the user asked for. + + .. versionadded:: 16.2.0 + """ + + +class NotAnAttrsClassError(ValueError): + """ + A non-*attrs* class has been passed into an *attrs* function. + + .. versionadded:: 16.2.0 + """ + + +class DefaultAlreadySetError(RuntimeError): + """ + A default has been set when defining the field and is attempted to be reset + using the decorator. + + .. versionadded:: 17.1.0 + """ + + +class UnannotatedAttributeError(RuntimeError): + """ + A class with ``auto_attribs=True`` has a field without a type annotation. + + .. versionadded:: 17.3.0 + """ + + +class PythonTooOldError(RuntimeError): + """ + It was attempted to use an *attrs* feature that requires a newer Python + version. + + .. 
versionadded:: 18.2.0 + """ + + +class NotCallableError(TypeError): + """ + A field requiring a callable has been set with a value that is not + callable. + + .. versionadded:: 19.2.0 + """ + + def __init__(self, msg, value): + super(TypeError, self).__init__(msg, value) + self.msg = msg + self.value = value + + def __str__(self): + return str(self.msg) diff --git a/venv/Lib/site-packages/attr/exceptions.pyi b/venv/Lib/site-packages/attr/exceptions.pyi new file mode 100644 index 0000000..f268011 --- /dev/null +++ b/venv/Lib/site-packages/attr/exceptions.pyi @@ -0,0 +1,17 @@ +from typing import Any + +class FrozenError(AttributeError): + msg: str = ... + +class FrozenInstanceError(FrozenError): ... +class FrozenAttributeError(FrozenError): ... +class AttrsAttributeNotFoundError(ValueError): ... +class NotAnAttrsClassError(ValueError): ... +class DefaultAlreadySetError(RuntimeError): ... +class UnannotatedAttributeError(RuntimeError): ... +class PythonTooOldError(RuntimeError): ... + +class NotCallableError(TypeError): + msg: str = ... + value: Any = ... + def __init__(self, msg: str, value: Any) -> None: ... diff --git a/venv/Lib/site-packages/attr/filters.py b/venv/Lib/site-packages/attr/filters.py new file mode 100644 index 0000000..689b170 --- /dev/null +++ b/venv/Lib/site-packages/attr/filters.py @@ -0,0 +1,72 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful filters for `attrs.asdict` and `attrs.astuple`. +""" + +from ._make import Attribute + + +def _split_what(what): + """ + Returns a tuple of `frozenset`s of classes and attributes. + """ + return ( + frozenset(cls for cls in what if isinstance(cls, type)), + frozenset(cls for cls in what if isinstance(cls, str)), + frozenset(cls for cls in what if isinstance(cls, Attribute)), + ) + + +def include(*what): + """ + Create a filter that only allows *what*. + + Args: + what (list[type, str, attrs.Attribute]): + What to include. Can be a type, a name, or an attribute. + + Returns: + Callable: + A callable that can be passed to `attrs.asdict`'s and + `attrs.astuple`'s *filter* argument. + + .. versionchanged:: 23.1.0 Accept strings with field names. + """ + cls, names, attrs = _split_what(what) + + def include_(attribute, value): + return ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return include_ + + +def exclude(*what): + """ + Create a filter that does **not** allow *what*. + + Args: + what (list[type, str, attrs.Attribute]): + What to exclude. Can be a type, a name, or an attribute. + + Returns: + Callable: + A callable that can be passed to `attrs.asdict`'s and + `attrs.astuple`'s *filter* argument. + + .. versionchanged:: 23.3.0 Accept field name string as input argument + """ + cls, names, attrs = _split_what(what) + + def exclude_(attribute, value): + return not ( + value.__class__ in cls + or attribute.name in names + or attribute in attrs + ) + + return exclude_ diff --git a/venv/Lib/site-packages/attr/filters.pyi b/venv/Lib/site-packages/attr/filters.pyi new file mode 100644 index 0000000..974abdc --- /dev/null +++ b/venv/Lib/site-packages/attr/filters.pyi @@ -0,0 +1,6 @@ +from typing import Any + +from . import Attribute, _FilterType + +def include(*what: type | str | Attribute[Any]) -> _FilterType[Any]: ... +def exclude(*what: type | str | Attribute[Any]) -> _FilterType[Any]: ... 
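The filter helpers above only decide membership; they do their work when handed to `attrs.asdict` or `attrs.astuple` as the *filter* argument. A minimal usage sketch (illustrative only; the `User` class and its fields are made-up names, not part of this diff):

```python
import attrs
from attrs import asdict, fields, filters

@attrs.define
class User:  # hypothetical example class
    name: str
    email: str
    password: str

u = User("ada", "ada@example.com", "hunter2")

# Keep only the named fields (string names are accepted since 23.1.0).
assert asdict(u, filter=filters.include("name", "email")) == {
    "name": "ada",
    "email": "ada@example.com",
}

# Or drop a field by its Attribute object instead of its name.
assert "password" not in asdict(u, filter=filters.exclude(fields(User).password))
```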
diff --git a/venv/Lib/site-packages/attr/py.typed b/venv/Lib/site-packages/attr/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/venv/Lib/site-packages/attr/setters.py b/venv/Lib/site-packages/attr/setters.py new file mode 100644 index 0000000..a9ce016 --- /dev/null +++ b/venv/Lib/site-packages/attr/setters.py @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly used hooks for on_setattr. +""" + +from . import _config +from .exceptions import FrozenAttributeError + + +def pipe(*setters): + """ + Run all *setters* and return the return value of the last one. + + .. versionadded:: 20.1.0 + """ + + def wrapped_pipe(instance, attrib, new_value): + rv = new_value + + for setter in setters: + rv = setter(instance, attrib, rv) + + return rv + + return wrapped_pipe + + +def frozen(_, __, ___): + """ + Prevent an attribute to be modified. + + .. versionadded:: 20.1.0 + """ + raise FrozenAttributeError() + + +def validate(instance, attrib, new_value): + """ + Run *attrib*'s validator on *new_value* if it has one. + + .. versionadded:: 20.1.0 + """ + if _config._run_validators is False: + return new_value + + v = attrib.validator + if not v: + return new_value + + v(instance, attrib, new_value) + + return new_value + + +def convert(instance, attrib, new_value): + """ + Run *attrib*'s converter -- if it has one -- on *new_value* and return the + result. + + .. versionadded:: 20.1.0 + """ + c = attrib.converter + if c: + # This can be removed once we drop 3.8 and use attrs.Converter instead. + from ._make import Converter + + if not isinstance(c, Converter): + return c(new_value) + + return c(new_value, instance, attrib) + + return new_value + + +# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. +# Sphinx's autodata stopped working, so the docstring is inlined in the API +# docs. +NO_OP = object() diff --git a/venv/Lib/site-packages/attr/setters.pyi b/venv/Lib/site-packages/attr/setters.pyi new file mode 100644 index 0000000..73abf36 --- /dev/null +++ b/venv/Lib/site-packages/attr/setters.pyi @@ -0,0 +1,20 @@ +from typing import Any, NewType, NoReturn, TypeVar + +from . import Attribute +from attrs import _OnSetAttrType + +_T = TypeVar("_T") + +def frozen( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> NoReturn: ... +def pipe(*setters: _OnSetAttrType) -> _OnSetAttrType: ... +def validate(instance: Any, attribute: Attribute[_T], new_value: _T) -> _T: ... + +# convert is allowed to return Any, because they can be chained using pipe. +def convert( + instance: Any, attribute: Attribute[Any], new_value: Any +) -> Any: ... + +_NoOpType = NewType("_NoOpType", object) +NO_OP: _NoOpType diff --git a/venv/Lib/site-packages/attr/validators.py b/venv/Lib/site-packages/attr/validators.py new file mode 100644 index 0000000..8a56717 --- /dev/null +++ b/venv/Lib/site-packages/attr/validators.py @@ -0,0 +1,711 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful validators. 
+"""
+
+
+import operator
+import re
+
+from contextlib import contextmanager
+from re import Pattern
+
+from ._config import get_run_validators, set_run_validators
+from ._make import _AndValidator, and_, attrib, attrs
+from .converters import default_if_none
+from .exceptions import NotCallableError
+
+
+__all__ = [
+    "and_",
+    "deep_iterable",
+    "deep_mapping",
+    "disabled",
+    "ge",
+    "get_disabled",
+    "gt",
+    "in_",
+    "instance_of",
+    "is_callable",
+    "le",
+    "lt",
+    "matches_re",
+    "max_len",
+    "min_len",
+    "not_",
+    "optional",
+    "or_",
+    "set_disabled",
+]
+
+
+def set_disabled(disabled):
+    """
+    Globally disable or enable running validators.
+
+    By default, they are run.
+
+    Args:
+        disabled (bool): If `True`, disable running all validators.
+
+    .. warning::
+
+        This function is not thread-safe!
+
+    .. versionadded:: 21.3.0
+    """
+    set_run_validators(not disabled)
+
+
+def get_disabled():
+    """
+    Return a bool indicating whether validators are currently disabled or not.
+
+    Returns:
+        bool: `True` if validators are currently disabled.
+
+    .. versionadded:: 21.3.0
+    """
+    return not get_run_validators()
+
+
+@contextmanager
+def disabled():
+    """
+    Context manager that disables running validators within its context.
+
+    .. warning::
+
+        This context manager is not thread-safe!
+
+    .. versionadded:: 21.3.0
+    """
+    set_run_validators(False)
+    try:
+        yield
+    finally:
+        set_run_validators(True)
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _InstanceOfValidator:
+    type = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not isinstance(value, self.type):
+            msg = f"'{attr.name}' must be {self.type!r} (got {value!r} that is a {value.__class__!r})."
+            raise TypeError(
+                msg,
+                attr,
+                self.type,
+                value,
+            )
+
+    def __repr__(self):
+        return f"<instance_of validator for type {self.type!r}>"
+
+
+def instance_of(type):
+    """
+    A validator that raises a `TypeError` if the initializer is called with a
+    wrong type for this particular attribute (checks are performed using
+    `isinstance`, therefore it's also valid to pass a tuple of types).
+
+    Args:
+        type (type | tuple[type]): The type to check for.
+
+    Raises:
+        TypeError:
+            With a human readable error message, the attribute (of type
+            `attrs.Attribute`), the expected type, and the value it got.
+    """
+    return _InstanceOfValidator(type)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MatchesReValidator:
+    pattern = attrib()
+    match_func = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not self.match_func(value):
+            msg = f"'{attr.name}' must match regex {self.pattern.pattern!r} ({value!r} doesn't)"
+            raise ValueError(
+                msg,
+                attr,
+                self.pattern,
+                value,
+            )
+
+    def __repr__(self):
+        return f"<matches_re validator for pattern {self.pattern!r}>"
+
+
+def matches_re(regex, flags=0, func=None):
+    r"""
+    A validator that raises `ValueError` if the initializer is called with a
+    string that doesn't match *regex*.
+
+    Args:
+        regex (str, re.Pattern):
+            A regex string or precompiled pattern to match against.
+
+        flags (int):
+            Flags that will be passed to the underlying re function (default 0).
+
+        func (typing.Callable):
+            Which underlying `re` function to call. Valid options are
+            `re.fullmatch`, `re.search`, and `re.match`; the default `None`
+            means `re.fullmatch`. For performance reasons, the pattern is
+            always precompiled using `re.compile`.
+
+    .. versionadded:: 19.2.0
+    .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern.
+    """
+    valid_funcs = (re.fullmatch, None, re.search, re.match)
+    if func not in valid_funcs:
+        msg = "'func' must be one of {}.".format(
+            ", ".join(
+                sorted(e and e.__name__ or "None" for e in set(valid_funcs))
+            )
+        )
+        raise ValueError(msg)
+
+    if isinstance(regex, Pattern):
+        if flags:
+            msg = "'flags' can only be used with a string pattern; pass flags to re.compile() instead"
+            raise TypeError(msg)
+        pattern = regex
+    else:
+        pattern = re.compile(regex, flags)
+
+    if func is re.match:
+        match_func = pattern.match
+    elif func is re.search:
+        match_func = pattern.search
+    else:
+        match_func = pattern.fullmatch
+
+    return _MatchesReValidator(pattern, match_func)
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _OptionalValidator:
+    validator = attrib()
+
+    def __call__(self, inst, attr, value):
+        if value is None:
+            return
+
+        self.validator(inst, attr, value)
+
+    def __repr__(self):
+        return f"<optional validator for {self.validator!r} or None>"
+
+
+def optional(validator):
+    """
+    A validator that makes an attribute optional. An optional attribute is one
+    which can be set to `None` in addition to satisfying the requirements of
+    the sub-validator.
+
+    Args:
+        validator
+            (typing.Callable | tuple[typing.Callable] | list[typing.Callable]):
+            A validator (or validators) that is used for non-`None` values.
+
+    .. versionadded:: 15.1.0
+    .. versionchanged:: 17.1.0 *validator* can be a list of validators.
+    .. versionchanged:: 23.1.0 *validator* can also be a tuple of validators.
+    """
+    if isinstance(validator, (list, tuple)):
+        return _OptionalValidator(_AndValidator(validator))
+
+    return _OptionalValidator(validator)
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _InValidator:
+    options = attrib()
+    _original_options = attrib(hash=False)
+
+    def __call__(self, inst, attr, value):
+        try:
+            in_options = value in self.options
+        except TypeError:  # e.g. `1 in "abc"`
+            in_options = False
+
+        if not in_options:
+            msg = f"'{attr.name}' must be in {self._original_options!r} (got {value!r})"
+            raise ValueError(
+                msg,
+                attr,
+                self._original_options,
+                value,
+            )
+
+    def __repr__(self):
+        return f"<in_ validator with options {self._original_options!r}>"
+
+
+def in_(options):
+    """
+    A validator that raises a `ValueError` if the initializer is called with a
+    value that does not belong in the *options* provided.
+
+    The check is performed using ``value in options``, so *options* has to
+    support that operation.
+
+    To keep the validator hashable, dicts, lists, and sets are transparently
+    transformed into a `tuple`.
+
+    Args:
+        options: Allowed options.
+
+    Raises:
+        ValueError:
+            With a human readable error message, the attribute (of type
+            `attrs.Attribute`), the expected options, and the value it got.
+
+    .. versionadded:: 17.1.0
+    .. versionchanged:: 22.1.0
+       The ValueError was incomplete until now and only contained the human
+       readable error message. Now it contains all the information that has
+       been promised since 17.1.0.
+    .. versionchanged:: 24.1.0
+       *options* that are a list, dict, or a set are now transformed into a
+       tuple to keep the validator hashable.
+    """
+    repr_options = options
+    if isinstance(options, (list, dict, set)):
+        options = tuple(options)
+
+    return _InValidator(options, repr_options)
+
+
+@attrs(repr=False, slots=False, unsafe_hash=True)
+class _IsCallableValidator:
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not callable(value):
+            message = (
+                "'{name}' must be callable "
+                "(got {value!r} that is a {actual!r})."
+            )
+            raise NotCallableError(
+                msg=message.format(
+                    name=attr.name, value=value, actual=value.__class__
+                ),
+                value=value,
+            )
+
+    def __repr__(self):
+        return "<is_callable validator>"
+
+
+def is_callable():
+    """
+    A validator that raises a `attrs.exceptions.NotCallableError` if the
+    initializer is called with a value for this particular attribute that is
+    not callable.
+
+    .. versionadded:: 19.1.0
+
+    Raises:
+        attrs.exceptions.NotCallableError:
+            With a human readable error message containing the attribute
+            (`attrs.Attribute`) name, and the value it got.
+    """
+    return _IsCallableValidator()
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _DeepIterable:
+    member_validator = attrib(validator=is_callable())
+    iterable_validator = attrib(
+        default=None, validator=optional(is_callable())
+    )
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if self.iterable_validator is not None:
+            self.iterable_validator(inst, attr, value)
+
+        for member in value:
+            self.member_validator(inst, attr, member)
+
+    def __repr__(self):
+        iterable_identifier = (
+            ""
+            if self.iterable_validator is None
+            else f" {self.iterable_validator!r}"
+        )
+        return (
+            f"<deep_iterable validator for{iterable_identifier}"
+            f" iterables of {self.member_validator!r}>"
+        )
+
+
+def deep_iterable(member_validator, iterable_validator=None):
+    """
+    A validator that performs deep validation of an iterable.
+
+    Args:
+        member_validator: Validator to apply to iterable members.
+
+        iterable_validator:
+            Validator to apply to iterable itself (optional).
+
+    Raises:
+        TypeError: if any sub-validators fail
+
+    .. versionadded:: 19.1.0
+    """
+    if isinstance(member_validator, (list, tuple)):
+        member_validator = and_(*member_validator)
+    return _DeepIterable(member_validator, iterable_validator)
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _DeepMapping:
+    key_validator = attrib(validator=is_callable())
+    value_validator = attrib(validator=is_callable())
+    mapping_validator = attrib(default=None, validator=optional(is_callable()))
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if self.mapping_validator is not None:
+            self.mapping_validator(inst, attr, value)
+
+        for key in value:
+            self.key_validator(inst, attr, key)
+            self.value_validator(inst, attr, value[key])
+
+    def __repr__(self):
+        return (
+            "<deep_mapping validator for objects mapping "
+            f"{self.key_validator!r} to {self.value_validator!r}>"
+        )
+
+
+def deep_mapping(key_validator, value_validator, mapping_validator=None):
+    """
+    A validator that performs deep validation of a dictionary.
+
+    Args:
+        key_validator: Validator to apply to dictionary keys.
+
+        value_validator: Validator to apply to dictionary values.
+
+        mapping_validator:
+            Validator to apply to top-level mapping attribute (optional).
+
+    .. versionadded:: 19.1.0
+
+    Raises:
+        TypeError: if any sub-validators fail
+    """
+    return _DeepMapping(key_validator, value_validator, mapping_validator)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _NumberValidator:
+    bound = attrib()
+    compare_op = attrib()
+    compare_func = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not self.compare_func(value, self.bound):
+            msg = f"'{attr.name}' must be {self.compare_op} {self.bound}: {value}"
+            raise ValueError(msg)
+
+    def __repr__(self):
+        return f"<Validator for x {self.compare_op} {self.bound}>"
+
+
+def lt(val):
+    """
+    A validator that raises `ValueError` if the initializer is called with a
+    number larger or equal to *val*.
+
+    The validator uses `operator.lt` to compare the values.
+
+    Args:
+        val: Exclusive upper bound for values.
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, "<", operator.lt)
+
+
+def le(val):
+    """
+    A validator that raises `ValueError` if the initializer is called with a
+    number greater than *val*.
+
+    The validator uses `operator.le` to compare the values.
+
+    Args:
+        val: Inclusive upper bound for values.
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, "<=", operator.le)
+
+
+def ge(val):
+    """
+    A validator that raises `ValueError` if the initializer is called with a
+    number smaller than *val*.
+
+    The validator uses `operator.ge` to compare the values.
+
+    Args:
+        val: Inclusive lower bound for values.
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, ">=", operator.ge)
+
+
+def gt(val):
+    """
+    A validator that raises `ValueError` if the initializer is called with a
+    number smaller or equal to *val*.
+
+    The validator uses `operator.gt` to compare the values.
+
+    Args:
+        val: Exclusive lower bound for values.
+
+    .. versionadded:: 21.3.0
+    """
+    return _NumberValidator(val, ">", operator.gt)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MaxLengthValidator:
+    max_length = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if len(value) > self.max_length:
+            msg = f"Length of '{attr.name}' must be <= {self.max_length}: {len(value)}"
+            raise ValueError(msg)
+
+    def __repr__(self):
+        return f"<max_len validator for {self.max_length}>"
+
+
+def max_len(length):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a string or iterable that is longer than *length*.
+
+    Args:
+        length (int): Maximum length of the string or iterable
+
+    .. versionadded:: 21.3.0
+    """
+    return _MaxLengthValidator(length)
+
+
+@attrs(repr=False, frozen=True, slots=True)
+class _MinLengthValidator:
+    min_length = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if len(value) < self.min_length:
+            msg = f"Length of '{attr.name}' must be >= {self.min_length}: {len(value)}"
+            raise ValueError(msg)
+
+    def __repr__(self):
+        return f"<min_len validator for {self.min_length}>"
+
+
+def min_len(length):
+    """
+    A validator that raises `ValueError` if the initializer is called
+    with a string or iterable that is shorter than *length*.
+
+    Args:
+        length (int): Minimum length of the string or iterable
+
+    .. versionadded:: 22.1.0
+    """
+    return _MinLengthValidator(length)
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _SubclassOfValidator:
+    type = attrib()
+
+    def __call__(self, inst, attr, value):
+        """
+        We use a callable class to be able to change the ``__repr__``.
+        """
+        if not issubclass(value, self.type):
+            msg = f"'{attr.name}' must be a subclass of {self.type!r} (got {value!r})."
+            raise TypeError(
+                msg,
+                attr,
+                self.type,
+                value,
+            )
+
+    def __repr__(self):
+        return f"<subclass_of validator for type {self.type!r}>"
+
+
+def _subclass_of(type):
+    """
+    A validator that raises a `TypeError` if the initializer is called with a
+    wrong type for this particular attribute (checks are performed using
+    `issubclass`, therefore it's also valid to pass a tuple of types).
+
+    Args:
+        type (type | tuple[type, ...]): The type(s) to check for.
+
+    Raises:
+        TypeError:
+            With a human readable error message, the attribute (of type
+            `attrs.Attribute`), the expected type, and the value it got.
+    """
+    return _SubclassOfValidator(type)
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _NotValidator:
+    validator = attrib()
+    msg = attrib(
+        converter=default_if_none(
+            "not_ validator child '{validator!r}' "
+            "did not raise a captured error"
+        )
+    )
+    exc_types = attrib(
+        validator=deep_iterable(
+            member_validator=_subclass_of(Exception),
+            iterable_validator=instance_of(tuple),
+        ),
+    )
+
+    def __call__(self, inst, attr, value):
+        try:
+            self.validator(inst, attr, value)
+        except self.exc_types:
+            pass  # suppress error to invert validity
+        else:
+            raise ValueError(
+                self.msg.format(
+                    validator=self.validator,
+                    exc_types=self.exc_types,
+                ),
+                attr,
+                self.validator,
+                value,
+                self.exc_types,
+            )
+
+    def __repr__(self):
+        return f"<not_ validator wrapping {self.validator!r}, capturing {self.exc_types!r}>"
+
+
+def not_(validator, *, msg=None, exc_types=(ValueError, TypeError)):
+    """
+    A validator that wraps and logically 'inverts' the validator passed to it.
+    It will raise a `ValueError` if the provided validator *doesn't* raise a
+    `ValueError` or `TypeError` (by default), and will suppress the exception
+    if the provided validator *does*.
+
+    Intended to be used with existing validators to compose logic without
+    needing to create inverted variants, for example, ``not_(in_(...))``.
+
+    Args:
+        validator: A validator to be logically inverted.
+
+        msg (str):
+            Message to raise if validator fails. Formatted with keys
+            ``exc_types`` and ``validator``.
+
+        exc_types (tuple[type, ...]):
+            Exception type(s) to capture. Other types raised by child
+            validators will not be intercepted and pass through.
+
+    Raises:
+        ValueError:
+            With a human readable error message, the attribute (of type
+            `attrs.Attribute`), the validator that failed to raise an
+            exception, the value it got, and the expected exception types.
+
+    .. versionadded:: 22.2.0
+    """
+    try:
+        exc_types = tuple(exc_types)
+    except TypeError:
+        exc_types = (exc_types,)
+    return _NotValidator(validator, msg, exc_types)
+
+
+@attrs(repr=False, slots=True, unsafe_hash=True)
+class _OrValidator:
+    validators = attrib()
+
+    def __call__(self, inst, attr, value):
+        for v in self.validators:
+            try:
+                v(inst, attr, value)
+            except Exception:  # noqa: BLE001, PERF203, S112
+                continue
+            else:
+                return
+
+        msg = f"None of {self.validators!r} satisfied for value {value!r}"
+        raise ValueError(msg)
+
+    def __repr__(self):
+        return f"<or validator wrapping {self.validators!r}>"
+
+
+def or_(*validators):
+    """
+    A validator that composes multiple validators into one.
+
+    When called on a value, it runs all wrapped validators until one of them is
+    satisfied.
+
+    Args:
+        validators (~collections.abc.Iterable[typing.Callable]):
+            Arbitrary number of validators.
+
+    Raises:
+        ValueError:
+            If no validator is satisfied. Raised with a human-readable error
+            message listing all the wrapped validators and the value that
+            failed all of them.
+
+    ..
versionadded:: 24.1.0 + """ + vals = [] + for v in validators: + vals.extend(v.validators if isinstance(v, _OrValidator) else [v]) + + return _OrValidator(tuple(vals)) diff --git a/venv/Lib/site-packages/attr/validators.pyi b/venv/Lib/site-packages/attr/validators.pyi new file mode 100644 index 0000000..a314110 --- /dev/null +++ b/venv/Lib/site-packages/attr/validators.pyi @@ -0,0 +1,83 @@ +from typing import ( + Any, + AnyStr, + Callable, + Container, + ContextManager, + Iterable, + Mapping, + Match, + Pattern, + TypeVar, + overload, +) + +from attrs import _ValidatorType +from attrs import _ValidatorArgType + +_T = TypeVar("_T") +_T1 = TypeVar("_T1") +_T2 = TypeVar("_T2") +_T3 = TypeVar("_T3") +_I = TypeVar("_I", bound=Iterable) +_K = TypeVar("_K") +_V = TypeVar("_V") +_M = TypeVar("_M", bound=Mapping) + +def set_disabled(run: bool) -> None: ... +def get_disabled() -> bool: ... +def disabled() -> ContextManager[None]: ... + +# To be more precise on instance_of use some overloads. +# If there are more than 3 items in the tuple then we fall back to Any +@overload +def instance_of(type: type[_T]) -> _ValidatorType[_T]: ... +@overload +def instance_of(type: tuple[type[_T]]) -> _ValidatorType[_T]: ... +@overload +def instance_of( + type: tuple[type[_T1], type[_T2]] +) -> _ValidatorType[_T1 | _T2]: ... +@overload +def instance_of( + type: tuple[type[_T1], type[_T2], type[_T3]] +) -> _ValidatorType[_T1 | _T2 | _T3]: ... +@overload +def instance_of(type: tuple[type, ...]) -> _ValidatorType[Any]: ... +def optional( + validator: ( + _ValidatorType[_T] + | list[_ValidatorType[_T]] + | tuple[_ValidatorType[_T]] + ), +) -> _ValidatorType[_T | None]: ... +def in_(options: Container[_T]) -> _ValidatorType[_T]: ... +def and_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... +def matches_re( + regex: Pattern[AnyStr] | AnyStr, + flags: int = ..., + func: Callable[[AnyStr, AnyStr, int], Match[AnyStr] | None] | None = ..., +) -> _ValidatorType[AnyStr]: ... +def deep_iterable( + member_validator: _ValidatorArgType[_T], + iterable_validator: _ValidatorType[_I] | None = ..., +) -> _ValidatorType[_I]: ... +def deep_mapping( + key_validator: _ValidatorType[_K], + value_validator: _ValidatorType[_V], + mapping_validator: _ValidatorType[_M] | None = ..., +) -> _ValidatorType[_M]: ... +def is_callable() -> _ValidatorType[_T]: ... +def lt(val: _T) -> _ValidatorType[_T]: ... +def le(val: _T) -> _ValidatorType[_T]: ... +def ge(val: _T) -> _ValidatorType[_T]: ... +def gt(val: _T) -> _ValidatorType[_T]: ... +def max_len(length: int) -> _ValidatorType[_T]: ... +def min_len(length: int) -> _ValidatorType[_T]: ... +def not_( + validator: _ValidatorType[_T], + *, + msg: str | None = None, + exc_types: type[Exception] | Iterable[type[Exception]] = ..., +) -> _ValidatorType[_T]: ... +def or_(*validators: _ValidatorType[_T]) -> _ValidatorType[_T]: ... 
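Taken together, the validators in this module compose: a list passed as *validator* is implicitly `and_`-ed, and `deep_iterable` recurses into members. A short sketch of the intended usage (an assumed example; `Server` and its fields are hypothetical names, not part of this diff):

```python
import attrs
from attrs import validators as v

@attrs.define
class Server:  # hypothetical example class
    # Must be a str AND fullmatch the regex (matches_re defaults to re.fullmatch).
    host: str = attrs.field(
        validator=[v.instance_of(str), v.matches_re(r"[a-z0-9.-]+")]
    )
    # Numeric bounds via ge/le; the list is and_-ed together.
    port: int = attrs.field(validator=[v.ge(1), v.le(65535)])
    # Every member must be a non-empty str; the container must be a list.
    tags: list = attrs.field(
        factory=list,
        validator=v.deep_iterable(
            member_validator=[v.instance_of(str), v.min_len(1)],
            iterable_validator=v.instance_of(list),
        ),
    )

Server("example.org", 443, ["prod"])   # validates cleanly
# Server("EXAMPLE.ORG", 443, [])       # would raise ValueError: doesn't match regex
```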
diff --git a/venv/Lib/site-packages/attrs-24.2.0.dist-info/INSTALLER b/venv/Lib/site-packages/attrs-24.2.0.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/Lib/site-packages/attrs-24.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/attrs-24.2.0.dist-info/METADATA b/venv/Lib/site-packages/attrs-24.2.0.dist-info/METADATA new file mode 100644 index 0000000..a85b378 --- /dev/null +++ b/venv/Lib/site-packages/attrs-24.2.0.dist-info/METADATA @@ -0,0 +1,242 @@ +Metadata-Version: 2.3 +Name: attrs +Version: 24.2.0 +Summary: Classes Without Boilerplate +Project-URL: Documentation, https://www.attrs.org/ +Project-URL: Changelog, https://www.attrs.org/en/stable/changelog.html +Project-URL: GitHub, https://github.com/python-attrs/attrs +Project-URL: Funding, https://github.com/sponsors/hynek +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-attrs?utm_source=pypi-attrs&utm_medium=pypi +Author-email: Hynek Schlawack +License-Expression: MIT +License-File: LICENSE +Keywords: attribute,boilerplate,class +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Typing :: Typed +Requires-Python: >=3.7 +Requires-Dist: importlib-metadata; python_version < '3.8' +Provides-Extra: benchmark +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'benchmark' +Requires-Dist: hypothesis; extra == 'benchmark' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.9') and extra == 'benchmark' +Requires-Dist: pympler; extra == 'benchmark' +Requires-Dist: pytest-codspeed; extra == 'benchmark' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.9' and python_version < '3.13') and extra == 'benchmark' +Requires-Dist: pytest-xdist[psutil]; extra == 'benchmark' +Requires-Dist: pytest>=4.3.0; extra == 'benchmark' +Provides-Extra: cov +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'cov' +Requires-Dist: coverage[toml]>=5.3; extra == 'cov' +Requires-Dist: hypothesis; extra == 'cov' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.9') and extra == 'cov' +Requires-Dist: pympler; extra == 'cov' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.9' and python_version < '3.13') and extra == 'cov' +Requires-Dist: pytest-xdist[psutil]; extra == 'cov' +Requires-Dist: pytest>=4.3.0; extra == 'cov' +Provides-Extra: dev +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'dev' +Requires-Dist: hypothesis; extra == 'dev' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.9') and extra == 'dev' +Requires-Dist: pre-commit; extra == 'dev' +Requires-Dist: pympler; extra == 'dev' +Requires-Dist: pytest-mypy-plugins; 
(platform_python_implementation == 'CPython' and python_version >= '3.9' and python_version < '3.13') and extra == 'dev' +Requires-Dist: pytest-xdist[psutil]; extra == 'dev' +Requires-Dist: pytest>=4.3.0; extra == 'dev' +Provides-Extra: docs +Requires-Dist: cogapp; extra == 'docs' +Requires-Dist: furo; extra == 'docs' +Requires-Dist: myst-parser; extra == 'docs' +Requires-Dist: sphinx; extra == 'docs' +Requires-Dist: sphinx-notfound-page; extra == 'docs' +Requires-Dist: sphinxcontrib-towncrier; extra == 'docs' +Requires-Dist: towncrier<24.7; extra == 'docs' +Provides-Extra: tests +Requires-Dist: cloudpickle; (platform_python_implementation == 'CPython') and extra == 'tests' +Requires-Dist: hypothesis; extra == 'tests' +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.9') and extra == 'tests' +Requires-Dist: pympler; extra == 'tests' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.9' and python_version < '3.13') and extra == 'tests' +Requires-Dist: pytest-xdist[psutil]; extra == 'tests' +Requires-Dist: pytest>=4.3.0; extra == 'tests' +Provides-Extra: tests-mypy +Requires-Dist: mypy>=1.11.1; (platform_python_implementation == 'CPython' and python_version >= '3.9') and extra == 'tests-mypy' +Requires-Dist: pytest-mypy-plugins; (platform_python_implementation == 'CPython' and python_version >= '3.9' and python_version < '3.13') and extra == 'tests-mypy' +Description-Content-Type: text/markdown + +
+ + +*attrs* is the Python package that will bring back the **joy** of **writing classes** by relieving you from the drudgery of implementing object protocols (aka [dunder methods](https://www.attrs.org/en/latest/glossary.html#term-dunder-methods)). +[Trusted by NASA](https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-github-profile/customizing-your-profile/personalizing-your-profile#list-of-qualifying-repositories-for-mars-2020-helicopter-contributor-achievement) for Mars missions since 2020! + +Its main goal is to help you to write **concise** and **correct** software without slowing down your code. + + +## Sponsors + +*attrs* would not be possible without our [amazing sponsors](https://github.com/sponsors/hynek). +Especially those generously supporting us at the *The Organization* tier and higher: + + + +
+ Please consider joining them to help make attrs’s maintenance more sustainable! +

+ + + +## Example + +*attrs* gives you a class decorator and a way to declaratively define the attributes on that class: + + + +```pycon +>>> from attrs import asdict, define, make_class, Factory + +>>> @define +... class SomeClass: +... a_number: int = 42 +... list_of_numbers: list[int] = Factory(list) +... +... def hard_math(self, another_number): +... return self.a_number + sum(self.list_of_numbers) * another_number + + +>>> sc = SomeClass(1, [1, 2, 3]) +>>> sc +SomeClass(a_number=1, list_of_numbers=[1, 2, 3]) + +>>> sc.hard_math(3) +19 +>>> sc == SomeClass(1, [1, 2, 3]) +True +>>> sc != SomeClass(2, [3, 2, 1]) +True + +>>> asdict(sc) +{'a_number': 1, 'list_of_numbers': [1, 2, 3]} + +>>> SomeClass() +SomeClass(a_number=42, list_of_numbers=[]) + +>>> C = make_class("C", ["a", "b"]) +>>> C("foo", "bar") +C(a='foo', b='bar') +``` + +After *declaring* your attributes, *attrs* gives you: + +- a concise and explicit overview of the class's attributes, +- a nice human-readable `__repr__`, +- equality-checking methods, +- an initializer, +- and much more, + +*without* writing dull boilerplate code again and again and *without* runtime performance penalties. + +--- + +This example uses *attrs*'s modern APIs that have been introduced in version 20.1.0, and the *attrs* package import name that has been added in version 21.3.0. +The classic APIs (`@attr.s`, `attr.ib`, plus their serious-business aliases) and the `attr` package import name will remain **indefinitely**. + +Check out [*On The Core API Names*](https://www.attrs.org/en/latest/names.html) for an in-depth explanation! + + +### Hate Type Annotations!? + +No problem! +Types are entirely **optional** with *attrs*. +Simply assign `attrs.field()` to the attributes instead of annotating them with types: + +```python +from attrs import define, field + +@define +class SomeClass: + a_number = field(default=42) + list_of_numbers = field(factory=list) +``` + + +## Data Classes + +On the tin, *attrs* might remind you of `dataclasses` (and indeed, `dataclasses` [are a descendant](https://hynek.me/articles/import-attrs/) of *attrs*). +In practice it does a lot more and is more flexible. +For instance, it allows you to define [special handling of NumPy arrays for equality checks](https://www.attrs.org/en/stable/comparison.html#customization), allows more ways to [plug into the initialization process](https://www.attrs.org/en/stable/init.html#hooking-yourself-into-initialization), has a replacement for `__init_subclass__`, and allows for stepping through the generated methods using a debugger. + +For more details, please refer to our [comparison page](https://www.attrs.org/en/stable/why.html#data-classes), but generally speaking, we are more likely to commit crimes against nature to make things work that one would expect to work, but that are quite complicated in practice. + + +## Project Information + +- [**Changelog**](https://www.attrs.org/en/stable/changelog.html) +- [**Documentation**](https://www.attrs.org/) +- [**PyPI**](https://pypi.org/project/attrs/) +- [**Source Code**](https://github.com/python-attrs/attrs) +- [**Contributing**](https://github.com/python-attrs/attrs/blob/main/.github/CONTRIBUTING.md) +- [**Third-party Extensions**](https://github.com/python-attrs/attrs/wiki/Extensions-to-attrs) +- **Get Help**: use the `python-attrs` tag on [Stack Overflow](https://stackoverflow.com/questions/tagged/python-attrs) + + +### *attrs* for Enterprise + +Available as part of the Tidelift Subscription. 
+ +The maintainers of *attrs* and thousands of other packages are working with Tidelift to deliver commercial support and maintenance for the open source packages you use to build your applications. +Save time, reduce risk, and improve code health, while paying the maintainers of the exact packages you use. +[Learn more](https://tidelift.com/?utm_source=lifter&utm_medium=referral&utm_campaign=hynek). + +## Release Information + +### Deprecations + +- Given the amount of warnings raised in the broader ecosystem, we've decided to only soft-deprecate the *hash* argument to `@define` / `@attr.s`. + Please don't use it in new code, but we don't intend to remove it anymore. + [#1330](https://github.com/python-attrs/attrs/issues/1330) + + +### Changes + +- `attrs.converters.pipe()` (and its syntactic sugar of passing a list for `attrs.field()`'s / `attr.ib()`'s *converter* argument) works again when passing `attrs.setters.convert` to *on_setattr* (which is default for `attrs.define`). + [#1328](https://github.com/python-attrs/attrs/issues/1328) +- Restored support for PEP [649](https://peps.python.org/pep-0649/) / [749](https://peps.python.org/pep-0749/)-implementing Pythons -- currently 3.14-dev. + [#1329](https://github.com/python-attrs/attrs/issues/1329) + + + +--- + +[Full changelog →](https://www.attrs.org/en/stable/changelog.html) diff --git a/venv/Lib/site-packages/attrs-24.2.0.dist-info/RECORD b/venv/Lib/site-packages/attrs-24.2.0.dist-info/RECORD new file mode 100644 index 0000000..f4876cb --- /dev/null +++ b/venv/Lib/site-packages/attrs-24.2.0.dist-info/RECORD @@ -0,0 +1,55 @@ +attr/__init__.py,sha256=l8Ewh5KZE7CCY0i1iDfSCnFiUTIkBVoqsXjX9EZnIVA,2087 +attr/__init__.pyi,sha256=aTVHBPX6krCGvbQvOl_UKqEzmi2HFsaIVm2WKmAiqVs,11434 +attr/__pycache__/__init__.cpython-311.pyc,, +attr/__pycache__/_cmp.cpython-311.pyc,, +attr/__pycache__/_compat.cpython-311.pyc,, +attr/__pycache__/_config.cpython-311.pyc,, +attr/__pycache__/_funcs.cpython-311.pyc,, +attr/__pycache__/_make.cpython-311.pyc,, +attr/__pycache__/_next_gen.cpython-311.pyc,, +attr/__pycache__/_version_info.cpython-311.pyc,, +attr/__pycache__/converters.cpython-311.pyc,, +attr/__pycache__/exceptions.cpython-311.pyc,, +attr/__pycache__/filters.cpython-311.pyc,, +attr/__pycache__/setters.cpython-311.pyc,, +attr/__pycache__/validators.cpython-311.pyc,, +attr/_cmp.py,sha256=3umHiBtgsEYtvNP_8XrQwTCdFoZIX4DEur76N-2a3X8,4123 +attr/_cmp.pyi,sha256=U-_RU_UZOyPUEQzXE6RMYQQcjkZRY25wTH99sN0s7MM,368 +attr/_compat.py,sha256=n2Uk3c-ywv0PkFfGlvqR7SzDXp4NOhWmNV_ZK6YfWoM,2958 +attr/_config.py,sha256=z81Vt-GeT_2taxs1XZfmHx9TWlSxjPb6eZH1LTGsS54,843 +attr/_funcs.py,sha256=SGDmNlED1TM3tgO9Ap2mfRfVI24XEAcrNQs7o2eBXHQ,17386 +attr/_make.py,sha256=BjENJz5eJoojJVbCoupWjXLLEZJ7VID89lisLbQUlmQ,91479 +attr/_next_gen.py,sha256=dhGb96VFg4kXBkS9Zdz1A2uxVJ99q_RT1hw3kLA9-uI,24630 +attr/_typing_compat.pyi,sha256=XDP54TUn-ZKhD62TOQebmzrwFyomhUCoGRpclb6alRA,469 +attr/_version_info.py,sha256=exSqb3b5E-fMSsgZAlEw9XcLpEgobPORCZpcaEglAM4,2121 +attr/_version_info.pyi,sha256=x_M3L3WuB7r_ULXAWjx959udKQ4HLB8l-hsc1FDGNvk,209 +attr/converters.py,sha256=vNa58pZi9V6uxBzl4t1QrHbQfkT4iRFAodyXe7lcgg0,3506 +attr/converters.pyi,sha256=mpDoVFO3Cpx8xYSSV0iZFl7IAHuoNBglxKfxHvLj_sY,410 +attr/exceptions.py,sha256=HRFq4iybmv7-DcZwyjl6M1euM2YeJVK_hFxuaBGAngI,1977 +attr/exceptions.pyi,sha256=zZq8bCUnKAy9mDtBEw42ZhPhAUIHoTKedDQInJD883M,539 +attr/filters.py,sha256=ZBiKWLp3R0LfCZsq7X11pn9WX8NslS2wXM4jsnLOGc8,1795 +attr/filters.pyi,sha256=3J5BG-dTxltBk1_-RuNRUHrv2qu1v8v4aDNAQ7_mifA,208 
+attr/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attr/setters.py,sha256=faMQeiBo_nbXYnPaQ1pq8PXeA7Zr-uNsVsPMiKCmxhc,1619 +attr/setters.pyi,sha256=NnVkaFU1BB4JB8E4JuXyrzTUgvtMpj8p3wBdJY7uix4,584 +attr/validators.py,sha256=985eTP6RHyon61YEauMJgyNy1rEOhJWiSXMJgRxPtrQ,20045 +attr/validators.pyi,sha256=LjKf7AoXZfvGSfT3LRs61Qfln94konYyMUPoJJjOxK4,2502 +attrs-24.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +attrs-24.2.0.dist-info/METADATA,sha256=3Jgk4lr9Y1SAqAcwOLPN_mpW0wc6VOGm-yHt1LsPIHw,11524 +attrs-24.2.0.dist-info/RECORD,, +attrs-24.2.0.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87 +attrs-24.2.0.dist-info/licenses/LICENSE,sha256=iCEVyV38KvHutnFPjsbVy8q_Znyv-HKfQkINpj9xTp8,1109 +attrs/__init__.py,sha256=5FHo-EMFOX-g4ialSK4fwOjuoHzLISJDZCwoOl02Ty8,1071 +attrs/__init__.pyi,sha256=o3l92VsD9kHz8sldEtb_tllBTs3TeL-vIBMTxo2Zc_4,7703 +attrs/__pycache__/__init__.cpython-311.pyc,, +attrs/__pycache__/converters.cpython-311.pyc,, +attrs/__pycache__/exceptions.cpython-311.pyc,, +attrs/__pycache__/filters.cpython-311.pyc,, +attrs/__pycache__/setters.cpython-311.pyc,, +attrs/__pycache__/validators.cpython-311.pyc,, +attrs/converters.py,sha256=8kQljrVwfSTRu8INwEk8SI0eGrzmWftsT7rM0EqyohM,76 +attrs/exceptions.py,sha256=ACCCmg19-vDFaDPY9vFl199SPXCQMN_bENs4DALjzms,76 +attrs/filters.py,sha256=VOUMZug9uEU6dUuA0dF1jInUK0PL3fLgP0VBS5d-CDE,73 +attrs/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +attrs/setters.py,sha256=eL1YidYQV3T2h9_SYIZSZR1FAcHGb1TuCTy0E0Lv2SU,73 +attrs/validators.py,sha256=xcy6wD5TtTkdCG1f4XWbocPSO0faBjk5IfVJfP6SUj0,76 diff --git a/venv/Lib/site-packages/attrs-24.2.0.dist-info/WHEEL b/venv/Lib/site-packages/attrs-24.2.0.dist-info/WHEEL new file mode 100644 index 0000000..cdd68a4 --- /dev/null +++ b/venv/Lib/site-packages/attrs-24.2.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.25.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/Lib/site-packages/attrs-24.2.0.dist-info/licenses/LICENSE b/venv/Lib/site-packages/attrs-24.2.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000..2bd6453 --- /dev/null +++ b/venv/Lib/site-packages/attrs-24.2.0.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack and the attrs contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
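The next hunk vendors the `attrs` package itself, which is a thin facade: it re-imports everything from the classic `attr` package rather than duplicating it. A quick sanity-check sketch of what that implies (illustrative, not part of the diff):

```python
import attr
import attrs

# The modern attrs namespace re-exports the same objects, not copies.
assert attrs.define is attr.define
assert attrs.Factory is attr.Factory
assert attrs.NOTHING is attr.NOTHING
```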
diff --git a/venv/Lib/site-packages/attrs/__init__.py b/venv/Lib/site-packages/attrs/__init__.py new file mode 100644 index 0000000..963b197 --- /dev/null +++ b/venv/Lib/site-packages/attrs/__init__.py @@ -0,0 +1,67 @@ +# SPDX-License-Identifier: MIT + +from attr import ( + NOTHING, + Attribute, + AttrsInstance, + Converter, + Factory, + _make_getattr, + assoc, + cmp_using, + define, + evolve, + field, + fields, + fields_dict, + frozen, + has, + make_class, + mutable, + resolve_types, + validate, +) +from attr._next_gen import asdict, astuple + +from . import converters, exceptions, filters, setters, validators + + +__all__ = [ + "__author__", + "__copyright__", + "__description__", + "__doc__", + "__email__", + "__license__", + "__title__", + "__url__", + "__version__", + "__version_info__", + "asdict", + "assoc", + "astuple", + "Attribute", + "AttrsInstance", + "cmp_using", + "Converter", + "converters", + "define", + "evolve", + "exceptions", + "Factory", + "field", + "fields_dict", + "fields", + "filters", + "frozen", + "has", + "make_class", + "mutable", + "NOTHING", + "resolve_types", + "setters", + "validate", + "validators", +] + +__getattr__ = _make_getattr(__name__) diff --git a/venv/Lib/site-packages/attrs/__init__.pyi b/venv/Lib/site-packages/attrs/__init__.pyi new file mode 100644 index 0000000..b2670de --- /dev/null +++ b/venv/Lib/site-packages/attrs/__init__.pyi @@ -0,0 +1,252 @@ +import sys + +from typing import ( + Any, + Callable, + Mapping, + Sequence, + overload, + TypeVar, +) + +# Because we need to type our own stuff, we have to make everything from +# attr explicitly public too. +from attr import __author__ as __author__ +from attr import __copyright__ as __copyright__ +from attr import __description__ as __description__ +from attr import __email__ as __email__ +from attr import __license__ as __license__ +from attr import __title__ as __title__ +from attr import __url__ as __url__ +from attr import __version__ as __version__ +from attr import __version_info__ as __version_info__ +from attr import assoc as assoc +from attr import Attribute as Attribute +from attr import AttrsInstance as AttrsInstance +from attr import cmp_using as cmp_using +from attr import converters as converters +from attr import Converter as Converter +from attr import evolve as evolve +from attr import exceptions as exceptions +from attr import Factory as Factory +from attr import fields as fields +from attr import fields_dict as fields_dict +from attr import filters as filters +from attr import has as has +from attr import make_class as make_class +from attr import NOTHING as NOTHING +from attr import resolve_types as resolve_types +from attr import setters as setters +from attr import validate as validate +from attr import validators as validators +from attr import attrib, asdict as asdict, astuple as astuple + +if sys.version_info >= (3, 11): + from typing import dataclass_transform +else: + from typing_extensions import dataclass_transform + +_T = TypeVar("_T") +_C = TypeVar("_C", bound=type) + +_EqOrderType = bool | Callable[[Any], Any] +_ValidatorType = Callable[[Any, "Attribute[_T]", _T], Any] +_ConverterType = Callable[[Any], Any] +_ReprType = Callable[[Any], str] +_ReprArgType = bool | _ReprType +_OnSetAttrType = Callable[[Any, "Attribute[Any]", Any], Any] +_OnSetAttrArgType = _OnSetAttrType | list[_OnSetAttrType] | setters._NoOpType +_FieldTransformer = Callable[ + [type, list["Attribute[Any]"]], list["Attribute[Any]"] +] +# FIXME: in reality, if multiple validators are passed they 
must be in a list +# or tuple, but those are invariant and so would prevent subtypes of +# _ValidatorType from working when passed in a list or tuple. +_ValidatorArgType = _ValidatorType[_T] | Sequence[_ValidatorType[_T]] + +@overload +def field( + *, + default: None = ..., + validator: None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: None = ..., + factory: None = ..., + kw_only: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> Any: ... + +# This form catches an explicit None or no default and infers the type from the +# other arguments. +@overload +def field( + *, + default: None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType | Converter[Any, _T] | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> _T: ... + +# This form catches an explicit default argument. +@overload +def field( + *, + default: _T, + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType | Converter[Any, _T] | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> _T: ... + +# This form covers type=non-Type: e.g. forward references (str), Any +@overload +def field( + *, + default: _T | None = ..., + validator: _ValidatorArgType[_T] | None = ..., + repr: _ReprArgType = ..., + hash: bool | None = ..., + init: bool = ..., + metadata: Mapping[Any, Any] | None = ..., + converter: _ConverterType | Converter[Any, _T] | None = ..., + factory: Callable[[], _T] | None = ..., + kw_only: bool = ..., + eq: _EqOrderType | None = ..., + order: _EqOrderType | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + alias: str | None = ..., + type: type | None = ..., +) -> Any: ... +@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: _C, + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> _C: ... 
+@overload +@dataclass_transform(field_specifiers=(attrib, field)) +def define( + maybe_cls: None = ..., + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... + +mutable = define + +@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: _C, + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> _C: ... +@overload +@dataclass_transform(frozen_default=True, field_specifiers=(attrib, field)) +def frozen( + maybe_cls: None = ..., + *, + these: dict[str, Any] | None = ..., + repr: bool = ..., + unsafe_hash: bool | None = ..., + hash: bool | None = ..., + init: bool = ..., + slots: bool = ..., + frozen: bool = ..., + weakref_slot: bool = ..., + str: bool = ..., + auto_attribs: bool = ..., + kw_only: bool = ..., + cache_hash: bool = ..., + auto_exc: bool = ..., + eq: bool | None = ..., + order: bool | None = ..., + auto_detect: bool = ..., + getstate_setstate: bool | None = ..., + on_setattr: _OnSetAttrArgType | None = ..., + field_transformer: _FieldTransformer | None = ..., + match_args: bool = ..., +) -> Callable[[_C], _C]: ... 
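These stubs give `define` and `frozen` both decorator forms (bare and with arguments) via `@overload`, and mark them with `dataclass_transform` so type checkers treat them like `dataclasses`. How that looks at a call site (an assumed usage sketch; `Point` is a made-up class):

```python
import attrs

@attrs.frozen  # frozen_default=True per the stub above
class Point:   # hypothetical example class
    x: int
    y: int

p = Point(1, 2)
try:
    p.x = 10   # slotted + frozen: mutation raises
except attrs.exceptions.FrozenInstanceError:
    pass

# evolve() is the idiomatic way to derive a modified copy.
assert attrs.evolve(p, x=10) == Point(10, 2)
```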
diff --git a/venv/Lib/site-packages/attrs/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/attrs/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..bc56eba Binary files /dev/null and b/venv/Lib/site-packages/attrs/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attrs/__pycache__/converters.cpython-311.pyc b/venv/Lib/site-packages/attrs/__pycache__/converters.cpython-311.pyc new file mode 100644 index 0000000..09990eb Binary files /dev/null and b/venv/Lib/site-packages/attrs/__pycache__/converters.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attrs/__pycache__/exceptions.cpython-311.pyc b/venv/Lib/site-packages/attrs/__pycache__/exceptions.cpython-311.pyc new file mode 100644 index 0000000..89af533 Binary files /dev/null and b/venv/Lib/site-packages/attrs/__pycache__/exceptions.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attrs/__pycache__/filters.cpython-311.pyc b/venv/Lib/site-packages/attrs/__pycache__/filters.cpython-311.pyc new file mode 100644 index 0000000..fde59c1 Binary files /dev/null and b/venv/Lib/site-packages/attrs/__pycache__/filters.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attrs/__pycache__/setters.cpython-311.pyc b/venv/Lib/site-packages/attrs/__pycache__/setters.cpython-311.pyc new file mode 100644 index 0000000..d4fe26a Binary files /dev/null and b/venv/Lib/site-packages/attrs/__pycache__/setters.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attrs/__pycache__/validators.cpython-311.pyc b/venv/Lib/site-packages/attrs/__pycache__/validators.cpython-311.pyc new file mode 100644 index 0000000..60450e8 Binary files /dev/null and b/venv/Lib/site-packages/attrs/__pycache__/validators.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/attrs/converters.py b/venv/Lib/site-packages/attrs/converters.py new file mode 100644 index 0000000..7821f6c --- /dev/null +++ b/venv/Lib/site-packages/attrs/converters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.converters import * # noqa: F403 diff --git a/venv/Lib/site-packages/attrs/exceptions.py b/venv/Lib/site-packages/attrs/exceptions.py new file mode 100644 index 0000000..3323f9d --- /dev/null +++ b/venv/Lib/site-packages/attrs/exceptions.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.exceptions import * # noqa: F403 diff --git a/venv/Lib/site-packages/attrs/filters.py b/venv/Lib/site-packages/attrs/filters.py new file mode 100644 index 0000000..3080f48 --- /dev/null +++ b/venv/Lib/site-packages/attrs/filters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.filters import * # noqa: F403 diff --git a/venv/Lib/site-packages/attrs/py.typed b/venv/Lib/site-packages/attrs/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/venv/Lib/site-packages/attrs/setters.py b/venv/Lib/site-packages/attrs/setters.py new file mode 100644 index 0000000..f3d73bb --- /dev/null +++ b/venv/Lib/site-packages/attrs/setters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.setters import * # noqa: F403 diff --git a/venv/Lib/site-packages/attrs/validators.py b/venv/Lib/site-packages/attrs/validators.py new file mode 100644 index 0000000..037e124 --- /dev/null +++ b/venv/Lib/site-packages/attrs/validators.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.validators import * # noqa: F403 diff --git a/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/INSTALLER b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/INSTALLER new file mode 
100644 index 0000000..a1b589e --- /dev/null +++ b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/METADATA b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/METADATA new file mode 100644 index 0000000..a2681d7 --- /dev/null +++ b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/METADATA @@ -0,0 +1,122 @@ +Metadata-Version: 2.1 +Name: beautifulsoup4 +Version: 4.12.3 +Summary: Screen-scraping library +Project-URL: Download, https://www.crummy.com/software/BeautifulSoup/bs4/download/ +Project-URL: Homepage, https://www.crummy.com/software/BeautifulSoup/bs4/ +Author-email: Leonard Richardson +License: MIT License +License-File: AUTHORS +License-File: LICENSE +Keywords: HTML,XML,parse,soup +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Classifier: Topic :: Text Processing :: Markup :: HTML +Classifier: Topic :: Text Processing :: Markup :: SGML +Classifier: Topic :: Text Processing :: Markup :: XML +Requires-Python: >=3.6.0 +Requires-Dist: soupsieve>1.2 +Provides-Extra: cchardet +Requires-Dist: cchardet; extra == 'cchardet' +Provides-Extra: chardet +Requires-Dist: chardet; extra == 'chardet' +Provides-Extra: charset-normalizer +Requires-Dist: charset-normalizer; extra == 'charset-normalizer' +Provides-Extra: html5lib +Requires-Dist: html5lib; extra == 'html5lib' +Provides-Extra: lxml +Requires-Dist: lxml; extra == 'lxml' +Description-Content-Type: text/markdown + +Beautiful Soup is a library that makes it easy to scrape information +from web pages. It sits atop an HTML or XML parser, providing Pythonic +idioms for iterating, searching, and modifying the parse tree. + +# Quick start + +``` +>>> from bs4 import BeautifulSoup +>>> soup = BeautifulSoup("
<p>Some<b>bad<i>HTML")
+>>> print(soup.prettify())
+<html>
+ <body>
+  <p>
+   Some
+   <b>
+    bad
+    <i>
+     HTML
+    </i>
+   </b>
+  </p>
+ </body>
+</html>
+>>> soup.find(text="bad")
+'bad'
+>>> soup.i
+<i>HTML</i>
+#
+>>> soup = BeautifulSoup("<tag1>Some<tag2/>bad<tag3>XML", "xml")
+#
+>>> print(soup.prettify())
+<?xml version="1.0" encoding="utf-8"?>
+<tag1>
+ Some
+ <tag2/>
+ bad
+ <tag3>
+  XML
+ </tag3>
+</tag1>
+```
+ +To go beyond the basics, [comprehensive documentation is available](https://www.crummy.com/software/BeautifulSoup/bs4/doc/). + +# Links + +* [Homepage](https://www.crummy.com/software/BeautifulSoup/bs4/) +* [Documentation](https://www.crummy.com/software/BeautifulSoup/bs4/doc/) +* [Discussion group](https://groups.google.com/group/beautifulsoup/) +* [Development](https://code.launchpad.net/beautifulsoup/) +* [Bug tracker](https://bugs.launchpad.net/beautifulsoup/) +* [Complete changelog](https://bazaar.launchpad.net/~leonardr/beautifulsoup/bs4/view/head:/CHANGELOG) + +# Note on Python 2 sunsetting + +Beautiful Soup's support for Python 2 was discontinued on December 31, +2020: one year after the sunset date for Python 2 itself. From this +point onward, new Beautiful Soup development will exclusively target +Python 3. The final release of Beautiful Soup 4 to support Python 2 +was 4.9.3. + +# Supporting the project + +If you use Beautiful Soup as part of your professional work, please consider a +[Tidelift subscription](https://tidelift.com/subscription/pkg/pypi-beautifulsoup4?utm_source=pypi-beautifulsoup4&utm_medium=referral&utm_campaign=readme). +This will support many of the free software projects your organization +depends on, not just Beautiful Soup. + +If you use Beautiful Soup for personal projects, the best way to say +thank you is to read +[Tool Safety](https://www.crummy.com/software/BeautifulSoup/zine/), a zine I +wrote about what Beautiful Soup has taught me about software +development. + +# Building the documentation + +The bs4/doc/ directory contains full documentation in Sphinx +format. Run `make html` in that directory to create HTML +documentation.
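Since this repository's own helpers (for example `priceProcess` and `hrefNameProcessor`) are scraping-oriented, a hedged sketch of how the quick-start pattern above is typically combined with `requests` may be useful here. The URL and CSS class below are hypothetical placeholders, not taken from this codebase:

```python
# Hypothetical sketch only: the URL and the "product" class are placeholders.
import requests
from bs4 import BeautifulSoup

resp = requests.get("https://example.com/catalog")   # placeholder URL
soup = BeautifulSoup(resp.text, "html.parser")

# Extract link text and hrefs, in the same traversal style as the quick start.
for item in soup.find_all("a", class_="product"):    # hypothetical class name
    name = item.get_text(strip=True)
    href = item.get("href")
    print(name, href)
```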
+ +# Running the unit tests + +Beautiful Soup supports unit test discovery using Pytest: + +``` +$ pytest +``` + diff --git a/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/RECORD b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/RECORD new file mode 100644 index 0000000..f5577ae --- /dev/null +++ b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/RECORD @@ -0,0 +1,78 @@ +beautifulsoup4-4.12.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +beautifulsoup4-4.12.3.dist-info/METADATA,sha256=UkOS1koIjlakIy9Q1u2yCNwDEFOUZSrLcsbV-mTInz4,3790 +beautifulsoup4-4.12.3.dist-info/RECORD,, +beautifulsoup4-4.12.3.dist-info/WHEEL,sha256=mRYSEL3Ih6g5a_CVMIcwiF__0Ae4_gLYh01YFNwiq1k,87 +beautifulsoup4-4.12.3.dist-info/licenses/AUTHORS,sha256=uSIdbrBb1sobdXl7VrlUvuvim2dN9kF3MH4Edn0WKGE,2176 +beautifulsoup4-4.12.3.dist-info/licenses/LICENSE,sha256=VbTY1LHlvIbRDvrJG3TIe8t3UmsPW57a-LnNKtxzl7I,1441 +bs4/__init__.py,sha256=kq32cCtQiNjjU9XwjD0b1jdXN5WEC87nJqSSW3PhVkM,33822 +bs4/__pycache__/__init__.cpython-311.pyc,, +bs4/__pycache__/css.cpython-311.pyc,, +bs4/__pycache__/dammit.cpython-311.pyc,, +bs4/__pycache__/diagnose.cpython-311.pyc,, +bs4/__pycache__/element.cpython-311.pyc,, +bs4/__pycache__/formatter.cpython-311.pyc,, +bs4/builder/__init__.py,sha256=nwb35ftjcwzOs2WkjVm1zvfi7FxSyJP-nN1YheIVT14,24566 +bs4/builder/__pycache__/__init__.cpython-311.pyc,, +bs4/builder/__pycache__/_html5lib.cpython-311.pyc,, +bs4/builder/__pycache__/_htmlparser.cpython-311.pyc,, +bs4/builder/__pycache__/_lxml.cpython-311.pyc,, +bs4/builder/_html5lib.py,sha256=0w-hmPM5wWR2iDuRCR6MvY6ZPXbg_hgddym-YWqj03s,19114 +bs4/builder/_htmlparser.py,sha256=_VD5Z08j6A9YYMR4y7ZTfdMzwiCBsSUQAPuHiYB-WZI,14923 +bs4/builder/_lxml.py,sha256=yKdMx1kdX7H2CopwSWEYm4Sgrfkd-WDj8HbskcaLauU,14948 +bs4/css.py,sha256=gqGaHRrKeCRF3gDqxzeU0uclOCeSsTpuW9gUaSnJeWc,10077 +bs4/dammit.py,sha256=G0cQfsEqfwJ-FIQMkXgCJwSHMn7t9vPepCrud6fZEKk,41158 +bs4/diagnose.py,sha256=uAwdDugL_67tB-BIwDIFLFbiuzGxP2wQzJJ4_bGYUrA,7195 +bs4/element.py,sha256=Dsol2iehkSjk10GzYgwFyjUEgpqmYZpyaAmbL0rWM2w,92845 +bs4/formatter.py,sha256=Bu4utAQYT9XDJaPPpTRM-dyxJDVLdxf_as-IU5gSY8A,7188 +bs4/tests/__init__.py,sha256=NydTegds_r7MoOEuQLS6TFmTA9TwK3KxJhwEkqjCGTQ,48392 +bs4/tests/__pycache__/__init__.cpython-311.pyc,, +bs4/tests/__pycache__/test_builder.cpython-311.pyc,, +bs4/tests/__pycache__/test_builder_registry.cpython-311.pyc,, +bs4/tests/__pycache__/test_css.cpython-311.pyc,, +bs4/tests/__pycache__/test_dammit.cpython-311.pyc,, +bs4/tests/__pycache__/test_docs.cpython-311.pyc,, +bs4/tests/__pycache__/test_element.cpython-311.pyc,, +bs4/tests/__pycache__/test_formatter.cpython-311.pyc,, +bs4/tests/__pycache__/test_fuzz.cpython-311.pyc,, +bs4/tests/__pycache__/test_html5lib.cpython-311.pyc,, +bs4/tests/__pycache__/test_htmlparser.cpython-311.pyc,, +bs4/tests/__pycache__/test_lxml.cpython-311.pyc,, +bs4/tests/__pycache__/test_navigablestring.cpython-311.pyc,, +bs4/tests/__pycache__/test_pageelement.cpython-311.pyc,, +bs4/tests/__pycache__/test_soup.cpython-311.pyc,, +bs4/tests/__pycache__/test_tag.cpython-311.pyc,, +bs4/tests/__pycache__/test_tree.cpython-311.pyc,, +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase,sha256=yUdXkbpNK7LVOQ0LBHMoqZ1rWaBfSXWytoO_xdSm7Ho,15 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4818336571064320.testcase,sha256=Uv_dx4a43TSfoNkjU-jHW2nSXkqHFg4XdAw7SWVObUk,23 
+bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4999465949331456.testcase,sha256=OEyVA0Ej4FxswOElrUNt0In4s4YhrmtaxE_NHGZvGtg,30 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase,sha256=G4vpNBOz-RwMpi6ewEgNEa13zX0sXhmL7VHOyIcdKVQ,15347 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase,sha256=3d8z65o4p7Rur-RmCHoOjzqaYQ8EAtjmiBYTHNyAdl4,19469 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase,sha256=NfGIlit1k40Ip3mlnBkYOkIDJX6gHtjlErwl7gsBjAQ,12 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase,sha256=xy4i1U0nhFHcnyc5pRKS6JRMvuoCNUur-Scor6UxIGw,4317 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase,sha256=Q-UTYpQBUsWoMgIUspUlzveSI-41s4ABC3jajRb-K0o,11502 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase,sha256=2bq3S8KxZgk8EajLReHD8m4_0Lj_nrkyJAxB_z_U0D0,5 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5843991618256896.testcase,sha256=MZDu31LPLfgu6jP9IZkrlwNes3f_sL8WFP5BChkUKdY,35 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5984173902397440.testcase,sha256=w58r-s6besG5JwPXpnz37W2YTj9-_qxFbk6hiEnKeIQ,51495 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6124268085182464.testcase,sha256=q8rkdMECEXKcqVhOf5zWHkSBTQeOPt0JiLg2TZiPCuk,10380 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6241471367348224.testcase,sha256=QfzoOxKwNuqG-4xIrea6MOQLXhfAAOQJ0r9u-J6kSNs,19 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6306874195312640.testcase,sha256=MJ2pHFuuCQUiQz1Kor2sof7LWeRERQ6QK43YNqQHg9o,47 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase,sha256=EItOpSdeD4ewK-qgJ9vtxennwn_huguzXgctrUT7fqE,3546 +bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase,sha256=a2aJTG4FceGSJXsjtxoS8S4jk_8rZsS3aznLkeO2_dY,124 +bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase,sha256=jRFRtCKlP3-3EDLc_iVRTcE6JNymv0rYcVM6qRaPrxI,2607 +bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase,sha256=7NsdCiXWAhNkmoW1pvF7rbZExyLAQIWtDtSHXIsH6YU,103 +bs4/tests/test_builder.py,sha256=nc2JE5EMrEf-p24qhf2R8qAV5PpFiOuNpYCmtmCjlTI,1115 +bs4/tests/test_builder_registry.py,sha256=7WLj2prjSHGphebnrjQuI6JYr03Uy_c9_CkaFSQ9HRo,5114 +bs4/tests/test_css.py,sha256=jCcgIWem3lyPa5AjhAk9S6fWI07hk1rg0v8coD7bEtI,17279 +bs4/tests/test_dammit.py,sha256=MbSmRN6VEP0Rm56-w6Ja0TW8eC-8ZxOJ-wXWVf_hRi8,15451 +bs4/tests/test_docs.py,sha256=xoAxnUfoQ7aRqGImwW_9BJDU8WNMZHIuvWqVepvWXt8,1127 +bs4/tests/test_element.py,sha256=92oRSRoGk8gIXAbAGHErKzocx2MK32TqcQdUJ-dGQMo,2377 +bs4/tests/test_formatter.py,sha256=eTzj91Lmhv90z-WiHjK3sBJZm0hRk0crFY1TZaXstCY,4148 +bs4/tests/test_fuzz.py,sha256=_K2utiYVkZ22mvh03g8CBioFU1QDJaff1vTaDyXhxNk,6972 +bs4/tests/test_html5lib.py,sha256=2-ipm-_MaPt37WTxEd5DodUTNhS4EbLFKPRaO6XSCW4,8322 +bs4/tests/test_htmlparser.py,sha256=wnngcIlzjEwH21JFfu_mgt6JdpLt0ncJfLcGT7HeGw0,6256 +bs4/tests/test_lxml.py,sha256=nQCmLt7bWk0id7xMumZw--PzEe1xF9PTQn3lvHyNC6I,7635 +bs4/tests/test_navigablestring.py,sha256=RGSgziNf7cZnYdEPsoqL1B2I68TUJp1JmEQVxbh_ryA,5081 +bs4/tests/test_pageelement.py,sha256=VdGjUxx3RhjqmNsJ92ao6VZC_YD7T8mdLkDZjosOYeE,14274 +bs4/tests/test_soup.py,sha256=JmnAPLE1_GXm0wmwEUN7icdvBz9HDch-qoU2mT_TDrs,19877 +bs4/tests/test_tag.py,sha256=FBPDUisDCbFmvl5HmTtN49CGo3YoUXh5Wiuw5FMLS5E,9616 +bs4/tests/test_tree.py,sha256=n9nTQOzJb3-ZnZ6AkmMdZQ5TYcTUPnqHoVgal0mYXfg,48129 diff --git 
a/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/WHEEL b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/WHEEL new file mode 100644 index 0000000..2860816 --- /dev/null +++ b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.21.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/licenses/AUTHORS b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/licenses/AUTHORS new file mode 100644 index 0000000..1f14fe0 --- /dev/null +++ b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/licenses/AUTHORS @@ -0,0 +1,49 @@ +Behold, mortal, the origins of Beautiful Soup... +================================================ + +Leonard Richardson is the primary maintainer. + +Aaron DeVore and Isaac Muse have made significant contributions to the +code base. + +Mark Pilgrim provided the encoding detection code that forms the base +of UnicodeDammit. + +Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful +Soup 4 working under Python 3. + +Simon Willison wrote soupselect, which was used to make Beautiful Soup +support CSS selectors. Isaac Muse wrote SoupSieve, which made it +possible to _remove_ the CSS selector code from Beautiful Soup. + +Sam Ruby helped with a lot of edge cases. + +Jonathan Ellis was awarded the prestigious Beau Potage D'Or for his +work in solving the nestable tags conundrum. + +An incomplete list of people have contributed patches to Beautiful +Soup: + + Istvan Albert, Andrew Lin, Anthony Baxter, Oliver Beattie, Andrew +Boyko, Tony Chang, Francisco Canas, "Delong", Zephyr Fang, Fuzzy, +Roman Gaufman, Yoni Gilad, Richie Hindle, Toshihiro Kamiya, Peteris +Krumins, Kent Johnson, Marek Kapolka, Andreas Kostyrka, Roel Kramer, +Ben Last, Robert Leftwich, Stefaan Lippens, "liquider", Staffan +Malmgren, Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon", +Ed Oskiewicz, Martijn Peters, Greg Phillips, Giles Radford, Stefano +Revera, Arthur Rudolph, Marko Samastur, James Salter, Jouni Seppänen, +Alexander Schmolck, Tim Shirley, Geoffrey Sneddon, Ville Skyttä, +"Vikas", Jens Svalgaard, Andy Theyers, Eric Weiser, Glyn Webster, John +Wiseman, Paul Wright, Danny Yoo + +An incomplete list of people who made suggestions or found bugs or +found ways to break Beautiful Soup: + + Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel, + Matt Ernst, Michael Foord, Tom Harris, Bill de hÓra, Donald Howes, + Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams, + warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison, + Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed + Summers, Dennis Sutch, Chris Smith, Aaron Swartz, Stuart + Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de + Sousa Rocha, Yichun Wei, Per Vognsen diff --git a/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/licenses/LICENSE b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/licenses/LICENSE new file mode 100644 index 0000000..08e3a9c --- /dev/null +++ b/venv/Lib/site-packages/beautifulsoup4-4.12.3.dist-info/licenses/LICENSE @@ -0,0 +1,31 @@ +Beautiful Soup is made available under the MIT license: + + Copyright (c) Leonard Richardson + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy,
modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + +Beautiful Soup incorporates code from the html5lib library, which is +also made available under the MIT license. Copyright (c) James Graham +and other contributors + +Beautiful Soup has an optional dependency on the soupsieve library, +which is also made available under the MIT license. Copyright (c) +Isaac Muse diff --git a/venv/Lib/site-packages/behave-1.2.6.dist-info/DESCRIPTION.rst b/venv/Lib/site-packages/behave-1.2.6.dist-info/DESCRIPTION.rst new file mode 100644 index 0000000..ece0948 --- /dev/null +++ b/venv/Lib/site-packages/behave-1.2.6.dist-info/DESCRIPTION.rst @@ -0,0 +1,120 @@ +.. image:: https://img.shields.io/travis/behave/behave/master.svg + :target: https://travis-ci.org/behave/behave + :alt: Travis CI Build Status + +.. image:: https://readthedocs.org/projects/behave/badge/?version=latest + :target: http://behave.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +.. image:: https://img.shields.io/pypi/v/behave.svg + :target: https://pypi.python.org/pypi/behave + :alt: Latest Version + +.. image:: https://img.shields.io/pypi/dm/behave.svg + :target: https://pypi.python.org/pypi/behave + :alt: Downloads + +.. image:: https://img.shields.io/pypi/l/behave.svg + :target: https://pypi.python.org/pypi/behave/ + :alt: License + +.. image:: https://badges.gitter.im/Join%20Chat.svg + :alt: Join the chat at https://gitter.im/behave/behave + :target: https://gitter.im/behave/behave?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + + +.. |logo| image:: https://raw.github.com/behave/behave/master/docs/_static/behave_logo1.png + +behave is behavior-driven development, Python style. + +|logo| + +Behavior-driven development (or BDD) is an agile software development +technique that encourages collaboration between developers, QA and +non-technical or business participants in a software project. + +*behave* uses tests written in a natural language style, backed up by Python +code. + +First, `install *behave*.`_ + + +Now make a directory called "features/". +In that directory create a file called "example.feature" containing: + +.. code-block:: gherkin + + # -- FILE: features/example.feature + Feature: Showing off behave + + Scenario: Run a simple test + Given we have behave installed + When we implement 5 tests + Then behave will test them for us! + +Make a new directory called "features/steps/". +In that directory create a file called "example_steps.py" containing: + +.. 
code-block:: python + + # -- FILE: features/steps/example_steps.py + from behave import given, when, then, step + + @given('we have behave installed') + def step_impl(context): + pass + + @when('we implement {number:d} tests') + def step_impl(context, number): # -- NOTE: number is converted into integer + assert number > 1 or number == 0 + context.tests_count = number + + @then('behave will test them for us!') + def step_impl(context): + assert context.failed is False + assert context.tests_count >= 0 + +Run behave: + +.. code-block:: bash + + $ behave + Feature: Showing off behave # features/example.feature:2 + + Scenario: Run a simple test # features/example.feature:4 + Given we have behave installed # features/steps/example_steps.py:4 + When we implement 5 tests # features/steps/example_steps.py:8 + Then behave will test them for us! # features/steps/example_steps.py:13 + + 1 feature passed, 0 failed, 0 skipped + 1 scenario passed, 0 failed, 0 skipped + 3 steps passed, 0 failed, 0 skipped, 0 undefined + +Now, continue reading to learn how to get the most out of *behave*. To get started, +we recommend the `tutorial`_ and then the `feature testing language`_ and +`api`_ references. + + +.. _`Install *behave*.`: http://pythonhosted.org/behave/install.html +.. _`tutorial`: http://pythonhosted.org/behave/tutorial.html#features +.. _`feature testing language`: http://pythonhosted.org/behave/gherkin.html +.. _`api`: http://pythonhosted.org/behave/api.html + + +More Information +------------------------------------------------------------------------------- + +* `behave documentation`_: `latest edition`_, `stable edition`_, `PDF`_ +* `behave.example`_: Behave Examples and Tutorials (docs, executable examples). +* `changelog`_ (latest changes) + + +.. _behave documentation: http://behave.readthedocs.io/ +.. _changelog: https://github.com/behave/behave/blob/master/CHANGES.rst +.. _behave.example: https://github.com/behave/behave.example + +.. _`latest edition`: http://behave.readthedocs.io/en/latest/ +.. _`stable edition`: http://behave.readthedocs.io/en/stable/ +.. 
_PDF: https://media.readthedocs.org/pdf/behave/latest/behave.pdf + + diff --git a/venv/Lib/site-packages/behave-1.2.6.dist-info/INSTALLER b/venv/Lib/site-packages/behave-1.2.6.dist-info/INSTALLER new file mode 100644 index 0000000..a1b589e --- /dev/null +++ b/venv/Lib/site-packages/behave-1.2.6.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/Lib/site-packages/behave-1.2.6.dist-info/METADATA b/venv/Lib/site-packages/behave-1.2.6.dist-info/METADATA new file mode 100644 index 0000000..2cf3a23 --- /dev/null +++ b/venv/Lib/site-packages/behave-1.2.6.dist-info/METADATA @@ -0,0 +1,173 @@ +Metadata-Version: 2.0 +Name: behave +Version: 1.2.6 +Summary: behave is behaviour-driven development, Python style +Home-page: http://github.com/behave/behave +Author: Jens Engel, Benno Rice and Richard Jones +Author-email: behave-users@googlegroups.com +License: BSD +Description-Content-Type: UNKNOWN +Platform: UNKNOWN +Classifier: Development Status :: 4 - Beta +Classifier: Environment :: Console +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.6 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: Jython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Software Development :: Testing +Classifier: License :: OSI Approved :: BSD License +Provides: behave +Provides: setuptools_behave +Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.* +Requires-Dist: parse (>=1.8.2) +Requires-Dist: parse-type (>=0.4.2) +Requires-Dist: six (>=1.11) +Requires-Dist: argparse; python_version < "2.7" +Requires-Dist: importlib; python_version < "2.7" +Requires-Dist: ordereddict; python_version < "2.7" +Requires-Dist: traceback2; python_version < "3.0" +Requires-Dist: enum34; python_version < "3.4" +Provides-Extra: develop +Requires-Dist: coverage; extra == 'develop' +Requires-Dist: pytest (>=3.0); extra == 'develop' +Requires-Dist: pytest-cov; extra == 'develop' +Requires-Dist: tox; extra == 'develop' +Requires-Dist: invoke (>=0.21.0); extra == 'develop' +Requires-Dist: path.py (>=8.1.2); extra == 'develop' +Requires-Dist: pycmd; extra == 'develop' +Requires-Dist: pathlib; extra == 'develop' +Requires-Dist: modernize (>=0.5); extra == 'develop' +Requires-Dist: pylint; extra == 'develop' +Provides-Extra: docs +Requires-Dist: sphinx (>=1.6); extra == 'docs' +Requires-Dist: sphinx-bootstrap-theme (>=0.6); extra == 'docs' + +.. image:: https://img.shields.io/travis/behave/behave/master.svg + :target: https://travis-ci.org/behave/behave + :alt: Travis CI Build Status + +.. image:: https://readthedocs.org/projects/behave/badge/?version=latest + :target: http://behave.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +.. image:: https://img.shields.io/pypi/v/behave.svg + :target: https://pypi.python.org/pypi/behave + :alt: Latest Version + +.. image:: https://img.shields.io/pypi/dm/behave.svg + :target: https://pypi.python.org/pypi/behave + :alt: Downloads + +.. 
image:: https://img.shields.io/pypi/l/behave.svg + :target: https://pypi.python.org/pypi/behave/ + :alt: License + +.. image:: https://badges.gitter.im/Join%20Chat.svg + :alt: Join the chat at https://gitter.im/behave/behave + :target: https://gitter.im/behave/behave?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge + + +.. |logo| image:: https://raw.github.com/behave/behave/master/docs/_static/behave_logo1.png + +behave is behavior-driven development, Python style. + +|logo| + +Behavior-driven development (or BDD) is an agile software development +technique that encourages collaboration between developers, QA and +non-technical or business participants in a software project. + +*behave* uses tests written in a natural language style, backed up by Python +code. + +First, `install *behave*.`_ + + +Now make a directory called "features/". +In that directory create a file called "example.feature" containing: + +.. code-block:: gherkin + + # -- FILE: features/example.feature + Feature: Showing off behave + + Scenario: Run a simple test + Given we have behave installed + When we implement 5 tests + Then behave will test them for us! + +Make a new directory called "features/steps/". +In that directory create a file called "example_steps.py" containing: + +.. code-block:: python + + # -- FILE: features/steps/example_steps.py + from behave import given, when, then, step + + @given('we have behave installed') + def step_impl(context): + pass + + @when('we implement {number:d} tests') + def step_impl(context, number): # -- NOTE: number is converted into integer + assert number > 1 or number == 0 + context.tests_count = number + + @then('behave will test them for us!') + def step_impl(context): + assert context.failed is False + assert context.tests_count >= 0 + +Run behave: + +.. code-block:: bash + + $ behave + Feature: Showing off behave # features/example.feature:2 + + Scenario: Run a simple test # features/example.feature:4 + Given we have behave installed # features/steps/example_steps.py:4 + When we implement 5 tests # features/steps/example_steps.py:8 + Then behave will test them for us! # features/steps/example_steps.py:13 + + 1 feature passed, 0 failed, 0 skipped + 1 scenario passed, 0 failed, 0 skipped + 3 steps passed, 0 failed, 0 skipped, 0 undefined + +Now, continue reading to learn how to get the most out of *behave*. To get started, +we recommend the `tutorial`_ and then the `feature testing language`_ and +`api`_ references. + + +.. _`Install *behave*.`: http://pythonhosted.org/behave/install.html +.. _`tutorial`: http://pythonhosted.org/behave/tutorial.html#features +.. _`feature testing language`: http://pythonhosted.org/behave/gherkin.html +.. _`api`: http://pythonhosted.org/behave/api.html + + +More Information +------------------------------------------------------------------------------- + +* `behave documentation`_: `latest edition`_, `stable edition`_, `PDF`_ +* `behave.example`_: Behave Examples and Tutorials (docs, executable examples). +* `changelog`_ (latest changes) + + +.. _behave documentation: http://behave.readthedocs.io/ +.. _changelog: https://github.com/behave/behave/blob/master/CHANGES.rst +.. _behave.example: https://github.com/behave/behave.example + +.. _`latest edition`: http://behave.readthedocs.io/en/latest/ +.. _`stable edition`: http://behave.readthedocs.io/en/stable/ +.. 
_PDF: https://media.readthedocs.org/pdf/behave/latest/behave.pdf + + diff --git a/venv/Lib/site-packages/behave-1.2.6.dist-info/RECORD b/venv/Lib/site-packages/behave-1.2.6.dist-info/RECORD new file mode 100644 index 0000000..e922ed6 --- /dev/null +++ b/venv/Lib/site-packages/behave-1.2.6.dist-info/RECORD @@ -0,0 +1,114 @@ +../../Scripts/behave.exe,sha256=GzG-rMsP6cATEvd4t0Zjq8Wv7m9z7YlPLfR1fF_sVrM,108402 +__pycache__/setuptools_behave.cpython-311.pyc,, +behave-1.2.6.dist-info/DESCRIPTION.rst,sha256=NEym6QFwio6BqzPKf4KIMN58bm5SFClfORbuUD709o4,4117 +behave-1.2.6.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +behave-1.2.6.dist-info/METADATA,sha256=ekz_7DCEl9p9Z-rRuiXzmx6-UelQpI0Uti2tli0EC80,6365 +behave-1.2.6.dist-info/RECORD,, +behave-1.2.6.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110 +behave-1.2.6.dist-info/entry_points.txt,sha256=QfqCy19OVJPBkL-qJ8d2gmVmtpm3KFY7T8uyu-EALA8,115 +behave-1.2.6.dist-info/metadata.json,sha256=1Qru5IZdWcbZd2p3CN0TVoDarN4pIXFI6vNtAMW6J1s,2304 +behave-1.2.6.dist-info/top_level.txt,sha256=pqVl-BPsxqPsqLMfVIxmYktrPC5f5Tz8GzEDxn5KwQE,25 +behave-1.2.6.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +behave/__init__.py,sha256=oEM7dUg9DxIOCvJQjfG6Ws_uZVDHUSuLEUVeO-xDhYw,1048 +behave/__main__.py,sha256=_3OAlikNWqtbVJ6QBKZ2SrinhPdy1vt4bF39bxNkBpc,6320 +behave/__pycache__/__init__.cpython-311.pyc,, +behave/__pycache__/__main__.cpython-311.pyc,, +behave/__pycache__/_stepimport.cpython-311.pyc,, +behave/__pycache__/_types.cpython-311.pyc,, +behave/__pycache__/capture.cpython-311.pyc,, +behave/__pycache__/configuration.cpython-311.pyc,, +behave/__pycache__/fixture.cpython-311.pyc,, +behave/__pycache__/i18n.cpython-311.pyc,, +behave/__pycache__/importer.cpython-311.pyc,, +behave/__pycache__/json_parser.cpython-311.pyc,, +behave/__pycache__/log_capture.cpython-311.pyc,, +behave/__pycache__/matchers.cpython-311.pyc,, +behave/__pycache__/model.cpython-311.pyc,, +behave/__pycache__/model_core.cpython-311.pyc,, +behave/__pycache__/model_describe.cpython-311.pyc,, +behave/__pycache__/parser.cpython-311.pyc,, +behave/__pycache__/runner.cpython-311.pyc,, +behave/__pycache__/runner_util.cpython-311.pyc,, +behave/__pycache__/step_registry.cpython-311.pyc,, +behave/__pycache__/tag_expression.cpython-311.pyc,, +behave/__pycache__/tag_matcher.cpython-311.pyc,, +behave/__pycache__/textutil.cpython-311.pyc,, +behave/__pycache__/userdata.cpython-311.pyc,, +behave/_stepimport.py,sha256=4Ix9ry-YLx_0YhNOm7Wa3hGuTop-AnZa3u7KzjOFWJM,7381 +behave/_types.py,sha256=454jWTmTYpYcDx0lCiUXfuU7aJbMkA99voDzgpyeMkk,4710 +behave/api/__init__.py,sha256=HhvEEiygIfS6u06E25QpqcoGp6fpJsiuKDVbIEZ-qQQ,147 +behave/api/__pycache__/__init__.cpython-311.pyc,, +behave/api/__pycache__/async_step.cpython-311.pyc,, +behave/api/async_step.py,sha256=3sqokJk2eTANFhkoYwjvcSS3kt4_dS363OXdTRtmH2g,10911 +behave/capture.py,sha256=IGaadThUgnse2Pi7Ce131aXlC3awWcURNP1eimlsmMU,7846 +behave/compat/__init__.py,sha256=nCZjzP6-vYhVwr8wc8rHVYAEGnx64QYy9ujxm0b1e2Y,126 +behave/compat/__pycache__/__init__.cpython-311.pyc,, +behave/compat/__pycache__/collections.cpython-311.pyc,, +behave/compat/collections.py,sha256=5tc3cfjxoiFgFF6oCMQBLq6twEbjtZP0d2dedhsxbcI,647 +behave/configuration.py,sha256=lKci9ti2R9oQnG5Ka3SDwTGEyAqAqd4476B3FEHUIkA,30884 +behave/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +behave/contrib/__pycache__/__init__.cpython-311.pyc,, +behave/contrib/__pycache__/formatter_missing_steps.cpython-311.pyc,, 
+behave/contrib/__pycache__/scenario_autoretry.cpython-311.pyc,, +behave/contrib/__pycache__/substep_dirs.cpython-311.pyc,, +behave/contrib/formatter_missing_steps.py,sha256=I2SmWz84_k1VSo8zVU5EvosQE7ANqmuTQy1vNMjTXJs,2689 +behave/contrib/scenario_autoretry.py,sha256=VM0XGFPbi_NPg2qUa2jp7LPD-VofYe584WKd-Q-Pf2Y,2747 +behave/contrib/substep_dirs.py,sha256=gYIgRz04XZKI7O7lIp6sP1RPj6W6aQl3y7npYJZlsKI,423 +behave/fixture.py,sha256=0GJsJZUYL40hQhONr81x1_LUw-903RgRGxV8UHJV9e4,15399 +behave/formatter/__init__.py,sha256=rAVnExIQ5dwy_h1zqQ11XglE3M4Xt0h-uPgg6Tke5uo,355 +behave/formatter/__pycache__/__init__.cpython-311.pyc,, +behave/formatter/__pycache__/_builtins.cpython-311.pyc,, +behave/formatter/__pycache__/_registry.cpython-311.pyc,, +behave/formatter/__pycache__/ansi_escapes.cpython-311.pyc,, +behave/formatter/__pycache__/base.cpython-311.pyc,, +behave/formatter/__pycache__/formatters.cpython-311.pyc,, +behave/formatter/__pycache__/json.cpython-311.pyc,, +behave/formatter/__pycache__/null.cpython-311.pyc,, +behave/formatter/__pycache__/plain.cpython-311.pyc,, +behave/formatter/__pycache__/pretty.cpython-311.pyc,, +behave/formatter/__pycache__/progress.cpython-311.pyc,, +behave/formatter/__pycache__/rerun.cpython-311.pyc,, +behave/formatter/__pycache__/sphinx_steps.cpython-311.pyc,, +behave/formatter/__pycache__/sphinx_util.cpython-311.pyc,, +behave/formatter/__pycache__/steps.cpython-311.pyc,, +behave/formatter/__pycache__/tags.cpython-311.pyc,, +behave/formatter/_builtins.py,sha256=gQqrVjVtfi4WXaktteK1KRC2se_sP3FSDo58yAT8mB8,1799 +behave/formatter/_registry.py,sha256=J9wW-b8IaXy84TqWUJMp-Uo_CxcuB23UvVfxuC0j6U4,5085 +behave/formatter/ansi_escapes.py,sha256=XyEtjnE011IfHMuC5kdUA0_EJcFgPTTOC2rggkaBsRE,2870 +behave/formatter/base.py,sha256=DFsCETuD17L6AO8zR3tod9bz4V_agVH9j6G6M7tfPfc,6258 +behave/formatter/formatters.py,sha256=NFq5oHSjhkqe563yXnhU8dxnoM3n7Y-ihL5yKiWcZJI,2015 +behave/formatter/json.py,sha256=8eYnBgqvr0V2jq0Qjx9H-YaIQv1nCu7jF2LdNVppm-o,8434 +behave/formatter/null.py,sha256=aK6qxPGqmaLLEYB7kmVvW6edE88ZOtLOtjBfpBwr-A0,374 +behave/formatter/plain.py,sha256=6OjdAnS4U3I8jfeU3NDjjyUIACgZib3AQ6_YUlbcMnE,5466 +behave/formatter/pretty.py,sha256=omCNrsR2sS6wS0rgqPQQRqHYplGrouhG3BmVLyonSmE,12342 +behave/formatter/progress.py,sha256=0zdB_pb241v7isxNHHoinNARADMU8nrOmacT48-gDNA,10301 +behave/formatter/rerun.py,sha256=A8qAlcoa0xJpxD4E-kK6eToGs5NyBsico4nuqnQr0d0,4085 +behave/formatter/sphinx_steps.py,sha256=fc9wcvxx2I62QEFD4RP2_0LaH7g-v_gS9iM8uBZ3RNM,13706 +behave/formatter/sphinx_util.py,sha256=jVyge6YVLLibh69DsmgkITyZkFWj_fDvExzlP4fzG5A,4396 +behave/formatter/steps.py,sha256=d4j2nvlcWKSAdgyaL1-EYMypcIjichepSC74Boh8uz0,18824 +behave/formatter/tags.py,sha256=OtX_QZK2H9TWKCHc3423hER0u8bEicPrECRYVXkeU7Y,6002 +behave/i18n.py,sha256=VkobSjgfukG-8PD5NxYqRIWBRSaE2zLw6lWJ7i0qzHg,28025 +behave/importer.py,sha256=ovAv8E831XJRorDepipRqv2pnaVs0UeLqUzKhJ9BRZU,3392 +behave/json_parser.py,sha256=KrI8bcCNzwO1s7bogzW2DJ1-isG76Rt0UyfCJQLQXGE,9352 +behave/log_capture.py,sha256=KWXFU93sk4iHKtjuHGkOOiVvb5O-a7Lgd65c8DEvW-g,7544 +behave/matchers.py,sha256=yWoUwfpdznpCE65RGnBaTZLNLWPW_Yyc7I6Agmx4hEY,14300 +behave/model.py,sha256=zR-4zV4T13SNPcZj7ytopYz1UU7XGV81LeyHzd8D7eY,61610 +behave/model_core.py,sha256=7evGP2PeRPZwu-6Ur1HijYsUnvM_21PXAojGJSpm3V8,13604 +behave/model_describe.py,sha256=FuZbc0bVoZgUjYRR1_m3-Ltfx3T4YvLtOJqVJ1-HSaA,3539 +behave/parser.py,sha256=b8opRKpFez6juyZWKnizmlsBV5umGspDQmcpaB6VuTQ,21838 +behave/reporter/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 
+behave/reporter/__pycache__/__init__.cpython-311.pyc,, +behave/reporter/__pycache__/base.cpython-311.pyc,, +behave/reporter/__pycache__/junit.cpython-311.pyc,, +behave/reporter/__pycache__/summary.cpython-311.pyc,, +behave/reporter/base.py,sha256=JyttpJ4bcRlHwVh0oUTWnEnVUTGGfwNhmLgeA1lS7EE,1444 +behave/reporter/junit.py,sha256=5aNrvQskPC4dTIsPvPH6JaPxCZw9w4-1_3ZfWfKfjEY,18144 +behave/reporter/summary.py,sha256=aFCIKCNXwWFjVDiNrNk3UOMe1y1ARhww1-0Mvqq0vP4,3697 +behave/runner.py,sha256=Ws-U0x9i_f-AqZXl38_AIu6v1uQfwrnMd2SYdn3JMZM,30396 +behave/runner_util.py,sha256=GzU9JMBVRfXVInnaCk0a5krtYk_DcOi006_zarzriVw,18124 +behave/step_registry.py,sha256=8UYkCN545BFqxvy7EWDu3iUkBKDe5gx1uOe1pxzKouc,4007 +behave/tag_expression.py,sha256=Ehwm29oo1YqVgriYvQmi00Bsd79TyQ-TjgQPo8kZJjI,3564 +behave/tag_matcher.py,sha256=jIdio2DzsjrplG8Gb5TNcqIJ8hU-i60b-2MTbshjzB4,17298 +behave/textutil.py,sha256=7mWv4ou_gFoX1Ao13WAG87mltHLkbgLXLoNx8yzBQ80,5678 +behave/userdata.py,sha256=-WjGDTWHWvpF2QIujH6Cb8PAom_tQR1qJR-WXNi3JjI,7706 +setuptools_behave.py,sha256=vgQf6yP0e7L0zoUoj7csKq77ZNyjwL7fb0uPvMK_FmE,4832 diff --git a/venv/Lib/site-packages/behave-1.2.6.dist-info/WHEEL b/venv/Lib/site-packages/behave-1.2.6.dist-info/WHEEL new file mode 100644 index 0000000..7332a41 --- /dev/null +++ b/venv/Lib/site-packages/behave-1.2.6.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.30.0) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/venv/Lib/site-packages/behave-1.2.6.dist-info/entry_points.txt b/venv/Lib/site-packages/behave-1.2.6.dist-info/entry_points.txt new file mode 100644 index 0000000..ba4dcad --- /dev/null +++ b/venv/Lib/site-packages/behave-1.2.6.dist-info/entry_points.txt @@ -0,0 +1,6 @@ +[console_scripts] +behave = behave.__main__:main + +[distutils.commands] +behave_test = setuptools_behave:behave_test + diff --git a/venv/Lib/site-packages/behave-1.2.6.dist-info/metadata.json b/venv/Lib/site-packages/behave-1.2.6.dist-info/metadata.json new file mode 100644 index 0000000..660857a --- /dev/null +++ b/venv/Lib/site-packages/behave-1.2.6.dist-info/metadata.json @@ -0,0 +1 @@ +{"classifiers": ["Development Status :: 4 - Beta", "Environment :: Console", "Intended Audience :: Developers", "Operating System :: OS Independent", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: Jython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: Software Development :: Testing", "License :: OSI Approved :: BSD License"], "description_content_type": "UNKNOWN", "extensions": {"python.commands": {"wrap_console": {"behave": "behave.__main__:main"}}, "python.details": {"contacts": [{"email": "behave-users@googlegroups.com", "name": "Jens Engel, Benno Rice and Richard Jones", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://github.com/behave/behave"}}, "python.exports": {"console_scripts": {"behave": "behave.__main__:main"}, "distutils.commands": {"behave_test": "setuptools_behave:behave_test"}}}, "extras": ["develop", "docs"], "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "behave", 
"provides": "behave", "requires_python": ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*", "run_requires": [{"extra": "develop", "requires": ["coverage", "invoke (>=0.21.0)", "modernize (>=0.5)", "path.py (>=8.1.2)", "pathlib", "pycmd", "pylint", "pytest (>=3.0)", "pytest-cov", "tox"]}, {"requires": ["parse (>=1.8.2)", "parse-type (>=0.4.2)", "six (>=1.11)"]}, {"extra": "docs", "requires": ["sphinx (>=1.6)", "sphinx-bootstrap-theme (>=0.6)"]}, {"environment": "python_version < \"2.7\"", "requires": ["argparse", "importlib", "ordereddict"]}, {"environment": "python_version < \"3.0\"", "requires": ["traceback2"]}, {"environment": "python_version < \"3.4\"", "requires": ["enum34"]}], "summary": "behave is behaviour-driven development, Python style", "test_requires": [{"requires": ["PyHamcrest (>=1.8)", "mock (>=1.1)", "nose (>=1.3)", "path.py (>=10.1)", "pytest (>=3.0)"]}], "version": "1.2.6"} \ No newline at end of file diff --git a/venv/Lib/site-packages/behave-1.2.6.dist-info/top_level.txt b/venv/Lib/site-packages/behave-1.2.6.dist-info/top_level.txt new file mode 100644 index 0000000..e68ebff --- /dev/null +++ b/venv/Lib/site-packages/behave-1.2.6.dist-info/top_level.txt @@ -0,0 +1,2 @@ +behave +setuptools_behave diff --git a/venv/Lib/site-packages/behave-1.2.6.dist-info/zip-safe b/venv/Lib/site-packages/behave-1.2.6.dist-info/zip-safe new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/venv/Lib/site-packages/behave-1.2.6.dist-info/zip-safe @@ -0,0 +1 @@ + diff --git a/venv/Lib/site-packages/behave/__init__.py b/venv/Lib/site-packages/behave/__init__.py new file mode 100644 index 0000000..341dcab --- /dev/null +++ b/venv/Lib/site-packages/behave/__init__.py @@ -0,0 +1,32 @@ +# -*- coding: UTF-8 -*- +"""behave is behaviour-driven development, Python style + +Behavior-driven development (or BDD) is an agile software development +technique that encourages collaboration between developers, QA and +non-technical or business participants in a software project. + +*behave* uses tests written in a natural language style, backed up by Python +code. + +To get started, we recommend the `tutorial`_ and then the `test language`_ and +`api`_ references. + +.. _`tutorial`: tutorial.html +.. _`test language`: gherkin.html +.. 
_`api`: api.html +""" + +from __future__ import absolute_import +from behave.step_registry import * # pylint: disable=wildcard-import +from behave.matchers import use_step_matcher, step_matcher, register_type +from behave.fixture import fixture, use_fixture + +# pylint: disable=undefined-all-variable +__all__ = [ + "given", "when", "then", "step", "use_step_matcher", "register_type", + "Given", "When", "Then", "Step", + "fixture", "use_fixture", + # -- DEPRECATING: + "step_matcher" +] +__version__ = "1.2.6" diff --git a/venv/Lib/site-packages/behave/__main__.py b/venv/Lib/site-packages/behave/__main__.py new file mode 100644 index 0000000..dae0dcb --- /dev/null +++ b/venv/Lib/site-packages/behave/__main__.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import, print_function +import codecs +import sys +import six +from behave import __version__ +from behave.configuration import Configuration, ConfigError +from behave.parser import ParserError +from behave.runner import Runner +from behave.runner_util import print_undefined_step_snippets, reset_runtime, \ + InvalidFileLocationError, InvalidFilenameError, FileNotFoundError +from behave.textutil import compute_words_maxsize, text as _text + + +TAG_HELP = """ +Scenarios inherit tags declared on the Feature level. The simplest +TAG_EXPRESSION is simply a tag:: + + --tags @dev + +You may even leave off the "@" - behave doesn't mind. + +When a tag in a tag expression starts with a ~, this represents boolean NOT:: + + --tags ~@dev + +A tag expression can have several tags separated by a comma, which represents +logical OR:: + + --tags @dev,@wip + +The --tags option can be specified several times, and this represents logical +AND, for instance this represents the boolean expression +"(@foo or not @bar) and @zap":: + + --tags @foo,~@bar --tags @zap. + +Beware that if you want to use several negative tags to exclude several tags +you have to use logical AND:: + + --tags ~@fixme --tags ~@buggy. +""".strip() + +# TODO +# Positive tags can be given a threshold to limit the number of occurrences. +# Which can be practical if you are practicing Kanban or CONWIP. This will fail +# if there are more than 3 occurrences of the @qa tag: +# +# --tags @qa:3 +# """.strip() + + +def run_behave(config, runner_class=None): + """Run behave with configuration. + + :param config: Configuration object for behave. + :param runner_class: Runner class to use or none (use Runner class). + :return: 0, if successful. Non-zero on failure. + + .. note:: BEST EFFORT, not intended for multi-threaded usage. + """ + # pylint: disable=too-many-branches, too-many-statements, too-many-return-statements + if runner_class is None: + runner_class = Runner + + if config.version: + print("behave " + __version__) + return 0 + + if config.tags_help: + print(TAG_HELP) + return 0 + + if config.lang_list: + from behave.i18n import languages + stdout = sys.stdout + if six.PY2: + # -- PYTHON2: Overcome implicit encode problems (encoding=ASCII). 
+ stdout = codecs.getwriter("UTF-8")(sys.stdout) + iso_codes = languages.keys() + print("Languages available:") + for iso_code in sorted(iso_codes): + native = languages[iso_code]["native"][0] + name = languages[iso_code]["name"][0] + print(u"%s: %s / %s" % (iso_code, native, name), file=stdout) + return 0 + + if config.lang_help: + from behave.i18n import languages + if config.lang_help not in languages: + print("%s is not a recognised language: try --lang-list" % \ + config.lang_help) + return 1 + trans = languages[config.lang_help] + print(u"Translations for %s / %s" % (trans["name"][0], + trans["native"][0])) + for kw in trans: + if kw in "name native".split(): + continue + print(u"%16s: %s" % (kw.title().replace("_", " "), + u", ".join(w for w in trans[kw] if w != "*"))) + return 0 + + if not config.format: + config.format = [config.default_format] + elif config.format and "format" in config.defaults: + # -- CASE: Formatter are specified in behave configuration file. + # Check if formatter are provided on command-line, too. + if len(config.format) == len(config.defaults["format"]): + # -- NO FORMATTER on command-line: Add default formatter. + config.format.append(config.default_format) + if "help" in config.format: + print_formatters("Available formatters:") + return 0 + + if len(config.outputs) > len(config.format): + print("CONFIG-ERROR: More outfiles (%d) than formatters (%d)." % \ + (len(config.outputs), len(config.format))) + return 1 + + # -- MAIN PART: + failed = True + try: + reset_runtime() + runner = runner_class(config) + failed = runner.run() + except ParserError as e: + print(u"ParserError: %s" % e) + except ConfigError as e: + print(u"ConfigError: %s" % e) + except FileNotFoundError as e: + print(u"FileNotFoundError: %s" % e) + except InvalidFileLocationError as e: + print(u"InvalidFileLocationError: %s" % e) + except InvalidFilenameError as e: + print(u"InvalidFilenameError: %s" % e) + except Exception as e: + # -- DIAGNOSTICS: + text = _text(e) + print(u"Exception %s: %s" % (e.__class__.__name__, text)) + raise + + if config.show_snippets and runner.undefined_steps: + print_undefined_step_snippets(runner.undefined_steps, + colored=config.color) + + return_code = 0 + if failed: + return_code = 1 + return return_code + +def print_formatters(title=None, stream=None): + """Prints the list of available formatters and their description. + + :param title: Optional title (as string). + :param stream: Optional, output stream to use (default: sys.stdout). + """ + from behave.formatter._registry import format_items + from operator import itemgetter + + if stream is None: + stream = sys.stdout + if title: + stream.write(u"%s\n" % title) + + format_items = sorted(format_items(resolved=True), key=itemgetter(0)) + format_names = [item[0] for item in format_items] + column_size = compute_words_maxsize(format_names) + schema = u" %-"+ _text(column_size) +"s %s\n" + for name, formatter_class in format_items: + formatter_description = getattr(formatter_class, "description", "") + stream.write(schema % (name, formatter_description)) + + +def main(args=None): + """Main function to run behave (as program). + + :param args: Command-line args (or string) to use. + :return: 0, if successful. Non-zero, in case of errors/failures. 
+ """ + config = Configuration(args) + return run_behave(config) + +if __name__ == "__main__": + # -- EXAMPLE: main("--version") + sys.exit(main()) diff --git a/venv/Lib/site-packages/behave/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..b731225 Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/__main__.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/__main__.cpython-311.pyc new file mode 100644 index 0000000..35f3b89 Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/__main__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/_stepimport.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/_stepimport.cpython-311.pyc new file mode 100644 index 0000000..636aeef Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/_stepimport.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/_types.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/_types.cpython-311.pyc new file mode 100644 index 0000000..ca0673a Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/_types.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/capture.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/capture.cpython-311.pyc new file mode 100644 index 0000000..9da60c2 Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/capture.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/configuration.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/configuration.cpython-311.pyc new file mode 100644 index 0000000..ffc950f Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/configuration.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/fixture.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/fixture.cpython-311.pyc new file mode 100644 index 0000000..511298f Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/fixture.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/i18n.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/i18n.cpython-311.pyc new file mode 100644 index 0000000..10ec005 Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/i18n.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/importer.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/importer.cpython-311.pyc new file mode 100644 index 0000000..eb794f0 Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/importer.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/json_parser.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/json_parser.cpython-311.pyc new file mode 100644 index 0000000..b8802cd Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/json_parser.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/log_capture.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/log_capture.cpython-311.pyc new file mode 100644 index 0000000..88fb352 Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/log_capture.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/matchers.cpython-311.pyc 
b/venv/Lib/site-packages/behave/__pycache__/matchers.cpython-311.pyc new file mode 100644 index 0000000..37108ef Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/matchers.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/model.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/model.cpython-311.pyc new file mode 100644 index 0000000..a4c4903 Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/model.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/model_core.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/model_core.cpython-311.pyc new file mode 100644 index 0000000..7851271 Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/model_core.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/model_describe.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/model_describe.cpython-311.pyc new file mode 100644 index 0000000..b740a8f Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/model_describe.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/parser.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/parser.cpython-311.pyc new file mode 100644 index 0000000..fdddfee Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/parser.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/runner.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/runner.cpython-311.pyc new file mode 100644 index 0000000..13551fc Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/runner.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/runner_util.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/runner_util.cpython-311.pyc new file mode 100644 index 0000000..fad9c5f Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/runner_util.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/step_registry.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/step_registry.cpython-311.pyc new file mode 100644 index 0000000..be13d2b Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/step_registry.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/tag_expression.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/tag_expression.cpython-311.pyc new file mode 100644 index 0000000..8aa1e7d Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/tag_expression.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/tag_matcher.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/tag_matcher.cpython-311.pyc new file mode 100644 index 0000000..78026a5 Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/tag_matcher.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/textutil.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/textutil.cpython-311.pyc new file mode 100644 index 0000000..acc7508 Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/textutil.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/__pycache__/userdata.cpython-311.pyc b/venv/Lib/site-packages/behave/__pycache__/userdata.cpython-311.pyc new file mode 100644 index 0000000..9d72a34 Binary files /dev/null and b/venv/Lib/site-packages/behave/__pycache__/userdata.cpython-311.pyc differ diff --git 
a/venv/Lib/site-packages/behave/_stepimport.py b/venv/Lib/site-packages/behave/_stepimport.py new file mode 100644 index 0000000..3015639 --- /dev/null +++ b/venv/Lib/site-packages/behave/_stepimport.py @@ -0,0 +1,185 @@ +# -*- coding: UTF-8 -*- +""" +This module provides low-level helper functionality during step imports. + +.. warn:: Do not use directly + + It should not be used directly except in behave Runner classes + that need to provide the correct context (step_registry, matchers, etc.) + instead of using the global module specific variables. +""" + +from __future__ import absolute_import +from contextlib import contextmanager +from threading import Lock +from types import ModuleType +import os.path +import sys +from behave import step_registry as _step_registry +# from behave import matchers as _matchers +import six + + +# ----------------------------------------------------------------------------- +# UTILITY FUNCTIONS: +# ----------------------------------------------------------------------------- +def setup_api_with_step_decorators(module, step_registry): + _step_registry.setup_step_decorators(module, step_registry) + +def setup_api_with_matcher_functions(module, matcher_factory): + module.use_step_matcher = matcher_factory.use_step_matcher + module.step_matcher = matcher_factory.use_step_matcher + module.register_type = matcher_factory.register_type + +# ----------------------------------------------------------------------------- +# FAKE MODULE CLASSES: For step imports +# ----------------------------------------------------------------------------- +# class FakeModule(object): +class FakeModule(ModuleType): + ensure_fake = True + + # -- SUPPORT FOR: behave.step_registry.setup_step_decorators() + def __setitem__(self, name, value): + assert "." not in name + setattr(self, name, value) + + +class StepRegistryModule(FakeModule): + """Provides a fake :mod:`behave.step_registry` module + that can be used during step imports. + """ + __all__ = [ + "given", "when", "then", "step", + "Given", "When", "Then", "Step", + ] + + def __init__(self, step_registry): + super(StepRegistryModule, self).__init__("behave.step_registry") + self.registry = step_registry + setup_api_with_step_decorators(self, step_registry) + + +class StepMatchersModule(FakeModule): + __all__ = ["use_step_matcher", "register_type", "step_matcher"] + + def __init__(self, matcher_factory): + super(StepMatchersModule, self).__init__("behave.matchers") + self.matcher_factory = matcher_factory + setup_api_with_matcher_functions(self, matcher_factory) + self.use_default_step_matcher = matcher_factory.use_default_step_matcher + self.get_matcher = matcher_factory.make_matcher + # self.matcher_mapping = matcher_mapping or _matchers.matcher_mapping.copy() + # self.current_matcher = current_matcher or _matchers.current_matcher + + # -- INJECT PYTHON PACKAGE META-DATA: + # REQUIRED-FOR: Non-fake submodule imports (__path__). 
+ here = os.path.dirname(__file__) + self.__file__ = os.path.abspath(os.path.join(here, "matchers.py")) + self.__name__ = "behave.matchers" + # self.__path__ = [os.path.abspath(here)] + + # def use_step_matcher(self, name): + # self.matcher_factory.use_step_matcher(name) + # # self.current_matcher = self.matcher_mapping[name] + # + # def use_default_step_matcher(self, name=None): + # self.matcher_factory.use_default_step_matcher(name=None) + # + # def get_matcher(self, func, pattern): + # # return self.current_matcher + # return self.matcher_factory.make_matcher(func, pattern) + # + # def register_type(self, **kwargs): + # # _matchers.register_type(**kwargs) + # self.matcher_factory.register_type(**kwargs) + # + # step_matcher = use_step_matcher + + +class BehaveModule(FakeModule): + __all__ = StepRegistryModule.__all__ + StepMatchersModule.__all__ + + def __init__(self, step_registry, matcher_factory=None): + if matcher_factory is None: + matcher_factory = step_registry.step_matcher_factory + assert matcher_factory is not None + super(BehaveModule, self).__init__("behave") + setup_api_with_step_decorators(self, step_registry) + setup_api_with_matcher_functions(self, matcher_factory) + self.use_default_step_matcher = matcher_factory.use_default_step_matcher + assert step_registry.matcher_factory == matcher_factory + + # -- INJECT PYTHON PACKAGE META-DATA: + # REQUIRED-FOR: Non-fake submodule imports (__path__). + here = os.path.dirname(__file__) + self.__file__ = os.path.abspath(os.path.join(here, "__init__.py")) + self.__name__ = "behave" + self.__path__ = [os.path.abspath(here)] + self.__package__ = None + + +class StepImportModuleContext(object): + + def __init__(self, step_container): + self.step_registry = step_container.step_registry + self.matcher_factory = step_container.matcher_factory + assert self.step_registry.matcher_factory == self.matcher_factory + self.step_registry.matcher_factory = self.matcher_factory + + step_registry_module = StepRegistryModule(self.step_registry) + step_matchers_module = StepMatchersModule(self.matcher_factory) + behave_module = BehaveModule(self.step_registry, self.matcher_factory) + self.modules = { + "behave": behave_module, + "behave.matchers": step_matchers_module, + "behave.step_registry": step_registry_module, + } + # self.default_matcher = self.step_matchers_module.current_matcher + + def reset_current_matcher(self): + self.matcher_factory.use_default_step_matcher() + +_step_import_lock = Lock() +unknown = object() + +@contextmanager +def use_step_import_modules(step_container): + """Redirect any step/type registration to the runner's step-context object + during step imports by using fake modules (instead of using module-globals). + + This allows multiple runners to be used without polluting the + global variables in problematic modules + (:mod:`behave.step_registry`, :mod:`behave.matchers`). + + .. sourcecode:: python + + # -- RUNNER-IMPLEMENTATION: + def load_step_definitions(self, ...): + step_container = self.step_container + with use_step_import_modules(step_container) as import_context: + # -- USE: Fake modules during step imports + ... + import_context.reset_current_matcher() + + :param step_container: Step context object with step_registry, matcher_factory. + """ + orig_modules = {} + import_context = StepImportModuleContext(step_container) + with _step_import_lock: + # -- CRITICAL-SECTION (multi-threading protected) + try: + # -- SCOPE-GUARD SETUP: Replace original modules with fake ones.
+ for module_name, fake_module in six.iteritems(import_context.modules): + orig_module = sys.modules.get(module_name, unknown) + orig_modules[module_name] = orig_module + sys.modules[module_name] = fake_module + + # -- USE: Fake modules for step imports. + yield import_context + finally: + # -- SCOPE-GUARD CLEANUP: Restore original modules. + for module_name, orig_module in six.iteritems(orig_modules): + if orig_module is unknown: + del sys.modules[module_name] + else: + sys.modules[module_name] = orig_module diff --git a/venv/Lib/site-packages/behave/_types.py b/venv/Lib/site-packages/behave/_types.py new file mode 100644 index 0000000..629f3a4 --- /dev/null +++ b/venv/Lib/site-packages/behave/_types.py @@ -0,0 +1,134 @@ +# -*- coding: UTF-8 -*- +"""Basic types (helper classes).""" + +import sys +import six +if six.PY2: + # -- USE PYTHON2 BACKPORT: With unicode support + import traceback2 as traceback +else: + import traceback + + +class Unknown(object): + """Placeholder for unknown/missing information, distinguishable from None. + + .. code-block:: python + + data = {} + value = data.get("name", Unknown) + if value is Unknown: + # -- DO SOMETHING + ... + """ + + +class ExceptionUtil(object): + """Provides a utility class for accessing/modifying exception information. + + .. seealso:: PEP-3134 Chained exceptions + """ + # pylint: disable=no-init + + @staticmethod + def get_traceback(exception): + # -- ASSUMPTION: assert isinstance(exception, Exception) + return getattr(exception, "__traceback__", None) + + @staticmethod + def set_traceback(exception, exc_traceback=Unknown): + assert isinstance(exception, Exception) + if exc_traceback is Unknown: + exc_traceback = sys.exc_info()[2] + exception.__traceback__ = exc_traceback + + @classmethod + def has_traceback(cls, exception): + """Indicates if traceback information related to this exception + is stored with the exception object. + + :param exception: Exception object to check. + :return: True, if traceback info is stored. False, otherwise. + """ + return cls.get_traceback(exception) is not None + + @classmethod + def describe(cls, exception, use_traceback=False, prefix=""): + # -- NORMAL CASE: + text = u"{prefix}{0}: {1}\n".format(exception.__class__.__name__, + exception, prefix=prefix) + if use_traceback: + exc_traceback = cls.get_traceback(exception) + if exc_traceback: + # -- NOTE: Chained-exception cause (see: PEP-3134). + text += u"".join(traceback.format_tb(exc_traceback)) + return text + + +class ChainedExceptionUtil(ExceptionUtil): + """Provides a utility class for accessing/modifying exception information + related to chained exceptions. + + .. seealso:: PEP-3134 Chained exceptions + """ + # pylint: disable=no-init + + @staticmethod + def get_cause(exception): + # -- ASSUMPTION: assert isinstance(exception, Exception) + return getattr(exception, "__cause__", None) + + @staticmethod + def set_cause(exception, exc_cause): + assert isinstance(exception, Exception) + assert isinstance(exc_cause, Exception) or exc_cause is None + exception.__cause__ = exc_cause + if exc_cause and not hasattr(exc_cause, "__traceback__"): + # -- NEEDED-FOR: Python2 + # Otherwise, traceback formatting tries to access missing attribute. + exc_cause.__traceback__ = None + + # pylint: disable=arguments-differ + @classmethod + def describe(cls, exception, use_traceback=False, prefix="", style="reversed"): + """Describes an exception, optionally together with its traceback info. + Also shows information about the exception cause (chained exceptions), + if one exists.
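# -- EXAMPLE (editor's sketch, not part of the diff): what
# ExceptionUtil.set_traceback() and describe() above boil down to, using
# only the standard library; the names "error" and "text" are made up.
import sys
import traceback

try:
    raise ValueError("boom")
except ValueError as error:
    error.__traceback__ = sys.exc_info()[2]   # -- set_traceback() behaviour.
    text = "%s: %s\n" % (error.__class__.__name__, error)
    text += "".join(traceback.format_tb(error.__traceback__))
    assert text.startswith("ValueError: boom")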
+ + :param exception: Exception object to describe. + :param use_traceback: Indicates if traceback info should be shown. + :param prefix: Optional prefix for description text. + :param style: Optional style indicator ("reversed", "normal") + :return: Exception description as text. + """ + text = ExceptionUtil.describe(exception, use_traceback, prefix) + + # -- STEP: Collect chained exceptions. + causes = [] + exc_cause = cls.get_cause(exception) + while exc_cause: + causes.append(exc_cause) + exc_cause = cls.get_cause(exc_cause) + + # -- STEP: Describe causes for chained exceptions. + parts = [] + if style == "normal": + prefix = "CAUSE: " + for exc_cause in reversed(causes): + cause_text = ExceptionUtil.describe(exc_cause, use_traceback, + prefix) + parts.append(cause_text) + if len(parts) == 1: + prefix = "CAUSES: " + parts.append(text) + else: + parts.append(text) + for exc_cause in causes: + cause_text = ExceptionUtil.describe(exc_cause, use_traceback, + prefix="CAUSED-BY: ") + parts.append(cause_text) + return u"\n".join(parts) + # if exc_cause: + # cause_text = + # text += u"\n" + cause_text + # return text diff --git a/venv/Lib/site-packages/behave/api/__init__.py b/venv/Lib/site-packages/behave/api/__init__.py new file mode 100644 index 0000000..1312c0a --- /dev/null +++ b/venv/Lib/site-packages/behave/api/__init__.py @@ -0,0 +1,7 @@ +""" +Provides/defines stable APIs for behave users: + +* step writer(s): features/steps/*.py +* environment writers: features/environment.py +* ... +""" diff --git a/venv/Lib/site-packages/behave/api/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/behave/api/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..c6f50bc Binary files /dev/null and b/venv/Lib/site-packages/behave/api/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/api/__pycache__/async_step.cpython-311.pyc b/venv/Lib/site-packages/behave/api/__pycache__/async_step.cpython-311.pyc new file mode 100644 index 0000000..00f1e7e Binary files /dev/null and b/venv/Lib/site-packages/behave/api/__pycache__/async_step.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/api/async_step.py b/venv/Lib/site-packages/behave/api/async_step.py new file mode 100644 index 0000000..f75d570 --- /dev/null +++ b/venv/Lib/site-packages/behave/api/async_step.py @@ -0,0 +1,283 @@ +# -*- coding: UTF-8 -*- +# pylint: disable=line-too-long +""" +This module provides functionality to support "async steps" (coroutines) +in a step-module with behave. This functionality simplifies testing +frameworks and protocols that make use of `asyncio.coroutines`_ or +provide `asyncio.coroutines`_. + +EXAMPLE: + +.. code-block:: python + + # -- FILE: features/steps/my_async_steps.py + # EXAMPLE REQUIRES: Python >= 3.5 + from behave import step + from behave.api.async_step import async_run_until_complete + + @step('an async coroutine step waits {duration:f} seconds') + @async_run_until_complete + async def step_async_step_waits_seconds(context, duration): + await asyncio.sleep(duration) + +.. code-block:: python + + # -- FILE: features/steps/my_async_steps2.py + # EXAMPLE REQUIRES: Python >= 3.4 + from behave import step + from behave.api.async_step import async_run_until_complete + import asyncio + + @step('a tagged-coroutine async step waits {duration:f} seconds') + @async_run_until_complete + @asyncio.coroutine + def step_async_step_waits_seconds2(context, duration): + yield from asyncio.sleep(duration) + + +..
requires:: Python 3.5 (or 3.4) or :mod:`asyncio` backport (like :pypi:`trollius`) +.. seealso:: + https://docs.python.org/3/library/asyncio.html + +.. _asyncio.coroutines: https://docs.python.org/3/library/asyncio-task.html#coroutines +""" +# pylint: enable=line-too-long + +from __future__ import print_function +# -- REQUIRES: Python >= 3.4 +# MAYBE BACKPORT: trollius +import functools +from six import string_types +try: + import asyncio + has_asyncio = True +except ImportError: + has_asyncio = False + +# ----------------------------------------------------------------------------- +# ASYNC STEP DECORATORS: +# ----------------------------------------------------------------------------- +def async_run_until_complete(astep_func=None, loop=None, timeout=None, + async_context=None, should_close=False): + """Provides a function decorator for async-steps (coroutines). + Provides an async event loop and runs the async-step until completion + (or timeout, if specified). + + .. code-block:: python + + from behave import step + from behave.api.async_step import async_run_until_complete + import asyncio + + @step("an async step is executed") + @async_run_until_complete + async def astep_impl(context): + await asyncio.sleep(0.1) + + @step("an async step is executed") + @async_run_until_complete(timeout=1.2) + async def astep_impl2(context): + # -- NOTE: Wrapped event loop waits with timeout=1.2 seconds. + await asyncio.sleep(0.3) + + Parameters: + astep_func: Async step function (coroutine) + loop (asyncio.EventLoop): Event loop to use or None. + timeout (int, float): Timeout to wait for async-step completion. + async_context (name): Async_context name or object to use. + should_close (bool): Indicates if the event loop should be closed. + + .. note:: + + * If :param:`loop` is None, the default event loop will be used + or a new event loop is created. + * If :param:`timeout` is provided, the event loop waits only the + specified time. + * :param:`async_context` is only used, if :param:`loop` is None. + * If :param:`async_context` is a name, it will be used to retrieve + the real async_context object from the context. + + """ + @functools.wraps(astep_func) + def step_decorator(astep_func, context, *args, **kwargs): + loop = kwargs.pop("_loop", None) + timeout = kwargs.pop("_timeout", None) + async_context = kwargs.pop("_async_context", None) + should_close = kwargs.pop("_should_close", None) + + if isinstance(loop, string_types): + loop = getattr(context, loop, None) + elif async_context: + if isinstance(async_context, string_types): + name = async_context + async_context = use_or_create_async_context(context, name) + loop = async_context.loop + else: + assert isinstance(async_context, AsyncContext) + loop = async_context.loop + if loop is None: + loop = asyncio.get_event_loop() or asyncio.new_event_loop() + + # -- WORKHORSE: + try: + if timeout is None: + loop.run_until_complete(astep_func(context, *args, **kwargs)) + else: + # MAYBE: loop = asyncio.new_event_loop() + # MAYBE: should_close = True + task = loop.create_task(astep_func(context, *args, **kwargs)) + done, pending = loop.run_until_complete( + asyncio.wait([task], timeout=timeout)) + assert not pending, "TIMEOUT-OCCURRED: timeout=%s" % timeout + finally: + if loop and should_close: + # -- MAYBE-AVOID: + loop.close() + + if astep_func is None: + # -- CASE: @decorator(timeout=1.2, ...)
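# -- EXAMPLE (editor's sketch, not part of the diff): the timeout branch of
# the workhorse above, reduced to plain asyncio calls; sample_step() and the
# 1.0 second timeout are made up for this illustration.
import asyncio

async def sample_step():
    await asyncio.sleep(0.1)

loop = asyncio.new_event_loop()
try:
    task = loop.create_task(sample_step())
    done, pending = loop.run_until_complete(asyncio.wait([task], timeout=1.0))
    assert not pending, "TIMEOUT-OCCURRED: timeout=1.0"
finally:
    loop.close()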
+ # MAYBE: return functools.partial(step_decorator, + def wrapped_decorator1(astep_func): + @functools.wraps(astep_func) + def wrapped_decorator2(context, *args, **kwargs): + return step_decorator(astep_func, context, *args, + _loop=loop, + _timeout=timeout, + _async_context=async_context, + _should_close=should_close, **kwargs) + assert callable(astep_func) + return wrapped_decorator2 + return wrapped_decorator1 + else: + # -- CASE: @decorator ... or astep = decorator(astep) + # MAYBE: return functools.partial(step_decorator, astep_func=astep_func) + assert callable(astep_func) + @functools.wraps(astep_func) + def wrapped_decorator(context, *args, **kwargs): + return step_decorator(astep_func, context, *args, **kwargs) + return wrapped_decorator + +# -- ALIAS: +run_until_complete = async_run_until_complete + +# ----------------------------------------------------------------------------- +# ASYNC STEP UTILITY CLASSES: +# ----------------------------------------------------------------------------- +class AsyncContext(object): + # pylint: disable=line-too-long + """Provides a context object for "async steps" to keep track: + + * which event loop is used + * which (asyncio) tasks are used or of interest + + .. attribute:: loop + Event loop object to use. + If none is provided, the current event-loop is used + (or a new one is created). + + .. attribute:: tasks + List of started :mod:`asyncio` tasks (of interest). + + .. attribute:: name + + Optional name of this object (in the behave context). + If none is provided, :attr:`AsyncContext.default_name` is used. + + .. attribute:: should_close + Indicates if the :attr:`loop` (event-loop) should be closed or not. + + EXAMPLE: + + .. code-block:: python + + # -- FILE: features/steps/my_async_steps.py + # REQUIRES: Python 3.5 + from behave import given, when, then, step + from behave.api.async_step import AsyncContext + + @when('I dispatch an async-call with param "{param}"') + def step_impl(context, param): + async_context = getattr(context, "async_context", None) + if async_context is None: + async_context = context.async_context = AsyncContext() + task = async_context.loop.create_task(my_async_func(param)) + async_context.tasks.append(task) + + @then('I wait at most {duration:f} seconds until all async-calls are completed') + def step_impl(context, duration): + async_context = context.async_context + assert async_context.tasks + done, pending = async_context.loop.run_until_complete(asyncio.wait( + async_context.tasks, loop=async_context.loop, timeout=duration)) + assert len(pending) == 0 + + # -- COROUTINE: + async def my_async_func(param): + await asyncio.sleep(0.5) + return param.upper() + """ + # pylint: enable=line-too-long + default_name = "async_context" + + def __init__(self, loop=None, name=None, should_close=False, tasks=None): + self.loop = loop or asyncio.get_event_loop() or asyncio.new_event_loop() + self.tasks = tasks or [] + self.name = name or self.default_name + self.should_close = should_close + + def __del__(self): + if self.loop and self.should_close: + self.close() + + def close(self): + if self.loop and not self.loop.is_closed(): + self.loop.close() + self.loop = None + + +# ----------------------------------------------------------------------------- +# ASYNC STEP UTILITY FUNCTIONS: +# ----------------------------------------------------------------------------- +def use_or_create_async_context(context, name=None, loop=None, **kwargs): + """Utility function to be used in step implementations to ensure that an + 
:class:`AsyncContext` object is stored in the :param:`context` object. + + If no such attribute exists (under the given name), + a new :class:`AsyncContext` object is created with the provided args. + Otherwise, the existing context attribute is used. + + EXAMPLE: + + .. code-block:: python + + # -- FILE: features/steps/my_async_steps.py + # EXAMPLE REQUIRES: Python 3.5 + from behave import when + from behave.api.async_step import use_or_create_async_context + + @when('I dispatch an async-call with param "{param}"') + def step_impl(context, param): + async_context = use_or_create_async_context(context, "async_context") + task = async_context.loop.create_task(my_async_func(param)) + async_context.tasks.append(task) + + # -- COROUTINE: + async def my_async_func(param): + await asyncio.sleep(0.5) + return param.upper() + + :param context: Behave context object to use. + :param name: Optional name of async-context object (as string or None). + :param loop: Optional event_loop object to use for create call. + :param kwargs: Optional :class:`AsyncContext` params for create call. + :return: :class:`AsyncContext` object from the :param:`context`. + """ + if name is None: + name = AsyncContext.default_name + async_context = getattr(context, name, None) + if async_context is None: + async_context = AsyncContext(loop=loop, name=name, **kwargs) + setattr(context, async_context.name, async_context) + assert isinstance(async_context, AsyncContext) + assert getattr(context, async_context.name) is async_context + return async_context diff --git a/venv/Lib/site-packages/behave/capture.py b/venv/Lib/site-packages/behave/capture.py new file mode 100644 index 0000000..35cbae2 --- /dev/null +++ b/venv/Lib/site-packages/behave/capture.py @@ -0,0 +1,227 @@ +# -*- coding: UTF-8 -*- +""" +Capture output (stdout, stderr), logs, etc. +""" + +from __future__ import absolute_import +from contextlib import contextmanager +import sys +from six import StringIO, PY2 +from behave.log_capture import LoggingCapture +from behave.textutil import text as _text + +def add_text_to(value, more_text, separator="\n"): + if more_text: + if value: + if separator and not value.endswith(separator): + value += separator + value += more_text + else: + value = more_text + return value + + +class Captured(object): + """Stores and aggregates captured output data.""" + empty = u"" + linesep = u"\n" + + def __init__(self, stdout=None, stderr=None, log_output=None): + self.stdout = stdout or self.empty + self.stderr = stderr or self.empty + self.log_output = log_output or self.empty + + def reset(self): + self.stdout = self.empty + self.stderr = self.empty + self.log_output = self.empty + + # -- PYTHON2: + if PY2: + def __nonzero__(self): + return bool(self.stdout or self.stderr or self.log_output) + else: + def __bool__(self): + return bool(self.stdout or self.stderr or self.log_output) + + @property + def output(self): + """Makes a simple report of the captured data by concatenating + all parts. + """ + output_text = self.stdout + output_text = add_text_to(output_text, self.stderr) + output_text = add_text_to(output_text, self.log_output) + return output_text + + def add(self, captured): + """Adds/appends captured output data to this object. + + :param captured: Captured object whose data should be added. + :return: self, to allow daisy-chaining (if needed).
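# -- EXAMPLE (editor's sketch, not part of the diff): how add_text_to()
# above joins captured parts; the sample strings are made up.
from behave.capture import add_text_to   # -- the function defined above

assert add_text_to("", "stderr part") == "stderr part"
assert add_text_to("stdout part", "stderr part") == "stdout part\nstderr part"
assert add_text_to("ends with sep\n", "more") == "ends with sep\nmore"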
+ """ + assert isinstance(captured, Captured) + self.stdout = add_text_to(self.stdout, captured.stdout, self.linesep) + self.stderr = add_text_to(self.stderr, captured.stderr, self.linesep) + self.log_output = add_text_to(self.log_output, captured.log_output, + self.linesep) + return self + + def make_report(self): + """Makes a detailled report of the captured output data. + + :returns: Report as string. + """ + report_parts = [] + if self.stdout: + parts = ["Captured stdout:", _text(self.stdout).rstrip(), ""] + report_parts.extend(parts) + if self.stderr: + parts = ["Captured stderr:", _text(self.stderr).rstrip(), ""] + report_parts.extend(parts) + if self.log_output: + parts = ["Captured logging:", _text(self.log_output)] + report_parts.extend(parts) + return self.linesep.join(report_parts).strip() + + def __add__(self, other): + """Supports incremental add:: + + captured1 = Captured("Hello") + captured2 = Captured("World") + captured3 = captured1 + captured2 + assert captured3.stdout == "Hello\nWorld" + """ + new_data = Captured(self.stdout, self.stderr, self.log_output) + return new_data.add(other) + + def __iadd__(self, other): + """Supports incremental add:: + + captured1 = Captured("Hello") + captured2 = Captured("World") + captured1 += captured2 + assert captured1.stdout == "Hello\nWorld" + """ + return self.add(other) + + +class CaptureController(object): + """Simplifies the lifecycle to capture output from various sources.""" + def __init__(self, config): + self.config = config + self.stdout_capture = None + self.stderr_capture = None + self.log_capture = None + self.old_stdout = None + self.old_stderr = None + + @property + def captured(self): + """Provides access of the captured output data. + + :return: Object that stores the captured output parts (as Captured). + """ + stdout = None + stderr = None + log_out = None + if self.config.stdout_capture and self.stdout_capture: + stdout = _text(self.stdout_capture.getvalue()) + if self.config.stderr_capture and self.stderr_capture: + stderr = _text(self.stderr_capture.getvalue()) + if self.config.log_capture and self.log_capture: + log_out = _text(self.log_capture.getvalue()) + return Captured(stdout, stderr, log_out) + + def setup_capture(self, context): + assert context is not None + if self.config.stdout_capture: + self.stdout_capture = StringIO() + context.stdout_capture = self.stdout_capture + + if self.config.stderr_capture: + self.stderr_capture = StringIO() + context.stderr_capture = self.stderr_capture + + if self.config.log_capture: + self.log_capture = LoggingCapture(self.config) + self.log_capture.inveigle() + context.log_capture = self.log_capture + + def start_capture(self): + if self.config.stdout_capture: + # -- REPLACE ONLY: In non-capturing mode. + if not self.old_stdout: + self.old_stdout = sys.stdout + sys.stdout = self.stdout_capture + assert sys.stdout is self.stdout_capture + + if self.config.stderr_capture: + # -- REPLACE ONLY: In non-capturing mode. + if not self.old_stderr: + self.old_stderr = sys.stderr + sys.stderr = self.stderr_capture + assert sys.stderr is self.stderr_capture + + def stop_capture(self): + if self.config.stdout_capture: + # -- RESTORE ONLY: In capturing mode. + if self.old_stdout: + sys.stdout = self.old_stdout + self.old_stdout = None + assert sys.stdout is not self.stdout_capture + + if self.config.stderr_capture: + # -- RESTORE ONLY: In capturing mode. 
+ if self.old_stderr: + sys.stderr = self.old_stderr + self.old_stderr = None + assert sys.stderr is not self.stderr_capture + + def teardown_capture(self): + if self.config.log_capture: + self.log_capture.abandon() + + def make_capture_report(self): + """Combine collected output and return as string.""" + return self.captured.make_report() + # report = u"" + # if self.config.stdout_capture and self.stdout_capture: + # output = self.stdout_capture.getvalue() + # if output: + # output = _text(output) + # report += u"\nCaptured stdout:\n" + output + # if self.config.stderr_capture and self.stderr_capture: + # output = self.stderr_capture.getvalue() + # if output: + # output = _text(output) + # report += u"\nCaptured stderr:\n" + output + # if self.config.log_capture and self.log_capture: + # output = self.log_capture.getvalue() + # if output: + # output = _text(output) + # report += u"\nCaptured logging:\n" + output + # return report + +# ----------------------------------------------------------------------------- +# UTILITY FUNCTIONS: +# ----------------------------------------------------------------------------- +@contextmanager +def capture_output(controller, enabled=True): + """Provides a context manager that starts capturing output + + .. code-block:: + + with capture_output(capture_controller): + ... # Do something + """ + if enabled: + try: + controller.start_capture() + yield + finally: + controller.stop_capture() + else: + # -- CAPTURING OUTPUT is disabled. + # Needed to prevent recursive captures with context.execute_steps() + yield diff --git a/venv/Lib/site-packages/behave/compat/__init__.py b/venv/Lib/site-packages/behave/compat/__init__.py new file mode 100644 index 0000000..4dd4681 --- /dev/null +++ b/venv/Lib/site-packages/behave/compat/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- +""" +Used for behave as compatibility layer between different Python versions +and implementations. +""" diff --git a/venv/Lib/site-packages/behave/compat/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/behave/compat/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..e846353 Binary files /dev/null and b/venv/Lib/site-packages/behave/compat/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/compat/__pycache__/collections.cpython-311.pyc b/venv/Lib/site-packages/behave/compat/__pycache__/collections.cpython-311.pyc new file mode 100644 index 0000000..eff0820 Binary files /dev/null and b/venv/Lib/site-packages/behave/compat/__pycache__/collections.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/compat/collections.py b/venv/Lib/site-packages/behave/compat/collections.py new file mode 100644 index 0000000..00444f4 --- /dev/null +++ b/venv/Lib/site-packages/behave/compat/collections.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- +""" +Compatibility of :module:`collections` between different Python versions. +""" + +from __future__ import absolute_import +import warnings +# pylint: disable=unused-import +try: + # -- SINCE: Python2.7 + from collections import OrderedDict +except ImportError: # pragma: no cover + try: + # -- BACK-PORTED FOR: Python 2.4 .. 2.6 + from ordereddict import OrderedDict + except ImportError: + message = "collections.OrderedDict is missing: Install 'ordereddict'." + warnings.warn(message) + # -- BACKWARD-COMPATIBLE: Better than nothing (for behave use case). 
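# -- EXAMPLE (editor's sketch, not part of the diff): the enabled/disabled
# shape of the capture_output() context manager above; _StubController is a
# made-up stand-in for CaptureController.
from behave.capture import capture_output

class _StubController(object):
    def start_capture(self):
        print("capture started")
    def stop_capture(self):
        print("capture stopped")

with capture_output(_StubController()):
    pass    # -- start_capture()/stop_capture() bracket this block.
with capture_output(_StubController(), enabled=False):
    pass    # -- Disabled: plain yield, nothing is captured.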
+ OrderedDict = dict diff --git a/venv/Lib/site-packages/behave/configuration.py b/venv/Lib/site-packages/behave/configuration.py new file mode 100644 index 0000000..2e0bcf3 --- /dev/null +++ b/venv/Lib/site-packages/behave/configuration.py @@ -0,0 +1,795 @@ +# -*- coding: utf-8 -*- + +from __future__ import print_function +import argparse +import logging +import os +import re +import sys +import shlex +import six +from six.moves import configparser + +from behave.model import ScenarioOutline +from behave.model_core import FileLocation +from behave.reporter.junit import JUnitReporter +from behave.reporter.summary import SummaryReporter +from behave.tag_expression import TagExpression +from behave.formatter.base import StreamOpener +from behave.formatter import _registry as _format_registry +from behave.userdata import UserData, parse_user_define +from behave._types import Unknown +from behave.textutil import select_best_encoding, to_texts + +# -- PYTHON 2/3 COMPATIBILITY: +# SINCE Python 3.2: ConfigParser = SafeConfigParser +ConfigParser = configparser.ConfigParser +if six.PY2: + ConfigParser = configparser.SafeConfigParser + + +# ----------------------------------------------------------------------------- +# CONFIGURATION DATA TYPES: +# ----------------------------------------------------------------------------- +class LogLevel(object): + names = [ + "NOTSET", "CRITICAL", "FATAL", "ERROR", + "WARNING", "WARN", "INFO", "DEBUG", + ] + + @staticmethod + def parse(levelname, unknown_level=None): + """ + Convert levelname into a numeric log level. + + :param levelname: Logging levelname (as string) + :param unknown_level: Used if levelname is unknown (optional). + :return: Numeric log-level or unknown_level, if levelname is unknown. + """ + return getattr(logging, levelname.upper(), unknown_level) + + @classmethod + def parse_type(cls, levelname): + level = cls.parse(levelname, Unknown) + if level is Unknown: + message = "%s is unknown, use: %s" % \ + (levelname, ", ".join(cls.names[1:])) + raise argparse.ArgumentTypeError(message) + return level + + @staticmethod + def to_string(level): + return logging.getLevelName(level) + + +class ConfigError(Exception): + pass + + +# ----------------------------------------------------------------------------- +# CONFIGURATION SCHEMA: +# ----------------------------------------------------------------------------- +options = [ + (("-c", "--no-color"), + dict(action="store_false", dest="color", + help="Disable the use of ANSI color escapes.")), + + (("--color",), + dict(action="store_true", dest="color", + help="""Use ANSI color escapes. This is the default + behaviour. This switch is used to override a + configuration file setting.""")), + + (("-d", "--dry-run"), + dict(action="store_true", + help="Invokes formatters without executing the steps.")), + + (("-D", "--define"), + dict(dest="userdata_defines", type=parse_user_define, action="append", + metavar="NAME=VALUE", + help="""Define user-specific data for the config.userdata dictionary. 
+ Example: -D foo=bar to store it in config.userdata["foo"].""")), + + (("-e", "--exclude"), + dict(metavar="PATTERN", dest="exclude_re", + help="""Don't run feature files matching regular expression + PATTERN.""")), + + (("-i", "--include"), + dict(metavar="PATTERN", dest="include_re", + help="Only run feature files matching regular expression PATTERN.")), + + (("--no-junit",), + dict(action="store_false", dest="junit", + help="Don't output JUnit-compatible reports.")), + + (("--junit",), + dict(action="store_true", + help="""Output JUnit-compatible reports. + When junit is enabled, all stdout and stderr + will be redirected and dumped to the junit report, + regardless of the "--capture" and "--no-capture" options. + """)), + + (("--junit-directory",), + dict(metavar="PATH", dest="junit_directory", + default="reports", + help="""Directory in which to store JUnit reports.""")), + + ((), # -- CONFIGFILE only + dict(dest="default_format", + help="Specify default formatter (default: pretty).")), + + + (("-f", "--format"), + dict(action="append", + help="""Specify a formatter. If none is specified the default + formatter is used. Pass "--format help" to get a + list of available formatters.""")), + + (("--steps-catalog",), + dict(action="store_true", dest="steps_catalog", + help="""Show a catalog of all available step definitions. + SAME AS: --format=steps.catalog --dry-run --no-summary -q""")), + + ((), # -- CONFIGFILE only + dict(dest="scenario_outline_annotation_schema", + help="""Specify name annotation schema for scenario outline + (default="{name} -- @{row.id} {examples.name}").""")), + + (("-k", "--no-skipped"), + dict(action="store_false", dest="show_skipped", + help="Don't print skipped steps (due to tags).")), + + (("--show-skipped",), + dict(action="store_true", + help="""Print skipped steps. + This is the default behaviour. This switch is used to + override a configuration file setting.""")), + + (("--no-snippets",), + dict(action="store_false", dest="show_snippets", + help="Don't print snippets for unimplemented steps.")), + (("--snippets",), + dict(action="store_true", dest="show_snippets", + help="""Print snippets for unimplemented steps. + This is the default behaviour. This switch is used to + override a configuration file setting.""")), + + (("-m", "--no-multiline"), + dict(action="store_false", dest="show_multiline", + help="""Don't print multiline strings and tables under + steps.""")), + + (("--multiline", ), + dict(action="store_true", dest="show_multiline", + help="""Print multiline strings and tables under steps. + This is the default behaviour. This switch is used to + override a configuration file setting.""")), + + (("-n", "--name"), + dict(action="append", + help="""Only execute the feature elements which match part + of the given name. If this option is given more + than once, it will match against all the given + names.""")), + + (("--no-capture",), + dict(action="store_false", dest="stdout_capture", + help="""Don't capture stdout (any stdout output will be + printed immediately.)""")), + + (("--capture",), + dict(action="store_true", dest="stdout_capture", + help="""Capture stdout (any stdout output will be + printed if there is a failure.) + This is the default behaviour. 
This switch is used to + override a configuration file setting.""")), + + (("--no-capture-stderr",), + dict(action="store_false", dest="stderr_capture", + help="""Don't capture stderr (any stderr output will be + printed immediately.)""")), + + (("--capture-stderr",), + dict(action="store_true", dest="stderr_capture", + help="""Capture stderr (any stderr output will be + printed if there is a failure.) + This is the default behaviour. This switch is used to + override a configuration file setting.""")), + + (("--no-logcapture",), + dict(action="store_false", dest="log_capture", + help="""Don't capture logging. Logging configuration will + be left intact.""")), + + (("--logcapture",), + dict(action="store_true", dest="log_capture", + help="""Capture logging. All logging during a step will be captured + and displayed in the event of a failure. + This is the default behaviour. This switch is used to + override a configuration file setting.""")), + + (("--logging-level",), + dict(type=LogLevel.parse_type, + help="""Specify a level to capture logging at. The default + is INFO - capturing everything.""")), + + (("--logging-format",), + dict(help="""Specify custom format to print statements. Uses the + same format as used by standard logging handlers. The + default is "%%(levelname)s:%%(name)s:%%(message)s".""")), + + (("--logging-datefmt",), + dict(help="""Specify custom date/time format to print + statements. + Uses the same format as used by standard logging + handlers.""")), + + (("--logging-filter",), + dict(help="""Specify which statements to filter in/out. By default, + everything is captured. If the output is too verbose, use + this option to filter out needless output. + Example: --logging-filter=foo will capture statements issued + ONLY to foo or foo.what.ever.sub but not foobar or other + logger. Specify multiple loggers with comma: + filter=foo,bar,baz. + If any logger name is prefixed with a minus, eg filter=-foo, + it will be excluded rather than included.""", + config_help="""Specify which statements to filter in/out. By default, + everything is captured. If the output is too verbose, + use this option to filter out needless output. + Example: ``logging_filter = foo`` will capture + statements issued ONLY to "foo" or "foo.what.ever.sub" + but not "foobar" or other logger. Specify multiple + loggers with comma: ``logging_filter = foo,bar,baz``. 
+ If any logger name is prefixed with a minus, eg + ``logging_filter = -foo``, it will be excluded rather + than included.""")), + + (("--logging-clear-handlers",), + dict(action="store_true", + help="Clear all other logging handlers.")), + + (("--no-summary",), + dict(action="store_false", dest="summary", + help="""Don't display the summary at the end of the run.""")), + + (("--summary",), + dict(action="store_true", dest="summary", + help="""Display the summary at the end of the run.""")), + + (("-o", "--outfile"), + dict(action="append", dest="outfiles", metavar="FILE", + help="Write to specified file instead of stdout.")), + + ((), # -- CONFIGFILE only + dict(action="append", dest="paths", + help="Specify default feature paths, used when none are provided.")), + + (("-q", "--quiet"), + dict(action="store_true", + help="Alias for --no-snippets --no-source.")), + + (("-s", "--no-source"), + dict(action="store_false", dest="show_source", + help="""Don't print the file and line of the step definition with the + steps.""")), + + (("--show-source",), + dict(action="store_true", dest="show_source", + help="""Print the file and line of the step + definition with the steps. This is the default + behaviour. This switch is used to override a + configuration file setting.""")), + + (("--stage",), + dict(help="""Defines the current test stage. + The test stage name is used as name prefix for the environment + file and the steps directory (instead of default path names). + """)), + + (("--stop",), + dict(action="store_true", + help="Stop running tests at the first failure.")), + + # -- DISABLE-UNUSED-OPTION: Not used anywhere. + # (("-S", "--strict"), + # dict(action="store_true", + # help="Fail if there are any undefined or pending steps.")), + + ((), # -- CONFIGFILE only + dict(dest="default_tags", metavar="TAG_EXPRESSION", + help="""Define default tags when none are provided. + See --tags for more information.""")), + + (("-t", "--tags"), + dict(action="append", metavar="TAG_EXPRESSION", + help="""Only execute features or scenarios with tags + matching TAG_EXPRESSION. Pass "--tags-help" for + more information.""", + config_help="""Only execute certain features or scenarios based + on the tag expression given. See below for how to code + tag expressions in configuration files.""")), + + (("-T", "--no-timings"), + dict(action="store_false", dest="show_timings", + help="""Don't print the time taken for each step.""")), + + (("--show-timings",), + dict(action="store_true", dest="show_timings", + help="""Print the time taken, in seconds, of each step after the + step has completed. This is the default behaviour. This + switch is used to override a configuration file + setting.""")), + + (("-v", "--verbose"), + dict(action="store_true", + help="Show the files and features loaded.")), + + (("-w", "--wip"), + dict(action="store_true", + help="""Only run scenarios tagged with "wip".
Additionally: use the + "plain" formatter, do not capture stdout or logging output + and stop at the first failure.""")), + + (("-x", "--expand"), + dict(action="store_true", + help="Expand scenario outline tables in output.")), + + (("--lang",), + dict(metavar="LANG", + help="Use keywords for a language other than English.")), + + (("--lang-list",), + dict(action="store_true", + help="List the languages available for --lang.")), + + (("--lang-help",), + dict(metavar="LANG", + help="List the translations accepted for one language.")), + + (("--tags-help",), + dict(action="store_true", + help="Show help for tag expressions.")), + + (("--version",), + dict(action="store_true", help="Show version.")), +] + +# -- OPTIONS: With raw value access semantics in configuration file. +raw_value_options = frozenset([ + "logging_format", + "logging_datefmt", + # -- MAYBE: "scenario_outline_annotation_schema", +]) + + +def read_configuration(path): + # pylint: disable=too-many-locals, too-many-branches + config = ConfigParser() + config.optionxform = str # -- SUPPORT: case-sensitive keys + config.read(path) + config_dir = os.path.dirname(path) + result = {} + for fixed, keywords in options: + if "dest" in keywords: + dest = keywords["dest"] + else: + for opt in fixed: + if opt.startswith("--"): + dest = opt[2:].replace("-", "_") + else: + assert len(opt) == 2 + dest = opt[1:] + if dest in "tags_help lang_list lang_help version".split(): + continue + if not config.has_option("behave", dest): + continue + action = keywords.get("action", "store") + if action == "store": + use_raw_value = dest in raw_value_options + result[dest] = config.get("behave", dest, raw=use_raw_value) + elif action in ("store_true", "store_false"): + result[dest] = config.getboolean("behave", dest) + elif action == "append": + if dest == "userdata_defines": + continue # -- SKIP-CONFIGFILE: Command-line only option. + result[dest] = \ + [s.strip() for s in config.get("behave", dest).splitlines()] + else: + raise ValueError('action "%s" not implemented' % action) + + # -- STEP: format/outfiles coupling + if "format" in result: + # -- OPTIONS: format/outfiles are coupled in configuration file. + formatters = result["format"] + formatter_size = len(formatters) + outfiles = result.get("outfiles", []) + outfiles_size = len(outfiles) + if outfiles_size < formatter_size: + for formatter_name in formatters[outfiles_size:]: + outfile = "%s.output" % formatter_name + outfiles.append(outfile) + result["outfiles"] = outfiles + elif len(outfiles) > formatter_size: + print("CONFIG-ERROR: Too many outfiles (%d) provided." % + outfiles_size) + result["outfiles"] = outfiles[:formatter_size] + + for paths_name in ("paths", "outfiles"): + if paths_name in result: + # -- Evaluate relative paths relative to location. + # NOTE: Absolute paths are preserved by os.path.join(). + paths = result[paths_name] + result[paths_name] = \ + [os.path.normpath(os.path.join(config_dir, p)) for p in paths] + + # -- STEP: Special additional configuration sections. 
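# -- EXAMPLE (editor's sketch, not part of the diff): exercising the
# format/outfiles coupling handled by read_configuration() above; the config
# content and temporary path are made up.
import os
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    path = os.path.join(tmpdir, "behave.ini")
    with open(path, "w") as f:
        f.write("[behave]\nformat = plain\n\tjson\noutfiles = plain.out\n")
    # result = read_configuration(path)   # -- assuming the function above
    # Expected: result["format"] == ["plain", "json"], and result["outfiles"]
    # gains an auto-appended, path-normalized "json.output" entry.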
+ # SCHEMA: config_section: data_name + special_config_section_map = { + "behave.formatters": "more_formatters", + "behave.userdata": "userdata", + } + for section_name, data_name in special_config_section_map.items(): + result[data_name] = {} + if config.has_section(section_name): + result[data_name].update(config.items(section_name)) + + return result + + +def config_filenames(): + paths = ["./", os.path.expanduser("~")] + if sys.platform in ("cygwin", "win32") and "APPDATA" in os.environ: + paths.append(os.path.join(os.environ["APPDATA"])) + + for path in reversed(paths): + for filename in reversed( + ("behave.ini", ".behaverc", "setup.cfg", "tox.ini")): + filename = os.path.join(path, filename) + if os.path.isfile(filename): + yield filename + + +def load_configuration(defaults, verbose=False): + for filename in config_filenames(): + if verbose: + print('Loading config defaults from "%s"' % filename) + defaults.update(read_configuration(filename)) + + if verbose: + print("Using defaults:") + for k, v in six.iteritems(defaults): + print("%15s %s" % (k, v)) + + +def setup_parser(): + # construct the parser + # usage = "%(prog)s [options] [ [FILE|DIR|URL][:LINE[:LINE]*] ]+" + usage = "%(prog)s [options] [ [DIR|FILE|FILE:LINE] ]+" + description = """\ + Run a number of feature tests with behave.""" + more = """ + EXAMPLES: + behave features/ + behave features/one.feature features/two.feature + behave features/one.feature:10 + behave @features.txt + """ + parser = argparse.ArgumentParser(usage=usage, description=description) + for fixed, keywords in options: + if not fixed: + continue # -- CONFIGFILE only. + if "config_help" in keywords: + keywords = dict(keywords) + del keywords["config_help"] + parser.add_argument(*fixed, **keywords) + parser.add_argument("paths", nargs="*", + help="Feature directory, file or file location (FILE:LINE).") + return parser + + +class Configuration(object): + """Configuration object for behave and behave runners.""" + # pylint: disable=too-many-instance-attributes + defaults = dict( + color=sys.platform != "win32", + show_snippets=True, + show_skipped=True, + dry_run=False, + show_source=True, + show_timings=True, + stdout_capture=True, + stderr_capture=True, + log_capture=True, + logging_format="%(levelname)s:%(name)s:%(message)s", + logging_level=logging.INFO, + steps_catalog=False, + summary=True, + junit=False, + stage=None, + userdata={}, + # -- SPECIAL: + default_format="pretty", # -- Used when no formatters are configured. + default_tags="", # -- Used when no tags are defined. + scenario_outline_annotation_schema=u"{name} -- @{row.id} {examples.name}" + ) + cmdline_only_options = set("userdata_defines") + + def __init__(self, command_args=None, load_config=True, verbose=None, + **kwargs): + """ + Constructs a behave configuration object. + * loads the configuration defaults (if needed). + * process the command-line args + * store the configuration results + + :param command_args: Provide command args (as sys.argv). + If command_args is None, sys.argv[1:] is used. + :type command_args: list, str + :param load_config: Indicate if configfile should be loaded (=true) + :param verbose: Indicate if diagnostic output is enabled + :param kwargs: Used to hand-over/overwrite default values. 
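# -- EXAMPLE (editor's worked trace, not part of the diff): the lookup order
# implied by config_filenames() above, assuming only "./" and the home
# directory are searched (no APPDATA):
#
#   ~/tox.ini, ~/setup.cfg, ~/.behaverc, ~/behave.ini,
#   ./tox.ini, ./setup.cfg, ./.behaverc, ./behave.ini
#
# load_configuration() update()s the defaults once per existing file, so the
# last file in this order (./behave.ini, if present) has the final say.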
+ """ + # pylint: disable=too-many-branches, too-many-statements + if command_args is None: + command_args = sys.argv[1:] + elif isinstance(command_args, six.string_types): + encoding = select_best_encoding() or "utf-8" + if six.PY2 and isinstance(command_args, six.text_type): + command_args = command_args.encode(encoding) + elif six.PY3 and isinstance(command_args, six.binary_type): + command_args = command_args.decode(encoding) + command_args = shlex.split(command_args) + elif isinstance(command_args, (list, tuple)): + command_args = to_texts(command_args) + + if verbose is None: + # -- AUTO-DISCOVER: Verbose mode from command-line args. + verbose = ("-v" in command_args) or ("--verbose" in command_args) + + self.version = None + self.tags_help = None + self.lang_list = None + self.lang_help = None + self.default_tags = None + self.junit = None + self.logging_format = None + self.logging_datefmt = None + self.name = None + self.scope = None + self.steps_catalog = None + self.userdata = None + self.wip = None + + defaults = self.defaults.copy() + for name, value in six.iteritems(kwargs): + defaults[name] = value + self.defaults = defaults + self.formatters = [] + self.reporters = [] + self.name_re = None + self.outputs = [] + self.include_re = None + self.exclude_re = None + self.scenario_outline_annotation_schema = None # pylint: disable=invalid-name + self.steps_dir = "steps" + self.environment_file = "environment.py" + self.userdata_defines = None + self.more_formatters = None + if load_config: + load_configuration(self.defaults, verbose=verbose) + parser = setup_parser() + parser.set_defaults(**self.defaults) + args = parser.parse_args(command_args) + for key, value in six.iteritems(args.__dict__): + if key.startswith("_") and key not in self.cmdline_only_options: + continue + setattr(self, key, value) + + self.paths = [os.path.normpath(path) for path in self.paths] + self.setup_outputs(args.outfiles) + + if self.steps_catalog: + # -- SHOW STEP-CATALOG: As step summary. + self.default_format = "steps.catalog" + self.format = ["steps.catalog"] + self.dry_run = True + self.summary = False + self.show_skipped = False + self.quiet = True + + if self.wip: + # Only run scenarios tagged with "wip". + # Additionally: + # * use the "plain" formatter (per default) + # * do not capture stdout or logging output and + # * stop at the first failure. + self.default_format = "plain" + self.tags = ["wip"] + self.default_tags.split() + self.color = False + self.stop = True + self.log_capture = False + self.stdout_capture = False + + self.tags = TagExpression(self.tags or self.default_tags.split()) + + if self.quiet: + self.show_source = False + self.show_snippets = False + + if self.exclude_re: + self.exclude_re = re.compile(self.exclude_re) + + if self.include_re: + self.include_re = re.compile(self.include_re) + if self.name: + # -- SELECT: Scenario-by-name, build regular expression. + self.name_re = self.build_name_re(self.name) + + if self.stage is None: # pylint: disable=access-member-before-definition + # -- USE ENVIRONMENT-VARIABLE, if stage is undefined. + self.stage = os.environ.get("BEHAVE_STAGE", None) + self.setup_stage(self.stage) + self.setup_model() + self.setup_userdata() + + # -- FINALLY: Setup Reporters and Formatters + # NOTE: Reporters and Formatters can now use userdata information. 
+ if self.junit: + # Buffer the output (it will be put into Junit report) + self.stdout_capture = True + self.stderr_capture = True + self.log_capture = True + self.reporters.append(JUnitReporter(self)) + if self.summary: + self.reporters.append(SummaryReporter(self)) + + self.setup_formats() + unknown_formats = self.collect_unknown_formats() + if unknown_formats: + parser.error("format=%s is unknown" % ", ".join(unknown_formats)) + + + def setup_outputs(self, args_outfiles=None): + if self.outputs: + assert not args_outfiles, "ONLY-ONCE" + return + + # -- NORMAL CASE: Setup only initially (once). + if not args_outfiles: + self.outputs.append(StreamOpener(stream=sys.stdout)) + else: + for outfile in args_outfiles: + if outfile and outfile != "-": + self.outputs.append(StreamOpener(outfile)) + else: + self.outputs.append(StreamOpener(stream=sys.stdout)) + + def setup_formats(self): + """Register more, user-defined formatters by name.""" + if self.more_formatters: + for name, scoped_class_name in self.more_formatters.items(): + _format_registry.register_as(name, scoped_class_name) + + def collect_unknown_formats(self): + unknown_formats = [] + if self.format: + for format_name in self.format: + if (format_name == "help" or + _format_registry.is_formatter_valid(format_name)): + continue + unknown_formats.append(format_name) + return unknown_formats + + @staticmethod + def build_name_re(names): + """ + Build regular expression for scenario selection by name + by using a list of name parts or name regular expressions. + + :param names: List of name parts or regular expressions (as text). + :return: Compiled regular expression to use. + """ + # -- NOTE: re.LOCALE is removed in Python 3.6 (deprecated in Python 3.5) + # flags = (re.UNICODE | re.LOCALE) + # -- ENSURE: Names are all unicode/text values (for issue #606). + names = to_texts(names) + pattern = u"|".join(names) + return re.compile(pattern, flags=re.UNICODE) + + def exclude(self, filename): + if isinstance(filename, FileLocation): + filename = six.text_type(filename) + + if self.include_re and self.include_re.search(filename) is None: + return True + if self.exclude_re and self.exclude_re.search(filename) is not None: + return True + return False + + def setup_logging(self, level=None, configfile=None, **kwargs): + """ + Support simple setup of logging subsystem. + Ensures that the logging level is set. + But note that the logging setup can only occur once. + + SETUP MODES: + * :func:`logging.config.fileConfig()`, if ``configfile`` is provided. + * :func:`logging.basicConfig()`, otherwise. + + .. code-block:: python + # -- FILE: features/environment.py + def before_all(context): + context.config.setup_logging() + + :param level: Logging level of root logger. + If None, use :attr:`logging_level` value. + :param configfile: Configuration filename for fileConfig() setup. + :param kwargs: Passed to :func:`logging.basicConfig()` + """ + if level is None: + level = self.logging_level # pylint: disable=no-member + + if configfile: + from logging.config import fileConfig + fileConfig(configfile) + else: + # pylint: disable=no-member + format_ = kwargs.pop("format", self.logging_format) + datefmt = kwargs.pop("datefmt", self.logging_datefmt) + logging.basicConfig(format=format_, datefmt=datefmt, **kwargs) + # -- ENSURE: Default log level is set + # (even if logging subsystem is already configured).
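# -- EXAMPLE (editor's sketch, not part of the diff): scenario selection by
# name builds one alternation regex, mirroring build_name_re() above; the
# name parts are made up.
import re

name_re = re.compile(u"|".join([u"login", u"checkout.*fails"]), flags=re.UNICODE)
assert name_re.search(u"Scenario: login works")
assert name_re.search(u"checkout sometimes fails")
assert not name_re.search(u"signup")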
+ logging.getLogger().setLevel(level) + + def setup_model(self): + if self.scenario_outline_annotation_schema: + name_schema = six.text_type(self.scenario_outline_annotation_schema) + ScenarioOutline.annotation_schema = name_schema.strip() + + def setup_stage(self, stage=None): + """Setup the test stage that selects a different set of + steps and environment implementations. + + :param stage: Name of current test stage (as string or None). + + EXAMPLE:: + + # -- SETUP DEFAULT TEST STAGE (unnamed): + config = Configuration() + config.setup_stage() + assert config.steps_dir == "steps" + assert config.environment_file == "environment.py" + + # -- SETUP PRODUCT TEST STAGE: + config.setup_stage("product") + assert config.steps_dir == "product_steps" + assert config.environment_file == "product_environment.py" + """ + steps_dir = "steps" + environment_file = "environment.py" + if stage: + # -- USE A TEST STAGE: Select different set of implementations. + prefix = stage + "_" + steps_dir = prefix + steps_dir + environment_file = prefix + environment_file + self.steps_dir = steps_dir + self.environment_file = environment_file + + def setup_userdata(self): + if not isinstance(self.userdata, UserData): + self.userdata = UserData(self.userdata) + if self.userdata_defines: + # -- ENSURE: Cmd-line overrides configuration file parameters. + self.userdata.update(self.userdata_defines) + + def update_userdata(self, data): + """Update userdata with data and reapply userdata defines (cmdline). + :param data: Provides (partial) userdata (as dict) + """ + self.userdata.update(data) + if self.userdata_defines: + # -- REAPPLY: Cmd-line defines (override configuration file data). + self.userdata.update(self.userdata_defines) diff --git a/venv/Lib/site-packages/behave/contrib/__init__.py b/venv/Lib/site-packages/behave/contrib/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/venv/Lib/site-packages/behave/contrib/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/behave/contrib/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..c2a5976 Binary files /dev/null and b/venv/Lib/site-packages/behave/contrib/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/contrib/__pycache__/formatter_missing_steps.cpython-311.pyc b/venv/Lib/site-packages/behave/contrib/__pycache__/formatter_missing_steps.cpython-311.pyc new file mode 100644 index 0000000..d44f9c9 Binary files /dev/null and b/venv/Lib/site-packages/behave/contrib/__pycache__/formatter_missing_steps.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/contrib/__pycache__/scenario_autoretry.cpython-311.pyc b/venv/Lib/site-packages/behave/contrib/__pycache__/scenario_autoretry.cpython-311.pyc new file mode 100644 index 0000000..b21f662 Binary files /dev/null and b/venv/Lib/site-packages/behave/contrib/__pycache__/scenario_autoretry.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/contrib/__pycache__/substep_dirs.cpython-311.pyc b/venv/Lib/site-packages/behave/contrib/__pycache__/substep_dirs.cpython-311.pyc new file mode 100644 index 0000000..1eccbbd Binary files /dev/null and b/venv/Lib/site-packages/behave/contrib/__pycache__/substep_dirs.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/contrib/formatter_missing_steps.py b/venv/Lib/site-packages/behave/contrib/formatter_missing_steps.py new file mode 100644 index 0000000..dd42c45 --- /dev/null +++ b/venv/Lib/site-packages/behave/contrib/formatter_missing_steps.py @@ -0,0 +1,80 @@ +# -*- 
coding: UTF-8 -*- +""" +Provides a formatter that writes prototypes for missing step functions +into a step module file by using step snippets. + +NOTE: This is only simplistic, proof-of-concept code. +""" + +from __future__ import absolute_import, print_function +from behave.runner_util import make_undefined_step_snippets +from behave.formatter.steps import StepsUsageFormatter + + +STEP_MODULE_TEMPLATE = '''\ +# -*- coding: {encoding} -*- +""" +Missing step implementations (proof-of-concept). +""" + +from behave import given, when, then, step + +{step_snippets} +''' + + +class MissingStepsFormatter(StepsUsageFormatter): + """Formatter that writes missing steps snippets into a step module file. + + Reuses StepsUsageFormatter class because it already contains the logic + for discovering missing/undefined steps. + + .. code-block:: ini + + # -- FILE: behave.ini + # NOTE: Long text value needs indentation on following lines. + [behave.userdata] + behave.formatter.missing_steps.template = # -*- coding: {encoding} -*- + # Missing step implementations. + from behave import given, when, then, step + + {step_snippets} + """ + name = "missing-steps" + description = "Writes implementation for missing step definitions." + template = STEP_MODULE_TEMPLATE + scope = "behave.formatter.missing_steps" + + def __init__(self, stream_opener, config): + super(MissingStepsFormatter, self).__init__(stream_opener, config) + self.template = self.__class__.template + self.init_from_userdata(config.userdata) + + def init_from_userdata(self, userdata): + scoped_name = "%s.%s" % (self.scope, "template") + template_text = userdata.get(scoped_name, self.template) + self.template = template_text + + def close(self): + """Called at end of test run. + NOTE: Overwritten to avoid truncating/overwriting the output file. + """ + if self.step_registry and self.undefined_steps: + # -- ENSURE: Output stream is open. + self.stream = self.open() + self.report() + + # -- FINALLY: + self.close_stream() + + # -- REPORT SPECIFIC-API: + def report(self): + """Writes missing step implementations by using step snippets.""" + step_snippets = make_undefined_step_snippets(self.undefined_steps) + encoding = self.stream.encoding or "UTF-8" + function_separator = u"\n\n\n" + step_snippets_text = function_separator.join(step_snippets) + module_text = self.template.format(encoding=encoding, + step_snippets=step_snippets_text) + self.stream.write(module_text) + self.stream.write("\n") diff --git a/venv/Lib/site-packages/behave/contrib/scenario_autoretry.py b/venv/Lib/site-packages/behave/contrib/scenario_autoretry.py new file mode 100644 index 0000000..2b7f94f --- /dev/null +++ b/venv/Lib/site-packages/behave/contrib/scenario_autoretry.py @@ -0,0 +1,73 @@ +# -*- coding: UTF-8 -*- +# pylint: disable=line-too-long +""" +Provides support functionality to retry scenarios a number of times before +their failure is accepted. This functionality can be helpful when you use +behave tests in an unreliable server/network infrastructure. + +EXAMPLE: + +.. sourcecode:: gherkin + + # -- FILE: features/alice.feature + # TAG: Feature or Scenario/ScenarioOutline with @autoretry + # NOTE: If you tag the feature, all its scenarios are retried. + @autoretry + Feature: Use unreliable Server infrastructure + + Scenario: ... + + +..
sourcecode:: python + + # -- FILE: features/environment.py + from behave.contrib.scenario_autoretry import patch_scenario_with_autoretry + + def before_feature(context, feature): + for scenario in feature.scenarios: + if "autoretry" in scenario.effective_tags: + patch_scenario_with_autoretry(scenario, max_attempts=2) + +.. seealso:: + * https://github.com/behave/behave/pull/328 + * https://github.com/hypothesis/smokey/blob/sauce-reliability/smokey/features/environment.py +""" + +from __future__ import print_function +import functools +from behave.model import ScenarioOutline + + +def patch_scenario_with_autoretry(scenario, max_attempts=3): + """Monkey-patches :func:`~behave.model.Scenario.run()` to auto-retry a + scenario that fails. The scenario is retried a number of times + before its failure is accepted. + + This is helpful when the test infrastructure (server/network environment) + is unreliable (which should be a rare case). + + :param scenario: Scenario or ScenarioOutline to patch. + :param max_attempts: How many times the scenario can be run. + """ + def scenario_run_with_retries(scenario_run, *args, **kwargs): + for attempt in range(1, max_attempts+1): + if not scenario_run(*args, **kwargs): + if attempt > 1: + message = u"AUTO-RETRY SCENARIO PASSED (after {0} attempts)" + print(message.format(attempt)) + return False # -- NOT-FAILED = PASSED + # -- SCENARIO FAILED: + if attempt < max_attempts: + print(u"AUTO-RETRY SCENARIO (attempt {0})".format(attempt)) + message = u"AUTO-RETRY SCENARIO FAILED (after {0} attempts)" + print(message.format(max_attempts)) + return True + + if isinstance(scenario, ScenarioOutline): + scenario_outline = scenario + for scenario in scenario_outline.scenarios: + scenario_run = scenario.run + scenario.run = functools.partial(scenario_run_with_retries, scenario_run) + else: + scenario_run = scenario.run + scenario.run = functools.partial(scenario_run_with_retries, scenario_run) diff --git a/venv/Lib/site-packages/behave/contrib/substep_dirs.py b/venv/Lib/site-packages/behave/contrib/substep_dirs.py new file mode 100644 index 0000000..b568d57 --- /dev/null +++ b/venv/Lib/site-packages/behave/contrib/substep_dirs.py @@ -0,0 +1,16 @@ +# -*- coding: UTF-8 -*- +""" +Recipe for loading additional step definitions from sub-directories +in the "features/steps" directory. + +.. code-block:: + + # -- FILE: features/steps/use_substep_dirs.py + # REQUIRES: path.py + from behave.runner_util import load_step_modules + from path import Path + + HERE = Path(__file__).dirname + SUBSTEP_DIRS = list(HERE.walkdirs()) + load_step_modules(SUBSTEP_DIRS) +""" diff --git a/venv/Lib/site-packages/behave/fixture.py b/venv/Lib/site-packages/behave/fixture.py new file mode 100644 index 0000000..21093b0 --- /dev/null +++ b/venv/Lib/site-packages/behave/fixture.py @@ -0,0 +1,402 @@ +# -*- coding: UTF-8 -*- +# STATUS: Basic concept works. +""" +A **fixture** provides a concept to simplify test support functionality +that needs a setup/cleanup cycle per scenario, feature or test-run. +A fixture is provided as fixture-function that contains the setup part and +cleanup part similar to :func:`contextlib.contextmanager` or `pytest.fixture`_. + +.. _pytest.fixture: https://docs.pytest.org/en/latest/fixture.html + +A fixture is used when: + +* the (registered) fixture tag is used for a scenario or feature +* the :func:`.use_fixture()` is called in the environment file (normally) + +.. 
sourcecode:: python + + # -- FILE: behave4my_project/fixtures.py (or: features/environment.py) + from behave import fixture + from somewhere.browser.firefox import FirefoxBrowser + + @fixture + def browser_firefox(context, timeout=30, **kwargs): + # -- SETUP-FIXTURE PART: + context.browser = FirefoxBrowser(timeout, **kwargs) + yield context.browser + # -- CLEANUP-FIXTURE PART: + context.browser.shutdown() + +.. sourcecode:: gherkin + + # -- FILE: features/use_fixture.feature + Feature: Use Fixture in Scenario + + @fixture.browser.firefox + Scenario: Use browser=firefox + Given I use the browser + ... + # -- AFTER-SCENARIO: Cleanup fixture.browser.firefox + +.. sourcecode:: python + + # -- FILE: features/environment.py + from behave import use_fixture + from behave4my_project.fixtures import browser_firefox + + def before_tag(context, tag): + if tag == "fixture.browser.firefox": + # -- Performs fixture setup and registers fixture cleanup + use_fixture(browser_firefox, context, timeout=10) + +.. hidden: + + BEHAVIORAL DECISIONS: + + * Should scenario/feature be executed when fixture-setup fails + (similar to before-hook failures) ? + NO, scope is skipped, but after-hooks and cleanups are executed. + + * Should remaining fixture-setups be performed after first fixture fails? + NO, first setup-error aborts the setup and execution of the scope. + + * Should remaining fixture-cleanups be performed when first cleanup-error + occurs? + YES, try to perform all fixture-cleanups and then reraise the + first cleanup-error. + + + OPEN ISSUES: + + * AUTO_CALL_REGISTERED_FIXTURE (planned in future): + Run fixture setup before or after before-hooks? + + IDEAS: + + * Fixture registers itself in fixture registry (runtime context). + * Code in before_tag() will either be replaced w/ fixture processing function + or will automatically be executed (AUTO_CALL_REGISTERED_FIXTURE) + * Support fixture tags w/ parameters that are automatically parsed and + passed to fixture function, like: + @fixture(name="foo", pattern="{name}={browser}") +""" + +import inspect + + +# ------------------------------------------------------------------------------- +# LOCAL HELPERS: +# ------------------------------------------------------------------------------- +def iscoroutinefunction(func): + """Checks if a function is a coroutine-function, like: + + * ``async def f(): ...`` (since Python 3.5) + * ``@asyncio.coroutine def f(): ...`` (since Python3) + + .. note:: Compatibility helper + + Avoids importing the :mod:`asyncio` module directly (since Python3), + which in turn initializes the :mod:`logging` module as a side-effect. + + :param func: Function to check. + :return: True, if function is a coroutine function. + False, otherwise. + """ + # -- NOTE: inspect.iscoroutinefunction() is available since Python 3.5 + # Also checks if the @asyncio.coroutine decorator is used. + # pylint: disable=no-member + return (getattr(func, "_is_coroutine", False) or + (hasattr(inspect, "iscoroutinefunction") and + inspect.iscoroutinefunction(func))) + + +def is_context_manager(func): + """Checks if a fixture function provides context-manager functionality, + similar to the :func:`contextlib.contextmanager()` function decorator. + + .. 
code-block:: python + + @fixture + def foo(context, *args, **kwargs): + context.foo = setup_foo() + yield context.foo + cleanup_foo() + + @fixture + def bar(context, *args, **kwargs): + context.bar = setup_bar() + return context.bar + + assert is_context_manager(foo) is True # Generator-function + assert is_context_manager(bar) is False # Normal function + + :param func: Function to check. + :return: True, if function is a generator/context-manager function. + False, otherwise. + """ + genfunc = inspect.isgeneratorfunction(func) + return genfunc and not iscoroutinefunction(func) + + +# ------------------------------------------------------------------------------- +# EXCEPTIONS: +# ------------------------------------------------------------------------------- +class InvalidFixtureError(RuntimeError): + """Raised when a fixture is invalid. + This occurs when a generator-function with more than one yield statement + is used as fixture-function. + """ + + +# ------------------------------------------------------------------------------- +# FUNCTIONS: Fixture support +# ------------------------------------------------------------------------------- +def _setup_fixture(fixture_func, context, *fixture_args, **fixture_kwargs): + """Provides core functionality to set up a fixture and register its + cleanup part (if needed). + """ + if is_context_manager(fixture_func): + # -- CASE: Fixture function is a two-step generator (setup, cleanup). + def cleanup_fixture(): + try: + next(func_it) # CLEANUP-FIXTURE PART + # -- USE func_it: From outer scope (here). + except StopIteration: + return False + # -- NOT-NEEDED: + # except Exception: + # raise # -- CLEANUP-FIXTURE PART raised an error. + else: + message = "Has more than one yield: %r" % fixture_func + raise InvalidFixtureError(message) + + # -- GENERATOR: Get instance via call and register cleanup. + # Then perform setup_fixture to ensure that cleanup_fixture() + # is called even if setup-error occurs. + # NOTE: cleanup_fixture() is called when context layer is removed. + func_it = fixture_func(context, *fixture_args, **fixture_kwargs) + context.add_cleanup(cleanup_fixture) + setup_result = next(func_it) # SETUP-FIXTURE PART (may raise error) + else: + # -- CASE: Fixture is a simple function (setup-only) + # NOTE: No cleanup is registered (not needed by intention of user) + setup_result = fixture_func(context, *fixture_args, **fixture_kwargs) + return setup_result + + +def use_fixture(fixture_func, context, *fixture_args, **fixture_kwargs): + """Use fixture (function) and call it to perform its setup-part. + + The fixture-function is similar to a :func:`contextlib.contextmanager` + (and contains a yield-statement to separate setup and cleanup parts). + If it contains a yield-statement, it registers a context-cleanup function + to the context object to perform the fixture-cleanup at the end of the + current scope when the context layer is removed + (and all context-cleanup functions are called). + + Therefore, fixture-cleanup is performed after scenario, feature or test-run + (depending on when its fixture-setup is performed). + + .. 
code-block:: python + + # -- FILE: behave4my_project/fixtures.py (or: features/environment.py) + from behave import fixture + from somewhere.browser import FirefoxBrowser + + @fixture(name="fixture.browser.firefox") + def browser_firefox(context, *args, **kwargs): + # -- SETUP-FIXTURE PART: + context.browser = FirefoxBrowser(*args, **kwargs) + yield context.browser + # -- CLEANUP-FIXTURE PART: + context.browser.shutdown() + + .. code-block:: python + + # -- FILE: features/environment.py + from behave import use_fixture + from behave4my_project.fixtures import browser_firefox + + def before_tag(context, tag): + if tag == "fixture.browser.firefox": + use_fixture(browser_firefox, context, timeout=10) + + + :param fixture_func: Fixture function to use. + :param context: Context object to use. + :param fixture_args: Positional args, passed to the fixture function. + :param fixture_kwargs: Additional kwargs, passed to the fixture function. + :return: Setup result object (may be None). + """ + return _setup_fixture(fixture_func, context, *fixture_args, **fixture_kwargs) + + +def use_fixture_by_tag(tag, context, fixture_registry): + """Process any fixture-tag to perform :func:`use_fixture()` for its fixture. + If the fixture-tag is known, the fixture data is retrieved from the + fixture registry. + + .. code-block:: python + + # -- FILE: features/environment.py + from behave.fixture import use_fixture_by_tag + from behave4my_project.fixtures import browser_firefox, browser_chrome + + # -- SCHEMA 1: fixture_func + fixture_registry1 = { + "fixture.browser.firefox": browser_firefox, + "fixture.browser.chrome": browser_chrome, + } + # -- SCHEMA 2: fixture_func, fixture_args, fixture_kwargs + fixture_registry2 = { + "fixture.browser.firefox": (browser_firefox, (), dict(timeout=10)), + "fixture.browser.chrome": (browser_chrome, (), dict(timeout=12)), + } + + def before_tag(context, tag): + if tag.startswith("fixture."): + return use_fixture_by_tag(tag, context, fixture_registry1) + # -- MORE: Tag processing steps ... + + + :param tag: Fixture tag to process. + :param context: Runtime context object, used for :func:`use_fixture()`. + :param fixture_registry: Registry maps fixture-tag to fixture data. + :return: Fixture-setup result (same as: use_fixture()) + :raises LookupError: If fixture-tag/fixture is unknown. + :raises ValueError: If fixture data type is not supported. + """ + fixture_data = fixture_registry.get(tag, None) + if fixture_data is None: + raise LookupError("Unknown fixture-tag: %s" % tag) + + if callable(fixture_data): + fixture_func = fixture_data + return use_fixture(fixture_func, context) + elif isinstance(fixture_data, (tuple, list)): + assert len(fixture_data) == 3 + fixture_func, fixture_args, fixture_kwargs = fixture_data + return use_fixture(fixture_func, context, *fixture_args, **fixture_kwargs) + else: + message = "fixture_data: Expected tuple or fixture-func, but is: %r" + raise ValueError(message % fixture_data) + + +def fixture_call_params(fixture_func, *args, **kwargs): + # -- SEE: use_composite_fixture_with() + return (fixture_func, args, kwargs) + + +def use_composite_fixture_with(context, fixture_funcs_with_params): + """Helper function when complex fixtures should be created and + safe-cleanup is needed even if a setup-fixture-error occurs. + + This function ensures that fixture-cleanup is performed + for every fixture that was set up before the setup-error occurred. + + .. 
code-block:: python + + # -- BAD-EXAMPLE: Simplistic composite-fixture + # NOTE: Created fixtures (fixture1) are not cleaned up. + @fixture + def foo_and_bad0(context, *args, **kwargs): + the_fixture1 = setup_fixture_foo(*args, **kwargs) + the_fixture2 = setup_fixture_bar_with_error("OOPS-HERE") + yield (the_fixture1, the_fixture2) # NOT_REACHED. + # -- NOT_REACHED: Due to fixture2-setup-error. + the_fixture1.cleanup() # NOT-CALLED (SAD). + the_fixture2.cleanup() # OOPS, the_fixture2 is None (if called). + + .. code-block:: python + + # -- GOOD-EXAMPLE: Sane composite-fixture + # NOTE: Fixture foo.cleanup() occurs even after fixture2-setup-error. + @fixture + def foo(context, *args, **kwargs): + the_fixture = setup_fixture_foo(*args, **kwargs) + yield the_fixture + cleanup_fixture_foo(the_fixture) + + @fixture + def bad_with_setup_error(context, *args, **kwargs): + raise RuntimeError("BAD-FIXTURE-SETUP") + + # -- SOLUTION 1: With use_fixture() + @fixture + def foo_and_bad1(context, *args, **kwargs): + the_fixture1 = use_fixture(foo, context, *args, **kwargs) + the_fixture2 = use_fixture(bad_with_setup_error, context, "OOPS") + return (the_fixture1, the_fixture2) # NOT_REACHED + + # -- SOLUTION 2: With use_composite_fixture_with() + @fixture + def foo_and_bad2(context, *args, **kwargs): + the_fixture = use_composite_fixture_with(context, [ + fixture_call_params(foo, *args, **kwargs), + fixture_call_params(bad_with_setup_error, "OOPS") + ]) + return the_fixture + + :param context: Runtime context object, used for all fixtures. + :param fixture_funcs_with_params: List of fixture functions with params. + :return: List of created fixture objects. + """ + composite_fixture = [] + for fixture_func, args, kwargs in fixture_funcs_with_params: + the_fixture = use_fixture(fixture_func, context, *args, **kwargs) + composite_fixture.append(the_fixture) + return composite_fixture + + + +# ------------------------------------------------------------------------------- +# DECORATORS: +# ------------------------------------------------------------------------------- +def fixture(func=None, name=None, pattern=None): + """Fixture decorator (currently mostly syntactic sugar). + + .. code-block:: python + + # -- FILE: features/environment.py + # CASE FIXTURE-GENERATOR-FUNCTION (like @contextlib.contextmanager): + @fixture + def foo(context, *args, **kwargs): + the_fixture = setup_fixture_foo(*args, **kwargs) + context.foo = the_fixture + yield the_fixture + cleanup_fixture_foo(the_fixture) + + # CASE FIXTURE-FUNCTION: No cleanup or cleanup via context-cleanup. + @fixture(name="fixture.bar") + def bar(context, *args, **kwargs): + the_fixture = setup_fixture_bar(*args, **kwargs) + context.bar = the_fixture + context.add_cleanup(the_fixture.cleanup) + return the_fixture + + :param name: Specifies the fixture tag name (as string). + + .. 
seealso:: + + * :func:`contextlib.contextmanager` decorator + * `@pytest.fixture`_ + """ + def mark_as_fixture(func, name=None, pattern=None): + func.name = name + func.pattern = pattern + func.behave_fixture = True + return func + + if func is None: + # CASE: @fixture() + # CASE: @fixture(name="foo") + def decorator(func): + return mark_as_fixture(func, name, pattern=pattern) + return decorator + elif callable(func): + # CASE: @fixture + return mark_as_fixture(func, name, pattern=pattern) + else: + # -- OOPS: Should be callable-object or None + message = "Invalid func: func=%r, name=%r" % (func, name) + raise TypeError(message) diff --git a/venv/Lib/site-packages/behave/formatter/__init__.py b/venv/Lib/site-packages/behave/formatter/__init__.py new file mode 100644 index 0000000..67cd796 --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/__init__.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- +""" +Root module for all behave formatters. +""" + +from __future__ import absolute_import +from behave.formatter import _builtins + +# ----------------------------------------------------------------------------- +# MODULE-INIT: +# ----------------------------------------------------------------------------- +_builtins.setup_formatters() diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..f8a8d3a Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/_builtins.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/_builtins.cpython-311.pyc new file mode 100644 index 0000000..d110c45 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/_builtins.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/_registry.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/_registry.cpython-311.pyc new file mode 100644 index 0000000..946a884 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/_registry.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/ansi_escapes.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/ansi_escapes.cpython-311.pyc new file mode 100644 index 0000000..918af82 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/ansi_escapes.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/base.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/base.cpython-311.pyc new file mode 100644 index 0000000..aa01a93 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/base.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/formatters.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/formatters.cpython-311.pyc new file mode 100644 index 0000000..b25210d Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/formatters.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/json.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/json.cpython-311.pyc new file mode 100644 index 0000000..db857ee Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/json.cpython-311.pyc differ diff --git 
a/venv/Lib/site-packages/behave/formatter/__pycache__/null.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/null.cpython-311.pyc new file mode 100644 index 0000000..718df10 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/null.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/plain.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/plain.cpython-311.pyc new file mode 100644 index 0000000..d16a3a7 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/plain.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/pretty.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/pretty.cpython-311.pyc new file mode 100644 index 0000000..7b179a1 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/pretty.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/progress.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/progress.cpython-311.pyc new file mode 100644 index 0000000..c1277d2 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/progress.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/rerun.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/rerun.cpython-311.pyc new file mode 100644 index 0000000..a4861f8 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/rerun.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/sphinx_steps.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/sphinx_steps.cpython-311.pyc new file mode 100644 index 0000000..71e7e9b Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/sphinx_steps.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/sphinx_util.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/sphinx_util.cpython-311.pyc new file mode 100644 index 0000000..9c60e25 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/sphinx_util.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/steps.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/steps.cpython-311.pyc new file mode 100644 index 0000000..3ba60a7 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/steps.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/__pycache__/tags.cpython-311.pyc b/venv/Lib/site-packages/behave/formatter/__pycache__/tags.cpython-311.pyc new file mode 100644 index 0000000..47f7c38 Binary files /dev/null and b/venv/Lib/site-packages/behave/formatter/__pycache__/tags.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/behave/formatter/_builtins.py b/venv/Lib/site-packages/behave/formatter/_builtins.py new file mode 100644 index 0000000..e83d608 --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/_builtins.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +""" +Knowledge base of all built-in formatters. 
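+
+EXAMPLE (editor's illustrative sketch, not in the original file; the
+format names below come from the _BUILTIN_FORMATS table that follows)::
+
+    behave -f progress3 features/
+    behave -f json.pretty -o build/report.json features/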
+""" + +from __future__ import absolute_import +from behave.formatter import _registry + + +# ----------------------------------------------------------------------------- +# DATA: +# ----------------------------------------------------------------------------- +# SCHEMA: formatter.name, formatter.class(_name) +_BUILTIN_FORMATS = [ + # pylint: disable=bad-whitespace + ("plain", "behave.formatter.plain:PlainFormatter"), + ("pretty", "behave.formatter.pretty:PrettyFormatter"), + ("json", "behave.formatter.json:JSONFormatter"), + ("json.pretty", "behave.formatter.json:PrettyJSONFormatter"), + ("null", "behave.formatter.null:NullFormatter"), + ("progress", "behave.formatter.progress:ScenarioProgressFormatter"), + ("progress2", "behave.formatter.progress:StepProgressFormatter"), + ("progress3", "behave.formatter.progress:ScenarioStepProgressFormatter"), + ("rerun", "behave.formatter.rerun:RerunFormatter"), + ("tags", "behave.formatter.tags:TagsFormatter"), + ("tags.location", "behave.formatter.tags:TagsLocationFormatter"), + ("steps", "behave.formatter.steps:StepsFormatter"), + ("steps.doc", "behave.formatter.steps:StepsDocFormatter"), + ("steps.catalog", "behave.formatter.steps:StepsCatalogFormatter"), + ("steps.usage", "behave.formatter.steps:StepsUsageFormatter"), + ("sphinx.steps", "behave.formatter.sphinx_steps:SphinxStepsFormatter"), +] + +# ----------------------------------------------------------------------------- +# FUNCTIONS: +# ----------------------------------------------------------------------------- +def setup_formatters(): + """Register all built-in formatters (lazy-loaded).""" + _registry.register_formats(_BUILTIN_FORMATS) diff --git a/venv/Lib/site-packages/behave/formatter/_registry.py b/venv/Lib/site-packages/behave/formatter/_registry.py new file mode 100644 index 0000000..0f7ad94 --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/_registry.py @@ -0,0 +1,135 @@ +# -*- coding: utf-8 -*- + +import sys +import warnings +from behave.formatter.base import Formatter, StreamOpener +from behave.importer import LazyDict, LazyObject, parse_scoped_name, load_module +import six + + +# ----------------------------------------------------------------------------- +# FORMATTER REGISTRY: +# ----------------------------------------------------------------------------- +_formatter_registry = LazyDict() + +def format_iter(): + return iter(_formatter_registry.keys()) + +def format_items(resolved=False): + if resolved: + # -- ENSURE: All formatter classes are loaded (and resolved). + _formatter_registry.load_all(strict=False) + return iter(_formatter_registry.items()) + +def register_as(name, formatter_class): + """ + Register formatter class with given name. + + :param name: Name for this formatter (as identifier). + :param formatter_class: Formatter class to register. + + .. since:: 1.2.5 + Parameter ordering starts with name. + """ + if not isinstance(name, six.string_types): + # -- REORDER-PARAMS: Used old ordering before behave-1.2.5 (2015). + warnings.warn("Use parameter ordering: name, formatter_class (for: %s)"\ + % formatter_class) + _formatter_class = name + name = formatter_class + formatter_class = _formatter_class + + if isinstance(formatter_class, six.string_types): + # -- SPEEDUP-STARTUP: Only import formatter_class when used. 
+ scoped_formatter_class_name = formatter_class + formatter_class = LazyObject(scoped_formatter_class_name) + assert (isinstance(formatter_class, LazyObject) or + issubclass(formatter_class, Formatter)) + _formatter_registry[name] = formatter_class + +def register(formatter_class): + register_as(formatter_class.name, formatter_class) + +def register_formats(formats): + """Register many format items into the registry. + + :param formats: List of format items (as: (name, class|class_name)). + """ + for formatter_name, formatter_class_name in formats: + register_as(formatter_name, formatter_class_name) + +def load_formatter_class(scoped_class_name): + """Load a formatter class by using its scoped class name. + + :param scoped_class_name: Formatter module and class name (as string). + :return: Formatter class. + :raises: ValueError, if scoped_class_name is invalid. + :raises: ImportError, if module cannot be loaded or class is not in module. + """ + if ":" not in scoped_class_name: + message = 'REQUIRE: "module:class", but was: "%s"' % scoped_class_name + raise ValueError(message) + module_name, class_name = parse_scoped_name(scoped_class_name) + formatter_module = load_module(module_name) + formatter_class = getattr(formatter_module, class_name, None) + if formatter_class is None: + raise ImportError("CLASS NOT FOUND: %s" % scoped_class_name) + return formatter_class + + +def select_formatter_class(formatter_name): + """Resolve the formatter class by: + + * using one of the registered ones + * loading a user-specified formatter class (like: my.module_name:MyClass) + + :param formatter_name: Name of the formatter or scoped name (as string). + :return: Formatter class + :raises: LookupError, if not found. + :raises: ImportError, if a user-specific formatter class cannot be loaded. + :raises: ValueError, if formatter name is invalid. + """ + try: + return _formatter_registry[formatter_name] + except KeyError: + # -- NOT-FOUND: + if ":" not in formatter_name: + raise + # -- OTHERWISE: SCOPED-NAME, try to load a user-specific formatter. + # MAY RAISE: ImportError + return load_formatter_class(formatter_name) + + +def is_formatter_valid(formatter_name): + """Checks if the formatter is known (registered) or loadable. + + :param formatter_name: Format(ter) name to check (as string). + :return: True, if formatter is known or can be loaded. + """ + try: + formatter_class = select_formatter_class(formatter_name) + return issubclass(formatter_class, Formatter) + except (LookupError, ImportError, ValueError): + return False + + +def make_formatters(config, stream_openers): + """Build a list of formatters, used by a behave runner. + + :param config: Configuration object to use. + :param stream_openers: List of stream openers to use (for formatters). + :return: List of formatters. + :raises: LookupError/KeyError if a formatter class is unknown. + :raises: ImportError, if a formatter class cannot be loaded/resolved. 
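+
+    EXAMPLE (editor's illustrative sketch, not in the original file;
+    assumes a prepared ``config`` object with ``config.format = ["pretty"]``)::
+
+        import sys
+        from behave.formatter.base import StreamOpener
+
+        stream_openers = [StreamOpener(stream=sys.stdout)]
+        formatters = make_formatters(config, stream_openers)
+        assert formatters[0].name == "pretty"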
+ """ + # -- BUILD: Formatter list + default_stream_opener = StreamOpener(stream=sys.stdout) + formatter_list = [] + for i, name in enumerate(config.format): + stream_opener = default_stream_opener + if i < len(stream_openers): + stream_opener = stream_openers[i] + formatter_class = select_formatter_class(name) + formatter_object = formatter_class(stream_opener, config) + formatter_list.append(formatter_object) + return formatter_list diff --git a/venv/Lib/site-packages/behave/formatter/ansi_escapes.py b/venv/Lib/site-packages/behave/formatter/ansi_escapes.py new file mode 100644 index 0000000..6c93e6c --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/ansi_escapes.py @@ -0,0 +1,94 @@ +# -*- coding: utf-8 -*- +""" +Provides ANSI escape sequences for coloring/formatting output in ANSI terminals. +""" + +from __future__ import absolute_import +import os +import re + +colors = { + "black": u"\x1b[30m", + "red": u"\x1b[31m", + "green": u"\x1b[32m", + "yellow": u"\x1b[33m", + "blue": u"\x1b[34m", + "magenta": u"\x1b[35m", + "cyan": u"\x1b[36m", + "white": u"\x1b[37m", + "grey": u"\x1b[90m", + "bold": u"\x1b[1m", +} + +aliases = { + "untested": "cyan", # SAME-COLOR AS: skipped + "undefined": "yellow", + "pending": "yellow", + "executing": "grey", + "failed": "red", + "passed": "green", + "outline": "cyan", + "skipped": "cyan", + "comments": "grey", + "tag": "cyan", +} + +escapes = { + "reset": u"\x1b[0m", + "up": u"\x1b[1A", +} + +if "GHERKIN_COLORS" in os.environ: + new_aliases = [p.split("=") for p in os.environ["GHERKIN_COLORS"].split(":")] + aliases.update(dict(new_aliases)) + +for alias in aliases: + escapes[alias] = "".join([colors[c] for c in aliases[alias].split(",")]) + arg_alias = alias + "_arg" + arg_seq = aliases.get(arg_alias, aliases[alias] + ",bold") + escapes[arg_alias] = "".join([colors[c] for c in arg_seq.split(",")]) + + +# pylint: disable=anomalous-backslash-in-string +def up(n): + return u"\x1b[%dA" % n + + +_ANSI_ESCAPE_PATTERN = re.compile(u"\x1b\[\d+[mA]", re.UNICODE) +# pylint: enable=anomalous-backslash-in-string +def strip_escapes(text): + """ + Removes ANSI escape sequences from text (if any are contained). + + :param text: Text that may or may not contain ANSI escape sequences. + :return: Text without ANSI escape sequences. + """ + return _ANSI_ESCAPE_PATTERN.sub("", text) + + +def use_ansi_escape_colorbold_composites(): # pragma: no cover + """ + Patch for "sphinxcontrib-ansi" to process the following ANSI escapes + correctly (set-color set-bold sequences): + + ESC[{color}mESC[1m => ESC[{color};1m + + Reapply aliases to ANSI escapes mapping. 
+ """ + # NOT-NEEDED: global escapes + color_codes = {} + for color_name, color_escape in colors.items(): + color_code = color_escape.replace(u"\x1b[", u"").replace(u"m", u"") + color_codes[color_name] = color_code + + # pylint: disable=redefined-outer-name + for alias in aliases: + parts = [color_codes[c] for c in aliases[alias].split(",")] + composite_escape = u"\x1b[{0}m".format(u";".join(parts)) + escapes[alias] = composite_escape + + arg_alias = alias + "_arg" + arg_seq = aliases.get(arg_alias, aliases[alias] + ",bold") + parts = [color_codes[c] for c in arg_seq.split(",")] + composite_escape = u"\x1b[{0}m".format(u";".join(parts)) + escapes[arg_alias] = composite_escape diff --git a/venv/Lib/site-packages/behave/formatter/base.py b/venv/Lib/site-packages/behave/formatter/base.py new file mode 100644 index 0000000..84f11c6 --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/base.py @@ -0,0 +1,188 @@ +# -*- coding: utf-8 -*- + +# UNUSED: import sys +# UNUSED: import codecs +import os.path +from behave.textutil import select_best_encoding, \ + ensure_stream_with_encoder as _ensure_stream_with_encoder + + +class StreamOpener(object): + """Provides a transport vehicle to open the formatter output stream + when the formatter needs it. + In addition, it provides the formatter with more control: + + * when a stream is opened + * if a stream is opened at all + * the name (filename/dirname) of the output stream + * let it decide if directory mode is used instead of file mode + """ + # FORMER: default_encoding = "UTF-8" + default_encoding = select_best_encoding() + + def __init__(self, filename=None, stream=None, encoding=None): + if not encoding: + encoding = self.default_encoding + if stream: + stream = self.ensure_stream_with_encoder(stream, encoding) + self.name = filename + self.stream = stream + self.encoding = encoding + self.should_close_stream = not stream # Only for not pre-opened ones. + + @staticmethod + def ensure_dir_exists(directory): + if directory and not os.path.isdir(directory): + os.makedirs(directory) + + @classmethod + def ensure_stream_with_encoder(cls, stream, encoding=None): + return _ensure_stream_with_encoder(stream, encoding) + + def open(self): + if not self.stream or self.stream.closed: + self.ensure_dir_exists(os.path.dirname(self.name)) + stream = open(self.name, "w") + # stream = codecs.open(self.name, "w", encoding=self.encoding) + stream = self.ensure_stream_with_encoder(stream, self.encoding) + self.stream = stream # -- Keep stream for house-keeping. + self.should_close_stream = True + assert self.should_close_stream + return self.stream + + def close(self): + """ + Close the stream, if it was opened by this stream_opener. + Skip closing for sys.stdout and pre-opened streams. + :return: True, if stream was closed. + """ + closed = False + if self.stream and self.should_close_stream: + closed = getattr(self.stream, "closed", False) + if not closed: + self.stream.close() + closed = True + self.stream = None + return closed + + +class Formatter(object): + """ + Base class for all formatter classes. + A formatter is an extension point (variation point) for the runner logic. + A formatter is called while processing model elements. + + Processing Logic (simplified, without ScenarioOutline and skip logic):: + + for feature in runner.features: + formatter = make_formatters(...) 
+ formatter.uri(feature.filename) + formatter.feature(feature) + for scenario in feature.scenarios: + formatter.scenario(scenario) + for step in scenario.all_steps: + formatter.step(step) + step_match = step_registry.find_match(step) + formatter.match(step_match) + if step_match: + step_match.run() + else: + step.status = Status.undefined + formatter.result(step.status) + formatter.eof() # -- FEATURE-END + formatter.close() + """ + name = None + description = None + + def __init__(self, stream_opener, config): + self.stream_opener = stream_opener + self.stream = stream_opener.stream + self.config = config + + @property + def stdout_mode(self): + return not self.stream_opener.name + + def open(self): + """ + Ensure that the output stream is open. + Triggers the stream opener protocol (if necessary). + + :return: Output stream to use (just opened or already open). + """ + if not self.stream: + self.stream = self.stream_opener.open() + return self.stream + + def uri(self, uri): + """Called before processing a file (normally a feature file). + + :param uri: URI or filename (as string). + """ + pass + + def feature(self, feature): + """Called before a feature is executed. + + :param feature: Feature object (as :class:`behave.model.Feature`) + """ + pass + + def background(self, background): + """Called when a (Feature) Background is provided. + Called after :meth:`feature()` is called. + Called before processing any scenarios or scenario outlines. + + :param background: Background object (as :class:`behave.model.Background`) + """ + pass + + def scenario(self, scenario): + """Called before a scenario is executed (or ScenarioOutline scenarios). + + :param scenario: Scenario object (as :class:`behave.model.Scenario`) + """ + pass + + def step(self, step): + """Called before a step is executed (and matched). + NOTE: Normally called before scenario is executed for all its steps. + + :param step: Step object (as :class:`behave.model.Step`) + """ + pass + + def match(self, match): + """Called when a step was matched against its step implementation. + + :param match: Registered step (as Match), undefined step (as NoMatch). + """ + pass + + def result(self, step): + """Called after processing a step (when the step result is known). + + :param step: Step object with result (after being executed/skipped). + """ + pass + + def eof(self): + """Called after processing a feature (or a feature file).""" + pass + + def close(self): + """Called before the formatter is no longer used + (stream/io compatibility). + """ + self.close_stream() + + def close_stream(self): + """Close the stream, but only if this is needed. + This step is skipped if the stream is sys.stdout. + """ + if self.stream: + # -- DELEGATE STREAM-CLOSING: To stream_opener + assert self.stream is self.stream_opener.stream + self.stream_opener.close() + self.stream = None # -- MARK CLOSED. diff --git a/venv/Lib/site-packages/behave/formatter/formatters.py b/venv/Lib/site-packages/behave/formatter/formatters.py new file mode 100644 index 0000000..ae8fdd0 --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/formatters.py @@ -0,0 +1,57 @@ +# -*- coding: utf-8 -*- +""" +Deprecated module. Functionality was split up into: + + * behave.formatter._registry (generic core functionality) + * behave.formatter._builtins (registration of known, builtin formatters) + +.. since:: 1.2.5a1 + Deprecated, use "behave.formatter._registry" or "behave.formatter._builtins". 
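+
+EXAMPLE (editor's illustrative sketch of the migration; ``MyFormatter``
+is a hypothetical formatter class)::
+
+    # -- OLD (this module; note the reversed parameter order):
+    from behave.formatter.formatters import register_as
+    register_as(MyFormatter, "my-format")
+
+    # -- NEW:
+    from behave.formatter._registry import register_as
+    register_as("my-format", MyFormatter)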
+""" + +from __future__ import absolute_import +import warnings +from behave.formatter import _registry + +warnings.simplefilter("once", DeprecationWarning) +warnings.warn("Use 'behave.formatter._registry' instead.", DeprecationWarning) + +# ----------------------------------------------------------------------------- +# FORMATTER REGISTRY: +# ----------------------------------------------------------------------------- +def register_as(formatter_class, name): + """ + Register formatter class with given name. + + :param formatter_class: Formatter class to register. + :param name: Name for this formatter (as identifier). + """ + warnings.warn("Use behave.formatter._registry.register_as() instead.", + DeprecationWarning, stacklevel=2) + _registry.register_as(name, formatter_class) + + +def register(formatter_class): + register_as(formatter_class, formatter_class.name) + + +def get_formatter(config, stream_openers): + warnings.warn("Use make_formatters() instead", + DeprecationWarning, stacklevel=2) + return _registry.make_formatters(config, stream_openers) + + +# ----------------------------------------------------------------------------- +# SETUP: +# ----------------------------------------------------------------------------- +def setup_formatters(): + warnings.warn("Use behave.formatter._builtins instead", + DeprecationWarning, stacklevel=2) + from behave.formatter import _builtins + _builtins.setup_formatters() + + +# ----------------------------------------------------------------------------- +# MODULE-INIT: +# ----------------------------------------------------------------------------- +# DISABLED: setup_formatters() diff --git a/venv/Lib/site-packages/behave/formatter/json.py b/venv/Lib/site-packages/behave/formatter/json.py new file mode 100644 index 0000000..6da0d59 --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/json.py @@ -0,0 +1,253 @@ +# -*- coding: utf-8 -*- +""" +This module provides `JSON`_ formatters for :mod:`behave`: + +* json: Generates compact JSON output +* json.pretty: Generates readable JSON output + +.. _JSON: http://json.org +""" + +from __future__ import absolute_import +import base64 +from behave.formatter.base import Formatter +from behave.model_core import Status +import six +try: + import json +except ImportError: + import simplejson as json + + +# ----------------------------------------------------------------------------- +# CLASS: JSONFormatter +# ----------------------------------------------------------------------------- +class JSONFormatter(Formatter): + name = "json" + description = "JSON dump of test run" + dumps_kwargs = {} + split_text_into_lines = True # EXPERIMENT for better readability. + + json_number_types = six.integer_types + (float,) + json_scalar_types = json_number_types + (six.text_type, bool, type(None)) + + def __init__(self, stream_opener, config): + super(JSONFormatter, self).__init__(stream_opener, config) + # -- ENSURE: Output stream is open. 
+ self.stream = self.open() + self.feature_count = 0 + self.current_feature = None + self.current_feature_data = None + self.current_scenario = None + self._step_index = 0 + + def reset(self): + self.current_feature = None + self.current_feature_data = None + self.current_scenario = None + self._step_index = 0 + + # -- FORMATTER API: + def uri(self, uri): + pass + + def feature(self, feature): + self.reset() + self.current_feature = feature + self.current_feature_data = { + "keyword": feature.keyword, + "name": feature.name, + "tags": list(feature.tags), + "location": six.text_type(feature.location), + "status": None, # Not known before feature run. + } + element = self.current_feature_data + if feature.description: + element["description"] = feature.description + + def background(self, background): + element = self.add_feature_element({ + "type": "background", + "keyword": background.keyword, + "name": background.name, + "location": six.text_type(background.location), + "steps": [], + }) + if background.name: + element["name"] = background.name + self._step_index = 0 + + # -- ADD BACKGROUND STEPS: Support *.feature file regeneration. + for step_ in background.steps: + self.step(step_) + + def scenario(self, scenario): + self.finish_current_scenario() + self.current_scenario = scenario + + element = self.add_feature_element({ + "type": "scenario", + "keyword": scenario.keyword, + "name": scenario.name, + "tags": scenario.tags, + "location": six.text_type(scenario.location), + "steps": [], + "status": None, + }) + if scenario.description: + element["description"] = scenario.description + self._step_index = 0 + + @classmethod + def make_table(cls, table): + table_data = { + "headings": table.headings, + "rows": [list(row) for row in table.rows] + } + return table_data + + def step(self, step): + s = { + "keyword": step.keyword, + "step_type": step.step_type, + "name": step.name, + "location": six.text_type(step.location), + } + + if step.text: + text = step.text + if self.split_text_into_lines and "\n" in text: + text = text.splitlines() + s["text"] = text + if step.table: + s["table"] = self.make_table(step.table) + element = self.current_feature_element + element["steps"].append(s) + + def match(self, match): + args = [] + for argument in match.arguments: + argument_value = argument.value + if not isinstance(argument_value, self.json_scalar_types): + # -- OOPS: Avoid invalid JSON format w/ custom types. + # Use raw string (original) instead. + argument_value = argument.original + assert isinstance(argument_value, self.json_scalar_types) + arg = { + "value": argument_value, + } + if argument.name: + arg["name"] = argument.name + if argument.original != argument_value: + # -- REDUNDANT DATA COMPRESSION: Suppress for strings. + arg["original"] = argument.original + args.append(arg) + + match_data = { + "location": six.text_type(match.location) or "", + "arguments": args, + } + if match.location: + # -- NOTE: match.location=None occurs for undefined steps. + steps = self.current_feature_element["steps"] + steps[self._step_index]["match"] = match_data + + def result(self, step): + steps = self.current_feature_element["steps"] + steps[self._step_index]["result"] = { + "status": step.status.name, + "duration": step.duration, + } + if step.error_message and step.status == Status.failed: + # -- OPTIONAL: Provided for failed steps. 
+ error_message = step.error_message + if self.split_text_into_lines and "\n" in error_message: + error_message = error_message.splitlines() + result_element = steps[self._step_index]["result"] + result_element["error_message"] = error_message + self._step_index += 1 + + def embedding(self, mime_type, data): + step = self.current_feature_element["steps"][-1] + step["embeddings"].append({ + "mime_type": mime_type, + "data": base64.b64encode(data).replace("\n", ""), + }) + + def eof(self): + """ + End of feature + """ + if not self.current_feature_data: + return + + # -- NORMAL CASE: Write collected data of current feature. + self.finish_current_scenario() + self.update_status_data() + + if self.feature_count == 0: + # -- FIRST FEATURE: + self.write_json_header() + else: + # -- NEXT FEATURE: + self.write_json_feature_separator() + + self.write_json_feature(self.current_feature_data) + self.reset() + self.feature_count += 1 + + def close(self): + if self.feature_count == 0: + # -- FIRST FEATURE: Corner case when no features are provided. + self.write_json_header() + self.write_json_footer() + self.close_stream() + + # -- JSON-DATA COLLECTION: + def add_feature_element(self, element): + assert self.current_feature_data is not None + if "elements" not in self.current_feature_data: + self.current_feature_data["elements"] = [] + self.current_feature_data["elements"].append(element) + return element + + @property + def current_feature_element(self): + assert self.current_feature_data is not None + return self.current_feature_data["elements"][-1] + + def update_status_data(self): + assert self.current_feature + assert self.current_feature_data + self.current_feature_data["status"] = self.current_feature.status.name + + def finish_current_scenario(self): + if self.current_scenario: + status_name = self.current_scenario.status.name + self.current_feature_element["status"] = status_name + + # -- JSON-WRITER: + def write_json_header(self): + self.stream.write("[\n") + + def write_json_footer(self): + self.stream.write("\n]\n") + + def write_json_feature(self, feature_data): + self.stream.write(json.dumps(feature_data, **self.dumps_kwargs)) + self.stream.flush() + + def write_json_feature_separator(self): + self.stream.write(",\n\n") + + +# ----------------------------------------------------------------------------- +# CLASS: PrettyJSONFormatter +# ----------------------------------------------------------------------------- +class PrettyJSONFormatter(JSONFormatter): + """ + Provides readable/comparable textual JSON output. + """ + name = "json.pretty" + description = "JSON dump of test run (human readable)" + dumps_kwargs = {"indent": 2, "sort_keys": True} diff --git a/venv/Lib/site-packages/behave/formatter/null.py b/venv/Lib/site-packages/behave/formatter/null.py new file mode 100644 index 0000000..42582b0 --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/null.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import +from behave.formatter.base import Formatter + +class NullFormatter(Formatter): + """ + Provides formatter that does not output anything. + Implements the NULL pattern for a formatter (similar like: /dev/null). + """ + name = "null" + description = "Provides formatter that does not output anything." 
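EDITOR'S NOTE -- illustrative sketch, not part of this diff: the Formatter base class (base.py above) and register_as() (_registry.py above) are all that is needed to plug in a custom formatter; every callback has a default no-op implementation, so only the hooks of interest must be overridden. A minimal, hypothetical example (module and class names are invented)::

    # -- FILE: my_formatter.py (hypothetical)
    from behave.formatter.base import Formatter
    from behave.formatter._registry import register_as

    class CountingFormatter(Formatter):
        """Counts processed steps (editor's example, not part of behave)."""
        name = "counting"
        description = "Counts steps and reports the total on close."

        def __init__(self, stream_opener, config):
            super(CountingFormatter, self).__init__(stream_opener, config)
            self.step_count = 0

        def step(self, step):
            # -- Called once per step, before it is matched/executed.
            self.step_count += 1

        def close(self):
            # -- ENSURE: Output stream is open, then delegate stream closing.
            self.stream = self.open()
            self.stream.write("steps: %d\n" % self.step_count)
            self.close_stream()

    register_as("counting", CountingFormatter)

With this registration in place, "behave -f counting" would resolve the class via select_formatter_class() shown earlier.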
diff --git a/venv/Lib/site-packages/behave/formatter/plain.py b/venv/Lib/site-packages/behave/formatter/plain.py new file mode 100644 index 0000000..9f1f833 --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/plain.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import +from behave.formatter.base import Formatter +from behave.model_describe import ModelPrinter +from behave.textutil import make_indentation + + +# ----------------------------------------------------------------------------- +# CLASS: PlainFormatter +# ----------------------------------------------------------------------------- +class PlainFormatter(Formatter): + """ + Provides a simple plain formatter without coloring/formatting. + The formatter now also displays: + + * multi-line text (doc-strings) + * table + * tags (maybe) + """ + name = "plain" + description = "Very basic formatter with maximum compatibility" + + SHOW_MULTI_LINE = True + SHOW_TAGS = False + SHOW_ALIGNED_KEYWORDS = False + DEFAULT_INDENT_SIZE = 2 + RAISE_OUTPUT_ERRORS = True + + def __init__(self, stream_opener, config, **kwargs): + super(PlainFormatter, self).__init__(stream_opener, config) + self.steps = [] + self.show_timings = config.show_timings + self.show_multiline = config.show_multiline and self.SHOW_MULTI_LINE + self.show_aligned_keywords = self.SHOW_ALIGNED_KEYWORDS + self.show_tags = self.SHOW_TAGS + self.indent_size = self.DEFAULT_INDENT_SIZE + # -- ENSURE: Output stream is open. + self.stream = self.open() + self.printer = ModelPrinter(self.stream) + # -- LAZY-EVALUATE: + self._multiline_indentation = None + + @property + def multiline_indentation(self): + if self._multiline_indentation is None: + offset = 0 + if self.show_aligned_keywords: + offset = 2 + indentation = make_indentation(3 * self.indent_size + offset) + self._multiline_indentation = indentation + return self._multiline_indentation + + def reset_steps(self): + self.steps = [] + + def write_tags(self, tags, indent=None): + if tags and self.show_tags: + indent = indent or "" + text = " @".join(tags) + self.stream.write(u"%s@%s\n" % (indent, text)) + + # -- IMPLEMENT-INTERFACE FOR: Formatter + def feature(self, feature): + self.reset_steps() + self.write_tags(feature.tags) + self.stream.write(u"%s: %s\n" % (feature.keyword, feature.name)) + + def background(self, background): + self.reset_steps() + indent = make_indentation(self.indent_size) + text = u"%s%s: %s\n" % (indent, background.keyword, background.name) + self.stream.write(text) + + def scenario(self, scenario): + self.reset_steps() + self.stream.write(u"\n") + indent = make_indentation(self.indent_size) + text = u"%s%s: %s\n" % (indent, scenario.keyword, scenario.name) + self.write_tags(scenario.tags, indent) + self.stream.write(text) + + def step(self, step): + self.steps.append(step) + + def result(self, step): + """ + Process the result of a step (after step execution). + + :param step: Step object with result to process. + """ + step = self.steps.pop(0) + indent = make_indentation(2 * self.indent_size) + if self.show_aligned_keywords: + # -- RIGHT-ALIGN KEYWORDS (max. keyword width: 6): + text = u"%s%6s %s ... " % (indent, step.keyword, step.name) + else: + text = u"%s%s %s ... 
" % (indent, step.keyword, step.name) + self.stream.write(text) + + status_text = step.status.name + if self.show_timings: + status_text += " in %0.3fs" % step.duration + + unicode_errors = 0 + if step.error_message: + try: + self.stream.write(u"%s\n%s\n" % (status_text, step.error_message)) + except UnicodeError as e: + unicode_errors += 1 + self.stream.write(u"%s\n" % status_text) + self.stream.write(u"%s while writing error message: %s\n" % \ + (e.__class__.__name__, e)) + if self.RAISE_OUTPUT_ERRORS: + raise + else: + self.stream.write(u"%s\n" % status_text) + + if self.show_multiline: + if step.text: + try: + self.doc_string(step.text) + except UnicodeError as e: + unicode_errors += 1 + self.stream.write(u"%s while writing docstring: %s\n" % \ + (e.__class__.__name__, e)) + if self.RAISE_OUTPUT_ERRORS: + raise + if step.table: + self.table(step.table) + + def eof(self): + self.stream.write("\n") + + # -- MORE: Formatter helpers + def doc_string(self, doc_string): + self.printer.print_docstring(doc_string, self.multiline_indentation) + + def table(self, table): + self.printer.print_table(table, self.multiline_indentation) + + +# ----------------------------------------------------------------------------- +# CLASS: Plain0Formatter +# ----------------------------------------------------------------------------- +class Plain0Formatter(PlainFormatter): + """ + Similar to old plain formatter without support for: + + * multi-line text + * tables + * tags + """ + name = "plain0" + description = "Very basic formatter with maximum compatibility" + SHOW_MULTI_LINE = False + SHOW_TAGS = False + SHOW_ALIGNED_KEYWORDS = False diff --git a/venv/Lib/site-packages/behave/formatter/pretty.py b/venv/Lib/site-packages/behave/formatter/pretty.py new file mode 100644 index 0000000..b6f0eac --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/pretty.py @@ -0,0 +1,351 @@ +# -*- coding: utf8 -*- + +from __future__ import absolute_import, division +import sys +from behave.formatter.ansi_escapes import escapes, up +from behave.formatter.base import Formatter +from behave.model_core import Status +from behave.model_describe import escape_cell, escape_triple_quotes +from behave.textutil import indent, text as _text +import six +from six.moves import range, zip + + +# ----------------------------------------------------------------------------- +# TERMINAL SUPPORT: +# ----------------------------------------------------------------------------- +DEFAULT_WIDTH = 80 +DEFAULT_HEIGHT = 24 + +def get_terminal_size(): + if sys.platform == "windows": + # Autodetecting the size of a Windows command window is left as an + # exercise for the reader. Prizes may be awarded for the best answer. 
+ return (DEFAULT_WIDTH, DEFAULT_HEIGHT) + + try: + import fcntl + import termios + import struct + + zero_struct = struct.pack("HHHH", 0, 0, 0, 0) + result = fcntl.ioctl(0, termios.TIOCGWINSZ, zero_struct) + h, w, hp1, wp1 = struct.unpack("HHHH", result) + + return w or DEFAULT_WIDTH, h or DEFAULT_HEIGHT + except Exception: # pylint: disable=broad-except + return (DEFAULT_WIDTH, DEFAULT_HEIGHT) + + +# ----------------------------------------------------------------------------- +# COLORING SUPPORT: +# ----------------------------------------------------------------------------- +class MonochromeFormat(object): + def text(self, text): # pylint: disable=no-self-use + assert isinstance(text, six.text_type) + return text + +class ColorFormat(object): + def __init__(self, status): + self.status = status + + def text(self, text): + assert isinstance(text, six.text_type) + return escapes[self.status] + text + escapes["reset"] + + +# ----------------------------------------------------------------------------- +# CLASS: PrettyFormatter +# ----------------------------------------------------------------------------- +class PrettyFormatter(Formatter): + # pylint: disable=too-many-instance-attributes + name = "pretty" + description = "Standard colourised pretty formatter" + + def __init__(self, stream_opener, config): + super(PrettyFormatter, self).__init__(stream_opener, config) + # -- ENSURE: Output stream is open. + self.stream = self.open() + isatty = getattr(self.stream, "isatty", lambda: True) + stream_supports_colors = isatty() + self.monochrome = not config.color or not stream_supports_colors + self.show_source = config.show_source + self.show_timings = config.show_timings + self.show_multiline = config.show_multiline + self.formats = None + self.display_width = get_terminal_size()[0] + + # -- UNUSED: self.tag_statement = None + self.steps = [] + self._uri = None + self._match = None + self.statement = None + self.indentations = [] + self.step_lines = 0 + + + def reset(self): + # -- UNUSED: self.tag_statement = None + self.steps = [] + self._uri = None + self._match = None + self.statement = None + self.indentations = [] + self.step_lines = 0 + + def uri(self, uri): + self.reset() + self._uri = uri + + def feature(self, feature): + #self.print_comments(feature.comments, '') + self.print_tags(feature.tags, '') + self.stream.write(u"%s: %s" % (feature.keyword, feature.name)) + if self.show_source: + # pylint: disable=redefined-builtin + format = self.format("comments") + self.stream.write(format.text(u" # %s" % feature.location)) + self.stream.write("\n") + self.print_description(feature.description, " ", False) + self.stream.flush() + + def background(self, background): + self.replay() + self.statement = background + + def scenario(self, scenario): + self.replay() + self.statement = scenario + + def replay(self): + self.print_statement() + self.print_steps() + self.stream.flush() + + def step(self, step): + self.steps.append(step) + + def match(self, match): + self._match = match + self.print_statement() + self.print_step(Status.executing, self._match.arguments, + self._match.location, self.monochrome) + self.stream.flush() + + def result(self, step): + if not self.monochrome: + lines = self.step_lines + 1 + if self.show_multiline: + if step.table: + lines += len(step.table.rows) + 1 + if step.text: + lines += len(step.text.splitlines()) + 2 + self.stream.write(up(lines)) + arguments = [] + location = None + if self._match: + arguments = self._match.arguments + location = self._match.location 
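+        # -- EDITOR'S NOTE (not in the original file): re-print the step
+        # line with its final status; on color terminals the cursor was
+        # moved up above to overwrite the "executing" line.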
+ self.print_step(step.status, arguments, location, True) + if step.error_message: + self.stream.write(indent(step.error_message.strip(), u" ")) + self.stream.write("\n\n") + self.stream.flush() + + def arg_format(self, key): + return self.format(key + "_arg") + + def format(self, key): + if self.monochrome: + if self.formats is None: + self.formats = MonochromeFormat() + return self.formats + # -- OTHERWISE: + if self.formats is None: + self.formats = {} + # pylint: disable=redefined-builtin + format = self.formats.get(key, None) + if format is not None: + return format + format = self.formats[key] = ColorFormat(key) + return format + + def eof(self): + self.replay() + self.stream.write("\n") + self.stream.flush() + + def table(self, table): + cell_lengths = [] + all_rows = [table.headings] + table.rows + for row in all_rows: + lengths = [len(escape_cell(c)) for c in row] + cell_lengths.append(lengths) + + max_lengths = [] + for col in range(0, len(cell_lengths[0])): + max_lengths.append(max([c[col] for c in cell_lengths])) + + for i, row in enumerate(all_rows): + #for comment in row.comments: + # self.stream.write(" %s\n" % comment.value) + self.stream.write(" |") + for j, (cell, max_length) in enumerate(zip(row, max_lengths)): + self.stream.write(" ") + self.stream.write(self.color(cell, None, j)) + self.stream.write(" " * (max_length - cell_lengths[i][j])) + self.stream.write(" |") + self.stream.write("\n") + self.stream.flush() + + def doc_string(self, doc_string): + #self.stream.write(' """' + doc_string.content_type + '\n') + doc_string = _text(doc_string) + prefix = u" " + self.stream.write(u'%s"""\n' % prefix) + doc_string = escape_triple_quotes(indent(doc_string, prefix)) + self.stream.write(doc_string) + self.stream.write(u'\n%s"""\n' % prefix) + self.stream.flush() + + # def doc_string(self, doc_string): + # from behave.model_describe import ModelDescriptor + # prefix = " " + # text = ModelDescriptor.describe_docstring(doc_string, prefix) + # self.stream.write(text) + # self.stream.flush() + + # -- UNUSED: + # def exception(self, exception): + # exception_text = _text(exception) + # self.stream.write(self.format("failed").text(exception_text) + "\n") + # self.stream.flush() + + def color(self, cell, statuses, _color): # pylint: disable=no-self-use + if statuses: + return escapes["color"] + escapes["reset"] + # -- OTHERWISE: + return escape_cell(cell) + + def indented_text(self, text, proceed): + if not text: + return u"" + + if proceed: + indentation = self.indentations.pop(0) + else: + indentation = self.indentations[0] + + indentation = u" " * indentation + return u"%s # %s" % (indentation, text) + + def calculate_location_indentations(self): + line_widths = [] + for s in [self.statement] + self.steps: + string = s.keyword + " " + s.name + line_widths.append(len(string)) + max_line_width = max(line_widths) + self.indentations = [max_line_width - width for width in line_widths] + + def print_statement(self): + if self.statement is None: + return + + self.calculate_location_indentations() + self.stream.write(u"\n") + #self.print_comments(self.statement.comments, " ") + if hasattr(self.statement, "tags"): + self.print_tags(self.statement.tags, u" ") + self.stream.write(u" %s: %s " % (self.statement.keyword, + self.statement.name)) + + location = self.indented_text(six.text_type(self.statement.location), True) + if self.show_source: + self.stream.write(self.format("comments").text(location)) + self.stream.write("\n") + #self.print_description(self.statement.description, u" ") + 
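+        # -- NOTE: The location comment above is column-aligned via
+        #    calculate_location_indentations(): each entry is
+        #    max_line_width - len(keyword + " " + name). Illustrative values:
+        #        "Given a step"     -> width 12 -> indentation 4
+        #        "When another one" -> width 16 -> indentation 0
+        #    so every "# file:line" comment starts in the same column.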
self.statement = None + + def print_steps(self): + while self.steps: + self.print_step(Status.skipped, [], None, True) + + def print_step(self, status, arguments, location, proceed): + if proceed: + step = self.steps.pop(0) + else: + step = self.steps[0] + + text_format = self.format(status.name) + arg_format = self.arg_format(status.name) + + #self.print_comments(step.comments, " ") + self.stream.write(" ") + self.stream.write(text_format.text(step.keyword + " ")) + line_length = 5 + len(step.keyword) + + step_name = six.text_type(step.name) + + text_start = 0 + for arg in arguments: + if arg.end <= text_start: + # -- SKIP-OVER: Optional and nested regexp args + # - Optional regexp args (unmatched: None). + # - Nested regexp args that are already processed. + continue + # -- VALID, MATCHED ARGUMENT: + assert arg.original is not None + text = step_name[text_start:arg.start] + self.stream.write(text_format.text(text)) + line_length += len(text) + self.stream.write(arg_format.text(arg.original)) + line_length += len(arg.original) + text_start = arg.end + + if text_start != len(step_name): + text = step_name[text_start:] + self.stream.write(text_format.text(text)) + line_length += (len(text)) + + if self.show_source: + location = six.text_type(location) + if self.show_timings and status in (Status.passed, Status.failed): + location += " %0.3fs" % step.duration + location = self.indented_text(location, proceed) + self.stream.write(self.format("comments").text(location)) + line_length += len(location) + elif self.show_timings and status in (Status.passed, Status.failed): + timing = "%0.3fs" % step.duration + timing = self.indented_text(timing, proceed) + self.stream.write(self.format("comments").text(timing)) + line_length += len(timing) + self.stream.write("\n") + + self.step_lines = int((line_length - 1) / self.display_width) + + if self.show_multiline: + if step.text: + self.doc_string(step.text) + if step.table: + self.table(step.table) + + def print_tags(self, tags, indentation): + if not tags: + return + line = " ".join("@" + tag for tag in tags) + self.stream.write(indentation + line + "\n") + + def print_comments(self, comments, indentation): + if not comments: + return + + self.stream.write(indent([c.value for c in comments], indentation)) + self.stream.write("\n") + + def print_description(self, description, indentation, newline=True): + if not description: + return + + self.stream.write(indent(description, indentation)) + if newline: + self.stream.write("\n") diff --git a/venv/Lib/site-packages/behave/formatter/progress.py b/venv/Lib/site-packages/behave/formatter/progress.py new file mode 100644 index 0000000..6d8adf6 --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/progress.py @@ -0,0 +1,283 @@ +# -*- coding: utf-8 -*- +""" +Provides 2 dotted progress formatters: + + * ScenarioProgressFormatter (scope: scenario) + * StepProgressFormatter (scope: step) + +A "dot" character that represents the result status is printed after +executing a scope item. +""" + +from __future__ import absolute_import +import six +from behave.formatter.base import Formatter +from behave.model_core import Status +from behave.textutil import text as _text + + +# ----------------------------------------------------------------------------- +# CLASS: ProgressFormatterBase +# ----------------------------------------------------------------------------- +class ProgressFormatterBase(Formatter): + """ + Provides formatter base class for different variants of progress formatters. 
+    A progress formatter shows an abbreviated, compact dotted progress bar,
+    similar to unittest output (in terse mode).
+    """
+    # -- MAP: step.status to short dot_status representation.
+    dot_status = {
+        "passed": ".",
+        "failed": "F",
+        "error": "E",   # Caught exception, but not an AssertionError
+        "skipped": "S",
+        "untested": "_",
+        "undefined": "U",
+    }
+    show_timings = False
+
+    def __init__(self, stream_opener, config):
+        super(ProgressFormatterBase, self).__init__(stream_opener, config)
+        # -- ENSURE: Output stream is open.
+        self.stream = self.open()
+        self.steps = []
+        self.failures = []
+        self.current_feature = None
+        self.current_scenario = None
+        self.show_timings = config.show_timings and self.show_timings
+
+    def reset(self):
+        self.steps = []
+        self.failures = []
+        self.current_feature = None
+        self.current_scenario = None
+
+    # -- FORMATTER API:
+    def feature(self, feature):
+        self.current_feature = feature
+        self.stream.write("%s " % six.text_type(feature.filename))
+        self.stream.flush()
+
+    def background(self, background):
+        pass
+
+    def scenario(self, scenario):
+        """
+        Process the next scenario.
+        But first, report the status of the last scenario.
+        """
+        self.report_scenario_completed()
+        self.current_scenario = scenario
+
+    def step(self, step):
+        self.steps.append(step)
+
+    def result(self, step):
+        self.steps.pop(0)
+        self.report_step_progress(step)
+
+    def eof(self):
+        """
+        Called at end of a feature.
+        It would be better to have a hook that is called after all features.
+        """
+        self.report_scenario_completed()
+        self.report_feature_completed()
+        self.report_failures()
+        self.stream.flush()
+        self.reset()
+
+    # -- SPECIFIC PART:
+    def report_step_progress(self, step):
+        """Report the progress on the current step.
+        The default implementation is empty.
+        It should be overridden by a concrete class.
+        """
+        pass
+
+    def report_scenario_progress(self):
+        """Report the progress for the current/last scenario.
+        The default implementation is empty.
+        It should be overridden by a concrete class.
+        """
+        pass
+
+    def report_feature_completed(self):
+        """Hook called when a feature is completed to perform the last tasks.
+        """
+        pass
+
+    def report_scenario_completed(self):
+        """Hook called when a scenario is completed to perform the last tasks.
+        """
+        self.report_scenario_progress()
+
+    def report_feature_duration(self):
+        if self.show_timings and self.current_feature:
+            self.stream.write(u" # %.3fs" % self.current_feature.duration)
+        self.stream.write("\n")
+
+    def report_scenario_duration(self):
+        if self.show_timings and self.current_scenario:
+            self.stream.write(u" # %.3fs" % self.current_scenario.duration)
+        self.stream.write("\n")
+
+    def report_failures(self):
+        if self.failures:
+            separator = "-" * 80
+            self.stream.write(u"%s\n" % separator)
+            for step in self.failures:
+                self.stream.write(u"FAILURE in step '%s':\n" % step.name)
+                self.stream.write(u" Feature: %s\n" % step.feature.name)
+                self.stream.write(u" Scenario: %s\n" % step.scenario.name)
+                self.stream.write(u"%s\n" % step.error_message)
+                if step.exception:
+                    self.stream.write(u"exception: %s\n" % step.exception)
+            self.stream.write(u"%s\n" % separator)
+
+
+# -----------------------------------------------------------------------------
+# CLASS: ScenarioProgressFormatter
+# -----------------------------------------------------------------------------
+class ScenarioProgressFormatter(ProgressFormatterBase):
+    """
+    Report dotted progress for each scenario similar to unittest.
+ """ + name = "progress" + description = "Shows dotted progress for each executed scenario." + + def report_scenario_progress(self): + """ + Report the progress for the current/last scenario. + """ + if not self.current_scenario: + return # SKIP: No results to report for first scenario. + # -- NORMAL-CASE: + status_name = self.current_scenario.status.name + dot_status = self.dot_status[status_name] + if status_name == "failed": + # MAYBE TODO: self.failures.append(result) + pass + self.stream.write(dot_status) + self.stream.flush() + + def report_feature_completed(self): + self.report_feature_duration() + +# ----------------------------------------------------------------------------- +# CLASS: StepProgressFormatter +# ----------------------------------------------------------------------------- +class StepProgressFormatter(ProgressFormatterBase): + """ + Report dotted progress for each step similar to unittest. + """ + name = "progress2" + description = "Shows dotted progress for each executed step." + + def report_step_progress(self, step): + """Report the progress for each step.""" + dot_status = self.dot_status[step.status.name] + if step.status == Status.failed: + if (step.exception and + not isinstance(step.exception, AssertionError)): + # -- ISA-ERROR: Some Exception + dot_status = self.dot_status["error"] + step.feature = self.current_feature + step.scenario = self.current_scenario + self.failures.append(step) + self.stream.write(dot_status) + self.stream.flush() + + def report_feature_completed(self): + self.report_feature_duration() + + +# ----------------------------------------------------------------------------- +# CLASS: ScenarioStepProgressFormatter +# ----------------------------------------------------------------------------- +class ScenarioStepProgressFormatter(StepProgressFormatter): + """ + Shows detailed dotted progress for both each step of a scenario. + Differs from StepProgressFormatter by: + + * showing scenario names (as prefix scenario step progress) + * showing failures after each scenario (if necessary) + + EXAMPLE: + $ behave -f progress3 features + Feature with failing scenario # features/failing_scenario.feature + Simple scenario with last failing step ....F + ----------------------------------------------------------------------- + FAILURE in step 'last step fails' (features/failing_scenario.feature:7): + Assertion Failed: xxx + ----------------------------------------------------------------------- + """ + name = "progress3" + description = "Shows detailed progress for each step of a scenario." + indent_size = 2 + scenario_prefix = " " * indent_size + + # -- FORMATTER API: + def feature(self, feature): + self.current_feature = feature + self.stream.write(u"%s # %s" % (feature.name, feature.filename)) + + def scenario(self, scenario): + """Process the next scenario.""" + # -- LAST SCENARIO: Report failures (if any). + self.report_scenario_completed() + + # -- NEW SCENARIO: + assert not self.failures + self.current_scenario = scenario + scenario_name = scenario.name + if scenario_name: + scenario_name += " " + self.stream.write(u"%s%s " % (self.scenario_prefix, scenario_name)) + self.stream.flush() + + # -- DISABLED: + # def eof(self): + # has_scenarios = self.current_feature and self.current_scenario + # super(ScenarioStepProgressFormatter, self).eof() + # if has_scenarios: + # # -- EMPTY-LINE between 2 features. 
+    #         self.stream.write("\n")
+
+    # -- PROGRESS FORMATTER DETAILS:
+    # @overridden
+    def report_feature_completed(self):
+        # -- SKIP: self.report_feature_duration()
+        has_scenarios = self.current_feature and self.current_scenario
+        if has_scenarios:
+            # -- EMPTY-LINE between 2 features.
+            self.stream.write("\n")
+
+    def report_scenario_completed(self):
+        self.report_scenario_progress()
+        self.report_scenario_duration()
+        self.report_failures()
+        self.failures = []
+
+    def report_failures(self):
+        if self.failures:
+            separator = "-" * 80
+            self.stream.write(u"%s\n" % separator)
+            unicode_errors = 0
+            for step in self.failures:
+                try:
+                    self.stream.write(u"FAILURE in step '%s' (%s):\n" % \
+                                      (step.name, step.location))
+                    self.stream.write(u"%s\n" % step.error_message)
+                    self.stream.write(u"%s\n" % separator)
+                except UnicodeError as e:
+                    self.stream.write(u"%s while reporting failure in %s\n" % \
+                                      (e.__class__.__name__, step.location))
+                    self.stream.write(u"ERROR: %s\n" % \
+                                      _text(e, encoding=self.stream.encoding))
+                    unicode_errors += 1
+
+            if unicode_errors:
+                msg = u"HINT: %d unicode errors occurred during failure reporting.\n"
+                self.stream.write(msg % unicode_errors)
+            self.stream.flush()
diff --git a/venv/Lib/site-packages/behave/formatter/rerun.py b/venv/Lib/site-packages/behave/formatter/rerun.py
new file mode 100644
index 0000000..46dff11
--- /dev/null
+++ b/venv/Lib/site-packages/behave/formatter/rerun.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+"""
+Provides a formatter that simplifies rerunning the failing scenarios
+of the last test run. It writes a text file with the file locations of
+the failing scenarios, like:
+
+    # -- file:rerun.features
+    # RERUN: Failing scenarios during last test run.
+    features/alice.feature:10
+    features/alice.feature:42
+    features/bob.feature:67
+
+To rerun the failing scenarios, use:
+
+    behave @rerun_failing.features
+
+Normally, you put the RerunFormatter into the behave configuration file:
+
+    # -- file:behave.ini
+    [behave]
+    format = rerun
+    outfiles = rerun_failing.features
+"""
+
+from __future__ import absolute_import
+from datetime import datetime
+from os.path import relpath
+import os
+from behave.formatter.base import Formatter
+from behave.model_core import Status
+
+
+# -----------------------------------------------------------------------------
+# CLASS: RerunFormatter
+# -----------------------------------------------------------------------------
+class RerunFormatter(Formatter):
+    """
+    Provides a formatter class that emits a summary of which scenarios failed
+    during the last test run. This output can be used to rerun the tests
+    with the failed scenarios.
+ """ + name = "rerun" + description = "Emits scenario file locations of failing scenarios" + + show_timestamp = False + show_failed_scenarios_descriptions = False + + def __init__(self, stream_opener, config): + super(RerunFormatter, self).__init__(stream_opener, config) + self.failed_scenarios = [] + self.current_feature = None + + def reset(self): + self.failed_scenarios = [] + self.current_feature = None + + # -- FORMATTER API: + def feature(self, feature): + self.current_feature = feature + + def eof(self): + """Called at end of a feature.""" + if self.current_feature and self.current_feature.status == Status.failed: + # -- COLLECT SCENARIO FAILURES: + for scenario in self.current_feature.walk_scenarios(): + if scenario.status == Status.failed: + self.failed_scenarios.append(scenario) + + # -- RESET: + self.current_feature = None + assert self.current_feature is None + + def close(self): + """Called at end of test run.""" + stream_name = self.stream_opener.name + if self.failed_scenarios: + # -- ENSURE: Output stream is open. + self.stream = self.open() + self.report_scenario_failures() + elif stream_name and os.path.exists(stream_name): + # -- ON SUCCESS: Remove last rerun file with its failures. + os.remove(self.stream_opener.name) + + # -- FINALLY: + self.close_stream() + + # -- SPECIFIC-API: + def report_scenario_failures(self): + assert self.failed_scenarios + # -- SECTION: Banner + message = u"# -- RERUN: %d failing scenarios during last test run.\n" + self.stream.write(message % len(self.failed_scenarios)) + if self.show_timestamp: + now = datetime.now().replace(microsecond=0) + self.stream.write("# NOW: %s\n"% now.isoformat(" ")) + + # -- SECTION: Textual summary in comments. + if self.show_failed_scenarios_descriptions: + current_feature = None + for index, scenario in enumerate(self.failed_scenarios): + if current_feature != scenario.filename: + if current_feature is not None: + self.stream.write(u"#\n") + current_feature = scenario.filename + short_filename = relpath(scenario.filename, os.getcwd()) + self.stream.write(u"# %s\n" % short_filename) + self.stream.write(u"# %4d: %s\n" % \ + (scenario.line, scenario.name)) + self.stream.write("\n") + + # -- SECTION: Scenario file locations, ala: "alice.feature:10" + for scenario in self.failed_scenarios: + self.stream.write(u"%s\n" % scenario.location) + self.stream.write("\n") diff --git a/venv/Lib/site-packages/behave/formatter/sphinx_steps.py b/venv/Lib/site-packages/behave/formatter/sphinx_steps.py new file mode 100644 index 0000000..e317747 --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/sphinx_steps.py @@ -0,0 +1,370 @@ +# -*- coding: utf-8 -*- +""" +Provides a formatter that generates Sphinx-based documentation +of available step definitions (step implementations). + +TODO: + * Post-processor for step docstrings. + * Solution for requires: table, text + * i18n keywords + +.. seealso:: + http://sphinx-doc.org/ + +.. note:: REQUIRES docutils + :mod:`docutils` are needed to generate step-label for step references. 
+""" + +from __future__ import absolute_import, print_function +from operator import attrgetter +import inspect +import os.path +import sys +from behave.formatter.steps import AbstractStepsFormatter +from behave.formatter import sphinx_util +from behave.model import Table +try: + # -- NEEDED FOR: step-labels (and step-refs) + from docutils.nodes import fully_normalize_name + has_docutils = True +except ImportError: + has_docutils = False + + +# ----------------------------------------------------------------------------- +# HELPER CLASS: +# ----------------------------------------------------------------------------- +class StepsModule(object): + """ + Value object to keep track of step definitions that belong to same module. + """ + + def __init__(self, module_name, step_definitions=None): + self.module_name = module_name + self.step_definitions = step_definitions or [] + self._name = None + self._filename = None + + + @property + def name(self): + if self._name is None: + # -- DISCOVER ON DEMAND: From step definitions (module). + # REQUIRED: To discover complete canonical module name. + module = self.module + if module: + # -- USED-BY: Imported step libraries. + module_name = self.module.__name__ + else: + # -- USED-BY: features/steps/*.py (without __init__.py) + module_name = self.module_name + self._name = module_name + return self._name + + @property + def filename(self): + if not self._filename: + if self.step_definitions: + filename = inspect.getfile(self.step_definitions[0].func) + self._filename = os.path.relpath(filename) + return self._filename + + @property + def module(self): + if self.step_definitions: + return inspect.getmodule(self.step_definitions[0].func) + return sys.modules.get(self.module_name) + + @property + def module_doc(self): + module = self.module + if module: + return inspect.getdoc(module) + return None + + def append(self, step_definition): + self.step_definitions.append(step_definition) + + +# ----------------------------------------------------------------------------- +# CLASS: SphinxStepsDocumentGenerator +# ----------------------------------------------------------------------------- +class SphinxStepsDocumentGenerator(object): + """ + Provides document generator class that generates Sphinx-based + documentation for step definitions. The primary purpose is to: + + * help the step-library provider/writer + * simplify self-documentation of step-libraries + + EXAMPLE: + step_definitions = ... # Collect from step_registry + doc_generator = SphinxStepsDocumentGenerator(step_definitions, "output") + doc_generator.write_docs() + + .. seealso:: http://sphinx-doc.org/ + """ + default_step_definition_doc = """\ +.. todo:: + Step definition description is missing. +""" + shows_step_module_info = True + shows_step_module_overview = True + make_step_index_entries = True + make_step_labels = has_docutils + + document_separator = "# -- DOCUMENT-END " + "-" * 60 + step_document_prefix = "step_module." + step_heading_prefix = "**Step:** " + + def __init__(self, step_definitions, destdir=None, stream=None): + self.step_definitions = step_definitions + self.destdir = destdir + self.stream = stream + self.document = None + + @property + def stdout_mode(self): + """ + Indicates that output towards stdout should be used. 
+ """ + return self.stream is not None + + @staticmethod + def describe_step_definition(step_definition, step_type=None): + if not step_type: + step_type = step_definition.step_type or "step" + + if step_type == "step": + step_type_text = "Given/When/Then" + else: + step_type_text = step_type.capitalize() + # -- ESCAPE: Some chars required for ReST documents (like backticks) + step_text = step_definition.pattern + if "`" in step_text: + step_text = step_text.replace("`", r"\`") + return u"%s %s" % (step_type_text, step_text) + + def ensure_destdir_exists(self): + assert self.destdir + if os.path.isfile(self.destdir): + print("OOPS: remove %s" % self.destdir) + os.remove(self.destdir) + if not os.path.exists(self.destdir): + os.makedirs(self.destdir) + + def ensure_document_is_closed(self): + if self.document and not self.stdout_mode: + self.document.close() + self.document = None + + def discover_step_modules(self): + step_modules_map = {} + for step_definition in self.step_definitions: + assert step_definition.step_type is not None + step_filename = step_definition.location.filename + step_module = step_modules_map.get(step_filename, None) + if not step_module: + filename = inspect.getfile(step_definition.func) + module_name = inspect.getmodulename(filename) + assert module_name, \ + "step_definition: %s" % step_definition.location + step_module = StepsModule(module_name) + step_modules_map[step_filename] = step_module + step_module.append(step_definition) + + step_modules = sorted(step_modules_map.values(), key=attrgetter("name")) + for module in step_modules: + step_definitions = sorted(module.step_definitions, + key=attrgetter("location")) + module.step_definitions = step_definitions + return step_modules + + def create_document(self, filename): + if not (filename.endswith(".rst") or filename.endswith(".txt")): + filename += ".rst" + if self.stdout_mode: + stream = self.stream + document = sphinx_util.DocumentWriter(stream, should_close=False) + else: + self.ensure_destdir_exists() + filename = os.path.join(self.destdir, filename) + document = sphinx_util.DocumentWriter.open(filename) + return document + + def write_docs(self): + step_modules = self.discover_step_modules() + self.write_step_module_index(step_modules) + for step_module in step_modules: + self.write_step_module(step_module) + return len(step_modules) + + def write_step_module_index(self, step_modules, filename="index.rst"): + document = self.create_document(filename) + document.write(".. _docid.steps:\n\n") + document.write_heading("Step Definitions") + document.write("""\ +The following step definitions are provided here. + +---- + +""") + entries = sorted([self.step_document_prefix + module.name + for module in step_modules]) + document.write_toctree(entries, maxdepth=1) + document.close() + if self.stdout_mode: + sys.stdout.write("\n%s\n" % self.document_separator) + + def write_step_module(self, step_module): + self.ensure_document_is_closed() + document_name = self.step_document_prefix + step_module.name + self.document = self.create_document(document_name) + self.document.write(".. 
_docid.steps.%s:\n" % step_module.name) + self.document.write_heading(step_module.name, index_id=step_module.name) + if self.shows_step_module_info: + self.document.write(":Module: %s\n" % step_module.name) + self.document.write(":Filename: %s\n" % step_module.filename) + self.document.write("\n") + if step_module.module_doc: + module_doc = step_module.module_doc.strip() + self.document.write("%s\n\n" % module_doc) + if self.shows_step_module_overview: + self.document.write_heading("Step Overview", level=1) + self.write_step_module_overview(step_module.step_definitions) + + self.document.write_heading("Step Definitions", level=1) + for step_definition in step_module.step_definitions: + self.write_step_definition(step_definition) + + # -- FINALLY: Clean up resources. + self.document.close() + self.document = None + if self.stdout_mode: + sys.stdout.write("\n%s\n" % self.document_separator) + + def write_step_module_overview(self, step_definitions): + assert self.document + headings = [u"Step Definition", u"Given", u"When", u"Then", u"Step"] + table = Table(headings) + step_type_cols = { + # -- pylint: disable=bad-whitespace + "given": [u" x", u" ", u" ", u" "], + "when": [u" ", u" x", u" ", u" "], + "then": [u" ", u" ", u" x", u" "], + "step": [u" x", u" x", u" x", u" x"], + } + for step_definition in step_definitions: + row = [self.describe_step_definition(step_definition)] + row.extend(step_type_cols[step_definition.step_type]) + table.add_row(row) + self.document.write_table(table) + + @staticmethod + def make_step_definition_index_id(step): + if step.step_type == "step": + index_kinds = ("Given", "When", "Then", "Step") + else: + keyword = step.step_type.capitalize() + index_kinds = (keyword,) + + schema = "single: %s%s; %s %s" + index_parts = [] + for index_kind in index_kinds: + keyword = index_kind + word = " step" + if index_kind == "Step": + keyword = "Given/When/Then" + word = "" + part = schema % (index_kind, word, keyword, step.pattern) + index_parts.append(part) + joiner = "\n " + return joiner + joiner.join(index_parts) + + def make_step_definition_doc(self, step): + doc = inspect.getdoc(step.func) + if not doc: + doc = self.default_step_definition_doc + doc = doc.strip() + return doc + + def write_step_definition(self, step): + assert self.document + step_text = self.describe_step_definition(step) + if step_text.startswith("* "): + step_text = step_text[2:] + index_id = None + if self.make_step_index_entries: + index_id = self.make_step_definition_index_id(step) + + heading = step_text + step_label = None + if self.step_heading_prefix: + heading = self.step_heading_prefix + step_text + if has_docutils and self.make_step_labels: + # -- ADD STEP-LABEL (supports: step-refs by name) + # EXAMPLE: See also :ref:`When my step does "{something}"`. + step_label = fully_normalize_name(step_text) + # SKIP-HERE: self.document.write(".. _%s:\n\n" % step_label) + self.document.write_heading(heading, level=2, index_id=index_id, + label=step_label) + step_definition_doc = self.make_step_definition_doc(step) + self.document.write("%s\n" % step_definition_doc) + self.document.write("\n") + + + +# ----------------------------------------------------------------------------- +# CLASS: SphinxStepsFormatter +# ----------------------------------------------------------------------------- +class SphinxStepsFormatter(AbstractStepsFormatter): + """ + Provides formatter class that generates Sphinx-based documentation + for all registered step definitions. 
The primary purpose is to: + + * help the step-library provider/writer + * simplify self-documentation of step-libraries + + .. note:: + Supports dry-run mode. + Supports destination directory mode to write multiple documents. + """ + name = "sphinx.steps" + description = "Generate sphinx-based documentation for step definitions." + doc_generator_class = SphinxStepsDocumentGenerator + + def __init__(self, stream_opener, config): + super(SphinxStepsFormatter, self).__init__(stream_opener, config) + self.destdir = stream_opener.name + + @property + def step_definitions(self): + """Derive step definitions from step-registry.""" + steps = [] + for step_type, step_definitions in self.step_registry.steps.items(): + for step in step_definitions: + step.step_type = step_type + steps.append(step) + return steps + + # -- FORMATTER-API: + def close(self): + """Called at end of test run.""" + if not self.step_registry: + self.discover_step_definitions() + self.report() + + # -- SPECIFIC-API: + def create_document_generator(self): + generator_class = self.doc_generator_class + if self.stdout_mode: + return generator_class(self.step_definitions, stream=self.stream) + # -- OTHERWISE: + return generator_class(self.step_definitions, destdir=self.destdir) + + def report(self): + document_generator = self.create_document_generator() + document_counts = document_generator.write_docs() + if not self.stdout_mode: + msg = "%s: Written %s document(s) into directory '%s'.\n" + sys.stdout.write(msg % (self.name, document_counts, self.destdir)) diff --git a/venv/Lib/site-packages/behave/formatter/sphinx_util.py b/venv/Lib/site-packages/behave/formatter/sphinx_util.py new file mode 100644 index 0000000..d68a77a --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/sphinx_util.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +""" +Provides utility function for generating Sphinx-based documentation. +""" + +from __future__ import absolute_import +import codecs +from behave.textutil import compute_words_maxsize, text as _text +import six + + +# ----------------------------------------------------------------------------- +# SPHINX OUTPUT GENERATION FUNCTIONS: +# ----------------------------------------------------------------------------- +class DocumentWriter(object): + """ + Provides a simple "ReStructured Text Writer" to generate + Sphinx-based documentation. 
+ """ + heading_styles = ["=", "=", "-", "~"] + default_encoding = "utf-8" + default_toctree_title = "**Contents:**" + + def __init__(self, stream, filename=None, should_close=True): + self.stream = stream + self.filename = filename + self.should_close = should_close + + @classmethod + def open(cls, filename, encoding=None): + encoding = encoding or cls.default_encoding + stream = codecs.open(filename, "wb", encoding) + return cls(stream, filename) + + def write(self, *args): + return self.stream.write(*args) + + def close(self): + if self.stream and self.should_close: + self.stream.close() + self.stream = None + + def write_heading(self, heading, level=0, index_id=None, label=None): + assert self.stream + assert heading, "Heading should not be empty" + assert 0 <= level < len(self.heading_styles) + if level >= len(self.heading_styles): + level = len(self.heading_styles) - 1 + heading_size = len(heading) + heading_style = self.heading_styles[level] + if level == 0 and heading_size < 70: + heading_size = 70 + separator = heading_style * heading_size + if index_id: + if isinstance(index_id, (list, tuple)): + index_id = ", ".join(index_id) + self.stream.write(".. index:: %s\n\n" % index_id) + if label: + self.stream.write(".. _%s:\n\n" % label) + if level == 0: + self.stream.write("%s\n" % separator) + self.stream.write("%s\n" % heading) + self.stream.write("%s\n" % separator) + self.stream.write("\n") + + def write_toctree(self, entries, title=None, maxdepth=2): + if title is None: + title = self.default_toctree_title + line_prefix = " " * 4 + if title: + self.stream.write("%s\n\n" % title) + self.stream.write(".. toctree::\n") + self.stream.write("%s:maxdepth: %d\n\n" % (line_prefix, maxdepth)) + for entry in entries: + self.stream.write("%s%s\n" % (line_prefix, entry)) + self.stream.write("\n") + + def write_table(self, table): + """ + Write a ReST simple table. + + EXAMPLE: + ========================================= ===== ===== ===== ===== + Step Definition Given When Then Step + ========================================= ===== ===== ===== ===== + Given a file named "{filename}" contains + Then the file "{filename}" should ... + ========================================= ===== ===== ===== ===== + + :param table: Table to render (as `behave.model.Table`) + + .. todo:: + Column alignments + """ + assert self.stream + + # -- STEP: Determine table layout dimensions + cols_size = [] + separator_parts = [] + row_schema_parts = [] + for col_index, heading in enumerate(table.headings): + column = [six.text_type(row[col_index]) for row in table.rows] + column.append(heading) + column_size = compute_words_maxsize(column) + cols_size.append(column_size) + separator_parts.append("=" * column_size) + row_schema_parts.append("%-" + _text(column_size) + "s") + + separator = " ".join(separator_parts) + "\n" + row_schema = " ".join(row_schema_parts) + "\n" + self.stream.write("\n") # -- ENSURE: Empty line before table start. + self.stream.write(separator) + self.stream.write(row_schema % tuple(table.headings)) + self.stream.write(separator) + for row in table.rows: + self.stream.write(row_schema % tuple(row)) + self.stream.write(separator) + self.stream.write("\n") # -- ENSURE: Empty line after table end. 
diff --git a/venv/Lib/site-packages/behave/formatter/steps.py b/venv/Lib/site-packages/behave/formatter/steps.py new file mode 100644 index 0000000..77a02ac --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/steps.py @@ -0,0 +1,497 @@ +# -*- coding: UTF-8 -*- +""" +Provides a formatter that provides an overview of available step definitions +(step implementations). +""" + +from __future__ import absolute_import +from operator import attrgetter +import inspect +from six.moves import zip +from behave.formatter.base import Formatter +from behave.step_registry import StepRegistry, registry +from behave.textutil import \ + compute_words_maxsize, indent, make_indentation, text as _text +from behave import i18n + + +# ----------------------------------------------------------------------------- +# CLASS: AbstractStepsFormatter +# ----------------------------------------------------------------------------- +class AbstractStepsFormatter(Formatter): + """ + Provides a formatter base class that provides the common functionality + for formatter classes that operate on step definitions (implementations). + + .. note:: + Supports behave dry-run mode. + """ + step_types = ("given", "when", "then", "step") + + def __init__(self, stream_opener, config): + super(AbstractStepsFormatter, self).__init__(stream_opener, config) + self.step_registry = None + self.current_feature = None + self.shows_location = config.show_source + + def reset(self): + self.step_registry = None + self.current_feature = None + + def discover_step_definitions(self): + if self.step_registry is None: + self.step_registry = StepRegistry() + + for step_type in registry.steps: + step_definitions = tuple(registry.steps[step_type]) + for step_definition in step_definitions: + step_definition.step_type = step_type + self.step_registry.steps[step_type] = step_definitions + + # -- FORMATTER API: + def feature(self, feature): + self.current_feature = feature + if not self.step_registry: + # -- ONLY-ONCE: + self.discover_step_definitions() + + def eof(self): + """Called at end of a feature.""" + self.current_feature = None + + def close(self): + """Called at end of test run.""" + if not self.step_registry: + self.discover_step_definitions() + + if self.step_registry: + # -- ENSURE: Output stream is open. + self.stream = self.open() + self.report() + + # -- FINALLY: + self.close_stream() + + # -- REPORT SPECIFIC-API: + def report(self): + raise NotImplementedError() + + # pylint: disable=no-self-use + def describe_step_definition(self, step_definition, step_type=None): + if not step_type: + step_type = step_definition.step_type + assert step_type + return u"@%s('%s')" % (step_type, step_definition.pattern) + + +# ----------------------------------------------------------------------------- +# CLASS: StepsFormatter +# ----------------------------------------------------------------------------- +class StepsFormatter(AbstractStepsFormatter): + """ + Provides formatter class that provides an overview + which step definitions are available. + + EXAMPLE: + $ behave --dry-run -f steps features/ + GIVEN STEP DEFINITIONS[21]: + Given a new working directory + Given I use the current directory as working directory + Given a file named "{filename}" with + ... + Given a step passes + Given a step fails + + WHEN STEP DEFINITIONS[14]: + When I run "{command}" + ... 
+ When a step passes + When a step fails + + THEN STEP DEFINITIONS[45]: + Then the command should fail with returncode="{result:int}" + Then it should pass with + Then it should fail with + Then the command output should contain "{text}" + ... + Then a step passes + Then a step fails + + GENERIC STEP DEFINITIONS[13]: + * I remove the directory "{directory}" + * a file named "{filename}" exists + * a file named "{filename}" does not exist + ... + * a step passes + * a step fails + + .. note:: + Supports behave dry-run mode. + """ + name = "steps" + description = "Shows step definitions (step implementations)." + shows_location = True + min_location_column = 40 + + # -- REPORT SPECIFIC-API: + def report(self): + self.report_steps_by_type() + + def report_steps_by_type(self): + """Show an overview of the existing step implementations per step type. + """ + # pylint: disable=too-many-branches + assert set(self.step_types) == set(self.step_registry.steps.keys()) + language = self.config.lang or "en" + language_keywords = i18n.languages[language] + + for step_type in self.step_types: + steps = list(self.step_registry.steps[step_type]) + if step_type != "step": + steps.extend(self.step_registry.steps["step"]) + if not steps: + continue + + # -- PREPARE REPORT: For a step-type. + step_type_name = step_type.upper() + if step_type == "step": + step_keyword = "*" + step_type_name = "GENERIC" + else: + # step_keyword = step_type.capitalize() + keywords = language_keywords[step_type] + if keywords[0] == u"*": + assert len(keywords) > 1 + step_keyword = keywords[1] + else: + step_keyword = keywords[0] + + steps_text = [u"%s %s" % (step_keyword, step.pattern) + for step in steps] + if self.shows_location: + max_size = compute_words_maxsize(steps_text) + if max_size < self.min_location_column: + max_size = self.min_location_column + schema = u" %-" + _text(max_size) + "s # %s\n" + else: + schema = u" %s\n" + + # -- REPORT: + message = "%s STEP DEFINITIONS[%s]:\n" + self.stream.write(message % (step_type_name, len(steps))) + for step, step_text in zip(steps, steps_text): + if self.shows_location: + self.stream.write(schema % (step_text, step.location)) + else: + self.stream.write(schema % step_text) + self.stream.write("\n") + + +# ----------------------------------------------------------------------------- +# CLASS: StepsDocFormatter +# ----------------------------------------------------------------------------- +class StepsDocFormatter(AbstractStepsFormatter): + """ + Provides formatter class that shows the documentation of all registered + step definitions. The primary purpose is to provide help for a test writer. + + EXAMPLE: + $ behave --dry-run -f steps.doc features/ + @given('a file named "{filename}" with') + Function: step_a_file_named_filename_with() + Location: behave4cmd0/command_steps.py:50 + Creates a textual file with the content provided as docstring. + + @when('I run "{command}"') + Function: step_i_run_command() + Location: behave4cmd0/command_steps.py:80 + Run a command as subprocess, collect its output and returncode. + + @step('a file named "{filename}" exists') + Function: step_file_named_filename_exists() + Location: behave4cmd0/command_steps.py:305 + Verifies that a file with this filename exists. + + .. code-block:: gherkin + + Given a file named "abc.txt" exists + When a file named "abc.txt" exists + ... + + .. note:: + Supports behave dry-run mode. + """ + name = "steps.doc" + description = "Shows documentation for step definitions." 
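+    # -- NOTE: The report schemas in this module are built dynamically from a
+    #    measured column width, e.g. in report_steps_by_type() above with
+    #    max_size=40 the pieces u"%-" + _text(40) + "s # %s\n" join into
+    #    u"%-40s # %s\n", left-aligning the step text in a fixed-width column
+    #    before the "# location" comment (width value illustrative).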
+    shows_location = True
+    shows_function_name = True
+    ordered_by_location = True
+    doc_prefix = make_indentation(4)
+
+    # -- REPORT SPECIFIC-API:
+    def report(self):
+        self.report_step_definition_docs()
+        self.stream.write("\n")
+
+    def report_step_definition_docs(self):
+        step_definitions = []
+        for step_type in self.step_types:
+            for step_definition in self.step_registry.steps[step_type]:
+                # step_definition.step_type = step_type
+                assert step_definition.step_type is not None
+                step_definitions.append(step_definition)
+
+        if self.ordered_by_location:
+            step_definitions = sorted(step_definitions,
+                                      key=attrgetter("location"))
+
+        for step_definition in step_definitions:
+            self.write_step_definition(step_definition)
+
+    def write_step_definition(self, step_definition):
+        step_definition_text = self.describe_step_definition(step_definition)
+        self.stream.write(u"%s\n" % step_definition_text)
+        doc = inspect.getdoc(step_definition.func)
+        func_name = step_definition.func.__name__
+        if self.shows_function_name and func_name not in ("step", "impl"):
+            self.stream.write(u" Function: %s()\n" % func_name)
+        if self.shows_location:
+            self.stream.write(u" Location: %s\n" % step_definition.location)
+        if doc:
+            doc = doc.strip()
+            self.stream.write(indent(doc, self.doc_prefix))
+            self.stream.write("\n")
+        self.stream.write("\n")
+
+
+# -----------------------------------------------------------------------------
+# CLASS: StepsCatalogFormatter
+# -----------------------------------------------------------------------------
+class StepsCatalogFormatter(StepsDocFormatter):
+    """
+    Provides formatter class that shows the documentation of all registered
+    step definitions. The primary purpose is to provide help for a test writer.
+
+    In order to ease work for non-programmer testers, the technical details of
+    the steps (i.e. function name, source location) are omitted and the
+    steps are shown as they would appear in a feature file (no noisy '@',
+    or '(', etc.).
+
+    Also, the output is sorted by step type (Given, When, Then).
+
+    Generic step definitions are listed with all three step types.
+
+    EXAMPLE:
+        $ behave --dry-run -f steps.catalog features/
+        Given a file named "{filename}" with
+            Creates a textual file with the content provided as docstring.
+
+        When I run "{command}"
+            Run a command as subprocess, collect its output and returncode.
+
+        Given a file named "{filename}" exists
+         When a file named "{filename}" exists
+         Then a file named "{filename}" exists
+            Verifies that a file with this filename exists.
+
+            .. code-block:: gherkin
+
+                Given a file named "abc.txt" exists
+                 When a file named "abc.txt" exists
+                ...
+
+    .. note::
+        Supports behave dry-run mode.
+    """
+    name = "steps.catalog"
+    description = "Shows non-technical documentation for step definitions."
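+    # -- NOTE: Generic step definitions (step type "step") are expanded to all
+    #    three step types by describe_step_definition() below, e.g.
+    #    @step('a file named "{filename}" exists') is listed as:
+    #        Given a file named "{filename}" exists
+    #         When a file named "{filename}" exists
+    #         Then a file named "{filename}" exists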
+ shows_location = False + shows_function_name = False + ordered_by_location = False + doc_prefix = make_indentation(4) + + + def describe_step_definition(self, step_definition, step_type=None): + if not step_type: + step_type = step_definition.step_type + assert step_type + desc = [] + if step_type == "step": + for step_type1 in self.step_types[:-1]: + text = u"%5s %s" % (step_type1.title(), step_definition.pattern) + desc.append(text) + else: + desc.append(u"%s %s" % (step_type.title(), step_definition.pattern)) + + return '\n'.join(desc) + + +# ----------------------------------------------------------------------------- +# CLASS: StepsUsageFormatter +# ----------------------------------------------------------------------------- +class StepsUsageFormatter(AbstractStepsFormatter): + """ + Provides formatter class that shows how step definitions are used by steps. + + EXAMPLE: + $ behave --dry-run -f steps.usage features/ + ... + + .. note:: + Supports behave dry-run mode. + """ + name = "steps.usage" + description = "Shows how step definitions are used by steps." + doc_prefix = make_indentation(4) + min_location_column = 40 + + def __init__(self, stream_opener, config): + super(StepsUsageFormatter, self).__init__(stream_opener, config) + self.step_usage_database = {} + self.undefined_steps = [] + + def reset(self): + super(StepsUsageFormatter, self).reset() + self.step_usage_database = {} + self.undefined_steps = [] + + # pylint: disable=invalid-name + def get_step_type_for_step_definition(self, step_definition): + step_type = step_definition.step_type + if not step_type: + # -- DETERMINE STEP-TYPE FROM STEP-REGISTRY: + assert self.step_registry + for step_type, values in self.step_registry.steps.items(): + if step_definition in values: + return step_type + # -- OTHERWISE: + step_type = "step" + return step_type + # pylint: enable=invalid-name + + def select_unused_step_definitions(self): + step_definitions = set() + for step_type, values in self.step_registry.steps.items(): + step_definitions.update(values) + used_step_definitions = set(self.step_usage_database.keys()) + unused_step_definitions = step_definitions - used_step_definitions + return unused_step_definitions + + def update_usage_database(self, step_definition, step): + matching_steps = self.step_usage_database.get(step_definition, None) + if matching_steps is None: + assert step_definition.step_type is not None + matching_steps = self.step_usage_database[step_definition] = [] + # -- AVOID DUPLICATES: From Scenario Outlines + if not steps_contain(matching_steps, step): + matching_steps.append(step) + + def update_usage_database_for_step(self, step): + step_definition = self.step_registry.find_step_definition(step) + if step_definition: + self.update_usage_database(step_definition, step) + # elif step not in self.undefined_steps: + elif not steps_contain(self.undefined_steps, step): + # -- AVOID DUPLICATES: From Scenario Outlines + self.undefined_steps.append(step) + + # pylint: disable=invalid-name + def update_usage_database_for_feature(self, feature): + # -- PROCESS BACKGROUND (if exists): Use Background steps only once. + if feature.background: + for step in feature.background.steps: + self.update_usage_database_for_step(step) + + # -- PROCESS SCENARIOS: Without background steps. 
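+        # -- NOTE: walk_scenarios() also yields the scenarios generated from
+        #    ScenarioOutline examples; update_usage_database_for_step() then
+        #    deduplicates them via steps_contain(), so each step/location pair
+        #    is recorded only once.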
+ for scenario in feature.walk_scenarios(): + for step in scenario.steps: + self.update_usage_database_for_step(step) + # pylint: enable=invalid-name + + # -- FORMATTER API: + def feature(self, feature): + super(StepsUsageFormatter, self).feature(feature) + self.update_usage_database_for_feature(feature) + + # -- REPORT API: + def report(self): + self.report_used_step_definitions() + self.report_unused_step_definitions() + self.report_undefined_steps() + self.stream.write("\n") + + # -- REPORT SPECIFIC-API: + def report_used_step_definitions(self): + # -- STEP: Used step definitions. + # ORDERING: Sort step definitions by file location. + get_location = lambda x: x[0].location + step_definition_items = self.step_usage_database.items() + step_definition_items = sorted(step_definition_items, key=get_location) + + for step_definition, steps in step_definition_items: + stepdef_text = self.describe_step_definition(step_definition) + steps_text = [u" %s %s" % (step.keyword, step.name) + for step in steps] + steps_text.append(stepdef_text) + max_size = compute_words_maxsize(steps_text) + if max_size < self.min_location_column: + max_size = self.min_location_column + + schema = u"%-" + _text(max_size) + "s # %s\n" + self.stream.write(schema % (stepdef_text, step_definition.location)) + schema = u"%-" + _text(max_size) + "s # %s\n" + for step, step_text in zip(steps, steps_text): + self.stream.write(schema % (step_text, step.location)) + self.stream.write("\n") + + def report_unused_step_definitions(self): + unused_step_definitions = self.select_unused_step_definitions() + if not unused_step_definitions: + return + + # -- STEP: Prepare report for unused step definitions. + # ORDERING: Sort step definitions by file location. + get_location = lambda x: x.location + step_definitions = sorted(unused_step_definitions, key=get_location) + step_texts = [self.describe_step_definition(step_definition) + for step_definition in step_definitions] + + max_size = compute_words_maxsize(step_texts) + if max_size < self.min_location_column-2: + max_size = self.min_location_column-2 + + # -- STEP: Write report. + schema = u" %-" + _text(max_size) + "s # %s\n" + self.stream.write("UNUSED STEP DEFINITIONS[%d]:\n" % len(step_texts)) + for step_definition, step_text in zip(step_definitions, step_texts): + self.stream.write(schema % (step_text, step_definition.location)) + + def report_undefined_steps(self): + if not self.undefined_steps: + return + + # -- STEP: Undefined steps. + undefined_steps = sorted(self.undefined_steps, + key=attrgetter("location")) + + steps_text = [u" %s %s" % (step.keyword, step.name) + for step in undefined_steps] + max_size = compute_words_maxsize(steps_text) + if max_size < self.min_location_column: + max_size = self.min_location_column + + self.stream.write("\nUNDEFINED STEPS[%d]:\n" % len(steps_text)) + schema = u"%-" + _text(max_size) + "s # %s\n" + for step, step_text in zip(undefined_steps, steps_text): + self.stream.write(schema % (step_text, step.location)) + +# ----------------------------------------------------------------------------- +# UTILITY FUNCTIONS: +# ----------------------------------------------------------------------------- +def steps_contain(steps, step): + for other_step in steps: + if step == other_step and step.location == other_step.location: + # -- NOTE: Step comparison does not take location into account. + return True + # -- OTHERWISE: Not contained yet (or step in other location). 
+ return False diff --git a/venv/Lib/site-packages/behave/formatter/tags.py b/venv/Lib/site-packages/behave/formatter/tags.py new file mode 100644 index 0000000..886d10d --- /dev/null +++ b/venv/Lib/site-packages/behave/formatter/tags.py @@ -0,0 +1,178 @@ +# -*- coding: utf-8 -*- +""" +Collects data how often a tag count is used and where. + +EXAMPLE: + + $ behave --dry-run -f tag_counts features/ +""" + +from __future__ import absolute_import +import six +from behave.formatter.base import Formatter +from behave.textutil import compute_words_maxsize, text as _text + + +# ----------------------------------------------------------------------------- +# CLASS: AbstractTagsFormatter +# ----------------------------------------------------------------------------- +class AbstractTagsFormatter(Formatter): + """ + Abstract base class for formatter that collect information on tags. + + .. note:: + Supports dry-run mode for faster feedback. + """ + with_tag_inheritance = False + + def __init__(self, stream_opener, config): + super(AbstractTagsFormatter, self).__init__(stream_opener, config) + self.tag_counts = {} + self._uri = None + self._feature_tags = None + self._scenario_outline_tags = None + + # -- Formatter API: + def uri(self, uri): + self._uri = uri + + def feature(self, feature): + self._feature_tags = feature.tags + self.record_tags(feature.tags, feature) + + def scenario(self, scenario): + tags = set(scenario.tags) + if self.with_tag_inheritance: + tags.update(self._feature_tags) + self.record_tags(tags, scenario) + + def close(self): + """Emit tag count reports.""" + # -- ENSURE: Output stream is open. + self.stream = self.open() + self.report_tags() + self.close_stream() + + # -- SPECIFIC API: + def record_tags(self, tags, model_element): + for tag in tags: + if tag not in self.tag_counts: + self.tag_counts[tag] = [] + self.tag_counts[tag].append(model_element) + + def report_tags(self): + raise NotImplementedError + + +# ----------------------------------------------------------------------------- +# CLASS: TagsFormatter +# ----------------------------------------------------------------------------- +class TagsFormatter(AbstractTagsFormatter): + """ + Formatter that collects information: + + * which tags exist + * how often a tag is used (counts) + * usage context/category: feature, scenario, ... + + .. note:: + Supports dry-run mode for faster feedback. + """ + name = "tags" + description = "Shows tags (and how often they are used)." 
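+    # -- EXAMPLE OUTPUT (illustrative):
+    #    TAG COUNTS (alphabetically sorted):
+    #      @slow    3 (used for feature: 1, scenario: 2)
+    #      @wip     1 (used for scenario)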
+ with_tag_inheritance = False + show_ordered_by_usage = False + + def report_tags(self): + self.report_tag_counts() + if self.show_ordered_by_usage: + self.report_tag_counts_by_usage() + + @staticmethod + def get_tag_count_details(tag_count): + details = {} + for element in tag_count: + category = element.keyword.lower() + if category not in details: + details[category] = 0 + details[category] += 1 + + parts = [] + if len(details) == 1: + parts.append(list(details.keys())[0]) + else: + for category in sorted(details): + text = u"%s: %d" % (category, details[category]) + parts.append(text) + return ", ".join(parts) + + def report_tag_counts(self): + # -- PREPARE REPORT: + ordered_tags = sorted(list(self.tag_counts.keys())) + tag_maxsize = compute_words_maxsize(ordered_tags) + schema = " @%-" + _text(tag_maxsize) + "s %4d (used for %s)\n" + + # -- EMIT REPORT: + self.stream.write("TAG COUNTS (alphabetically sorted):\n") + for tag in ordered_tags: + tag_data = self.tag_counts[tag] + counts = len(tag_data) + details = self.get_tag_count_details(tag_data) + self.stream.write(schema % (tag, counts, details)) + self.stream.write("\n") + + def report_tag_counts_by_usage(self): + # -- PREPARE REPORT: + compare_tag_counts_size = lambda x: len(self.tag_counts[x]) + ordered_tags = sorted(list(self.tag_counts.keys()), + key=compare_tag_counts_size) + tag_maxsize = compute_words_maxsize(ordered_tags) + schema = " @%-" + _text(tag_maxsize) + "s %4d (used for %s)\n" + + # -- EMIT REPORT: + self.stream.write("TAG COUNTS (most often used first):\n") + for tag in ordered_tags: + tag_data = self.tag_counts[tag] + counts = len(tag_data) + details = self.get_tag_count_details(tag_data) + self.stream.write(schema % (tag, counts, details)) + self.stream.write("\n") + + +# ----------------------------------------------------------------------------- +# CLASS: TagsLocationFormatter +# ----------------------------------------------------------------------------- +class TagsLocationFormatter(AbstractTagsFormatter): + """ + Formatter that collects information: + + * which tags exist + * where the tags are used (location) + + .. note:: + Supports dry-run mode for faster feedback. + """ + name = "tags.location" + description = "Shows tags and the location where they are used." 
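+    # -- EXAMPLE OUTPUT (illustrative):
+    #    TAG LOCATIONS (alphabetically ordered):
+    #     @wip:
+    #       features/alice.feature:10  Scenario: Alice
+    #       features/bob.feature:7     Scenario: Bob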
+ with_tag_inheritance = False + + def report_tags(self): + self.report_tags_by_locations() + + def report_tags_by_locations(self): + # -- PREPARE REPORT: + locations = set() + for tag_elements in self.tag_counts.values(): + locations.update([six.text_type(x.location) for x in tag_elements]) + location_column_size = compute_words_maxsize(locations) + schema = u" %-" + _text(location_column_size) + "s %s\n" + + # -- EMIT REPORT: + self.stream.write("TAG LOCATIONS (alphabetically ordered):\n") + for tag in sorted(self.tag_counts): + self.stream.write(" @%s:\n" % tag) + for element in self.tag_counts[tag]: + info = u"%s: %s" % (element.keyword, element.name) + self.stream.write(schema % (element.location, info)) + self.stream.write("\n") + self.stream.write("\n") diff --git a/venv/Lib/site-packages/behave/i18n.py b/venv/Lib/site-packages/behave/i18n.py new file mode 100644 index 0000000..25f22ef --- /dev/null +++ b/venv/Lib/site-packages/behave/i18n.py @@ -0,0 +1,626 @@ +# -*- coding: UTF-8 -*- +# -- FILE GENERATED BY: convert_i18n_yaml.py with i18n.yml +# pylint: disable=line-too-long + +languages = \ +{'ar': {'and': [u'*', u'\u0648'], + 'background': [u'\u0627\u0644\u062e\u0644\u0641\u064a\u0629'], + 'but': [u'*', u'\u0644\u0643\u0646'], + 'examples': [u'\u0627\u0645\u062b\u0644\u0629'], + 'feature': [u'\u062e\u0627\u0635\u064a\u0629'], + 'given': [u'*', u'\u0628\u0641\u0631\u0636'], + 'name': [u'Arabic'], + 'native': [u'\u0627\u0644\u0639\u0631\u0628\u064a\u0629'], + 'scenario': [u'\u0633\u064a\u0646\u0627\u0631\u064a\u0648'], + 'scenario_outline': [u'\u0633\u064a\u0646\u0627\u0631\u064a\u0648 \u0645\u062e\u0637\u0637'], + 'then': [u'*', u'\u0627\u0630\u0627\u064b', u'\u062b\u0645'], + 'when': [u'*', + u'\u0645\u062a\u0649', + u'\u0639\u0646\u062f\u0645\u0627']}, + 'bg': {'and': [u'*', u'\u0418'], + 'background': [u'\u041f\u0440\u0435\u0434\u0438\u0441\u0442\u043e\u0440\u0438\u044f'], + 'but': [u'*', u'\u041d\u043e'], + 'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u0438'], + 'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b\u043d\u043e\u0441\u0442'], + 'given': [u'*', u'\u0414\u0430\u0434\u0435\u043d\u043e'], + 'name': [u'Bulgarian'], + 'native': [u'\u0431\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438'], + 'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'], + 'scenario_outline': [u'\u0420\u0430\u043c\u043a\u0430 \u043d\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0438\u0439'], + 'then': [u'*', u'\u0422\u043e'], + 'when': [u'*', u'\u041a\u043e\u0433\u0430\u0442\u043e']}, + 'ca': {'and': [u'*', u'I'], + 'background': [u'Rerefons', u'Antecedents'], + 'but': [u'*', u'Per\xf2'], + 'examples': [u'Exemples'], + 'feature': [u'Caracter\xedstica', u'Funcionalitat'], + 'given': [u'*', u'Donat', u'Donada', u'At\xe8s', u'Atesa'], + 'name': [u'Catalan'], + 'native': [u'catal\xe0'], + 'scenario': [u'Escenari'], + 'scenario_outline': [u"Esquema de l'escenari"], + 'then': [u'*', u'Aleshores', u'Cal'], + 'when': [u'*', u'Quan']}, + 'cs': {'and': [u'*', u'A', u'A tak\xe9'], + 'background': [u'Pozad\xed', u'Kontext'], + 'but': [u'*', u'Ale'], + 'examples': [u'P\u0159\xedklady'], + 'feature': [u'Po\u017eadavek'], + 'given': [u'*', u'Pokud', u'Za p\u0159edpokladu'], + 'name': [u'Czech'], + 'native': [u'\u010cesky'], + 'scenario': [u'Sc\xe9n\xe1\u0159'], + 'scenario_outline': [u'N\xe1\u010drt Sc\xe9n\xe1\u0159e', + u'Osnova sc\xe9n\xe1\u0159e'], + 'then': [u'*', u'Pak'], + 'when': [u'*', u'Kdy\u017e']}, + 'cy-GB': {'and': [u'*', u'A'], + 'background': [u'Cefndir'], 
+ 'but': [u'*', u'Ond'], + 'examples': [u'Enghreifftiau'], + 'feature': [u'Arwedd'], + 'given': [u'*', u'Anrhegedig a'], + 'name': [u'Welsh'], + 'native': [u'Cymraeg'], + 'scenario': [u'Scenario'], + 'scenario_outline': [u'Scenario Amlinellol'], + 'then': [u'*', u'Yna'], + 'when': [u'*', u'Pryd']}, + 'da': {'and': [u'*', u'Og'], + 'background': [u'Baggrund'], + 'but': [u'*', u'Men'], + 'examples': [u'Eksempler'], + 'feature': [u'Egenskab'], + 'given': [u'*', u'Givet'], + 'name': [u'Danish'], + 'native': [u'dansk'], + 'scenario': [u'Scenarie'], + 'scenario_outline': [u'Abstrakt Scenario'], + 'then': [u'*', u'S\xe5'], + 'when': [u'*', u'N\xe5r']}, + 'de': {'and': [u'*', u'Und'], + 'background': [u'Grundlage'], + 'but': [u'*', u'Aber'], + 'examples': [u'Beispiele'], + 'feature': [u'Funktionalit\xe4t'], + 'given': [u'*', u'Angenommen', u'Gegeben sei'], + 'name': [u'German'], + 'native': [u'Deutsch'], + 'scenario': [u'Szenario'], + 'scenario_outline': [u'Szenariogrundriss'], + 'then': [u'*', u'Dann'], + 'when': [u'*', u'Wenn']}, + 'en': {'and': [u'*', u'And'], + 'background': [u'Background'], + 'but': [u'*', u'But'], + 'examples': [u'Examples', u'Scenarios'], + 'feature': [u'Feature'], + 'given': [u'*', u'Given'], + 'name': [u'English'], + 'native': [u'English'], + 'scenario': [u'Scenario'], + 'scenario_outline': [u'Scenario Outline', u'Scenario Template'], + 'then': [u'*', u'Then'], + 'when': [u'*', u'When']}, + 'en-Scouse': {'and': [u'*', u'An'], + 'background': [u'Dis is what went down'], + 'but': [u'*', u'Buh'], + 'examples': [u'Examples'], + 'feature': [u'Feature'], + 'given': [u'*', u'Givun', u'Youse know when youse got'], + 'name': [u'Scouse'], + 'native': [u'Scouse'], + 'scenario': [u'The thing of it is'], + 'scenario_outline': [u'Wharrimean is'], + 'then': [u'*', u'Dun', u'Den youse gotta'], + 'when': [u'*', u'Wun', u'Youse know like when']}, + 'en-au': {'and': [u'*', u'N'], + 'background': [u'Background'], + 'but': [u'*', u'Cept'], + 'examples': [u'Cobber'], + 'feature': [u'Crikey'], + 'given': [u'*', u'Ya know how'], + 'name': [u'Australian'], + 'native': [u'Australian'], + 'scenario': [u'Mate'], + 'scenario_outline': [u'Blokes'], + 'then': [u'*', u'Ya gotta'], + 'when': [u'*', u'When']}, + 'en-lol': {'and': [u'*', u'AN'], + 'background': [u'B4'], + 'but': [u'*', u'BUT'], + 'examples': [u'EXAMPLZ'], + 'feature': [u'OH HAI'], + 'given': [u'*', u'I CAN HAZ'], + 'name': [u'LOLCAT'], + 'native': [u'LOLCAT'], + 'scenario': [u'MISHUN'], + 'scenario_outline': [u'MISHUN SRSLY'], + 'then': [u'*', u'DEN'], + 'when': [u'*', u'WEN']}, + 'en-pirate': {'and': [u'*', u'Aye'], + 'background': [u'Yo-ho-ho'], + 'but': [u'*', u'Avast!'], + 'examples': [u'Dead men tell no tales'], + 'feature': [u'Ahoy matey!'], + 'given': [u'*', u'Gangway!'], + 'name': [u'Pirate'], + 'native': [u'Pirate'], + 'scenario': [u'Heave to'], + 'scenario_outline': [u'Shiver me timbers'], + 'then': [u'*', u'Let go and haul'], + 'when': [u'*', u'Blimey!']}, + 'en-tx': {'and': [u'*', u"And y'all"], + 'background': [u'Background'], + 'but': [u'*', u"But y'all"], + 'examples': [u'Examples'], + 'feature': [u'Feature'], + 'given': [u'*', u"Given y'all"], + 'name': [u'Texan'], + 'native': [u'Texan'], + 'scenario': [u'Scenario'], + 'scenario_outline': [u"All y'all"], + 'then': [u'*', u"Then y'all"], + 'when': [u'*', u"When y'all"]}, + 'eo': {'and': [u'*', u'Kaj'], + 'background': [u'Fono'], + 'but': [u'*', u'Sed'], + 'examples': [u'Ekzemploj'], + 'feature': [u'Trajto'], + 'given': [u'*', u'Donita\u0135o'], + 'name': [u'Esperanto'], + 
'native': [u'Esperanto'], + 'scenario': [u'Scenaro'], + 'scenario_outline': [u'Konturo de la scenaro'], + 'then': [u'*', u'Do'], + 'when': [u'*', u'Se']}, + 'es': {'and': [u'*', u'Y'], + 'background': [u'Antecedentes'], + 'but': [u'*', u'Pero'], + 'examples': [u'Ejemplos'], + 'feature': [u'Caracter\xedstica'], + 'given': [u'*', u'Dado', u'Dada', u'Dados', u'Dadas'], + 'name': [u'Spanish'], + 'native': [u'espa\xf1ol'], + 'scenario': [u'Escenario'], + 'scenario_outline': [u'Esquema del escenario'], + 'then': [u'*', u'Entonces'], + 'when': [u'*', u'Cuando']}, + 'et': {'and': [u'*', u'Ja'], + 'background': [u'Taust'], + 'but': [u'*', u'Kuid'], + 'examples': [u'Juhtumid'], + 'feature': [u'Omadus'], + 'given': [u'*', u'Eeldades'], + 'name': [u'Estonian'], + 'native': [u'eesti keel'], + 'scenario': [u'Stsenaarium'], + 'scenario_outline': [u'Raamstsenaarium'], + 'then': [u'*', u'Siis'], + 'when': [u'*', u'Kui']}, + 'fi': {'and': [u'*', u'Ja'], + 'background': [u'Tausta'], + 'but': [u'*', u'Mutta'], + 'examples': [u'Tapaukset'], + 'feature': [u'Ominaisuus'], + 'given': [u'*', u'Oletetaan'], + 'name': [u'Finnish'], + 'native': [u'suomi'], + 'scenario': [u'Tapaus'], + 'scenario_outline': [u'Tapausaihio'], + 'then': [u'*', u'Niin'], + 'when': [u'*', u'Kun']}, + 'fr': {'and': [u'*', u'Et'], + 'background': [u'Contexte'], + 'but': [u'*', u'Mais'], + 'examples': [u'Exemples'], + 'feature': [u'Fonctionnalit\xe9'], + 'given': [u'*', + u'Soit', + u'Etant donn\xe9', + u'Etant donn\xe9e', + u'Etant donn\xe9s', + u'Etant donn\xe9es', + u'\xc9tant donn\xe9', + u'\xc9tant donn\xe9e', + u'\xc9tant donn\xe9s', + u'\xc9tant donn\xe9es'], + 'name': [u'French'], + 'native': [u'fran\xe7ais'], + 'scenario': [u'Sc\xe9nario'], + 'scenario_outline': [u'Plan du sc\xe9nario', u'Plan du Sc\xe9nario'], + 'then': [u'*', u'Alors'], + 'when': [u'*', u'Quand', u'Lorsque', u"Lorsqu'<"]}, + 'gl': {'and': [u'*', u'E'], + 'background': [u'Contexto'], + 'but': [u'*', u'Mais', u'Pero'], + 'examples': [u'Exemplos'], + 'feature': [u'Caracter\xedstica'], + 'given': [u'*', u'Dado', u'Dada', u'Dados', u'Dadas'], + 'name': [u'Galician'], + 'native': [u'galego'], + 'scenario': [u'Escenario'], + 'scenario_outline': [u'Esbozo do escenario'], + 'then': [u'*', u'Ent\xf3n', u'Logo'], + 'when': [u'*', u'Cando']}, + 'he': {'and': [u'*', u'\u05d5\u05d2\u05dd'], + 'background': [u'\u05e8\u05e7\u05e2'], + 'but': [u'*', u'\u05d0\u05d1\u05dc'], + 'examples': [u'\u05d3\u05d5\u05d2\u05de\u05d0\u05d5\u05ea'], + 'feature': [u'\u05ea\u05db\u05d5\u05e0\u05d4'], + 'given': [u'*', u'\u05d1\u05d4\u05d9\u05e0\u05ea\u05df'], + 'name': [u'Hebrew'], + 'native': [u'\u05e2\u05d1\u05e8\u05d9\u05ea'], + 'scenario': [u'\u05ea\u05e8\u05d7\u05d9\u05e9'], + 'scenario_outline': [u'\u05ea\u05d1\u05e0\u05d9\u05ea \u05ea\u05e8\u05d7\u05d9\u05e9'], + 'then': [u'*', u'\u05d0\u05d6', u'\u05d0\u05d6\u05d9'], + 'when': [u'*', u'\u05db\u05d0\u05e9\u05e8']}, + 'hr': {'and': [u'*', u'I'], + 'background': [u'Pozadina'], + 'but': [u'*', u'Ali'], + 'examples': [u'Primjeri', u'Scenariji'], + 'feature': [u'Osobina', u'Mogu\u0107nost', u'Mogucnost'], + 'given': [u'*', u'Zadan', u'Zadani', u'Zadano'], + 'name': [u'Croatian'], + 'native': [u'hrvatski'], + 'scenario': [u'Scenarij'], + 'scenario_outline': [u'Skica', u'Koncept'], + 'then': [u'*', u'Onda'], + 'when': [u'*', u'Kada', u'Kad']}, + 'hu': {'and': [u'*', u'\xc9s'], + 'background': [u'H\xe1tt\xe9r'], + 'but': [u'*', u'De'], + 'examples': [u'P\xe9ld\xe1k'], + 'feature': [u'Jellemz\u0151'], + 'given': [u'*', u'Amennyiben', u'Adott'], + 
'name': [u'Hungarian'], + 'native': [u'magyar'], + 'scenario': [u'Forgat\xf3k\xf6nyv'], + 'scenario_outline': [u'Forgat\xf3k\xf6nyv v\xe1zlat'], + 'then': [u'*', u'Akkor'], + 'when': [u'*', u'Majd', u'Ha', u'Amikor']}, + 'id': {'and': [u'*', u'Dan'], + 'background': [u'Dasar'], + 'but': [u'*', u'Tapi'], + 'examples': [u'Contoh'], + 'feature': [u'Fitur'], + 'given': [u'*', u'Dengan'], + 'name': [u'Indonesian'], + 'native': [u'Bahasa Indonesia'], + 'scenario': [u'Skenario'], + 'scenario_outline': [u'Skenario konsep'], + 'then': [u'*', u'Maka'], + 'when': [u'*', u'Ketika']}, + 'is': {'and': [u'*', u'Og'], + 'background': [u'Bakgrunnur'], + 'but': [u'*', u'En'], + 'examples': [u'D\xe6mi', u'Atbur\xf0ar\xe1sir'], + 'feature': [u'Eiginleiki'], + 'given': [u'*', u'Ef'], + 'name': [u'Icelandic'], + 'native': [u'\xcdslenska'], + 'scenario': [u'Atbur\xf0ar\xe1s'], + 'scenario_outline': [u'L\xfdsing Atbur\xf0ar\xe1sar', + u'L\xfdsing D\xe6ma'], + 'then': [u'*', u'\xde\xe1'], + 'when': [u'*', u'\xdeegar']}, + 'it': {'and': [u'*', u'E'], + 'background': [u'Contesto'], + 'but': [u'*', u'Ma'], + 'examples': [u'Esempi'], + 'feature': [u'Funzionalit\xe0'], + 'given': [u'*', u'Dato', u'Data', u'Dati', u'Date'], + 'name': [u'Italian'], + 'native': [u'italiano'], + 'scenario': [u'Scenario'], + 'scenario_outline': [u'Schema dello scenario'], + 'then': [u'*', u'Allora'], + 'when': [u'*', u'Quando']}, + 'ja': {'and': [u'*', u'\u304b\u3064<'], + 'background': [u'\u80cc\u666f'], + 'but': [u'*', + u'\u3057\u304b\u3057<', + u'\u4f46\u3057<', + u'\u305f\u3060\u3057<'], + 'examples': [u'\u4f8b', u'\u30b5\u30f3\u30d7\u30eb'], + 'feature': [u'\u30d5\u30a3\u30fc\u30c1\u30e3', u'\u6a5f\u80fd'], + 'given': [u'*', u'\u524d\u63d0<'], + 'name': [u'Japanese'], + 'native': [u'\u65e5\u672c\u8a9e'], + 'scenario': [u'\u30b7\u30ca\u30ea\u30aa'], + 'scenario_outline': [u'\u30b7\u30ca\u30ea\u30aa\u30a2\u30a6\u30c8\u30e9\u30a4\u30f3', + u'\u30b7\u30ca\u30ea\u30aa\u30c6\u30f3\u30d7\u30ec\u30fc\u30c8', + u'\u30c6\u30f3\u30d7\u30ec', + u'\u30b7\u30ca\u30ea\u30aa\u30c6\u30f3\u30d7\u30ec'], + 'then': [u'*', u'\u306a\u3089\u3070<'], + 'when': [u'*', u'\u3082\u3057<']}, + 'ko': {'and': [u'*', u'\uadf8\ub9ac\uace0<'], + 'background': [u'\ubc30\uacbd'], + 'but': [u'*', u'\ud558\uc9c0\ub9cc<', u'\ub2e8<'], + 'examples': [u'\uc608'], + 'feature': [u'\uae30\ub2a5'], + 'given': [u'*', u'\uc870\uac74<', u'\uba3c\uc800<'], + 'name': [u'Korean'], + 'native': [u'\ud55c\uad6d\uc5b4'], + 'scenario': [u'\uc2dc\ub098\ub9ac\uc624'], + 'scenario_outline': [u'\uc2dc\ub098\ub9ac\uc624 \uac1c\uc694'], + 'then': [u'*', u'\uadf8\ub7ec\uba74<'], + 'when': [u'*', u'\ub9cc\uc77c<', u'\ub9cc\uc57d<']}, + 'lt': {'and': [u'*', u'Ir'], + 'background': [u'Kontekstas'], + 'but': [u'*', u'Bet'], + 'examples': [u'Pavyzd\u017eiai', u'Scenarijai', u'Variantai'], + 'feature': [u'Savyb\u0117'], + 'given': [u'*', u'Duota'], + 'name': [u'Lithuanian'], + 'native': [u'lietuvi\u0173 kalba'], + 'scenario': [u'Scenarijus'], + 'scenario_outline': [u'Scenarijaus \u0161ablonas'], + 'then': [u'*', u'Tada'], + 'when': [u'*', u'Kai']}, + 'lu': {'and': [u'*', u'an', u'a'], + 'background': [u'Hannergrond'], + 'but': [u'*', u'awer', u'm\xe4'], + 'examples': [u'Beispiller'], + 'feature': [u'Funktionalit\xe9it'], + 'given': [u'*', u'ugeholl'], + 'name': [u'Luxemburgish'], + 'native': [u'L\xebtzebuergesch'], + 'scenario': [u'Szenario'], + 'scenario_outline': [u'Plang vum Szenario'], + 'then': [u'*', u'dann'], + 'when': [u'*', u'wann']}, + 'lv': {'and': [u'*', u'Un'], + 'background': 
[u'Konteksts', u'Situ\u0101cija'], + 'but': [u'*', u'Bet'], + 'examples': [u'Piem\u0113ri', u'Paraugs'], + 'feature': [u'Funkcionalit\u0101te', u'F\u012b\u010da'], + 'given': [u'*', u'Kad'], + 'name': [u'Latvian'], + 'native': [u'latvie\u0161u'], + 'scenario': [u'Scen\u0101rijs'], + 'scenario_outline': [u'Scen\u0101rijs p\u0113c parauga'], + 'then': [u'*', u'Tad'], + 'when': [u'*', u'Ja']}, + 'nl': {'and': [u'*', u'En'], + 'background': [u'Achtergrond'], + 'but': [u'*', u'Maar'], + 'examples': [u'Voorbeelden'], + 'feature': [u'Functionaliteit'], + 'given': [u'*', u'Gegeven', u'Stel'], + 'name': [u'Dutch'], + 'native': [u'Nederlands'], + 'scenario': [u'Scenario'], + 'scenario_outline': [u'Abstract Scenario'], + 'then': [u'*', u'Dan'], + 'when': [u'*', u'Als']}, + 'no': {'and': [u'*', u'Og'], + 'background': [u'Bakgrunn'], + 'but': [u'*', u'Men'], + 'examples': [u'Eksempler'], + 'feature': [u'Egenskap'], + 'given': [u'*', u'Gitt'], + 'name': [u'Norwegian'], + 'native': [u'norsk'], + 'scenario': [u'Scenario'], + 'scenario_outline': [u'Scenariomal', u'Abstrakt Scenario'], + 'then': [u'*', u'S\xe5'], + 'when': [u'*', u'N\xe5r']}, + 'pl': {'and': [u'*', u'Oraz', u'I'], + 'background': [u'Za\u0142o\u017cenia'], + 'but': [u'*', u'Ale'], + 'examples': [u'Przyk\u0142ady'], + 'feature': [u'W\u0142a\u015bciwo\u015b\u0107'], + 'given': [u'*', u'Zak\u0142adaj\u0105c', u'Maj\u0105c'], + 'name': [u'Polish'], + 'native': [u'polski'], + 'scenario': [u'Scenariusz'], + 'scenario_outline': [u'Szablon scenariusza'], + 'then': [u'*', u'Wtedy'], + 'when': [u'*', u'Je\u017celi', u'Je\u015bli']}, + 'pt': {'and': [u'*', u'E'], + 'background': [u'Contexto'], + 'but': [u'*', u'Mas'], + 'examples': [u'Exemplos'], + 'feature': [u'Funcionalidade'], + 'given': [u'*', u'Dado', u'Dada', u'Dados', u'Dadas'], + 'name': [u'Portuguese'], + 'native': [u'portugu\xeas'], + 'scenario': [u'Cen\xe1rio', u'Cenario'], + 'scenario_outline': [u'Esquema do Cen\xe1rio', u'Esquema do Cenario'], + 'then': [u'*', u'Ent\xe3o', u'Entao'], + 'when': [u'*', u'Quando']}, + 'ro': {'and': [u'*', u'Si', u'\u0218i', u'\u015ei'], + 'background': [u'Context'], + 'but': [u'*', u'Dar'], + 'examples': [u'Exemple'], + 'feature': [u'Functionalitate', + u'Func\u021bionalitate', + u'Func\u0163ionalitate'], + 'given': [u'*', + u'Date fiind', + u'Dat fiind', + u'Dati fiind', + u'Da\u021bi fiind', + u'Da\u0163i fiind'], + 'name': [u'Romanian'], + 'native': [u'rom\xe2n\u0103'], + 'scenario': [u'Scenariu'], + 'scenario_outline': [u'Structura scenariu', + u'Structur\u0103 scenariu'], + 'then': [u'*', u'Atunci'], + 'when': [u'*', u'Cand', u'C\xe2nd']}, + 'ru': {'and': [u'*', + u'\u0418', + u'\u041a \u0442\u043e\u043c\u0443 \u0436\u0435'], + 'background': [u'\u041f\u0440\u0435\u0434\u044b\u0441\u0442\u043e\u0440\u0438\u044f', + u'\u041a\u043e\u043d\u0442\u0435\u043a\u0441\u0442'], + 'but': [u'*', u'\u041d\u043e', u'\u0410'], + 'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u044b'], + 'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u044f', + u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b', + u'\u0421\u0432\u043e\u0439\u0441\u0442\u0432\u043e'], + 'given': [u'*', + u'\u0414\u043e\u043f\u0443\u0441\u0442\u0438\u043c', + u'\u0414\u0430\u043d\u043e', + u'\u041f\u0443\u0441\u0442\u044c'], + 'name': [u'Russian'], + 'native': [u'\u0440\u0443\u0441\u0441\u043a\u0438\u0439'], + 'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'], + 'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 
\u0441\u0446\u0435\u043d\u0430\u0440\u0438\u044f'], + 'then': [u'*', u'\u0422\u043e', u'\u0422\u043e\u0433\u0434\u0430'], + 'when': [u'*', + u'\u0415\u0441\u043b\u0438', + u'\u041a\u043e\u0433\u0434\u0430']}, + 'sk': {'and': [u'*', u'A'], + 'background': [u'Pozadie'], + 'but': [u'*', u'Ale'], + 'examples': [u'Pr\xedklady'], + 'feature': [u'Po\u017eiadavka'], + 'given': [u'*', u'Pokia\u013e'], + 'name': [u'Slovak'], + 'native': [u'Slovensky'], + 'scenario': [u'Scen\xe1r'], + 'scenario_outline': [u'N\xe1\u010drt Scen\xe1ru'], + 'then': [u'*', u'Tak'], + 'when': [u'*', u'Ke\u010f']}, + 'sr-Cyrl': {'and': [u'*', u'\u0418'], + 'background': [u'\u041a\u043e\u043d\u0442\u0435\u043a\u0441\u0442', + u'\u041e\u0441\u043d\u043e\u0432\u0430', + u'\u041f\u043e\u0437\u0430\u0434\u0438\u043d\u0430'], + 'but': [u'*', u'\u0410\u043b\u0438'], + 'examples': [u'\u041f\u0440\u0438\u043c\u0435\u0440\u0438', + u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0458\u0438'], + 'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b\u043d\u043e\u0441\u0442', + u'\u041c\u043e\u0433\u0443\u045b\u043d\u043e\u0441\u0442', + u'\u041e\u0441\u043e\u0431\u0438\u043d\u0430'], + 'given': [u'*', + u'\u0417\u0430\u0434\u0430\u0442\u043e', + u'\u0417\u0430\u0434\u0430\u0442\u0435', + u'\u0417\u0430\u0434\u0430\u0442\u0438'], + 'name': [u'Serbian'], + 'native': [u'\u0421\u0440\u043f\u0441\u043a\u0438'], + 'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u043e', + u'\u041f\u0440\u0438\u043c\u0435\u0440'], + 'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0438\u0458\u0430', + u'\u0421\u043a\u0438\u0446\u0430', + u'\u041a\u043e\u043d\u0446\u0435\u043f\u0442'], + 'then': [u'*', u'\u041e\u043d\u0434\u0430'], + 'when': [u'*', + u'\u041a\u0430\u0434\u0430', + u'\u041a\u0430\u0434']}, + 'sr-Latn': {'and': [u'*', u'I'], + 'background': [u'Kontekst', u'Osnova', u'Pozadina'], + 'but': [u'*', u'Ali'], + 'examples': [u'Primeri', u'Scenariji'], + 'feature': [u'Funkcionalnost', + u'Mogu\u0107nost', + u'Mogucnost', + u'Osobina'], + 'given': [u'*', u'Zadato', u'Zadate', u'Zatati'], + 'name': [u'Serbian (Latin)'], + 'native': [u'Srpski (Latinica)'], + 'scenario': [u'Scenario', u'Primer'], + 'scenario_outline': [u'Struktura scenarija', + u'Skica', + u'Koncept'], + 'then': [u'*', u'Onda'], + 'when': [u'*', u'Kada', u'Kad']}, + 'sv': {'and': [u'*', u'Och'], + 'background': [u'Bakgrund'], + 'but': [u'*', u'Men'], + 'examples': [u'Exempel'], + 'feature': [u'Egenskap'], + 'given': [u'*', u'Givet'], + 'name': [u'Swedish'], + 'native': [u'Svenska'], + 'scenario': [u'Scenario'], + 'scenario_outline': [u'Abstrakt Scenario', u'Scenariomall'], + 'then': [u'*', u'S\xe5'], + 'when': [u'*', u'N\xe4r']}, + 'tr': {'and': [u'*', u'Ve'], + 'background': [u'Ge\xe7mi\u015f'], + 'but': [u'*', u'Fakat', u'Ama'], + 'examples': [u'\xd6rnekler'], + 'feature': [u'\xd6zellik'], + 'given': [u'*', u'Diyelim ki'], + 'name': [u'Turkish'], + 'native': [u'T\xfcrk\xe7e'], + 'scenario': [u'Senaryo'], + 'scenario_outline': [u'Senaryo tasla\u011f\u0131'], + 'then': [u'*', u'O zaman'], + 'when': [u'*', u'E\u011fer ki']}, + 'uk': {'and': [u'*', + u'\u0406', + u'\u0410 \u0442\u0430\u043a\u043e\u0436', + u'\u0422\u0430'], + 'background': [u'\u041f\u0435\u0440\u0435\u0434\u0443\u043c\u043e\u0432\u0430'], + 'but': [u'*', u'\u0410\u043b\u0435'], + 'examples': [u'\u041f\u0440\u0438\u043a\u043b\u0430\u0434\u0438'], + 'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0456\u043e\u043d\u0430\u043b'], + 
'given': [u'*', + u'\u041f\u0440\u0438\u043f\u0443\u0441\u0442\u0438\u043c\u043e', + u'\u041f\u0440\u0438\u043f\u0443\u0441\u0442\u0438\u043c\u043e, \u0449\u043e', + u'\u041d\u0435\u0445\u0430\u0439', + u'\u0414\u0430\u043d\u043e'], + 'name': [u'Ukrainian'], + 'native': [u'\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430'], + 'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0456\u0439'], + 'scenario_outline': [u'\u0421\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430 \u0441\u0446\u0435\u043d\u0430\u0440\u0456\u044e'], + 'then': [u'*', u'\u0422\u043e', u'\u0422\u043e\u0434\u0456'], + 'when': [u'*', + u'\u042f\u043a\u0449\u043e', + u'\u041a\u043e\u043b\u0438']}, + 'uz': {'and': [u'*', u'\u0412\u0430'], + 'background': [u'\u0422\u0430\u0440\u0438\u0445'], + 'but': [u'*', + u'\u041b\u0435\u043a\u0438\u043d', + u'\u0411\u0438\u0440\u043e\u043a', + u'\u0410\u043c\u043c\u043e'], + 'examples': [u'\u041c\u0438\u0441\u043e\u043b\u043b\u0430\u0440'], + 'feature': [u'\u0424\u0443\u043d\u043a\u0446\u0438\u043e\u043d\u0430\u043b'], + 'given': [u'*', u'\u0410\u0433\u0430\u0440'], + 'name': [u'Uzbek'], + 'native': [u'\u0423\u0437\u0431\u0435\u043a\u0447\u0430'], + 'scenario': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439'], + 'scenario_outline': [u'\u0421\u0446\u0435\u043d\u0430\u0440\u0438\u0439 \u0441\u0442\u0440\u0443\u043a\u0442\u0443\u0440\u0430\u0441\u0438'], + 'then': [u'*', u'\u0423\u043d\u0434\u0430'], + 'when': [u'*', u'\u0410\u0433\u0430\u0440']}, + 'vi': {'and': [u'*', u'V\xe0'], + 'background': [u'B\u1ed1i c\u1ea3nh'], + 'but': [u'*', u'Nh\u01b0ng'], + 'examples': [u'D\u1eef li\u1ec7u'], + 'feature': [u'T\xednh n\u0103ng'], + 'given': [u'*', u'Bi\u1ebft', u'Cho'], + 'name': [u'Vietnamese'], + 'native': [u'Ti\u1ebfng Vi\u1ec7t'], + 'scenario': [u'T\xecnh hu\u1ed1ng', u'K\u1ecbch b\u1ea3n'], + 'scenario_outline': [u'Khung t\xecnh hu\u1ed1ng', + u'Khung k\u1ecbch b\u1ea3n'], + 'then': [u'*', u'Th\xec'], + 'when': [u'*', u'Khi']}, + 'zh-CN': {'and': [u'*', u'\u800c\u4e14<'], + 'background': [u'\u80cc\u666f'], + 'but': [u'*', u'\u4f46\u662f<'], + 'examples': [u'\u4f8b\u5b50'], + 'feature': [u'\u529f\u80fd'], + 'given': [u'*', u'\u5047\u5982<'], + 'name': [u'Chinese simplified'], + 'native': [u'\u7b80\u4f53\u4e2d\u6587'], + 'scenario': [u'\u573a\u666f'], + 'scenario_outline': [u'\u573a\u666f\u5927\u7eb2'], + 'then': [u'*', u'\u90a3\u4e48<'], + 'when': [u'*', u'\u5f53<']}, + 'zh-TW': {'and': [u'*', u'\u800c\u4e14<', u'\u4e26\u4e14<'], + 'background': [u'\u80cc\u666f'], + 'but': [u'*', u'\u4f46\u662f<'], + 'examples': [u'\u4f8b\u5b50'], + 'feature': [u'\u529f\u80fd'], + 'given': [u'*', u'\u5047\u8a2d<'], + 'name': [u'Chinese traditional'], + 'native': [u'\u7e41\u9ad4\u4e2d\u6587'], + 'scenario': [u'\u5834\u666f', u'\u5287\u672c'], + 'scenario_outline': [u'\u5834\u666f\u5927\u7db1', + u'\u5287\u672c\u5927\u7db1'], + 'then': [u'*', u'\u90a3\u9ebc<'], + 'when': [u'*', u'\u7576<']}} diff --git a/venv/Lib/site-packages/behave/importer.py b/venv/Lib/site-packages/behave/importer.py new file mode 100644 index 0000000..6611ada --- /dev/null +++ b/venv/Lib/site-packages/behave/importer.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +""" +Importer module for lazy-loading/importing modules and objects. + +REQUIRES: importlib (provided in Python2.7, Python3.2...) 
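A minimal usage sketch of the LazyDict/LazyObject pair defined below (an illustration, not part of the vendored file; assumes behave is importable, and uses the same scoped-name schema as parse_scoped_name):

.. code-block:: python

    from behave.importer import LazyDict, LazyObject

    formatters = LazyDict()
    formatters["plain"] = LazyObject("behave.formatter.plain:PlainFormatter")
    # -- FIRST ACCESS: Triggers the real import and caches the resolved class.
    formatter_class = formatters["plain"]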
+""" + +from __future__ import absolute_import +import importlib +from behave._types import Unknown + +def parse_scoped_name(scoped_name): + """ + SCHEMA: my.module_name:MyClassName + EXAMPLE: + behave.formatter.plain:PlainFormatter + """ + scoped_name = scoped_name.strip() + if "::" in scoped_name: + # -- ALTERNATIVE: my.module_name::MyClassName + scoped_name = scoped_name.replace("::", ":") + module_name, object_name = scoped_name.rsplit(":", 1) + return module_name, object_name + +def load_module(module_name): + return importlib.import_module(module_name) + + +class LazyObject(object): + """ + Provides a placeholder for an object that should be loaded lazily. + """ + + def __init__(self, module_name, object_name=None): + if ":" in module_name and not object_name: + module_name, object_name = parse_scoped_name(module_name) + assert ":" not in module_name + self.module_name = module_name + self.object_name = object_name + self.resolved_object = None + + def __get__(self, obj=None, type=None): # pylint: disable=redefined-builtin + """ + Implement descriptor protocol, + useful if this class is used as attribute. + :return: Real object (lazy-loaded if necessary). + :raise ImportError: If module or object cannot be imported. + """ + __pychecker__ = "unusednames=obj,type" + resolved_object = None + if not self.resolved_object: + # -- SETUP-ONCE: Lazy load the real object. + module = load_module(self.module_name) + resolved_object = getattr(module, self.object_name, Unknown) + if resolved_object is Unknown: + msg = "%s: %s is Unknown" % (self.module_name, self.object_name) + raise ImportError(msg) + self.resolved_object = resolved_object + return resolved_object + + def __set__(self, obj, value): + """Implement descriptor protocol.""" + __pychecker__ = "unusednames=obj" + self.resolved_object = value + + def get(self): + return self.__get__() + + +class LazyDict(dict): + """ + Provides a dict that supports lazy loading of objects. + A LazyObject is provided as placeholder for a value that should be + loaded lazily. + """ + + def __getitem__(self, key): + """ + Provides access to stored dict values. + Implements lazy loading of item value (if necessary). + When lazy object is loaded, its value with the dict is replaced + with the real value. + + :param key: Key to access the value of an item in the dict. + :return: value + :raises: KeyError if item is not found + :raises: ImportError for a LazyObject that cannot be imported. + """ + value = dict.__getitem__(self, key) + if isinstance(value, LazyObject): + # -- LAZY-LOADING MECHANISM: Load object and replace with lazy one. + value = value.__get__() + self[key] = value + return value + + def load_all(self, strict=False): + for key in self.keys(): + try: + self.__getitem__(key) + except ImportError: + if strict: + raise diff --git a/venv/Lib/site-packages/behave/json_parser.py b/venv/Lib/site-packages/behave/json_parser.py new file mode 100644 index 0000000..ce17738 --- /dev/null +++ b/venv/Lib/site-packages/behave/json_parser.py @@ -0,0 +1,264 @@ +# -*- coding: utf-8 -*- +""" +Read behave's JSON output files and store retrieved information in +:mod:`behave.model` elements. + +Utility to retrieve runtime information from behave's JSON output. + +REQUIRES: Python >= 2.6 (json module is part of Python standard library) +""" + +from __future__ import absolute_import +import codecs +from behave import model +from behave.model_core import Status +try: + import json +except ImportError: + # -- PYTHON 2.5 backward compatible: Use simplejson module. 
+ import simplejson as json + + +__author__ = "Jens Engel" + + +# ---------------------------------------------------------------------------- +# FUNCTIONS: +# ---------------------------------------------------------------------------- +def parse(json_filename, encoding="UTF-8"): + """ + Reads behave JSON output file back in and stores information in + behave model elements. + + :param json_filename: JSON filename to process. + :return: List of feature objects. + """ + with codecs.open(json_filename, "rU", encoding=encoding) as input_file: + json_data = json.load(input_file, encoding=encoding) + json_processor = JsonParser() + features = json_processor.parse_features(json_data) + return features + + +# ---------------------------------------------------------------------------- +# CLASSES: +# ---------------------------------------------------------------------------- +class JsonParser(object): + + def __init__(self): + self.current_scenario_outline = None + + def parse_features(self, json_data): + assert isinstance(json_data, list) + features = [] + json_features = json_data + for json_feature in json_features: + feature = self.parse_feature(json_feature) + features.append(feature) + return features + + def parse_feature(self, json_feature): + name = json_feature.get("name", u"") + keyword = json_feature.get("keyword", None) + tags = json_feature.get("tags", []) + description = json_feature.get("description", []) + location = json_feature.get("location", u"") + filename, line = location.split(":") + feature = model.Feature(filename, line, keyword, name, tags, description) + + json_elements = json_feature.get("elements", []) + for json_element in json_elements: + self.add_feature_element(feature, json_element) + return feature + + + def add_feature_element(self, feature, json_element): + datatype = json_element.get("type", u"") + category = datatype.lower() + if category == "background": + background = self.parse_background(json_element) + feature.background = background + elif category == "scenario": + scenario = self.parse_scenario(json_element) + feature.add_scenario(scenario) + elif category == "scenario_outline": + scenario_outline = self.parse_scenario_outline(json_element) + feature.add_scenario(scenario_outline) + self.current_scenario_outline = scenario_outline + # elif category == "examples": + # examples = self.parse_examples(json_element) + # self.current_scenario_outline.examples = examples + else: + raise KeyError("Invalid feature-element keyword: %s" % category) + + + def parse_background(self, json_element): + """ + self.add_feature_element({ + 'keyword': background.keyword, + 'location': background.location, + 'steps': [], + }) + """ + keyword = json_element.get("keyword", u"") + name = json_element.get("name", u"") + location = json_element.get("location", u"") + json_steps = json_element.get("steps", []) + steps = self.parse_steps(json_steps) + filename, line = location.split(":") + background = model.Background(filename, line, keyword, name, steps) + return background + + def parse_scenario(self, json_element): + """ + self.add_feature_element({ + 'keyword': scenario.keyword, + 'name': scenario.name, + 'tags': scenario.tags, + 'location': scenario.location, + 'steps': [], + }) + """ + keyword = json_element.get("keyword", u"") + name = json_element.get("name", u"") + description = json_element.get("description", []) + tags = json_element.get("tags", []) + location = json_element.get("location", u"") + json_steps = json_element.get("steps", []) + steps = 
self.parse_steps(json_steps) + filename, line = location.split(":") + scenario = model.Scenario(filename, line, keyword, name, tags, steps) + scenario.description = description + return scenario + + def parse_scenario_outline(self, json_element): + """ + self.add_feature_element({ + 'keyword': scenario_outline.keyword, + 'name': scenario_outline.name, + 'tags': scenario_outline.tags, + 'location': scenario_outline.location, + 'steps': [], + 'examples': [], + }) + """ + keyword = json_element.get("keyword", u"") + name = json_element.get("name", u"") + description = json_element.get("description", []) + tags = json_element.get("tags", []) + location = json_element.get("location", u"") + json_steps = json_element.get("steps", []) + json_examples = json_element.get("examples", []) + steps = self.parse_steps(json_steps) + examples = [] + if json_examples: + # pylint: disable=redefined-variable-type + examples = self.parse_examples(json_examples) + filename, line = location.split(":") + scenario_outline = model.ScenarioOutline(filename, line, keyword, name, + tags=tags, steps=steps, + examples=examples) + scenario_outline.description = description + return scenario_outline + + def parse_steps(self, json_steps): + steps = [] + for json_step in json_steps: + step = self.parse_step(json_step) + steps.append(step) + return steps + + def parse_step(self, json_element): + """ + s = { + 'keyword': step.keyword, + 'step_type': step.step_type, + 'name': step.name, + 'location': step.location, + } + + if step.text: + s['text'] = step.text + if step.table: + s['table'] = self.make_table(step.table) + element = self.current_feature_element + element['steps'].append(s) + """ + keyword = json_element.get("keyword", u"") + name = json_element.get("name", u"") + step_type = json_element.get("step_type", u"") + location = json_element.get("location", u"") + text = json_element.get("text", None) + if isinstance(text, list): + text = "\n".join(text) + table = None + json_table = json_element.get("table", None) + if json_table: + table = self.parse_table(json_table) + filename, line = location.split(":") + step = model.Step(filename, line, keyword, step_type, name) + step.text = text + step.table = table + json_result = json_element.get("result", None) + if json_result: + self.add_step_result(step, json_result) + return step + + @staticmethod + def add_step_result(step, json_result): + """ + steps = self.current_feature_element['steps'] + steps[self._step_index]['result'] = { + 'status': result.status.name, + 'duration': result.duration, + } + """ + status_name = json_result.get("status", u"") + duration = json_result.get("duration", 0) + error_message = json_result.get("error_message", None) + if isinstance(error_message, list): + error_message = "\n".join(error_message) + step.status = Status.from_name(status_name) + step.duration = duration + step.error_message = error_message + + @staticmethod + def parse_table(json_table): + """ + table_data = { + 'headings': table.headings, + 'rows': [ list(row) for row in table.rows ] + } + return table_data + """ + headings = json_table.get("headings", []) + rows = json_table.get("rows", []) + table = model.Table(headings, rows=rows) + return table + + + def parse_examples(self, json_element): + """ + e = { + 'keyword': examples.keyword, + 'name': examples.name, + 'location': examples.location, + } + + if examples.table: + e['table'] = self.make_table(examples.table) + + element = self.current_feature_element + element['examples'].append(e) + """ + keyword = 
json_element.get("keyword", u"") + name = json_element.get("name", u"") + location = json_element.get("location", u"") + + table = None + json_table = json_element.get("table", None) + if json_table: + table = self.parse_table(json_table) + filename, line = location.split(":") + examples = model.Examples(filename, line, keyword, name, table) + return examples diff --git a/venv/Lib/site-packages/behave/log_capture.py b/venv/Lib/site-packages/behave/log_capture.py new file mode 100644 index 0000000..0a751f7 --- /dev/null +++ b/venv/Lib/site-packages/behave/log_capture.py @@ -0,0 +1,233 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import, print_function +from logging.handlers import BufferingHandler +import logging +import functools +import re + + +class RecordFilter(object): + """Implement logging record filtering as per the configuration + --logging-filter option. + """ + def __init__(self, names): + self.include = set() + self.exclude = set() + for name in names.split(','): + if name[0] == '-': + self.exclude.add(name[1:]) + else: + self.include.add(name) + + def filter(self, record): + if self.exclude: + return record.name not in self.exclude + return record.name in self.include + + +# originally from nostetsts logcapture plugin +class LoggingCapture(BufferingHandler): + """Capture logging events in a memory buffer for later display or query. + + Captured logging events are stored on the attribute + :attr:`~LoggingCapture.buffer`: + + .. attribute:: buffer + + This is a list of captured logging events as `logging.LogRecords`_. + + .. _`logging.LogRecords`: + http://docs.python.org/library/logging.html#logrecord-objects + + By default the format of the messages will be:: + + '%(levelname)s:%(name)s:%(message)s' + + This may be overridden using standard logging formatter names in the + configuration variable ``logging_format``. + + The level of logging captured is set to ``logging.NOTSET`` by default. You + may override this using the configuration setting ``logging_level`` (which + is set to a level name.) + + Finally there may be `filtering of logging events`__ specified by the + configuration variable ``logging_filter``. + + .. __: behave.html#command-line-arguments + """ + + def __init__(self, config, level=None): + BufferingHandler.__init__(self, 1000) + self.config = config + self.old_handlers = [] + self.old_level = None + + # set my formatter + log_format = datefmt = None + if config.logging_format: + log_format = config.logging_format + else: + log_format = '%(levelname)s:%(name)s:%(message)s' + if config.logging_datefmt: + datefmt = config.logging_datefmt + formatter = logging.Formatter(log_format, datefmt) + self.setFormatter(formatter) + + # figure the level we're logging at + if level is not None: + self.level = level + elif config.logging_level: + self.level = config.logging_level + else: + self.level = logging.NOTSET + + # construct my filter + if config.logging_filter: + self.addFilter(RecordFilter(config.logging_filter)) + + def __bool__(self): + return bool(self.buffer) + + def flush(self): + pass # do nothing + + def truncate(self): + self.buffer = [] + + def getvalue(self): + return '\n'.join(self.formatter.format(r) for r in self.buffer) + + def find_event(self, pattern): + """Search through the buffer for a message that matches the given + regular expression. + + Returns boolean indicating whether a match was found. 
+ """ + pattern = re.compile(pattern) + for record in self.buffer: + if pattern.search(record.getMessage()) is not None: + return True + return False + + def any_errors(self): + """Search through the buffer for any ERROR or CRITICAL events. + + Returns boolean indicating whether a match was found. + """ + return any(record for record in self.buffer + if record.levelname in ('ERROR', 'CRITICAL')) + + def inveigle(self): + """Turn on logging capture by replacing all existing handlers + configured in the logging module. + + If the config var logging_clear_handlers is set then we also remove + all existing handlers. + + We also set the level of the root logger. + + The opposite of this is :meth:`~LoggingCapture.abandon`. + """ + root_logger = logging.getLogger() + if self.config.logging_clear_handlers: + # kill off all the other log handlers + for logger in logging.Logger.manager.loggerDict.values(): + if hasattr(logger, "handlers"): + for handler in logger.handlers: + self.old_handlers.append((logger, handler)) + logger.removeHandler(handler) + + # sanity check: remove any existing LoggingCapture + for handler in root_logger.handlers[:]: + if isinstance(handler, LoggingCapture): + root_logger.handlers.remove(handler) + elif self.config.logging_clear_handlers: + self.old_handlers.append((root_logger, handler)) + root_logger.removeHandler(handler) + + # right, we're it now + root_logger.addHandler(self) + + # capture the level we're interested in + self.old_level = root_logger.level + root_logger.setLevel(self.level) + + def abandon(self): + """Turn off logging capture. + + If other handlers were removed by :meth:`~LoggingCapture.inveigle` then + they are reinstated. + """ + root_logger = logging.getLogger() + for handler in root_logger.handlers[:]: + if handler is self: + root_logger.handlers.remove(handler) + + if self.config.logging_clear_handlers: + for logger, handler in self.old_handlers: + logger.addHandler(handler) + + if self.old_level is not None: + # -- RESTORE: Old log.level before inveigle() was used. + root_logger.setLevel(self.old_level) + self.old_level = None + +# pre-1.2 backwards compatibility +MemoryHandler = LoggingCapture + + +def capture(*args, **kw): + """Decorator to wrap an *environment file function* in log file capture. + + It configures the logging capture using the *behave* context - the first + argument to the function being decorated (so don't use this to decorate + something that doesn't have *context* as the first argument.) + + The basic usage is: + + .. code-block: python + + @capture + def after_scenario(context, scenario): + ... + + The function prints any captured logging (at the level determined by the + ``log_level`` configuration setting) directly to stdout, regardless of + error conditions. + + It is mostly useful for debugging in situations where you are seeing a + message like:: + + No handlers could be found for logger "name" + + The decorator takes an optional "level" keyword argument which limits the + level of logging captured, overriding the level in the run's configuration: + + .. code-block: python + + @capture(level=logging.ERROR) + def after_scenario(context, scenario): + ... + + This would limit the logging captured to just ERROR and above, and thus + only display logged events if they are interesting. 
+ """ + def create_decorator(func, level=None): + def f(context, *args): + h = LoggingCapture(context.config, level=level) + h.inveigle() + try: + func(context, *args) + finally: + h.abandon() + v = h.getvalue() + if v: + print("Captured Logging:") + print(v) + return f + + if not args: + return functools.partial(create_decorator, level=kw.get("level")) + else: + return create_decorator(args[0]) diff --git a/venv/Lib/site-packages/behave/matchers.py b/venv/Lib/site-packages/behave/matchers.py new file mode 100644 index 0000000..540624d --- /dev/null +++ b/venv/Lib/site-packages/behave/matchers.py @@ -0,0 +1,422 @@ +# -*- coding: utf-8 -*- +""" +This module provides the step matchers functionality that matches a +step definition (as text) with step-functions that implement this step. +""" + +from __future__ import absolute_import, print_function, with_statement +import copy +import re +import warnings +import parse +import six +from parse_type import cfparse +from behave._types import ChainedExceptionUtil, ExceptionUtil +from behave.model_core import Argument, FileLocation, Replayable + + +# ----------------------------------------------------------------------------- +# SECTION: Exceptions +# ----------------------------------------------------------------------------- +class StepParseError(ValueError): + """Exception class, used when step matching fails before a step is run. + This is normally the case when an error occurs during the type conversion + of step parameters. + """ + + def __init__(self, text=None, exc_cause=None): + if not text and exc_cause: + text = six.text_type(exc_cause) + if exc_cause and six.PY2: + # -- NOTE: Python2 does not show chained-exception causes. + # Therefore, provide some hint (see also: PEP-3134). + cause_text = ExceptionUtil.describe(exc_cause, + use_traceback=True, + prefix="CAUSED-BY: ") + text += u"\n" + cause_text + + ValueError.__init__(self, text) + if exc_cause: + # -- CHAINED EXCEPTION (see: PEP 3134) + ChainedExceptionUtil.set_cause(self, exc_cause) + + + +# ----------------------------------------------------------------------------- +# SECTION: Model Elements +# ----------------------------------------------------------------------------- +class Match(Replayable): + """An parameter-matched *feature file* step name extracted using + step decorator `parameters`_. + + .. attribute:: func + + The step function that this match will be applied to. + + .. attribute:: arguments + + A list of :class:`~behave.model_core.Argument` instances containing the + matched parameters from the step name. 
+ """ + type = "match" + + def __init__(self, func, arguments=None): + super(Match, self).__init__() + self.func = func + self.arguments = arguments + self.location = None + if func: + self.location = self.make_location(func) + + def __repr__(self): + if self.func: + func_name = self.func.__name__ + else: + func_name = '' + return '' % (func_name, self.location) + + def __eq__(self, other): + if not isinstance(other, Match): + return False + return (self.func, self.location) == (other.func, other.location) + + def with_arguments(self, arguments): + match = copy.copy(self) + match.arguments = arguments + return match + + def run(self, context): + args = [] + kwargs = {} + for arg in self.arguments: + if arg.name is not None: + kwargs[arg.name] = arg.value + else: + args.append(arg.value) + + with context.use_with_user_mode(): + self.func(context, *args, **kwargs) + + @staticmethod + def make_location(step_function): + """Extracts the location information from the step function and + builds a FileLocation object with (filename, line_number) info. + + :param step_function: Function whose location should be determined. + :return: FileLocation object for step function. + """ + return FileLocation.for_function(step_function) + + +class NoMatch(Match): + """Used for an "undefined step" when it can not be matched with a + step definition. + """ + + def __init__(self): + Match.__init__(self, func=None) + self.func = None + self.arguments = [] + self.location = None + + +class MatchWithError(Match): + """Match class when error occur during step-matching + + REASON: + * Type conversion error occured. + * ... + """ + def __init__(self, func, error): + if not ExceptionUtil.has_traceback(error): + ExceptionUtil.set_traceback(error) + Match.__init__(self, func=func) + self.stored_error = error + + def run(self, context): + """Raises stored error from step matching phase (type conversion).""" + raise StepParseError(exc_cause=self.stored_error) + + + + +# ----------------------------------------------------------------------------- +# SECTION: Matchers +# ----------------------------------------------------------------------------- +class Matcher(object): + """Pull parameters out of step names. + + .. attribute:: pattern + + The match pattern attached to the step function. + + .. attribute:: func + + The step function the pattern is being attached to. + """ + schema = u"@%s('%s')" # Schema used to describe step definition (matcher) + + def __init__(self, func, pattern, step_type=None): + self.func = func + self.pattern = pattern + self.step_type = step_type + self._location = None + + # -- BACKWARD-COMPATIBILITY: + @property + def string(self): + warnings.warn("deprecated: Use 'pattern' instead", DeprecationWarning) + return self.pattern + + @property + def location(self): + if self._location is None: + self._location = Match.make_location(self.func) + return self._location + + @property + def regex_pattern(self): + """Return the used textual regex pattern.""" + # -- ASSUMPTION: pattern attribute provides regex-pattern + # NOTE: Method must be overridden if assumption is not met. + return self.pattern + + def describe(self, schema=None): + """Provide a textual description of the step function/matcher object. + + :param schema: Text schema to use. + :return: Textual description of this step definition (matcher). 
+ """ + step_type = self.step_type or "step" + if not schema: + schema = self.schema + return schema % (step_type, self.pattern) + + + def check_match(self, step): + """Match me against the "step" name supplied. + + Return None, if I don't match otherwise return a list of matches as + :class:`~behave.model_core.Argument` instances. + + The return value from this function will be converted into a + :class:`~behave.matchers.Match` instance by *behave*. + """ + raise NotImplementedError + + def match(self, step): + # -- PROTECT AGAINST: Type conversion errors (with ParseMatcher). + try: + result = self.check_match(step) + except Exception as e: # pylint: disable=broad-except + return MatchWithError(self.func, e) + + if result is None: + return None # -- NO-MATCH + return Match(self.func, result) + + def __repr__(self): + return u"<%s: %r>" % (self.__class__.__name__, self.pattern) + + +class ParseMatcher(Matcher): + """Uses :class:`~parse.Parser` class to be able to use simpler + parse expressions compared to normal regular expressions. + """ + custom_types = {} + parser_class = parse.Parser + + def __init__(self, func, pattern, step_type=None): + super(ParseMatcher, self).__init__(func, pattern, step_type) + self.parser = self.parser_class(pattern, self.custom_types) + + @property + def regex_pattern(self): + # -- OVERWRITTEN: Pattern as regex text. + return self.parser._expression # pylint: disable=protected-access + + def check_match(self, step): + # -- FAILURE-POINT: Type conversion of parameters may fail here. + # NOTE: Type converter should raise ValueError in case of PARSE ERRORS. + result = self.parser.parse(step) + if not result: + return None + + args = [] + for index, value in enumerate(result.fixed): + start, end = result.spans[index] + args.append(Argument(start, end, step[start:end], value)) + for name, value in result.named.items(): + start, end = result.spans[name] + args.append(Argument(start, end, step[start:end], value, name)) + args.sort(key=lambda x: x.start) + return args + + +class CFParseMatcher(ParseMatcher): + """Uses :class:`~parse_type.cfparse.Parser` instead of "parse.Parser". + Provides support for automatic generation of type variants + for fields with CardinalityField part. + """ + parser_class = cfparse.Parser + + +def register_type(**kw): + # pylint: disable=anomalous-backslash-in-string + # REQUIRED-BY: code example + """Registers a custom type that will be available to "parse" + for type conversion during step matching. + + Converters should be supplied as ``name=callable`` arguments (or as dict). + + A type converter should follow :pypi:`parse` module rules. + In general, a type converter is a function that converts text (as string) + into a value-type (type converted value). + + EXAMPLE: + + .. code-block:: python + + from behave import register_type, given + import parse + + # -- TYPE CONVERTER: For a simple, positive integer number. + @parse.with_pattern(r"\d+") + def parse_number(text): + return int(text) + + # -- REGISTER TYPE-CONVERTER: With behave + register_type(Number=parse_number) + + # -- STEP DEFINITIONS: Use type converter. 
+ @given('{amount:Number} vehicles') + def step_impl(context, amount): + assert isinstance(amount, int) + """ + ParseMatcher.custom_types.update(kw) + + +class RegexMatcher(Matcher): + def __init__(self, func, pattern, step_type=None): + super(RegexMatcher, self).__init__(func, pattern, step_type) + self.regex = re.compile(self.pattern) + + def check_match(self, step): + m = self.regex.match(step) + if not m: + return None + + groupindex = dict((y, x) for x, y in self.regex.groupindex.items()) + args = [] + for index, group in enumerate(m.groups()): + index += 1 + name = groupindex.get(index, None) + args.append(Argument(m.start(index), m.end(index), group, + group, name)) + + return args + +class SimplifiedRegexMatcher(RegexMatcher): + """Simplified regular expression step-matcher that automatically adds + start-of-line/end-of-line matcher symbols to string: + + .. code-block:: python + + @when(u'a step passes') # re.pattern = "^a step passes$" + def step_impl(context): pass + """ + + def __init__(self, func, pattern, step_type=None): + assert not (pattern.startswith("^") or pattern.endswith("$")), \ + "Regular expression should not use begin/end-markers: "+ pattern + expression = "^%s$" % pattern + super(SimplifiedRegexMatcher, self).__init__(func, expression, step_type) + self.pattern = pattern + + +class CucumberRegexMatcher(RegexMatcher): + """Compatible to (old) Cucumber style regular expressions. + Text must contain start-of-line/end-of-line matcher symbols to string: + + .. code-block:: python + + @when(u'^a step passes$') # re.pattern = "^a step passes$" + def step_impl(context): pass + """ + +matcher_mapping = { + "parse": ParseMatcher, + "cfparse": CFParseMatcher, + "re": SimplifiedRegexMatcher, + + # -- BACKWARD-COMPATIBLE REGEX MATCHER: Old Cucumber compatible style. + # To make it the default step-matcher use the following snippet: + # # -- FILE: features/environment.py + # from behave import use_step_matcher + # def before_all(context): + # use_step_matcher("re0") + "re0": CucumberRegexMatcher, +} +current_matcher = ParseMatcher # pylint: disable=invalid-name + + +def use_step_matcher(name): + """Change the parameter matcher used in parsing step text. + + The change is immediate and may be performed between step definitions in + your step implementation modules - allowing adjacent steps to use different + matchers if necessary. + + There are several parsers available in *behave* (by default): + + **parse** (the default, based on: :pypi:`parse`) + Provides a simple parser that replaces regular expressions for + step parameters with a readable syntax like ``{param:Type}``. + The syntax is inspired by the Python builtin ``string.format()`` + function. + Step parameters must use the named fields syntax of :pypi:`parse` + in step definitions. The named fields are extracted, + optionally type converted and then used as step function arguments. + + Supports type conversions by using type converters + (see :func:`~behave.register_type()`). + + **cfparse** (extends: :pypi:`parse`, requires: :pypi:`parse_type`) + Provides an extended parser with "Cardinality Field" (CF) support. + Automatically creates missing type converters for related cardinality + as long as a type converter for cardinality=1 is provided. + Supports parse expressions like: + + * ``{values:Type+}`` (cardinality=1..N, many) + * ``{values:Type*}`` (cardinality=0..N, many0) + * ``{value:Type?}`` (cardinality=0..1, optional) + + Supports type conversions (as above). 
+ + **re** + This uses full regular expressions to parse the clause text. You will + need to use named groups "(?P<name>...)" to define the variables pulled + from the text and passed to your ``step()`` function. + + Type conversion is **not supported**. + A step function writer may implement type conversion + inside the step function (implementation). + + You may `define your own matcher`_. + + .. _`define your own matcher`: api.html#step-parameters + """ + global current_matcher # pylint: disable=global-statement + current_matcher = matcher_mapping[name] + +def step_matcher(name): + """ + DEPRECATED, use :func:`use_step_matcher()` instead. + """ + # -- BACKWARD-COMPATIBLE NAME: Mark as deprecated. + warnings.warn("deprecated: Use 'use_step_matcher()' instead", + DeprecationWarning, stacklevel=2) + use_step_matcher(name) + +def get_matcher(func, pattern): + return current_matcher(func, pattern) diff --git a/venv/Lib/site-packages/behave/model.py b/venv/Lib/site-packages/behave/model.py new file mode 100644 index 0000000..c6ee94b --- /dev/null +++ b/venv/Lib/site-packages/behave/model.py @@ -0,0 +1,1748 @@ +# -*- coding: utf-8 -*- +# pylint: disable=too-many-lines +""" +This module provides the model element classes that represent a behave model: + +* :class:`Feature` +* :class:`Scenario` +* :class:`ScenarioOutline` +* :class:`Step` +* ... +""" + +from __future__ import absolute_import, with_statement +import copy +import difflib +import logging +import itertools +import time +import six +from six.moves import zip # pylint: disable=redefined-builtin +from behave.model_core import \ + Status, BasicStatement, TagAndStatusStatement, TagStatement, Replayable +from behave.matchers import NoMatch +from behave.textutil import text as _text +if six.PY2: + # -- USE PYTHON3 BACKPORT: With unicode traceback support. + import traceback2 as traceback +else: + import traceback + + +class Feature(TagAndStatusStatement, Replayable): + """A `feature`_ parsed from a *feature file*. + + The attributes are: + + .. attribute:: keyword + + This is the keyword as seen in the *feature file*. In English this will + be "Feature". + + .. attribute:: name + + The name of the feature (the text after "Feature".) + + .. attribute:: description + + The description of the feature as seen in the *feature file*. This is + stored as a list of text lines. + + .. attribute:: background + + The :class:`~behave.model.Background` for this feature, if any. + + .. attribute:: scenarios + + A list of :class:`~behave.model.Scenario` making up this feature. + + .. attribute:: tags + + A list of @tags (as :class:`~behave.model.Tag` which are basically + glorified strings) attached to the feature. + See :ref:`controlling things with tags`. + + .. attribute:: status + + Read-Only. A summary status of the feature's run. If read before the + feature is fully tested it will return "untested" otherwise it will + return one of: + + Status.untested + The feature has not been completely tested yet. + Status.skipped + One or more steps of this feature were passed over during testing. + Status.passed + The feature was tested successfully. + Status.failed + One or more steps of this feature failed. + + .. versionchanged:: 1.2.6 + Use Status enum class (was: string). + + .. attribute:: hook_failed + + Indicates if a hook failure occurred while running this feature. + + .. versionadded:: 1.2.6 + + .. attribute:: duration + + The time, in seconds, that it took to test this feature. If read before + the feature is tested it will return 0.0. + + .. attribute:: filename
+ + The file name (or "<string>") of the *feature file* where the feature + was found. + + .. attribute:: line + + The line number of the *feature file* where the feature was found. + + .. attribute:: language + + Indicates which spoken language (English, French, German, ..) was used + for parsing the feature file and its keywords. The I18N language code + indicates which language is used. This corresponds to the language tag + at the beginning of the feature file. + + .. versionadded:: 1.2.6 + + .. _`feature`: gherkin.html#features + """ + + type = "feature" + + def __init__(self, filename, line, keyword, name, tags=None, + description=None, scenarios=None, background=None, + language=None): + tags = tags or [] + super(Feature, self).__init__(filename, line, keyword, name, tags) + self.description = description or [] + self.scenarios = [] + self.background = background + self.language = language + self.parser = None + self.hook_failed = False + if scenarios: + for scenario in scenarios: + self.add_scenario(scenario) + + def reset(self): + """Reset to clean state before a test run.""" + super(Feature, self).reset() + self.hook_failed = False + for scenario in self.scenarios: + scenario.reset() + + def __repr__(self): + return '<Feature "%s": %d scenario(s)>' % \ + (self.name, len(self.scenarios)) + + def __iter__(self): + return iter(self.scenarios) + + def add_scenario(self, scenario): + scenario.feature = self + scenario.background = self.background + self.scenarios.append(scenario) + + def compute_status(self): + """Compute the status of this feature based on its: + * scenarios + * scenario outlines + * hook failures + + :return: Computed status (as string-enum). + """ + if self.hook_failed: + return Status.failed + + skipped = True + passed_count = 0 + for scenario in self.scenarios: + scenario_status = scenario.status + if scenario_status == Status.failed: + return Status.failed + elif scenario_status == Status.untested: + if passed_count > 0: + return Status.failed # ABORTED: Some passed, now untested. + return Status.untested + if scenario_status != Status.skipped: + skipped = False + if scenario_status == Status.passed: + passed_count += 1 + + if skipped: + return Status.skipped + else: + return Status.passed + + + @property + def duration(self): + # -- NEW: Background is executed N times, now part of scenarios. + feature_duration = 0.0 + for scenario in self.scenarios: + feature_duration += scenario.duration + return feature_duration + + def walk_scenarios(self, with_outlines=False): + """ + Provides a flat list of all scenarios of this feature. + A ScenarioOutline element adds its scenarios to this list. + But the ScenarioOutline element itself is only added when specified. + + A flat scenario list is useful when all scenarios of a feature + should be processed. + + :param with_outlines: If ScenarioOutline items should be added, too. + :return: List of all scenarios of this feature. + """ + all_scenarios = [] + for scenario in self.scenarios: + if isinstance(scenario, ScenarioOutline): + scenario_outline = scenario + if with_outlines: + all_scenarios.append(scenario_outline) + all_scenarios.extend(scenario_outline.scenarios) + else: + all_scenarios.append(scenario) + return all_scenarios + + def should_run(self, config=None): + """ + Determines if this Feature (and its scenarios) should run. + Implements the run decision logic for a feature.
+ The decision depends on: + + * if the Feature is marked as skipped + * if the config.tags (tag expression) enable/disable this feature + + :param config: Runner configuration to use (optional). + :return: True, if scenario should run. False, otherwise. + """ + answer = not self.should_skip + if answer and config: + answer = self.should_run_with_tags(config.tags) + return answer + + def should_run_with_tags(self, tag_expression): + """Determines if this feature should run when the tag expression is used. + A feature should run if: + * it should run according to its tags + * any of its scenarios should run according to its tags + + :param tag_expression: Runner/config environment tags to use. + :return: True, if feature should run. False, otherwise (skip it). + """ + run_feature = tag_expression.check(self.tags) + if not run_feature: + for scenario in self: + if scenario.should_run_with_tags(tag_expression): + run_feature = True + break + return run_feature + + def mark_skipped(self): + """Marks this feature (and all its scenarios and steps) as skipped. + Note this function may be called before the feature is executed. + """ + self.skip(require_not_executed=True) + assert self.status == Status.skipped or self.hook_failed + + def skip(self, reason=None, require_not_executed=False): + """Skip executing this feature or the remaining parts of it. + Note that this feature may be already partly executed + when this function is called. + + :param reason: Optional reason why feature should be skipped (as string). + :param require_not_executed: Optional, requires that feature is not + executed yet (default: false). + """ + if reason: + logger = logging.getLogger("behave") + logger.warning(u"SKIP FEATURE %s: %s", self.name, reason) + + self.clear_status() + self.should_skip = True + self.skip_reason = reason + for scenario in self.scenarios: + scenario.skip(reason, require_not_executed) + if not self.scenarios: + # -- SPECIAL CASE: Feature without scenarios + self.set_status(Status.skipped) + assert self.status in self.final_status #< skipped, failed or passed. + + def run(self, runner): + # pylint: disable=too-many-branches + # MAYBE: self.reset() + self.clear_status() + self.hook_failed = False + + runner.context._push(layer_name="feature") # pylint: disable=protected-access + runner.context.feature = self + runner.context.tags = set(self.tags) + + skip_feature_untested = runner.aborted + run_feature = self.should_run(runner.config) + failed_count = 0 + hooks_called = False + if not runner.config.dry_run and run_feature: + hooks_called = True + for tag in self.tags: + runner.run_hook("before_tag", runner.context, tag) + runner.run_hook("before_feature", runner.context, self) + if self.hook_failed: + failed_count += 1 + + # -- RE-EVALUATE SHOULD-RUN STATE: + # Hook may call feature.mark_skipped() to exclude it. + skip_feature_untested = self.hook_failed or runner.aborted + run_feature = self.should_run() + + # run this feature if the tags say so or any one of its scenarios + if run_feature or runner.config.show_skipped: + for formatter in runner.formatters: + formatter.feature(self) + if self.background: + for formatter in runner.formatters: + formatter.background(self.background) + + if not skip_feature_untested: + for scenario in self.scenarios: + # -- OPTIONAL: Select scenario by name (regular expressions). 
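+                    # -- NOTE (editorial, illustrative): "behave --name <regex>"
+                    #    populates runner.config.name; scenarios whose names do
+                    #    not match the selection are marked as skipped here.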
+
+                    if (runner.config.name and
+                            not scenario.should_run_with_name_select(runner.config)):
+                        scenario.mark_skipped()
+                        continue
+
+                    failed = scenario.run(runner)
+                    if failed:
+                        failed_count += 1
+                        if runner.config.stop or runner.aborted:
+                            # -- FAIL-EARLY: Stop after first failure.
+                            break
+
+            self.clear_status()  # -- ENFORCE: compute_status() after run.
+            if not self.scenarios and not run_feature:
+                # -- SPECIAL CASE: Feature without scenarios
+                self.set_status(Status.skipped)
+
+        if hooks_called:
+            runner.run_hook("after_feature", runner.context, self)
+            for tag in self.tags:
+                runner.run_hook("after_tag", runner.context, tag)
+            if self.hook_failed:
+                failed_count += 1
+                self.set_status(Status.failed)
+
+        # -- PERFORM CONTEXT CLEANUP: May raise cleanup errors.
+        try:
+            runner.context._pop()  # pylint: disable=protected-access
+        except Exception:
+            # -- CLEANUP-ERROR:
+            self.set_status(Status.failed)
+
+        if run_feature or runner.config.show_skipped:
+            for formatter in runner.formatters:
+                formatter.eof()
+
+        failed = (failed_count > 0)
+        return failed
+
+
+class Background(BasicStatement, Replayable):
+    """A `background`_ parsed from a *feature file*.
+
+    The attributes are:
+
+    .. attribute:: keyword
+
+       This is the keyword as seen in the *feature file*. In English this will
+       typically be "Background".
+
+    .. attribute:: name
+
+       The name of the background (the text after "Background:".)
+
+    .. attribute:: steps
+
+       A list of :class:`~behave.model.Step` making up this background.
+
+    .. attribute:: duration
+
+       The time, in seconds, that it took to run this background. If read
+       before the background is run it will return 0.0.
+
+    .. attribute:: filename
+
+       The file name (or "<string>") of the *feature file* where the
+       background was found.
+
+    .. attribute:: line
+
+       The line number of the *feature file* where the background was found.
+
+    .. _`background`: gherkin.html#backgrounds
+    """
+    type = "background"
+
+    def __init__(self, filename, line, keyword, name, steps=None):
+        super(Background, self).__init__(filename, line, keyword, name)
+        self.steps = steps or []
+
+    def __repr__(self):
+        return '<Background "%s">' % self.name
+
+    def __iter__(self):
+        return iter(self.steps)
+
+    @property
+    def duration(self):
+        duration = 0
+        for step in self.steps:
+            duration += step.duration
+        return duration
+
+
+class Scenario(TagAndStatusStatement, Replayable):
+    """A `scenario`_ parsed from a *feature file*.
+
+    The attributes are:
+
+    .. attribute:: keyword
+
+       This is the keyword as seen in the *feature file*. In English this will
+       typically be "Scenario".
+
+    .. attribute:: name
+
+       The name of the scenario (the text after "Scenario:".)
+
+    .. attribute:: description
+
+       The description of the scenario as seen in the *feature file*.
+       This is stored as a list of text lines.
+
+    .. attribute:: feature
+
+       The :class:`~behave.model.Feature` this scenario belongs to.
+
+    .. attribute:: steps
+
+       A list of :class:`~behave.model.Step` making up this scenario.
+
+    .. attribute:: tags
+
+       A list of @tags (as :class:`~behave.model.Tag` which are basically
+       glorified strings) attached to the scenario.
+       See :ref:`controlling things with tags`.
+
+    .. attribute:: status
+
+       Read-Only. A summary status of the scenario's run. If read before the
+       scenario is fully tested it will return "untested" otherwise it will
+       return one of:
+
+       Status.untested
+         The scenario has not been completely tested yet.
+       Status.skipped
+         One or more steps of this scenario were passed over during testing.
+       Status.passed
+         The scenario was tested successfully.
+       Status.failed
+         One or more steps of this scenario failed.
+
+       .. versionchanged:: 1.2.6
+          Use Status enum class (was: string)
+
+    .. attribute:: hook_failed
+
+       Indicates if a hook failure occurred while running this scenario.
+
+       .. versionadded:: 1.2.6
+
+    .. attribute:: duration
+
+       The time, in seconds, that it took to test this scenario. If read before
+       the scenario is tested it will return 0.0.
+
+    .. attribute:: filename
+
+       The file name (or "<string>") of the *feature file* where the scenario
+       was found.
+
+    .. attribute:: line
+
+       The line number of the *feature file* where the scenario was found.
+
+    .. _`scenario`: gherkin.html#scenarios
+    """
+    # pylint: disable=too-many-instance-attributes
+    type = "scenario"
+    continue_after_failed_step = False
+
+    def __init__(self, filename, line, keyword, name, tags=None, steps=None,
+                 description=None):
+        tags = tags or []
+        super(Scenario, self).__init__(filename, line, keyword, name, tags)
+        self.description = description or []
+        self.steps = steps or []
+        self.background = None
+        self.feature = None  # REFER-TO: owner=Feature
+        self.hook_failed = False
+        self._background_steps = None
+        self._row = None
+        self.was_dry_run = False
+
+    def reset(self):
+        """Reset the internal data to reintroduce new-born state just after the
+        ctor was called.
+        """
+        super(Scenario, self).reset()
+        self.hook_failed = False
+        self._row = None
+        self.was_dry_run = False
+        for step in self.all_steps:
+            step.reset()
+
+    @property
+    def background_steps(self):
+        """Provide background steps if feature has a background.
+        Lazy init that copies the background steps.
+
+        Note that a copy of the background steps is needed to ensure
+        that the background step status is specific to the scenario.
+
+        :return: List of background steps or empty list
+        """
+        if self._background_steps is None:
+            # -- LAZY-INIT (need copy of background.steps):
+            # Each scenario needs own background.steps.
+            # Otherwise, background step status of the last-run scenario is used.
+            steps = []
+            if self.background:
+                steps = [copy.copy(step) for step in self.background.steps]
+            self._background_steps = steps
+        return self._background_steps
+
+    @property
+    def all_steps(self):
+        """Returns iterator to all steps, including background steps if any."""
+        if self.background is not None:
+            return itertools.chain(self.background_steps, self.steps)
+        else:
+            return iter(self.steps)
+
+    def __repr__(self):
+        return '<Scenario "%s">' % self.name
+
+    def __iter__(self):
+        return self.all_steps
+
+    def compute_status(self):
+        """Compute the status of the scenario from its steps
+        (and hook failures).
+
+        :return: Computed status (as enum value).
+        """
+        if self.hook_failed:
+            return Status.failed
+
+        for step in self.all_steps:
+            if step.status == Status.undefined:
+                if self.was_dry_run:
+                    # -- SPECIAL CASE: In dry-run with undefined-step discovery
+                    # Undefined steps should not cause failed scenario.
+                    return Status.untested
+                else:
+                    # -- NORMALLY: Undefined steps cause failed scenario.
+                    return Status.failed
+            elif step.status != Status.passed:
+                # pylint: disable=line-too-long
+                assert step.status in (Status.failed, Status.skipped, Status.untested)
+                return step.status
+        return Status.passed
+
+    @property
+    def duration(self):
+        # -- ORIG: for step in self.steps: Background steps were excluded.
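+        # -- NOTE (editorial): the duration now aggregates background steps as
+        #    well, since all_steps chains background_steps and steps.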
+ scenario_duration = 0 + for step in self.all_steps: + scenario_duration += step.duration + return scenario_duration + + @property + def effective_tags(self): + """ + Effective tags for this scenario: + * own tags + * tags inherited from its feature + """ + tags = self.tags + if self.feature: + tags = self.feature.tags + self.tags + return tags + + def should_run(self, config=None): + """ + Determines if this Scenario (or ScenarioOutline) should run. + Implements the run decision logic for a scenario. + The decision depends on: + + * if the Scenario is marked as skipped + * if the config.tags (tag expression) enable/disable this scenario + * if the scenario is selected by name + + :param config: Runner configuration to use (optional). + :return: True, if scenario should run. False, otherwise. + """ + answer = not self.should_skip + if answer and config: + answer = (self.should_run_with_tags(config.tags) and + self.should_run_with_name_select(config)) + return answer + + def should_run_with_tags(self, tag_expression): + """ + Determines if this scenario should run when the tag expression is used. + + :param tag_expression: Runner/config environment tags to use. + :return: True, if scenario should run. False, otherwise (skip it). + """ + return tag_expression.check(self.effective_tags) + + def should_run_with_name_select(self, config): + """Determines if this scenario should run when it is selected by name. + + :param config: Runner/config environment name regexp (if any). + :return: True, if scenario should run. False, otherwise (skip it). + """ + # -- SELECT-ANY: If select by name is not specified (not config.name). + return not config.name or config.name_re.search(self.name) + + def mark_skipped(self): + """Marks this scenario (and all its steps) as skipped. + Note that this method can be called before the scenario is executed. + """ + self.skip(require_not_executed=True) + assert self.status == Status.skipped or self.hook_failed, \ + "OOPS: scenario.status=%s" % self.status.name + + def skip(self, reason=None, require_not_executed=False): + """Skip from executing this scenario or the remaining parts of it. + Note that the scenario may be already partly executed + when this method is called. + + :param reason: Optional reason why it should be skipped (as string). 
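+        :param require_not_executed: Optional, requires that the scenario and
+            its steps are not executed yet (default: false).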
+ """ + if reason: + scenario_type = self.__class__.__name__ + logger = logging.getLogger("behave") + logger.warning(u"SKIP %s %s: %s", scenario_type, self.name, reason) + + self.clear_status() + self.should_skip = True + self.skip_reason = reason + for step in self.all_steps: + not_executed = step.status in (Status.untested, Status.skipped) + if not_executed: + step.status = Status.skipped + else: + assert not require_not_executed, \ + "REQUIRE NOT-EXECUTED, but step is %s" % step.status + + scenario_without_steps = not self.steps and not self.background_steps + if scenario_without_steps: + self.set_status(Status.skipped) + assert self.status in self.final_status #< skipped, failed or passed + + def run(self, runner): + # pylint: disable=too-many-branches, too-many-statements + self.clear_status() + self.captured.reset() + self.hook_failed = False + failed = False + skip_scenario_untested = runner.aborted + run_scenario = self.should_run(runner.config) + run_steps = run_scenario and not runner.config.dry_run + dry_run_scenario = run_scenario and runner.config.dry_run + self.was_dry_run = dry_run_scenario + + runner.context._push(layer_name="scenario") # pylint: disable=protected-access + runner.context.scenario = self + runner.context.tags = set(self.effective_tags) + + hooks_called = False + if not runner.config.dry_run and run_scenario: + hooks_called = True + for tag in self.tags: + runner.run_hook("before_tag", runner.context, tag) + runner.run_hook("before_scenario", runner.context, self) + if self.hook_failed: + # -- SKIP: Scenario steps and behave like dry_run_scenario + failed = True + + # -- RE-EVALUATE SHOULD-RUN STATE: + # Hook may call scenario.mark_skipped() to exclude it. + skip_scenario_untested = self.hook_failed or runner.aborted + run_scenario = self.should_run() + run_steps = run_scenario and not runner.config.dry_run + + if run_scenario or runner.config.show_skipped: + for formatter in runner.formatters: + formatter.scenario(self) + + # TODO: Reevaluate location => Move in front of hook-calls + runner.setup_capture() + + if run_scenario or runner.config.show_skipped: + for step in self: + for formatter in runner.formatters: + formatter.step(step) + + if not skip_scenario_untested: + for step in self.all_steps: + if run_steps: + if not step.run(runner): + # -- CASE: Failed or undefined step + # Optionally continue_after_failed_step if enabled. + # But disable run_steps after undefined-step. + run_steps = (self.continue_after_failed_step and + step.status == Status.failed) + failed = True + # pylint: disable=protected-access + runner.context._set_root_attribute("failed", True) + self.set_status(Status.failed) + elif self.should_skip: + # -- CASE: Step skipped remaining scenario. + # assert self.status == Status.skipped + run_steps = False + elif failed or dry_run_scenario: + # -- SKIP STEPS: After failure/undefined-step occurred. + # BUT: Detect all remaining undefined steps. + step.status = Status.skipped + if dry_run_scenario: + # pylint: disable=redefined-variable-type + step.status = Status.untested + found_step_match = runner.step_registry.find_match(step) + if not found_step_match: + step.status = Status.undefined + runner.undefined_steps.append(step) + elif dry_run_scenario: + # -- BETTER DIAGNOSTICS: Provide step file location + # (when --format=pretty is used). + assert step.status == Status.untested + for formatter in runner.formatters: + # -- EMULATE: Step.run() protocol w/o step execution. 
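+                            # -- NOTE (editorial): formatters still receive the
+                            #    match/result events, so a dry-run with the
+                            #    "pretty" formatter can report step locations
+                            #    without executing any step implementation.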
+ formatter.match(found_step_match) + formatter.result(step) + else: + # -- SKIP STEPS: For disabled scenario. + # CASES: + # * Undefined steps are not detected (by intention). + # * Step skipped remaining scenario. + step.status = Status.skipped + + self.clear_status() # -- ENFORCE: compute_status() after run. + if not run_scenario and not self.steps: + # -- SPECIAL CASE: Scenario without steps. + self.set_status(Status.skipped) + + + if hooks_called: + runner.run_hook("after_scenario", runner.context, self) + for tag in self.tags: + runner.run_hook("after_tag", runner.context, tag) + if self.hook_failed: + self.set_status(Status.failed) + failed = True + + # -- PERFORM CONTEXT-CLEANUP: May raise cleanup errors. + try: + runner.context._pop() # pylint: disable=protected-access + except Exception: + self.set_status(Status.failed) + failed = True + + # -- CAPTURED-OUTPUT: + store_captured = (runner.config.junit or self.status == Status.failed) + if store_captured: + self.captured = runner.capture_controller.captured + + runner.teardown_capture() + return failed + + +class ScenarioOutlineBuilder(object): + """Helper class to use a ScenarioOutline as a template and + build its scenarios (as template instances). + """ + + def __init__(self, annotation_schema): + self.annotation_schema = annotation_schema + + @staticmethod + def render_template(text, row=None, params=None): + """Render a text template with placeholders, ala "Hello ". + + :param row: As placeholder provider (dict-like). + :param params: As additional placeholder provider (as dict). + :return: Rendered text, known placeholders are substituted w/ values. + """ + if not ("<" in text and ">" in text): + return text + + safe_values = False + for placeholders in (row, params): + if not placeholders: + continue + for name, value in placeholders.items(): + if safe_values and ("<" in value and ">" in value): + continue # -- OOPS, value looks like placeholder. + text = text.replace("<%s>" % name, value) + return text + + def make_scenario_name(self, outline_name, example, row, params=None): + """Build a scenario name for an example row of this scenario outline. + Placeholders for row data are replaced by values. + + SCHEMA: "{outline_name} -*- {examples.name}@{row.id}" + + :param outline_name: ScenarioOutline's name (as template). + :param example: Examples object. + :param row: Row of this example. + :param params: Additional placeholders for example/row. + :return: Computed name for the scenario representing example/row. + """ + if params is None: + params = {} + params["examples.name"] = example.name or "" + params.setdefault("examples.index", example.index) + params.setdefault("row.index", row.index) + params.setdefault("row.id", row.id) + + # -- STEP: Replace placeholders in scenario/example name (if any). 
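+        # -- EXAMPLE (editorial, illustrative): an outline named
+        #    'Login as <user>' with row data {"user": "alice"} renders to
+        #    'Login as alice' before the annotation schema is applied.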
+ examples_name = self.render_template(example.name, row, params) + params["examples.name"] = examples_name + scenario_name = self.render_template(outline_name, row, params) + + class Data(object): + def __init__(self, name, index): + self.name = name + self.index = index + self.id = name # pylint: disable=invalid-name + + example_data = Data(examples_name, example.index) + row_data = Data(row.id, row.index) + return self.annotation_schema.format(name=scenario_name, + examples=example_data, row=row_data) + + @classmethod + def make_row_tags(cls, outline_tags, row, params=None): + if not outline_tags: + return [] + + tags = [] + for tag in outline_tags: + if "<" in tag and ">" in tag: + tag = cls.render_template(tag, row, params) + if "<" in tag or ">" in tag: + # -- OOPS: Unknown placeholder, drop tag. + continue + new_tag = Tag.make_name(tag, unescape=True) + tags.append(new_tag) + return tags + + @classmethod + def make_step_for_row(cls, outline_step, row, params=None): + # -- BASED-ON: new_step = outline_step.set_values(row) + new_step = copy.deepcopy(outline_step) + new_step.name = cls.render_template(new_step.name, row, params) + if new_step.text: + new_step.text = cls.render_template(new_step.text, row) + if new_step.table: + for name, value in row.items(): + for row in new_step.table: + for i, cell in enumerate(row.cells): + row.cells[i] = cell.replace("<%s>" % name, value) + return new_step + + def build_scenarios(self, scenario_outline): + """Build scenarios for a ScenarioOutline from its examples.""" + # -- BUILD SCENARIOS (once): For this ScenarioOutline from examples. + params = { + "examples.name": None, + "examples.index": None, + "row.index": None, + "row.id": None, + } + scenarios = [] + for example_index, example in enumerate(scenario_outline.examples): + example.index = example_index+1 + params["examples.name"] = example.name + params["examples.index"] = _text(example.index) + for row_index, row in enumerate(example.table): + row.index = row_index+1 + row.id = "%d.%d" % (example.index, row.index) + params["row.id"] = row.id + params["row.index"] = _text(row.index) + scenario_name = self.make_scenario_name(scenario_outline.name, + example, row, params) + row_tags = self.make_row_tags(scenario_outline.tags, row, params) + row_tags.extend(example.tags) + new_steps = [] + for outline_step in scenario_outline.steps: + new_step = self.make_step_for_row(outline_step, row, params) + new_steps.append(new_step) + + # -- STEP: Make Scenario name for this row. + # scenario_line = example.line + 2 + row_index + scenario_line = row.line + scenario = Scenario(scenario_outline.filename, scenario_line, + scenario_outline.keyword, + scenario_name, row_tags, new_steps) + scenario.feature = scenario_outline.feature + scenario.background = scenario_outline.background + scenario._row = row # pylint: disable=protected-access + scenarios.append(scenario) + return scenarios + + +class ScenarioOutline(Scenario): + """A `scenario outline`_ parsed from a *feature file*. + + A scenario outline extends the existing :class:`~behave.model.Scenario` + class with the addition of the :class:`~behave.model.Examples` tables of + data from the *feature file*. + + The attributes are: + + .. attribute:: keyword + + This is the keyword as seen in the *feature file*. In English this will + typically be "Scenario Outline". + + .. attribute:: name + + The name of the scenario (the text after "Scenario Outline:".) + + .. 
attribute:: description
+
+       The description of the `scenario outline`_ as seen in the *feature file*.
+       This is stored as a list of text lines.
+
+    .. attribute:: feature
+
+       The :class:`~behave.model.Feature` this scenario outline belongs to.
+
+    .. attribute:: steps
+
+       A list of :class:`~behave.model.Step` making up this scenario outline.
+
+    .. attribute:: examples
+
+       A list of :class:`~behave.model.Examples` used by this scenario outline.
+
+    .. attribute:: tags
+
+       A list of @tags (as :class:`~behave.model.Tag` which are basically
+       glorified strings) attached to the scenario.
+       See :ref:`controlling things with tags`.
+
+    .. attribute:: status
+
+       Read-Only. A summary status of the scenario outline's run. If read
+       before the scenario is fully tested it will return "untested" otherwise
+       it will return one of:
+
+       Status.untested
+         The scenario has not been completely tested yet.
+       Status.skipped
+         One or more scenarios of this outline were passed over during testing.
+       Status.passed
+         The scenario was tested successfully.
+       Status.failed
+         One or more scenarios of this outline failed.
+
+       .. versionchanged:: 1.2.6
+          Use Status enum class (was: string)
+
+    .. attribute:: duration
+
+       The time, in seconds, that it took to test the scenarios of this
+       outline. If read before the scenarios are tested it will return 0.0.
+
+    .. attribute:: filename
+
+       The file name (or "<string>") of the *feature file* where the scenario
+       was found.
+
+    .. attribute:: line
+
+       The line number of the *feature file* where the scenario was found.
+
+    .. _`scenario outline`: gherkin.html#scenario-outlines
+    """
+    type = "scenario_outline"
+    annotation_schema = u"{name} -- @{row.id} {examples.name}"
+
+    def __init__(self, filename, line, keyword, name, tags=None,
+                 steps=None, examples=None, description=None):
+        super(ScenarioOutline, self).__init__(filename, line, keyword, name,
+                                              tags, steps, description)
+        self.examples = examples or []
+        self._scenarios = []
+
+    def reset(self):
+        """Reset runtime temporary data like before a test run."""
+        super(ScenarioOutline, self).reset()
+        for scenario in self._scenarios:  # -- AVOID: BUILD-SCENARIOS
+            scenario.reset()
+
+    @property
+    def scenarios(self):
+        """Return the scenarios with the steps altered to take the values from
+        the examples.
+        """
+        if self._scenarios:
+            return self._scenarios
+
+        # -- BUILD SCENARIOS (once): For this ScenarioOutline from examples.
+        builder = ScenarioOutlineBuilder(self.annotation_schema)
+        self._scenarios = builder.build_scenarios(self)
+        return self._scenarios
+
+    def __repr__(self):
+        return '<ScenarioOutline "%s">' % self.name
+
+    def __iter__(self):
+        return iter(self.scenarios)  # -- REQUIRE: BUILD-SCENARIOS
+
+    def compute_status(self):
+        skipped_count = 0
+        for scenario in self._scenarios:  # -- AVOID: BUILD-SCENARIOS
+            scenario_status = scenario.status
+            if scenario_status in (Status.failed, Status.untested):
+                return scenario_status
+            elif scenario_status == Status.skipped:
+                skipped_count += 1
+        if skipped_count > 0 and skipped_count == len(self._scenarios):
+            # -- ALL SKIPPED:
+            return Status.skipped
+        # -- OTHERWISE: ALL PASSED (some scenarios may have been excluded)
+        return Status.passed
+
+    @property
+    def duration(self):
+        outline_duration = 0
+        for scenario in self._scenarios:  # -- AVOID: BUILD-SCENARIOS
+            outline_duration += scenario.duration
+        return outline_duration
+
+    def should_run_with_tags(self, tag_expression):
+        """Determines if this scenario outline (or one of its scenarios)
+        should run when the tag expression is used.
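+        The effective tags of the outline are checked first; if they do not
+        match, each generated scenario is checked (which requires building
+        the scenarios from the examples).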
+ + :param tag_expression: Runner/config environment tags to use. + :return: True, if scenario should run. False, otherwise (skip it). + """ + if tag_expression.check(self.effective_tags): + return True + + for scenario in self.scenarios: # -- REQUIRE: BUILD-SCENARIOS + if scenario.should_run_with_tags(tag_expression): + return True + # -- NOTHING SELECTED: + return False + + def should_run_with_name_select(self, config): + """Determines if this scenario should run when it is selected by name. + + :param config: Runner/config environment name regexp (if any). + :return: True, if scenario should run. False, otherwise (skip it). + """ + if not config.name: + return True # -- SELECT-ALL: Select by name is not specified. + + for scenario in self.scenarios: # -- REQUIRE: BUILD-SCENARIOS + if scenario.should_run_with_name_select(config): + return True + # -- NOTHING SELECTED: + return False + + + def mark_skipped(self): + """Marks this scenario outline (and all its scenarios/steps) as skipped. + Note that this method may be called before the scenario outline + is executed. + """ + self.skip(require_not_executed=True) + assert self.status == Status.skipped + + def skip(self, reason=None, require_not_executed=False): + """Skip from executing this scenario outline or its remaining parts. + Note that the scenario outline may be already partly executed + when this method is called. + + :param reason: Optional reason why it should be skipped (as string). + """ + if reason: + logger = logging.getLogger("behave") + logger.warning(u"SKIP ScenarioOutline %s: %s", self.name, reason) + + self.clear_status() + self.should_skip = True + for scenario in self.scenarios: + scenario.skip(reason, require_not_executed) + if not self.scenarios: + # -- SPECIAL CASE: ScenarioOutline without scenarios/examples + self.set_status(Status.skipped) + assert self.status in self.final_status #< skipped, failed or passed + + def run(self, runner): + # pylint: disable=protected-access + # REASON: context._set_root_attribute(), scenario._row + self.clear_status() + failed_count = 0 + for scenario in self.scenarios: # -- REQUIRE: BUILD-SCENARIOS + runner.context._set_root_attribute("active_outline", scenario._row) + failed = scenario.run(runner) + if failed: + failed_count += 1 + if runner.config.stop or runner.aborted: + # -- FAIL-EARLY: Stop after first failure. + break + runner.context._set_root_attribute("active_outline", None) + return failed_count > 0 + +class Examples(TagStatement, Replayable): + """A table parsed from a `scenario outline`_ in a *feature file*. + + The attributes are: + + .. attribute:: keyword + + This is the keyword as seen in the *feature file*. In English this will + typically be "Example". + + .. attribute:: name + + The name of the example (the text after "Example:".) + + .. attribute:: table + + An instance of :class:`~behave.model.Table` that came with the example + in the *feature file*. + + .. attribute:: filename + + The file name (or "") of the *feature file* where the example + was found. + + .. attribute:: line + + The line number of the *feature file* where the example was found. + + .. _`examples`: gherkin.html#examples + """ + type = "examples" + + def __init__(self, filename, line, keyword, name, tags=None, table=None): + super(Examples, self).__init__(filename, line, keyword, name, tags) + self.table = table + self.index = None + + +class Step(BasicStatement, Replayable): + """A single `step`_ parsed from a *feature file*. + + The attributes are: + + .. 
attribute:: keyword + + This is the keyword as seen in the *feature file*. In English this will + typically be "Given", "When", "Then" or a number of other words. + + .. attribute:: name + + The name of the step (the text after "Given" etc.) + + .. attribute:: step_type + + The type of step as determined by the keyword. If the keyword is "and" + then the previous keyword in the *feature file* will determine this + step's step_type. + + .. attribute:: text + + An instance of :class:`~behave.model.Text` that came with the step + in the *feature file*. + + .. attribute:: table + + An instance of :class:`~behave.model.Table` that came with the step + in the *feature file*. + + .. attribute:: status + + Read-Only. A summary status of the step's run. If read before the + step is tested it will return "untested" otherwise it will + return one of: + + Status.untested + This step was not run (yet). + Status.skipped + This step was skipped during testing. + Status.passed + The step was tested successfully. + Status.failed + The step failed. + Status.undefined + The step has no matching step implementation. + + .. versionchanged:: + Use Status enum class (was: string). + + .. attribute:: hook_failed + + Indicates if a hook failure occured while running this step. + + .. versionadded:: 1.2.6 + + .. attribute:: duration + + The time, in seconds, that it took to test this step. If read before the + step is tested it will return 0.0. + + .. attribute:: error_message + + If the step failed then this will hold any error information, as a + single string. It will otherwise be None. + + .. versionchanged:: 1.2.6 (moved to base class) + + .. attribute:: filename + + The file name (or "") of the *feature file* where the step was + found. + + .. attribute:: line + + The line number of the *feature file* where the step was found. + + .. _`step`: gherkin.html#steps + """ + type = "step" + + def __init__(self, filename, line, keyword, step_type, name, text=None, + table=None): + super(Step, self).__init__(filename, line, keyword, name) + self.step_type = step_type + self.text = text + self.table = table + + self.status = Status.untested + self.hook_failed = False + self.duration = 0 + + def reset(self): + """Reset temporary runtime data to reach clean state again.""" + super(Step, self).reset() + self.status = Status.untested + self.hook_failed = False + self.duration = 0 + # -- POSTCONDITION: assert self.status == Status.untested + + def __repr__(self): + return '<%s "%s">' % (self.step_type, self.name) + + def __eq__(self, other): + return (self.step_type, self.name) == (other.step_type, other.name) + + def __hash__(self): + return hash(self.step_type) + hash(self.name) + + def set_values(self, table_row): + """Clone a new step from this one, used for ScenarioOutline. + Replace ScenarioOutline placeholders w/ values. + + :param table_row: Placeholder data for example row. + :return: Cloned, adapted step object. + + .. note:: Deprecating + Use 'ScenarioOutlineBuilder.make_step_for_row()' instead. + """ + import warnings + warnings.warn("Use 'ScenarioOutline.make_step_for_row()' instead", + PendingDeprecationWarning, stacklevel=2) + outline_step = self + return ScenarioOutlineBuilder.make_step_for_row(outline_step, table_row) + + def run(self, runner, quiet=False, capture=True): + # pylint: disable=too-many-branches, too-many-statements + # -- RESET: Run-time information. 
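+        # -- NOTE (editorial): run() returns "keep_going"; it is False when
+        #    the step fails or is undefined, which lets the caller stop the
+        #    scenario (unless continue_after_failed_step is enabled).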
+ # self.status = Status.untested + # self.hook_failed = False + self.reset() + + match = runner.step_registry.find_match(self) + if match is None: + runner.undefined_steps.append(self) + if not quiet: + for formatter in runner.formatters: + formatter.match(NoMatch()) + + self.status = Status.undefined + if not quiet: + for formatter in runner.formatters: + formatter.result(self) + return False + + keep_going = True + error = u"" + + if not quiet: + for formatter in runner.formatters: + formatter.match(match) + + if capture: + runner.start_capture() + + skip_step_untested = False + runner.run_hook("before_step", runner.context, self) + if self.hook_failed: + skip_step_untested = True + + start = time.time() + if not skip_step_untested: + try: + # -- ENSURE: + # * runner.context.text/.table attributes are reset (#66). + # * Even EMPTY multiline text is available in context. + runner.context.text = self.text + runner.context.table = self.table + match.run(runner.context) + if self.status == Status.untested: + # -- NOTE: Executed step may have skipped scenario and itself. + # pylint: disable=redefined-variable-type + self.status = Status.passed + except KeyboardInterrupt as e: + runner.aborted = True + error = u"ABORTED: By user (KeyboardInterrupt)." + self.status = Status.failed + self.store_exception_context(e) + except AssertionError as e: + self.status = Status.failed + self.store_exception_context(e) + if e.args: + message = _text(e) + error = u"Assertion Failed: "+ message + else: + # no assertion text; format the exception + error = _text(traceback.format_exc()) + except Exception as e: # pylint: disable=broad-except + self.status = Status.failed + error = _text(traceback.format_exc()) + self.store_exception_context(e) + + self.duration = time.time() - start + runner.run_hook("after_step", runner.context, self) + if self.hook_failed: + self.status = Status.failed + + if capture: + runner.stop_capture() + + # flesh out the failure with details + store_captured_always = False # PREPARED + store_captured = self.status == Status.failed or store_captured_always + if self.status == Status.failed: + assert isinstance(error, six.text_type) + if capture: + # -- CAPTURE-ONLY: Non-nested step failures. + self.captured = runner.capture_controller.captured + error2 = self.captured.make_report() + if error2: + error += "\n" + error2 + self.error_message = error + keep_going = False + elif store_captured and capture: + self.captured = runner.capture_controller.captured + + if not quiet: + for formatter in runner.formatters: + formatter.result(self) + + return keep_going + + +class Table(Replayable): + """A `table`_ extracted from a *feature file*. + + Table instance data is accessible using a number of methods: + + **iteration** + Iterating over the Table will yield the :class:`~behave.model.Row` + instances from the .rows attribute. + + **indexed access** + Individual rows may be accessed directly by index on the Table instance; + table[0] gives the first non-heading row and table[-1] gives the last + row. + + The attributes are: + + .. attribute:: headings + + The headings of the table as a list of strings. + + .. attribute:: rows + + An list of instances of :class:`~behave.model.Row` that make up the body + of the table in the *feature file*. + + Tables are also comparable, for what that's worth. Headings and row data + are compared. + + .. 
_`table`: gherkin.html#table + """ + type = "table" + + def __init__(self, headings, line=None, rows=None): + Replayable.__init__(self) + self.headings = headings + self.line = line + self.rows = [] + if rows: + for row in rows: + self.add_row(row, line) + + def add_row(self, row, line=None): + self.rows.append(Row(self.headings, row, line)) + + def add_column(self, column_name, values=None, default_value=u""): + """Adds a new column to this table. + Uses :param:`default_value` for new cells (if :param:`values` are + not provided). param:`values` are extended with :param:`default_value` + if values list is smaller than the number of table rows. + + :param column_name: Name of new column (as string). + :param values: Optional list of cell values in new column. + :param default_value: Default value for cell (if values not provided). + :returns: Index of new column (as number). + """ + # assert isinstance(column_name, unicode) + assert not self.has_column(column_name) + if values is None: + values = [default_value] * len(self.rows) + elif not isinstance(values, list): + values = list(values) + if len(values) < len(self.rows): + more_size = len(self.rows) - len(values) + more_values = [default_value] * more_size + values.extend(more_values) + + new_column_index = len(self.headings) + self.headings.append(column_name) + for row, value in zip(self.rows, values): + assert len(row.cells) == new_column_index + row.cells.append(value) + return new_column_index + + def remove_column(self, column_name): + if not isinstance(column_name, int): + try: + column_index = self.get_column_index(column_name) + except ValueError: + raise KeyError("column=%s is unknown" % column_name) + + assert isinstance(column_index, int) + assert column_index < len(self.headings) + del self.headings[column_index] + for row in self.rows: + assert column_index < len(row.cells) + del row.cells[column_index] + + def remove_columns(self, column_names): + for column_name in column_names: + self.remove_column(column_name) + + def has_column(self, column_name): + return column_name in self.headings + + def get_column_index(self, column_name): + return self.headings.index(column_name) + + def require_column(self, column_name): + """Require that a column exists in the table. + Raise an AssertionError if the column does not exist. + + :param column_name: Name of new column (as string). + :return: Index of column (as number) if it exists. + """ + if not self.has_column(column_name): + columns = ", ".join(self.headings) + msg = "REQUIRE COLUMN: %s (columns: %s)" % (column_name, columns) + raise AssertionError(msg) + return self.get_column_index(column_name) + + def require_columns(self, column_names): + for column_name in column_names: + self.require_column(column_name) + + def ensure_column_exists(self, column_name): + """Ensures that a column with the given name exists. + If the column does not exist, the column is added. + + :param column_name: Name of column (as string). + :return: Index of column (as number). 
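+
+        EXAMPLE (editorial sketch, not part of the upstream API docs)::
+
+            index = table.ensure_column_exists("status")
+            for row in table:   # -- Fill the (possibly new) column.
+                row.cells[index] = "ok"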
+        """
+        if self.has_column(column_name):
+            return self.get_column_index(column_name)
+        else:
+            return self.add_column(column_name)
+
+    def __repr__(self):
+        return "<Table: %dx%d>" % (len(self.headings), len(self.rows))
+
+    def __eq__(self, other):
+        if isinstance(other, Table):
+            if self.headings != other.headings:
+                return False
+            for my_row, their_row in zip(self.rows, other.rows):
+                if my_row != their_row:
+                    return False
+        else:
+            # -- ASSUME: table <=> raw data comparison
+            other_rows = other
+            for my_row, their_row in zip(self.rows, other_rows):
+                if my_row != their_row:
+                    return False
+        return True
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __iter__(self):
+        return iter(self.rows)
+
+    def __getitem__(self, index):
+        return self.rows[index]
+
+    def assert_equals(self, data):
+        """Assert that this table's cells are the same as the supplied "data".
+
+        The data passed in must be a list of lists giving:
+
+            [
+                [row 1],
+                [row 2],
+                [row 3],
+            ]
+
+        If the cells do not match then a useful AssertionError will be raised.
+        """
+        assert self == data
+        raise NotImplementedError
+
+
+class Row(object):
+    """One row of a `table`_ parsed from a *feature file*.
+
+    Row data is accessible using a number of methods:
+
+    **iteration**
+      Iterating over the Row will yield the individual cells as strings.
+
+    **named access**
+      Individual cells may be accessed by heading name; row["name"] would give
+      the cell value for the column with heading "name".
+
+    **indexed access**
+      Individual cells may be accessed directly by index on the Row instance;
+      row[0] gives the first cell and row[-1] gives the last cell.
+
+    The attributes are:
+
+    .. attribute:: cells
+
+       The list of strings that form the cells of this row.
+
+    .. attribute:: headings
+
+       The headings of the table as a list of strings.
+
+    Rows are also comparable, for what that's worth. Only the cells are
+    compared.
+
+    .. _`table`: gherkin.html#table
+    """
+    def __init__(self, headings, cells, line=None, comments=None):
+        self.headings = headings
+        self.comments = comments
+        for c in cells:
+            assert isinstance(c, six.text_type)
+        self.cells = cells
+        self.line = line
+
+    def __getitem__(self, name):
+        try:
+            index = self.headings.index(name)
+        except ValueError:
+            if isinstance(name, int):
+                index = name
+            else:
+                raise KeyError('"%s" is not a row heading' % name)
+        return self.cells[index]
+
+    def __repr__(self):
+        return "<Row %r>" % (self.cells,)
+
+    def __eq__(self, other):
+        return self.cells == other.cells
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def __len__(self):
+        return len(self.cells)
+
+    def __iter__(self):
+        return iter(self.cells)
+
+    def items(self):
+        return zip(self.headings, self.cells)
+
+    def get(self, key, default=None):
+        try:
+            return self[key]
+        except KeyError:
+            return default
+
+    def as_dict(self):
+        """Converts the row and its cell data into a dictionary.
+        :return: Row data as dictionary (without comments, line info).
+        """
+        from behave.compat.collections import OrderedDict
+        return OrderedDict(self.items())
+
+
+class Tag(six.text_type):
+    """Tags may be associated with Features or Scenarios.
+
+    They're a subclass of regular strings (unicode pre-Python 3) with an
+    additional ``line`` number attribute (where the tag was seen in the source
+    feature file).
+
+    See :ref:`controlling things with tags`.
+    """
+    allowed_chars = u"._-=:"  # In addition to alpha-numerical chars.
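+    # -- EXAMPLE (editorial, illustrative): Tag.make_name(u"user story") yields
+    #    u"user_story"; quoting chars like '<' and '>' are removed entirely.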
+ quoting_chars = ("'", '"', "<", ">") + + def __new__(cls, name, line): + o = six.text_type.__new__(cls, name) + o.line = line + return o + + @classmethod + def make_name(cls, text, unescape=False, allowed_chars=None): + """Translate text into a "valid tag" without whitespace, etc. + Translation rules are: + * alnum chars => same, kept + * space chars => "_" + * other chars => deleted + + Preserve following characters (in addition to alnums, like: A-z, 0-9): + * dots => "." (support: dotted-names, active-tag name schema) + * minus => "-" (support: dashed-names) + * underscore => "_" + * equal => "=" (support: active-tag name schema) + * colon => ":" (support: active-tag name schema or similar) + + :param text: Unicode text as input for name. + :param unescape: Optional flag to unescape some chars (default: false) + :param allowed_chars: Optional string with additional preserved chars. + :return: Unicode name that can be used as tag. + """ + assert isinstance(text, six.text_type) + if allowed_chars is None: + allowed_chars = cls.allowed_chars + + if unescape: + # -- UNESCAPE: Some escaped sequences + text = text.replace("\\t", "\t").replace("\\n", "\n") + chars = [] + for char in text: + if char.isalnum() or (allowed_chars and char in allowed_chars): + chars.append(char) + elif char.isspace(): + chars.append(u"_") + elif char in cls.quoting_chars: + pass # -- NORMALIZE: Remove any quoting chars. + # -- MAYBE: + # else: + # # -- OTHERWISE: Accept gracefully any other character. + # chars.append(char) + return u"".join(chars) + + +class Text(six.text_type): + """Store multiline text from a Step definition. + + The attributes are: + + .. attribute:: value + + The actual text parsed from the *feature file*. + + .. attribute:: content_type + + Currently only "text/plain". + """ + def __new__(cls, value, content_type=u"text/plain", line=0): + assert isinstance(value, six.text_type) + assert isinstance(content_type, six.text_type) + o = six.text_type.__new__(cls, value) + o.content_type = content_type + o.line = line + return o + + def line_range(self): + line_count = len(self.splitlines()) + return (self.line, self.line + line_count + 1) + + def replace(self, old, new, count=-1): + return Text(super(Text, self).replace(old, new, count), self.content_type, + self.line) + + def assert_equals(self, expected): + """Assert that my text is identical to the "expected" text. + + A nice context diff will be displayed if they do not match. + """ + if self == expected: + return True + diff = [] + for line in difflib.unified_diff(self.splitlines(), + expected.splitlines()): + diff.append(line) + # strip unnecessary diff prefix + diff = ["Text does not match:"] + diff[3:] + raise AssertionError("\n".join(diff)) + + +# ----------------------------------------------------------------------------- +# UTILITY FUNCTIONS: +# ----------------------------------------------------------------------------- +def reset_model(model_elements): + """Reset the test run information stored in model elements. + + :param model_elements: List of model elements (Feature, Scenario, ...) + """ + for model_element in model_elements: + model_element.reset() diff --git a/venv/Lib/site-packages/behave/model_core.py b/venv/Lib/site-packages/behave/model_core.py new file mode 100644 index 0000000..de637d0 --- /dev/null +++ b/venv/Lib/site-packages/behave/model_core.py @@ -0,0 +1,422 @@ +# -*- coding: UTF-8 -*- +""" +This module provides the abstract base classes and core concepts +for the model elements in behave. 
+""" + +import os.path +import sys +import six +from behave.capture import Captured +from behave.textutil import text as _text +from enum import Enum + + +PLATFORM_WIN = sys.platform.startswith("win") +def posixpath_normalize(path): + return path.replace("\\", "/") + + +# ----------------------------------------------------------------------------- +# GENERIC MODEL CLASSES: +# ----------------------------------------------------------------------------- +class Status(Enum): + """Provides the (test-run) status of a model element. + Features and Scenarios use: untested, skipped, passed, failed. + Steps may use all enum-values. + + Enum values: + * untested (initial state): + + Defines the initial state before a test-run. + Sometimes used to indicate that the model element was not executed + during a test run. + + * skipped: + + A model element is skipped because it should not run. + This is caused by filtering mechanisms, like tags, active-tags, + file-location arg, select-by-name, etc. + + * passed: A model element was executed and passed (without failures). + * failed: Failures occurred while executing it. + * undefined: Used for undefined-steps (no step implementation was found). + * executing: Marks the steps during execution (used in a formatter) + + .. versionadded:: 1.2.6 + Superceeds string-based status values. + """ + untested = 0 + skipped = 1 + passed = 2 + failed = 3 + undefined = 4 + executing = 5 + + def __eq__(self, other): + """Comparison operator equals-to other value. + Supports other enum-values and string (for backward compatibility). + + EXAMPLES:: + + status = Status.passed + assert status == Status.passed + assert status == "passed" + assert status != "failed" + + :param other: Other value to compare (enum-value, string). + :return: True, if both values are equal. False, otherwise. + """ + if isinstance(other, six.string_types): + # -- CONVENIENCE: Compare with string-name (backward-compatible) + return self.name == other + return super(Status, self).__eq__(other) + + @classmethod + def from_name(cls, name): + """Select enumeration value by using its name. + + :param name: Name as key to the enum value (as string). + :return: Enum value (instance) + :raises: LookupError, if status name is unknown. + """ + # pylint: disable=no-member + enum_value = cls.__members__.get(name, None) + if enum_value is None: + known_names = ", ".join(cls.__members__.keys()) + raise LookupError("%s (expected: %s)" % (name, known_names)) + return enum_value + + +class Argument(object): + """An argument found in a *feature file* step name and extracted using + step decorator `parameters`_. + + The attributes are: + + .. attribute:: original + + The actual text matched in the step name. + + .. attribute:: value + + The potentially type-converted value of the argument. + + .. attribute:: name + + The name of the argument. This will be None if the parameter is + anonymous. + + .. attribute:: start + + The start index in the step name of the argument. Used for display. + + .. attribute:: end + + The end index in the step name of the argument. Used for display. + """ + def __init__(self, start, end, original, value, name=None): + self.start = start + self.end = end + self.original = original + self.value = value + self.name = name + + +# @total_ordering +# class FileLocation(unicode): +class FileLocation(object): + """ + Provides a value object for file location objects. 
+
+    A file location consists of:
+
+      * filename
+      * line (number), optional
+
+    LOCATION SCHEMA:
+      * "{filename}:{line}" or
+      * "{filename}" (if line number is not present)
+    """
+    __pychecker__ = "missingattrs=line"  # -- Ignore warnings for 'line'.
+
+    def __init__(self, filename, line=None):
+        if PLATFORM_WIN:
+            filename = posixpath_normalize(filename)
+        self.filename = filename
+        self.line = line
+
+    def get(self):
+        return self.filename
+
+    def abspath(self):
+        return os.path.abspath(self.filename)
+
+    def basename(self):
+        return os.path.basename(self.filename)
+
+    def dirname(self):
+        return os.path.dirname(self.filename)
+
+    def relpath(self, start=os.curdir):
+        """Compute relative path for start to filename.
+
+        :param start: Base path or start directory (default=current dir).
+        :return: Relative path from start to filename
+        """
+        return os.path.relpath(self.filename, start)
+
+    def exists(self):
+        return os.path.exists(self.filename)
+
+    def _line_lessthan(self, other_line):
+        if self.line is None:
+            # return not (other_line is None)
+            return other_line is not None
+        elif other_line is None:
+            return False
+        else:
+            return self.line < other_line
+
+    def __eq__(self, other):
+        if isinstance(other, FileLocation):
+            return self.filename == other.filename and self.line == other.line
+        elif isinstance(other, six.string_types):
+            return self.filename == other
+        else:
+            raise TypeError("Cannot compare FileLocation with %s:%s" % \
+                            (type(other), other))
+
+    def __ne__(self, other):
+        # return not self == other    # pylint: disable=unneeded-not
+        return not self.__eq__(other)
+
+    def __lt__(self, other):
+        if isinstance(other, FileLocation):
+            if self.filename < other.filename:
+                return True
+            elif self.filename > other.filename:
+                return False
+            else:
+                assert self.filename == other.filename
+                return self._line_lessthan(other.line)
+
+        elif isinstance(other, six.string_types):
+            return self.filename < other
+        else:
+            raise TypeError("Cannot compare FileLocation with %s:%s" % \
+                            (type(other), other))
+
+    def __le__(self, other):
+        # -- SEE ALSO: python2.7, functools.total_ordering
+        # return not other < self     # pylint unneeded-not
+        return other >= self
+
+    def __gt__(self, other):
+        # -- SEE ALSO: python2.7, functools.total_ordering
+        if isinstance(other, FileLocation):
+            return other < self
+        else:
+            return self.filename > other
+
+    def __ge__(self, other):
+        # -- SEE ALSO: python2.7, functools.total_ordering
+        # return not self < other
+        return not self.__lt__(other)
+
+    def __repr__(self):
+        return u'<FileLocation: filename="%s", line=%s>' % \
+               (self.filename, self.line)
+
+    def __str__(self):
+        filename = self.filename
+        if isinstance(filename, six.binary_type):
+            filename = _text(filename, "utf-8")
+        if self.line is None:
+            return filename
+        return u"%s:%d" % (filename, self.line)
+
+    if six.PY2:
+        __unicode__ = __str__
+        __str__ = lambda self: self.__unicode__().encode("utf-8")
+
+    @classmethod
+    def for_function(cls, func, curdir=None):
+        """Extracts the location information from the function and builds
+        the location string (schema: "{source_filename}:{line_number}").
+
+        :param func: Function whose location should be determined.
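+        :param curdir: Optional base directory used when computing the
+            relative path (default: current working directory).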
+ :return: FileLocation object + """ + func = unwrap_function(func) + function_code = six.get_function_code(func) + filename = function_code.co_filename + line_number = function_code.co_firstlineno + + curdir = curdir or os.getcwd() + try: + filename = os.path.relpath(filename, curdir) + except ValueError: + # WINDOWS-SPECIFIC (#599): + # If a step-function comes from a different disk drive, + # a relative path will fail: Keep the absolute path. + pass + return cls(filename, line_number) + + +# ----------------------------------------------------------------------------- +# ABSTRACT MODEL CLASSES (and concepts): +# ----------------------------------------------------------------------------- +class BasicStatement(object): + def __init__(self, filename, line, keyword, name): + filename = filename or '' + filename = os.path.relpath(filename, os.getcwd()) # -- NEEDS: abspath? + self.location = FileLocation(filename, line) + assert isinstance(keyword, six.text_type) + assert isinstance(name, six.text_type) + self.keyword = keyword + self.name = name + # -- SINCE: 1.2.6 + self.captured = Captured() + # -- ERROR CONTEXT INFO: + self.exception = None + self.exc_traceback = None + self.error_message = None + + @property + def filename(self): + # return os.path.abspath(self.location.filename) + return self.location.filename + + @property + def line(self): + return self.location.line + + def reset(self): + # -- RESET: Captured output data + self.captured.reset() + # -- RESET: ERROR CONTEXT INFO + self.exception = None + self.exc_traceback = None + self.error_message = None + + def store_exception_context(self, exception): + self.exception = exception + self.exc_traceback = sys.exc_info()[2] + + def __hash__(self): + # -- NEEDED-FOR: PYTHON3 + # return id((self.keyword, self.name)) + return id(self) + + def __eq__(self, other): + # -- PYTHON3 SUPPORT, ORDERABLE: + # NOTE: Ignore potential FileLocation differences. + return (self.keyword, self.name) == (other.keyword, other.name) + + def __lt__(self, other): + # -- PYTHON3 SUPPORT, ORDERABLE: + # NOTE: Ignore potential FileLocation differences. + return (self.keyword, self.name) < (other.keyword, other.name) + + def __ne__(self, other): + return not self.__eq__(other) + + def __le__(self, other): + # -- SEE ALSO: python2.7, functools.total_ordering + # return not other < self + return other >= self + + def __gt__(self, other): + # -- SEE ALSO: python2.7, functools.total_ordering + assert isinstance(other, BasicStatement) + return other < self + + def __ge__(self, other): + # -- SEE ALSO: python2.7, functools.total_ordering + # OR: return self >= other + return not self < other # pylint: disable=unneeded-not + + # def __cmp__(self, other): + # # -- NOTE: Ignore potential FileLocation differences. + # return cmp((self.keyword, self.name), (other.keyword, other.name)) + + +class TagStatement(BasicStatement): + + def __init__(self, filename, line, keyword, name, tags): + if tags is None: + tags = [] + super(TagStatement, self).__init__(filename, line, keyword, name) + self.tags = tags + + def should_run_with_tags(self, tag_expression): + """Determines if statement should run when the tag expression is used. + + :param tag_expression: Runner/config environment tags to use. + :return: True, if examples should run. False, otherwise (skip it). 
+ """ + return tag_expression.check(self.tags) + + +class TagAndStatusStatement(BasicStatement): + # final_status = ('passed', 'failed', 'skipped') + final_status = (Status.passed, Status.failed, Status.skipped) + + def __init__(self, filename, line, keyword, name, tags): + super(TagAndStatusStatement, self).__init__(filename, line, keyword, name) + self.tags = tags + self.should_skip = False + self.skip_reason = None + self._cached_status = Status.untested + + def should_run_with_tags(self, tag_expression): + """Determines if statement should run when the tag expression is used. + + :param tag_expression: Runner/config environment tags to use. + :return: True, if examples should run. False, otherwise (skip it). + """ + return tag_expression.check(self.tags) + + @property + def status(self): + if self._cached_status not in self.final_status: + # -- RECOMPUTE: As long as final status is not reached. + self._cached_status = self.compute_status() + return self._cached_status + + def set_status(self, value): + if isinstance(value, six.string_types): + value = Status.from_name(value) + self._cached_status = value + + def clear_status(self): + self._cached_status = Status.untested + + def reset(self): + self.should_skip = False + self.skip_reason = None + self.clear_status() + + def compute_status(self): + raise NotImplementedError + + +class Replayable(object): + type = None + + def replay(self, formatter): + getattr(formatter, self.type)(self) + + +# ----------------------------------------------------------------------------- +# UTILITY FUNCTIONS: +# ----------------------------------------------------------------------------- +def unwrap_function(func, max_depth=10): + """Unwraps a function that is wrapped with :func:`functools.partial()`""" + iteration = 0 + wrapped = getattr(func, "__wrapped__", None) + while wrapped and iteration < max_depth: + func = wrapped + wrapped = getattr(func, "__wrapped__", None) + iteration += 1 + return func diff --git a/venv/Lib/site-packages/behave/model_describe.py b/venv/Lib/site-packages/behave/model_describe.py new file mode 100644 index 0000000..77447da --- /dev/null +++ b/venv/Lib/site-packages/behave/model_describe.py @@ -0,0 +1,105 @@ +# -*- coding: utf-8 -*- +""" +Provides textual descriptions for :mod:`behave.model` elements. +""" + +from __future__ import absolute_import +from six.moves import range # pylint: disable=redefined-builtin +from six.moves import zip # pylint: disable=redefined-builtin +from behave.textutil import indent + + +# ----------------------------------------------------------------------------- +# FUNCTIONS: +# ----------------------------------------------------------------------------- +def escape_cell(cell): + """ + Escape table cell contents. + :param cell: Table cell (as unicode string). + :return: Escaped cell (as unicode string). + """ + cell = cell.replace(u'\\', u'\\\\') + cell = cell.replace(u'\n', u'\\n') + cell = cell.replace(u'|', u'\\|') + return cell + + +def escape_triple_quotes(text): + """ + Escape triple-quotes, used for multi-line text/doc-strings. + """ + return text.replace(u'"""', u'\\"\\"\\"') + + +# ----------------------------------------------------------------------------- +# CLASS: +# ----------------------------------------------------------------------------- +class ModelDescriptor(object): + + @staticmethod + def describe_table(table, indentation=None): + """ + Provide a textual description of the table (as used w/ Gherkin). 
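+        Cells are escaped (see :func:`escape_cell`) and each column is padded
+        to the width of its longest cell.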
+ + :param table: Table to use (as :class:`behave.model.Table`) + :param indentation: Line prefix to use (as string, if any). + :return: Textual table description (as unicode string). + """ + # -- STEP: Determine output size of all cells. + cell_lengths = [] + all_rows = [table.headings] + table.rows + for row in all_rows: + lengths = [len(escape_cell(c)) for c in row] + cell_lengths.append(lengths) + + # -- STEP: Determine max. output size for each column. + max_lengths = [] + for col in range(0, len(cell_lengths[0])): + max_lengths.append(max([c[col] for c in cell_lengths])) + + # -- STEP: Build textual table description. + lines = [] + for r, row in enumerate(all_rows): + line = u"|" + for c, (cell, max_length) in enumerate(zip(row, max_lengths)): + pad_size = max_length - cell_lengths[r][c] + line += u" %s%s |" % (escape_cell(cell), " " * pad_size) + line += u"\n" + lines.append(line) + + if indentation: + return indent(lines, indentation) + # -- OTHERWISE: + return u"".join(lines) + + @staticmethod + def describe_docstring(doc_string, indentation=None): + """ + Provide a textual description of the multi-line text/triple-quoted + doc-string (as used w/ Gherkin). + + :param doc_string: Multi-line text to use. + :param indentation: Line prefix to use (as string, if any). + :return: Textual table description (as unicode string). + """ + text = escape_triple_quotes(doc_string) + text = u'"""\n' + text + '\n"""\n' + + if indentation: + text = indent(text, indentation) + return text + + +class ModelPrinter(ModelDescriptor): + + def __init__(self, stream): + super(ModelPrinter, self).__init__() + self.stream = stream + + def print_table(self, table, indentation=None): + self.stream.write(self.describe_table(table, indentation)) + self.stream.flush() + + def print_docstring(self, text, indentation=None): + self.stream.write(self.describe_docstring(text, indentation)) + self.stream.flush() diff --git a/venv/Lib/site-packages/behave/parser.py b/venv/Lib/site-packages/behave/parser.py new file mode 100644 index 0000000..b06aabc --- /dev/null +++ b/venv/Lib/site-packages/behave/parser.py @@ -0,0 +1,615 @@ +# -*- coding: UTF-8 -*- + +from __future__ import absolute_import, with_statement +import re +import sys +import six +from behave import model, i18n +from behave.textutil import text as _text + + +DEFAULT_LANGUAGE = "en" + + +def parse_file(filename, language=None): + with open(filename, "rb") as f: + # file encoding is assumed to be utf8. Oh, yes. + data = f.read().decode("utf8") + return parse_feature(data, language, filename) + + +def parse_feature(data, language=None, filename=None): + # ALL data operated on by the parser MUST be unicode + assert isinstance(data, six.text_type) + + try: + result = Parser(language).parse(data, filename) + except ParserError as e: + e.filename = filename + raise + + return result + +def parse_steps(text, language=None, filename=None): + """ + Parse a number of steps a multi-line text from a scenario. + Scenario line with title and keyword is not provided. + + :param text: Multi-line text with steps to parse (as unicode). + :param language: i18n language identifier (optional). + :param filename: Filename (optional). + :return: Parsed steps (if successful). + """ + assert isinstance(text, six.text_type) + try: + result = Parser(language, variant="steps").parse_steps(text, filename) + except ParserError as e: + e.filename = filename + raise + return result + +def parse_tags(text): + """ + Parse tags from text (one or more lines, as string). 
+ + :param text: Multi-line text with tags to parse (as unicode). + :return: List of tags (if successful). + """ + # assert isinstance(text, unicode) + if not text: + return [] + return Parser(variant="tags").parse_tags(text) + + +class ParserError(Exception): + def __init__(self, message, line, filename=None, line_text=None): + if line: + message += u" at line %d" % line + if line_text: + message += u': "%s"' % line_text.strip() + super(ParserError, self).__init__(message) + self.line = line + self.line_text = line_text + self.filename = filename + + def __str__(self): + arg0 = _text(self.args[0]) + if self.filename: + filename = _text(self.filename, sys.getfilesystemencoding()) + return u'Failed to parse "%s": %s' % (filename, arg0) + else: + return u"Failed to parse : %s" % arg0 + + if six.PY2: + __unicode__ = __str__ + __str__ = lambda self: self.__unicode__().encode("utf-8") + + +class Parser(object): + """Feature file parser for behave.""" + # pylint: disable=too-many-instance-attributes + + def __init__(self, language=None, variant=None): + if not variant: + variant = "feature" + self.language = language + self.variant = variant + self.state = "init" + self.line = 0 + self.last_step = None + self.multiline_start = None + self.multiline_leading = None + self.multiline_terminator = None + + self.filename = None + self.feature = None + self.statement = None + self.tags = [] + self.lines = [] + self.table = None + self.examples = None + self.keywords = None + if self.language: + self.keywords = i18n.languages[self.language] + # NOT-NEEDED: self.reset() + + def reset(self): + # This can probably go away. + if self.language: + self.keywords = i18n.languages[self.language] + else: + self.keywords = None + + self.state = "init" + self.line = 0 + self.last_step = None + self.multiline_start = None + self.multiline_leading = None + self.multiline_terminator = None + + self.filename = None + self.feature = None + self.statement = None + self.tags = [] + self.lines = [] + self.table = None + self.examples = None + + def parse(self, data, filename=None): + self.reset() + + self.filename = filename + + for line in data.split("\n"): + self.line += 1 + if not line.strip() and self.state != "multiline": + # -- SKIP EMPTY LINES, except in multiline string args. 
+ continue + self.action(line) + + if self.table: + self.action_table("") + + feature = self.feature + if feature: + feature.parser = self + self.reset() + return feature + + def _build_feature(self, keyword, line): + name = line[len(keyword) + 1:].strip() + language = self.language or DEFAULT_LANGUAGE + self.feature = model.Feature(self.filename, self.line, keyword, + name, tags=self.tags, language=language) + # -- RESET STATE: + self.tags = [] + + def _build_background_statement(self, keyword, line): + if self.tags: + msg = u"Background supports no tags: @%s" % (u" @".join(self.tags)) + raise ParserError(msg, self.line, self.filename, line) + name = line[len(keyword) + 1:].strip() + statement = model.Background(self.filename, self.line, keyword, name) + self.statement = statement + self.feature.background = self.statement + + def _build_scenario_statement(self, keyword, line): + name = line[len(keyword) + 1:].strip() + self.statement = model.Scenario(self.filename, self.line, + keyword, name, tags=self.tags) + self.feature.add_scenario(self.statement) + # -- RESET STATE: + self.tags = [] + + def _build_scenario_outline_statement(self, keyword, line): + # pylint: disable=C0103 + # C0103 Invalid name "build_scenario_outline_statement", too long. + name = line[len(keyword) + 1:].strip() + self.statement = model.ScenarioOutline(self.filename, self.line, + keyword, name, tags=self.tags) + self.feature.add_scenario(self.statement) + # -- RESET STATE: + self.tags = [] + + def _build_examples(self, keyword, line): + if not isinstance(self.statement, model.ScenarioOutline): + message = u"Examples must only appear inside scenario outline" + raise ParserError(message, self.line, self.filename, line) + name = line[len(keyword) + 1:].strip() + self.examples = model.Examples(self.filename, self.line, + keyword, name, tags=self.tags) + # pylint: disable=E1103 + # E1103 Instance of "Background" has no "examples" member + # (but some types could not be inferred). + self.statement.examples.append(self.examples) + + # -- RESET STATE: + self.tags = [] + + + def diagnose_feature_usage_error(self): + if self.feature: + return "Multiple features in one file are not supported." + else: + return "Feature should not be used here." + + def diagnose_background_usage_error(self): + if self.feature and self.feature.scenarios: + return "Background may not occur after Scenario/ScenarioOutline." + elif self.tags: + return "Background does not support tags." + else: + return "Background should not be used here." + + def diagnose_scenario_usage_error(self): + if not self.feature: + return "Scenario may not occur before Feature." + else: + return "Scenario should not be used here." + + def diagnose_scenario_outline_usage_error(self): # pylint: disable=invalid-name + if not self.feature: + return "ScenarioOutline may not occur before Feature." + else: + return "ScenarioOutline should not be used here." + + def ask_parse_failure_oracle(self, line): + """ + Try to find the failure reason when a parse failure occurs: + + Oracle, oracle, ... what went wrong? + Zzzz + + :param line: Text line where parse failure occured (as string). + :return: Reason (as string) if an explanation is found. + Otherwise, empty string or None. 
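+
+        EXAMPLE (editorial sketch): A second "Feature:" line cannot be
+        parsed in the "steps" state; this oracle then names the reason.
+
+        .. code-block:: python
+
+            from behave.parser import parse_feature, ParserError
+
+            text = u"Feature: One\n  Scenario: S\n    Given a step\nFeature: Two\n"
+            try:
+                parse_feature(text)
+            except ParserError as e:
+                # -- REASON (from this oracle): Multiple features ...
+                assert u"Multiple features" in str(e)
+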
+ """ + feature_kwd = self.match_keyword("feature", line) + if feature_kwd: + return self.diagnose_feature_usage_error() + background_kwd = self.match_keyword("background", line) + if background_kwd: + return self.diagnose_background_usage_error() + scenario_kwd = self.match_keyword("scenario", line) + if scenario_kwd: + return self.diagnose_scenario_usage_error() + scenario_outline_kwd = self.match_keyword("scenario_outline", line) + if scenario_outline_kwd: + return self.diagnose_scenario_outline_usage_error() + # -- OTHERWISE: + if self.variant == "feature" and not self.feature: + return "No feature found." + # -- FINALLY: No glue what went wrong. + return None + + def action(self, line): + if line.strip().startswith("#") and self.state != "multiline": + if self.state != "init" or self.tags or self.variant != "feature": + return + + # -- DETECT: language comment (at begin of feature file; state=init) + line = line.strip()[1:].strip() + if line.lstrip().lower().startswith("language:"): + language = line[9:].strip() + self.language = language + self.keywords = i18n.languages[language] + return + + func = getattr(self, "action_" + self.state, None) + if func is None: + line = line.strip() + msg = u"Parser in unknown state %s;" % self.state + raise ParserError(msg, self.line, self.filename, line) + if not func(line): + line = line.strip() + msg = u'\nParser failure in state %s, at line %d: "%s"\n' % \ + (self.state, self.line, line) + reason = self.ask_parse_failure_oracle(line) + if reason: + msg += u"REASON: %s" % reason + raise ParserError(msg, None, self.filename) + + def action_init(self, line): + line = line.strip() + if line.startswith("@"): + self.tags.extend(self.parse_tags(line)) + return True + + feature_kwd = self.match_keyword("feature", line) + if feature_kwd: + self._build_feature(feature_kwd, line) + self.state = "feature" + return True + return False + + # def subaction_detect_next_scenario(self, line): + # if line.startswith("@"): + # self.tags.extend(self.parse_tags(line)) + # self.state = "next_scenario" + # return True + # + # scenario_kwd = self.match_keyword("scenario", line) + # if scenario_kwd: + # self._build_scenario_statement(scenario_kwd, line) + # self.state = "scenario" + # return True + # + # scenario_outline_kwd = self.match_keyword("scenario_outline", line) + # if scenario_outline_kwd: + # self._build_scenario_outline_statement(scenario_outline_kwd, line) + # self.state = "scenario" + # return True + # + # # -- OTHERWISE: + # return False + + # pylint: disable=invalid-name + def subaction_detect_taggable_statement(self, line): + """Subaction is used after first tag line is detected. + Additional lines with tags or taggable_statement follow. 
+ + Taggable statements (excluding Feature) are: + * Scenario + * ScenarioOutline + * Examples (within ScenarioOutline) + """ + if line.startswith("@"): + self.tags.extend(self.parse_tags(line)) + self.state = "taggable_statement" + return True + + scenario_kwd = self.match_keyword("scenario", line) + if scenario_kwd: + self._build_scenario_statement(scenario_kwd, line) + self.state = "scenario" + return True + + scenario_outline_kwd = self.match_keyword("scenario_outline", line) + if scenario_outline_kwd: + self._build_scenario_outline_statement(scenario_outline_kwd, line) + self.state = "scenario" + return True + + examples_kwd = self.match_keyword("examples", line) + if examples_kwd: + self._build_examples(examples_kwd, line) + self.state = "table" + return True + + # -- OTHERWISE: + return False + # pylint: enable=invalid-name + + def action_feature(self, line): + line = line.strip() + # OLD: if self.subaction_detect_next_scenario(line): + if self.subaction_detect_taggable_statement(line): + # -- DETECTED: Next Scenario, ScenarioOutline (or tags) + return True + + background_kwd = self.match_keyword("background", line) + if background_kwd: + self._build_background_statement(background_kwd, line) + self.state = "steps" + return True + + self.feature.description.append(line) + return True + + # def action_next_scenario(self, line): + # """ + # Entered after first tag for Scenario/ScenarioOutline is detected. + # """ + # line = line.strip() + # if self.subaction_detect_next_scenario(line): + # return True + # + # return False + + def action_taggable_statement(self, line): + """Entered after first tag for Scenario/ScenarioOutline or + Examples is detected (= taggable_statement except Feature). + + Taggable statements (excluding Feature) are: + * Scenario + * ScenarioOutline + * Examples (within ScenarioOutline) + """ + line = line.strip() + if self.subaction_detect_taggable_statement(line): + # -- DETECTED: Next Scenario, ScenarioOutline or Examples (or tags) + return True + + return False + + def action_scenario(self, line): + """ + Entered when Scenario/ScenarioOutline keyword/line is detected. + Hunts/collects scenario description lines. + + DETECT: + * first step of Scenario/ScenarioOutline + * next Scenario/ScenarioOutline. + """ + line = line.strip() + step = self.parse_step(line) + if step: + # -- FIRST STEP DETECTED: End collection of scenario descriptions. + self.state = "steps" + self.statement.steps.append(step) + return True + + # -- CASE: Detect next Scenario/ScenarioOutline + # * Scenario with scenario description, but without steps. + # * Title-only scenario without scenario description and steps. + # OLD: if self.subaction_detect_next_scenario(line): + if self.subaction_detect_taggable_statement(line): + # -- DETECTED: Next Scenario, ScenarioOutline (or tags) + return True + + # -- OTHERWISE: Add scenario description line. + # pylint: disable=E1103 + # E1103 Instance of "Background" has no "description" member... + self.statement.description.append(line) + return True + + def action_steps(self, line): + """ + Entered when first step is detected (or nested step parsing). 
+ + Subcases: + * step + * multi-line text (doc-string), following a step + * table, following a step + * examples for a ScenarioOutline, after ScenarioOutline steps + + DETECT: + * next Scenario/ScenarioOutline or Examples (in a ScenarioOutline) + """ + # pylint: disable=R0911 + # R0911 Too many return statements (8/6) + stripped = line.lstrip() + if stripped.startswith('"""') or stripped.startswith("'''"): + self.state = "multiline" + self.multiline_start = self.line + self.multiline_terminator = stripped[:3] + self.multiline_leading = line.index(stripped[0]) + return True + + line = line.strip() + step = self.parse_step(line) + if step: + self.statement.steps.append(step) + return True + + if self.subaction_detect_taggable_statement(line): + # -- DETECTED: Next Scenario, ScenarioOutline or Examples (or tags) + return True + + if line.startswith("|"): + assert self.statement.steps, "TABLE-START without step detected." + self.state = "table" + return self.action_table(line) + + return False + + def action_multiline(self, line): + if line.strip().startswith(self.multiline_terminator): + step = self.statement.steps[-1] + step.text = model.Text(u"\n".join(self.lines), u"text/plain", + self.multiline_start) + if step.name.endswith(":"): + step.name = step.name[:-1] + self.lines = [] + self.multiline_terminator = None + self.state = "steps" + return True + + self.lines.append(line[self.multiline_leading:]) + # -- BETTER DIAGNOSTICS: May remove non-whitespace in execute_steps() + removed_line_prefix = line[:self.multiline_leading] + if removed_line_prefix.strip(): + message = u"BAD-INDENT in multiline text: " + message += u"Line '%s' would strip leading '%s'" % \ + (line, removed_line_prefix) + raise ParserError(message, self.line, self.filename) + return True + + def action_table(self, line): + line = line.strip() + + if not line.startswith("|"): + if self.examples: + self.examples.table = self.table + self.examples = None + else: + step = self.statement.steps[-1] + step.table = self.table + if step.name.endswith(":"): + step.name = step.name[:-1] + self.table = None + self.state = "steps" + return self.action_steps(line) + + # -- SUPPORT: Escaped-pipe(s) in Gherkin cell values. + # Search for pipe(s) that are not preceded with an escape char. + cells = [cell.replace("\\|", "|").strip() + for cell in re.split(r"(?<!\\)\|", line[1:-1])] + feature -> xml_element:testsuite + scenario -> xml_element:testcase + +XML document structure:: + + # -- XML elements: + # CARDINALITY SUFFIX: + # ? optional (zero or one) + # * many0 (zero or more) + # + many (one or more) + testsuites := sequence<testsuite> + testsuite: + properties? : sequence<property> + testcase* : + error? : text + failure? : text + system-out : text + system-err : text + + testsuite: + @name : TokenString + @tests : int + @failures : int + @errors : int + @skipped : int + @time : Decimal # Duration in seconds + # -- SINCE: behave-1.2.6 + @timestamp : IsoDateTime + @hostname : string + + testcase: + @name : TokenString + @classname : TokenString + @status : string # Status enum + @time : Decimal # Elapsed seconds + + error: + @message : string + @type : string + + failure: + @message : string + @type : string + + # -- HINT: Not used + property: + @name : TokenString + @value : string + + type Status : Enum("passed", "failed", "skipped", "untested") + +Note that a spec for JUnit XML output was not clearly defined. +Best sources are: + +* `JUnit XML`_ (for PDF) +* JUnit XML (`ant spec 1`_, `ant spec 2`_) + + +.. 
_`JUnit XML`: http://junitpdfreport.sourceforge.net/managedcontent/PdfTranslation +.. _`ant spec 1`: https://github.com/windyroad/JUnit-Schema +.. _`ant spec 2`: http://svn.apache.org/repos/asf/ant/core/trunk/src/main/org/apache/tools/ant/taskdefs/optional/junit/XMLJUnitResultFormatter.java +""" +# pylint: enable=line-too-long + +from __future__ import absolute_import +import os.path +import codecs +from xml.etree import ElementTree +from datetime import datetime +from behave.reporter.base import Reporter +from behave.model import Scenario, ScenarioOutline, Step +from behave.model_core import Status +from behave.formatter import ansi_escapes +from behave.model_describe import ModelDescriptor +from behave.textutil import indent, make_indentation, text as _text +from behave.userdata import UserDataNamespace +import six +if six.PY2: + # -- USE: Python3 backport for better unicode compatibility. + import traceback2 as traceback +else: + import traceback + + +def CDATA(text=None): # pylint: disable=invalid-name + # -- issue #70: remove_ansi_escapes(text) + element = ElementTree.Element('![CDATA[') + element.text = ansi_escapes.strip_escapes(text) + return element + + +class ElementTreeWithCDATA(ElementTree.ElementTree): + # pylint: disable=redefined-builtin, no-member + def _write(self, file, node, encoding, namespaces): + """This method is for ElementTree <= 1.2.6""" + + if node.tag == '![CDATA[': + text = node.text.encode(encoding) + file.write("\n<![CDATA[%s]]>\n" % text) + else: + ElementTree.ElementTree._write(self, file, node, encoding, + namespaces) + +if hasattr(ElementTree, '_serialize'): + # pylint: disable=protected-access + def _serialize_xml2(write, elem, encoding, qnames, namespaces, + orig=ElementTree._serialize_xml): + if elem.tag == '![CDATA[': + write("\n<%s%s]]>\n" % \ + (elem.tag, elem.text.encode(encoding, "xmlcharrefreplace"))) + return + return orig(write, elem, encoding, qnames, namespaces) + + def _serialize_xml3(write, elem, qnames, namespaces, + short_empty_elements=None, + orig=ElementTree._serialize_xml): + if elem.tag == '![CDATA[': + write("\n<{tag}{text}]]>\n".format( + tag=elem.tag, text=elem.text)) + return + if short_empty_elements: + # python >=3.3 + return orig(write, elem, qnames, namespaces, short_empty_elements) + else: + # python <3.3 + return orig(write, elem, qnames, namespaces) + + if six.PY3: + ElementTree._serialize_xml = \ + ElementTree._serialize['xml'] = _serialize_xml3 + elif six.PY2: + ElementTree._serialize_xml = \ + ElementTree._serialize['xml'] = _serialize_xml2 + + +class FeatureReportData(object): + """ + Provides value object to collect JUnit report data from a Feature. + """ + def __init__(self, feature, filename, classname=None): + if not classname and filename: + classname = filename.replace('/', '.') + self.feature = feature + self.filename = filename + self.classname = classname + self.testcases = [] + self.counts_tests = 0 + self.counts_errors = 0 + self.counts_failed = 0 + self.counts_skipped = 0 + + def reset(self): + self.testcases = [] + self.counts_tests = 0 + self.counts_errors = 0 + self.counts_failed = 0 + self.counts_skipped = 0 + + +class JUnitReporter(Reporter): + """Generates JUnit-like XML test report for behave. + """ + # -- XML REPORT: + userdata_scope = "behave.reporter.junit" + show_timings = True # -- Show step timings. + show_skipped_always = False + show_timestamp = True + show_hostname = True + # -- XML REPORT PART: Describe scenarios + show_scenarios = True # Show scenario descriptions. 
+ show_tags = True + show_multiline = True + + def __init__(self, config): + super(JUnitReporter, self).__init__(config) + self.setup_with_userdata(config.userdata) + + def setup_with_userdata(self, userdata): + """Setup JUnit reporter with userdata information. + A user can now tweak the output format of this reporter. + + EXAMPLE: + .. code-block:: ini + + # -- FILE: behave.ini + [behave.userdata] + behave.reporter.junit.show_hostname = false + """ + # -- EXPERIMENTAL: + config = UserDataNamespace(self.userdata_scope, userdata) + self.show_hostname = config.getbool("show_hostname", self.show_hostname) + self.show_multiline = config.getbool("show_multiline", self.show_multiline) + self.show_scenarios = config.getbool("show_scenarios", self.show_scenarios) + self.show_tags = config.getbool("show_tags", self.show_tags) + self.show_timings = config.getbool("show_timings", self.show_timings) + self.show_timestamp = config.getbool("show_timestamp", self.show_timestamp) + self.show_skipped_always = config.getbool("show_skipped_always", + self.show_skipped_always) + + def make_feature_filename(self, feature): + filename = None + for path in self.config.paths: + if feature.filename.startswith(path): + filename = feature.filename[len(path) + 1:] + break + if not filename: + # -- NOTE: Directory path (subdirs) are taken into account. + filename = feature.location.relpath(self.config.base_dir) + filename = filename.rsplit('.', 1)[0] + filename = filename.replace('\\', '/').replace('/', '.') + return _text(filename) + + @property + def show_skipped(self): + return self.config.show_skipped or self.show_skipped_always + + # -- REPORTER-API: + def feature(self, feature): + if feature.status == Status.skipped and not self.show_skipped: + # -- SKIP-OUTPUT: If skipped features should not be shown. + return + + feature_filename = self.make_feature_filename(feature) + classname = feature_filename + report = FeatureReportData(feature, feature_filename) + now = datetime.now() + + suite = ElementTree.Element(u'testsuite') + feature_name = feature.name or feature_filename + suite.set(u'name', u'%s.%s' % (classname, feature_name)) + + # -- BUILD-TESTCASES: From scenarios + for scenario in feature: + if isinstance(scenario, ScenarioOutline): + scenario_outline = scenario + self._process_scenario_outline(scenario_outline, report) + else: + self._process_scenario(scenario, report) + + # -- ADD TESTCASES to testsuite: + for testcase in report.testcases: + suite.append(testcase) + + suite.set(u'tests', _text(report.counts_tests)) + suite.set(u'errors', _text(report.counts_errors)) + suite.set(u'failures', _text(report.counts_failed)) + suite.set(u'skipped', _text(report.counts_skipped)) # WAS: skips + suite.set(u'time', _text(round(feature.duration, 6))) + # -- SINCE: behave-1.2.6.dev0 + if self.show_timestamp: + suite.set(u'timestamp', _text(now.isoformat())) + if self.show_hostname: + suite.set(u'hostname', _text(gethostname())) + + if not os.path.exists(self.config.junit_directory): + # -- ENSURE: Create multiple directory levels at once. 
+ os.makedirs(self.config.junit_directory) + + tree = ElementTreeWithCDATA(suite) + report_dirname = self.config.junit_directory + report_basename = u'TESTS-%s.xml' % feature_filename + report_filename = os.path.join(report_dirname, report_basename) + tree.write(codecs.open(report_filename, "wb"), "UTF-8") + + # -- MORE: + # pylint: disable=line-too-long + @staticmethod + def select_step_with_status(status, steps): + """Helper function to find the first step that has the given + step.status. + + EXAMPLE: Search for a failing step in a scenario (all steps). + >>> scenario = ... + >>> failed_step = select_step_with_status(Status.failed, scenario) + >>> failed_step = select_step_with_status(Status.failed, scenario.all_steps) + >>> assert failed_step.status == Status.failed + + EXAMPLE: Search only scenario steps, skip background steps. + >>> failed_step = select_step_with_status(Status.failed, scenario.steps) + + :param status: Step status to search for (as enum value). + :param steps: List of steps to search in (or scenario). + :returns: Step object, if found. + :returns: None, otherwise. + + .. versionchanged:: 1.2.6 + status: Use enum value instead of string (or string). + """ + for step in steps: + assert isinstance(step, Step), \ + "TYPE-MISMATCH: step.class=%s" % step.__class__.__name__ + if step.status == status: + return step + # -- OTHERWISE: No step with the given status found. + # KeyError("Step with status={0} not found".format(status)) + return None + # pylint: enable=line-too-long + + def describe_step(self, step): + status_text = _text(step.status.name) + if self.show_timings: + status_text += u" in %0.3fs" % step.duration + text = u'%s %s ... ' % (step.keyword, step.name) + text += u'%s\n' % status_text + if self.show_multiline: + prefix = make_indentation(2) + if step.text: + text += ModelDescriptor.describe_docstring(step.text, prefix) + elif step.table: + text += ModelDescriptor.describe_table(step.table, prefix) + return text + + @classmethod + def describe_tags(cls, tags): + text = u'' + if tags: + text = u'@'+ u' @'.join(tags) + return text + + def describe_scenario(self, scenario): + """Describe the scenario and the test status. + NOTE: table, multiline text is missing in description. + + :param scenario: Scenario that was tested. + :return: Textual description of the scenario. + """ + header_line = u'\n@scenario.begin\n' + if self.show_tags and scenario.tags: + header_line += u'\n %s\n' % self.describe_tags(scenario.tags) + header_line += u' %s: %s\n' % (scenario.keyword, scenario.name) + footer_line = u'\n@scenario.end\n' + u'-' * 80 + '\n' + text = u'' + for step in scenario: + text += self.describe_step(step) + step_indentation = make_indentation(4) + return header_line + indent(text, step_indentation) + footer_line + + def _process_scenario(self, scenario, report): + """Process a scenario and append information to JUnit report object. + This corresponds to a JUnit testcase: + + * testcase.@classname = f(filename) +'.'+ feature.name + * testcase.@name = scenario.name + * testcase.@status = scenario.status + * testcase.@time = scenario.duration + + Now distinguishes between failures and errors. + Failures are AssertionErrors: an expectation is violated/not met. + Errors are unexpected RuntimeErrors (all other exceptions). + + If a failure/error occurs, the step that caused the failure + and its location are now provided. + + :param scenario: Scenario to process. + :param report: Context object to store/add info to (outgoing param). 
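+
+        EXAMPLE (editorial sketch of one resulting XML element;
+        names and timing are illustrative only)::
+
+            <testcase classname="tutorial.Showing_off" name="Run a simple test"
+                      status="passed" time="0.012"/>
+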
+ """ + # pylint: disable=too-many-locals, too-many-branches, too-many-statements + assert isinstance(scenario, Scenario) + assert not isinstance(scenario, ScenarioOutline) + if scenario.status != Status.skipped or self.show_skipped: + # -- NOTE: Count only if not-skipped or skipped should be shown. + report.counts_tests += 1 + classname = report.classname + feature = report.feature + feature_name = feature.name + if not feature_name: + feature_name = self.make_feature_filename(feature) + + case = ElementTree.Element('testcase') + case.set(u"classname", u"%s.%s" % (classname, feature_name)) + case.set(u"name", scenario.name or "") + case.set(u"status", scenario.status.name) + case.set(u"time", _text(round(scenario.duration, 6))) + + step = None + failing_step = None + if scenario.status == Status.failed: + for status in (Status.failed, Status.undefined): + step = self.select_step_with_status(status, scenario) + if step: + break + # -- NOTE: Scenario may fail now due to hook-errors. + element_name = "failure" + if step and isinstance(step.exception, (AssertionError, type(None))): + # -- FAILURE: AssertionError + assert step.status in (Status.failed, Status.undefined) + report.counts_failed += 1 + else: + # -- UNEXPECTED RUNTIME-ERROR: + report.counts_errors += 1 + element_name = "error" + # -- COMMON-PART: + failure = ElementTree.Element(element_name) + if step: + step_text = self.describe_step(step).rstrip() + text = u"\nFailing step: %s\nLocation: %s\n" % \ + (step_text, step.location) + message = _text(step.exception) + failure.set(u'type', step.exception.__class__.__name__) + failure.set(u'message', message) + text += _text(step.error_message) + else: + # -- MAYBE: Hook failure before any step is executed. + failure_type = "UnknownError" + if scenario.exception: + failure_type = scenario.exception.__class__.__name__ + failure.set(u'type', failure_type) + failure.set(u'message', scenario.error_message or "") + traceback_lines = traceback.format_tb(scenario.exc_traceback) + traceback_lines.insert(0, u"Traceback:\n") + text = _text(u"".join(traceback_lines)) + failure.append(CDATA(text)) + case.append(failure) + elif (scenario.status in (Status.skipped, Status.untested) + and self.show_skipped): + report.counts_skipped += 1 + step = self.select_step_with_status(Status.undefined, scenario) + if step: + # -- UNDEFINED-STEP: + report.counts_failed += 1 + failure = ElementTree.Element(u"failure") + failure.set(u"type", u"undefined") + failure.set(u"message", (u"Undefined Step: %s" % step.name)) + case.append(failure) + else: + skip = ElementTree.Element(u'skipped') + case.append(skip) + + # Create stdout section for each test case + stdout = ElementTree.Element(u"system-out") + text = u"" + if self.show_scenarios: + text = self.describe_scenario(scenario) + + # Append the captured standard output + if scenario.captured.stdout: + output = _text(scenario.captured.stdout) + text += u"\nCaptured stdout:\n%s\n" % output + stdout.append(CDATA(text)) + case.append(stdout) + + # Create stderr section for each test case + if scenario.captured.stderr: + stderr = ElementTree.Element(u"system-err") + output = _text(scenario.captured.stderr) + text = u"\nCaptured stderr:\n%s\n" % output + stderr.append(CDATA(text)) + case.append(stderr) + + if scenario.status != Status.skipped or self.show_skipped: + report.testcases.append(case) + + def _process_scenario_outline(self, scenario_outline, report): + assert isinstance(scenario_outline, ScenarioOutline) + for scenario in scenario_outline: + assert 
isinstance(scenario, Scenario) + self._process_scenario(scenario, report) + +# ----------------------------------------------------------------------------- +# SUPPORT: +# ----------------------------------------------------------------------------- +def gethostname(): + """Return hostname of local host (as string)""" + import socket + return socket.gethostname() diff --git a/venv/Lib/site-packages/behave/reporter/summary.py b/venv/Lib/site-packages/behave/reporter/summary.py new file mode 100644 index 0000000..2ca0534 --- /dev/null +++ b/venv/Lib/site-packages/behave/reporter/summary.py @@ -0,0 +1,94 @@ +# -*- coding: UTF-8 -*- +""" +Provides a summary after each test run. +""" + +from __future__ import absolute_import, division +import sys +from behave.model import ScenarioOutline +from behave.model_core import Status +from behave.reporter.base import Reporter +from behave.formatter.base import StreamOpener + + +# -- DISABLED: optional_steps = ('untested', 'undefined') +optional_steps = (Status.untested,) # MAYBE: Status.undefined +status_order = (Status.passed, Status.failed, Status.skipped, + Status.undefined, Status.untested) + +def format_summary(statement_type, summary): + parts = [] + for status in status_order: + if status.name not in summary: + continue + counts = summary[status.name] + if status in optional_steps and counts == 0: + # -- SHOW-ONLY: For relevant counts, suppress: untested items, etc. + continue + + if not parts: + # -- FIRST ITEM: Add statement_type to counter. + label = statement_type + if counts != 1: + label += 's' + part = u"%d %s %s" % (counts, label, status.name) + else: + part = u"%d %s" % (counts, status.name) + parts.append(part) + return ", ".join(parts) + "\n" + + +class SummaryReporter(Reporter): + show_failed_scenarios = True + output_stream_name = "stdout" + + def __init__(self, config): + super(SummaryReporter, self).__init__(config) + stream = getattr(sys, self.output_stream_name, sys.stderr) + self.stream = StreamOpener.ensure_stream_with_encoder(stream) + self.feature_summary = {Status.passed.name: 0, Status.failed.name: 0, + Status.skipped.name: 0, Status.untested.name: 0} + self.scenario_summary = {Status.passed.name: 0, Status.failed.name: 0, + Status.skipped.name: 0, Status.untested.name: 0} + self.step_summary = {Status.passed.name: 0, Status.failed.name: 0, + Status.skipped.name: 0, Status.untested.name: 0, + Status.undefined.name: 0} + self.duration = 0.0 + self.failed_scenarios = [] + + def feature(self, feature): + self.feature_summary[feature.status.name] += 1 + self.duration += feature.duration + for scenario in feature: + if isinstance(scenario, ScenarioOutline): + self.process_scenario_outline(scenario) + else: + self.process_scenario(scenario) + + def end(self): + # -- SHOW FAILED SCENARIOS (optional): + if self.show_failed_scenarios and self.failed_scenarios: + self.stream.write("\nFailing scenarios:\n") + for scenario in self.failed_scenarios: + self.stream.write(u" %s %s\n" % ( + scenario.location, scenario.name)) + self.stream.write("\n") + + # -- SHOW SUMMARY COUNTS: + self.stream.write(format_summary("feature", self.feature_summary)) + self.stream.write(format_summary("scenario", self.scenario_summary)) + self.stream.write(format_summary("step", self.step_summary)) + timings = (int(self.duration / 60.0), self.duration % 60) + self.stream.write('Took %dm%02.3fs\n' % timings) + + def process_scenario(self, scenario): + if scenario.status == Status.failed: + self.failed_scenarios.append(scenario) + + 
self.scenario_summary[scenario.status.name] += 1 + for step in scenario: + self.step_summary[step.status.name] += 1 + + def process_scenario_outline(self, scenario_outline): + for scenario in scenario_outline.scenarios: + self.process_scenario(scenario) diff --git a/venv/Lib/site-packages/behave/runner.py b/venv/Lib/site-packages/behave/runner.py new file mode 100644 index 0000000..1a2e6e5 --- /dev/null +++ b/venv/Lib/site-packages/behave/runner.py @@ -0,0 +1,824 @@ +# -*- coding: UTF-8 -*- +""" +This module provides the Runner class to run behave feature files (or model elements). +""" + +from __future__ import absolute_import, print_function, with_statement + +import contextlib +import os.path +import sys +import warnings +import weakref + +import six + +from behave._types import ExceptionUtil +from behave.capture import CaptureController +from behave.configuration import ConfigError +from behave.formatter._registry import make_formatters +from behave.runner_util import \ + collect_feature_locations, parse_features, \ + exec_file, load_step_modules, PathManager +from behave.step_registry import registry as the_step_registry + +if six.PY2: + # -- USE PYTHON3 BACKPORT: With unicode traceback support. + import traceback2 as traceback +else: + import traceback + + +class CleanupError(RuntimeError): + pass + + +class ContextMaskWarning(UserWarning): + """Raised if a context variable is being overwritten in some situations. + + If the variable was originally set by user code then this will be raised if + *behave* overwrites the value. + + If the variable was originally set by *behave* then this will be raised if + user code overwrites the value. + """ + pass + + +class Context(object): + """Hold contextual information during the running of tests. + + This object is a place to store information related to the tests you're + running. You may add arbitrary attributes to it of whatever value you need. + + During the running of your tests the object will have additional layers of + namespace added and removed automatically. There is a "root" namespace and + additional namespaces for features and scenarios. + + Certain names are used by *behave*; be wary of using them yourself as + *behave* may overwrite the value you set. These names are: + + .. attribute:: feature + + This is set when we start testing a new feature and holds a + :class:`~behave.model.Feature`. It will not be present outside of a + feature (i.e. within the scope of the environment before_all and + after_all). + + .. attribute:: scenario + + This is set when we start testing a new scenario (including the + individual scenarios of a scenario outline) and holds a + :class:`~behave.model.Scenario`. It will not be present outside of the + scope of a scenario. + + .. attribute:: tags + + The current set of active tags (as a Python set containing instances of + :class:`~behave.model.Tag` which are basically just glorified strings) + combined from the feature and scenario. This attribute will not be + present outside of a feature scope. + + .. attribute:: aborted + + This is set to true in the root namespace when the user aborts a test run + (:exc:`KeyboardInterrupt` exception). Initially: False. + + .. attribute:: failed + + This is set to true in the root namespace as soon as a step fails. + Initially: False. + + .. attribute:: table + + This is set at the step level and holds any :class:`~behave.model.Table` + associated with the step. + + .. 
attribute:: text + + This is set at the step level and holds any multiline text associated + with the step. + + .. attribute:: config + + The configuration of *behave* as determined by configuration files and + command-line options. The attributes of this object are the same as the + `configuration file section names`_. + + .. attribute:: active_outline + + This is set for each scenario in a scenario outline and references the + :class:`~behave.model.Row` that is active for the current scenario. It is + present mostly for debugging, but may be useful otherwise. + + .. attribute:: log_capture + + If logging capture is enabled then this attribute contains the captured + logging as an instance of :class:`~behave.log_capture.LoggingCapture`. + It is not present if logging is not being captured. + + .. attribute:: stdout_capture + + If stdout capture is enabled then this attribute contains the captured + output as a StringIO instance. It is not present if stdout is not being + captured. + + .. attribute:: stderr_capture + + If stderr capture is enabled then this attribute contains the captured + output as a StringIO instance. It is not present if stderr is not being + captured. + + If an attempt is made by user code to overwrite one of these variables, or + indeed by *behave* to overwrite a user-set variable, then a + :class:`behave.runner.ContextMaskWarning` warning will be raised. + + You may use the "in" operator to test whether a certain value has been set + on the context, for example: + + "feature" in context + + checks whether there is a "feature" value in the context. + + Values may be deleted from the context using "del" but only at the level + they are set. You can't delete a value set by a feature at a scenario level + but you can delete a value set for a scenario in that scenario. + + .. _`configuration file section names`: behave.html#configuration-files + """ + # pylint: disable=too-many-instance-attributes + BEHAVE = "behave" + USER = "user" + FAIL_ON_CLEANUP_ERRORS = True + + def __init__(self, runner): + self._runner = weakref.proxy(runner) + self._config = runner.config + d = self._root = { + "aborted": False, + "failed": False, + "config": self._config, + "active_outline": None, + "cleanup_errors": 0, + "@cleanups": [], # -- REQUIRED-BY: before_all() hook + "@layer": "testrun", + } + self._stack = [d] + self._record = {} + self._origin = {} + self._mode = self.BEHAVE + self.feature = None + # -- RECHECK: If needed + self.text = None + self.table = None + self.stdout_capture = None + self.stderr_capture = None + self.log_capture = None + self.fail_on_cleanup_errors = self.FAIL_ON_CLEANUP_ERRORS + + @staticmethod + def ignore_cleanup_error(context, cleanup_func, exception): + pass + + @staticmethod + def print_cleanup_error(context, cleanup_func, exception): + cleanup_func_name = getattr(cleanup_func, "__name__", None) + if not cleanup_func_name: + cleanup_func_name = "%r" % cleanup_func + print(u"CLEANUP-ERROR in %s: %s: %s" % + (cleanup_func_name, exception.__class__.__name__, exception)) + traceback.print_exc(file=sys.stdout) + # MAYBE: context._dump(pretty=True, prefix="Context: ") + # -- MARK: testrun as FAILED + # context._set_root_attribute("failed", True) + + def _do_cleanups(self): + """Execute optional cleanup functions when stack frame is popped. + A user can add a user-specified handler for cleanup errors. + + .. 
code-block:: python + + # -- FILE: features/environment.py + def cleanup_database(database): + pass + + def handle_cleanup_error(context, cleanup_func, exception): + pass + + def before_all(context): + context.on_cleanup_error = handle_cleanup_error + context.add_cleanup(cleanup_database, the_database) + """ + # -- BEST-EFFORT ALGORITHM: Tries to perform all cleanups. + assert self._stack, "REQUIRE: Non-empty stack" + current_layer = self._stack[0] + cleanup_funcs = current_layer.get("@cleanups", []) + on_cleanup_error = getattr(self, "on_cleanup_error", + self.print_cleanup_error) + context = self + cleanup_errors = [] + for cleanup_func in reversed(cleanup_funcs): + try: + cleanup_func() + except Exception as e: # pylint: disable=broad-except + # pylint: disable=protected-access + context._root["cleanup_errors"] += 1 + cleanup_errors.append(sys.exc_info()) + on_cleanup_error(context, cleanup_func, e) + + if self.fail_on_cleanup_errors and cleanup_errors: + first_cleanup_error_info = cleanup_errors[0] + del cleanup_errors # -- ENSURE: Release other exception frames. + six.reraise(*first_cleanup_error_info) + + + def _push(self, layer_name=None): + """Push a new layer on the context stack. + HINT: Use layer_name values: "scenario", "feature", "testrun". + + :param layer_name: Layer name to use (or None). + """ + initial_data = {"@cleanups": []} + if layer_name: + initial_data["@layer"] = layer_name + self._stack.insert(0, initial_data) + + def _pop(self): + """Pop the current layer from the context stack. + Performs any pending cleanups, registered for this layer. + """ + try: + self._do_cleanups() + finally: + # -- ENSURE: Layer is removed even if cleanup-errors occur. + self._stack.pop(0) + + def _use_with_behave_mode(self): + """Provides a context manager for using the context in BEHAVE mode.""" + return use_context_with_mode(self, Context.BEHAVE) + + def use_with_user_mode(self): + """Provides a context manager for using the context in USER mode.""" + return use_context_with_mode(self, Context.USER) + + def user_mode(self): + warnings.warn("Use 'use_with_user_mode()' instead", + PendingDeprecationWarning, stacklevel=2) + return self.use_with_user_mode() + + def _set_root_attribute(self, attr, value): + for frame in self.__dict__["_stack"]: + if frame is self.__dict__["_root"]: + continue + if attr in frame: + record = self.__dict__["_record"][attr] + params = { + "attr": attr, + "filename": record[0], + "line": record[1], + "function": record[3], + } + self._emit_warning(attr, params) + + self.__dict__["_root"][attr] = value + if attr not in self._origin: + self._origin[attr] = self._mode + + def _emit_warning(self, attr, params): + msg = "" + if self._mode is self.BEHAVE and self._origin[attr] is not self.BEHAVE: + msg = "behave runner is masking context attribute '%(attr)s' " \ + "originally set in %(function)s (%(filename)s:%(line)s)" + elif self._mode is self.USER: + if self._origin[attr] is not self.USER: + msg = "user code is masking context attribute '%(attr)s' " \ + "originally set by behave" + elif self._config.verbose: + msg = "user code is masking context attribute " \ + "'%(attr)s'; see the tutorial for what this means" + if msg: + msg = msg % params + warnings.warn(msg, ContextMaskWarning, stacklevel=3) + + def _dump(self, pretty=False, prefix=" "): + for level, frame in enumerate(self._stack): + print("%sLevel %d" % (prefix, level)) + if pretty: + for name in sorted(frame.keys()): + value = frame[name] + print("%s %-15s = %r" % (prefix, name, value)) + else: + print(prefix 
+ repr(frame)) + + def __getattr__(self, attr): + if attr[0] == "_": + return self.__dict__[attr] + for frame in self._stack: + if attr in frame: + return frame[attr] + msg = "'{0}' object has no attribute '{1}'" + msg = msg.format(self.__class__.__name__, attr) + raise AttributeError(msg) + + def __setattr__(self, attr, value): + if attr[0] == "_": + self.__dict__[attr] = value + return + + for frame in self._stack[1:]: + if attr in frame: + record = self._record[attr] + params = { + "attr": attr, + "filename": record[0], + "line": record[1], + "function": record[3], + } + self._emit_warning(attr, params) + + stack_limit = 2 + if six.PY2: + stack_limit += 1 # Due to traceback2 usage. + stack_frame = traceback.extract_stack(limit=stack_limit)[0] + self._record[attr] = stack_frame + frame = self._stack[0] + frame[attr] = value + if attr not in self._origin: + self._origin[attr] = self._mode + + def __delattr__(self, attr): + frame = self._stack[0] + if attr in frame: + del frame[attr] + del self._record[attr] + else: + msg = "'{0}' object has no attribute '{1}' at the current level" + msg = msg.format(self.__class__.__name__, attr) + raise AttributeError(msg) + + def __contains__(self, attr): + if attr[0] == "_": + return attr in self.__dict__ + for frame in self._stack: + if attr in frame: + return True + return False + + def execute_steps(self, steps_text): + """The steps identified in the "steps" text string will be parsed and + executed in turn just as though they were defined in a feature file. + + If the execute_steps call fails (either through error or failure + assertion) then the step invoking it will need to catch the resulting + exceptions. + + :param steps_text: Text with the Gherkin steps to execute (as string). + :returns: True, if the steps executed successfully. + :raises: AssertionError, if a step failure occurs. + :raises: ValueError, if invoked without a feature context. + """ + assert isinstance(steps_text, six.text_type), "Steps must be unicode." + if not self.feature: + raise ValueError("execute_steps() called outside of feature") + + # -- PREPARE: Save original context data for current step. + # Needed if step definition that called this method uses .table/.text + original_table = getattr(self, "table", None) + original_text = getattr(self, "text", None) + + self.feature.parser.variant = "steps" + steps = self.feature.parser.parse_steps(steps_text) + with self._use_with_behave_mode(): + for step in steps: + passed = step.run(self._runner, quiet=True, capture=False) + if not passed: + # -- ISSUE #96: Provide more substep info to diagnose problem. + step_line = u"%s %s" % (step.keyword, step.name) + message = "%s SUB-STEP: %s" % \ + (step.status.name.upper(), step_line) + if step.error_message: + message += "\nSubstep info: %s\n" % step.error_message + message += u"Traceback (of failed substep):\n" + message += u"".join(traceback.format_tb(step.exc_traceback)) + # message += u"\nTraceback (of context.execute_steps()):" + assert False, message + + # -- FINALLY: Restore original context data for current step. + self.table = original_table + self.text = original_text + return True + + def add_cleanup(self, cleanup_func, *args, **kwargs): + """Adds a cleanup function that is called when :meth:`Context._pop()` + is called. This is intended for user-cleanups. + + :param cleanup_func: Callable function + :param args: Args for cleanup_func() call (optional). + :param kwargs: Kwargs for cleanup_func() call (optional). 
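+
+        EXAMPLE (sketch; ``open_database()`` and ``close_database()`` are
+        hypothetical helpers, not part of behave):
+
+        .. code-block:: python
+
+            # -- FILE: features/environment.py
+            def before_scenario(context, scenario):
+                the_database = open_database()   # -- HYPOTHETICAL helper.
+                # -- Cleanup runs when the scenario layer is popped:
+                context.add_cleanup(the_database.close)
+                # -- OR with args: context.add_cleanup(close_database, the_database)
+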
+ """ + # MAYBE: + assert callable(cleanup_func), "REQUIRES: callable(cleanup_func)" + assert self._stack + if args or kwargs: + def internal_cleanup_func(): + cleanup_func(*args, **kwargs) + else: + internal_cleanup_func = cleanup_func + + current_frame = self._stack[0] + if cleanup_func not in current_frame["@cleanups"]: + # -- AVOID DUPLICATES: + current_frame["@cleanups"].append(internal_cleanup_func) + + +@contextlib.contextmanager +def use_context_with_mode(context, mode): + """Switch context to BEHAVE or USER mode. + Provides a context manager for switching between the two context modes. + + .. sourcecode:: python + + context = Context() + with use_context_with_mode(context, Context.BEHAVE): + ... # Do something + # -- POSTCONDITION: Original context._mode is restored. + + :param context: Context object to use. + :param mode: Mode to apply to context object. + """ + # pylint: disable=protected-access + assert mode in (Context.BEHAVE, Context.USER) + current_mode = context._mode + try: + context._mode = mode + yield + finally: + # -- RESTORE: Initial current_mode + # Even if an AssertionError/Exception is raised. + context._mode = current_mode + + +@contextlib.contextmanager +def scoped_context_layer(context, layer_name=None): + """Provides context manager for context layer (push/do-something/pop cycle). + + .. code-block:: + + with scoped_context_layer(context): + the_fixture = use_fixture(foo, context, name="foo_42") + """ + # pylint: disable=protected-access + try: + context._push(layer_name) + yield context + finally: + context._pop() + + +def path_getrootdir(path): + """ + Extract rootdir from path in a platform independent way. + + POSIX-PATH EXAMPLE: + rootdir = path_getrootdir("/foo/bar/one.feature") + assert rootdir == "/" + + WINDOWS-PATH EXAMPLE: + rootdir = path_getrootdir("D:\\foo\\bar\\one.feature") + assert rootdir == r"D:\" + """ + drive, _ = os.path.splitdrive(path) + if drive: + # -- WINDOWS: + return drive + os.path.sep + # -- POSIX: + return os.path.sep + + +class ModelRunner(object): + """ + Test runner for a behave model (features). + Provides the core functionality of a test runner and + the functional API needed by model elements. + + .. attribute:: aborted + + This is set to true when the user aborts a test run + (:exc:`KeyboardInterrupt` exception). Initially: False. + Stored as derived attribute in :attr:`Context.aborted`. 
+ """ + # pylint: disable=too-many-instance-attributes + + def __init__(self, config, features=None, step_registry=None): + self.config = config + self.features = features or [] + self.hooks = {} + self.formatters = [] + self.undefined_steps = [] + self.step_registry = step_registry + self.capture_controller = CaptureController(config) + + self.context = None + self.feature = None + self.hook_failures = 0 + + # @property + def _get_aborted(self): + value = False + if self.context: + value = self.context.aborted + return value + + # @aborted.setter + def _set_aborted(self, value): + # pylint: disable=protected-access + assert self.context, "REQUIRE: context, but context=%r" % self.context + self.context._set_root_attribute("aborted", bool(value)) + + aborted = property(_get_aborted, _set_aborted, + doc="Indicates that test run is aborted by the user.") + + def run_hook(self, name, context, *args): + if not self.config.dry_run and (name in self.hooks): + try: + with context.use_with_user_mode(): + self.hooks[name](context, *args) + # except KeyboardInterrupt: + # self.aborted = True + # if name not in ("before_all", "after_all"): + # raise + except Exception as e: # pylint: disable=broad-except + # -- HANDLE HOOK ERRORS: + use_traceback = False + if self.config.verbose: + use_traceback = True + ExceptionUtil.set_traceback(e) + extra = u"" + if "tag" in name: + extra = "(tag=%s)" % args[0] + + error_text = ExceptionUtil.describe(e, use_traceback).rstrip() + error_message = u"HOOK-ERROR in %s%s: %s" % (name, extra, error_text) + print(error_message) + self.hook_failures += 1 + if "tag" in name: + # -- SCENARIO or FEATURE + statement = getattr(context, "scenario", context.feature) + elif "all" in name: + # -- ABORT EXECUTION: For before_all/after_all + self.aborted = True + statement = None + else: + # -- CASE: feature, scenario, step + statement = args[0] + + if statement: + # -- CASE: feature, scenario, step + statement.hook_failed = True + if statement.error_message: + # -- NOTE: One exception/failure is already stored. + # Append only error message. + statement.error_message += u"\n"+ error_message + else: + # -- FIRST EXCEPTION/FAILURE: + statement.store_exception_context(e) + statement.error_message = error_message + + def setup_capture(self): + if not self.context: + self.context = Context(self) + self.capture_controller.setup_capture(self.context) + + def start_capture(self): + self.capture_controller.start_capture() + + def stop_capture(self): + self.capture_controller.stop_capture() + + def teardown_capture(self): + self.capture_controller.teardown_capture() + + def run_model(self, features=None): + # pylint: disable=too-many-branches + if not self.context: + self.context = Context(self) + if self.step_registry is None: + self.step_registry = the_step_registry + if features is None: + features = self.features + + # -- ENSURE: context.execute_steps() works in weird cases (hooks, ...) + context = self.context + self.hook_failures = 0 + self.setup_capture() + self.run_hook("before_all", context) + + run_feature = not self.aborted + failed_count = 0 + undefined_steps_initial_size = len(self.undefined_steps) + for feature in features: + if run_feature: + try: + self.feature = feature + for formatter in self.formatters: + formatter.uri(feature.filename) + + failed = feature.run(self) + if failed: + failed_count += 1 + if self.config.stop or self.aborted: + # -- FAIL-EARLY: After first failure. 
+ run_feature = False + except KeyboardInterrupt: + self.aborted = True + failed_count += 1 + run_feature = False + + # -- ALWAYS: Report run/not-run feature to reporters. + # REQUIRED-FOR: Summary to keep track of untested features. + for reporter in self.config.reporters: + reporter.feature(feature) + + # -- AFTER-ALL: + # pylint: disable=protected-access, broad-except + cleanups_failed = False + self.run_hook("after_all", self.context) + try: + self.context._do_cleanups() # Without dropping the last context layer. + except Exception: + cleanups_failed = True + + if self.aborted: + print("\nABORTED: By user.") + for formatter in self.formatters: + formatter.close() + for reporter in self.config.reporters: + reporter.end() + + failed = ((failed_count > 0) or self.aborted or (self.hook_failures > 0) + or (len(self.undefined_steps) > undefined_steps_initial_size) + or cleanups_failed) + # XXX-MAYBE: or context.failed) + return failed + + def run(self): + """ + Implements the run method by running the model. + """ + self.context = Context(self) + return self.run_model() + + +class Runner(ModelRunner): + """ + Standard test runner for behave: + + * setup paths + * loads environment hooks + * loads step definitions + * select feature files, parses them and creates model (elements) + """ + def __init__(self, config): + super(Runner, self).__init__(config) + self.path_manager = PathManager() + self.base_dir = None + + + def setup_paths(self): + # pylint: disable=too-many-branches, too-many-statements + if self.config.paths: + if self.config.verbose: + print("Supplied path:", \ + ", ".join('"%s"' % path for path in self.config.paths)) + first_path = self.config.paths[0] + if hasattr(first_path, "filename"): + # -- BETTER: isinstance(first_path, FileLocation): + first_path = first_path.filename + base_dir = first_path + if base_dir.startswith("@"): + # -- USE: behave @features.txt + base_dir = base_dir[1:] + file_locations = self.feature_locations() + if file_locations: + base_dir = os.path.dirname(file_locations[0].filename) + base_dir = os.path.abspath(base_dir) + + # supplied path might be to a feature file + if os.path.isfile(base_dir): + if self.config.verbose: + print("Primary path is to a file so using its directory") + base_dir = os.path.dirname(base_dir) + else: + if self.config.verbose: + print('Using default path "./features"') + base_dir = os.path.abspath("features") + + # Get the root. This is not guaranteed to be "/" because Windows. + root_dir = path_getrootdir(base_dir) + new_base_dir = base_dir + steps_dir = self.config.steps_dir + environment_file = self.config.environment_file + + while True: + if self.config.verbose: + print("Trying base directory:", new_base_dir) + + if os.path.isdir(os.path.join(new_base_dir, steps_dir)): + break + if os.path.isfile(os.path.join(new_base_dir, environment_file)): + break + if new_base_dir == root_dir: + break + + new_base_dir = os.path.dirname(new_base_dir) + + if new_base_dir == root_dir: + if self.config.verbose: + if not self.config.paths: + print('ERROR: Could not find "%s" directory. '\ + 'Please specify where to find your features.' 
% \ + steps_dir) + else: + print('ERROR: Could not find "%s" directory in your '\ + 'specified path "%s"' % (steps_dir, base_dir)) + + message = 'No %s directory in %r' % (steps_dir, base_dir) + raise ConfigError(message) + + base_dir = new_base_dir + self.config.base_dir = base_dir + + for dirpath, dirnames, filenames in os.walk(base_dir): + if [fn for fn in filenames if fn.endswith(".feature")]: + break + else: + if self.config.verbose: + if not self.config.paths: + print('ERROR: Could not find any ".feature" files. '\ + 'Please specify where to find your features.') + else: + print('ERROR: Could not find any ".feature" files '\ + 'in your specified path "%s"' % base_dir) + raise ConfigError('No feature files in %r' % base_dir) + + self.base_dir = base_dir + self.path_manager.add(base_dir) + if not self.config.paths: + self.config.paths = [base_dir] + + if base_dir != os.getcwd(): + self.path_manager.add(os.getcwd()) + + def before_all_default_hook(self, context): + """ + Default implementation for :func:`before_all()` hook. + Setup the logging subsystem based on the configuration data. + """ + # pylint: disable=no-self-use + context.config.setup_logging() + + def load_hooks(self, filename=None): + filename = filename or self.config.environment_file + hooks_path = os.path.join(self.base_dir, filename) + if os.path.exists(hooks_path): + exec_file(hooks_path, self.hooks) + + if "before_all" not in self.hooks: + self.hooks["before_all"] = self.before_all_default_hook + + def load_step_definitions(self, extra_step_paths=None): + if extra_step_paths is None: + extra_step_paths = [] + # -- Allow steps to import other stuff from the steps dir + # NOTE: Default matcher can be overridden in "environment.py" hook. + steps_dir = os.path.join(self.base_dir, self.config.steps_dir) + step_paths = [steps_dir] + list(extra_step_paths) + load_step_modules(step_paths) + + def feature_locations(self): + return collect_feature_locations(self.config.paths) + + def run(self): + with self.path_manager: + self.setup_paths() + return self.run_with_paths() + + def run_with_paths(self): + self.context = Context(self) + self.load_hooks() + self.load_step_definitions() + + # -- ENSURE: context.execute_steps() works in weird cases (hooks, ...) + # self.setup_capture() + # self.run_hook("before_all", self.context) + + # -- STEP: Parse all feature files (by using their file location). + feature_locations = [filename for filename in self.feature_locations() + if not self.config.exclude(filename)] + features = parse_features(feature_locations, language=self.config.lang) + self.features.extend(features) + + # -- STEP: Run all features. + stream_openers = self.config.outputs + self.formatters = make_formatters(self.config, stream_openers) + return self.run_model() diff --git a/venv/Lib/site-packages/behave/runner_util.py b/venv/Lib/site-packages/behave/runner_util.py new file mode 100644 index 0000000..59376dc --- /dev/null +++ b/venv/Lib/site-packages/behave/runner_util.py @@ -0,0 +1,501 @@ +# -*- coding: utf-8 -*- +""" +Contains utility functions and classes for Runners. 
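+
+Typical usage of the main helpers below (a sketch; the paths are illustrative):
+
+    locations = collect_feature_locations(["features/"])
+    features = parse_features(locations)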
+"""
+
+from __future__ import absolute_import
+from bisect import bisect
+import glob
+import os.path
+import re
+import sys
+from six import string_types
+from behave import parser
+from behave.model_core import FileLocation
+from behave.textutil import ensure_stream_with_encoder
+# LAZY: from behave.step_registry import setup_step_decorators
+
+
+# -----------------------------------------------------------------------------
+# EXCEPTIONS:
+# -----------------------------------------------------------------------------
+class FileNotFoundError(LookupError):
+    pass
+
+
+class InvalidFileLocationError(LookupError):
+    pass
+
+
+class InvalidFilenameError(ValueError):
+    pass
+
+
+# -----------------------------------------------------------------------------
+# CLASS: FileLocationParser
+# -----------------------------------------------------------------------------
+class FileLocationParser(object):
+    pattern = re.compile(r"^\s*(?P<filename>.*):(?P<line>\d+)\s*$", re.UNICODE)
+
+    @classmethod
+    def parse(cls, text):
+        match = cls.pattern.match(text)
+        if match:
+            filename = match.group("filename").strip()
+            line = int(match.group("line"))
+            return FileLocation(filename, line)
+        # -- NORMAL PATH/FILENAME:
+        filename = text.strip()
+        return FileLocation(filename)
+
+    # @classmethod
+    # def compare(cls, location1, location2):
+    #     loc1 = cls.parse(location1)
+    #     loc2 = cls.parse(location2)
+    #     return cmp(loc1, loc2)
+
+
+# -----------------------------------------------------------------------------
+# CLASSES:
+# -----------------------------------------------------------------------------
+class FeatureScenarioLocationCollector(object):
+    """
+    Collects FileLocation objects for a feature.
+    This is used to select a subset of scenarios in a feature that should run.
+
+    USE CASE:
+        behave feature/foo.feature:10
+        behave @selected_features.txt
+        behave @rerun_failed_scenarios.txt
+
+    With features configuration files, like:
+
+        # -- file:rerun_failed_scenarios.txt
+        feature/foo.feature:10
+        feature/foo.feature:25
+        feature/bar.feature
+        # -- EOF
+
+    """
+    def __init__(self, feature=None, location=None, filename=None):
+        if not filename and location:
+            filename = location.filename
+        self.feature = feature
+        self.filename = filename
+        self.use_all_scenarios = False
+        self.scenario_lines = set()
+        self.all_scenarios = set()
+        self.selected_scenarios = set()
+        if location:
+            self.add_location(location)
+
+    def clear(self):
+        self.feature = None
+        self.filename = None
+        self.use_all_scenarios = False
+        self.scenario_lines = set()
+        self.all_scenarios = set()
+        self.selected_scenarios = set()
+
+    def add_location(self, location):
+        if not self.filename:
+            self.filename = location.filename
+            # if self.feature and False:
+            #     self.filename = self.feature.filename
+        # -- NORMAL CASE:
+        assert self.filename == location.filename, \
+            "%s <=> %s" % (self.filename, location.filename)
+        if location.line:
+            self.scenario_lines.add(location.line)
+        else:
+            # -- LOCATION WITHOUT LINE NUMBER:
+            # Selects all scenarios in a feature.
+            self.use_all_scenarios = True
+
+    @staticmethod
+    def select_scenario_line_for(line, scenario_lines):
+        """
+        Select scenario line for any given line.
+
+        ALGORITHM: scenario.line <= line < next_scenario.line
+
+        :param line: A line number in the file (as number).
+        :param scenario_lines: Sorted list of scenario lines.
+        :return: Scenario.line (first line) for the given line.
+        """
+        if not scenario_lines:
+            return 0    # -- Select all scenarios.
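+        # -- EXAMPLE (sketch): with scenario_lines=[3, 10, 25]:
+        #    select_scenario_line_for(12, ...) -> 10  (since 10 <= 12 < 25)
+        #    select_scenario_line_for(1, ...)  -> 3   (bad line, auto-corrected)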
+ pos = bisect(scenario_lines, line) - 1 + if pos < 0: + pos = 0 + return scenario_lines[pos] + + def discover_selected_scenarios(self, strict=False): + """ + Discovers selected scenarios based on the provided file locations. + In addition: + * discover all scenarios + * auto-correct BAD LINE-NUMBERS + + :param strict: If true, raises exception if file location is invalid. + :return: List of selected scenarios of this feature (as set). + :raises InvalidFileLocationError: + If file location is no exactly correct and strict is true. + """ + assert self.feature + if not self.all_scenarios: + self.all_scenarios = self.feature.walk_scenarios() + + # -- STEP: Check if lines are correct. + existing_lines = [scenario.line for scenario in self.all_scenarios] + selected_lines = list(self.scenario_lines) + for line in selected_lines: + new_line = self.select_scenario_line_for(line, existing_lines) + if new_line != line: + # -- AUTO-CORRECT BAD-LINE: + self.scenario_lines.remove(line) + self.scenario_lines.add(new_line) + if strict: + msg = "Scenario location '...:%d' should be: '%s:%d'" % \ + (line, self.filename, new_line) + raise InvalidFileLocationError(msg) + + # -- STEP: Determine selected scenarios and store them. + scenario_lines = set(self.scenario_lines) + selected_scenarios = set() + for scenario in self.all_scenarios: + if scenario.line in scenario_lines: + selected_scenarios.add(scenario) + scenario_lines.remove(scenario.line) + # -- CHECK ALL ARE RESOLVED: + assert not scenario_lines + return selected_scenarios + + def build_feature(self): + """ + Determines which scenarios in the feature are selected and marks the + remaining scenarios as skipped. Scenarios with the following tags + are excluded from skipped-marking: + + * @setup + * @teardown + + If no file locations are stored, the unmodified feature is returned. + + :return: Feature object to use. + """ + use_all_scenarios = not self.scenario_lines or self.use_all_scenarios + if not self.feature or use_all_scenarios: + return self.feature + + # -- CASE: Select subset of all scenarios of this feature. + # Mark other scenarios as skipped (except in a few cases). + self.all_scenarios = self.feature.walk_scenarios() + self.selected_scenarios = self.discover_selected_scenarios() + unselected_scenarios = set(self.all_scenarios) - self.selected_scenarios + for scenario in unselected_scenarios: + if "setup" in scenario.tags or "teardown" in scenario.tags: + continue + scenario.mark_skipped() + return self.feature + + +class FeatureListParser(object): + """ + Read textual file, ala '@features.txt'. This file contains: + + * a feature filename or FileLocation on each line + * empty lines (skipped) + * comment lines (skipped) + * wildcards are expanded to select 0..N filenames or directories + + Relative path names are evaluated relative to the listfile directory. + A leading '@' (AT) character is removed from the listfile name. + """ + + @staticmethod + def parse(text, here=None): + """ + Parse contents of a features list file as text. + + :param text: Contents of a features list(file). + :param here: Current working directory to use (optional). + :return: List of FileLocation objects + """ + locations = [] + for line in text.splitlines(): + filename = line.strip() + if not filename: + continue # SKIP: Over empty line(s). + elif filename.startswith('#'): + continue # SKIP: Over comment line(s). 
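+            # -- EXAMPLE (sketch): for the listfile lines
+            #       features/foo.feature:10
+            #       features/bar.feature
+            #    two FileLocation objects are built (the first with line=10).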
+
+            if here and not os.path.isabs(filename):
+                filename = os.path.join(here, filename)
+            filename = os.path.normpath(filename)
+            if glob.has_magic(filename):
+                # -- WITH WILDCARDS:
+                for filename2 in glob.iglob(filename):
+                    location = FileLocationParser.parse(filename2)
+                    locations.append(location)
+            else:
+                location = FileLocationParser.parse(filename)
+                locations.append(location)
+        return locations
+
+    @classmethod
+    def parse_file(cls, filename):
+        """
+        Read textual file, ala '@features.txt'.
+
+        :param filename: Name of feature list file.
+        :return: List of feature file locations.
+        """
+        if filename.startswith('@'):
+            filename = filename[1:]
+        if not os.path.isfile(filename):
+            raise FileNotFoundError(filename)
+        here = os.path.dirname(filename) or "."
+        # -- MAYBE BETTER:
+        # contents = codecs.open(filename, "utf-8").read()
+        contents = open(filename).read()
+        return cls.parse(contents, here)
+
+
+class PathManager(object):
+    """Context manager to add paths to sys.path (python search path)
+    within a scope.
+    """
+
+    def __init__(self, paths=None):
+        self.initial_paths = paths or []
+        self.paths = None
+
+    def __enter__(self):
+        self.paths = list(self.initial_paths)
+        sys.path = self.paths + sys.path
+
+    def __exit__(self, *crap):
+        for path in self.paths:
+            sys.path.remove(path)
+        self.paths = None
+
+    def add(self, path):
+        if self.paths is None:
+            # -- CALLED OUTSIDE OF CONTEXT:
+            self.initial_paths.append(path)
+        else:
+            sys.path.insert(0, path)
+            self.paths.append(path)
+
+
+# -----------------------------------------------------------------------------
+# FUNCTIONS:
+# -----------------------------------------------------------------------------
+def parse_features(feature_files, language=None):
+    """
+    Parse feature files and return list of Feature model objects.
+    Handles:
+
+      * feature file names, ala "alice.feature"
+      * feature file locations, ala: "alice.feature:10"
+
+    :param feature_files: List of feature file names to parse.
+    :param language: Default language to use.
+    :return: List of feature objects.
+    """
+    scenario_collector = FeatureScenarioLocationCollector()
+    features = []
+    for location in feature_files:
+        if not isinstance(location, FileLocation):
+            assert isinstance(location, string_types)
+            location = FileLocation(os.path.normpath(location))
+
+        if location.filename == scenario_collector.filename:
+            scenario_collector.add_location(location)
+            continue
+        elif scenario_collector.feature:
+            # -- ADD CURRENT FEATURE: As collection of scenarios.
+            current_feature = scenario_collector.build_feature()
+            features.append(current_feature)
+            scenario_collector.clear()
+
+        # -- NEW FEATURE:
+        assert isinstance(location, FileLocation)
+        filename = os.path.abspath(location.filename)
+        feature = parser.parse_file(filename, language=language)
+        if feature:
+            # -- VALID FEATURE:
+            # SKIP CORNER-CASE: Feature file without any feature(s).
+            scenario_collector.feature = feature
+            scenario_collector.add_location(location)
+    # -- FINALLY:
+    if scenario_collector.feature:
+        current_feature = scenario_collector.build_feature()
+        features.append(current_feature)
+    return features
+
+
+def collect_feature_locations(paths, strict=True):
+    """
+    Collect feature file names by processing list of paths (from command line).
+    A path can be a:
+
+      * filename (ending with ".feature")
+      * location, ala "{filename}:{line_number}"
+      * features configuration filename, ala "@features.txt"
+      * directory, to discover and collect all "*.feature" files below.
+
+    :param paths: Paths to process.
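+    :param strict: If true, a missing file causes FileNotFoundError (optional).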
+ :return: Feature file locations to use (as list of FileLocations). + """ + locations = [] + for path in paths: + if os.path.isdir(path): + for dirpath, dirnames, filenames in os.walk(path): + dirnames.sort() + for filename in sorted(filenames): + if filename.endswith(".feature"): + location = FileLocation(os.path.join(dirpath, filename)) + locations.append(location) + elif path.startswith('@'): + # -- USE: behave @list_of_features.txt + locations.extend(FeatureListParser.parse_file(path[1:])) + else: + # -- OTHERWISE: Normal filename or location (schema: filename:line) + location = FileLocationParser.parse(path) + if not location.filename.endswith(".feature"): + raise InvalidFilenameError(location.filename) + elif location.exists(): + locations.append(location) + elif strict: + raise FileNotFoundError(path) + return locations + + +def exec_file(filename, globals_=None, locals_=None): + if globals_ is None: + globals_ = {} + if locals_ is None: + locals_ = globals_ + locals_["__file__"] = filename + with open(filename, "rb") as f: + # pylint: disable=exec-used + filename2 = os.path.relpath(filename, os.getcwd()) + code = compile(f.read(), filename2, "exec", dont_inherit=True) + exec(code, globals_, locals_) + + +def load_step_modules(step_paths): + """Load step modules with step definitions from step_paths directories.""" + from behave import matchers + from behave.step_registry import setup_step_decorators + step_globals = { + "use_step_matcher": matchers.use_step_matcher, + "step_matcher": matchers.step_matcher, # -- DEPRECATING + } + setup_step_decorators(step_globals) + + # -- Allow steps to import other stuff from the steps dir + # NOTE: Default matcher can be overridden in "environment.py" hook. + with PathManager(step_paths): + default_matcher = matchers.current_matcher + for path in step_paths: + for name in sorted(os.listdir(path)): + if name.endswith(".py"): + # -- LOAD STEP DEFINITION: + # Reset to default matcher after each step-definition. + # A step-definition may change the matcher 0..N times. + # ENSURE: Each step definition has clean globals. + # try: + step_module_globals = step_globals.copy() + exec_file(os.path.join(path, name), step_module_globals) + matchers.current_matcher = default_matcher + + +def make_undefined_step_snippet(step, language=None): + """Helper function to create an undefined-step snippet for a step. + + :param step: Step to use (as Step object or string). + :param language: i18n language, optionally needed for step text parsing. + :return: Undefined-step snippet (as string). + """ + if isinstance(step, string_types): + step_text = step + steps = parser.parse_steps(step_text, language=language) + step = steps[0] + assert step, "ParseError: %s" % step_text + + prefix = u"u" + single_quote = "'" + if single_quote in step.name: + step.name = step.name.replace(single_quote, r"\'") + + schema = u"@%s(%s'%s')\ndef step_impl(context):\n" + schema += u" raise NotImplementedError(%s'STEP: %s %s')\n\n" + snippet = schema % (step.step_type, prefix, step.name, + prefix, step.step_type.title(), step.name) + return snippet + + +def make_undefined_step_snippets(undefined_steps, make_snippet=None): + """Creates a list of undefined step snippets. + Note that duplicated steps are removed internally. + + :param undefined_steps: List of undefined steps (as Step object or string). 
+ :param make_snippet: Function that generates snippet (optional) + :return: List of undefined step snippets (as list of strings) + """ + if make_snippet is None: + make_snippet = make_undefined_step_snippet + + # -- NOTE: Remove any duplicated undefined steps. + step_snippets = [] + collected_steps = set() + for undefined_step in undefined_steps: + if undefined_step in collected_steps: + continue + collected_steps.add(undefined_step) + step_snippet = make_snippet(undefined_step) + step_snippets.append(step_snippet) + return step_snippets + + +def print_undefined_step_snippets(undefined_steps, stream=None, colored=True): + """ + Print snippets for the undefined steps that were discovered. + + :param undefined_steps: List of undefined steps (as list). + :param stream: Output stream to use (default: sys.stderr). + :param colored: Indicates if coloring should be used (default: True) + """ + if not undefined_steps: + return + if not stream: + stream = sys.stderr + + msg = u"\nYou can implement step definitions for undefined steps with " + msg += u"these snippets:\n\n" + msg += u"\n".join(make_undefined_step_snippets(undefined_steps)) + + if colored: + # -- OOPS: Unclear if stream supports ANSI coloring. + from behave.formatter.ansi_escapes import escapes + msg = escapes['undefined'] + msg + escapes['reset'] + + stream = ensure_stream_with_encoder(stream) + stream.write(msg) + stream.flush() + +def reset_runtime(): + """Reset runtime environment. + Best effort to reset module data to initial state. + """ + from behave import step_registry + from behave import matchers + # -- RESET 1: behave.step_registry + step_registry.registry = step_registry.StepRegistry() + step_registry.setup_step_decorators(None, step_registry.registry) + # -- RESET 2: behave.matchers + matchers.ParseMatcher.custom_types = {} + matchers.current_matcher = matchers.ParseMatcher diff --git a/venv/Lib/site-packages/behave/step_registry.py b/venv/Lib/site-packages/behave/step_registry.py new file mode 100644 index 0000000..b235711 --- /dev/null +++ b/venv/Lib/site-packages/behave/step_registry.py @@ -0,0 +1,112 @@ +# -*- coding: UTF-8 -*- +""" +Provides a step registry and step decorators. +The step registry allows to match steps (model elements) with +step implementations (step definitions). This is necessary to execute steps. +""" + +from __future__ import absolute_import +from behave.matchers import Match, get_matcher +from behave.textutil import text as _text + +# limit import * to just the decorators +# pylint: disable=undefined-all-variable +# names = "given when then step" +# names = names + " " + names.title() +# __all__ = names.split() +__all__ = [ + "given", "when", "then", "step", # PREFERRED. + "Given", "When", "Then", "Step" # Also possible. +] + + +class AmbiguousStep(ValueError): + pass + + +class StepRegistry(object): + def __init__(self): + self.steps = { + "given": [], + "when": [], + "then": [], + "step": [], + } + + @staticmethod + def same_step_definition(step, other_pattern, other_location): + return (step.pattern == other_pattern and + step.location == other_location and + other_location.filename != "") + + def add_step_definition(self, keyword, step_text, func): + step_location = Match.make_location(func) + step_type = keyword.lower() + step_text = _text(step_text) + step_definitions = self.steps[step_type] + for existing in step_definitions: + if self.same_step_definition(existing, step_text, step_location): + # -- EXACT-STEP: Same step function is already registered. 
+ # This may occur when a step module imports another one. + return + elif existing.match(step_text): # -- SIMPLISTIC + message = u"%s has already been defined in\n existing step %s" + new_step = u"@%s('%s')" % (step_type, step_text) + existing.step_type = step_type + existing_step = existing.describe() + existing_step += u" at %s" % existing.location + raise AmbiguousStep(message % (new_step, existing_step)) + step_definitions.append(get_matcher(func, step_text)) + + def find_step_definition(self, step): + candidates = self.steps[step.step_type] + more_steps = self.steps["step"] + if step.step_type != "step" and more_steps: + # -- ENSURE: self.step_type lists are not modified/extended. + candidates = list(candidates) + candidates += more_steps + + for step_definition in candidates: + if step_definition.match(step.name): + return step_definition + return None + + def find_match(self, step): + candidates = self.steps[step.step_type] + more_steps = self.steps["step"] + if step.step_type != "step" and more_steps: + # -- ENSURE: self.step_type lists are not modified/extended. + candidates = list(candidates) + candidates += more_steps + + for step_definition in candidates: + result = step_definition.match(step.name) + if result: + return result + + return None + + def make_decorator(self, step_type): + def decorator(step_text): + def wrapper(func): + self.add_step_definition(step_type, step_text, func) + return func + return wrapper + return decorator + + +registry = StepRegistry() + +# -- Create the decorators +# pylint: disable=redefined-outer-name +def setup_step_decorators(run_context=None, registry=registry): + if run_context is None: + run_context = globals() + for step_type in ("given", "when", "then", "step"): + step_decorator = registry.make_decorator(step_type) + run_context[step_type.title()] = run_context[step_type] = step_decorator + +# ----------------------------------------------------------------------------- +# MODULE INIT: +# ----------------------------------------------------------------------------- +setup_step_decorators() diff --git a/venv/Lib/site-packages/behave/tag_expression.py b/venv/Lib/site-packages/behave/tag_expression.py new file mode 100644 index 0000000..0dd0169 --- /dev/null +++ b/venv/Lib/site-packages/behave/tag_expression.py @@ -0,0 +1,111 @@ +# -*- coding: utf-8 -*- + +import six + +class TagExpression(object): + """ + Tag expression, as logical boolean expression, to select + (include or exclude) model elements. + + BOOLEAN LOGIC := (or_expr1) and (or_expr2) and ... + with or_exprN := [not] tag1 or [not] tag2 or ... + """ + + def __init__(self, tag_expressions): + self.ands = [] + self.limits = {} + + for expr in tag_expressions: + self.store_and_extract_limits(self.normalized_tags_from_or(expr)) + + @staticmethod + def normalize_tag(tag): + """ + Normalize a tag for a tag expression: + + * strip whitespace + * strip '@' char + * convert '~' (tilde) into '-' (minus sign) + + :param tag: Tag (as string). + :return: Normalized tag (as string). + """ + tag = tag.strip() + if tag.startswith('@'): + tag = tag[1:] + elif tag.startswith('-@') or tag.startswith('~@'): + tag = '-' + tag[2:] + elif tag.startswith('~'): + tag = '-' + tag[1:] + return tag + + @classmethod + def normalized_tags_from_or(cls, expr): + """ + Normalizes all tags in an OR expression (and return it as list). + + :param expr: OR expression to normalize and split (as string). 
+ :return: Generator of normalized tags (as string) + """ + for tag in expr.strip().split(','): + yield cls.normalize_tag(tag) + + def store_and_extract_limits(self, tags): + tags_with_negation = [] + + for tag in tags: + negated = tag.startswith('-') + tag = tag.split(':') + tag_with_negation = tag.pop(0) + tags_with_negation.append(tag_with_negation) + + if tag: + limit = int(tag[0]) + if negated: + tag_without_negation = tag_with_negation[1:] + else: + tag_without_negation = tag_with_negation + limited = tag_without_negation in self.limits + if limited and self.limits[tag_without_negation] != limit: + msg = "Inconsistent tag limits for {0}: {1:d} and {2:d}" + msg = msg.format(tag_without_negation, + self.limits[tag_without_negation], limit) + raise Exception(msg) + self.limits[tag_without_negation] = limit + + if tags_with_negation: + self.ands.append(tags_with_negation) + + def check(self, tags): + """ + Checks if this tag expression matches the tags of a model element. + + :param tags: List of tags of a model element. + :return: True, if tag expression matches. False, otherwise. + """ + if not self.ands: + return True + + element_tags = set(tags) + + def test_tag(xtag): + if xtag.startswith('-'): # -- or xtag.startswith('~'): + return xtag[1:] not in element_tags + return xtag in element_tags + + # -- EVALUATE: (or_expr1) and (or_expr2) and ... + return all(any(test_tag(xtag) for xtag in ors) for ors in self.ands) + + def __len__(self): + return len(self.ands) + + def __str__(self): + """Conversion back into string that represents this tag expression.""" + and_parts = [] + for or_terms in self.ands: + and_parts.append(u",".join(or_terms)) + return u" ".join(and_parts) + + if six.PY2: + __unicode__ = __str__ + __str__ = lambda self: self.__unicode__().encode("utf-8") diff --git a/venv/Lib/site-packages/behave/tag_matcher.py b/venv/Lib/site-packages/behave/tag_matcher.py new file mode 100644 index 0000000..f1f955b --- /dev/null +++ b/venv/Lib/site-packages/behave/tag_matcher.py @@ -0,0 +1,465 @@ +# -*- coding: utf-8 -*- + +from __future__ import absolute_import +import re +import operator +import warnings +import six + + +class TagMatcher(object): + """Abstract base class that defines the TagMatcher protocol.""" + + def should_run_with(self, tags): + """Determines if a feature/scenario with these tags should run or not. + + :param tags: List of scenario/feature tags to check. + :return: True, if scenario/feature should run. + :return: False, if scenario/feature should be excluded from the run-set. + """ + return not self.should_exclude_with(tags) + + def should_exclude_with(self, tags): + """Determines if a feature/scenario with these tags should be excluded + from the run-set. + + :param tags: List of scenario/feature tags to check. + :return: True, if scenario/feature should be excluded from the run-set. + :return: False, if scenario/feature should run. + """ + raise NotImplementedError + + +class ActiveTagMatcher(TagMatcher): + """Provides an active tag matcher for many categories. + + TAG SCHEMA: + * active.with_{category}={value} + * not_active.with_{category}={value} + * use.with_{category}={value} + * not.with_{category}={value} + * only.with_{category}={value} (NOTE: For backward compatibility) + + TAG LOGIC + ---------- + + Determine active-tag groups by grouping active-tags + with same category together:: + + active_group.enabled := enabled(group.tag1) or enabled(group.tag2) or ... + active_tags.enabled := enabled(group1) and enabled(group2) and ... 
+
+    All active-tag groups must be turned "on".
+    Otherwise, the model element should be excluded.
+
+    CONCEPT: ValueProvider
+    ------------------------------
+
+    A ValueProvider provides the value of a category, used in active tags.
+    A ValueProvider must provide a mapping-like protocol:
+
+    .. code-block:: python
+
+        class MyValueProvider(object):
+            def get(self, category_name, default=None):
+                ...
+                return category_value   # OR: default, if category is unknown.
+
+    EXAMPLE:
+    --------
+
+    Run some scenarios only when runtime conditions are met:
+
+      * Run scenario Alice only on Windows OS
+      * Run scenario Bob with all browsers except Chrome
+
+    .. code-block:: gherkin
+
+        # -- FILE: features/alice.feature
+        Feature:
+
+          @active.with_os=win32
+          Scenario: Alice (Run only on Windows)
+            Given I do something
+            ...
+
+          @not_active.with_browser=chrome
+          Scenario: Bob (Excluded with Web-Browser Chrome)
+            Given I do something else
+            ...
+
+
+    .. code-block:: python
+
+        # -- FILE: features/environment.py
+        from behave.tag_matcher import ActiveTagMatcher
+        import os
+        import sys
+
+        # -- MATCHES ANY ACTIVE TAGS: @{prefix}.with_{category}={value}
+        # NOTE: active_tag_value_provider provides current category values.
+        active_tag_value_provider = {
+            "browser": os.environ.get("BEHAVE_BROWSER", "chrome"),
+            "os": sys.platform,
+        }
+        active_tag_matcher = ActiveTagMatcher(active_tag_value_provider)
+
+        def before_feature(context, feature):
+            if active_tag_matcher.should_exclude_with(feature.tags):
+                feature.skip()   #< LATE-EXCLUDE from run-set.
+
+        def before_scenario(context, scenario):
+            if active_tag_matcher.should_exclude_with(scenario.effective_tags):
+                exclude_reason = active_tag_matcher.exclude_reason
+                scenario.skip(exclude_reason)   #< LATE-EXCLUDE from run-set.
+    """
+    value_separator = "="
+    tag_prefixes = ["active", "not_active", "use", "not", "only"]
+    tag_schema = r"^(?P<prefix>%s)\.with_(?P<category>\w+(\.\w+)*)%s(?P<value>.*)$"
+    ignore_unknown_categories = True
+    use_exclude_reason = False
+
+    def __init__(self, value_provider, tag_prefixes=None,
+                 value_separator=None, ignore_unknown_categories=None):
+        if value_provider is None:
+            value_provider = {}
+        if tag_prefixes is None:
+            tag_prefixes = self.tag_prefixes
+        if ignore_unknown_categories is None:
+            ignore_unknown_categories = self.ignore_unknown_categories
+
+        super(ActiveTagMatcher, self).__init__()
+        self.value_provider = value_provider
+        self.tag_pattern = self.make_tag_pattern(tag_prefixes, value_separator)
+        self.tag_prefixes = tag_prefixes
+        self.ignore_unknown_categories = ignore_unknown_categories
+        self.exclude_reason = None
+
+    @classmethod
+    def make_tag_pattern(cls, tag_prefixes, value_separator=None):
+        if value_separator is None:
+            value_separator = cls.value_separator
+        any_tag_prefix = r"|".join(tag_prefixes)
+        expression = cls.tag_schema % (any_tag_prefix, value_separator)
+        return re.compile(expression)
+
+    @classmethod
+    def make_category_tag(cls, category, value=None,
+                          tag_prefix=None, value_sep=None):
+        """Build category tag (mostly for testing purposes).
+        :return: Category tag as string (without leading AT char).
+        """
+        if tag_prefix is None:
+            tag_prefix = cls.tag_prefixes[0]    # -- USE: First as default.
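+        # -- EXAMPLE (sketch):
+        #    make_category_tag("browser", "chrome") -> "active.with_browser=chrome"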
+ if value_sep is None: + value_sep = cls.value_separator + value = value or "" + return "%s.with_%s%s%s" % (tag_prefix, category, value_sep, value) + + def is_tag_negated(self, tag): # pylint: disable=no-self-use + return tag.startswith("not") + + def is_tag_group_enabled(self, group_category, group_tag_pairs): + """Provides boolean logic to determine if all active-tags + which use the same category result in a enabled value. + + Use LOGICAL-OR expression for active-tags with same category:: + + category_tag_group.enabled := enabled(tag1) or enabled(tag2) or ... + + .. code-block:: gherkin + + @use.with_xxx=alice + @use.with_xxx=bob + @not.with_xxx=charly + Scenario: + Given a step passes + ... + + :param group_category: Category for this tag-group (as string). + :param category_tag_group: List of active-tag match-pairs. + :return: True, if tag-group is enabled. + """ + if not group_tag_pairs: + # -- CASE: Empty group is always enabled (CORNER-CASE). + return True + + current_value = self.value_provider.get(group_category, None) + if current_value is None and self.ignore_unknown_categories: + # -- CASE: Unknown category, ignore it. + return True + + tags_enabled = [] + for category_tag, tag_match in group_tag_pairs: + tag_prefix = tag_match.group("prefix") + category = tag_match.group("category") + tag_value = tag_match.group("value") + assert category == group_category + + is_category_tag_switched_on = operator.eq # equal_to + if self.is_tag_negated(tag_prefix): + is_category_tag_switched_on = operator.ne # not_equal_to + + tag_enabled = is_category_tag_switched_on(tag_value, current_value) + tags_enabled.append(tag_enabled) + return any(tags_enabled) # -- PROVIDES: LOGICAL-OR expression + + def should_exclude_with(self, tags): + group_categories = self.group_active_tags_by_category(tags) + for group_category, category_tag_pairs in group_categories: + if not self.is_tag_group_enabled(group_category, category_tag_pairs): + # -- LOGICAL-AND SHORTCUT: Any false => Makes everything false + if self.use_exclude_reason: + current_value = self.value_provider.get(group_category, None) + reason = "%s (but: %s)" % (group_category, current_value) + self.exclude_reason = reason + return True # SHOULD-EXCLUDE: not enabled = not False + # -- LOGICAL-AND: All parts are True + return False # SHOULD-EXCLUDE: not enabled = not True + + def select_active_tags(self, tags): + """Select all active tags that match the tag schema pattern. + + :param tags: List of tags (as string). + :return: List of (tag, match_object) pairs (as generator). + """ + for tag in tags: + match_object = self.tag_pattern.match(tag) + if match_object: + yield (tag, match_object) + + def group_active_tags_by_category(self, tags): + """Select all active tags that match the tag schema pattern + and returns groups of active-tags, each group with tags + of the same category. + + :param tags: List of tags (as string). + :return: List of tag-groups (as generator), each tag-group is a + list of (tag1, match1) pairs for the same category. 
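+
+        EXAMPLE (a sketch; "matcher" is an ActiveTagMatcher instance)::
+
+            tags = ["active.with_os=win32", "not_active.with_browser=chrome"]
+            groups = dict(matcher.group_active_tags_by_category(tags))
+            # -- RESULT: One group for category "os", one for "browser".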
+ """ + category_tag_groups = {} + for tag in tags: + match_object = self.tag_pattern.match(tag) + if match_object: + category = match_object.group("category") + category_tag_pairs = category_tag_groups.get(category, None) + if category_tag_pairs is None: + category_tag_pairs = category_tag_groups[category] = [] + category_tag_pairs.append((tag, match_object)) + + for category, category_tag_pairs in six.iteritems(category_tag_groups): + yield (category, category_tag_pairs) + + +class PredicateTagMatcher(TagMatcher): + def __init__(self, exclude_function): + assert callable(exclude_function) + super(PredicateTagMatcher, self).__init__() + self.predicate = exclude_function + + def should_exclude_with(self, tags): + return self.predicate(tags) + + +class CompositeTagMatcher(TagMatcher): + """Provides a composite tag matcher.""" + + def __init__(self, tag_matchers=None): + super(CompositeTagMatcher, self).__init__() + self.tag_matchers = tag_matchers or [] + + def should_exclude_with(self, tags): + for tag_matcher in self.tag_matchers: + if tag_matcher.should_exclude_with(tags): + return True + # -- OTHERWISE: + return False + + +def setup_active_tag_values(active_tag_values, data): + """Setup/update active_tag values with dict-like data. + Only values for keys that are already present are updated. + + :param active_tag_values: Data storage for active_tag value (dict-like). + :param data: Data that should be used for active_tag values (dict-like). + """ + for category in list(active_tag_values.keys()): + if category in data: + active_tag_values[category] = data[category] + + +# ----------------------------------------------------------------------------- +# PROTOTYPING CLASSES: +# ----------------------------------------------------------------------------- +class OnlyWithCategoryTagMatcher(TagMatcher): + """ + Provides a tag matcher that allows to determine if feature/scenario + should run or should be excluded from the run-set (at runtime). + + .. deprecated:: Use :class:`ActiveTagMatcher` instead. + + EXAMPLE: + -------- + + Run some scenarios only when runtime conditions are met: + + * Run scenario Alice only on Windows OS + * Run scenario Bob only on MACOSX + + .. code-block:: gherkin + + # -- FILE: features/alice.feature + # TAG SCHEMA: @only.with_{category}={current_value} + Feature: + + @only.with_os=win32 + Scenario: Alice (Run only on Windows) + Given I do something + ... + + @only.with_os=darwin + Scenario: Bob (Run only on MACOSX) + Given I do something else + ... + + + .. code-block:: python + + # -- FILE: features/environment.py + from behave.tag_matcher import OnlyWithCategoryTagMatcher + import sys + + # -- MATCHES TAGS: @only.with_{category}=* = @only.with_os=* + active_tag_matcher = OnlyWithCategoryTagMatcher("os", sys.platform) + + def before_scenario(context, scenario): + if active_tag_matcher.should_exclude_with(scenario.effective_tags): + scenario.skip() #< LATE-EXCLUDE from run-set. 
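+
+    The equivalent with the non-deprecated matcher (a sketch):
+
+    .. code-block:: python
+
+        from behave.tag_matcher import ActiveTagMatcher
+        import sys
+
+        active_tag_matcher = ActiveTagMatcher({"os": sys.platform})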
+ """ + tag_prefix = "only.with_" + value_separator = "=" + + def __init__(self, category, value, tag_prefix=None, value_sep=None): + warnings.warn("Use ActiveTagMatcher instead.", DeprecationWarning) + super(OnlyWithCategoryTagMatcher, self).__init__() + self.active_tag = self.make_category_tag(category, value, + tag_prefix, value_sep) + self.category_tag_prefix = self.make_category_tag(category, None, + tag_prefix, value_sep) + + def should_exclude_with(self, tags): + category_tags = self.select_category_tags(tags) + if category_tags and self.active_tag not in category_tags: + return True + # -- OTHERWISE: feature/scenario with theses tags should run. + return False + + def select_category_tags(self, tags): + return [tag for tag in tags + if tag.startswith(self.category_tag_prefix)] + + @classmethod + def make_category_tag(cls, category, value=None, tag_prefix=None, + value_sep=None): + if tag_prefix is None: + tag_prefix = cls.tag_prefix + if value_sep is None: + value_sep = cls.value_separator + value = value or "" + return "%s%s%s%s" % (tag_prefix, category, value_sep, value) + + +class OnlyWithAnyCategoryTagMatcher(TagMatcher): + """ + Provides a tag matcher that matches any category that follows the + "@only.with_" tag schema and determines if it should run or + should be excluded from the run-set (at runtime). + + TAG SCHEMA: @only.with_{category}={value} + + .. seealso:: OnlyWithCategoryTagMatcher + .. deprecated:: Use :class:`ActiveTagMatcher` instead. + + EXAMPLE: + -------- + + Run some scenarios only when runtime conditions are met: + + * Run scenario Alice only on Windows OS + * Run scenario Bob only with browser Chrome + + .. code-block:: gherkin + + # -- FILE: features/alice.feature + # TAG SCHEMA: @only.with_{category}={current_value} + Feature: + + @only.with_os=win32 + Scenario: Alice (Run only on Windows) + Given I do something + ... + + @only.with_browser=chrome + Scenario: Bob (Run only with Web-Browser Chrome) + Given I do something else + ... + + + .. code-block:: python + + # -- FILE: features/environment.py + from behave.tag_matcher import OnlyWithAnyCategoryTagMatcher + import sys + + # -- MATCHES ANY TAGS: @only.with_{category}={value} + # NOTE: active_tag_value_provider provides current category values. + active_tag_value_provider = { + "browser": os.environ.get("BEHAVE_BROWSER", "chrome"), + "os": sys.platform, + } + active_tag_matcher = OnlyWithAnyCategoryTagMatcher(active_tag_value_provider) + + def before_scenario(context, scenario): + if active_tag_matcher.should_exclude_with(scenario.effective_tags): + scenario.skip() #< LATE-EXCLUDE from run-set. + """ + + def __init__(self, value_provider, tag_prefix=None, value_sep=None): + warnings.warn("Use ActiveTagMatcher instead.", DeprecationWarning) + super(OnlyWithAnyCategoryTagMatcher, self).__init__() + if value_sep is None: + value_sep = OnlyWithCategoryTagMatcher.value_separator + self.value_provider = value_provider + self.tag_prefix = tag_prefix or OnlyWithCategoryTagMatcher.tag_prefix + self.value_separator = value_sep + + def should_exclude_with(self, tags): + exclude_decision_map = {} + for category_tag in self.select_category_tags(tags): + category, value = self.parse_category_tag(category_tag) + active_value = self.value_provider.get(category, None) + if active_value is None: + # -- CASE: Unknown category, ignore it. + continue + elif active_value == value: + # -- CASE: Active category value selected, decision should run. 
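+                #    (overrides any exclude-decision made earlier for this
+                #    category: one matching tag value keeps it enabled)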
+ exclude_decision_map[category] = False + else: + # -- CASE: Inactive category value selected, may exclude it. + if category not in exclude_decision_map: + exclude_decision_map[category] = True + return any(exclude_decision_map.values()) + + def select_category_tags(self, tags): + return [tag for tag in tags + if tag.startswith(self.tag_prefix)] + + def parse_category_tag(self, tag): + assert tag and tag.startswith(self.tag_prefix) + category_value = tag[len(self.tag_prefix):] + if self.value_separator in category_value: + category, value = category_value.split(self.value_separator, 1) + else: + # -- OOPS: TAG SCHEMA FORMAT MISMATCH + category = category_value + value = None + return category, value diff --git a/venv/Lib/site-packages/behave/textutil.py b/venv/Lib/site-packages/behave/textutil.py new file mode 100644 index 0000000..6be650a --- /dev/null +++ b/venv/Lib/site-packages/behave/textutil.py @@ -0,0 +1,164 @@ +# -*- coding: utf-8 -*- +""" +Provides some utility functions related to text processing. +""" + +from __future__ import absolute_import, print_function +import codecs +import os +import sys +import six + +# ----------------------------------------------------------------------------- +# CONSTANTS: +# ----------------------------------------------------------------------------- +# DEFAULT STRATEGY: For error handling when unicode-strings are converted. +BEHAVE_UNICODE_ERRORS = os.environ.get("BEHAVE_UNICODE_ERRORS", "replace") + + +# ----------------------------------------------------------------------------- +# FUNCTIONS: +# ----------------------------------------------------------------------------- +def make_indentation(indent_size, part=u" "): + """Creates an indentation prefix string of the given size.""" + return indent_size * part + + +def indent(text, prefix): # pylint: disable=redefined-outer-name + """Indent text or a number of text lines (with newline). + + :param lines: Text lines to indent (as string or list of strings). + :param prefix: Line prefix to use (as string). + :return: Indented text (as unicode string). + """ + lines = text + newline = u"" + if isinstance(text, six.string_types): + lines = text.splitlines(True) + elif lines and not lines[0].endswith("\n"): + # -- TEXT LINES: Without trailing new-line. + newline = u"\n" + # MAYBE: return newline.join([prefix + six.text_type(line, errors="replace") + return newline.join([prefix + six.text_type(line) for line in lines]) + + +def compute_words_maxsize(words): + """Compute the maximum word size from a list of words (or strings). + + :param words: List of words (or strings) to use. + :return: Maximum size of all words. + """ + max_size = 0 + for word in words: + if len(word) > max_size: + max_size = len(word) + return max_size + + +def is_ascii_encoding(encoding): + """Checks if a given encoding is ASCII.""" + try: + return codecs.lookup(encoding).name == "ascii" + except LookupError: + return False + +def select_best_encoding(outstream=None): + """Select the *best* encoding for an output stream/file. + Uses: + * ``outstream.encoding`` (if available) + * ``sys.getdefaultencoding()`` (otherwise) + + Note: If encoding=ascii, uses encoding=UTF-8 + + :param outstream: Output stream to select encoding for (or: stdout) + :return: Unicode encoding name (as string) to use (for output stream). 
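+
+    EXAMPLE (a sketch)::
+
+        import io
+        stream = io.TextIOWrapper(io.BytesIO(), encoding="ascii")
+        select_best_encoding(stream)   #< "utf-8" (ASCII is upgraded)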
+ """ + outstream = outstream or sys.stdout + encoding = getattr(outstream, "encoding", None) or sys.getdefaultencoding() + if is_ascii_encoding(encoding): + # -- REQUIRED-FOR: Python2 + # MAYBE: locale.getpreferredencoding() + return "utf-8" + return encoding + + +def text(value, encoding=None, errors=None): + """Convert into a unicode string. + + :param value: Value to convert into a unicode string (bytes, str, object). + :return: Unicode string + + SYNDROMES: + * Convert object to unicode: Has only __str__() method (Python2) + * Windows: exception-traceback and encoding=unicode-escape are BAD + * exception-traceback w/ weird encoding or bytes + + ALTERNATIVES: + * Use traceback2 for Python2: Provides unicode tracebacks + """ + if encoding is None: + encoding = select_best_encoding() + if errors is None: + errors = BEHAVE_UNICODE_ERRORS + + if isinstance(value, six.text_type): + # -- CASE: ALREADY UNICODE (pass-through, efficiency): + return value + elif isinstance(value, six.binary_type): + # -- CASE: bytes/binary_type (Python2: str) + try: + return six.text_type(value, encoding, errors) + except UnicodeError: + # -- BEST-EFFORT: + return six.u(value) + # elif isinstance(value, bytes): + # # -- MAYBE: filename, path, etc. + # try: + # return value.decode(sys.getfilesystemencoding()) + # except UnicodeError: + # return value.decode("utf-8", "replace") # MAYBE: "ignore" + else: + # -- CASE: CONVERT/CAST OBJECT TO TEXT/STRING + try: + if six.PY2: + try: + text2 = six.text_type(value) + except UnicodeError as e: + # -- NOTE: value has no sane unicode conversion + # encoding=unicode-escape helps recover from errors. + data = str(value) + text2 = six.text_type(data, "unicode-escape", "replace") + else: + # PY3: Cast to string/unicode + text2 = six.text_type(value) + except UnicodeError as e: + # Python3: multi-arg call supports only string-like object: str, bytes + text2 = six.text_type(e) + return text2 + + +def to_texts(args, encoding=None, errors=None): + """Process a list of string-like objects into list of unicode values. + Optionally converts binary text into unicode for each item. + + :return: List of text/unicode values. + """ + if encoding is None: + encoding = select_best_encoding() + return [text(arg, encoding, errors) for arg in args] + + +def ensure_stream_with_encoder(stream, encoding=None): + if not encoding: + encoding = select_best_encoding(stream) + + if six.PY3: + return stream + elif hasattr(stream, "stream"): + return stream # Already wrapped with a codecs.StreamWriter + else: + assert six.PY2 + # py2 does, however, sometimes declare an encoding on sys.stdout, + # even if it doesn't use it (or it might be explicitly None) + stream = codecs.getwriter(encoding)(stream) + return stream diff --git a/venv/Lib/site-packages/behave/userdata.py b/venv/Lib/site-packages/behave/userdata.py new file mode 100644 index 0000000..84a79eb --- /dev/null +++ b/venv/Lib/site-packages/behave/userdata.py @@ -0,0 +1,220 @@ +# -*- coding: UTF-8 -*- +""" +Functionality to support user-specific configuration data (userdata). +""" + +from __future__ import absolute_import +from behave._types import Unknown + + +# ----------------------------------------------------------------------------- +# FUNCTIONS: +# ----------------------------------------------------------------------------- +def parse_bool(text): + """Parses a boolean text and converts it into boolean value (if possible). 
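+
+    EXAMPLE::
+
+        parse_bool("yes")   #< True
+        parse_bool("off")   #< False
+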
+ Supported truth string values: + + * true: "true", "yes", "on", "1" + * false: "false", "no", "off", "0" + + :raises: ValueError, if text is invalid + """ + from distutils.util import strtobool + return bool(strtobool(text)) + + +def parse_user_define(text): + """Parse "{name}={value}" text and return parts as tuple. + Used for command-line definitions, like "... -D name=value". + + SUPPORTED SCHEMA: + + * "{name}={value}" + * "{name}" (boolean flag; value="true") + * '"{name}={value}"' (double-quoted name-value pair) + * "'{name}={value}'" (single-quoted name-value pair) + * '{name}="{value}"' (double-quoted value) + * "{name}='{value}'" (single-quoted value) + * " {name} = {value} " (whitespace padded) + + .. note:: Leading/trailing Quotes are stripped. + + :param text: Text to parse (as string). + :return: (name, value) pair as tuple. + """ + text = text.strip() + if "=" in text: + text = unqote(text) + name, value = text.split("=", 1) + name = name.strip() + value = unqote(value.strip()) + else: + # -- ASSUMPTION: Boolean definition (as flag) + name = text + value = "true" + return (name, value) + + +def unqote(text): + """Strip pair of leading and trailing quotes from text.""" + # -- QUOTED: Strip single-quote or double-quote pair. + if ((text.startswith('"') and text.endswith('"')) or + (text.startswith("'") and text.endswith("'"))): + text = text[1:-1] + return text + +# ----------------------------------------------------------------------------- +# CLASSES: +# ----------------------------------------------------------------------------- +class UserData(dict): + """Dictionary-like user-data with some additional features: + + * type-converter methods, similar to configparser.ConfigParser.getint() + + """ + + def getas(self, convert, name, default=None, valuetype=None): + """Converts the value of user-data parameter from a string into a + specific value type. + + :param convert: Converter function to use (string to value-type). + :param name: Variable name to use. + :param default: Default value, used if parameter is not found. + :param valuetype: Value type(s), needed if convert != valuetype() + :return: Converted textual value (type: valuetype) + :return: Default value, if parameter is unknown. + :raises ValueError: If type conversion fails. + """ + if valuetype is None: + # -- ASSUME: Converter function is the type constructor. + valuetype = convert + + value = self.get(name, Unknown) + if value is Unknown: + return default + elif isinstance(value, valuetype): + # -- PRESERVE: Pre-converted value if type matches. + return value + else: + # -- CASE: Textual value (expected) + # Raise ValueError if parse/conversion fails. + assert callable(convert) + return convert(value) + + def getint(self, name, default=0): + """Convert parameter value (as string) into a integer value. + :return: Parameter value as integer number (on success). + :raises: ValueError, if type conversion fails. + """ + return self.getas(int, name, default) + + def getfloat(self, name, default=0.0): + """Convert parameter value (as string) into a float value. + :return: Parameter value as float number (on success). + :raises: ValueError, if type conversion fails. + """ + return self.getas(float, name, default) + + def getbool(self, name, default=False): + """Converts user-data string-value into boolean value (if possible). + Supported truth string values: + + * true: "true", "yes", "on", "1" + * false: "false", "no", "off", "0" + + :param name: Parameter name (as string). 
+ :param default: Default value, if parameter is unknown (=False). + :return: Boolean value of parameter + :raises: ValueError, if type conversion fails. + """ + return self.getas(parse_bool, name, default, valuetype=bool) + + @classmethod + def make(cls, data): + if data is None: + data = cls() + elif not isinstance(data, cls): + data = cls(data) + return data + + +class UserDataNamespace(object): + """Provides a light-weight dictview to the user data that allows you + to access all params in a namespace, that use "{namespace}.*" names. + + .. code-block:: python + + my_config = UserDataNamespace("my.config", userdata) + value1 = my_config.getint("value1") # USE: my.config.value1 + value2 = my_config.get("value2") # USE: my.config.value2 + """ + + def __init__(self, namespace, data=None): + self.namespace = namespace or "" + self.data = UserData.make(data) + + @staticmethod + def make_scoped(namespace, name): + """Creates a scoped-name from its parts.""" + if not namespace: # noqa + return name + return "%s.%s" % (namespace, name) + + # -- DICT-LIKE: + def get(self, name, default=None): + scoped_name = self.make_scoped(self.namespace, name) + return self.data.get(scoped_name, default) + + def getas(self, convert, name, default=None, valuetype=None): + scoped_name = self.make_scoped(self.namespace, name) + return self.data.getas(convert, scoped_name, default=default, + valuetype=valuetype) + + def getint(self, name, default=0): + scoped_name = self.make_scoped(self.namespace, name) + return self.data.getint(scoped_name, default=default) + + def getfloat(self, name, default=0.0): + scoped_name = self.make_scoped(self.namespace, name) + return self.data.getfloat(scoped_name, default=default) + + def getbool(self, name, default=False): + scoped_name = self.make_scoped(self.namespace, name) + return self.data.getbool(scoped_name, default=default) + + def __contains__(self, name): + scoped_name = self.make_scoped(self.namespace, name) + return scoped_name in self.data + + def __getitem__(self, name): + scoped_name = self.make_scoped(self.namespace, name) + return self.data[scoped_name] + + def __setitem__(self, name, value): + scoped_name = self.make_scoped(self.namespace, name) + self.data[scoped_name] = value + + def __len__(self): + return len(self.scoped_keys()) + + def scoped_keys(self): + if not self.namespace: # noqa + return self.data.keys() + prefix = "%s." % self.namespace + return [key for key in self.data.keys() if key.startswith(prefix)] + + def keys(self): + prefix = "%s." % self.namespace + for scoped_name in self.scoped_keys(): + name = scoped_name.replace(prefix, "", 1) + yield name + + def values(self): + for scoped_name in self.scoped_keys(): + yield self.data[scoped_name] + + def items(self): + for name in self.keys(): + scoped_name = self.make_scoped(self.namespace, name) + value = self.data[scoped_name] + yield (name, value) diff --git a/venv/Lib/site-packages/bs4/__init__.py b/venv/Lib/site-packages/bs4/__init__.py new file mode 100644 index 0000000..d8ad5e1 --- /dev/null +++ b/venv/Lib/site-packages/bs4/__init__.py @@ -0,0 +1,840 @@ +"""Beautiful Soup Elixir and Tonic - "The Screen-Scraper's Friend". + +http://www.crummy.com/software/BeautifulSoup/ + +Beautiful Soup uses a pluggable XML or HTML parser to parse a +(possibly invalid) document into a tree representation. Beautiful Soup +provides methods and Pythonic idioms that make it easy to navigate, +search, and modify the parse tree. + +Beautiful Soup works with Python 3.6 and up. 
It works better if lxml +and/or html5lib is installed. + +For more than you ever wanted to know about Beautiful Soup, see the +documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/ +""" + +__author__ = "Leonard Richardson (leonardr@segfault.org)" +__version__ = "4.12.3" +__copyright__ = "Copyright (c) 2004-2024 Leonard Richardson" +# Use of this source code is governed by the MIT license. +__license__ = "MIT" + +__all__ = ['BeautifulSoup'] + +from collections import Counter +import os +import re +import sys +import traceback +import warnings + +# The very first thing we do is give a useful error if someone is +# running this code under Python 2. +if sys.version_info.major < 3: + raise ImportError('You are trying to use a Python 3-specific version of Beautiful Soup under Python 2. This will not work. The final version of Beautiful Soup to support Python 2 was 4.9.3.') + +from .builder import ( + builder_registry, + ParserRejectedMarkup, + XMLParsedAsHTMLWarning, + HTMLParserTreeBuilder +) +from .dammit import UnicodeDammit +from .element import ( + CData, + Comment, + CSS, + DEFAULT_OUTPUT_ENCODING, + Declaration, + Doctype, + NavigableString, + PageElement, + ProcessingInstruction, + PYTHON_SPECIFIC_ENCODINGS, + ResultSet, + Script, + Stylesheet, + SoupStrainer, + Tag, + TemplateString, + ) + +# Define some custom warnings. +class GuessedAtParserWarning(UserWarning): + """The warning issued when BeautifulSoup has to guess what parser to + use -- probably because no parser was specified in the constructor. + """ + +class MarkupResemblesLocatorWarning(UserWarning): + """The warning issued when BeautifulSoup is given 'markup' that + actually looks like a resource locator -- a URL or a path to a file + on disk. + """ + + +class BeautifulSoup(Tag): + """A data structure representing a parsed HTML or XML document. + + Most of the methods you'll call on a BeautifulSoup object are inherited from + PageElement or Tag. + + Internally, this class defines the basic interface called by the + tree builders when converting an HTML/XML document into a data + structure. The interface abstracts away the differences between + parsers. To write a new tree builder, you'll need to understand + these methods as a whole. + + These methods will be called by the BeautifulSoup constructor: + * reset() + * feed(markup) + + The tree builder may call these methods from its feed() implementation: + * handle_starttag(name, attrs) # See note about return value + * handle_endtag(name) + * handle_data(data) # Appends to the current data node + * endData(containerClass) # Ends the current data node + + No matter how complicated the underlying parser is, you should be + able to build a tree using 'start tag' events, 'end tag' events, + 'data' events, and "done with data" events. + + If you encounter an empty-element tag (aka a self-closing tag, + like HTML's
tag), call handle_starttag and then + handle_endtag. + """ + + # Since BeautifulSoup subclasses Tag, it's possible to treat it as + # a Tag with a .name. This name makes it clear the BeautifulSoup + # object isn't a real markup tag. + ROOT_TAG_NAME = '[document]' + + # If the end-user gives no indication which tree builder they + # want, look for one with these features. + DEFAULT_BUILDER_FEATURES = ['html', 'fast'] + + # A string containing all ASCII whitespace characters, used in + # endData() to detect data chunks that seem 'empty'. + ASCII_SPACES = '\x20\x0a\x09\x0c\x0d' + + NO_PARSER_SPECIFIED_WARNING = "No parser was explicitly specified, so I'm using the best available %(markup_type)s parser for this system (\"%(parser)s\"). This usually isn't a problem, but if you run this code on another system, or in a different virtual environment, it may use a different parser and behave differently.\n\nThe code that caused this warning is on line %(line_number)s of the file %(filename)s. To get rid of this warning, pass the additional argument 'features=\"%(parser)s\"' to the BeautifulSoup constructor.\n" + + def __init__(self, markup="", features=None, builder=None, + parse_only=None, from_encoding=None, exclude_encodings=None, + element_classes=None, **kwargs): + """Constructor. + + :param markup: A string or a file-like object representing + markup to be parsed. + + :param features: Desirable features of the parser to be + used. This may be the name of a specific parser ("lxml", + "lxml-xml", "html.parser", or "html5lib") or it may be the + type of markup to be used ("html", "html5", "xml"). It's + recommended that you name a specific parser, so that + Beautiful Soup gives you the same results across platforms + and virtual environments. + + :param builder: A TreeBuilder subclass to instantiate (or + instance to use) instead of looking one up based on + `features`. You only need to use this if you've implemented a + custom TreeBuilder. + + :param parse_only: A SoupStrainer. Only parts of the document + matching the SoupStrainer will be considered. This is useful + when parsing part of a document that would otherwise be too + large to fit into memory. + + :param from_encoding: A string indicating the encoding of the + document to be parsed. Pass this in if Beautiful Soup is + guessing wrongly about the document's encoding. + + :param exclude_encodings: A list of strings indicating + encodings known to be wrong. Pass this in if you don't know + the document's encoding but you know Beautiful Soup's guess is + wrong. + + :param element_classes: A dictionary mapping BeautifulSoup + classes like Tag and NavigableString, to other classes you'd + like to be instantiated instead as the parse tree is + built. This is useful for subclassing Tag or NavigableString + to modify default behavior. + + :param kwargs: For backwards compatibility purposes, the + constructor accepts certain keyword arguments used in + Beautiful Soup 3. None of these arguments do anything in + Beautiful Soup 4; they will result in a warning and then be + ignored. + + Apart from this, any keyword arguments passed into the + BeautifulSoup constructor are propagated to the TreeBuilder + constructor. This makes it possible to configure a + TreeBuilder by passing in arguments, not just by saying which + one to use. + """ + if 'convertEntities' in kwargs: + del kwargs['convertEntities'] + warnings.warn( + "BS4 does not respect the convertEntities argument to the " + "BeautifulSoup constructor. 
Entities are always converted " + "to Unicode characters.") + + if 'markupMassage' in kwargs: + del kwargs['markupMassage'] + warnings.warn( + "BS4 does not respect the markupMassage argument to the " + "BeautifulSoup constructor. The tree builder is responsible " + "for any necessary markup massage.") + + if 'smartQuotesTo' in kwargs: + del kwargs['smartQuotesTo'] + warnings.warn( + "BS4 does not respect the smartQuotesTo argument to the " + "BeautifulSoup constructor. Smart quotes are always converted " + "to Unicode characters.") + + if 'selfClosingTags' in kwargs: + del kwargs['selfClosingTags'] + warnings.warn( + "BS4 does not respect the selfClosingTags argument to the " + "BeautifulSoup constructor. The tree builder is responsible " + "for understanding self-closing tags.") + + if 'isHTML' in kwargs: + del kwargs['isHTML'] + warnings.warn( + "BS4 does not respect the isHTML argument to the " + "BeautifulSoup constructor. Suggest you use " + "features='lxml' for HTML and features='lxml-xml' for " + "XML.") + + def deprecated_argument(old_name, new_name): + if old_name in kwargs: + warnings.warn( + 'The "%s" argument to the BeautifulSoup constructor ' + 'has been renamed to "%s."' % (old_name, new_name), + DeprecationWarning, stacklevel=3 + ) + return kwargs.pop(old_name) + return None + + parse_only = parse_only or deprecated_argument( + "parseOnlyThese", "parse_only") + + from_encoding = from_encoding or deprecated_argument( + "fromEncoding", "from_encoding") + + if from_encoding and isinstance(markup, str): + warnings.warn("You provided Unicode markup but also provided a value for from_encoding. Your from_encoding will be ignored.") + from_encoding = None + + self.element_classes = element_classes or dict() + + # We need this information to track whether or not the builder + # was specified well enough that we can omit the 'you need to + # specify a parser' warning. + original_builder = builder + original_features = features + + if isinstance(builder, type): + # A builder class was passed in; it needs to be instantiated. + builder_class = builder + builder = None + elif builder is None: + if isinstance(features, str): + features = [features] + if features is None or len(features) == 0: + features = self.DEFAULT_BUILDER_FEATURES + builder_class = builder_registry.lookup(*features) + if builder_class is None: + raise FeatureNotFound( + "Couldn't find a tree builder with the features you " + "requested: %s. Do you need to install a parser library?" + % ",".join(features)) + + # At this point either we have a TreeBuilder instance in + # builder, or we have a builder_class that we can instantiate + # with the remaining **kwargs. + if builder is None: + builder = builder_class(**kwargs) + if not original_builder and not ( + original_features == builder.NAME or + original_features in builder.ALTERNATE_NAMES + ) and markup: + # The user did not tell us which TreeBuilder to use, + # and we had to guess. Issue a warning. + if builder.is_xml: + markup_type = "XML" + else: + markup_type = "HTML" + + # This code adapted from warnings.py so that we get the same line + # of code as our warnings.warn() call gets, even if the answer is wrong + # (as it may be in a multithreading situation). 
+ caller = None + try: + caller = sys._getframe(1) + except ValueError: + pass + if caller: + globals = caller.f_globals + line_number = caller.f_lineno + else: + globals = sys.__dict__ + line_number= 1 + filename = globals.get('__file__') + if filename: + fnl = filename.lower() + if fnl.endswith((".pyc", ".pyo")): + filename = filename[:-1] + if filename: + # If there is no filename at all, the user is most likely in a REPL, + # and the warning is not necessary. + values = dict( + filename=filename, + line_number=line_number, + parser=builder.NAME, + markup_type=markup_type + ) + warnings.warn( + self.NO_PARSER_SPECIFIED_WARNING % values, + GuessedAtParserWarning, stacklevel=2 + ) + else: + if kwargs: + warnings.warn("Keyword arguments to the BeautifulSoup constructor will be ignored. These would normally be passed into the TreeBuilder constructor, but a TreeBuilder instance was passed in as `builder`.") + + self.builder = builder + self.is_xml = builder.is_xml + self.known_xml = self.is_xml + self._namespaces = dict() + self.parse_only = parse_only + + if hasattr(markup, 'read'): # It's a file-type object. + markup = markup.read() + elif len(markup) <= 256 and ( + (isinstance(markup, bytes) and not b'<' in markup) + or (isinstance(markup, str) and not '<' in markup) + ): + # Issue warnings for a couple beginner problems + # involving passing non-markup to Beautiful Soup. + # Beautiful Soup will still parse the input as markup, + # since that is sometimes the intended behavior. + if not self._markup_is_url(markup): + self._markup_resembles_filename(markup) + + rejections = [] + success = False + for (self.markup, self.original_encoding, self.declared_html_encoding, + self.contains_replacement_characters) in ( + self.builder.prepare_markup( + markup, from_encoding, exclude_encodings=exclude_encodings)): + self.reset() + self.builder.initialize_soup(self) + try: + self._feed() + success = True + break + except ParserRejectedMarkup as e: + rejections.append(e) + pass + + if not success: + other_exceptions = [str(e) for e in rejections] + raise ParserRejectedMarkup( + "The markup you provided was rejected by the parser. Trying a different parser or a different encoding may help.\n\nOriginal exception(s) from parser:\n " + "\n ".join(other_exceptions) + ) + + # Clear out the markup and remove the builder's circular + # reference to this object. + self.markup = None + self.builder.soup = None + + def _clone(self): + """Create a new BeautifulSoup object with the same TreeBuilder, + but not associated with any markup. + + This is the first step of the deepcopy process. + """ + clone = type(self)("", None, self.builder) + + # Keep track of the encoding of the original document, + # since we won't be parsing it again. + clone.original_encoding = self.original_encoding + return clone + + def __getstate__(self): + # Frequently a tree builder can't be pickled. + d = dict(self.__dict__) + if 'builder' in d and d['builder'] is not None and not self.builder.picklable: + d['builder'] = type(self.builder) + # Store the contents as a Unicode string. + d['contents'] = [] + d['markup'] = self.decode() + + # If _most_recent_element is present, it's a Tag object left + # over from initial parse. It might not be picklable and we + # don't need it. + if '_most_recent_element' in d: + del d['_most_recent_element'] + return d + + def __setstate__(self, state): + # If necessary, restore the TreeBuilder by looking it up. 
+ self.__dict__ = state + if isinstance(self.builder, type): + self.builder = self.builder() + elif not self.builder: + # We don't know which builder was used to build this + # parse tree, so use a default we know is always available. + self.builder = HTMLParserTreeBuilder() + self.builder.soup = self + self.reset() + self._feed() + return state + + + @classmethod + def _decode_markup(cls, markup): + """Ensure `markup` is bytes so it's safe to send into warnings.warn. + + TODO: warnings.warn had this problem back in 2010 but it might not + anymore. + """ + if isinstance(markup, bytes): + decoded = markup.decode('utf-8', 'replace') + else: + decoded = markup + return decoded + + @classmethod + def _markup_is_url(cls, markup): + """Error-handling method to raise a warning if incoming markup looks + like a URL. + + :param markup: A string. + :return: Whether or not the markup resembles a URL + closely enough to justify a warning. + """ + if isinstance(markup, bytes): + space = b' ' + cant_start_with = (b"http:", b"https:") + elif isinstance(markup, str): + space = ' ' + cant_start_with = ("http:", "https:") + else: + return False + + if any(markup.startswith(prefix) for prefix in cant_start_with): + if not space in markup: + warnings.warn( + 'The input looks more like a URL than markup. You may want to use' + ' an HTTP client like requests to get the document behind' + ' the URL, and feed that document to Beautiful Soup.', + MarkupResemblesLocatorWarning, + stacklevel=3 + ) + return True + return False + + @classmethod + def _markup_resembles_filename(cls, markup): + """Error-handling method to raise a warning if incoming markup + resembles a filename. + + :param markup: A bytestring or string. + :return: Whether or not the markup resembles a filename + closely enough to justify a warning. + """ + path_characters = '/\\' + extensions = ['.html', '.htm', '.xml', '.xhtml', '.txt'] + if isinstance(markup, bytes): + path_characters = path_characters.encode("utf8") + extensions = [x.encode('utf8') for x in extensions] + filelike = False + if any(x in markup for x in path_characters): + filelike = True + else: + lower = markup.lower() + if any(lower.endswith(ext) for ext in extensions): + filelike = True + if filelike: + warnings.warn( + 'The input looks more like a filename than markup. You may' + ' want to open this file and pass the filehandle into' + ' Beautiful Soup.', + MarkupResemblesLocatorWarning, stacklevel=3 + ) + return True + return False + + def _feed(self): + """Internal method that parses previously set markup, creating a large + number of Tag and NavigableString objects. + """ + # Convert the document to Unicode. + self.builder.reset() + + self.builder.feed(self.markup) + # Close out any unfinished strings and close all the open tags. + self.endData() + while self.currentTag.name != self.ROOT_TAG_NAME: + self.popTag() + + def reset(self): + """Reset this object to a state as though it had never parsed any + markup. + """ + Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME) + self.hidden = 1 + self.builder.reset() + self.current_data = [] + self.currentTag = None + self.tagStack = [] + self.open_tag_counter = Counter() + self.preserve_whitespace_tag_stack = [] + self.string_container_stack = [] + self._most_recent_element = None + self.pushTag(self) + + def new_tag(self, name, namespace=None, nsprefix=None, attrs={}, + sourceline=None, sourcepos=None, **kwattrs): + """Create a new Tag associated with this BeautifulSoup object. + + :param name: The name of the new Tag. 
+ :param namespace: The URI of the new Tag's XML namespace, if any. + :param prefix: The prefix for the new Tag's XML namespace, if any. + :param attrs: A dictionary of this Tag's attribute values; can + be used instead of `kwattrs` for attributes like 'class' + that are reserved words in Python. + :param sourceline: The line number where this tag was + (purportedly) found in its source document. + :param sourcepos: The character position within `sourceline` where this + tag was (purportedly) found. + :param kwattrs: Keyword arguments for the new Tag's attribute values. + + """ + kwattrs.update(attrs) + return self.element_classes.get(Tag, Tag)( + None, self.builder, name, namespace, nsprefix, kwattrs, + sourceline=sourceline, sourcepos=sourcepos + ) + + def string_container(self, base_class=None): + container = base_class or NavigableString + + # There may be a general override of NavigableString. + container = self.element_classes.get( + container, container + ) + + # On top of that, we may be inside a tag that needs a special + # container class. + if self.string_container_stack and container is NavigableString: + container = self.builder.string_containers.get( + self.string_container_stack[-1].name, container + ) + return container + + def new_string(self, s, subclass=None): + """Create a new NavigableString associated with this BeautifulSoup + object. + """ + container = self.string_container(subclass) + return container(s) + + def insert_before(self, *args): + """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement + it because there is nothing before or after it in the parse tree. + """ + raise NotImplementedError("BeautifulSoup objects don't support insert_before().") + + def insert_after(self, *args): + """This method is part of the PageElement API, but `BeautifulSoup` doesn't implement + it because there is nothing before or after it in the parse tree. + """ + raise NotImplementedError("BeautifulSoup objects don't support insert_after().") + + def popTag(self): + """Internal method called by _popToTag when a tag is closed.""" + tag = self.tagStack.pop() + if tag.name in self.open_tag_counter: + self.open_tag_counter[tag.name] -= 1 + if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]: + self.preserve_whitespace_tag_stack.pop() + if self.string_container_stack and tag == self.string_container_stack[-1]: + self.string_container_stack.pop() + #print("Pop", tag.name) + if self.tagStack: + self.currentTag = self.tagStack[-1] + return self.currentTag + + def pushTag(self, tag): + """Internal method called by handle_starttag when a tag is opened.""" + #print("Push", tag.name) + if self.currentTag is not None: + self.currentTag.contents.append(tag) + self.tagStack.append(tag) + self.currentTag = self.tagStack[-1] + if tag.name != self.ROOT_TAG_NAME: + self.open_tag_counter[tag.name] += 1 + if tag.name in self.builder.preserve_whitespace_tags: + self.preserve_whitespace_tag_stack.append(tag) + if tag.name in self.builder.string_containers: + self.string_container_stack.append(tag) + + def endData(self, containerClass=None): + """Method called by the TreeBuilder when the end of a data segment + occurs. + """ + if self.current_data: + current_data = ''.join(self.current_data) + # If whitespace is not preserved, and this string contains + # nothing but ASCII spaces, replace it with a single space + # or newline. 
+ if not self.preserve_whitespace_tag_stack: + strippable = True + for i in current_data: + if i not in self.ASCII_SPACES: + strippable = False + break + if strippable: + if '\n' in current_data: + current_data = '\n' + else: + current_data = ' ' + + # Reset the data collector. + self.current_data = [] + + # Should we add this string to the tree at all? + if self.parse_only and len(self.tagStack) <= 1 and \ + (not self.parse_only.text or \ + not self.parse_only.search(current_data)): + return + + containerClass = self.string_container(containerClass) + o = containerClass(current_data) + self.object_was_parsed(o) + + def object_was_parsed(self, o, parent=None, most_recent_element=None): + """Method called by the TreeBuilder to integrate an object into the parse tree.""" + if parent is None: + parent = self.currentTag + if most_recent_element is not None: + previous_element = most_recent_element + else: + previous_element = self._most_recent_element + + next_element = previous_sibling = next_sibling = None + if isinstance(o, Tag): + next_element = o.next_element + next_sibling = o.next_sibling + previous_sibling = o.previous_sibling + if previous_element is None: + previous_element = o.previous_element + + fix = parent.next_element is not None + + o.setup(parent, previous_element, next_element, previous_sibling, next_sibling) + + self._most_recent_element = o + parent.contents.append(o) + + # Check if we are inserting into an already parsed node. + if fix: + self._linkage_fixer(parent) + + def _linkage_fixer(self, el): + """Make sure linkage of this fragment is sound.""" + + first = el.contents[0] + child = el.contents[-1] + descendant = child + + if child is first and el.parent is not None: + # Parent should be linked to first child + el.next_element = child + # We are no longer linked to whatever this element is + prev_el = child.previous_element + if prev_el is not None and prev_el is not el: + prev_el.next_element = None + # First child should be linked to the parent, and no previous siblings. + child.previous_element = el + child.previous_sibling = None + + # We have no sibling as we've been appended as the last. + child.next_sibling = None + + # This index is a tag, dig deeper for a "last descendant" + if isinstance(child, Tag) and child.contents: + descendant = child._last_descendant(False) + + # As the final step, link last descendant. It should be linked + # to the parent's next sibling (if found), else walk up the chain + # and find a parent with a sibling. It should have no next sibling. + descendant.next_element = None + descendant.next_sibling = None + target = el + while True: + if target is None: + break + elif target.next_sibling is not None: + descendant.next_element = target.next_sibling + target.next_sibling.previous_element = child + break + target = target.parent + + def _popToTag(self, name, nsprefix=None, inclusivePop=True): + """Pops the tag stack up to and including the most recent + instance of the given tag. + + If there are no open tags with the given name, nothing will be + popped. + + :param name: Pop up to the most recent tag with this name. + :param nsprefix: The namespace prefix that goes with `name`. + :param inclusivePop: It this is false, pops the tag stack up + to but *not* including the most recent instqance of the + given tag. + + """ + #print("Popping to %s" % name) + if name == self.ROOT_TAG_NAME: + # The BeautifulSoup object itself can never be popped. 
+ return + + most_recently_popped = None + + stack_size = len(self.tagStack) + for i in range(stack_size - 1, 0, -1): + if not self.open_tag_counter.get(name): + break + t = self.tagStack[i] + if (name == t.name and nsprefix == t.prefix): + if inclusivePop: + most_recently_popped = self.popTag() + break + most_recently_popped = self.popTag() + + return most_recently_popped + + def handle_starttag(self, name, namespace, nsprefix, attrs, sourceline=None, + sourcepos=None, namespaces=None): + """Called by the tree builder when a new tag is encountered. + + :param name: Name of the tag. + :param nsprefix: Namespace prefix for the tag. + :param attrs: A dictionary of attribute values. + :param sourceline: The line number where this tag was found in its + source document. + :param sourcepos: The character position within `sourceline` where this + tag was found. + :param namespaces: A dictionary of all namespace prefix mappings + currently in scope in the document. + + If this method returns None, the tag was rejected by an active + SoupStrainer. You should proceed as if the tag had not occurred + in the document. For instance, if this was a self-closing tag, + don't call handle_endtag. + """ + # print("Start tag %s: %s" % (name, attrs)) + self.endData() + + if (self.parse_only and len(self.tagStack) <= 1 + and (self.parse_only.text + or not self.parse_only.search_tag(name, attrs))): + return None + + tag = self.element_classes.get(Tag, Tag)( + self, self.builder, name, namespace, nsprefix, attrs, + self.currentTag, self._most_recent_element, + sourceline=sourceline, sourcepos=sourcepos, + namespaces=namespaces + ) + if tag is None: + return tag + if self._most_recent_element is not None: + self._most_recent_element.next_element = tag + self._most_recent_element = tag + self.pushTag(tag) + return tag + + def handle_endtag(self, name, nsprefix=None): + """Called by the tree builder when an ending tag is encountered. + + :param name: Name of the tag. + :param nsprefix: Namespace prefix for the tag. + """ + #print("End tag: " + name) + self.endData() + self._popToTag(name, nsprefix) + + def handle_data(self, data): + """Called by the tree builder when a chunk of textual data is encountered.""" + self.current_data.append(data) + + def decode(self, pretty_print=False, + eventual_encoding=DEFAULT_OUTPUT_ENCODING, + formatter="minimal", iterator=None): + """Returns a string or Unicode representation of the parse tree + as an HTML or XML document. + + :param pretty_print: If this is True, indentation will be used to + make the document more readable. + :param eventual_encoding: The encoding of the final document. + If this is None, the document will be a Unicode string. + """ + if self.is_xml: + # Print the XML declaration + encoding_part = '' + if eventual_encoding in PYTHON_SPECIFIC_ENCODINGS: + # This is a special Python encoding; it can't actually + # go into an XML document because it means nothing + # outside of Python. + eventual_encoding = None + if eventual_encoding != None: + encoding_part = ' encoding="%s"' % eventual_encoding + prefix = '\n' % encoding_part + else: + prefix = '' + if not pretty_print: + indent_level = None + else: + indent_level = 0 + return prefix + super(BeautifulSoup, self).decode( + indent_level, eventual_encoding, formatter, iterator) + +# Aliases to make it easier to get started quickly, e.g. 
'from bs4 import _soup' +_s = BeautifulSoup +_soup = BeautifulSoup + +class BeautifulStoneSoup(BeautifulSoup): + """Deprecated interface to an XML parser.""" + + def __init__(self, *args, **kwargs): + kwargs['features'] = 'xml' + warnings.warn( + 'The BeautifulStoneSoup class is deprecated. Instead of using ' + 'it, pass features="xml" into the BeautifulSoup constructor.', + DeprecationWarning, stacklevel=2 + ) + super(BeautifulStoneSoup, self).__init__(*args, **kwargs) + + +class StopParsing(Exception): + """Exception raised by a TreeBuilder if it's unable to continue parsing.""" + pass + +class FeatureNotFound(ValueError): + """Exception raised by the BeautifulSoup constructor if no parser with the + requested features is found. + """ + pass + + +#If this file is run as a script, act as an HTML pretty-printer. +if __name__ == '__main__': + import sys + soup = BeautifulSoup(sys.stdin) + print((soup.prettify())) diff --git a/venv/Lib/site-packages/bs4/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/bs4/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..8720d41 Binary files /dev/null and b/venv/Lib/site-packages/bs4/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/__pycache__/css.cpython-311.pyc b/venv/Lib/site-packages/bs4/__pycache__/css.cpython-311.pyc new file mode 100644 index 0000000..139470e Binary files /dev/null and b/venv/Lib/site-packages/bs4/__pycache__/css.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/__pycache__/dammit.cpython-311.pyc b/venv/Lib/site-packages/bs4/__pycache__/dammit.cpython-311.pyc new file mode 100644 index 0000000..c63077f Binary files /dev/null and b/venv/Lib/site-packages/bs4/__pycache__/dammit.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/__pycache__/diagnose.cpython-311.pyc b/venv/Lib/site-packages/bs4/__pycache__/diagnose.cpython-311.pyc new file mode 100644 index 0000000..1d2955a Binary files /dev/null and b/venv/Lib/site-packages/bs4/__pycache__/diagnose.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/__pycache__/element.cpython-311.pyc b/venv/Lib/site-packages/bs4/__pycache__/element.cpython-311.pyc new file mode 100644 index 0000000..bd24e11 Binary files /dev/null and b/venv/Lib/site-packages/bs4/__pycache__/element.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/__pycache__/formatter.cpython-311.pyc b/venv/Lib/site-packages/bs4/__pycache__/formatter.cpython-311.pyc new file mode 100644 index 0000000..a18f165 Binary files /dev/null and b/venv/Lib/site-packages/bs4/__pycache__/formatter.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/builder/__init__.py b/venv/Lib/site-packages/bs4/builder/__init__.py new file mode 100644 index 0000000..ffb31fc --- /dev/null +++ b/venv/Lib/site-packages/bs4/builder/__init__.py @@ -0,0 +1,636 @@ +# Use of this source code is governed by the MIT license. +__license__ = "MIT" + +from collections import defaultdict +import itertools +import re +import warnings +import sys +from bs4.element import ( + CharsetMetaAttributeValue, + ContentMetaAttributeValue, + RubyParenthesisString, + RubyTextString, + Stylesheet, + Script, + TemplateString, + nonwhitespace_re +) + +__all__ = [ + 'HTMLTreeBuilder', + 'SAXTreeBuilder', + 'TreeBuilder', + 'TreeBuilderRegistry', + ] + +# Some useful features for a TreeBuilder to have. 
+FAST = 'fast' +PERMISSIVE = 'permissive' +STRICT = 'strict' +XML = 'xml' +HTML = 'html' +HTML_5 = 'html5' + +class XMLParsedAsHTMLWarning(UserWarning): + """The warning issued when an HTML parser is used to parse + XML that is not XHTML. + """ + MESSAGE = """It looks like you're parsing an XML document using an HTML parser. If this really is an HTML document (maybe it's XHTML?), you can ignore or filter this warning. If it's XML, you should know that using an XML parser will be more reliable. To parse this document as XML, make sure you have the lxml package installed, and pass the keyword argument `features="xml"` into the BeautifulSoup constructor.""" + + +class TreeBuilderRegistry(object): + """A way of looking up TreeBuilder subclasses by their name or by desired + features. + """ + + def __init__(self): + self.builders_for_feature = defaultdict(list) + self.builders = [] + + def register(self, treebuilder_class): + """Register a treebuilder based on its advertised features. + + :param treebuilder_class: A subclass of Treebuilder. its .features + attribute should list its features. + """ + for feature in treebuilder_class.features: + self.builders_for_feature[feature].insert(0, treebuilder_class) + self.builders.insert(0, treebuilder_class) + + def lookup(self, *features): + """Look up a TreeBuilder subclass with the desired features. + + :param features: A list of features to look for. If none are + provided, the most recently registered TreeBuilder subclass + will be used. + :return: A TreeBuilder subclass, or None if there's no + registered subclass with all the requested features. + """ + if len(self.builders) == 0: + # There are no builders at all. + return None + + if len(features) == 0: + # They didn't ask for any features. Give them the most + # recently registered builder. + return self.builders[0] + + # Go down the list of features in order, and eliminate any builders + # that don't match every feature. + features = list(features) + features.reverse() + candidates = None + candidate_set = None + while len(features) > 0: + feature = features.pop() + we_have_the_feature = self.builders_for_feature.get(feature, []) + if len(we_have_the_feature) > 0: + if candidates is None: + candidates = we_have_the_feature + candidate_set = set(candidates) + else: + # Eliminate any candidates that don't have this feature. + candidate_set = candidate_set.intersection( + set(we_have_the_feature)) + + # The only valid candidates are the ones in candidate_set. + # Go through the original list of candidates and pick the first one + # that's in candidate_set. + if candidate_set is None: + return None + for candidate in candidates: + if candidate in candidate_set: + return candidate + return None + +# The BeautifulSoup class will take feature lists from developers and use them +# to look up builders in this registry. +builder_registry = TreeBuilderRegistry() + +class TreeBuilder(object): + """Turn a textual document into a Beautiful Soup object tree.""" + + NAME = "[Unknown tree builder]" + ALTERNATE_NAMES = [] + features = [] + + is_xml = False + picklable = False + empty_element_tags = None # A tag will be considered an empty-element + # tag when and only when it has no contents. + + # A value for these tag/attribute combinations is a space- or + # comma-separated list of CDATA, rather than a single CDATA. + DEFAULT_CDATA_LIST_ATTRIBUTES = defaultdict(list) + + # Whitespace should be preserved inside these tags. 
+ DEFAULT_PRESERVE_WHITESPACE_TAGS = set() + + # The textual contents of tags with these names should be + # instantiated with some class other than NavigableString. + DEFAULT_STRING_CONTAINERS = {} + + USE_DEFAULT = object() + + # Most parsers don't keep track of line numbers. + TRACKS_LINE_NUMBERS = False + + def __init__(self, multi_valued_attributes=USE_DEFAULT, + preserve_whitespace_tags=USE_DEFAULT, + store_line_numbers=USE_DEFAULT, + string_containers=USE_DEFAULT, + ): + """Constructor. + + :param multi_valued_attributes: If this is set to None, the + TreeBuilder will not turn any values for attributes like + 'class' into lists. Setting this to a dictionary will + customize this behavior; look at DEFAULT_CDATA_LIST_ATTRIBUTES + for an example. + + Internally, these are called "CDATA list attributes", but that + probably doesn't make sense to an end-user, so the argument name + is `multi_valued_attributes`. + + :param preserve_whitespace_tags: A list of tags to treat + the way
<pre> tags are treated in HTML. Tags in this list
+         are immune from pretty-printing; their contents will always be
+         output as-is.
+
+        :param string_containers: A dictionary mapping tag names to
+        the classes that should be instantiated to contain the textual
+        contents of those tags. The default is to use NavigableString
+        for every tag, no matter what the name. You can override the
+        default by changing DEFAULT_STRING_CONTAINERS.
+
+        :param store_line_numbers: If the parser keeps track of the
+         line numbers and positions of the original markup, that
+         information will, by default, be stored in each corresponding
+         `Tag` object. You can turn this off by passing
+         store_line_numbers=False. If the parser you're using doesn't 
+         keep track of this information, then setting store_line_numbers=True
+         will do nothing.
+        """
+        self.soup = None
+        if multi_valued_attributes is self.USE_DEFAULT:
+            multi_valued_attributes = self.DEFAULT_CDATA_LIST_ATTRIBUTES
+        self.cdata_list_attributes = multi_valued_attributes
+        if preserve_whitespace_tags is self.USE_DEFAULT:
+            preserve_whitespace_tags = self.DEFAULT_PRESERVE_WHITESPACE_TAGS
+        self.preserve_whitespace_tags = preserve_whitespace_tags
+        if store_line_numbers == self.USE_DEFAULT:
+            store_line_numbers = self.TRACKS_LINE_NUMBERS
+        self.store_line_numbers = store_line_numbers 
+        if string_containers == self.USE_DEFAULT:
+            string_containers = self.DEFAULT_STRING_CONTAINERS
+        self.string_containers = string_containers
+        
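+    # Aside (not from the bs4 sources): BeautifulSoup forwards extra
+    # constructor keyword arguments to the TreeBuilder, so the options above
+    # can be set without instantiating a builder by hand. A minimal sketch:
+    #
+    #     from bs4 import BeautifulSoup
+    #     soup = BeautifulSoup('<a class="x y">hi</a>', 'html.parser',
+    #                          multi_valued_attributes=None)
+    #     soup.a['class']   # 'x y' -- one string, not ['x', 'y']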
+    def initialize_soup(self, soup):
+        """The BeautifulSoup object has been initialized and is now
+        being associated with the TreeBuilder.
+
+        :param soup: A BeautifulSoup object.
+        """
+        self.soup = soup
+        
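+    # Aside (not from the bs4 sources): the soup and the builder point at
+    # each other only while parsing; the BeautifulSoup constructor clears the
+    # circular reference (builder.soup = None) once _feed() succeeds. Sketch:
+    #
+    #     from bs4 import BeautifulSoup
+    #     soup = BeautifulSoup("<p>hi</p>", "html.parser")
+    #     soup.builder.soup is None   # True -- cleared after parsing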
+    def reset(self):
+        """Do any work necessary to reset the underlying parser
+        for a new document.
+
+        By default, this does nothing.
+        """
+        pass
+
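+    # Aside (not from the bs4 sources): a concrete builder overrides feed()
+    # (and usually reset()) and reports events back to self.soup. A
+    # hypothetical minimal subclass, only to show the shape of the interface:
+    #
+    #     class TrivialTreeBuilder(TreeBuilder):
+    #         def reset(self):
+    #             self.fed = []                    # hypothetical state
+    #         def feed(self, markup):
+    #             self.fed.append(markup)
+    #             self.soup.handle_data(markup)    # treat input as one string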
+    def can_be_empty_element(self, tag_name):
+        """Might a tag with this name be an empty-element tag?
+
+        The final markup may or may not actually present this tag as
+        self-closing.
+
+        For instance: an HTMLBuilder does not consider a <p> tag to be
+        an empty-element tag (it's not in
+        HTMLBuilder.empty_element_tags). This means an empty <p> tag
+        will be presented as "<p></p>", not "<p>" or "<p/>
". + + The default implementation has no opinion about which tags are + empty-element tags, so a tag will be presented as an + empty-element tag if and only if it has no children. + "" will become "", and "bar" will + be left alone. + + :param tag_name: The name of a markup tag. + """ + if self.empty_element_tags is None: + return True + return tag_name in self.empty_element_tags + + def feed(self, markup): + """Run some incoming markup through some parsing process, + populating the `BeautifulSoup` object in self.soup. + + This method is not implemented in TreeBuilder; it must be + implemented in subclasses. + + :return: None. + """ + raise NotImplementedError() + + def prepare_markup(self, markup, user_specified_encoding=None, + document_declared_encoding=None, exclude_encodings=None): + """Run any preliminary steps necessary to make incoming markup + acceptable to the parser. + + :param markup: Some markup -- probably a bytestring. + :param user_specified_encoding: The user asked to try this encoding. + :param document_declared_encoding: The markup itself claims to be + in this encoding. NOTE: This argument is not used by the + calling code and can probably be removed. + :param exclude_encodings: The user asked _not_ to try any of + these encodings. + + :yield: A series of 4-tuples: + (markup, encoding, declared encoding, + has undergone character replacement) + + Each 4-tuple represents a strategy for converting the + document to Unicode and parsing it. Each strategy will be tried + in turn. + + By default, the only strategy is to parse the markup + as-is. See `LXMLTreeBuilderForXML` and + `HTMLParserTreeBuilder` for implementations that take into + account the quirks of particular parsers. + """ + yield markup, None, None, False + + def test_fragment_to_document(self, fragment): + """Wrap an HTML fragment to make it look like a document. + + Different parsers do this differently. For instance, lxml + introduces an empty tag, and html5lib + doesn't. Abstracting this away lets us write simple tests + which run HTML fragments through the parser and compare the + results against other HTML fragments. + + This method should not be used outside of tests. + + :param fragment: A string -- fragment of HTML. + :return: A string -- a full HTML document. + """ + return fragment + + def set_up_substitutions(self, tag): + """Set up any substitutions that will need to be performed on + a `Tag` when it's output as a string. + + By default, this does nothing. See `HTMLTreeBuilder` for a + case where this is used. + + :param tag: A `Tag` + :return: Whether or not a substitution was performed. + """ + return False + + def _replace_cdata_list_attribute_values(self, tag_name, attrs): + """When an attribute value is associated with a tag that can + have multiple values for that attribute, convert the string + value to a list of strings. + + Basically, replaces class="foo bar" with class=["foo", "bar"] + + NOTE: This method modifies its input in place. + + :param tag_name: The name of a tag. + :param attrs: A dictionary containing the tag's attributes. + Any appropriate attribute values will be modified in place. 
+ """ + if not attrs: + return attrs + if self.cdata_list_attributes: + universal = self.cdata_list_attributes.get('*', []) + tag_specific = self.cdata_list_attributes.get( + tag_name.lower(), None) + for attr in list(attrs.keys()): + if attr in universal or (tag_specific and attr in tag_specific): + # We have a "class"-type attribute whose string + # value is a whitespace-separated list of + # values. Split it into a list. + value = attrs[attr] + if isinstance(value, str): + values = nonwhitespace_re.findall(value) + else: + # html5lib sometimes calls setAttributes twice + # for the same tag when rearranging the parse + # tree. On the second call the attribute value + # here is already a list. If this happens, + # leave the value alone rather than trying to + # split it again. + values = value + attrs[attr] = values + return attrs + +class SAXTreeBuilder(TreeBuilder): + """A Beautiful Soup treebuilder that listens for SAX events. + + This is not currently used for anything, but it demonstrates + how a simple TreeBuilder would work. + """ + + def feed(self, markup): + raise NotImplementedError() + + def close(self): + pass + + def startElement(self, name, attrs): + attrs = dict((key[1], value) for key, value in list(attrs.items())) + #print("Start %s, %r" % (name, attrs)) + self.soup.handle_starttag(name, attrs) + + def endElement(self, name): + #print("End %s" % name) + self.soup.handle_endtag(name) + + def startElementNS(self, nsTuple, nodeName, attrs): + # Throw away (ns, nodeName) for now. + self.startElement(nodeName, attrs) + + def endElementNS(self, nsTuple, nodeName): + # Throw away (ns, nodeName) for now. + self.endElement(nodeName) + #handler.endElementNS((ns, node.nodeName), node.nodeName) + + def startPrefixMapping(self, prefix, nodeValue): + # Ignore the prefix for now. + pass + + def endPrefixMapping(self, prefix): + # Ignore the prefix for now. + # handler.endPrefixMapping(prefix) + pass + + def characters(self, content): + self.soup.handle_data(content) + + def startDocument(self): + pass + + def endDocument(self): + pass + + +class HTMLTreeBuilder(TreeBuilder): + """This TreeBuilder knows facts about HTML. + + Such as which tags are empty-element tags. + """ + + empty_element_tags = set([ + # These are from HTML5. + 'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr', + + # These are from earlier versions of HTML and are removed in HTML5. + 'basefont', 'bgsound', 'command', 'frame', 'image', 'isindex', 'nextid', 'spacer' + ]) + + # The HTML standard defines these as block-level elements. Beautiful + # Soup does not treat these elements differently from other elements, + # but it may do so eventually, and this information is available if + # you need to use it. + block_elements = set(["address", "article", "aside", "blockquote", "canvas", "dd", "div", "dl", "dt", "fieldset", "figcaption", "figure", "footer", "form", "h1", "h2", "h3", "h4", "h5", "h6", "header", "hr", "li", "main", "nav", "noscript", "ol", "output", "p", "pre", "section", "table", "tfoot", "ul", "video"]) + + # These HTML tags need special treatment so they can be + # represented by a string class other than NavigableString. + # + # For some of these tags, it's because the HTML standard defines + # an unusual content model for them. I made this list by going + # through the HTML spec + # (https://html.spec.whatwg.org/#metadata-content) and looking for + # "metadata content" elements that can contain strings. 
+ # + # The Ruby tags ( and ) are here despite being normal + # "phrasing content" tags, because the content they contain is + # qualitatively different from other text in the document, and it + # can be useful to be able to distinguish it. + # + # TODO: Arguably

 and
+            # 
+
+<div>This numeric entity is missing the final semicolon: <x t="pi&#241ata"></div>
+<div><a href="http://example.com/</a> that attribute value never got closed</div>
+<div><a href="foo</a>, </a><a href="bar">that attribute value was closed by the subsequent tag</a></div>
+<! This document contains <!an incomplete declaration <div>(do you see it?)</div>
+<div>This document ends with <!an incomplete declaration
+<div><a style={height:21px;}>That attribute value was bogus</a></div>
+<! DOCTYPE html>The doctype is invalid because it contains extra whitespace
+<div><table><td nowrap>That boolean attribute had no value</td></table></div>
+<div>Here's a nonexistent entity: &#foo; (do you see it?)</div>
+<div>This document ends before the entity finishes: &gt
+<div><p>Paragraphs shouldn't contain block display elements, but this one does: <blockquote>you see?</blockquote></p>
+<b b="20" a="1" b="10" a="2" a="3" a="4">Multiple values for the same attribute.</b>
+<table><div>Here's a table</div></table>
+<div><table><td>This tag contains nothing but whitespace: <b>   </b></td></table></div>
+<blockquote><p><b>This p tag is cut off by</blockquote></p>the end of the blockquote tag
+<table><div>Here's a nested table:<table><tr><td>foo</td></tr></table></div>This table contains bare markup</table>
+<div>This document contains a surprise doctype <!DOCTYPE html></div>
+<div><a\N{SNOWMAN}>Tag name contains Unicode characters</a\N{SNOWMAN}></div>
+ + +""" + + +class SoupTest(object): + + @property + def default_builder(self): + return default_builder + + def soup(self, markup, **kwargs): + """Build a Beautiful Soup object from markup.""" + builder = kwargs.pop('builder', self.default_builder) + return BeautifulSoup(markup, builder=builder, **kwargs) + + def document_for(self, markup, **kwargs): + """Turn an HTML fragment into a document. + + The details depend on the builder. + """ + return self.default_builder(**kwargs).test_fragment_to_document(markup) + + def assert_soup(self, to_parse, compare_parsed_to=None): + """Parse some markup using Beautiful Soup and verify that + the output markup is as expected. + """ + builder = self.default_builder + obj = BeautifulSoup(to_parse, builder=builder) + if compare_parsed_to is None: + compare_parsed_to = to_parse + + # Verify that the documents come out the same. + assert obj.decode() == self.document_for(compare_parsed_to) + + # Also run some checks on the BeautifulSoup object itself: + + # Verify that every tag that was opened was eventually closed. + + # There are no tags in the open tag counter. + assert all(v==0 for v in list(obj.open_tag_counter.values())) + + # The only tag in the tag stack is the one for the root + # document. + assert [obj.ROOT_TAG_NAME] == [x.name for x in obj.tagStack] + + assertSoupEquals = assert_soup + + def assertConnectedness(self, element): + """Ensure that next_element and previous_element are properly + set for all descendants of the given element. + """ + earlier = None + for e in element.descendants: + if earlier: + assert e == earlier.next_element + assert earlier == e.previous_element + earlier = e + + def linkage_validator(self, el, _recursive_call=False): + """Ensure proper linkage throughout the document.""" + descendant = None + # Document element should have no previous element or previous sibling. + # It also shouldn't have a next sibling. 
+ if el.parent is None: + assert el.previous_element is None,\ + "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( + el, el.previous_element, None + ) + assert el.previous_sibling is None,\ + "Bad previous_sibling\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( + el, el.previous_sibling, None + ) + assert el.next_sibling is None,\ + "Bad next_sibling\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format( + el, el.next_sibling, None + ) + + idx = 0 + child = None + last_child = None + last_idx = len(el.contents) - 1 + for child in el.contents: + descendant = None + + # Parent should link next element to their first child + # That child should have no previous sibling + if idx == 0: + if el.parent is not None: + assert el.next_element is child,\ + "Bad next_element\nNODE: {}\nNEXT: {}\nEXPECTED: {}".format( + el, el.next_element, child + ) + assert child.previous_element is el,\ + "Bad previous_element\nNODE: {}\nPREV: {}\nEXPECTED: {}".format( + child, child.previous_element, el + ) + assert child.previous_sibling is None,\ + "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED: {}".format( + child, child.previous_sibling, None + ) + + # If not the first child, previous index should link as sibling to this index + # Previous element should match the last index or the last bubbled up descendant + else: + assert child.previous_sibling is el.contents[idx - 1],\ + "Bad previous_sibling\nNODE: {}\nPREV {}\nEXPECTED {}".format( + child, child.previous_sibling, el.contents[idx - 1] + ) + assert el.contents[idx - 1].next_sibling is child,\ + "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + el.contents[idx - 1], el.contents[idx - 1].next_sibling, child + ) + + if last_child is not None: + assert child.previous_element is last_child,\ + "Bad previous_element\nNODE: {}\nPREV {}\nEXPECTED {}\nCONTENTS {}".format( + child, child.previous_element, last_child, child.parent.contents + ) + assert last_child.next_element is child,\ + "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + last_child, last_child.next_element, child + ) + + if isinstance(child, Tag) and child.contents: + descendant = self.linkage_validator(child, True) + # A bubbled up descendant should have no next siblings + assert descendant.next_sibling is None,\ + "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + descendant, descendant.next_sibling, None + ) + + # Mark last child as either the bubbled up descendant or the current child + if descendant is not None: + last_child = descendant + else: + last_child = child + + # If last child, there are non next siblings + if idx == last_idx: + assert child.next_sibling is None,\ + "Bad next_sibling\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + child, child.next_sibling, None + ) + idx += 1 + + child = descendant if descendant is not None else child + if child is None: + child = el + + if not _recursive_call and child is not None: + target = el + while True: + if target is None: + assert child.next_element is None, \ + "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + child, child.next_element, None + ) + break + elif target.next_sibling is not None: + assert child.next_element is target.next_sibling, \ + "Bad next_element\nNODE: {}\nNEXT {}\nEXPECTED {}".format( + child, child.next_element, target.next_sibling + ) + break + target = target.parent + + # We are done, so nothing to return + return None + else: + # Return the child to the recursive caller + return child + + def assert_selects(self, tags, should_match): + """Make sure that the given tags have the 
correct text. + + This is used in tests that define a bunch of tags, each + containing a single string, and then select certain strings by + some mechanism. + """ + assert [tag.string for tag in tags] == should_match + + def assert_selects_ids(self, tags, should_match): + """Make sure that the given tags have the correct IDs. + + This is used in tests that define a bunch of tags, each + containing a single string, and then select certain strings by + some mechanism. + """ + assert [tag['id'] for tag in tags] == should_match + + +class TreeBuilderSmokeTest(object): + # Tests that are common to HTML and XML tree builders. + + @pytest.mark.parametrize( + "multi_valued_attributes", + [None, {}, dict(b=['class']), {'*': ['notclass']}] + ) + def test_attribute_not_multi_valued(self, multi_valued_attributes): + markup = '' + soup = self.soup(markup, multi_valued_attributes=multi_valued_attributes) + assert soup.a['class'] == 'a b c' + + @pytest.mark.parametrize( + "multi_valued_attributes", [dict(a=['class']), {'*': ['class']}] + ) + def test_attribute_multi_valued(self, multi_valued_attributes): + markup = '' + soup = self.soup( + markup, multi_valued_attributes=multi_valued_attributes + ) + assert soup.a['class'] == ['a', 'b', 'c'] + + def test_invalid_doctype(self): + markup = 'content' + markup = '' + soup = self.soup(markup) + +class HTMLTreeBuilderSmokeTest(TreeBuilderSmokeTest): + + """A basic test of a treebuilder's competence. + + Any HTML treebuilder, present or future, should be able to pass + these tests. With invalid markup, there's room for interpretation, + and different parsers can handle it differently. But with the + markup in these tests, there's not much room for interpretation. + """ + + def test_empty_element_tags(self): + """Verify that all HTML4 and HTML5 empty element (aka void element) tags + are handled correctly. + """ + for name in [ + 'area', 'base', 'br', 'col', 'embed', 'hr', 'img', 'input', 'keygen', 'link', 'menuitem', 'meta', 'param', 'source', 'track', 'wbr', + 'spacer', 'frame' + ]: + soup = self.soup("") + new_tag = soup.new_tag(name) + assert new_tag.is_empty_element == True + + def test_special_string_containers(self): + soup = self.soup( + "" + ) + assert isinstance(soup.style.string, Stylesheet) + assert isinstance(soup.script.string, Script) + + soup = self.soup( + "" + ) + assert isinstance(soup.style.string, Stylesheet) + # The contents of the style tag resemble an HTML comment, but + # it's not treated as a comment. + assert soup.style.string == "" + assert isinstance(soup.style.string, Stylesheet) + + def test_pickle_and_unpickle_identity(self): + # Pickling a tree, then unpickling it, yields a tree identical + # to the original. + tree = self.soup("foo") + dumped = pickle.dumps(tree, 2) + loaded = pickle.loads(dumped) + assert loaded.__class__ == BeautifulSoup + assert loaded.decode() == tree.decode() + + def assertDoctypeHandled(self, doctype_fragment): + """Assert that a given doctype string is handled correctly.""" + doctype_str, soup = self._document_with_doctype(doctype_fragment) + + # Make sure a Doctype object was created. + doctype = soup.contents[0] + assert doctype.__class__ == Doctype + assert doctype == doctype_fragment + assert soup.encode("utf8")[:len(doctype_str)] == doctype_str + + # Make sure that the doctype was correctly associated with the + # parse tree and that the rest of the document parsed. 
+ assert soup.p.contents[0] == 'foo' + + def _document_with_doctype(self, doctype_fragment, doctype_string="DOCTYPE"): + """Generate and parse a document with the given doctype.""" + doctype = '' % (doctype_string, doctype_fragment) + markup = doctype + '\n
<p>foo</p>
' + soup = self.soup(markup) + return doctype.encode("utf8"), soup + + def test_normal_doctypes(self): + """Make sure normal, everyday HTML doctypes are handled correctly.""" + self.assertDoctypeHandled("html") + self.assertDoctypeHandled( + 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"') + + def test_empty_doctype(self): + soup = self.soup("") + doctype = soup.contents[0] + assert "" == doctype.strip() + + def test_mixed_case_doctype(self): + # A lowercase or mixed-case doctype becomes a Doctype. + for doctype_fragment in ("doctype", "DocType"): + doctype_str, soup = self._document_with_doctype( + "html", doctype_fragment + ) + + # Make sure a Doctype object was created and that the DOCTYPE + # is uppercase. + doctype = soup.contents[0] + assert doctype.__class__ == Doctype + assert doctype == "html" + assert soup.encode("utf8")[:len(doctype_str)] == b"" + + # Make sure that the doctype was correctly associated with the + # parse tree and that the rest of the document parsed. + assert soup.p.contents[0] == 'foo' + + def test_public_doctype_with_url(self): + doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"' + self.assertDoctypeHandled(doctype) + + def test_system_doctype(self): + self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"') + + def test_namespaced_system_doctype(self): + # We can handle a namespaced doctype with a system ID. + self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"') + + def test_namespaced_public_doctype(self): + # Test a namespaced doctype with a public id. + self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"') + + def test_real_xhtml_document(self): + """A real XHTML document should come out more or less the same as it went in.""" + markup = b""" + + +Hello. +Goodbye. +""" + with warnings.catch_warnings(record=True) as w: + soup = self.soup(markup) + assert soup.encode("utf-8").replace(b"\n", b"") == markup.replace(b"\n", b"") + + # No warning was issued about parsing an XML document as HTML, + # because XHTML is both. + assert w == [] + + + def test_namespaced_html(self): + # When a namespaced XML document is parsed as HTML it should + # be treated as HTML with weird tag names. + markup = b"""content""" + with warnings.catch_warnings(record=True) as w: + soup = self.soup(markup) + + assert 2 == len(soup.find_all("ns1:foo")) + + # n.b. no "you're parsing XML as HTML" warning was given + # because there was no XML declaration. + assert [] == w + + def test_detect_xml_parsed_as_html(self): + # A warning is issued when parsing an XML document as HTML, + # but basic stuff should still work. + markup = b"""string""" + with warnings.catch_warnings(record=True) as w: + soup = self.soup(markup) + assert soup.tag.string == 'string' + [warning] = w + assert isinstance(warning.message, XMLParsedAsHTMLWarning) + assert str(warning.message) == XMLParsedAsHTMLWarning.MESSAGE + + # NOTE: the warning is not issued if the document appears to + # be XHTML (tested with test_real_xhtml_document in the + # superclass) or if there is no XML declaration (tested with + # test_namespaced_html in the superclass). + + def test_processing_instruction(self): + # We test both Unicode and bytestring to verify that + # process_markup correctly sets processing_instruction_class + # even when the markup is already Unicode and there is no + # need to process anything. 
+        markup = """<?PITarget PIContent?>"""
+        soup = self.soup(markup)
+        assert markup == soup.decode()
+
+        markup = b"""<?PITarget PIContent?>"""
+        soup = self.soup(markup)
+        assert markup == soup.encode("utf8")
+
+    def test_deepcopy(self):
+        """Make sure you can copy the tree builder.
+
+        This is important because the builder is part of a
+        BeautifulSoup object, and we want to be able to copy that.
+        """
+        copy.deepcopy(self.default_builder)
+
+    def test_p_tag_is_never_empty_element(self):
+        """A <p> tag is never designated as an empty-element tag.
+
+        Even if the markup shows it as an empty-element tag, it
+        shouldn't be presented that way.
+        """
+        soup = self.soup("<p/>")
+        assert not soup.p.is_empty_element
+        assert str(soup.p) == "<p></p>"
+
+    def test_unclosed_tags_get_closed(self):
+        """A tag that's not closed by the end of the document should be closed.
+
+        This applies to all tags except empty-element tags.
+        """
+        self.assert_soup("<p>", "<p></p>")
+        self.assert_soup("<b>", "<b></b>")
+
+        self.assert_soup("<br>", "<br/>")
+
+    def test_br_is_always_empty_element_tag(self):
+        """A <br> tag is designated as an empty-element tag.
+
+        Some parsers treat <br></br> as one <br/> tag, some parsers as
+        two tags, but it should always be an empty-element tag.
+        """
+        soup = self.soup("<br></br>")
+        assert soup.br.is_empty_element
+        assert str(soup.br) == "
" + + def test_nested_formatting_elements(self): + self.assert_soup("") + + def test_double_head(self): + html = ''' + + +Ordinary HEAD element test + + + +Hello, world! + + +''' + soup = self.soup(html) + assert "text/javascript" == soup.find('script')['type'] + + def test_comment(self): + # Comments are represented as Comment objects. + markup = "
<p>foo<!--foobar-->baz</p>
" + self.assert_soup(markup) + + soup = self.soup(markup) + comment = soup.find(string="foobar") + assert comment.__class__ == Comment + + # The comment is properly integrated into the tree. + foo = soup.find(string="foo") + assert comment == foo.next_element + baz = soup.find(string="baz") + assert comment == baz.previous_element + + def test_preserved_whitespace_in_pre_and_textarea(self): + """Whitespace must be preserved in
<pre> and <textarea> tags,
+        even if that would mean not prettifying the markup.
+        """
+        pre_markup = "<pre>   </pre>\n"
+        textarea_markup = "<textarea> woo\nwoo  </textarea>\n"
+        self.assert_soup(pre_markup)
+        self.assert_soup(textarea_markup)
+
+        soup = self.soup(pre_markup)
+        assert soup.pre.prettify() == pre_markup
+
+        soup = self.soup(textarea_markup)
+        assert soup.textarea.prettify() == textarea_markup
+
+        soup = self.soup("")
+        assert soup.textarea.prettify() == "\n"
+
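+    # Aside (not from the bs4 sources): the same behavior, sketched outside
+    # the test harness -- tags named in preserve_whitespace_tags (such as
+    # <pre>) are printed with their contents untouched:
+    #
+    #     from bs4 import BeautifulSoup
+    #     soup = BeautifulSoup("<div><pre> a  b </pre></div>", "html.parser")
+    #     "<pre> a  b </pre>" in soup.prettify()   # True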
+    def test_nested_inline_elements(self):
+        """Inline elements can be nested indefinitely."""
+        b_tag = "Inside a B tag"
+        self.assert_soup(b_tag)
+
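+        # Aside (not from the bs4 sources): navigating the tree built from
+        # the markup above, as a hedged sketch:
+        #
+        #     from bs4 import BeautifulSoup
+        #     soup = BeautifulSoup("<b>Inside a B tag</b>", "html.parser")
+        #     soup.b.string   # 'Inside a B tag'
+        #     soup.b.name     # 'b'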
+        nested_b_tag = "
<p>A <b>nested <b>tag</b></b></p>
" + self.assert_soup(nested_b_tag) + + double_nested_b_tag = "
<p>A <b>doubly <b>nested <b>tag</b></b></b></p>
" + self.assert_soup(nested_b_tag) + + def test_nested_block_level_elements(self): + """Block elements can be nested.""" + soup = self.soup('
<blockquote><p><b>Foo</b></p></blockquote>
')
+        blockquote = soup.blockquote
+        assert blockquote.p.b.string == 'Foo'
+        assert blockquote.b.string == 'Foo'
+
+    def test_correctly_nested_tables(self):
+        """One table can go inside another one."""
+        markup = ('<table id="1">'
+                  '<tr>'
+                  "<td>Here's another table:"
+                  '<table id="2">'
+                  '<tr><td>foo</td></tr>'
+                  '</table></td>'
+                  )
+
+        self.assert_soup(
+            markup,
+            '<table id="1"><tr><td>Here\'s another table:'
+            '<table id="2"><tr><td>foo</td></tr></table>'
+            '</td></tr></table>')
+
+        self.assert_soup(
+            "<table><thead><tr><td>Foo</td></tr></thead>"
+            "<tbody><tr><td>Bar</td></tr></tbody>"
+            "<tfoot><tr><td>Baz</td></tr></tfoot></table>")
+
+    def test_multivalued_attribute_with_whitespace(self):
+        # Whitespace separating the values of a multi-valued attribute
+        # should be ignored.
+
+        markup = '<div class=" foo bar  ">
'
+        soup = self.soup(markup)
+        assert ['foo', 'bar'] == soup.div['class']
+
+        # If you search by the literal name of the class it's like the whitespace
+        # wasn't there.
+        assert soup.div == soup.find('div', class_="foo bar")
+
+    def test_deeply_nested_multivalued_attribute(self):
+        # html5lib can set the attributes of the same tag many times
+        # as it rearranges the tree. This has caused problems with
+        # multivalued attributes.
+        markup = '<table><div><div class="css"></div></div></table>
'
+        soup = self.soup(markup)
+        assert ["css"] == soup.div.div['class']
+
+    def test_multivalued_attribute_on_html(self):
+        # html5lib uses a different API to set the attributes of the
+        # <html> tag. This has caused problems with multivalued
+        # attributes.
+        markup = '<html class="a b"></html>'
+        soup = self.soup(markup)
+        assert ["a", "b"] == soup.html['class']
+
+    def test_angle_brackets_in_attribute_values_are_escaped(self):
+        self.assert_soup('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')
+
+    def test_strings_resembling_character_entity_references(self):
+        # "&T" and "&p" look like incomplete character entities, but they are
+        # not.
+        self.assert_soup(
+            "
<p>&bull; AT&T is in the s&p 500</p>
", + "
<p>\u2022 AT&T is in the s&p 500</p>
" + ) + + def test_apos_entity(self): + self.assert_soup( + "
<p>Bob&apos;s Bar</p>
", + "
<p>Bob's Bar</p>
", + ) + + def test_entities_in_foreign_document_encoding(self): + # “ and ” are invalid numeric entities referencing + # Windows-1252 characters. - references a character common + # to Windows-1252 and Unicode, and ☃ references a + # character only found in Unicode. + # + # All of these entities should be converted to Unicode + # characters. + markup = "
<p>&#147;Hello&#148; &#45;&#9731;</p>
" + soup = self.soup(markup) + assert "“Hello” -☃" == soup.p.string + + def test_entities_in_attributes_converted_to_unicode(self): + expect = '
<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>
' + self.assert_soup('
<p id="pi&#241;ata"></p>
', expect) + self.assert_soup('
<p id="pi&#xf1;ata"></p>
', expect) + self.assert_soup('
<p id="pi&#Xf1;ata"></p>
', expect) + self.assert_soup('
<p id="pi&ntilde;ata"></p>
', expect) + + def test_entities_in_text_converted_to_unicode(self): + expect = '
<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>
' + self.assert_soup("
<p>pi&#241;ata</p>
", expect) + self.assert_soup("
<p>pi&#xf1;ata</p>
", expect) + self.assert_soup("
<p>pi&#Xf1;ata</p>
", expect) + self.assert_soup("
<p>pi&ntilde;ata</p>
", expect) + + def test_quot_entity_converted_to_quotation_mark(self): + self.assert_soup("

I said "good day!"

", + '

I said "good day!"

') + + def test_out_of_range_entity(self): + expect = "\N{REPLACEMENT CHARACTER}" + self.assert_soup("�", expect) + self.assert_soup("�", expect) + self.assert_soup("�", expect) + + def test_multipart_strings(self): + "Mostly to prevent a recurrence of a bug in the html5lib treebuilder." + soup = self.soup("

\nfoo

") + assert "p" == soup.h2.string.next_element.name + assert "p" == soup.p.name + self.assertConnectedness(soup) + + def test_empty_element_tags(self): + """Verify consistent handling of empty-element tags, + no matter how they come in through the markup. + """ + self.assert_soup('


', "


") + self.assert_soup('


', "


") + + def test_head_tag_between_head_and_body(self): + "Prevent recurrence of a bug in the html5lib treebuilder." + content = """ + + foo + +""" + soup = self.soup(content) + assert soup.html.body is not None + self.assertConnectedness(soup) + + def test_multiple_copies_of_a_tag(self): + "Prevent recurrence of a bug in the html5lib treebuilder." + content = """ + + + + + +""" + soup = self.soup(content) + self.assertConnectedness(soup.article) + + def test_basic_namespaces(self): + """Parsers don't need to *understand* namespaces, but at the + very least they should not choke on namespaces or lose + data.""" + + markup = b'4' + soup = self.soup(markup) + assert markup == soup.encode() + html = soup.html + assert 'http://www.w3.org/1999/xhtml' == soup.html['xmlns'] + assert 'http://www.w3.org/1998/Math/MathML' == soup.html['xmlns:mathml'] + assert 'http://www.w3.org/2000/svg' == soup.html['xmlns:svg'] + + def test_multivalued_attribute_value_becomes_list(self): + markup = b'' + soup = self.soup(markup) + assert ['foo', 'bar'] == soup.a['class'] + + # + # Generally speaking, tests below this point are more tests of + # Beautiful Soup than tests of the tree builders. But parsers are + # weird, so we run these tests separately for every tree builder + # to detect any differences between them. + # + + def test_can_parse_unicode_document(self): + # A seemingly innocuous document... but it's in Unicode! And + # it contains characters that can't be represented in the + # encoding found in the declaration! The horror! + markup = 'Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!' + soup = self.soup(markup) + assert 'Sacr\xe9 bleu!' == soup.body.string + + def test_soupstrainer(self): + """Parsers should be able to work with SoupStrainers.""" + strainer = SoupStrainer("b") + soup = self.soup("A bold statement", + parse_only=strainer) + assert soup.decode() == "bold" + + def test_single_quote_attribute_values_become_double_quotes(self): + self.assert_soup("", + '') + + def test_attribute_values_with_nested_quotes_are_left_alone(self): + text = """a""" + self.assert_soup(text) + + def test_attribute_values_with_double_nested_quotes_get_quoted(self): + text = """a""" + soup = self.soup(text) + soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"' + self.assert_soup( + soup.foo.decode(), + """a""") + + def test_ampersand_in_attribute_value_gets_escaped(self): + self.assert_soup('', + '') + + self.assert_soup( + 'foo', + 'foo') + + def test_escaped_ampersand_in_attribute_value_is_left_alone(self): + self.assert_soup('') + + def test_entities_in_strings_converted_during_parsing(self): + # Both XML and HTML entities are converted to Unicode characters + # during parsing. + text = "

<<sacré bleu!>>

" + expected = "

<<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>

" + self.assert_soup(text, expected) + + def test_smart_quotes_converted_on_the_way_in(self): + # Microsoft smart quotes are converted to Unicode characters during + # parsing. + quote = b"

\x91Foo\x92

" + soup = self.soup(quote) + assert soup.p.string == "\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}" + + def test_non_breaking_spaces_converted_on_the_way_in(self): + soup = self.soup("  ") + assert soup.a.string == "\N{NO-BREAK SPACE}" * 2 + + def test_entities_converted_on_the_way_out(self): + text = "

<<sacré bleu!>>

" + expected = "

<<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>>

".encode("utf-8") + soup = self.soup(text) + assert soup.p.encode("utf-8") == expected + + def test_real_iso_8859_document(self): + # Smoke test of interrelated functionality, using an + # easy-to-understand document. + + # Here it is in Unicode. Note that it claims to be in ISO-8859-1. + unicode_html = '

Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!

' + + # That's because we're going to encode it into ISO-8859-1, + # and use that to test. + iso_latin_html = unicode_html.encode("iso-8859-1") + + # Parse the ISO-8859-1 HTML. + soup = self.soup(iso_latin_html) + + # Encode it to UTF-8. + result = soup.encode("utf-8") + + # What do we expect the result to look like? Well, it would + # look like unicode_html, except that the META tag would say + # UTF-8 instead of ISO-8859-1. + expected = unicode_html.replace("ISO-8859-1", "utf-8") + + # And, of course, it would be in UTF-8, not Unicode. + expected = expected.encode("utf-8") + + # Ta-da! + assert result == expected + + def test_real_shift_jis_document(self): + # Smoke test to make sure the parser can handle a document in + # Shift-JIS encoding, without choking. + shift_jis_html = ( + b'
'
+            b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
+            b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
+            b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
+            b'
') + unicode_html = shift_jis_html.decode("shift-jis") + soup = self.soup(unicode_html) + + # Make sure the parse tree is correctly encoded to various + # encodings. + assert soup.encode("utf-8") == unicode_html.encode("utf-8") + assert soup.encode("euc_jp") == unicode_html.encode("euc_jp") + + def test_real_hebrew_document(self): + # A real-world test to make sure we can convert ISO-8859-9 (a + # Hebrew encoding) to UTF-8. + hebrew_document = b'Hebrew (ISO 8859-8) in Visual Directionality

Hebrew (ISO 8859-8) in Visual Directionality

\xed\xe5\xec\xf9' + soup = self.soup( + hebrew_document, from_encoding="iso8859-8") + # Some tree builders call it iso8859-8, others call it iso-8859-9. + # That's not a difference we really care about. + assert soup.original_encoding in ('iso8859-8', 'iso-8859-8') + assert soup.encode('utf-8') == ( + hebrew_document.decode("iso8859-8").encode("utf-8") + ) + + def test_meta_tag_reflects_current_encoding(self): + # Here's the tag saying that a document is + # encoded in Shift-JIS. + meta_tag = ('') + + # Here's a document incorporating that meta tag. + shift_jis_html = ( + '\n%s\n' + '' + 'Shift-JIS markup goes here.') % meta_tag + soup = self.soup(shift_jis_html) + + # Parse the document, and the charset is seemingly unaffected. + parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'}) + content = parsed_meta['content'] + assert 'text/html; charset=x-sjis' == content + + # But that value is actually a ContentMetaAttributeValue object. + assert isinstance(content, ContentMetaAttributeValue) + + # And it will take on a value that reflects its current + # encoding. + assert 'text/html; charset=utf8' == content.encode("utf8") + + # For the rest of the story, see TestSubstitutions in + # test_tree.py. + + def test_html5_style_meta_tag_reflects_current_encoding(self): + # Here's the tag saying that a document is + # encoded in Shift-JIS. + meta_tag = ('') + + # Here's a document incorporating that meta tag. + shift_jis_html = ( + '\n%s\n' + '' + 'Shift-JIS markup goes here.') % meta_tag + soup = self.soup(shift_jis_html) + + # Parse the document, and the charset is seemingly unaffected. + parsed_meta = soup.find('meta', id="encoding") + charset = parsed_meta['charset'] + assert 'x-sjis' == charset + + # But that value is actually a CharsetMetaAttributeValue object. + assert isinstance(charset, CharsetMetaAttributeValue) + + # And it will take on a value that reflects its current + # encoding. + assert 'utf8' == charset.encode("utf8") + + def test_python_specific_encodings_not_used_in_charset(self): + # You can encode an HTML document using a Python-specific + # encoding, but that encoding won't be mentioned _inside_ the + # resulting document. Instead, the document will appear to + # have no encoding. + for markup in [ + b'' + b'' + ]: + soup = self.soup(markup) + for encoding in PYTHON_SPECIFIC_ENCODINGS: + if encoding in ( + 'idna', 'mbcs', 'oem', 'undefined', + 'string_escape', 'string-escape' + ): + # For one reason or another, these will raise an + # exception if we actually try to use them, so don't + # bother. + continue + encoded = soup.encode(encoding) + assert b'meta charset=""' in encoded + assert encoding.encode("ascii") not in encoded + + def test_tag_with_no_attributes_can_have_attributes_added(self): + data = self.soup("text") + data.a['foo'] = 'bar' + assert 'text' == data.a.decode() + + def test_closing_tag_with_no_opening_tag(self): + # Without BeautifulSoup.open_tag_counter, the tag will + # cause _popToTag to be called over and over again as we look + # for a tag that wasn't there. The result is that 'text2' + # will show up outside the body of the document. + soup = self.soup("

+        assert "<body><div><p>text1</p>text2</div></body>" == soup.body.decode()
+
+    def test_worst_case(self):
+        """Test the worst case (currently) for linking issues."""
+
+        soup = self.soup(BAD_DOCUMENT)
+        self.linkage_validator(soup)
+
+
+class XMLTreeBuilderSmokeTest(TreeBuilderSmokeTest):
+
+    def test_pickle_and_unpickle_identity(self):
+        # Pickling a tree, then unpickling it, yields a tree identical
+        # to the original.
+        tree = self.soup("<a><b>foo</a>")
+        dumped = pickle.dumps(tree, 2)
+        loaded = pickle.loads(dumped)
+        assert loaded.__class__ == BeautifulSoup
+        assert loaded.decode() == tree.decode()
+
+    def test_docstring_generated(self):
+        soup = self.soup("<root/>")
+        assert soup.encode() == b'<?xml version="1.0" encoding="utf-8"?>\n<root/>'
+
+    def test_xml_declaration(self):
+        markup = b"""<?xml version="1.0" encoding="utf8"?>\n<foo/>"""
+        soup = self.soup(markup)
+        assert markup == soup.encode("utf8")
+
+    def test_python_specific_encodings_not_used_in_xml_declaration(self):
+        # You can encode an XML document using a Python-specific
+        # encoding, but that encoding won't be mentioned _inside_ the
+        # resulting document.
+        markup = b"""<?xml version="1.0"?>\n<foo/>"""
+        soup = self.soup(markup)
+        for encoding in PYTHON_SPECIFIC_ENCODINGS:
+            if encoding in (
+                'idna', 'mbcs', 'oem', 'undefined',
+                'string_escape', 'string-escape'
+            ):
+                # For one reason or another, these will raise an
+                # exception if we actually try to use them, so don't
+                # bother.
+                continue
+            encoded = soup.encode(encoding)
+            assert b'<?xml version="1.0"?>' in encoded
+            assert encoding.encode("ascii") not in encoded
+
+    def test_processing_instruction(self):
+        markup = b"""<?xml version="1.0" encoding="utf8"?>\n<?PITarget PIContent?>"""
+        soup = self.soup(markup)
+        assert markup == soup.encode("utf8")
+
+    def test_real_xhtml_document(self):
+        """A real XHTML document should come out *exactly* the same as it went in."""
+        markup = b"""<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head><title>Hello.</title></head>
+<body>Goodbye.</body>
+</html>"""
+        soup = self.soup(markup)
+        assert soup.encode("utf-8") == markup
+
+    def test_nested_namespaces(self):
+        doc = b"""<?xml version="1.0" encoding="utf-8"?>
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
+<parent xmlns="http://ns1/">
+<child xmlns="http://ns2/" xmlns:ns3="http://ns3/">
+<grandchild ns3:attr="value" xmlns="http://ns4/"/>
+</child>
+</parent>"""
+        soup = self.soup(doc)
+        assert doc == soup.encode()
+
+    def test_formatter_processes_script_tag_for_xml_documents(self):
+        doc = """
+  <script type="text/javascript">
+  </script>
+"""
+        soup = BeautifulSoup(doc, "lxml-xml")
+        # lxml would have stripped this while parsing, but we can add
+        # it later.
+        soup.script.string = 'console.log("< < hey > > ");'
+        encoded = soup.encode()
+        assert b"&lt; &lt; hey &gt; &gt;" in encoded
+
+    def test_can_parse_unicode_document(self):
+        markup = '<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
+        soup = self.soup(markup)
+        assert 'Sacr\xe9 bleu!' == soup.root.string
+
+    def test_can_parse_unicode_document_begining_with_bom(self):
+        markup = '\N{BYTE ORDER MARK}<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
+        soup = self.soup(markup)
+        assert 'Sacr\xe9 bleu!' == soup.root.string
+
+    def test_popping_namespaced_tag(self):
+        markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
+        soup = self.soup(markup)
+        assert str(soup.rss) == markup
+
+    def test_docstring_includes_correct_encoding(self):
+        soup = self.soup("<root/>")
+        assert soup.encode("latin1") == b'<?xml version="1.0" encoding="latin1"?>\n<root/>'
+
+    def test_large_xml_document(self):
+        """A large XML document should come out the same as it went in."""
+        markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
+                  + b'0' * (2**12)
+                  + b'</root>')
+        soup = self.soup(markup)
+        assert soup.encode("utf-8") == markup
+
+    def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
+        self.assert_soup("<p>", "<p/>")
+        self.assert_soup("<p>foo</p>")
+
+    def test_namespaces_are_preserved(self):
+        markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
+        soup = self.soup(markup)
+        root = soup.root
+        assert "http://example.com/" == root['xmlns:a']
+        assert "http://example.net/" == root['xmlns:b']
+
+    def test_closing_namespaced_tag(self):
+        markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
+        soup = self.soup(markup)
+        assert str(soup.p) == markup
+
+    def test_namespaced_attributes(self):
+        markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
+        soup = self.soup(markup)
+        assert str(soup.foo) == markup
+
+    def test_namespaced_attributes_xml_namespace(self):
+        markup = '<foo xml:lang="fr">bar</foo>'
+        soup = self.soup(markup)
+        assert str(soup.foo) == markup
+
+    def test_find_by_prefixed_name(self):
+        doc = """<?xml version="1.0" encoding="utf-8"?>
+<Document xmlns="http://example.com/ns0"
+     xmlns:ns1="http://example.com/ns1"
+     xmlns:ns2="http://example.com/ns2"
+     <ns1:tag>foo</ns1:tag>
+     <ns1:tag>bar</ns1:tag>
+     <ns2:tag key="value">baz</ns2:tag>
+</Document>
+"""
+        soup = self.soup(doc)
+
+        # There are three <tag> tags.
+        assert 3 == len(soup.find_all('tag'))
+
+        # But two of them are ns1:tag and one of them is ns2:tag.
+        assert 2 == len(soup.find_all('ns1:tag'))
+        assert 1 == len(soup.find_all('ns2:tag'))
+
+        assert 1, len(soup.find_all('ns2:tag', key='value'))
+        assert 3, len(soup.find_all(['ns1:tag', 'ns2:tag']))
+
+    def test_copy_tag_preserves_namespace(self):
+        xml = """<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<w:document xmlns:w="http://schemas.openxmlformats.org/wordprocessingml/2006/main"></w:document>"""
+
+        soup = self.soup(xml)
+        tag = soup.document
+        duplicate = copy.copy(tag)
+
+        # The two tags have the same namespace prefix.
+        assert tag.prefix == duplicate.prefix
+
+    def test_worst_case(self):
+        """Test the worst case (currently) for linking issues."""
+
+        soup = self.soup(BAD_DOCUMENT)
+        self.linkage_validator(soup)
+
+
+class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
+    """Smoke test for a tree builder that supports HTML5."""
+
+    def test_real_xhtml_document(self):
+        # Since XHTML is not HTML5, HTML5 parsers are not tested to handle
+        # XHTML documents in any particular way.
+        pass
+
+    def test_html_tags_have_namespace(self):
+        markup = "<a>"
+        soup = self.soup(markup)
+        assert "http://www.w3.org/1999/xhtml" == soup.a.namespace
+
+    def test_svg_tags_have_namespace(self):
+        markup = '<svg><circle/></svg>'
+        soup = self.soup(markup)
+        namespace = "http://www.w3.org/2000/svg"
+        assert namespace == soup.svg.namespace
+        assert namespace == soup.circle.namespace
+
+    def test_mathml_tags_have_namespace(self):
+        markup = '<math><msqrt>5</msqrt></math>'
+        soup = self.soup(markup)
+        namespace = 'http://www.w3.org/1998/Math/MathML'
+        assert namespace == soup.math.namespace
+        assert namespace == soup.msqrt.namespace
+
+    def test_xml_declaration_becomes_comment(self):
+        markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
+        soup = self.soup(markup)
+        assert isinstance(soup.contents[0], Comment)
+        assert soup.contents[0] == '?xml version="1.0" encoding="utf-8"?'
+ assert "html" == soup.contents[0].next_element.name diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/__init__.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000..a1b6ec5 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/__init__.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_builder.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_builder.cpython-311.pyc new file mode 100644 index 0000000..39b2376 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_builder.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_builder_registry.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_builder_registry.cpython-311.pyc new file mode 100644 index 0000000..0577b3c Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_builder_registry.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_css.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_css.cpython-311.pyc new file mode 100644 index 0000000..3bfc402 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_css.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_dammit.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_dammit.cpython-311.pyc new file mode 100644 index 0000000..68a1d48 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_dammit.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_docs.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_docs.cpython-311.pyc new file mode 100644 index 0000000..cdab305 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_docs.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_element.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_element.cpython-311.pyc new file mode 100644 index 0000000..b1c1192 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_element.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_formatter.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_formatter.cpython-311.pyc new file mode 100644 index 0000000..b5d3c05 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_formatter.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_fuzz.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_fuzz.cpython-311.pyc new file mode 100644 index 0000000..c4995d8 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_fuzz.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_html5lib.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_html5lib.cpython-311.pyc new file mode 100644 index 0000000..a470891 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_html5lib.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_htmlparser.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_htmlparser.cpython-311.pyc new file mode 100644 index 0000000..4b8069c Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_htmlparser.cpython-311.pyc differ diff --git 
a/venv/Lib/site-packages/bs4/tests/__pycache__/test_lxml.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_lxml.cpython-311.pyc new file mode 100644 index 0000000..f9a4d13 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_lxml.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_navigablestring.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_navigablestring.cpython-311.pyc new file mode 100644 index 0000000..faf45d7 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_navigablestring.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_pageelement.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_pageelement.cpython-311.pyc new file mode 100644 index 0000000..5e574fd Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_pageelement.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_soup.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_soup.cpython-311.pyc new file mode 100644 index 0000000..4e90275 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_soup.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_tag.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_tag.cpython-311.pyc new file mode 100644 index 0000000..df1e3b2 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_tag.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/__pycache__/test_tree.cpython-311.pyc b/venv/Lib/site-packages/bs4/tests/__pycache__/test_tree.cpython-311.pyc new file mode 100644 index 0000000..af88651 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/__pycache__/test_tree.cpython-311.pyc differ diff --git a/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase new file mode 100644 index 0000000..4828f8a --- /dev/null +++ b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-4670634698080256.testcase @@ -0,0 +1 @@ +

\ No newline at end of file diff --git a/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase new file mode 100644 index 0000000..8a585ce Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5000587759190016.testcase differ diff --git a/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase new file mode 100644 index 0000000..0fe66dd Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5167584867909632.testcase differ diff --git a/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase new file mode 100644 index 0000000..fd41142 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5270998950477824.testcase differ diff --git a/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase new file mode 100644 index 0000000..6248b2c --- /dev/null +++ b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5375146639360000.testcase @@ -0,0 +1 @@ + >tet>< \ No newline at end of file diff --git a/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase new file mode 100644 index 0000000..107da53 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5492400320282624.testcase differ diff --git a/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase new file mode 100644 index 0000000..367106c --- /dev/null +++ b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-5703933063462912.testcase @@ -0,0 +1,2 @@ + +t \ No newline at end of file diff --git a/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase new file mode 100644 index 0000000..a823d55 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6450958476902400.testcase differ diff --git a/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase new file mode 100644 index 0000000..65af44d Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/fuzz/clusterfuzz-testcase-minimized-bs4_fuzzer-6600557255327744.testcase differ diff --git a/venv/Lib/site-packages/bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase 
b/venv/Lib/site-packages/bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase new file mode 100644 index 0000000..5559adb Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/fuzz/crash-0d306a50c8ed8bcd0785b67000fcd5dea1d33f08.testcase differ diff --git a/venv/Lib/site-packages/bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase b/venv/Lib/site-packages/bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase new file mode 100644 index 0000000..8857115 Binary files /dev/null and b/venv/Lib/site-packages/bs4/tests/fuzz/crash-ffbdfa8a2b26f13537b68d3794b0478a4090ee4a.testcase differ diff --git a/venv/Lib/site-packages/bs4/tests/test_builder.py b/venv/Lib/site-packages/bs4/tests/test_builder.py new file mode 100644 index 0000000..7537071 --- /dev/null +++ b/venv/Lib/site-packages/bs4/tests/test_builder.py @@ -0,0 +1,29 @@ +import pytest +from unittest.mock import patch +from bs4.builder import DetectsXMLParsedAsHTML + +class TestDetectsXMLParsedAsHTML(object): + + @pytest.mark.parametrize( + "markup,looks_like_xml", + [("No xml declaration", False), + ("obviously HTMLActually XHTML", False), + (" < html>Tricky XHTML", False), + ("", True), + ] + ) + def test_warn_if_markup_looks_like_xml(self, markup, looks_like_xml): + # Test of our ability to guess at whether markup looks XML-ish + # _and_ not HTML-ish. + with patch('bs4.builder.DetectsXMLParsedAsHTML._warn') as mock: + for data in markup, markup.encode('utf8'): + result = DetectsXMLParsedAsHTML.warn_if_markup_looks_like_xml( + data + ) + assert result == looks_like_xml + if looks_like_xml: + assert mock.called + else: + assert not mock.called + mock.reset_mock() diff --git a/venv/Lib/site-packages/bs4/tests/test_builder_registry.py b/venv/Lib/site-packages/bs4/tests/test_builder_registry.py new file mode 100644 index 0000000..9327174 --- /dev/null +++ b/venv/Lib/site-packages/bs4/tests/test_builder_registry.py @@ -0,0 +1,137 @@ +"""Tests of the builder registry.""" + +import pytest +import warnings + +from bs4 import BeautifulSoup +from bs4.builder import ( + builder_registry as registry, + HTMLParserTreeBuilder, + TreeBuilderRegistry, +) + +from . import ( + HTML5LIB_PRESENT, + LXML_PRESENT, +) + +if HTML5LIB_PRESENT: + from bs4.builder import HTML5TreeBuilder + +if LXML_PRESENT: + from bs4.builder import ( + LXMLTreeBuilderForXML, + LXMLTreeBuilder, + ) + + +# TODO: Split out the lxml and html5lib tests into their own classes +# and gate with pytest.mark.skipIf. 
+class TestBuiltInRegistry(object):
+    """Test the built-in registry with the default builders registered."""
+
+    def test_combination(self):
+        assert registry.lookup('strict', 'html') == HTMLParserTreeBuilder
+        if LXML_PRESENT:
+            assert registry.lookup('fast', 'html') == LXMLTreeBuilder
+            assert registry.lookup('permissive', 'xml') == LXMLTreeBuilderForXML
+        if HTML5LIB_PRESENT:
+            assert registry.lookup('html5lib', 'html') == HTML5TreeBuilder
+
+    def test_lookup_by_markup_type(self):
+        if LXML_PRESENT:
+            assert registry.lookup('html') == LXMLTreeBuilder
+            assert registry.lookup('xml') == LXMLTreeBuilderForXML
+        else:
+            assert registry.lookup('xml') == None
+            if HTML5LIB_PRESENT:
+                assert registry.lookup('html') == HTML5TreeBuilder
+            else:
+                assert registry.lookup('html') == HTMLParserTreeBuilder
+
+    def test_named_library(self):
+        if LXML_PRESENT:
+            assert registry.lookup('lxml', 'xml') == LXMLTreeBuilderForXML
+            assert registry.lookup('lxml', 'html') == LXMLTreeBuilder
+        if HTML5LIB_PRESENT:
+            assert registry.lookup('html5lib') == HTML5TreeBuilder
+
+        assert registry.lookup('html.parser') == HTMLParserTreeBuilder
+
+    def test_beautifulsoup_constructor_does_lookup(self):
+
+        with warnings.catch_warnings(record=True) as w:
+            # This will create a warning about not explicitly
+            # specifying a parser, but we'll ignore it.
+
+            # You can pass in a string.
+            BeautifulSoup("", features="html")
+            # Or a list of strings.
+            BeautifulSoup("", features=["html", "fast"])
+            pass
+
+        # You'll get an exception if BS can't find an appropriate
+        # builder.
+        with pytest.raises(ValueError):
+            BeautifulSoup("", features="no-such-feature")
+
+class TestRegistry(object):
+    """Test the TreeBuilderRegistry class in general."""
+
+    def setup_method(self):
+        self.registry = TreeBuilderRegistry()
+
+    def builder_for_features(self, *feature_list):
+        cls = type('Builder_' + '_'.join(feature_list),
+                   (object,), {'features' : feature_list})
+
+        self.registry.register(cls)
+        return cls
+
+    def test_register_with_no_features(self):
+        builder = self.builder_for_features()
+
+        # Since the builder advertises no features, you can't find it
+        # by looking up features.
+        assert self.registry.lookup('foo') is None
+
+        # But you can find it by doing a lookup with no features, if
+        # this happens to be the only registered builder.
+        assert self.registry.lookup() == builder
+
+    def test_register_with_features_makes_lookup_succeed(self):
+        builder = self.builder_for_features('foo', 'bar')
+        assert self.registry.lookup('foo') is builder
+        assert self.registry.lookup('bar') is builder
+
+    def test_lookup_fails_when_no_builder_implements_feature(self):
+        builder = self.builder_for_features('foo', 'bar')
+        assert self.registry.lookup('baz') is None
+
+    def test_lookup_gets_most_recent_registration_when_no_feature_specified(self):
+        builder1 = self.builder_for_features('foo')
+        builder2 = self.builder_for_features('bar')
+        assert self.registry.lookup() == builder2
+
+    def test_lookup_fails_when_no_tree_builders_registered(self):
+        assert self.registry.lookup() is None
+
+    def test_lookup_gets_most_recent_builder_supporting_all_features(self):
+        has_one = self.builder_for_features('foo')
+        has_the_other = self.builder_for_features('bar')
+        has_both_early = self.builder_for_features('foo', 'bar', 'baz')
+        has_both_late = self.builder_for_features('foo', 'bar', 'quux')
+        lacks_one = self.builder_for_features('bar')
+        has_the_other = self.builder_for_features('foo')
+
+        # There are two builders featuring 'foo' and 'bar', but
+        # the one that also features 'quux' was registered later.
+        assert self.registry.lookup('foo', 'bar') == has_both_late
+
+        # There is only one builder featuring 'foo', 'bar', and 'baz'.
+        assert self.registry.lookup('foo', 'bar', 'baz') == has_both_early
+
+    def test_lookup_fails_when_cannot_reconcile_requested_features(self):
+        builder1 = self.builder_for_features('foo', 'bar')
+        builder2 = self.builder_for_features('foo', 'baz')
+        assert self.registry.lookup('bar', 'baz') is None
diff --git a/venv/Lib/site-packages/bs4/tests/test_css.py b/venv/Lib/site-packages/bs4/tests/test_css.py
new file mode 100644
index 0000000..359dbcd
--- /dev/null
+++ b/venv/Lib/site-packages/bs4/tests/test_css.py
@@ -0,0 +1,487 @@
+import pytest
+import types
+from unittest.mock import MagicMock
+
+from bs4 import (
+    CSS,
+    BeautifulSoup,
+    ResultSet,
+)
+
+from . import (
+    SoupTest,
+    SOUP_SIEVE_PRESENT,
+)
+
+if SOUP_SIEVE_PRESENT:
+    from soupsieve import SelectorSyntaxError
+
+
+@pytest.mark.skipif(not SOUP_SIEVE_PRESENT, reason="Soup Sieve not installed")
+class TestCSSSelectors(SoupTest):
+    """Test basic CSS selector functionality.
+
+    This functionality is implemented in soupsieve, which has a much
+    more comprehensive test suite, so this is basically an extra check
+    that soupsieve works as expected.
+    """
+
+    HTML = """
+<html>
+<head>
+<title>The title</title>
+<link rel="stylesheet" href="blah.css" type="text/css" id="l1">
+</head>
+<body>
+<custom-dashed-tag class="dashed" id="dash1">Hello there.</custom-dashed-tag>
+<div id="main" class="fancy">
+<div id="inner">
+<h1 id="header1">An H1</h1>
+<p>Some text</p>
+<p class="onep" id="p1">Some more text</p>
+<h2 id="header2">An H2</h2>
+<p class="class1 class2 class3" id="pmulti">Another</p>
+<a href="http://bob.example.org/" rel="friend met" id="bob">Bob</a>
+<h2 id="header3">Another H2</h2>
+<a id="me" href="http://simonwillison.net/" rel="me">me</a>
+<span class="s1">
+<a href="#" id="s1a1">span1a1</a>
+<a href="#" id="s1a2">span1a2 <span id="s1a2s1">test</span></a>
+<span class="span2">
+<a href="#" id="s2a1">span2a1</a>
+</span>
+<span class="span3"></span>
+<custom-dashed-tag class="dashed" id="dash2"/>
+<div data-tag="dashedvalue" id="data1"/>
+</span>
+</div>
+<x id="xid">
+<z id="zida"/>
+<z id="zidab"/>
+<z id="zidac"/>
+</x>
+<y id="yid">
+<z id="zidb"/>
+</y>
+<p lang="en" id="lang-en">English</p>
+<p lang="en-gb" id="lang-en-gb">English UK</p>
+<p lang="en-us" id="lang-en-us">English US</p>
+<p lang="fr" id="lang-fr">French</p>
+</div>
+
+<div id="footer">
+</div>
+</body>
+</html>
+"""
+
+    def setup_method(self):
+        self.soup = BeautifulSoup(self.HTML, 'html.parser')
+
+    def assert_selects(self, selector, expected_ids, **kwargs):
+        results = self.soup.select(selector, **kwargs)
+        assert isinstance(results, ResultSet)
+        el_ids = [el['id'] for el in results]
+        el_ids.sort()
+        expected_ids.sort()
+        assert expected_ids == el_ids, "Selector %s, expected [%s], got [%s]" % (
+            selector, ', '.join(expected_ids), ', '.join(el_ids)
+        )
+
+    assertSelect = assert_selects
+
+    def assert_select_multiple(self, *tests):
+        for selector, expected_ids in tests:
+            self.assert_selects(selector, expected_ids)
+
+    def test_precompiled(self):
+        sel = self.soup.css.compile('div')
+
+        els = self.soup.select(sel)
+        assert len(els) == 4
+        for div in els:
+            assert div.name == 'div'
+
+        el = self.soup.select_one(sel)
+        assert 'main' == el['id']
+
+    def test_one_tag_one(self):
+        els = self.soup.select('title')
+        assert len(els) == 1
+        assert els[0].name == 'title'
+        assert els[0].contents == ['The title']
+
+    def test_one_tag_many(self):
+        els = self.soup.select('div')
+        assert len(els) == 4
+        for div in els:
+            assert div.name == 'div'
+
+        el = self.soup.select_one('div')
+        assert 'main' == el['id']
+
+    def test_select_one_returns_none_if_no_match(self):
+        match = self.soup.select_one('nonexistenttag')
+        assert None == match
+
+    def test_tag_in_tag_one(self):
+        els = self.soup.select('div div')
+        self.assert_selects('div div', ['inner', 'data1'])
+
+    def test_tag_in_tag_many(self):
+        for selector in ('html div', 'html body div', 'body div'):
+            self.assert_selects(selector, ['data1', 'main', 'inner', 'footer'])
+
+    def test_limit(self):
+        self.assert_selects('html div', ['main'], limit=1)
+        self.assert_selects('html body div', ['inner', 'main'], limit=2)
+        self.assert_selects('body div', ['data1', 'main', 'inner', 'footer'],
+                            limit=10)
+
+    def test_tag_no_match(self):
+        assert len(self.soup.select('del')) == 0
+
+    def test_invalid_tag(self):
+        with pytest.raises(SelectorSyntaxError):
+            self.soup.select('tag%t')
+
+    def test_select_dashed_tag_ids(self):
+        self.assert_selects('custom-dashed-tag', ['dash1', 'dash2'])
+
+    def test_select_dashed_by_id(self):
+        dashed = self.soup.select('custom-dashed-tag[id=\"dash2\"]')
+        assert dashed[0].name == 'custom-dashed-tag'
+        assert dashed[0]['id'] == 'dash2'
+
+    def test_dashed_tag_text(self):
+        assert self.soup.select('body > custom-dashed-tag')[0].text == 'Hello there.'
+
+    def test_select_dashed_matches_find_all(self):
+        assert self.soup.select('custom-dashed-tag') == self.soup.find_all('custom-dashed-tag')
+
+    def test_header_tags(self):
+        self.assert_select_multiple(
+            ('h1', ['header1']),
+            ('h2', ['header2', 'header3']),
+        )
+
+    def test_class_one(self):
+        for selector in ('.onep', 'p.onep', 'html p.onep'):
+            els = self.soup.select(selector)
+            assert len(els) == 1
+            assert els[0].name == 'p'
+            assert els[0]['class'] == ['onep']
+
+    def test_class_mismatched_tag(self):
+        els = self.soup.select('div.onep')
+        assert len(els) == 0
+
+    def test_one_id(self):
+        for selector in ('div#inner', '#inner', 'div div#inner'):
+            self.assert_selects(selector, ['inner'])
+
+    def test_bad_id(self):
+        els = self.soup.select('#doesnotexist')
+        assert len(els) == 0
+
+    def test_items_in_id(self):
+        els = self.soup.select('div#inner p')
+        assert len(els) == 3
+        for el in els:
+            assert el.name == 'p'
+        assert els[1]['class'] == ['onep']
+        assert not els[0].has_attr('class')
+
+    def test_a_bunch_of_emptys(self):
+        for selector in ('div#main del', 'div#main div.oops', 'div div#main'):
+            assert len(self.soup.select(selector)) == 0
+
+    def test_multi_class_support(self):
+        for selector in ('.class1', 'p.class1', '.class2', 'p.class2',
+                         '.class3', 'p.class3', 'html p.class2', 'div#inner .class2'):
+            self.assert_selects(selector, ['pmulti'])
+
+    def test_multi_class_selection(self):
+        for selector in ('.class1.class3', '.class3.class2',
+                         '.class1.class2.class3'):
+            self.assert_selects(selector, ['pmulti'])
+
+    def test_child_selector(self):
+        self.assert_selects('.s1 > a', ['s1a1', 's1a2'])
+        self.assert_selects('.s1 > a span', ['s1a2s1'])
+
+    def test_child_selector_id(self):
+        self.assert_selects('.s1 > a#s1a2 span', ['s1a2s1'])
+
+    def test_attribute_equals(self):
+        self.assert_select_multiple(
+            ('p[class="onep"]', ['p1']),
+            ('p[id="p1"]', ['p1']),
+            ('[class="onep"]', ['p1']),
+            ('[id="p1"]', ['p1']),
+            ('link[rel="stylesheet"]', ['l1']),
+            ('link[type="text/css"]', ['l1']),
+            ('link[href="blah.css"]', ['l1']),
+            ('link[href="no-blah.css"]', []),
+            ('[rel="stylesheet"]', ['l1']),
+            ('[type="text/css"]', ['l1']),
+            ('[href="blah.css"]', ['l1']),
+            ('[href="no-blah.css"]', []),
+            ('p[href="no-blah.css"]', []),
+            ('[href="no-blah.css"]', []),
+        )
+
+    def test_attribute_tilde(self):
+        self.assert_select_multiple(
+            ('p[class~="class1"]', ['pmulti']),
+            ('p[class~="class2"]', ['pmulti']),
+            ('p[class~="class3"]', ['pmulti']),
+            ('[class~="class1"]', ['pmulti']),
+            ('[class~="class2"]', ['pmulti']),
+            ('[class~="class3"]', ['pmulti']),
+            ('a[rel~="friend"]', ['bob']),
+            ('a[rel~="met"]', ['bob']),
+            ('[rel~="friend"]', ['bob']),
+            ('[rel~="met"]', ['bob']),
+        )
+
+    def test_attribute_startswith(self):
+        self.assert_select_multiple(
+            ('[rel^="style"]', ['l1']),
+            ('link[rel^="style"]', ['l1']),
+            ('notlink[rel^="notstyle"]', []),
+            ('[rel^="notstyle"]', []),
+            ('link[rel^="notstyle"]', []),
+            ('link[href^="bla"]', ['l1']),
+            ('a[href^="http://"]', ['bob', 'me']),
+            ('[href^="http://"]', ['bob', 'me']),
+            ('[id^="p"]', ['pmulti', 'p1']),
+            ('[id^="m"]', ['me', 'main']),
+            ('div[id^="m"]', ['main']),
+            ('a[id^="m"]', ['me']),
+            ('div[data-tag^="dashed"]', ['data1'])
+        )
+
+    def test_attribute_endswith(self):
+        self.assert_select_multiple(
+            ('[href$=".css"]', ['l1']),
+            ('link[href$=".css"]', ['l1']),
+            ('link[id$="1"]', ['l1']),
+            ('[id$="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's2a1', 's1a2s1', 'dash1']),
+            ('div[id$="1"]', ['data1']),
+            ('[id$="noending"]', []),
+        )
+
+    def test_attribute_contains(self):
+        self.assert_select_multiple(
+            # From test_attribute_startswith
+            ('[rel*="style"]', ['l1']),
+            ('link[rel*="style"]', ['l1']),
+            ('notlink[rel*="notstyle"]', []),
+            ('[rel*="notstyle"]', []),
+            ('link[rel*="notstyle"]', []),
+            ('link[href*="bla"]', ['l1']),
+            ('[href*="http://"]', ['bob', 'me']),
+            ('[id*="p"]', ['pmulti', 'p1']),
+            ('div[id*="m"]', ['main']),
+            ('a[id*="m"]', ['me']),
+            # From test_attribute_endswith
+            ('[href*=".css"]', ['l1']),
+            ('link[href*=".css"]', ['l1']),
+            ('link[id*="1"]', ['l1']),
+            ('[id*="1"]', ['data1', 'l1', 'p1', 'header1', 's1a1', 's1a2', 's2a1', 's1a2s1', 'dash1']),
+            ('div[id*="1"]', ['data1']),
+            ('[id*="noending"]', []),
+            # New for this test
+            ('[href*="."]', ['bob', 'me', 'l1']),
+            ('a[href*="."]', ['bob', 'me']),
+            ('link[href*="."]', ['l1']),
+            ('div[id*="n"]', ['main', 'inner']),
+            ('div[id*="nn"]', ['inner']),
+            ('div[data-tag*="edval"]', ['data1'])
+        )
+
+    def test_attribute_exact_or_hypen(self):
+        self.assert_select_multiple(
+            ('p[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
+            ('[lang|="en"]', ['lang-en', 'lang-en-gb', 'lang-en-us']),
+            ('p[lang|="fr"]', ['lang-fr']),
+            ('p[lang|="gb"]', []),
+        )
+
+    def test_attribute_exists(self):
+        self.assert_select_multiple(
+            ('[rel]', ['l1', 'bob', 'me']),
+            ('link[rel]', ['l1']),
+            ('a[rel]', ['bob', 'me']),
+            ('[lang]', ['lang-en', 'lang-en-gb', 'lang-en-us', 'lang-fr']),
+            ('p[class]', ['p1', 'pmulti']),
+            ('[blah]', []),
+            ('p[blah]', []),
+            ('div[data-tag]', ['data1'])
+        )
+
+    def test_quoted_space_in_selector_name(self):
+        html = """<div style="display: wrong">nope</div>
+<div style="display: right">yes</div>
+"""
+        soup = BeautifulSoup(html, 'html.parser')
+        [chosen] = soup.select('div[style="display: right"]')
+        assert "yes" == chosen.string
+
+    def test_unsupported_pseudoclass(self):
+        with pytest.raises(NotImplementedError):
+            self.soup.select("a:no-such-pseudoclass")
+
+        with pytest.raises(SelectorSyntaxError):
+            self.soup.select("a:nth-of-type(a)")
+
+    def test_nth_of_type(self):
+        # Try to select first paragraph
+        els = self.soup.select('div#inner p:nth-of-type(1)')
+        assert len(els) == 1
+        assert els[0].string == 'Some text'
+
+        # Try to select third paragraph
+        els = self.soup.select('div#inner p:nth-of-type(3)')
+        assert len(els) == 1
+        assert els[0].string == 'Another'
+
+        # Try to select (non-existent!) fourth paragraph
+        els = self.soup.select('div#inner p:nth-of-type(4)')
+        assert len(els) == 0
+
+        # Zero will select no tags.
+        els = self.soup.select('div p:nth-of-type(0)')
+        assert len(els) == 0
+
+    def test_nth_of_type_direct_descendant(self):
+        els = self.soup.select('div#inner > p:nth-of-type(1)')
+        assert len(els) == 1
+        assert els[0].string == 'Some text'
+
+    def test_id_child_selector_nth_of_type(self):
+        self.assert_selects('#inner > p:nth-of-type(2)', ['p1'])
+
+    def test_select_on_element(self):
+        # Other tests operate on the tree; this operates on an element
+        # within the tree.
+        inner = self.soup.find("div", id="main")
+        selected = inner.select("div")
+        # The <div id="inner"> tag was selected. The <div id="footer">