diff --git a/pythonFiles/tests/pytestadapter/.data/unittest_folder/test_subtract.py b/pythonFiles/tests/pytestadapter/.data/unittest_folder/test_subtract.py
index 80087fed0f3c..087e5140def4 100644
--- a/pythonFiles/tests/pytestadapter/.data/unittest_folder/test_subtract.py
+++ b/pythonFiles/tests/pytestadapter/.data/unittest_folder/test_subtract.py
@@ -22,4 +22,5 @@ def test_subtract_negative_numbers(  # test_marker--test_subtract_negative_numbe
         self,
     ):
         result = subtract(-2, -3)
-        self.assertEqual(result, 1)
+        # This is intentional to test assertion failures
+        self.assertEqual(result, 100000)
diff --git a/pythonFiles/tests/pytestadapter/expected_discovery_test_output.py b/pythonFiles/tests/pytestadapter/expected_discovery_test_output.py
index e1422a81c979..8e96d109ba78 100644
--- a/pythonFiles/tests/pytestadapter/expected_discovery_test_output.py
+++ b/pythonFiles/tests/pytestadapter/expected_discovery_test_output.py
@@ -3,6 +3,8 @@
 
 from .helpers import TEST_DATA_PATH, find_test_line_number
 
+# This file contains the expected output dictionaries for test discovery and is used in test_discovery.py.
+
 # This is the expected output for the empty_discovery.py file.
 # └──
 TEST_DATA_PATH_STR = os.fspath(TEST_DATA_PATH)
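The `# test_marker--<name>` comments visible above are how the adapter's own tests locate a test's line number when building the expected discovery output. The sketch below shows one way such a marker lookup could work; `find_marker_line` is a hypothetical name, and the real `find_test_line_number` in helpers.py may be implemented differently.

import pathlib


def find_marker_line(test_name: str, test_file_path: pathlib.Path) -> str:
    """Return the 1-based line number of the line carrying the test's marker comment."""
    marker = f"test_marker--{test_name}"
    for lineno, line in enumerate(
        test_file_path.read_text(encoding="utf-8").splitlines(), start=1
    ):
        if marker in line:
            return str(lineno)
    raise ValueError(f"Marker {marker!r} not found in {test_file_path}")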
diff --git a/pythonFiles/tests/pytestadapter/expected_execution_test_output.py b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py
new file mode 100644
index 000000000000..a894403c7d71
--- /dev/null
+++ b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py
@@ -0,0 +1,328 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+
+TEST_SUBTRACT_FUNCTION = "unittest_folder/test_subtract.py::TestSubtractFunction::"
+TEST_ADD_FUNCTION = "unittest_folder/test_add.py::TestAddFunction::"
+SUCCESS = "success"
+FAILURE = "failure"
+TEST_SUBTRACT_FUNCTION_NEGATIVE_NUMBERS_ERROR = "self = <test_subtract.TestSubtractFunction testMethod=test_subtract_negative_numbers>\n\n    def test_subtract_negative_numbers(  # test_marker--test_subtract_negative_numbers\n        self,\n    ):\n        result = subtract(-2, -3)\n>       self.assertEqual(result, 100000)\nE       AssertionError: 1 != 100000\n\nunittest_folder/test_subtract.py:25: AssertionError"
+
+# This is the expected output for executing the unittest_folder tests.
+# └── unittest_folder
+#    ├── test_add.py
+#    │   └── TestAddFunction
+#    │       ├── test_add_negative_numbers: success
+#    │       └── test_add_positive_numbers: success
+#    └── test_subtract.py
+#        └── TestSubtractFunction
+#            ├── test_subtract_negative_numbers: failure
+#            └── test_subtract_positive_numbers: success
+uf_execution_expected_output = {
+    f"{TEST_ADD_FUNCTION}test_add_negative_numbers": {
+        "test": f"{TEST_ADD_FUNCTION}test_add_negative_numbers",
+        "outcome": SUCCESS,
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    f"{TEST_ADD_FUNCTION}test_add_positive_numbers": {
+        "test": f"{TEST_ADD_FUNCTION}test_add_positive_numbers",
+        "outcome": SUCCESS,
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    f"{TEST_SUBTRACT_FUNCTION}test_subtract_negative_numbers": {
+        "test": f"{TEST_SUBTRACT_FUNCTION}test_subtract_negative_numbers",
+        "outcome": FAILURE,
+        "message": "ERROR MESSAGE",
+        "traceback": None,
+        "subtest": None,
+    },
+    f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers": {
+        "test": f"{TEST_SUBTRACT_FUNCTION}test_subtract_positive_numbers",
+        "outcome": SUCCESS,
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+}
+
+
+# This is the expected output for executing only the test_add.py tests in the unittest_folder.
+# └── unittest_folder
+#    └── test_add.py
+#        └── TestAddFunction
+#            ├── test_add_negative_numbers: success
+#            └── test_add_positive_numbers: success
+uf_single_file_expected_output = {
+    f"{TEST_ADD_FUNCTION}test_add_negative_numbers": {
+        "test": f"{TEST_ADD_FUNCTION}test_add_negative_numbers",
+        "outcome": SUCCESS,
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    f"{TEST_ADD_FUNCTION}test_add_positive_numbers": {
+        "test": f"{TEST_ADD_FUNCTION}test_add_positive_numbers",
+        "outcome": SUCCESS,
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+}
+
+# This is the expected output for executing only a single method in the unittest_folder.
+# └── unittest_folder
+#    └── test_add.py
+#        └── TestAddFunction
+#            └── test_add_positive_numbers: success
uf_single_method_execution_expected_output = {
+    f"{TEST_ADD_FUNCTION}test_add_positive_numbers": {
+        "test": f"{TEST_ADD_FUNCTION}test_add_positive_numbers",
+        "outcome": SUCCESS,
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    }
+}
+
+# This is the expected output for a unittest_folder run where the two tests
+# run are in different files.
+# └── unittest_folder
+#    ├── test_add.py
+#    │   └── TestAddFunction
+#    │       └── test_add_positive_numbers: success
+#    └── test_subtract.py
+#        └── TestSubtractFunction
+#            └── test_subtract_positive_numbers: success
+uf_non_adjacent_tests_execution_expected_output = {
+    TEST_SUBTRACT_FUNCTION
+    + "test_subtract_positive_numbers": {
+        "test": TEST_SUBTRACT_FUNCTION + "test_subtract_positive_numbers",
+        "outcome": SUCCESS,
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    TEST_ADD_FUNCTION
+    + "test_add_positive_numbers": {
+        "test": TEST_ADD_FUNCTION + "test_add_positive_numbers",
+        "outcome": SUCCESS,
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+}
+
+# This is the expected output for the simple_pytest.py file.
+# └── simple_pytest.py
+#    └── test_function: success
+simple_execution_pytest_expected_output = {
+    "simple_pytest.py::test_function": {
+        "test": "simple_pytest.py::test_function",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    }
+}
+
+# This is the expected output for the unittest_pytest_same_file.py file.
+# └── unittest_pytest_same_file.py
+#    ├── TestExample
+#    │   └── test_true_unittest: success
+#    └── test_true_pytest: success
+unit_pytest_same_file_execution_expected_output = {
+    "unittest_pytest_same_file.py::TestExample::test_true_unittest": {
+        "test": "unittest_pytest_same_file.py::TestExample::test_true_unittest",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "unittest_pytest_same_file.py::test_true_pytest": {
+        "test": "unittest_pytest_same_file.py::test_true_pytest",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+}
+
+# This is the expected output for the dual_level_nested_folder tests.
+# └── dual_level_nested_folder
+#    ├── test_top_folder.py
+#    │   ├── test_top_function_t: success
+#    │   └── test_top_function_f: failure
+#    └── nested_folder_one
+#        └── test_bottom_folder.py
+#            ├── test_bottom_function_t: success
+#            └── test_bottom_function_f: failure
+dual_level_nested_folder_execution_expected_output = {
+    "dual_level_nested_folder/test_top_folder.py::test_top_function_t": {
+        "test": "dual_level_nested_folder/test_top_folder.py::test_top_function_t",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "dual_level_nested_folder/test_top_folder.py::test_top_function_f": {
+        "test": "dual_level_nested_folder/test_top_folder.py::test_top_function_f",
+        "outcome": "failure",
+        "message": "ERROR MESSAGE",
+        "traceback": None,
+        "subtest": None,
+    },
+    "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t": {
+        "test": "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f": {
+        "test": "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f",
+        "outcome": "failure",
+        "message": "ERROR MESSAGE",
+        "traceback": None,
+        "subtest": None,
+    },
+}
+
+# This is the expected output for the double_nested_folder tests.
+# └── double_nested_folder
+#    └── nested_folder_one
+#        └── nested_folder_two
+#            └── test_nest.py
+#                └── test_function: success
+double_nested_folder_expected_execution_output = {
+    "double_nested_folder/nested_folder_one/nested_folder_two/test_nest.py::test_function": {
+        "test": "double_nested_folder/nested_folder_one/nested_folder_two/test_nest.py::test_function",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    }
+}
+
+# This is the expected output for the parametrize_tests.py tests.
+# └── parametrize_tests.py
+#    ├── test_adding[3+5-8]: success
+#    ├── test_adding[2+4-6]: success
+#    └── test_adding[6+9-16]: failure
+parametrize_tests_expected_execution_output = {
+    "parametrize_tests.py::test_adding[3+5-8]": {
+        "test": "parametrize_tests.py::test_adding[3+5-8]",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "parametrize_tests.py::test_adding[2+4-6]": {
+        "test": "parametrize_tests.py::test_adding[2+4-6]",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "parametrize_tests.py::test_adding[6+9-16]": {
+        "test": "parametrize_tests.py::test_adding[6+9-16]",
+        "outcome": "failure",
+        "message": "ERROR MESSAGE",
+        "traceback": None,
+        "subtest": None,
+    },
+}
+
+# This is the expected output for running a single parameterized test.
+# └── parametrize_tests.py
+#    └── test_adding[3+5-8]: success
+single_parametrize_tests_expected_execution_output = {
+    "parametrize_tests.py::test_adding[3+5-8]": {
+        "test": "parametrize_tests.py::test_adding[3+5-8]",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+}
+
+# This is the expected output for the doctest tests.
+# └── text_docstring.txt
+#    └── text_docstring: success
+doctest_pytest_expected_execution_output = {
+    "text_docstring.txt::text_docstring.txt": {
+        "test": "text_docstring.txt::text_docstring.txt",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    }
+}
+
+# Will run all tests in the cwd that fit the test file naming pattern.
+no_test_ids_pytest_execution_expected_output = {
+    "double_nested_folder/nested_folder_one/nested_folder_two/test_nest.py::test_function": {
+        "test": "double_nested_folder/nested_folder_one/nested_folder_two/test_nest.py::test_function",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "dual_level_nested_folder/test_top_folder.py::test_top_function_t": {
+        "test": "dual_level_nested_folder/test_top_folder.py::test_top_function_t",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "dual_level_nested_folder/test_top_folder.py::test_top_function_f": {
+        "test": "dual_level_nested_folder/test_top_folder.py::test_top_function_f",
+        "outcome": "failure",
+        "message": "ERROR MESSAGE",
+        "traceback": None,
+        "subtest": None,
+    },
+    "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t": {
+        "test": "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f": {
+        "test": "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f",
+        "outcome": "failure",
+        "message": "ERROR MESSAGE",
+        "traceback": None,
+        "subtest": None,
+    },
+    "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers": {
+        "test": "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers": {
+        "test": "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+    "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers": {
+        "test": "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers",
+        "outcome": "failure",
+        "message": "ERROR MESSAGE",
+        "traceback": None,
+        "subtest": None,
+    },
+    "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers": {
+        "test": "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers",
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+}
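The parametrized ids above (e.g. `test_adding[3+5-8]`) come from pytest joining the string and int parameters with a hyphen. The .data/parametrize_tests.py fixture itself is not part of this diff, so the following is a plausible reconstruction, in which the 6+9-16 case fails on purpose (6 + 9 is 15, not 16):

import pytest


@pytest.mark.parametrize("actual, expected", [("3+5", 8), ("2+4", 6), ("6+9", 16)])
def test_adding(actual, expected):
    # eval turns the "a+b" string into its sum; the last case fails by design.
    assert eval(actual) == expected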
diff --git a/pythonFiles/tests/pytestadapter/helpers.py b/pythonFiles/tests/pytestadapter/helpers.py
index 8d485456c145..b078439f6eac 100644
--- a/pythonFiles/tests/pytestadapter/helpers.py
+++ b/pythonFiles/tests/pytestadapter/helpers.py
@@ -11,7 +11,7 @@
 import subprocess
 import sys
 import uuid
-from typing import Dict, List, Union
+from typing import Any, Dict, List, Optional, Union
 
 TEST_DATA_PATH = pathlib.Path(__file__).parent / ".data"
 from typing_extensions import TypedDict
@@ -83,7 +83,7 @@ def _new_sock() -> socket.socket:
     )
 
 
-def process_rpc_json(data: str) -> Dict[str, str]:
+def process_rpc_json(data: str) -> Dict[str, Any]:
     """Process the JSON data which comes from the server which runs the pytest discovery."""
     str_stream: io.StringIO = io.StringIO(data)
@@ -107,7 +107,7 @@ def process_rpc_json(data: str) -> Dict[str, str]:
     return json.loads(raw_json)
 
 
-def runner(args: List[str]) -> Union[Dict[str, str], None]:
+def runner(args: List[str]) -> Optional[Dict[str, Any]]:
     """Run the pytest discovery and return the JSON data from the server."""
     process_args: List[str] = [
         sys.executable,
diff --git a/pythonFiles/tests/pytestadapter/test_discovery.py b/pythonFiles/tests/pytestadapter/test_discovery.py
index 57fa9d624bd6..bb6e7255704e 100644
--- a/pythonFiles/tests/pytestadapter/test_discovery.py
+++ b/pythonFiles/tests/pytestadapter/test_discovery.py
@@ -2,7 +2,6 @@
 # Licensed under the MIT License.
 import os
 import shutil
-import signal
 
 import pytest
@@ -31,10 +30,10 @@ def test_syntax_error(tmp_path):
     shutil.copyfile(file_path, p)
     actual = runner(["--collect-only", os.fspath(p)])
     assert actual
-    assert all(item in actual for item in ("status", "cwd", "errors"))
+    assert all(item in actual for item in ("status", "cwd", "error"))
     assert actual["status"] == "error"
     assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
-    assert len(actual["errors"]) == 2
+    assert len(actual["error"]) == 2
 
 
 def test_parameterized_error_collect():
@@ -45,10 +44,10 @@ def test_parameterized_error_collect():
     file_path_str = "error_parametrize_discovery.py"
     actual = runner(["--collect-only", file_path_str])
     assert actual
-    assert all(item in actual for item in ("status", "cwd", "errors"))
+    assert all(item in actual for item in ("status", "cwd", "error"))
     assert actual["status"] == "error"
     assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
-    assert len(actual["errors"]) == 2
+    assert len(actual["error"]) == 2
 
 
 @pytest.mark.parametrize(
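For context on what `runner` and `process_rpc_json` exchange: `execution_post` and `post_response` (further down in this diff) frame each JSON payload with HTTP-style headers. Below is a self-contained sketch of the parsing side; `parse_rpc_message` is an illustrative name, not the exact helpers.py implementation.

import io
import json
from typing import Any, Dict


def parse_rpc_message(data: str) -> Dict[str, Any]:
    """Read headers until the blank separator line, then load the JSON body."""
    stream = io.StringIO(data)
    length = 0
    while True:
        header = stream.readline().strip()
        if not header:
            break
        if header.startswith("Content-Length:"):
            length = int(header.split(":", 1)[1])
    return json.loads(stream.read(length))


message = 'Content-Length: 16\nContent-Type: application/json\nRequest-uuid: 1234\n\n{"status": "ok"}'
assert parse_rpc_message(message) == {"status": "ok"}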
diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py
new file mode 100644
index 000000000000..8613deb96098
--- /dev/null
+++ b/pythonFiles/tests/pytestadapter/test_execution.py
@@ -0,0 +1,165 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+import os
+import shutil
+
+import pytest
+from tests.pytestadapter import expected_execution_test_output
+
+from .helpers import TEST_DATA_PATH, runner
+
+
+def test_syntax_error_execution(tmp_path):
+    """Test pytest execution on a file that has a syntax error.
+
+    Copies the contents of a .txt file to a .py file in the temporary directory
+    and then runs pytest execution on it.
+
+    The JSON should still be returned, but the error list should be present.
+
+    Keyword arguments:
+    tmp_path -- pytest fixture that creates a temporary directory.
+    """
+    # Some files are saved as .txt so the syntax error does not surface for the
+    # extension as a whole. The file is renamed to .py before this test runs
+    # in order to exercise the error handling.
+    file_path = TEST_DATA_PATH / "error_syntax_discovery.txt"
+    temp_dir = tmp_path / "temp_data"
+    temp_dir.mkdir()
+    p = temp_dir / "error_syntax_discovery.py"
+    shutil.copyfile(file_path, p)
+    actual = runner(["error_syntax_discover.py::test_function"])
+    assert actual
+    assert all(item in actual for item in ("status", "cwd", "error"))
+    assert actual["status"] == "error"
+    assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
+    assert len(actual["error"]) == 1
+
+
+def test_bad_id_error_execution():
+    """Test pytest execution with a non-existent test_id.
+
+    The JSON should still be returned, but the error list should be present.
+    """
+    actual = runner(["not/a/real::test_id"])
+    assert actual
+    assert all(item in actual for item in ("status", "cwd", "error"))
+    assert actual["status"] == "error"
+    assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
+    assert len(actual["error"]) == 1
+
+
+@pytest.mark.parametrize(
+    "test_ids, expected_const",
+    [
+        (
+            [
+                "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers",
+                "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers",
+                "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers",
+                "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_negative_numbers",
+            ],
+            expected_execution_test_output.uf_execution_expected_output,
+        ),
+        (
+            [
+                "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers",
+                "unittest_folder/test_add.py::TestAddFunction::test_add_negative_numbers",
+            ],
+            expected_execution_test_output.uf_single_file_expected_output,
+        ),
+        (
+            [
+                "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers",
+            ],
+            expected_execution_test_output.uf_single_method_execution_expected_output,
+        ),
+        (
+            [
+                "unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers",
+                "unittest_folder/test_subtract.py::TestSubtractFunction::test_subtract_positive_numbers",
+            ],
+            expected_execution_test_output.uf_non_adjacent_tests_execution_expected_output,
+        ),
+        (
+            [
+                "unittest_pytest_same_file.py::TestExample::test_true_unittest",
+                "unittest_pytest_same_file.py::test_true_pytest",
+            ],
+            expected_execution_test_output.unit_pytest_same_file_execution_expected_output,
+        ),
+        (
+            [
+                "dual_level_nested_folder/test_top_folder.py::test_top_function_t",
+                "dual_level_nested_folder/test_top_folder.py::test_top_function_f",
+                "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_t",
+                "dual_level_nested_folder/nested_folder_one/test_bottom_folder.py::test_bottom_function_f",
+            ],
+            expected_execution_test_output.dual_level_nested_folder_execution_expected_output,
+        ),
+        (
+            [
+                "double_nested_folder/nested_folder_one/nested_folder_two/test_nest.py::test_function"
+            ],
+            expected_execution_test_output.double_nested_folder_expected_execution_output,
+        ),
+        (
+            [
+                "parametrize_tests.py::test_adding[3+5-8]",
+                "parametrize_tests.py::test_adding[2+4-6]",
+                "parametrize_tests.py::test_adding[6+9-16]",
+            ],
+            expected_execution_test_output.parametrize_tests_expected_execution_output,
+        ),
+        (
+            [
+                "parametrize_tests.py::test_adding[3+5-8]",
+            ],
+            expected_execution_test_output.single_parametrize_tests_expected_execution_output,
+        ),
+        (
+            [
+                "text_docstring.txt::text_docstring.txt",
+            ],
+            expected_execution_test_output.doctest_pytest_expected_execution_output,
+        ),
+        (
+            [
+                "",
+            ],
+            expected_execution_test_output.no_test_ids_pytest_execution_expected_output,
+        ),
+    ],
+)
+def test_pytest_execution(test_ids, expected_const):
+    """
+    Test that pytest execution works as expected when the pytest run itself is
+    successful but the actual test results are a mix of successes and failures:
+    1. uf_execution_expected_output: unittest tests run on multiple files.
+    2. uf_single_file_expected_output: test run on a single file.
+    3. uf_single_method_execution_expected_output: test run on a single method in a file.
+    4. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer.
+    5. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests.
+    6. dual_level_nested_folder_execution_expected_output: test run on a file with one test file at the top level and one test file in a nested folder.
+    7. double_nested_folder_expected_execution_output: test run on a double nested folder.
+    8. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs.
+    9. single_parametrize_tests_expected_execution_output: test run on a single parametrize test.
+    10. doctest_pytest_expected_execution_output: test run on a doctest file.
+    11. no_test_ids_pytest_execution_expected_output: test run with no test ids provided.
+
+    Keyword arguments:
+    test_ids -- an array of test_ids to run.
+    expected_const -- a dictionary of the expected output from running pytest execution on the files.
+    """
+    args = test_ids
+    actual = runner(args)
+    assert actual
+    assert all(item in actual for item in ("status", "cwd", "result"))
+    assert actual["status"] == "success"
+    assert actual["cwd"] == os.fspath(TEST_DATA_PATH)
+    result_data = actual["result"]
+    for key in result_data:
+        if result_data[key]["outcome"] == "failure":
+            result_data[key]["message"] = "ERROR MESSAGE"
+    assert result_data == expected_const
diff --git a/pythonFiles/vscode_pytest/__init__.py b/pythonFiles/vscode_pytest/__init__.py
index 22acaab57953..6063e4113d55 100644
--- a/pythonFiles/vscode_pytest/__init__.py
+++ b/pythonFiles/vscode_pytest/__init__.py
@@ -69,7 +69,8 @@ def pytest_exception_interact(node, call, report):
     """
     # call.excinfo is the captured exception of the call, if it raised as type ExceptionInfo.
     # call.excinfo.exconly() returns the exception as a string.
-    ERRORS.append(call.excinfo.exconly())
+    if call.excinfo and call.excinfo.typename != "AssertionError":
+        ERRORS.append(call.excinfo.exconly())
 
 
 def pytest_keyboard_interrupt(excinfo):
@@ -82,34 +83,138 @@ def pytest_keyboard_interrupt(excinfo):
     ERRORS.append(excinfo.exconly())
 
 
+class TestOutcome(Dict):
+    """A class that handles the outcome for a single test.
+
+    For pytest, the outcome of a test can only be 'passed', 'skipped' or 'failed'.
+    """
+
+    test: str
+    outcome: Literal["success", "failure", "skipped"]
+    message: Union[str, None]
+    traceback: Union[str, None]
+    subtest: Optional[str]
+
+
+def create_test_outcome(
+    test: str,
+    outcome: str,
+    message: Union[str, None],
+    traceback: Union[str, None],
+    subtype: Optional[str] = None,
+) -> TestOutcome:
+    """A function that creates a TestOutcome object."""
+    return TestOutcome(
+        test=test,
+        outcome=outcome,
+        message=message,
+        traceback=traceback,  # TODO: traceback
+        subtest=None,
+    )
+
+
+class testRunResultDict(Dict[str, Dict[str, TestOutcome]]):
+    """A class that stores all test run results."""
+
+    outcome: str
+    tests: Dict[str, TestOutcome]
+
+
+collected_tests = testRunResultDict()
+IS_DISCOVERY = False
+
+
+def pytest_load_initial_conftests(early_config, parser, args):
+    if "--collect-only" in args:
+        global IS_DISCOVERY
+        IS_DISCOVERY = True
+
+
+def pytest_report_teststatus(report, config):
+    """
+    A pytest hook that is called when a test's status is reported. It is called
+    three times per test: during setup, call, and teardown.
+
+    Keyword arguments:
+    report -- the report on the test setup, call, and teardown.
+    config -- the configuration object.
+    """
+
+    if report.when == "call":
+        traceback = None
+        message = None
+        report_value = "skipped"
+        if report.passed:
+            report_value = "success"
+        elif report.failed:
+            report_value = "failure"
+            message = report.longreprtext
+        item_result = create_test_outcome(
+            report.nodeid,
+            report_value,
+            message,
+            traceback,
+        )
+        collected_tests[report.nodeid] = item_result
+
+
+ERROR_MESSAGE_CONST = {
+    2: "Pytest was unable to start or run any tests due to issues with test discovery or test collection.",
+    3: "Pytest was interrupted by the user, for example by pressing Ctrl+C during test execution.",
+    4: "Pytest encountered an internal error or exception during test execution.",
+    5: "Pytest was unable to find any tests to run.",
+}
+
+
 def pytest_sessionfinish(session, exitstatus):
     """A pytest hook that is called after pytest has fully finished.
 
     Keyword arguments:
     session -- the pytest session object.
     exitstatus -- the status code of the session.
+
+    0: All tests passed successfully.
+    1: One or more tests failed.
+    2: Pytest was unable to start or run any tests due to issues with test discovery or test collection.
+    3: Pytest was interrupted by the user, for example by pressing Ctrl+C during test execution.
+    4: Pytest encountered an internal error or exception during test execution.
+    5: Pytest was unable to find any tests to run.
""" cwd = pathlib.Path.cwd() - try: - session_node: Union[TestNode, None] = build_test_tree(session) - if not session_node: - raise VSCodePytestError( - "Something went wrong following pytest finish, \ - no session node was created" + if IS_DISCOVERY: + try: + session_node: Union[TestNode, None] = build_test_tree(session) + if not session_node: + raise VSCodePytestError( + "Something went wrong following pytest finish, \ + no session node was created" + ) + post_response(os.fsdecode(cwd), session_node) + except Exception as e: + ERRORS.append( + f"Error Occurred, traceback: {(traceback.format_exc() if e.__traceback__ else '')}" ) - post_response(os.fsdecode(cwd), session_node) - except Exception as e: - ERRORS.append( - f"Error Occurred, traceback: {(traceback.format_exc() if e.__traceback__ else '')}" + errorNode: TestNode = { + "name": "", + "path": "", + "type_": "error", + "children": [], + "id_": "", + } + post_response(os.fsdecode(cwd), errorNode) + else: + if exitstatus == 0 or exitstatus == 1: + exitstatus_bool = "success" + else: + ERRORS.append( + f"Pytest exited with error status: {exitstatus}, {ERROR_MESSAGE_CONST[exitstatus]}" + ) + exitstatus_bool = "error" + + execution_post( + os.fsdecode(cwd), + exitstatus_bool, + collected_tests if collected_tests else None, ) - errorNode: TestNode = { - "name": "", - "path": "", - "type_": "error", - "children": [], - "id_": "", - } - post_response(os.fsdecode(cwd), errorNode) def build_test_tree(session: pytest.Session) -> TestNode: @@ -284,13 +389,67 @@ def create_folder_node(folderName: str, path_iterator: pathlib.Path) -> TestNode } -class PayloadDict(TypedDict): +class DiscoveryPayloadDict(TypedDict): """A dictionary that is used to send a post request to the server.""" cwd: str status: Literal["success", "error"] tests: Optional[TestNode] - errors: Optional[List[str]] + error: Optional[List[str]] + + +class ExecutionPayloadDict(Dict): + """ + A dictionary that is used to send a execution post request to the server. + """ + + cwd: str + status: Literal["success", "error"] + result: Union[testRunResultDict, None] + not_found: Union[List[str], None] # Currently unused need to check + error: Union[str, None] # Currently unused need to check + + +def execution_post( + cwd: str, + status: Literal["success", "error"], + tests: Union[testRunResultDict, None], +): + """ + Sends a post request to the server after the tests have been executed. + Keyword arguments: + cwd -- the current working directory. + session_node -- the status of running the tests + tests -- the tests that were run and their status. 
+ """ + testPort = os.getenv("TEST_PORT", 45454) + testuuid = os.getenv("TEST_UUID") + payload: ExecutionPayloadDict = ExecutionPayloadDict( + cwd=cwd, status=status, result=tests, not_found=None, error=None + ) + if ERRORS: + payload["error"] = ERRORS + + addr = ("localhost", int(testPort)) + data = json.dumps(payload) + request = f"""Content-Length: {len(data)} +Content-Type: application/json +Request-uuid: {testuuid} + +{data}""" + test_output_file: Optional[str] = os.getenv("TEST_OUTPUT_FILE", None) + if test_output_file == "stdout": + print(request) + elif test_output_file: + pathlib.Path(test_output_file).write_text(request, encoding="utf-8") + else: + try: + with socket_manager.SocketManager(addr) as s: + if s.socket is not None: + s.socket.sendall(request.encode("utf-8")) + except Exception as e: + print(f"Plugin error connection error[vscode-pytest]: {e}") + print(f"[vscode-pytest] data: {request}") def post_response(cwd: str, session_node: TestNode) -> None: @@ -301,15 +460,14 @@ def post_response(cwd: str, session_node: TestNode) -> None: session_node -- the session node, which is the top of the testing tree. errors -- a list of errors that occurred during test collection. """ - payload: PayloadDict = { + payload: DiscoveryPayloadDict = { "cwd": cwd, "status": "success" if not ERRORS else "error", "tests": session_node, - "errors": [], + "error": [], } - if ERRORS: - payload["errors"] = ERRORS - + if ERRORS is not None: + payload["error"] = ERRORS testPort: Union[str, int] = os.getenv("TEST_PORT", 45454) testuuid: Union[str, None] = os.getenv("TEST_UUID") addr = "localhost", int(testPort)