switch testing output to test result panel #22039

Merged 26 commits from switch-output-location into main on Oct 11, 2023. The diff below shows changes from 9 of the 26 commits.

Commits
f5230da
switch testing output to test result panel
eleanorjboyd Sep 20, 2023
1aebbb7
remove sleep
eleanorjboyd Sep 21, 2023
d2ffd0d
fix existing tests
eleanorjboyd Sep 25, 2023
48b68a5
add output channel msg
eleanorjboyd Sep 25, 2023
83934c7
add test python side
eleanorjboyd Sep 25, 2023
93196a3
fix new line
eleanorjboyd Sep 25, 2023
8d94714
Merge branch 'main' into switch-output-location
eleanorjboyd Sep 25, 2023
3fc1f20
update with comment
eleanorjboyd Sep 25, 2023
7455d87
remove unneeded import
eleanorjboyd Sep 25, 2023
4c891f1
edits from feedback and collect discovery
eleanorjboyd Sep 25, 2023
5d8e61f
remove color addition
eleanorjboyd Sep 25, 2023
506c320
Merge branch 'main' into switch-output-location
eleanorjboyd Oct 9, 2023
9f270d6
switch testing output to test result panel
eleanorjboyd Sep 20, 2023
09e3868
fix existing tests
eleanorjboyd Sep 25, 2023
9f4598a
add output channel msg
eleanorjboyd Sep 25, 2023
3405f27
fix new line
eleanorjboyd Sep 25, 2023
b2ab4f7
edits from feedback and collect discovery
eleanorjboyd Sep 25, 2023
9a67e66
remove color addition
eleanorjboyd Sep 25, 2023
6a1f01b
fix syntax issues from merge
eleanorjboyd Oct 9, 2023
9db80b9
fix tests
eleanorjboyd Oct 9, 2023
a7c6c61
fix linting
eleanorjboyd Oct 9, 2023
2d0830a
fix failing tests
eleanorjboyd Oct 9, 2023
6ecaa9a
fix to dynamic print
eleanorjboyd Oct 9, 2023
f9bc755
switch to using constant for deprecation msg
eleanorjboyd Oct 9, 2023
1bfc34b
Update src/client/testing/testController/common/utils.ts
eleanorjboyd Oct 10, 2023
6b8444a
Merge branch 'main' into switch-output-location
eleanorjboyd Oct 10, 2023
35 changes: 35 additions & 0 deletions pythonFiles/tests/pytestadapter/.data/test_logging.py
@@ -0,0 +1,35 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import logging
import sys


def test_logging2(caplog):
logger = logging.getLogger(__name__)
caplog.set_level(logging.DEBUG) # Set minimum log level to capture

logger.debug("This is a debug message.")
logger.info("This is an info message.")
logger.warning("This is a warning message.")
logger.error("This is an error message.")
logger.critical("This is a critical message.")

# Printing to stdout and stderr
print("This is a stdout message.")
print("This is a stderr message.", file=sys.stderr)
assert False


def test_logging(caplog):
logger = logging.getLogger(__name__)
caplog.set_level(logging.DEBUG) # Set minimum log level to capture

logger.debug("This is a debug message.")
logger.info("This is an info message.")
logger.warning("This is a warning message.")
logger.error("This is an error message.")
logger.critical("This is a critical message.")

# Printing to stdout and stderr
print("This is a stdout message.")
print("This is a stderr message.", file=sys.stderr)
28 changes: 28 additions & 0 deletions pythonFiles/tests/pytestadapter/expected_execution_test_output.py
@@ -596,3 +596,31 @@
"subtest": None,
}
}


# This is the expected output for the test logging file.
# └── test_logging.py
# └── test_logging2: failure
# └── test_logging: success
test_logging_path = TEST_DATA_PATH / "test_logging.py"

logging_test_expected_execution_output = {
get_absolute_test_id("test_logging.py::test_logging2", test_logging_path): {
"test": get_absolute_test_id(
"test_logging.py::test_logging2", test_logging_path
),
"outcome": "failure",
"message": "ERROR MESSAGE",
"traceback": None,
"subtest": None,
},
get_absolute_test_id("test_logging.py::test_logging", test_logging_path): {
"test": get_absolute_test_id(
"test_logging.py::test_logging", test_logging_path
),
"outcome": "success",
"message": None,
"traceback": None,
"subtest": None,
},
}
1 change: 1 addition & 0 deletions pythonFiles/tests/pytestadapter/helpers.py
@@ -129,6 +129,7 @@ def runner_with_cwd(
"pytest",
"-p",
"vscode_pytest",
"-s",
] + args
listener: socket.socket = create_server()
_, port = listener.getsockname()
3 changes: 3 additions & 0 deletions pythonFiles/tests/pytestadapter/test_discovery.py
@@ -9,6 +9,9 @@
from . import expected_discovery_test_output
from .helpers import TEST_DATA_PATH, runner, runner_with_cwd

# uncomment this line to skip all tests in this module
# pytestmark = pytest.mark.skip(reason="Skipping all tests in this module")


def test_import_error(tmp_path):
"""Test pytest discovery on a file that has a pytest marker but does not import pytest.
31 changes: 21 additions & 10 deletions pythonFiles/tests/pytestadapter/test_execution.py
@@ -10,6 +10,9 @@

from .helpers import TEST_DATA_PATH, runner, runner_with_cwd

# uncomment this line to skip all tests in this module
# pytestmark = pytest.mark.skip(reason="Skipping all tests in this module")


def test_config_file():
"""Test pytest execution when a config file is specified."""
@@ -215,29 +218,37 @@ def test_bad_id_error_execution():
],
expected_execution_test_output.doctest_pytest_expected_execution_output,
),
(
["test_logging.py::test_logging2", "test_logging.py::test_logging"],
expected_execution_test_output.logging_test_expected_execution_output,
),
],
)
def test_pytest_execution(test_ids, expected_const):
"""
Test that pytest discovery works as expected where run pytest is always successful
but the actual test results are both successes and failures.:
1. uf_execution_expected_output: unittest tests run on multiple files.
2. uf_single_file_expected_output: test run on a single file.
3. uf_single_method_execution_expected_output: test run on a single method in a file.
4. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer.
5. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests.
6. dual_level_nested_folder_execution_expected_output: test run on a file with one test file
    1. skip_tests_execution_expected_output: test run on a file with skipped tests.
2. error_raised_exception_execution_expected_output: test run on a file that raises an exception.
3. uf_execution_expected_output: unittest tests run on multiple files.
4. uf_single_file_expected_output: test run on a single file.
5. uf_single_method_execution_expected_output: test run on a single method in a file.
6. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer.
7. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests.
8. dual_level_nested_folder_execution_expected_output: test run on a file with one test file
at the top level and one test file in a nested folder.
7. double_nested_folder_expected_execution_output: test run on a double nested folder.
8. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs.
9. single_parametrize_tests_expected_execution_output: test run on single parametrize test.
10. doctest_pytest_expected_execution_output: test run on doctest file.
9. double_nested_folder_expected_execution_output: test run on a double nested folder.
10. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs.
11. single_parametrize_tests_expected_execution_output: test run on single parametrize test.
12. doctest_pytest_expected_execution_output: test run on doctest file.
13. logging_test_expected_execution_output: test run on a file with logging.


Keyword arguments:
test_ids -- an array of test_ids to run.
expected_const -- a dictionary of the expected output from running pytest discovery on the files.
"""
print("Test IDs: ", test_ids)
args = test_ids
actual = runner(args)
assert actual
2 changes: 1 addition & 1 deletion pythonFiles/vscode_pytest/run_pytest_script.py
@@ -52,7 +52,7 @@
# Clear the buffer as complete JSON object is received
buffer = b""

# Process the JSON data
# Process the JSON data.
print("Received JSON data in run script")
break
except json.JSONDecodeError:
54 changes: 44 additions & 10 deletions src/client/testing/testController/common/server.ts
@@ -20,6 +20,7 @@ import {
createEOTPayload,
createExecutionErrorPayload,
extractJsonPayload,
fixLogLines,
} from './utils';
import { createDeferred } from '../../../common/utils/async';

@@ -86,7 +87,7 @@ export class PythonTestServer implements ITestServer, Disposable {
// what payload is so small it doesn't include the whole UUID think got this
if (extractedJsonPayload.uuid !== undefined && extractedJsonPayload.cleanedJsonData !== undefined) {
// if a full json was found in the buffer, fire the data received event then keep cycling with the remaining raw data.
traceInfo(`Firing data received event, ${extractedJsonPayload.cleanedJsonData}`);
traceLog(`Firing data received event, ${extractedJsonPayload.cleanedJsonData}`);
this._fireDataReceived(extractedJsonPayload.uuid, extractedJsonPayload.cleanedJsonData);
}
buffer = Buffer.from(extractedJsonPayload.remainingRawData);
@@ -170,6 +171,7 @@
callback?: () => void,
): Promise<void> {
const { uuid } = options;
const isDiscovery = testIds === undefined;

const pythonPathParts: string[] = process.env.PYTHONPATH?.split(path.delimiter) ?? [];
const pythonPathCommand = [options.cwd, ...pythonPathParts].join(path.delimiter);
@@ -189,14 +191,22 @@
resource: options.workspaceFolder,
};
const execService = await this.executionFactory.createActivatedEnvironment(creationOptions);

// Add the generated UUID to the data to be sent (expecting to receive it back).
// first check if we have testIds passed in (in case of execution) and
// insert appropriate flag and test id array
const args = [options.command.script, '--port', this.getPort().toString(), '--uuid', uuid].concat(
options.command.args,
);

// If the user didn't explicitly dictate the color during run, then add it
if (isRun) {
if (!args.includes('--color=no')) {
if (!args.includes('--color=yes')) {
args.push('--color=yes');
}
}
}

if (options.outChannel) {
options.outChannel.appendLine(`python ${args.join(' ')}`);
}
@@ -230,17 +240,41 @@
result?.proc?.kill();
});

// Take all output from the subprocess and add it to the test output channel. This will be the pytest output.
// Displays output to user and ensure the subprocess doesn't run into buffer overflow.
result?.proc?.stdout?.on('data', (data) => {
spawnOptions?.outputChannel?.append(data.toString());
});
result?.proc?.stderr?.on('data', (data) => {
spawnOptions?.outputChannel?.append(data.toString());
});
// TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead.
// TODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead.
if (isDiscovery) {
result?.proc?.stdout?.on('data', (data) => {
const out = fixLogLines(data.toString());
traceLog(out);
spawnOptions?.outputChannel?.append(`${out}`);
});
result?.proc?.stderr?.on('data', (data) => {
const out = fixLogLines(data.toString());
traceError(out);
spawnOptions?.outputChannel?.append(`${out}`);
});
} else {
result?.proc?.stdout?.on('data', (data) => {
const out = fixLogLines(data.toString());
runInstance?.appendOutput(`${out}`);
spawnOptions?.outputChannel?.append(out);
});
result?.proc?.stderr?.on('data', (data) => {
const out = fixLogLines(data.toString());
runInstance?.appendOutput(`${out}`);
spawnOptions?.outputChannel?.append(out);
});
}

result?.proc?.on('exit', (code, signal) => {
// if the child has testIds then this is a run request
if (code !== 0 && testIds && testIds?.length !== 0) {
spawnOptions?.outputChannel?.append(
'Starting now, all test run output will be sent to the Test Result panel' +
' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' +
' The "Python Test Log" channel will be deprecated within the next month. See ___ for details.',
);
if (code !== 0 && !isDiscovery) {
traceError(
`Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}. Creating and sending error execution payload`,
);
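The server now routes subprocess output through a `fixLogLines` helper imported from `common/utils.ts`; the diff for that file (commit 1bfc34b above) is not shown on this page. A minimal sketch of what such a helper plausibly does — converting bare LF line endings to CRLF so `TestRun.appendOutput` renders line breaks correctly in the Test Results pseudo-terminal — assuming the signature seen at the call sites; the actual implementation may differ:

// Sketch only: the real fixLogLines lives in
// src/client/testing/testController/common/utils.ts and may differ.
// TestRun.appendOutput feeds a pseudo-terminal, which expects \r\n line
// endings, while raw pytest output arrives with \n only.
export function fixLogLines(content: string): string {
    const lines = content.split(/\r?\n/g);
    return `${lines.join('\r\n')}\r\n`;
}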
28 changes: 22 additions & 6 deletions src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts
@@ -11,15 +11,15 @@ import {
import { IConfigurationService, ITestOutputChannel } from '../../../common/types';
import { Deferred, createDeferred } from '../../../common/utils/async';
import { EXTENSION_ROOT_DIR } from '../../../constants';
import { traceError, traceInfo, traceVerbose } from '../../../logging';
import { traceError, traceInfo, traceLog, traceVerbose } from '../../../logging';
import {
DataReceivedEvent,
DiscoveredTestPayload,
ITestDiscoveryAdapter,
ITestResultResolver,
ITestServer,
} from '../common/types';
import { createDiscoveryErrorPayload, createEOTPayload } from '../common/utils';
import { createDiscoveryErrorPayload, createEOTPayload, fixLogLines } from '../common/utils';

/**
* Wrapper class for unittest test discovery. This is where we call `runTestCommand`. #this seems incorrectly copied
@@ -84,19 +84,32 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter {
const execService = await executionFactory?.createActivatedEnvironment(creationOptions);
// delete UUID following entire discovery finishing.
const deferredExec = createDeferred<ExecutionResult<string>>();
const execArgs = ['-m', 'pytest', '-p', 'vscode_pytest', '--collect-only'].concat(pytestArgs);

let execArgs = ['-m', 'pytest', '-p', 'vscode_pytest', '--collect-only'].concat(pytestArgs);
// filter out color=yes from pytestArgs
execArgs = execArgs.filter((item) => item !== '--color=yes');
traceVerbose(`Running pytest discovery with command: ${execArgs.join(' ')}`);
const result = execService?.execObservable(execArgs, spawnOptions);

// Take all output from the subprocess and add it to the test output channel. This will be the pytest output.
// Displays output to user and ensure the subprocess doesn't run into buffer overflow.
// TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead.
result?.proc?.stdout?.on('data', (data) => {
spawnOptions.outputChannel?.append(data.toString());
const out = fixLogLines(data.toString());
traceLog(out);
spawnOptions?.outputChannel?.append(`${out}`);
});
result?.proc?.stderr?.on('data', (data) => {
spawnOptions.outputChannel?.append(data.toString());
const out = fixLogLines(data.toString());
traceError(out);
spawnOptions?.outputChannel?.append(`${out}`);
});
result?.proc?.on('exit', (code, signal) => {
this.outputChannel?.append(
'Starting now, all test run output will be sent to the Test Result panel' +
' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' +
' The "Python Test Log" channel will be deprecated within the next month. See ___ for details.',
);
if (code !== 0) {
traceError(
`Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}. Creating and sending error discovery payload`,
@@ -112,7 +125,10 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter {
data: JSON.stringify(createEOTPayload(true)),
});
}
deferredExec.resolve({ stdout: '', stderr: '' });
deferredExec.resolve({
stdout: '',
stderr: '',
});
deferred.resolve();
});

23 changes: 19 additions & 4 deletions src/client/testing/testController/pytest/pytestExecutionAdapter.ts
@@ -125,8 +125,15 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter {
const execService = await executionFactory?.createActivatedEnvironment(creationOptions);

try {
const colorOff = pytestArgs.includes('--color=no');
// Remove positional test folders and files, we will add as needed per node
const testArgs = removePositionalFoldersAndFiles(pytestArgs);
// If the user didn't explicitly dictate the color, then add it
if (!colorOff) {
if (!testArgs.includes('--color=yes')) {
testArgs.push('--color=yes');
}
}

// if user has provided `--rootdir` then use that, otherwise add `cwd`
if (testArgs.filter((a) => a.startsWith('--rootdir')).length === 0) {
@@ -166,23 +173,31 @@

const deferredExec = createDeferred<ExecutionResult<string>>();
const result = execService?.execObservable(runArgs, spawnOptions);

runInstance?.token.onCancellationRequested(() => {
traceInfo('Test run cancelled, killing pytest subprocess.');
result?.proc?.kill();
});

// Take all output from the subprocess and add it to the test output channel. This will be the pytest output.
// Displays output to user and ensure the subprocess doesn't run into buffer overflow.
// TODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead.
result?.proc?.stdout?.on('data', (data) => {
this.outputChannel?.append(data.toString());
const out = utils.fixLogLines(data.toString());
runInstance?.appendOutput(`${out}`);
this.outputChannel?.append(out);
});
result?.proc?.stderr?.on('data', (data) => {
this.outputChannel?.append(data.toString());
const out = utils.fixLogLines(data.toString());
runInstance?.appendOutput(`${out}`);
this.outputChannel?.append(out);
});

result?.proc?.on('exit', (code, signal) => {
traceInfo('Test run finished, subprocess exited.');
this.outputChannel?.append(
'Starting now, all test run output will be sent to the Test Result panel' +
' and test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' +
' The "Python Test Log" channel will be deprecated within the next month. See ___ for details.',
);
// if the child has testIds then this is a run request
if (code !== 0 && testIds) {
traceError(
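Both the run path in server.ts and pytestExecutionAdapter.ts add `--color=yes` with the same nested checks, while the discovery adapter filters it back out. A hypothetical consolidation of that logic (illustrative only, not part of this PR; the function names are invented):

// Force colored pytest output for test runs: pytest disables color when
// stdout is a pipe rather than a TTY, so the adapter opts back in unless
// the user already passed an explicit --color value.
function ensureColorArg(args: string[]): string[] {
    if (args.includes('--color=no') || args.includes('--color=yes')) {
        return args; // respect the user's explicit choice
    }
    return [...args, '--color=yes'];
}

// Discovery output is parsed and logged rather than rendered in a terminal,
// so ANSI escape sequences are unwanted there and --color=yes is stripped.
function stripColorArg(args: string[]): string[] {
    return args.filter((arg) => arg !== '--color=yes');
}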
@@ -67,7 +67,7 @@ export class UnittestTestDiscoveryAdapter implements ITestDiscoveryAdapter {
}

private async callSendCommand(options: TestCommandOptions, callback: () => void): Promise<DiscoveredTestPayload> {
await this.testServer.sendCommand(options, undefined, undefined, [], callback);
await this.testServer.sendCommand(options, undefined, undefined, undefined, callback);
const discoveryPayload: DiscoveredTestPayload = { cwd: '', status: 'success' };
return discoveryPayload;
}