diff --git a/pythonFiles/tests/pytestadapter/.data/test_logging.py b/pythonFiles/tests/pytestadapter/.data/test_logging.py new file mode 100644 index 000000000000..058ad8075718 --- /dev/null +++ b/pythonFiles/tests/pytestadapter/.data/test_logging.py @@ -0,0 +1,35 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. +import logging +import sys + + +def test_logging2(caplog): + logger = logging.getLogger(__name__) + caplog.set_level(logging.DEBUG) # Set minimum log level to capture + + logger.debug("This is a debug message.") + logger.info("This is an info message.") + logger.warning("This is a warning message.") + logger.error("This is an error message.") + logger.critical("This is a critical message.") + + # Printing to stdout and stderr + print("This is a stdout message.") + print("This is a stderr message.", file=sys.stderr) + assert False + + +def test_logging(caplog): + logger = logging.getLogger(__name__) + caplog.set_level(logging.DEBUG) # Set minimum log level to capture + + logger.debug("This is a debug message.") + logger.info("This is an info message.") + logger.warning("This is a warning message.") + logger.error("This is an error message.") + logger.critical("This is a critical message.") + + # Printing to stdout and stderr + print("This is a stdout message.") + print("This is a stderr message.", file=sys.stderr) diff --git a/pythonFiles/tests/pytestadapter/expected_execution_test_output.py b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py index 76d21b3e2518..3fdb7b45a0c0 100644 --- a/pythonFiles/tests/pytestadapter/expected_execution_test_output.py +++ b/pythonFiles/tests/pytestadapter/expected_execution_test_output.py @@ -596,3 +596,31 @@ "subtest": None, } } + + +# This is the expected output for the test logging file. 
+# └── test_logging.py +# └── test_logging2: failure +# └── test_logging: success +test_logging_path = TEST_DATA_PATH / "test_logging.py" + +logging_test_expected_execution_output = { + get_absolute_test_id("test_logging.py::test_logging2", test_logging_path): { + "test": get_absolute_test_id( + "test_logging.py::test_logging2", test_logging_path + ), + "outcome": "failure", + "message": "ERROR MESSAGE", + "traceback": None, + "subtest": None, + }, + get_absolute_test_id("test_logging.py::test_logging", test_logging_path): { + "test": get_absolute_test_id( + "test_logging.py::test_logging", test_logging_path + ), + "outcome": "success", + "message": None, + "traceback": None, + "subtest": None, + }, +} diff --git a/pythonFiles/tests/pytestadapter/helpers.py b/pythonFiles/tests/pytestadapter/helpers.py index b534e950945a..2d36da59956b 100644 --- a/pythonFiles/tests/pytestadapter/helpers.py +++ b/pythonFiles/tests/pytestadapter/helpers.py @@ -129,6 +129,7 @@ def runner_with_cwd( "pytest", "-p", "vscode_pytest", + "-s", ] + args listener: socket.socket = create_server() _, port = listener.getsockname() diff --git a/pythonFiles/tests/pytestadapter/test_execution.py b/pythonFiles/tests/pytestadapter/test_execution.py index 37a392f66d4b..98698d8cdd7c 100644 --- a/pythonFiles/tests/pytestadapter/test_execution.py +++ b/pythonFiles/tests/pytestadapter/test_execution.py @@ -215,23 +215,30 @@ def test_bad_id_error_execution(): ], expected_execution_test_output.doctest_pytest_expected_execution_output, ), + ( + ["test_logging.py::test_logging2", "test_logging.py::test_logging"], + expected_execution_test_output.logging_test_expected_execution_output, + ), ], ) def test_pytest_execution(test_ids, expected_const): """ Test that pytest discovery works as expected where run pytest is always successful but the actual test results are both successes and failures.: - 1. uf_execution_expected_output: unittest tests run on multiple files. - 2. uf_single_file_expected_output: test run on a single file. - 3. uf_single_method_execution_expected_output: test run on a single method in a file. - 4. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer. - 5. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests. - 6. dual_level_nested_folder_execution_expected_output: test run on a file with one test file + 1: skip_tests_execution_expected_output: test run on a file with skipped tests. + 2. error_raised_exception_execution_expected_output: test run on a file that raises an exception. + 3. uf_execution_expected_output: unittest tests run on multiple files. + 4. uf_single_file_expected_output: test run on a single file. + 5. uf_single_method_execution_expected_output: test run on a single method in a file. + 6. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer. + 7. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests. + 8. dual_level_nested_folder_execution_expected_output: test run on a file with one test file at the top level and one test file in a nested folder. - 7. double_nested_folder_expected_execution_output: test run on a double nested folder. - 8. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs. - 9. single_parametrize_tests_expected_execution_output: test run on single parametrize test. - 10. 
doctest_pytest_expected_execution_output: test run on doctest file. + 9. double_nested_folder_expected_execution_output: test run on a double nested folder. + 10. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs. + 11. single_parametrize_tests_expected_execution_output: test run on single parametrize test. + 12. doctest_pytest_expected_execution_output: test run on doctest file. + 13. logging_test_expected_execution_output: test run on a file with logging. Keyword arguments: diff --git a/pythonFiles/unittestadapter/execution.py b/pythonFiles/unittestadapter/execution.py index 5f46bda95328..e5758118b951 100644 --- a/pythonFiles/unittestadapter/execution.py +++ b/pythonFiles/unittestadapter/execution.py @@ -293,8 +293,6 @@ def post_response( ) # Clear the buffer as complete JSON object is received buffer = b"" - - # Process the JSON data break except json.JSONDecodeError: # JSON decoding error, the complete JSON object is not yet received diff --git a/pythonFiles/vscode_pytest/run_pytest_script.py b/pythonFiles/vscode_pytest/run_pytest_script.py index 0fca8208a406..c3720c8ab8d0 100644 --- a/pythonFiles/vscode_pytest/run_pytest_script.py +++ b/pythonFiles/vscode_pytest/run_pytest_script.py @@ -51,8 +51,6 @@ ) # Clear the buffer as complete JSON object is received buffer = b"" - - # Process the JSON data print("Received JSON data in run script") break except json.JSONDecodeError: diff --git a/src/client/testing/testController/common/resultResolver.ts b/src/client/testing/testController/common/resultResolver.ts index cf757d77243d..aaf82b143823 100644 --- a/src/client/testing/testController/common/resultResolver.ts +++ b/src/client/testing/testController/common/resultResolver.ts @@ -20,7 +20,7 @@ import { clearAllChildren, createErrorTestItem, getTestCaseNodes } from './testI import { sendTelemetryEvent } from '../../../telemetry'; import { EventName } from '../../../telemetry/constants'; import { splitLines } from '../../../common/stringUtils'; -import { buildErrorNodeOptions, fixLogLines, populateTestTree, splitTestNameWithRegex } from './utils'; +import { buildErrorNodeOptions, populateTestTree, splitTestNameWithRegex } from './utils'; import { Deferred } from '../../../common/utils/async'; export class PythonResultResolver implements ITestResultResolver { @@ -151,15 +151,16 @@ export class PythonResultResolver implements ITestResultResolver { const tempArr: TestItem[] = getTestCaseNodes(i); testCases.push(...tempArr); }); + const testItem = rawTestExecData.result[keyTemp]; - if (rawTestExecData.result[keyTemp].outcome === 'error') { - const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? ''; + if (testItem.outcome === 'error') { + const rawTraceback = testItem.traceback ?? ''; const traceback = splitLines(rawTraceback, { trim: false, removeEmptyEntries: true, }).join('\r\n'); - const text = `${rawTestExecData.result[keyTemp].test} failed with error: ${ - rawTestExecData.result[keyTemp].message ?? rawTestExecData.result[keyTemp].outcome + const text = `${testItem.test} failed with error: ${ + testItem.message ?? 
testItem.outcome }\r\n${traceback}\r\n`; const message = new TestMessage(text); @@ -170,23 +171,17 @@ export class PythonResultResolver implements ITestResultResolver { if (indiItem.uri && indiItem.range) { message.location = new Location(indiItem.uri, indiItem.range); runInstance.errored(indiItem, message); - runInstance.appendOutput(fixLogLines(text)); } } }); - } else if ( - rawTestExecData.result[keyTemp].outcome === 'failure' || - rawTestExecData.result[keyTemp].outcome === 'passed-unexpected' - ) { - const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? ''; + } else if (testItem.outcome === 'failure' || testItem.outcome === 'passed-unexpected') { + const rawTraceback = testItem.traceback ?? ''; const traceback = splitLines(rawTraceback, { trim: false, removeEmptyEntries: true, }).join('\r\n'); - const text = `${rawTestExecData.result[keyTemp].test} failed: ${ - rawTestExecData.result[keyTemp].message ?? rawTestExecData.result[keyTemp].outcome - }\r\n${traceback}\r\n`; + const text = `${testItem.test} failed: ${testItem.message ?? testItem.outcome}\r\n${traceback}\r\n`; const message = new TestMessage(text); // note that keyTemp is a runId for unittest library... @@ -197,14 +192,10 @@ export class PythonResultResolver implements ITestResultResolver { if (indiItem.uri && indiItem.range) { message.location = new Location(indiItem.uri, indiItem.range); runInstance.failed(indiItem, message); - runInstance.appendOutput(fixLogLines(text)); } } }); - } else if ( - rawTestExecData.result[keyTemp].outcome === 'success' || - rawTestExecData.result[keyTemp].outcome === 'expected-failure' - ) { + } else if (testItem.outcome === 'success' || testItem.outcome === 'expected-failure') { const grabTestItem = this.runIdToTestItem.get(keyTemp); const grabVSid = this.runIdToVSid.get(keyTemp); if (grabTestItem !== undefined) { @@ -216,7 +207,7 @@ export class PythonResultResolver implements ITestResultResolver { } }); } - } else if (rawTestExecData.result[keyTemp].outcome === 'skipped') { + } else if (testItem.outcome === 'skipped') { const grabTestItem = this.runIdToTestItem.get(keyTemp); const grabVSid = this.runIdToVSid.get(keyTemp); if (grabTestItem !== undefined) { @@ -228,11 +219,11 @@ export class PythonResultResolver implements ITestResultResolver { } }); } - } else if (rawTestExecData.result[keyTemp].outcome === 'subtest-failure') { + } else if (testItem.outcome === 'subtest-failure') { // split on [] or () based on how the subtest is setup. const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp); const parentTestItem = this.runIdToTestItem.get(parentTestCaseId); - const data = rawTestExecData.result[keyTemp]; + const data = testItem; // find the subtest's parent test item if (parentTestItem) { const subtestStats = this.subTestStats.get(parentTestCaseId); @@ -243,20 +234,19 @@ export class PythonResultResolver implements ITestResultResolver { failed: 1, passed: 0, }); - runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`)); // clear since subtest items don't persist between runs clearAllChildren(parentTestItem); } const subTestItem = this.testController?.createTestItem(subtestId, subtestId); - runInstance.appendOutput(fixLogLines(`${subtestId} Failed\r\n`)); // create a new test item for the subtest if (subTestItem) { const traceback = data.traceback ?? ''; - const text = `${data.subtest} Failed: ${data.message ?? 
data.outcome}\r\n${traceback}\r\n`; - runInstance.appendOutput(fixLogLines(text)); + const text = `${data.subtest} failed: ${ + testItem.message ?? testItem.outcome + }\r\n${traceback}\r\n`; parentTestItem.children.add(subTestItem); runInstance.started(subTestItem); - const message = new TestMessage(rawTestExecData?.result[keyTemp].message ?? ''); + const message = new TestMessage(text); if (parentTestItem.uri && parentTestItem.range) { message.location = new Location(parentTestItem.uri, parentTestItem.range); } @@ -267,7 +257,7 @@ } else { throw new Error('Parent test item not found'); } - } else if (rawTestExecData.result[keyTemp].outcome === 'subtest-success') { + } else if (testItem.outcome === 'subtest-success') { // split on [] or () based on how the subtest is setup. const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp); const parentTestItem = this.runIdToTestItem.get(parentTestCaseId); @@ -279,7 +269,6 @@ subtestStats.passed += 1; } else { this.subTestStats.set(parentTestCaseId, { failed: 0, passed: 1 }); - runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`)); // clear since subtest items don't persist between runs clearAllChildren(parentTestItem); } @@ -289,7 +278,6 @@ parentTestItem.children.add(subTestItem); runInstance.started(subTestItem); runInstance.passed(subTestItem); - runInstance.appendOutput(fixLogLines(`${subtestId} Passed\r\n`)); } else { throw new Error('Unable to create new child node for subtest'); } diff --git a/src/client/testing/testController/common/server.ts b/src/client/testing/testController/common/server.ts index 50ae1f3f7536..e496860526e4 100644 --- a/src/client/testing/testController/common/server.ts +++ b/src/client/testing/testController/common/server.ts @@ -17,10 +17,12 @@ import { DataReceivedEvent, ITestServer, TestCommandOptions } from './types'; import { ITestDebugLauncher, LaunchOptions } from '../../common/types'; import { UNITTEST_PROVIDER } from '../../common/constants'; import { + MESSAGE_ON_TESTING_OUTPUT_MOVE, createDiscoveryErrorPayload, createEOTPayload, createExecutionErrorPayload, extractJsonPayload, + fixLogLinesNoTrailing, } from './utils'; import { createDeferred } from '../../../common/utils/async'; import { EnvironmentVariables } from '../../../api/types'; @@ -173,7 +175,7 @@ export class PythonTestServer implements ITestServer, Disposable { callback?: () => void, ): Promise<void> { const { uuid } = options; - // get and edit env vars + const isDiscovery = (testIds === undefined || testIds.length === 0) && runTestIdPort === undefined; const mutableEnv = { ...env }; const pythonPathParts: string[] = process.env.PYTHONPATH?.split(path.delimiter) ?? []; const pythonPathCommand = [options.cwd, ...pythonPathParts].join(path.delimiter); @@ -196,7 +198,6 @@ export class PythonTestServer implements ITestServer, Disposable { resource: options.workspaceFolder, }; const execService = await this.executionFactory.createActivatedEnvironment(creationOptions); - const args = [options.command.script].concat(options.command.args); if (options.outChannel) { @@ -244,23 +245,55 @@ export class PythonTestServer implements ITestServer, Disposable { const result = execService?.execObservable(args, spawnOptions); resultProc = result?.proc; - // Take all output from the subprocess and add it to the test output channel. 
This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. - result?.proc?.stdout?.on('data', (data) => { - spawnOptions?.outputChannel?.append(data.toString()); - }); - result?.proc?.stderr?.on('data', (data) => { - spawnOptions?.outputChannel?.append(data.toString()); - }); - result?.proc?.on('exit', (code, signal) => { - if (code !== 0) { - traceError(`Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}`); - } - }); + // TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead. + // TODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. + if (isDiscovery) { + result?.proc?.stdout?.on('data', (data) => { + const out = fixLogLinesNoTrailing(data.toString()); + spawnOptions?.outputChannel?.append(`${out}`); + traceInfo(out); + }); + result?.proc?.stderr?.on('data', (data) => { + const out = fixLogLinesNoTrailing(data.toString()); + spawnOptions?.outputChannel?.append(`${out}`); + traceError(out); + }); + } else { + result?.proc?.stdout?.on('data', (data) => { + const out = fixLogLinesNoTrailing(data.toString()); + runInstance?.appendOutput(`${out}`); + spawnOptions?.outputChannel?.append(out); + }); + result?.proc?.stderr?.on('data', (data) => { + const out = fixLogLinesNoTrailing(data.toString()); + runInstance?.appendOutput(`${out}`); + spawnOptions?.outputChannel?.append(out); + }); + } result?.proc?.on('exit', (code, signal) => { // if the child has testIds then this is a run request - if (code !== 0 && testIds && testIds?.length !== 0) { + spawnOptions?.outputChannel?.append(MESSAGE_ON_TESTING_OUTPUT_MOVE); + if (isDiscovery) { + if (code !== 0) { + // This occurs when we are running discovery + traceError( + `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}. Creating and sending error discovery payload`, + ); + this._onDiscoveryDataReceived.fire({ + uuid, + data: JSON.stringify(createDiscoveryErrorPayload(code, signal, options.cwd)), + }); + // then send a EOT payload + this._onDiscoveryDataReceived.fire({ + uuid, + data: JSON.stringify(createEOTPayload(true)), + }); + } + } else if (code !== 0 && testIds) { + // This occurs when we are running the test and there is an error which occurs. + traceError( `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}. Creating and sending error execution payload`, ); @@ -274,22 +307,8 @@ export class PythonTestServer implements ITestServer, Disposable { uuid, data: JSON.stringify(createEOTPayload(true)), }); - } else if (code !== 0) { - // This occurs when we are running discovery - traceError( - `Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}. 
Creating and sending error discovery payload`, - ); - this._onDiscoveryDataReceived.fire({ - uuid, - data: JSON.stringify(createDiscoveryErrorPayload(code, signal, options.cwd)), - }); - // then send a EOT payload - this._onDiscoveryDataReceived.fire({ - uuid, - data: JSON.stringify(createEOTPayload(true)), - }); } - deferredTillExecClose.resolve({ stdout: '', stderr: '' }); + deferredTillExecClose.resolve(); }); await deferredTillExecClose.promise; } diff --git a/src/client/testing/testController/common/utils.ts b/src/client/testing/testController/common/utils.ts index f5f416529c42..5022fa5a44e6 100644 --- a/src/client/testing/testController/common/utils.ts +++ b/src/client/testing/testController/common/utils.ts @@ -23,6 +23,11 @@ export function fixLogLines(content: string): string { const lines = content.split(/\r?\n/g); return `${lines.join('\r\n')}\r\n`; } + +export function fixLogLinesNoTrailing(content: string): string { + const lines = content.split(/\r?\n/g); + return `${lines.join('\r\n')}`; +} export interface IJSONRPCData { extractedJSON: string; remainingRawData: string; @@ -42,6 +47,11 @@ export interface ExtractOutput { export const JSONRPC_UUID_HEADER = 'Request-uuid'; export const JSONRPC_CONTENT_LENGTH_HEADER = 'Content-Length'; export const JSONRPC_CONTENT_TYPE_HEADER = 'Content-Type'; +export const MESSAGE_ON_TESTING_OUTPUT_MOVE = + 'Starting now, all test run output will be sent to the Test Result panel,' + + ' while test discovery output will be sent to the "Python" output channel instead of the "Python Test Log" channel.' + + ' The "Python Test Log" channel will be deprecated within the next month.' + + ' See https://github.com/microsoft/vscode-python/wiki/New-Method-for-Output-Handling-in-Python-Testing for details.'; export function createTestingDeferred(): Deferred<void> { return createDeferred<void>(); } diff --git a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts index 4ed2570ba7cc..92bd9f04834e 100644 --- a/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts +++ b/src/client/testing/testController/pytest/pytestDiscoveryAdapter.ts @@ -18,7 +18,13 @@ import { ITestResultResolver, ITestServer, } from '../common/types'; -import { createDiscoveryErrorPayload, createEOTPayload, createTestingDeferred } from '../common/utils'; +import { + MESSAGE_ON_TESTING_OUTPUT_MOVE, + createDiscoveryErrorPayload, + createEOTPayload, + createTestingDeferred, + fixLogLinesNoTrailing, +} from '../common/utils'; import { IEnvironmentVariablesProvider } from '../../../common/variables/types'; /** @@ -95,13 +101,20 @@ export class PytestTestDiscoveryAdapter implements ITestDiscoveryAdapter { // Take all output from the subprocess and add it to the test output channel. This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. + // TODO: after a release, remove discovery output from the "Python Test Log" channel and send it to the "Python" channel instead. 
+ result?.proc?.stdout?.on('data', (data) => { - spawnOptions.outputChannel?.append(data.toString()); + const out = fixLogLinesNoTrailing(data.toString()); + traceInfo(out); + spawnOptions?.outputChannel?.append(`${out}`); }); result?.proc?.stderr?.on('data', (data) => { - spawnOptions.outputChannel?.append(data.toString()); + const out = fixLogLinesNoTrailing(data.toString()); + traceError(out); + spawnOptions?.outputChannel?.append(`${out}`); }); result?.proc?.on('exit', (code, signal) => { + this.outputChannel?.append(MESSAGE_ON_TESTING_OUTPUT_MOVE); if (code !== 0) { traceError(`Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}.`); } diff --git a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts index eb8e9b6f935a..5c04aabab845 100644 --- a/src/client/testing/testController/pytest/pytestExecutionAdapter.ts +++ b/src/client/testing/testController/pytest/pytestExecutionAdapter.ts @@ -190,13 +190,19 @@ export class PytestTestExecutionAdapter implements ITestExecutionAdapter { // Take all output from the subprocess and add it to the test output channel. This will be the pytest output. // Displays output to user and ensure the subprocess doesn't run into buffer overflow. + // TODO: after a release, remove run output from the "Python Test Log" channel and send it to the "Test Result" channel instead. result?.proc?.stdout?.on('data', (data) => { - this.outputChannel?.append(data.toString()); + const out = utils.fixLogLinesNoTrailing(data.toString()); + runInstance?.appendOutput(out); + this.outputChannel?.append(out); }); result?.proc?.stderr?.on('data', (data) => { - this.outputChannel?.append(data.toString()); + const out = utils.fixLogLinesNoTrailing(data.toString()); + runInstance?.appendOutput(out); + this.outputChannel?.append(out); }); result?.proc?.on('exit', (code, signal) => { + this.outputChannel?.append(utils.MESSAGE_ON_TESTING_OUTPUT_MOVE); if (code !== 0 && testIds) { traceError(`Subprocess exited unsuccessfully with exit code ${code} and signal ${signal}.`); } diff --git a/src/test/testing/common/testingAdapter.test.ts b/src/test/testing/common/testingAdapter.test.ts index 3b5ef0062a98..519a60e3f0f7 100644 --- a/src/test/testing/common/testingAdapter.test.ts +++ b/src/test/testing/common/testingAdapter.test.ts @@ -275,7 +275,7 @@ suite('End to End Tests: test adapters', () => { assert.strictEqual(callCount, 1, 'Expected _resolveDiscovery to be called once'); }); }); - test('unittest execution adapter small workspace', async () => { + test('unittest execution adapter small workspace with correct output', async () => { // result resolver and saved data for assertions resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); let callCount = 0; @@ -319,12 +319,34 @@ suite('End to End Tests: test adapters', () => { onCancellationRequested: () => undefined, } as any), ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); await executionAdapter .runTests(workspaceUri, ['test_simple.SimpleClass.test_simple_unit'], false, testRun.object) .finally(() => { // verify that the _resolveExecution was called once per test assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); assert.strictEqual(failureOccurred, false, failureMsg); + + // 
verify output works for stdout and stderr as well as unittest output + assert.ok( + collectedOutput.includes('expected printed output, stdout'), + 'The test string does not contain the expected stdout output.', + ); + assert.ok( + collectedOutput.includes('expected printed output, stderr'), + 'The test string does not contain the expected stderr output.', + ); + assert.ok( + collectedOutput.includes('Ran 1 test in'), + 'The test string does not contain the expected unittest output.', + ); }); }); test('unittest execution adapter large workspace', async () => { @@ -372,15 +394,33 @@ suite('End to End Tests: test adapters', () => { onCancellationRequested: () => undefined, } as any), ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); await executionAdapter .runTests(workspaceUri, ['test_parameterized_subtest.NumbersTest.test_even'], false, testRun.object) .then(() => { // verify that the _resolveExecution was called once per test assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); assert.strictEqual(failureOccurred, false, failureMsg); + + // verify output + assert.ok( + collectedOutput.includes('test_parameterized_subtest.py'), + 'The test string does not contain the correct test name which should be printed', + ); + assert.ok( + collectedOutput.includes('FAILED (failures=1000)'), + 'The test string does not contain the last of the unittest output', + ); }); }); - test('pytest execution adapter small workspace', async () => { + test('pytest execution adapter small workspace with correct output', async () => { // result resolver and saved data for assertions resultResolver = new PythonResultResolver(testController, unittestProvider, workspaceUri); let callCount = 0; @@ -423,6 +463,14 @@ suite('End to End Tests: test adapters', () => { onCancellationRequested: () => undefined, } as any), ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); await executionAdapter .runTests( workspaceUri, @@ -435,6 +483,30 @@ suite('End to End Tests: test adapters', () => { // verify that the _resolveExecution was called once per test assert.strictEqual(callCount, 1, 'Expected _resolveExecution to be called once'); assert.strictEqual(failureOccurred, false, failureMsg); + + // verify output works for stdout and stderr as well as pytest output + assert.ok( + collectedOutput.includes('test session starts'), + 'The test string does not contain the expected stdout output.', + ); + assert.ok( + collectedOutput.includes('Captured log call'), + 'The test string does not contain the expected log section.', + ); + const searchStrings = [ + 'This is a warning message.', + 'This is an error message.', + 'This is a critical message.', + ]; + let searchString: string; + for (searchString of searchStrings) { + const count: number = (collectedOutput.match(new RegExp(searchString, 'g')) || []).length; + assert.strictEqual( + count, + 2, + `The test string does not contain two instances of ${searchString}. 
Should appear twice from logging output and stack trace`, + ); + } }); }); test('pytest execution adapter large workspace', async () => { @@ -488,10 +560,24 @@ suite('End to End Tests: test adapters', () => { onCancellationRequested: () => undefined, } as any), ); + let collectedOutput = ''; + testRun + .setup((t) => t.appendOutput(typeMoq.It.isAny())) + .callback((output: string) => { + collectedOutput += output; + traceLog('appendOutput was called with:', output); + }) + .returns(() => false); await executionAdapter.runTests(workspaceUri, testIds, false, testRun.object, pythonExecFactory).then(() => { // verify that the _resolveExecution was called once per test assert.strictEqual(callCount, 2000, 'Expected _resolveExecution to be called once'); assert.strictEqual(failureOccurred, false, failureMsg); + + // verify output works for large repo + assert.ok( + collectedOutput.includes('test session starts'), + 'The test string does not contain the expected stdout output from pytest.', + ); }); }); test('unittest discovery adapter seg fault error handling', async () => { diff --git a/src/testTestingRootWkspc/loggingWorkspace/test_logging.py b/src/testTestingRootWkspc/loggingWorkspace/test_logging.py new file mode 100644 index 000000000000..a3e77f06ae78 --- /dev/null +++ b/src/testTestingRootWkspc/loggingWorkspace/test_logging.py @@ -0,0 +1,13 @@ +# Copyright (c) Microsoft Corporation. All rights reserved. +import logging + + +def test_logging(caplog): + logger = logging.getLogger(__name__) + caplog.set_level(logging.DEBUG) # Set minimum log level to capture + + logger.debug("This is a debug message.") + logger.info("This is an info message.") + logger.warning("This is a warning message.") + logger.error("This is an error message.") + logger.critical("This is a critical message.") diff --git a/src/testTestingRootWkspc/smallWorkspace/test_simple.py b/src/testTestingRootWkspc/smallWorkspace/test_simple.py index 6b4f7bd2f8a6..f68a0d7d0d93 100644 --- a/src/testTestingRootWkspc/smallWorkspace/test_simple.py +++ b/src/testTestingRootWkspc/smallWorkspace/test_simple.py @@ -1,12 +1,25 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import unittest +import logging +import sys -def test_a(): - assert 1 == 1 +def test_a(caplog): + logger = logging.getLogger(__name__) + # caplog.set_level(logging.ERROR) # Set minimum log level to capture + logger.setLevel(logging.WARN) + + logger.debug("This is a debug message.") + logger.info("This is an info message.") + logger.warning("This is a warning message.") + logger.error("This is an error message.") + logger.critical("This is a critical message.") + assert False class SimpleClass(unittest.TestCase): def test_simple_unit(self): + print("expected printed output, stdout") + print("expected printed output, stderr", file=sys.stderr) assert True
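
Reviewer note (not part of the patch): below is a minimal, standalone TypeScript sketch of how the new fixLogLinesNoTrailing helper in src/client/testing/testController/common/utils.ts differs from the existing fixLogLines. The two function bodies are copied from the diff above; the sample inputs and the closing remark about why the no-trailing variant is used are illustrative assumptions, not part of the change itself.

function fixLogLines(content: string): string {
    const lines = content.split(/\r?\n/g);
    return `${lines.join('\r\n')}\r\n`; // normalizes line endings to CRLF and always appends a trailing CRLF
}

function fixLogLinesNoTrailing(content: string): string {
    const lines = content.split(/\r?\n/g);
    return `${lines.join('\r\n')}`; // normalizes line endings to CRLF without appending an extra trailing CRLF
}

// fixLogLines('a\nb')           -> 'a\r\nb\r\n'
// fixLogLinesNoTrailing('a\nb') -> 'a\r\nb'
//
// The no-trailing variant is the one the stdout/stderr 'data' handlers in server.ts,
// pytestDiscoveryAdapter.ts, and pytestExecutionAdapter.ts call, presumably so that
// chunked subprocess output streamed to runInstance.appendOutput() and the output
// channel is not padded with extra blank lines between chunks.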