diff --git a/Nodejs/Product/Nodejs/TestFrameworks/ExportRunner/exportrunner.js b/Nodejs/Product/Nodejs/TestFrameworks/ExportRunner/exportrunner.js
index 88dc6b431..eec49a868 100644
--- a/Nodejs/Product/Nodejs/TestFrameworks/ExportRunner/exportrunner.js
+++ b/Nodejs/Product/Nodejs/TestFrameworks/ExportRunner/exportrunner.js
@@ -1,6 +1,22 @@
var fs = require('fs');
var path = require('path');
var vm = require('vm');
+var result = {
+ 'title': '',
+ 'passed': false,
+ 'stdOut': '',
+ 'stdErr': '',
+ 'time': 0
+};
+
+function append_stdout(string, encoding, fd) {
+ result.stdOut += string;
+}
+function append_stderr(string, encoding, fd) {
+ result.stdErr += string;
+}
+process.stdout.write = append_stdout;
+process.stderr.write = append_stderr;
var find_tests = function (testFileList, discoverResultFile) {
var debug;
@@ -50,8 +66,31 @@ var find_tests = function (testFileList, discoverResultFile) {
};
module.exports.find_tests = find_tests;
-var run_tests = function (testName, testFile) {
- var testCase = require(testFile);
- testCase[testName]();
+var run_tests = function (testCases, callback) {
+ var test_results = [];
+ for (var test in testCases) {
+ try {
+ var testCase = require(testCases[test].testFile);
+ result.title = testCases[test].testName;
+ result.time = Date.now();
+ testCase[testCases[test].testName]();
+ result.time = Date.now() - result.time;
+ result.passed = true;
+ } catch (err) {
+ result.time = Date.now() - result.time;
+ result.passed = false;
+ console.error(err.name);
+ console.error(err.message);
+ }
+ test_results.push(result);
+ result = {
+ 'title': '',
+ 'passed': false,
+ 'stdOut': '',
+ 'stdErr': '',
+ 'time': 0
+ };
+ }
+ callback(test_results);
};
module.exports.run_tests = run_tests;
\ No newline at end of file
diff --git a/Nodejs/Product/Nodejs/TestFrameworks/Tape/tape.js b/Nodejs/Product/Nodejs/TestFrameworks/Tape/tape.js
index 00320a7cc..bed15d75a 100644
--- a/Nodejs/Product/Nodejs/TestFrameworks/Tape/tape.js
+++ b/Nodejs/Product/Nodejs/TestFrameworks/Tape/tape.js
@@ -3,17 +3,17 @@ var EOL = require('os').EOL;
var fs = require('fs');
var path = require('path');
var result = {
- "title": "",
- "passed": false,
- "stdOut": "",
- "stdErr": ""
+ 'title': '',
+ 'passed': false,
+ 'stdOut': '',
+ 'stdErr': '',
+ 'time': 0
};
-process.stdout.write = function (string, encoding, fd) {
+function append_stdout(string, encoding, fd) {
result.stdOut += string;
}
-
-process.stderr.write = function (string, encoding, fd) {
+function append_stderr(string, encoding, fd) {
result.stdErr += string;
}
@@ -24,7 +24,7 @@ function find_tests(testFileList, discoverResultFile, projectFolder) {
}
var harness = test.getHarness({ exit: false });
- var tests = harness["_tests"];
+ var tests = harness['_tests'];
var count = 0;
var testList = [];
@@ -51,28 +51,43 @@ function find_tests(testFileList, discoverResultFile, projectFolder) {
};
module.exports.find_tests = find_tests;
-function run_tests(testName, testFile, workingFolder, projectFolder, callback) {
- var testCases = loadTestCases(testFile);
- result.title = testName;
+function run_tests(testInfo, callback) {
+ var testResults = [];
+ var testCases = loadTestCases(testInfo[0].testFile);
+ process.stdout.write = append_stdout;
+ process.stderr.write = append_stderr;
if (testCases === null) {
return;
}
- var test = findTape(projectFolder);
- if (test === null) {
+ var tape = findTape(testInfo[0].projectFolder);
+ if (tape === null) {
return;
}
- try {
- var harness = test.getHarness();
- harness.only(testName);
- result.passed = true;
- } catch (e) {
- logError("Error running test:", testName, "in", testFile, e);
- result.passed = false;
+ for (var test in testInfo) {
+ result.title = testInfo[test].testName;
+ try {
+ result.time = Date.now();
+ var harness = tape.getHarness();
+ harness(testInfo[test].testName);
+ result.passed = true;
+ } catch (e) {
+ result.passed = false;
+ logError('Error running test:', testInfo[test].testName, 'in', testInfo[test].testFile, e);
+ }
+ result.time = Date.now() - result.time;
+ testResults.push(result);
+ result = {
+ 'title': '',
+ 'passed': false,
+ 'stdOut': '',
+ 'stdErr': '',
+ 'time': 0
+ };
}
- callback(result);
+ callback(testResults);
}
module.exports.run_tests = run_tests;
diff --git a/Nodejs/Product/Nodejs/TestFrameworks/mocha/mocha.js b/Nodejs/Product/Nodejs/TestFrameworks/mocha/mocha.js
index 47168d429..e15b0fbd8 100644
--- a/Nodejs/Product/Nodejs/TestFrameworks/mocha/mocha.js
+++ b/Nodejs/Product/Nodejs/TestFrameworks/mocha/mocha.js
@@ -3,10 +3,11 @@ var EOL = require('os').EOL;
var fs = require('fs');
var path = require('path');
var result = {
- "title": "",
- "passed": false,
- "stdOut": "",
- "stdErr": ""
+ 'title': '',
+ 'passed': false,
+ 'stdOut': '',
+ 'stdErr': '',
+ 'time': 0
};
// Choose 'tap' rather than 'min' or 'xunit'. The reason is that
// 'min' produces undisplayable text to stdout and stderr under piped/redirect,
@@ -69,54 +70,65 @@ var find_tests = function (testFileList, discoverResultFile, projectFolder) {
};
module.exports.find_tests = find_tests;
-var run_tests = function (testName, testFile, workingFolder, projectFolder, callback) {
- //var testResults = [];
- var Mocha = detectMocha(projectFolder);
+var run_tests = function (testCases, callback) {
+ function escapeRegExp(string) {
+ return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&'); // $& means the whole matched string
+ }
+
+ var testResults = [];
+ var Mocha = detectMocha(testCases[0].projectFolder);
if (!Mocha) {
return;
}
- var mocha = initializeMocha(Mocha, projectFolder);
+ var mocha = initializeMocha(Mocha, testCases[0].projectFolder);
- //if (testName) {
- // if (typeof mocha.fgrep === 'function')
- // mocha.fgrep(testName); // since Mocha 3.0.0
- // else
- // mocha.grep(testName); // prior Mocha 3.0.0
- //}
+ var testGrepString = '^(' + testCases.map(function (testCase) {
+ return escapeRegExp(testCase.testName);
+ }).join('|') + ')$';
+
+ if (testGrepString) {
+ mocha.grep(new RegExp(testGrepString));
+ }
- mocha.addFile(testFile);
+ mocha.addFile(testCases[0].testFile);
// run tests
- var runner = mocha.run(function (code) { process.exit(code); });
+ var runner = mocha.run(function (code) { });
runner.on('start', function () {
});
runner.on('test', function (test) {
- result.title = test.title;
+ result.title = test.fullTitle();
+ result.time = Date.now();
process.stdout.write = append_stdout;
process.stderr.write = append_stderr;
});
runner.on('end', function () {
+ callback(testResults);
});
runner.on('pass', function (test) {
result.passed = true;
- callback(result);
+ result.time = Date.now() - result.time;
+ testResults.push(result);
result = {
'title': '',
'passed': false,
'stdOut': '',
- 'stdErr': ''
+ 'stdErr': '',
+ 'time': 0
}
});
runner.on('fail', function (test, err) {
result.passed = false;
- callback(result);
+ result.time = Date.now() - result.time;
+ testResults.push(result);
result = {
'title': '',
'passed': false,
'stdOut': '',
- 'stdErr': ''
+ 'stdErr': '',
+ 'time': 0
}
});
};
diff --git a/Nodejs/Product/Nodejs/TestFrameworks/run_tests.js b/Nodejs/Product/Nodejs/TestFrameworks/run_tests.js
index 6cd03870f..bc3a46f17 100644
--- a/Nodejs/Product/Nodejs/TestFrameworks/run_tests.js
+++ b/Nodejs/Product/Nodejs/TestFrameworks/run_tests.js
@@ -8,28 +8,34 @@ var rl = readline.createInterface({
});
rl.on('line', (line) => {
- var testInfo = JSON.parse(line);
+ var testCases = JSON.parse(line);
// get rid of leftover quotations from C# (necessary?)
- for(var s in testInfo) {
- testInfo[s] = testInfo[s].replace(/["]+/g, '');
+ for (var test in testCases) {
+ for (var value in testCases[test]) {
+ testCases[test][value] = testCases[test][value].replace(/["]+/g, '');
+ }
}
try {
- framework = require('./' + testInfo.framework + '/' + testInfo.framework + '.js');
+ framework = require('./' + testCases[0].framework + '/' + testCases[0].framework + '.js');
} catch (exception) {
- console.log("NTVS_ERROR:Failed to load TestFramework (" + process.argv[2] + "), " + exception);
+ console.log("NTVS_ERROR:Failed to load TestFramework (" + testCases[0].framework + "), " + exception);
process.exit(1);
}
- function sendResult(result) {
+ function returnResult(result) {
+ // unhook stdout and stderr
process.stdout.write = old_stdout;
process.stderr.write = old_stderr;
- console.log(JSON.stringify(result));
- //process.exit(0);
+ if (result) {
+ console.log(JSON.stringify(result));
+ }
+ // end process, tests are done running.
+ process.exit(0);
}
// run the test
- framework.run_tests(testInfo.testName, testInfo.testFile, testInfo.workingFolder, testInfo.projectFolder, sendResult);
+ framework.run_tests(testCases, returnResult);
// close readline interface
- //rl.close();
+ rl.close();
});
diff --git a/Nodejs/Product/TestAdapter/TestExecutor.cs b/Nodejs/Product/TestAdapter/TestExecutor.cs
index 7660e9357..a825a9fe6 100644
--- a/Nodejs/Product/TestAdapter/TestExecutor.cs
+++ b/Nodejs/Product/TestAdapter/TestExecutor.cs
@@ -29,7 +29,6 @@
using Microsoft.VisualStudio.TestPlatform.ObjectModel.Logging;
using Microsoft.VisualStudioTools;
using Microsoft.VisualStudioTools.Project;
-using Newtonsoft.Json.Linq;
using MSBuild = Microsoft.Build.Evaluation;
using Newtonsoft.Json;
@@ -41,11 +40,13 @@ public ResultObject() {
passed = false;
stdout = String.Empty;
stderr = String.Empty;
+ time = 0;
}
public string title { get; set; }
public bool passed { get; set; }
public string stdout { get; set; }
public string stderr { get; set; }
+ public int time { get; set; }
}
[ExtensionUri(TestExecutor.ExecutorUriString)]
@@ -70,7 +71,7 @@ public void Cancel() {
}
///
- /// This is the equivallent of "RunAll" functionality
+ /// This is the equivalent of "RunAll" functionality
///
/// Refers to the list of test sources passed to the test adapter from the client. (Client could be VS or command line)
/// Defines the settings related to the current run
@@ -94,17 +95,11 @@ public void RunTests(IEnumerable sources, IRunContext runContext, IFrame
// okay.
using (var app = VisualStudioApp.FromEnvironmentVariable(NodejsConstants.NodeToolsProcessIdEnvironmentVariable)) {
// .njsproj file path -> project settings
-
var projectToTests = new Dictionary>();
var sourceToSettings = new Dictionary();
NodejsProjectSettings settings = null;
// put tests into dictionary where key is their project working directory
- // NOTE: It seems to me that if we were to run all tests over multiple projects in a solution,
- // we would have to separate the tests by their project in order to launch the node process
- // correctly (to make sure we are using the correct working folder) and also to run
- // groups of tests by test suite.
-
foreach (var test in receiver.Tests) {
if (!sourceToSettings.TryGetValue(test.Source, out settings)) {
sourceToSettings[test.Source] = settings = LoadProjectSettings(test.Source);
@@ -130,141 +125,85 @@ public void RunTests(IEnumerable sources, IRunContext runContext, IFrame
// launch node process
LaunchNodeProcess(settings.WorkingDir, settings.NodeExePath, args);
// Run all test cases in a given project
- RunTestCases(entry.Value, runContext, frameworkHandle);
+ RunTestCases(entry.Value, runContext, frameworkHandle, settings);
// dispose node process
_nodeProcess.Dispose();
}
}
}
+ ///
+ /// This is the equivalent of "Run Selected Tests" functionality.
+ ///
+ /// The list of TestCases selected to run
+ /// Defines the settings related to the current run
+ /// Handle to framework. Used for recording results
public void RunTests(IEnumerable tests, IRunContext runContext, IFrameworkHandle frameworkHandle) {
ValidateArg.NotNull(tests, "tests");
ValidateArg.NotNull(runContext, "runContext");
ValidateArg.NotNull(frameworkHandle, "frameworkHandle");
_cancelRequested.Reset();
- bool hasExited = false;
- bool isNull = _nodeProcess == null;
- if (!isNull) {
- hasExited = _nodeProcess.HasExited;
- }
- frameworkHandle.SendMessage(TestMessageLevel.Informational, isNull.ToString());
- frameworkHandle.SendMessage(TestMessageLevel.Informational, hasExited.ToString());
- if ( _nodeProcess == null || _nodeProcess.HasExited ) {
- frameworkHandle.SendMessage(TestMessageLevel.Informational, "inside RunTests if statement");
- TestCase firstTest = tests.First();
- NodejsProjectSettings settings = LoadProjectSettings(firstTest.Source);
- List args = new List();
- args.AddRange(GetInterpreterArgs(firstTest, settings.WorkingDir, settings.ProjectRootDir));
- LaunchNodeProcess(settings.WorkingDir, settings.NodeExePath, args);
- }
- RunTestCases(tests, runContext, frameworkHandle);
+ TestCase firstTest = tests.First();
+ NodejsProjectSettings settings = LoadProjectSettings(firstTest.Source);
+ List args = new List();
+ args.AddRange(GetInterpreterArgs(firstTest, settings.WorkingDir, settings.ProjectRootDir));
+ LaunchNodeProcess(settings.WorkingDir, settings.NodeExePath, args);
+ // Run all test cases selected
+ RunTestCases(tests, runContext, frameworkHandle, settings);
_nodeProcess.Dispose();
}
- private void RunTestCases(IEnumerable tests, IRunContext runContext, IFrameworkHandle frameworkHandle) {
+ private void RunTestCases(IEnumerable tests, IRunContext runContext, IFrameworkHandle frameworkHandle, NodejsProjectSettings settings) {
// May be null, but this is handled by RunTestCase if it matters.
// No VS instance just means no debugging, but everything else is
// okay.
using (var app = VisualStudioApp.FromEnvironmentVariable(NodejsConstants.NodeToolsProcessIdEnvironmentVariable)) {
+ int port = 0;
// .njsproj file path -> project settings
var sourceToSettings = new Dictionary();
-
+ TestCaseObject testObject;
+ List testObjects = new List();
foreach (var test in tests) {
if (_cancelRequested.WaitOne(0)) {
break;
}
+ frameworkHandle.RecordStart(test);
- try {
- RunTestCase(app, frameworkHandle, runContext, test, sourceToSettings);
- } catch (Exception ex) {
- frameworkHandle.SendMessage(TestMessageLevel.Error, ex.ToString());
+ if (settings == null) {
+ frameworkHandle.SendMessage(
+ TestMessageLevel.Error,
+ "Unable to determine interpreter to use for " + test.Source);
+ frameworkHandle.RecordEnd(test, TestOutcome.Failed); continue;
}
- }
- }
- }
-
- private void KillNodeProcess() {
- lock (_syncObject) {
- if (_nodeProcess != null) {
- _nodeProcess.Kill();
- }
- }
- }
- private static int GetFreePort() {
- return Enumerable.Range(new Random().Next(49152, 65536), 60000).Except(
- from connection in IPGlobalProperties.GetIPGlobalProperties().GetActiveTcpConnections()
- select connection.LocalEndPoint.Port
- ).First();
- }
-
- private IEnumerable GetInterpreterArgs(TestCase test, string workingDir, string projectRootDir) {
- TestFrameworks.NodejsTestInfo testInfo = new TestFrameworks.NodejsTestInfo(test.FullyQualifiedName);
- TestFrameworks.FrameworkDiscover discover = new TestFrameworks.FrameworkDiscover();
- return discover.Get(testInfo.TestFramework).ArgumentsToRunTests(testInfo.TestName, testInfo.ModulePath, workingDir, projectRootDir);
- }
-
- private static IEnumerable GetDebugArgs(NodejsProjectSettings settings, out int port) {
- port = GetFreePort();
-
- return new[] {
- "--debug-brk=" + port.ToString()
- };
- }
-
- private void RunTestCase(VisualStudioApp app, IFrameworkHandle frameworkHandle, IRunContext runContext, TestCase test, Dictionary sourceToSettings) {
- var testResult = new TestResult(test);
- frameworkHandle.RecordStart(test);
- testResult.StartTime = DateTimeOffset.Now;
- NodejsProjectSettings settings;
- if (!sourceToSettings.TryGetValue(test.Source, out settings)) {
- sourceToSettings[test.Source] = settings = LoadProjectSettings(test.Source);
- }
- if (settings == null) {
- frameworkHandle.SendMessage(
- TestMessageLevel.Error,
- "Unable to determine interpreter to use for " + test.Source);
- RecordEnd(
- frameworkHandle,
- test,
- testResult,
- null,
- "Unable to determine interpreter to use for " + test.Source,
- TestOutcome.Failed);
- return;
- }
-
- NodejsTestInfo testInfo = new NodejsTestInfo(test.FullyQualifiedName);
- List args = new List();
- int port = 0;
- if (runContext.IsBeingDebugged && app != null) {
- app.GetDTE().Debugger.DetachAll();
- args.AddRange(GetDebugArgs(settings, out port));
- }
+ NodejsTestInfo testInfo = new NodejsTestInfo(test.FullyQualifiedName);
+ List args = new List();
+ if (runContext.IsBeingDebugged && app != null) {
+ app.GetDTE().Debugger.DetachAll();
+ args.AddRange(GetDebugArgs(settings, out port));
+ }
- var workingDir = Path.GetDirectoryName(CommonUtils.GetAbsoluteFilePath(settings.WorkingDir, testInfo.ModulePath));
- args.AddRange(GetInterpreterArgs(test, workingDir, settings.ProjectRootDir));
+ var workingDir = Path.GetDirectoryName(CommonUtils.GetAbsoluteFilePath(settings.WorkingDir, testInfo.ModulePath));
+ args.AddRange(GetInterpreterArgs(test, workingDir, settings.ProjectRootDir));
- //Debug.Fail("attach debugger");
- if (!File.Exists(settings.NodeExePath)) {
- frameworkHandle.SendMessage(TestMessageLevel.Error, "Interpreter path does not exist: " + settings.NodeExePath);
- return;
- }
+ //Debug.Fail("attach debugger");
+ if (!File.Exists(settings.NodeExePath)) {
+ frameworkHandle.SendMessage(TestMessageLevel.Error, "Interpreter path does not exist: " + settings.NodeExePath);
+ return;
+ }
+ testObject = new TestCaseObject(args[1], args[2], args[3], args[4], args[5]);
+ testObjects.Add(testObject);
+ }
- lock (_syncObject) {
-#if DEBUG
- frameworkHandle.SendMessage(TestMessageLevel.Informational, "cd " + workingDir);
- //frameworkHandle.SendMessage(TestMessageLevel.Informational, _nodeProcess.Arguments);
-#endif
- // send test to run_tests.js
- TestCaseObject testObject = new TestCaseObject(args[1], args[2], args[3], args[4], args[5]);
if (!_nodeProcess.HasExited) {
- _nodeProcess.StandardInput.WriteLine(Newtonsoft.Json.JsonConvert.SerializeObject(testObject));
+ // send testcases to run_tests.js
+ _nodeProcess.StandardInput.WriteLine(JsonConvert.SerializeObject(testObjects));
_nodeProcess.StandardInput.Close();
- _nodeProcess.WaitForExit(5000);
- }
+ _nodeProcess.WaitForExit();
+ }
+
if (runContext.IsBeingDebugged && app != null) {
try {
//the '#ping=0' is a special flag to tell VS node debugger not to connect to the port,
@@ -289,38 +228,65 @@ private void RunTestCase(VisualStudioApp app, IFrameworkHandle frameworkHandle,
#endif
}
}
- var result = GetTestResultFromProcess(_nodeProcess.StandardOutput);
+ var results = GetTestResultFromProcess(_nodeProcess.StandardOutput);
bool runCancelled = _cancelRequested.WaitOne(0);
- if (result != null) {
- RecordEnd(frameworkHandle, test, testResult,
- result.stdout,
- result.stderr,
- (!runCancelled && result.passed) ? TestOutcome.Passed : TestOutcome.Failed);
- } else {
- frameworkHandle.SendMessage(TestMessageLevel.Error, "Failed to obtain result for " + test.DisplayName + " from TestRunner");
+ if (results != null) {
+ RecordEnd(frameworkHandle, tests, results);
+ }
+ else {
+ frameworkHandle.SendMessage(TestMessageLevel.Error, "Failed to obtain results");
+ }
+ }
+
+ private void KillNodeProcess() {
+ lock (_syncObject) {
+ if (_nodeProcess != null) {
+ _nodeProcess.Kill();
+ }
}
}
- private ResultObject ParseTestResult(string line) {
- ResultObject jsonResult = null;
+ private static int GetFreePort() {
+ return Enumerable.Range(new Random().Next(49152, 65536), 60000).Except(
+ from connection in IPGlobalProperties.GetIPGlobalProperties().GetActiveTcpConnections()
+ select connection.LocalEndPoint.Port
+ ).First();
+ }
+
+ private IEnumerable GetInterpreterArgs(TestCase test, string workingDir, string projectRootDir) {
+ TestFrameworks.NodejsTestInfo testInfo = new TestFrameworks.NodejsTestInfo(test.FullyQualifiedName);
+ TestFrameworks.FrameworkDiscover discover = new TestFrameworks.FrameworkDiscover();
+ return discover.Get(testInfo.TestFramework).ArgumentsToRunTests(testInfo.TestName, testInfo.ModulePath, workingDir, projectRootDir);
+ }
+
+ private static IEnumerable GetDebugArgs(NodejsProjectSettings settings, out int port) {
+ port = GetFreePort();
+
+ return new[] {
+ "--debug-brk=" + port.ToString()
+ };
+ }
+
+ private List ParseTestResult(string line) {
+ List jsonResults = null;
try {
- jsonResult = JsonConvert.DeserializeObject(line);
+ jsonResults = JsonConvert.DeserializeObject>(line);
} catch (Exception) { }
- return jsonResult;
+ return jsonResults;
}
- private ResultObject GetTestResultFromProcess(StreamReader sr) {
- ResultObject result = null;
+ private List GetTestResultFromProcess(StreamReader sr) {
+ List results = null;
while (sr.Peek() >= 0) {
- result = ParseTestResult(sr.ReadLine());
- if (result == null) {
+ results = ParseTestResult(sr.ReadLine());
+ if (results == null) {
continue;
}
break;
}
- return result;
+ return results;
}
private void LaunchNodeProcess(string workingDir, string nodeExePath, List args) {
@@ -357,16 +323,25 @@ private NodejsProjectSettings LoadProjectSettings(string projectFile) {
return projSettings;
}
- private static void RecordEnd(IFrameworkHandle frameworkHandle, TestCase test, TestResult result, string stdout, string stderr, TestOutcome outcome) {
- result.EndTime = DateTimeOffset.Now;
- result.Duration = result.EndTime - result.StartTime;
- result.Outcome = outcome;
- result.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, stdout));
- result.Messages.Add(new TestResultMessage(TestResultMessage.StandardErrorCategory, stderr));
- result.Messages.Add(new TestResultMessage(TestResultMessage.AdditionalInfoCategory, stderr));
-
- frameworkHandle.RecordResult(result);
- frameworkHandle.RecordEnd(test, outcome);
+ private static void RecordEnd(IFrameworkHandle frameworkHandle, IEnumerable tests, List results) {
+ if (tests.Count() == results.Count()) {
+ TestResult result;
+ foreach(var res in results) {
+ // If tests were run using "Run Selected Tests", the `tests` and `results` lists
+ // may not have the tests in the same order --so we query the test title from the `tests` list.
+ var test = tests.Where(n => n.DisplayName == res.title);
+ if(test.Count() == 1) {
+ result = new TestResult(test.First());
+ result.Outcome = res.passed ? TestOutcome.Passed : TestOutcome.Failed;
+ result.Duration = new TimeSpan(0, 0, 0, 0, res.time);
+ result.Messages.Add(new TestResultMessage(TestResultMessage.StandardOutCategory, res.stdout));
+ result.Messages.Add(new TestResultMessage(TestResultMessage.StandardErrorCategory, res.stderr));
+ result.Messages.Add(new TestResultMessage(TestResultMessage.AdditionalInfoCategory, res.stderr));
+ frameworkHandle.RecordResult(result);
+ frameworkHandle.RecordEnd(test.First(), result.Outcome);
+ }
+ }
+ }
}
}
}