Skip to content

Commit

Permalink
Refactor StandaloneTestStrategy
Browse files Browse the repository at this point in the history
- remove unnecessary throw in finalizeTest, simplify throws clause
  (TestRunnerAction already throws if the last attempt was unsuccessful)
- remove finally block in executeTestAttempt; don't try to recover from
  non-SpawnExecException, they are catastrophic anyway
- bubble up any IOException thrown in executeTestAttempt; the
  TestRunnerAction already has better handling for them
- Move the prepareFileSystem call to the TestRunnerSpawn
- Clean up createDirectoryAndParents calls
- Move touchFile(out) out of the finally block; it can throw
  IOException, which would cause the original exception to be dropped

- Add a prepareFileSystem overload - this is only called from Google's
  implementation of TestStrategy, but allows us to increase consistency
  between the implementations

Note that we no longer include test.xml generation in the runtime of the
test process as measured locally (if the SpawnResult does not have wall
time set, which should usually be the case).

This is in preparation for async test execution.

Progress on #6394.

PiperOrigin-RevId: 238639508
  • Loading branch information
ulfjack authored and copybara-github committed Mar 15, 2019
1 parent c602c41 commit d23e577
Show file tree
Hide file tree
Showing 3 changed files with 102 additions and 114 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ FailedAttemptResult finalizeFailedTestAttempt(TestAttemptResult testAttemptResul
/** Post the final test result based on the last attempt and the list of failed attempts. */
void finalizeTest(
TestAttemptResult lastTestAttemptResult, List<FailedAttemptResult> failedAttempts)
throws IOException, ExecException;
throws IOException;

/**
* Return a {@link TestRunnerSpawn} object if test fallback is enabled, or {@code null}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Iterables;
import com.google.common.collect.Lists;
import com.google.common.io.ByteStreams;
import com.google.devtools.build.lib.actions.ActionExecutionContext;
Expand All @@ -30,7 +29,6 @@
import com.google.devtools.build.lib.actions.Spawn;
import com.google.devtools.build.lib.actions.SpawnActionContext;
import com.google.devtools.build.lib.actions.SpawnResult;
import com.google.devtools.build.lib.actions.TestExecException;
import com.google.devtools.build.lib.analysis.RunfilesSupplierImpl;
import com.google.devtools.build.lib.analysis.actions.SpawnAction;
import com.google.devtools.build.lib.analysis.test.TestActionContext;
Expand Down Expand Up @@ -232,13 +230,8 @@ private StandaloneTestResult executeTestAttempt(
TestRunnerAction action,
Spawn spawn,
ActionExecutionContext actionExecutionContext,
Path execRoot,
Path coverageDir,
Path tmpDir,
Path workingDirectory)
throws IOException, ExecException, InterruptedException {
prepareFileSystem(action, tmpDir, coverageDir, workingDirectory);

Path execRoot)
throws ExecException, IOException, InterruptedException {
Closeable streamed = null;
Path testLogPath = actionExecutionContext.getInputPath(action.getTestLog());
// We have two protos to represent test attempts:
Expand All @@ -264,100 +257,96 @@ private StandaloneTestResult executeTestAttempt(
Path out = actionExecutionContext.getInputPath(action.getTestLog());
Path err = action.resolve(execRoot).getTestStderr();
try (FileOutErr testOutErr = new FileOutErr(out, err)) {
if (executionOptions.testOutput.equals(TestOutputFormat.STREAMED)) {
streamed =
new StreamedTestOutput(
Reporter.outErrForReporter(actionExecutionContext.getEventHandler()), testLogPath);
}
try {
if (executionOptions.testOutput.equals(TestOutputFormat.STREAMED)) {
streamed =
new StreamedTestOutput(
Reporter.outErrForReporter(actionExecutionContext.getEventHandler()),
testLogPath);
}
try {
spawnResults.addAll(
spawnActionContext.exec(spawn, actionExecutionContext.withFileOutErr(testOutErr)));
builder
.setTestPassed(true)
.setStatus(BlazeTestStatus.PASSED)
.setPassedLog(testLogPath.getPathString());
} catch (SpawnExecException e) {
// If this method returns normally, then the higher level will rerun the test (up to
// --flaky_test_attempts times).
if (e.isCatastrophic()) {
// Rethrow as the error was catastrophic and thus the build has to be halted.
throw e;
}
if (!e.getSpawnResult().setupSuccess()) {
// Rethrow as the test could not be run and thus there's no point in retrying.
throw e;
}
builder
.setTestPassed(false)
.setStatus(e.hasTimedOut() ? BlazeTestStatus.TIMEOUT : BlazeTestStatus.FAILED)
.addFailedLogs(testLogPath.getPathString());
spawnResults.add(e.getSpawnResult());
} finally {
if (!testOutErr.hasRecordedOutput()) {
// Make sure that the test.log exists.
FileSystemUtils.touchFile(out);
}
spawnResults.addAll(
spawnActionContext.exec(spawn, actionExecutionContext.withFileOutErr(testOutErr)));
builder
.setTestPassed(true)
.setStatus(BlazeTestStatus.PASSED)
.setPassedLog(testLogPath.getPathString());
} catch (SpawnExecException e) {
// If this method returns normally, then the higher level will rerun the test (up to
// --flaky_test_attempts times).
if (e.isCatastrophic()) {
// Rethrow as the error was catastrophic and thus the build has to be halted.
throw e;
}
// Append any error output to the test.log. This is very rare.
appendStderr(out, err);
// If the test did not create a test.xml, and --experimental_split_xml_generation is
// enabled, then we run a separate action to create a test.xml from test.log.
if (executionOptions.splitXmlGeneration
&& action.getTestLog().getPath().exists()
&& !xmlOutputPath.exists()) {
SpawnResult result = Iterables.getOnlyElement(spawnResults);
Spawn xmlGeneratingSpawn = createXmlGeneratingSpawn(action, result);
// We treat all failures to generate the test.xml here as catastrophic, and won't rerun
// the test if this fails. We redirect the output to a temporary file.
try (FileOutErr xmlSpawnOutErr = actionExecutionContext.getFileOutErr().childOutErr()) {
spawnResults.addAll(
spawnActionContext.exec(
xmlGeneratingSpawn, actionExecutionContext.withFileOutErr(xmlSpawnOutErr)));
}
}
} finally {
long endTime = actionExecutionContext.getClock().currentTimeMillis();
long duration = endTime - startTime;
// If execution fails with an exception other SpawnExecException, there is no result here.
if (!spawnResults.isEmpty()) {
// The SpawnResult of a remotely cached or remotely executed action may not have walltime
// set. We fall back to the time measured here for backwards compatibility.
SpawnResult primaryResult = spawnResults.iterator().next();
duration = primaryResult.getWallTime().orElse(Duration.ofMillis(duration)).toMillis();
extractExecutionInfo(primaryResult, builder, executionInfo);
}

builder.setStartTimeMillisEpoch(startTime);
builder.addTestTimes(duration);
builder.addTestProcessTimes(duration);
builder.setRunDurationMillis(duration);
if (streamed != null) {
streamed.close();
if (!e.getSpawnResult().setupSuccess()) {
// Rethrow as the test could not be run and thus there's no point in retrying.
throw e;
}
builder
.setTestPassed(false)
.setStatus(e.hasTimedOut() ? BlazeTestStatus.TIMEOUT : BlazeTestStatus.FAILED)
.addFailedLogs(testLogPath.getPathString());
spawnResults.add(e.getSpawnResult());
}

TestCase details = parseTestResult(xmlOutputPath);
if (details != null) {
builder.setTestCase(details);
if (!testOutErr.hasRecordedOutput()) {
// Make sure that the test.log exists.
FileSystemUtils.touchFile(out);
}
// Append any error output to the test.log. This is very rare.
appendStderr(out, err);
}

long endTime = actionExecutionContext.getClock().currentTimeMillis();
long duration = endTime - startTime;
// SpawnActionContext guarantees the first entry to correspond to the spawn passed in (there may
// be additional entries due to tree artifact handling).
SpawnResult primaryResult = spawnResults.get(0);

// The SpawnResult of a remotely cached or remotely executed action may not have walltime
// set. We fall back to the time measured here for backwards compatibility.
duration = primaryResult.getWallTime().orElse(Duration.ofMillis(duration)).toMillis();
extractExecutionInfo(primaryResult, builder, executionInfo);

builder.setStartTimeMillisEpoch(startTime);
builder.addTestTimes(duration);
builder.addTestProcessTimes(duration);
builder.setRunDurationMillis(duration);
if (streamed != null) {
streamed.close();
}

if (action.isCoverageMode()) {
builder.setHasCoverage(true);
// If the test did not create a test.xml, and --experimental_split_xml_generation is enabled,
// then we run a separate action to create a test.xml from test.log. We do this as a spawn
// rather than doing it locally in-process, as the test.log file may only exist remotely (when
// remote execution is enabled), and we do not want to have to download it.
if (executionOptions.splitXmlGeneration
&& action.getTestLog().getPath().exists()
&& !xmlOutputPath.exists()) {
Spawn xmlGeneratingSpawn = createXmlGeneratingSpawn(action, primaryResult);
// We treat all failures to generate the test.xml here as catastrophic, and won't rerun
// the test if this fails. We redirect the output to a temporary file.
try (FileOutErr xmlSpawnOutErr = actionExecutionContext.getFileOutErr().childOutErr()) {
spawnResults.addAll(
spawnActionContext.exec(
xmlGeneratingSpawn, actionExecutionContext.withFileOutErr(xmlSpawnOutErr)));
}
}

return StandaloneTestResult.builder()
.setSpawnResults(spawnResults)
// We return the TestResultData.Builder rather than the finished TestResultData instance,
// as we may have to rename the output files in case the test needs to be rerun (if it
// failed here _and_ is marked flaky _and_ the number of flaky attempts is larger than 1).
.setTestResultDataBuilder(builder)
.setExecutionInfo(executionInfo.build())
.build();
} catch (IOException e) {
throw new TestExecException(e.getMessage());
TestCase details = parseTestResult(xmlOutputPath);
if (details != null) {
builder.setTestCase(details);
}

if (action.isCoverageMode()) {
builder.setHasCoverage(true);
}

return StandaloneTestResult.builder()
.setSpawnResults(spawnResults)
// We return the TestResultData.Builder rather than the finished TestResultData instance,
// as we may have to rename the output files in case the test needs to be rerun (if it
// failed here _and_ is marked flaky _and_ the number of flaky attempts is larger than 1).
.setTestResultDataBuilder(builder)
.setExecutionInfo(executionInfo.build())
.build();
}

/** In rare cases, we might write something to stderr. Append it to the real test.log. */
Expand Down Expand Up @@ -482,7 +471,7 @@ private final void finalizeTest(
ActionExecutionContext actionExecutionContext,
StandaloneTestResult standaloneTestResult,
List<FailedAttemptResult> failedAttempts)
throws IOException, ExecException {
throws IOException {
TestResultData.Builder dataBuilder = standaloneTestResult.testResultDataBuilder();
for (FailedAttemptResult failedAttempt : failedAttempts) {
TestResultData failedAttemptData =
Expand Down Expand Up @@ -514,12 +503,6 @@ private final void finalizeTest(
result,
result.getTestLogPath());
// TODO(bazel-team): handle --test_output=errors, --test_output=all.

if (!executionOptions.testKeepGoing
&& data.getStatus() != BlazeTestStatus.FLAKY
&& data.getStatus() != BlazeTestStatus.PASSED) {
throw new TestExecException("Test failed: aborting");
}
}

@Override
Expand Down Expand Up @@ -563,14 +546,8 @@ private final class StandaloneTestRunnerSpawn implements TestRunnerSpawn {
}

public StandaloneTestResult execute() throws InterruptedException, IOException, ExecException {
return executeTestAttempt(
testAction,
spawn,
actionExecutionContext,
execRoot,
coverageDir,
tmpDir,
workingDirectory);
prepareFileSystem(testAction, tmpDir, coverageDir, workingDirectory);
return executeTestAttempt(testAction, spawn, actionExecutionContext, execRoot);
}

@Override
Expand All @@ -588,7 +565,7 @@ public FailedAttemptResult finalizeFailedTestAttempt(
@Override
public void finalizeTest(
TestAttemptResult finalResult, List<FailedAttemptResult> failedAttempts)
throws IOException, ExecException {
throws IOException {
StandaloneTestStrategy.this.finalizeTest(
testAction, actionExecutionContext, (StandaloneTestResult) finalResult, failedAttempts);
}
Expand Down
17 changes: 14 additions & 3 deletions src/main/java/com/google/devtools/build/lib/exec/TestStrategy.java
Original file line number Diff line number Diff line change
Expand Up @@ -70,13 +70,24 @@ protected void prepareFileSystem(
recreateDirectory(coverageDir);
}
recreateDirectory(tmpDir);
FileSystemUtils.createDirectoryAndParents(workingDirectory);
workingDirectory.createDirectoryAndParents();
}

/**
* Ensures that all directories used to run test are in the correct state and their content will
* not result in stale files. Only use this if no local tmp and working directory are required.
*/
protected void prepareFileSystem(TestRunnerAction testAction, Path coverageDir)
throws IOException {
if (testAction.isCoverageMode()) {
recreateDirectory(coverageDir);
}
}

/** Removes directory if it exists and recreates it. */
protected void recreateDirectory(Path directory) throws IOException {
private void recreateDirectory(Path directory) throws IOException {
FileSystemUtils.deleteTree(directory);
FileSystemUtils.createDirectoryAndParents(directory);
directory.createDirectoryAndParents();
}

/** An enum for specifying different formats of test output. */
Expand Down

0 comments on commit d23e577

Please sign in to comment.