Fix compaction status API response (#17006)
Description:
#16768 introduces new compaction APIs on the Overlord, `/compaction/status` and `/compaction/progress` (under `/druid/indexer/v1`). However, the corresponding `OverlordClient` methods do not return objects compatible with the responses of the actual endpoints defined in `OverlordCompactionResource`.

This patch makes the client return types and the endpoint response entities consistent.

Changes:
- Add `CompactionStatusResponse` and `CompactionProgressResponse` (see the sketch below).
- Use these as the return types of the `OverlordClient` methods and as the response entities in `OverlordCompactionResource`.
- Add `SupervisorCleanupModule`, bound on the Coordinator, to perform cleanup of supervisors. Without this module, the Coordinator cannot deserialize compaction supervisors.
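
For orientation, here is a minimal sketch of the two new response wrappers. It assumes they are plain Jackson-serializable holders whose property names match the JSON keys produced by the old map-based responses (`latestStatus` and `remainingSegmentSize`); the getter names, annotations, and omitted `equals`/`hashCode` are assumptions, not taken from this diff.

```java
// Sketch only: each class would live in its own file under
// org.apache.druid.server.compaction. Field and property names mirror the keys
// of the old Collections.singletonMap responses; everything else is assumed.
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import org.apache.druid.server.coordinator.AutoCompactionSnapshot;

import java.util.Collection;

public class CompactionStatusResponse
{
  private final Collection<AutoCompactionSnapshot> latestStatus;

  @JsonCreator
  public CompactionStatusResponse(
      @JsonProperty("latestStatus") Collection<AutoCompactionSnapshot> latestStatus
  )
  {
    this.latestStatus = latestStatus;
  }

  @JsonProperty
  public Collection<AutoCompactionSnapshot> getLatestStatus()
  {
    return latestStatus;
  }

  // equals() and hashCode() omitted for brevity; the real classes need them
  // for the Assert.assertEquals calls in the tests below to pass.
}

public class CompactionProgressResponse
{
  private final long remainingSegmentSize;

  @JsonCreator
  public CompactionProgressResponse(
      @JsonProperty("remainingSegmentSize") long remainingSegmentSize
  )
  {
    this.remainingSegmentSize = remainingSegmentSize;
  }

  @JsonProperty
  public long getRemainingSegmentSize()
  {
    return remainingSegmentSize;
  }
}
```

On the wire these serialize to `{"latestStatus": [...]}` and `{"remainingSegmentSize": <bytes>}`, which matches the shape the old map-based responses produced.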
kfaraz authored Sep 5, 2024
1 parent b4d83a8 commit ba6f804
Showing 15 changed files with 528 additions and 93 deletions.
@@ -45,7 +45,8 @@
import org.apache.druid.metadata.LockFilterPolicy;
import org.apache.druid.rpc.ServiceRetryPolicy;
import org.apache.druid.rpc.indexing.OverlordClient;
import org.apache.druid.server.coordinator.AutoCompactionSnapshot;
import org.apache.druid.server.compaction.CompactionProgressResponse;
import org.apache.druid.server.compaction.CompactionStatusResponse;
import org.joda.time.Interval;

import javax.annotation.Nullable;
@@ -238,13 +239,13 @@ public ListenableFuture<List<IndexingWorkerInfo>> getWorkers()
}

@Override
public ListenableFuture<List<AutoCompactionSnapshot>> getCompactionSnapshots(@Nullable String dataSource)
public ListenableFuture<CompactionStatusResponse> getCompactionSnapshots(@Nullable String dataSource)
{
throw new UnsupportedOperationException();
}

@Override
public ListenableFuture<Long> getBytesAwaitingCompaction(String dataSource)
public ListenableFuture<CompactionProgressResponse> getBytesAwaitingCompaction(String dataSource)
{
throw new UnsupportedOperationException();
}
@@ -19,13 +19,18 @@

package org.apache.druid.indexing.overlord.http;

import com.google.common.collect.ImmutableMap;
import com.google.inject.Inject;
import com.sun.jersey.spi.container.ResourceFilters;
import org.apache.druid.error.DruidException;
import org.apache.druid.error.InvalidInput;
import org.apache.druid.error.NotFound;
import org.apache.druid.indexing.compact.CompactionScheduler;
import org.apache.druid.server.compaction.CompactionProgressResponse;
import org.apache.druid.server.compaction.CompactionStatusResponse;
import org.apache.druid.server.coordinator.AutoCompactionSnapshot;
import org.apache.druid.server.coordinator.ClusterCompactionConfig;
import org.apache.druid.server.coordinator.CompactionSupervisorConfig;
import org.apache.druid.server.http.ServletResourceUtils;
import org.apache.druid.server.http.security.StateResourceFilter;

import javax.ws.rs.Consumes;
@@ -40,8 +45,8 @@
import java.util.Collections;

/**
* Contains the same logic as {@code CompactionResource} but the APIs are served
* by {@link CompactionScheduler} instead of {@code DruidCoordinator}.
* Contains the same logic as {@code CoordinatorCompactionResource} but the APIs
* are served by {@link CompactionScheduler} instead of {@code DruidCoordinator}.
*/
@Path("/druid/indexer/v1/compaction")
public class OverlordCompactionResource
@@ -81,18 +86,14 @@ public Response getCompactionProgress(
}

if (dataSource == null || dataSource.isEmpty()) {
return Response.status(Response.Status.BAD_REQUEST)
.entity(Collections.singletonMap("error", "No DataSource specified"))
.build();
return ServletResourceUtils.buildErrorResponseFrom(InvalidInput.exception("No DataSource specified"));
}

final AutoCompactionSnapshot snapshot = scheduler.getCompactionSnapshot(dataSource);
if (snapshot == null) {
return Response.status(Response.Status.NOT_FOUND)
.entity(Collections.singletonMap("error", "Unknown DataSource"))
.build();
return ServletResourceUtils.buildErrorResponseFrom(NotFound.exception("Unknown DataSource"));
} else {
return Response.ok(Collections.singletonMap("remainingSegmentSize", snapshot.getBytesAwaitingCompaction()))
return Response.ok(new CompactionProgressResponse(snapshot.getBytesAwaitingCompaction()))
.build();
}
}
@@ -115,13 +116,11 @@ public Response getCompactionSnapshots(
} else {
AutoCompactionSnapshot autoCompactionSnapshot = scheduler.getCompactionSnapshot(dataSource);
if (autoCompactionSnapshot == null) {
return Response.status(Response.Status.NOT_FOUND)
.entity(Collections.singletonMap("error", "Unknown DataSource"))
.build();
return ServletResourceUtils.buildErrorResponseFrom(NotFound.exception("Unknown DataSource"));
}
snapshots = Collections.singleton(autoCompactionSnapshot);
}
return Response.ok(Collections.singletonMap("latestStatus", snapshots)).build();
return Response.ok(new CompactionStatusResponse(snapshots)).build();
}

@POST
@@ -139,12 +138,12 @@ public Response simulateRunWithConfigUpdate(

private Response buildErrorResponseIfSchedulerDisabled()
{
return Response.status(Response.Status.SERVICE_UNAVAILABLE).entity(
ImmutableMap.of(
"error",
"Compaction Supervisors are disabled on the Overlord."
+ " Use Coordinator APIs to fetch compaction status."
)
).build();
final String msg = "Compaction Supervisors are disabled on the Overlord."
+ " Use Coordinator APIs to fetch compaction status.";
return ServletResourceUtils.buildErrorResponseFrom(
DruidException.forPersona(DruidException.Persona.USER)
.ofCategory(DruidException.Category.UNSUPPORTED)
.build(msg)
);
}
}
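
One note on the error handling above: instead of hand-built map entities, errors now go through `ServletResourceUtils.buildErrorResponseFrom` with a `DruidException`, so the HTTP status follows the exception category (invalid input → 400, not found → 404, unsupported → 501, as asserted in the tests below). An illustrative sketch, using only the calls that appear in this diff:

```java
// Illustrative only: rebuilds the "scheduler disabled" error from
// buildErrorResponseIfSchedulerDisabled() and converts it into a JAX-RS Response.
// The UNSUPPORTED category is what the tests below map to HTTP 501.
import org.apache.druid.error.DruidException;
import org.apache.druid.server.http.ServletResourceUtils;

import javax.ws.rs.core.Response;

class SchedulerDisabledErrorSketch
{
  static Response build()
  {
    final DruidException disabled = DruidException
        .forPersona(DruidException.Persona.USER)
        .ofCategory(DruidException.Category.UNSUPPORTED)
        .build(
            "Compaction Supervisors are disabled on the Overlord."
            + " Use Coordinator APIs to fetch compaction status."
        );
    return ServletResourceUtils.buildErrorResponseFrom(disabled);
  }
}
```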
@@ -0,0 +1,232 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.apache.druid.indexing.overlord.http;

import com.google.common.collect.ImmutableMap;
import org.apache.druid.error.DruidException;
import org.apache.druid.error.DruidExceptionMatcher;
import org.apache.druid.error.ErrorResponse;
import org.apache.druid.indexing.compact.CompactionScheduler;
import org.apache.druid.segment.TestDataSource;
import org.apache.druid.server.compaction.CompactionProgressResponse;
import org.apache.druid.server.compaction.CompactionStatistics;
import org.apache.druid.server.compaction.CompactionStatusResponse;
import org.apache.druid.server.coordinator.AutoCompactionSnapshot;
import org.apache.druid.server.coordinator.CompactionSupervisorConfig;
import org.easymock.EasyMock;
import org.hamcrest.MatcherAssert;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import javax.ws.rs.core.Response;
import java.util.Collections;
import java.util.Map;

public class OverlordCompactionResourceTest
{
private static final CompactionSupervisorConfig SUPERVISOR_ENABLED
= new CompactionSupervisorConfig(true);
private static final CompactionSupervisorConfig SUPERVISOR_DISABLED
= new CompactionSupervisorConfig(false);

private CompactionScheduler scheduler;

@Before
public void setUp()
{
scheduler = EasyMock.createStrictMock(CompactionScheduler.class);
}

@After
public void tearDown()
{
EasyMock.verify(scheduler);
}

@Test
public void testGetCompactionSnapshotWithEmptyDatasource()
{
final Map<String, AutoCompactionSnapshot> allSnapshots = ImmutableMap.of(
TestDataSource.WIKI,
AutoCompactionSnapshot.builder(TestDataSource.WIKI).build()
);

EasyMock.expect(scheduler.getAllCompactionSnapshots())
.andReturn(allSnapshots).once();
EasyMock.replay(scheduler);

final Response response = new OverlordCompactionResource(SUPERVISOR_ENABLED, scheduler)
.getCompactionSnapshots("");
Assert.assertEquals(200, response.getStatus());
Assert.assertEquals(
new CompactionStatusResponse(allSnapshots.values()),
response.getEntity()
);
}

@Test
public void testGetCompactionSnapshotWithNullDatasource()
{
final Map<String, AutoCompactionSnapshot> allSnapshots = ImmutableMap.of(
TestDataSource.WIKI,
AutoCompactionSnapshot.builder(TestDataSource.WIKI).build()
);

EasyMock.expect(scheduler.getAllCompactionSnapshots())
.andReturn(allSnapshots).once();
EasyMock.replay(scheduler);

final Response response = new OverlordCompactionResource(SUPERVISOR_ENABLED, scheduler)
.getCompactionSnapshots(null);
Assert.assertEquals(200, response.getStatus());
Assert.assertEquals(
new CompactionStatusResponse(allSnapshots.values()),
response.getEntity()
);
}

@Test
public void testGetCompactionSnapshotWithValidDatasource()
{
final AutoCompactionSnapshot snapshot = AutoCompactionSnapshot.builder(TestDataSource.WIKI).build();

EasyMock.expect(scheduler.getCompactionSnapshot(TestDataSource.WIKI))
.andReturn(snapshot).once();
EasyMock.replay(scheduler);

final Response response = new OverlordCompactionResource(SUPERVISOR_ENABLED, scheduler)
.getCompactionSnapshots(TestDataSource.WIKI);
Assert.assertEquals(200, response.getStatus());
Assert.assertEquals(
new CompactionStatusResponse(Collections.singleton(snapshot)),
response.getEntity()
);
}

@Test
public void testGetCompactionSnapshotWithInvalidDatasource()
{
EasyMock.expect(scheduler.getCompactionSnapshot(TestDataSource.KOALA))
.andReturn(null).once();
EasyMock.replay(scheduler);

final Response response = new OverlordCompactionResource(SUPERVISOR_ENABLED, scheduler)
.getCompactionSnapshots(TestDataSource.KOALA);
Assert.assertEquals(404, response.getStatus());
}

@Test
public void testGetProgressForValidDatasource()
{
final AutoCompactionSnapshot.Builder snapshotBuilder
= AutoCompactionSnapshot.builder(TestDataSource.WIKI);
snapshotBuilder.incrementWaitingStats(CompactionStatistics.create(100L, 10L, 1L));
final AutoCompactionSnapshot snapshot = snapshotBuilder.build();

EasyMock.expect(scheduler.getCompactionSnapshot(TestDataSource.WIKI))
.andReturn(snapshot).once();
EasyMock.replay(scheduler);

final Response response = new OverlordCompactionResource(SUPERVISOR_ENABLED, scheduler)
.getCompactionProgress(TestDataSource.WIKI);
Assert.assertEquals(200, response.getStatus());
Assert.assertEquals(new CompactionProgressResponse(100L), response.getEntity());
}

@Test
public void testGetProgressForNullDatasourceReturnsBadRequest()
{
EasyMock.replay(scheduler);

final Response response = new OverlordCompactionResource(SUPERVISOR_ENABLED, scheduler)
.getCompactionProgress(null);
Assert.assertEquals(Response.Status.BAD_REQUEST.getStatusCode(), response.getStatus());

final Object responseEntity = response.getEntity();
Assert.assertTrue(responseEntity instanceof ErrorResponse);

MatcherAssert.assertThat(
((ErrorResponse) responseEntity).getUnderlyingException(),
DruidExceptionMatcher.invalidInput().expectMessageIs("No DataSource specified")
);
}

@Test
public void testGetProgressForInvalidDatasourceReturnsNotFound()
{
EasyMock.expect(scheduler.getCompactionSnapshot(TestDataSource.KOALA))
.andReturn(null).once();
EasyMock.replay(scheduler);

final Response response = new OverlordCompactionResource(SUPERVISOR_ENABLED, scheduler)
.getCompactionProgress(TestDataSource.KOALA);
Assert.assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus());

final Object responseEntity = response.getEntity();
Assert.assertTrue(responseEntity instanceof ErrorResponse);

MatcherAssert.assertThat(
((ErrorResponse) responseEntity).getUnderlyingException(),
DruidExceptionMatcher.notFound().expectMessageIs("Unknown DataSource")
);
}

@Test
public void testGetProgressReturnsUnsupportedWhenSupervisorDisabled()
{
EasyMock.replay(scheduler);
verifyResponseWhenSupervisorDisabled(
new OverlordCompactionResource(SUPERVISOR_DISABLED, scheduler)
.getCompactionProgress(TestDataSource.WIKI)
);
}

@Test
public void testGetSnapshotReturnsUnsupportedWhenSupervisorDisabled()
{
EasyMock.replay(scheduler);
verifyResponseWhenSupervisorDisabled(
new OverlordCompactionResource(SUPERVISOR_DISABLED, scheduler)
.getCompactionSnapshots(TestDataSource.WIKI)
);
}

private void verifyResponseWhenSupervisorDisabled(Response response)
{
Assert.assertEquals(501, response.getStatus());

final Object responseEntity = response.getEntity();
Assert.assertTrue(responseEntity instanceof ErrorResponse);

MatcherAssert.assertThat(
((ErrorResponse) responseEntity).getUnderlyingException(),
new DruidExceptionMatcher(
DruidException.Persona.USER,
DruidException.Category.UNSUPPORTED,
"general"
).expectMessageIs(
"Compaction Supervisors are disabled on the Overlord."
+ " Use Coordinator APIs to fetch compaction status."
)
);
}
}
@@ -40,6 +40,15 @@ public static DruidExceptionMatcher invalidInput()
);
}

public static DruidExceptionMatcher notFound()
{
return new DruidExceptionMatcher(
DruidException.Persona.USER,
DruidException.Category.NOT_FOUND,
"notFound"
);
}

public static DruidExceptionMatcher invalidSqlInput()
{
return invalidInput().expectContext("sourceType", "sql");
@@ -35,7 +35,8 @@
import org.apache.druid.java.util.common.parsers.CloseableIterator;
import org.apache.druid.metadata.LockFilterPolicy;
import org.apache.druid.rpc.ServiceRetryPolicy;
import org.apache.druid.server.coordinator.AutoCompactionSnapshot;
import org.apache.druid.server.compaction.CompactionProgressResponse;
import org.apache.druid.server.compaction.CompactionStatusResponse;
import org.joda.time.DateTime;
import org.joda.time.Interval;

@@ -226,7 +227,7 @@ ListenableFuture<Map<String, List<Interval>>> findLockedIntervals(
* <p>
* API: {@code /druid/indexer/v1/compaction/progress}
*/
ListenableFuture<Long> getBytesAwaitingCompaction(String dataSource);
ListenableFuture<CompactionProgressResponse> getBytesAwaitingCompaction(String dataSource);

/**
* Gets the latest compaction snapshots of one or all datasources.
@@ -236,7 +237,7 @@ ListenableFuture<Map<String, List<Interval>>> findLockedIntervals(
* @param dataSource If passed as non-null, then the returned list contains only
* the snapshot for this datasource.
*/
ListenableFuture<List<AutoCompactionSnapshot>> getCompactionSnapshots(@Nullable String dataSource);
ListenableFuture<CompactionStatusResponse> getCompactionSnapshots(@Nullable String dataSource);

/**
* Returns a copy of this client with a different retry policy.
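
Finally, a hypothetical caller-side sketch of the updated `OverlordClient` methods. The getter names on the response classes are assumed from the JSON field names discussed above and are not confirmed by this diff:

```java
// Hypothetical usage, not part of this patch. Assumes getLatestStatus() and
// getRemainingSegmentSize() exist on the new response classes.
import com.google.common.util.concurrent.ListenableFuture;
import org.apache.druid.rpc.indexing.OverlordClient;
import org.apache.druid.server.compaction.CompactionProgressResponse;
import org.apache.druid.server.compaction.CompactionStatusResponse;
import org.apache.druid.server.coordinator.AutoCompactionSnapshot;

class CompactionStatusPrinter
{
  static void printCompactionState(OverlordClient client, String dataSource) throws Exception
  {
    // Latest compaction snapshots, one per datasource.
    final ListenableFuture<CompactionStatusResponse> statusFuture =
        client.getCompactionSnapshots(dataSource);
    for (AutoCompactionSnapshot snapshot : statusFuture.get().getLatestStatus()) {
      System.out.println(
          snapshot.getDataSource() + ": bytesAwaitingCompaction=" + snapshot.getBytesAwaitingCompaction()
      );
    }

    // Bytes awaiting compaction for the given datasource
    // (served by /druid/indexer/v1/compaction/progress).
    final CompactionProgressResponse progress =
        client.getBytesAwaitingCompaction(dataSource).get();
    System.out.println("remainingSegmentSize=" + progress.getRemainingSegmentSize());
  }
}
```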