Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: Replay command topic to local file to backup KSQL Metastore #5831

Merged
merged 2 commits into from
Jul 16, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -292,6 +292,17 @@ public class KsqlConfig extends AbstractConfig {
public static final String KSQL_CREATE_OR_REPLACE_ENABLED_DOC =
"Feature flag for CREATE OR REPLACE";

// Feature flag (default false): enables the metastore backup service, which
// replays the KSQL command topic to a local file on the same ksqlDB node.
public static final String KSQL_ENABLE_METASTORE_BACKUP = "ksql.enable.metastore.backup";
public static final Boolean KSQL_ENABLE_METASTORE_BACKUP_DEFAULT = false;
public static final String KSQL_ENABLE_METASTORE_BACKUP_DOC = "Enable the KSQL metastore "
+ "backup service. The backup replays the KSQL command_topic to a file located in the "
+ "same KSQL node.";

// Directory where metastore backup files are written. Defaults to the empty
// string, i.e. no backup location configured.
public static final String KSQL_METASTORE_BACKUP_LOCATION = "ksql.metastore.backup.location";
public static final String KSQL_METASTORE_BACKUP_LOCATION_DEFAULT = "";
public static final String KSQL_METASTORE_BACKUP_LOCATION_DOC = "Specify the directory where "
+ "KSQL metastore backup files are located.";

private enum ConfigGeneration {
LEGACY,
CURRENT
Expand Down Expand Up @@ -679,6 +690,20 @@ private static ConfigDef buildConfigDef(final ConfigGeneration generation) {
Importance.LOW,
KSQL_CREATE_OR_REPLACE_ENABLED_DOC
)
.define(
KSQL_ENABLE_METASTORE_BACKUP,
Type.BOOLEAN,
KSQL_ENABLE_METASTORE_BACKUP_DEFAULT,
Importance.LOW,
KSQL_ENABLE_METASTORE_BACKUP_DOC
)
.define(
KSQL_METASTORE_BACKUP_LOCATION,
Type.STRING,
KSQL_METASTORE_BACKUP_LOCATION_DEFAULT,
Importance.LOW,
KSQL_METASTORE_BACKUP_LOCATION_DOC
)
.withClientSslSupport();

for (final CompatibilityBreakingConfigDef compatibilityBreakingConfigDef
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
/*
* Copyright 2020 Confluent Inc.
*
* Licensed under the Confluent Community License (the "License"); you may not use
* this file except in compliance with the License. You may obtain a copy of the
* License at
*
* http://www.confluent.io/confluent-community-license
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/

package io.confluent.ksql.rest.server;

import com.fasterxml.jackson.databind.ObjectMapper;
import io.confluent.ksql.execution.json.PlanJsonMapper;
import io.confluent.ksql.rest.entity.CommandId;
import io.confluent.ksql.rest.server.computation.Command;
import io.confluent.ksql.util.KsqlException;
import io.confluent.ksql.util.Pair;

import java.io.BufferedWriter;
import java.io.Closeable;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

/**
* A file that is used by the backup service to replay command_topic commands.
*/
public class BackupReplayFile implements Closeable {
// Shared JSON (de)serializer used for both CommandId keys and Command values.
private static final ObjectMapper MAPPER = PlanJsonMapper.INSTANCE.get();
// Separates the serialized CommandId (key) from the Command (value) on each line.
private static final String KEY_VALUE_SEPARATOR = ":";

// Backing file for the backup; opened in append mode for this object's lifetime.
private final File file;
private final BufferedWriter writer;

/**
 * Creates a replay file backed by {@code file}, opening it for appending.
 *
 * @param file the backup file to append records to; must not be null
 * @throws KsqlException if the file cannot be opened for writing
 */
public BackupReplayFile(final File file) {
this.file = Objects.requireNonNull(file, "file");
this.writer = createWriter(file);
}

/**
 * Opens {@code file} in append mode as a UTF-8 buffered writer.
 *
 * @param file the backup file to open
 * @return a buffered UTF-8 writer that appends to the file
 * @throws KsqlException if the file cannot be opened
 */
private static BufferedWriter createWriter(final File file) {
try {
final FileOutputStream appendStream = new FileOutputStream(file, true);
final OutputStreamWriter utf8Writer =
new OutputStreamWriter(appendStream, StandardCharsets.UTF_8);
return new BufferedWriter(utf8Writer);
} catch (final FileNotFoundException e) {
throw new KsqlException(
String.format("Failed to create replay file: %s", file.getAbsolutePath()), e);
}
}

/**
 * Returns the absolute path of the backing backup file.
 *
 * @return the absolute file-system path of this replay file
 */
public String getPath() {
return this.file.getAbsolutePath();
}

/**
 * Appends a single record to the file as one line of the form
 * {@code <commandId JSON>:<command JSON>\n}, flushing immediately so the
 * record is durable as soon as this method returns.
 *
 * @param commandId the record key
 * @param command the record value
 * @throws IOException if serialization or the file write fails
 */
public void write(final CommandId commandId, final Command command) throws IOException {
final String key = MAPPER.writeValueAsString(commandId);
final String value = MAPPER.writeValueAsString(command);
writer.write(key + KEY_VALUE_SEPARATOR + value + "\n");
writer.flush();
}

/**
 * Appends each record in {@code records}, in order, one line per record.
 *
 * @param records the key/value pairs to append
 * @throws IOException if any individual write fails; earlier records in the
 *     list will already have been written and flushed
 */
public void write(final List<Pair<CommandId, Command>> records) throws IOException {
for (int i = 0; i < records.size(); i++) {
final Pair<CommandId, Command> record = records.get(i);
write(record.left, record.right);
}
}

public List<Pair<CommandId, Command>> readRecords() throws IOException {
final List<Pair<CommandId, Command>> commands = new ArrayList<>();
for (final String line : Files.readAllLines(file.toPath(), StandardCharsets.UTF_8)) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

can we test what happens if the command itself has a newline in it (or is it possible for the commandId to have the KEY_VALUE_SEPARATOR in it - I don't think so)? I know it will make it harder to replay the file, but it might make it safer if we have an encoding: commandIdSize (4 bytes) | commandSize (4 byte) | commandId | Command

another (perhaps better) option is that because we know that the CommandID and the Command are valid JSON, we can just read one valid JSON then the next (see https://stackoverflow.com/a/37395419/2258040) and we don't even have to worry about newlines (which we can add anyway to make it easier for humans to read)

I might be too paranoid, let me know if you don't think this is a problem.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Cool, I was looking on something like that. However, that format will not work with the plan of restoring the topic manually using this command:

$ kafka-topic --create --topic $COMMAND_TOPIC --partitions 1 --replication-factor 3
$ kafka-console-producer --broker-list localhost:9092 --topic $COMMAND_TOPIC \
        --property "parse.key=true" --property "key.separator=:" < $BACKUP_FILE

If we have time, we could add a ksql-restore command that reads the file using your proposal (which I prefer). Perhaps a next release? I'm not sure if we can do it in a compatible way. Just changing the file name might be enough probably.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Does the proposed restore command work if the command has newlines? If not, then we need to fix that right?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@rodesai There's a test that verifies that in BackupReplayFileTest.shouldWriteRecordWithNewLineCharacterInCommand.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

As discussed offline, according to the standard it's not possible for a json string field to have an embedded newline, so we should be good here. I still think a more explicit format (like what @agavra suggested) is safer (even if it means the backup file isn't immediately usable). Up to you.

final String commandId = line.substring(0, line.indexOf(KEY_VALUE_SEPARATOR));
final String command = line.substring(line.indexOf(KEY_VALUE_SEPARATOR) + 1);

commands.add(new Pair<>(
MAPPER.readValue(commandId.getBytes(StandardCharsets.UTF_8), CommandId.class),
MAPPER.readValue(command.getBytes(StandardCharsets.UTF_8), Command.class)
));
}

return commands;
}

/**
 * Closes the underlying writer; no further writes are possible afterwards.
 */
@Override
public void close() throws IOException {
writer.close();
}
}
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright 2018 Confluent Inc.
* Copyright 2020 Confluent Inc.
*
* Licensed under the Confluent Community License (the "License"); you may not use
* this file except in compliance with the License. You may obtain a copy of the
Expand Down Expand Up @@ -41,40 +41,52 @@ public class CommandTopic {

private Consumer<CommandId, Command> commandConsumer = null;
private final String commandTopicName;
private CommandTopicBackup commandTopicBackup;

/**
 * Creates a CommandTopic backed by a new KafkaConsumer built from the given
 * client properties, using the internal CommandId/Command deserializers.
 *
 * @param commandTopicName name of the command topic to consume
 * @param kafkaConsumerProperties Kafka consumer client properties; must not be null
 * @param commandTopicBackup backup to which every consumed record is replayed
 */
public CommandTopic(
final String commandTopicName,
final Map<String, Object> kafkaConsumerProperties,
final CommandTopicBackup commandTopicBackup
) {
this(
commandTopicName,
new KafkaConsumer<>(
// message fixed to match the actual parameter name (was "kafkaClientProperties")
Objects.requireNonNull(kafkaConsumerProperties, "kafkaConsumerProperties"),
InternalTopicSerdes.deserializer(CommandId.class),
InternalTopicSerdes.deserializer(Command.class)
),
commandTopicBackup
);
}

/**
 * Package-private constructor taking an already-built consumer; used directly
 * by tests and by the public constructor.
 *
 * @param commandTopicName name of the command topic; must not be null
 * @param commandConsumer consumer to poll records from; must not be null
 * @param commandTopicBackup backup to which every consumed record is replayed; must not be null
 */
CommandTopic(
final String commandTopicName,
final Consumer<CommandId, Command> commandConsumer,
final CommandTopicBackup commandTopicBackup
) {
// Validate the topic name before using it to build the partition, so a null
// name fails with the intended requireNonNull message rather than inside
// the TopicPartition constructor.
this.commandTopicName = Objects.requireNonNull(commandTopicName, "commandTopicName");
this.commandTopicPartition = new TopicPartition(commandTopicName, 0);
this.commandConsumer = Objects.requireNonNull(commandConsumer, "commandConsumer");
this.commandTopicBackup = Objects.requireNonNull(commandTopicBackup, "commandTopicBackup");
}

/**
 * Returns the name of the command topic this instance consumes.
 *
 * @return the command topic name
 */
public String getCommandTopicName() {
return this.commandTopicName;
}

/**
 * Starts consumption: the backup is initialized first, then the consumer is
 * assigned to the single command-topic partition (partition 0).
 */
public void start() {
this.commandTopicBackup.initialize();
this.commandConsumer.assign(Collections.singleton(this.commandTopicPartition));
}

/**
 * Polls the command topic for new records, writing each polled record to the
 * backup before returning them to the caller.
 *
 * @param timeout maximum time to block in the underlying poll
 * @return the polled records; may be null if the underlying consumer returns null
 */
public Iterable<ConsumerRecord<CommandId, Command>> getNewCommands(final Duration timeout) {
final Iterable<ConsumerRecord<CommandId, Command>> records = commandConsumer.poll(timeout);

// Back up every record before handing it to the caller.
if (records != null) {
records.forEach(this::backupRecord);
}

return records;
}

public List<QueuedCommand> getRestoreCommands(final Duration duration) {
Expand All @@ -89,6 +101,8 @@ public List<QueuedCommand> getRestoreCommands(final Duration duration) {
while (!records.isEmpty()) {
log.debug("Received {} records from poll", records.count());
for (final ConsumerRecord<CommandId, Command> record : records) {
backupRecord(record);

if (record.value() == null) {
continue;
}
Expand Down Expand Up @@ -119,5 +133,10 @@ public void wakeup() {

/**
 * Closes the consumer and the backup. The backup is closed even if closing
 * the consumer throws, so its file handle is never leaked.
 */
public void close() {
try {
commandConsumer.close();
} finally {
commandTopicBackup.close();
}
}

/**
 * Forwards a single consumed record to the configured backup.
 *
 * @param record the command-topic record to back up
 */
private void backupRecord(final ConsumerRecord<CommandId, Command> record) {
this.commandTopicBackup.writeRecord(record);
}
}
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
/*
* Copyright 2020 Confluent Inc.
*
* Licensed under the Confluent Community License (the "License"); you may not use
* this file except in compliance with the License. You may obtain a copy of the
* License at
*
* http://www.confluent.io/confluent-community-license
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/

package io.confluent.ksql.rest.server;

import io.confluent.ksql.rest.entity.CommandId;
import io.confluent.ksql.rest.server.computation.Command;
import org.apache.kafka.clients.consumer.ConsumerRecord;

/**
 * Backup for the KSQL command topic: implementations persist each consumed
 * command-topic record (e.g. to a local replay file) so the metastore can be
 * restored if the topic is lost.
 */
public interface CommandTopicBackup {
/** Prepares the backup before consumption starts (e.g. opens the backup file). */
void initialize();

/** Persists a single consumed command-topic record. */
void writeRecord(ConsumerRecord<CommandId, Command> record);

/** Releases any resources held by the backup. */
void close();
}
Loading