Skip to content
This repository has been archived by the owner on Jul 22, 2022. It is now read-only.

Commit

Permalink
HDFS-10636. Modify ReplicaInfo to remove the assumption that replica …
Browse files Browse the repository at this point in the history
…metadata and data are stored in java.io.File. (Virajith Jalaparti via lei)
  • Loading branch information
Lei Xu committed Sep 13, 2016
1 parent 1c0d18f commit 86c9862
Show file tree
Hide file tree
Showing 41 changed files with 2,219 additions and 1,308 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@

import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
Expand Down Expand Up @@ -741,7 +742,20 @@ public boolean isTrashAllowed(File blockFile) {
*
* @return the trash directory for a given block file that is being deleted.
*/
public String getTrashDirectory(File blockFile) {
public String getTrashDirectory(ReplicaInfo info) {
  // Resolve the URI up front; only the File conversion below is guarded.
  final URI blockURI = info.getBlockURI();
  try {
    // new File(URI) throws IllegalArgumentException for URIs that do not
    // denote a local file (e.g. a non-"file:" scheme).
    return getTrashDirectory(new File(blockURI));
  } catch (IllegalArgumentException e) {
    LOG.warn("Failed to get block file for replica " + info, e);
    return null;
  }
}

private String getTrashDirectory(File blockFile) {
if (isTrashAllowed(blockFile)) {
Matcher matcher = BLOCK_POOL_CURRENT_PATH_PATTERN.matcher(blockFile.getParent());
String trashDirectory = matcher.replaceFirst("$1$2" + TRASH_ROOT_DIR + "$4");
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -121,7 +121,7 @@ class BlockReceiver implements Closeable {
/** the block to receive */
private final ExtendedBlock block;
/** the replica to write */
private ReplicaInPipelineInterface replicaInfo;
private ReplicaInPipeline replicaInfo;
/** pipeline stage */
private final BlockConstructionStage stage;
private final boolean isTransfer;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
Expand Down Expand Up @@ -248,8 +249,8 @@ class BlockSender implements java.io.Closeable {
}
// if there is a write in progress
ChunkChecksum chunkChecksum = null;
if (replica instanceof ReplicaBeingWritten) {
final ReplicaBeingWritten rbw = (ReplicaBeingWritten)replica;
if (replica.getState() == ReplicaState.RBW) {
final ReplicaInPipeline rbw = (ReplicaInPipeline) replica;
waitForMinLength(rbw, startOffset + length);
chunkChecksum = rbw.getLastChecksumAndDataLen();
}
Expand Down Expand Up @@ -473,7 +474,7 @@ private static Replica getReplica(ExtendedBlock block, DataNode datanode)
* @param len minimum length to reach
* @throws IOException on failing to reach the len in given wait time
*/
private static void waitForMinLength(ReplicaBeingWritten rbw, long len)
private static void waitForMinLength(ReplicaInPipeline rbw, long len)
throws IOException {
// Wait for 3 seconds for rbw replica to reach the minimum length
for (int i = 0; i < 30 && rbw.getBytesOnDisk() < len; i++) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3474,4 +3474,4 @@ public String getDiskBalancerSetting(String key) throws IOException {
/**
 * Replaces this instance's {@code blockScanner} reference.
 * Package-private; presumably a test hook — TODO confirm against callers.
 *
 * @param blockScanner the scanner to install
 */
void setBlockScanner(BlockScanner blockScanner) {
this.blockScanner = blockScanner;
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,6 @@ public void noRegistration() throws IOException { }

// No-op by default; looks like a fault-injection hook meant to be
// overridden in tests to simulate a failed mirror connection — TODO confirm.
public void failMirrorConnection() throws IOException { }

public void failPipeline(ReplicaInPipelineInterface replicaInfo,
// No-op by default; presumably overridden in tests to inject a pipeline
// failure for the given replica/mirror — TODO confirm against subclasses.
public void failPipeline(ReplicaInPipeline replicaInfo,
String mirrorAddr) throws IOException { }
}
Original file line number Diff line number Diff line change
Expand Up @@ -204,9 +204,9 @@ public void clearRollingUpgradeMarker(String bpid) throws IOException {
* @return trash directory if rolling upgrade is in progress, null
* otherwise.
*/
public String getTrashDirectoryForBlockFile(String bpid, File blockFile) {
public String getTrashDirectoryForReplica(String bpid, ReplicaInfo info) {
  // Trash is only consulted for block pools that currently have it enabled.
  if (!trashEnabledBpids.contains(bpid)) {
    return null;
  }
  return getBPStorage(bpid).getTrashDirectory(info);
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -597,14 +597,14 @@ private void scan() {
diffs.put(bpid, diffRecord);

statsRecord.totalBlocks = blockpoolReport.length;
List<FinalizedReplica> bl = dataset.getFinalizedBlocks(bpid);
FinalizedReplica[] memReport = bl.toArray(new FinalizedReplica[bl.size()]);
List<ReplicaInfo> bl = dataset.getFinalizedBlocks(bpid);
ReplicaInfo[] memReport = bl.toArray(new ReplicaInfo[bl.size()]);
Arrays.sort(memReport); // Sort based on blockId

int d = 0; // index for blockpoolReport
int m = 0; // index for memReprot
while (m < memReport.length && d < blockpoolReport.length) {
FinalizedReplica memBlock = memReport[m];
ReplicaInfo memBlock = memReport[m];
ScanInfo info = blockpoolReport[d];
if (info.getBlockId() < memBlock.getBlockId()) {
if (!dataset.isDeletingBlock(bpid, info.getBlockId())) {
Expand Down Expand Up @@ -633,7 +633,7 @@ private void scan() {
// or block file length is different than expected
statsRecord.mismatchBlocks++;
addDifference(diffRecord, statsRecord, info);
} else if (info.getBlockFile().compareTo(memBlock.getBlockFile()) != 0) {
} else if (memBlock.compareWith(info) != 0) {
// volumeMap record and on-disk files don't match.
statsRecord.duplicateBlocks++;
addDifference(diffRecord, statsRecord, info);
Expand All @@ -652,7 +652,7 @@ private void scan() {
}
}
while (m < memReport.length) {
FinalizedReplica current = memReport[m++];
ReplicaInfo current = memReport[m++];
addDifference(diffRecord, statsRecord,
current.getBlockId(), current.getVolume());
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -22,11 +22,12 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

/**
* This class describes a replica that has been finalized.
*/
public class FinalizedReplica extends ReplicaInfo {
public class FinalizedReplica extends LocalReplica {

/**
* Constructor
Expand Down Expand Up @@ -88,4 +89,28 @@ public int hashCode() {
public String toString() {
return super.toString();
}

/**
 * Always fails: this operation is not defined for this replica type.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public ReplicaInfo getOriginalReplica() {
  final String msg =
      "Replica of type " + getState() + " does not support getOriginalReplica";
  throw new UnsupportedOperationException(msg);
}

/**
 * Always fails: this operation is not defined for this replica type.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public long getRecoveryID() {
  final String msg =
      "Replica of type " + getState() + " does not support getRecoveryID";
  throw new UnsupportedOperationException(msg);
}

/**
 * Always fails: this operation is not defined for this replica type.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public void setRecoveryID(long recoveryId) {
  final String msg =
      "Replica of type " + getState() + " does not support setRecoveryID";
  throw new UnsupportedOperationException(msg);
}

/**
 * Always fails: this operation is not defined for this replica type.
 *
 * @throws UnsupportedOperationException always
 */
@Override
public ReplicaRecoveryInfo createInfo() {
  final String msg =
      "Replica of type " + getState() + " does not support createInfo";
  throw new UnsupportedOperationException(msg);
}
}
Loading

0 comments on commit 86c9862

Please sign in to comment.