Support Erasure Coding on HDFS-3.x (#1924)
PHILO-HE authored Sep 14, 2018
1 parent ea6f083 commit 8e3435a
Showing 33 changed files with 1,081 additions and 42 deletions.
3 changes: 2 additions & 1 deletion smart-common/src/main/java/org/smartdata/SmartConstants.java
@@ -28,7 +28,8 @@ public class SmartConstants {
"org.smartdata.hdfs.scheduler.MoverScheduler, "
+ "org.smartdata.hdfs.scheduler.CopyScheduler, "
+ "org.smartdata.hdfs.scheduler.Copy2S3Scheduler,"
+ "org.smartdata.hdfs.scheduler.SmallFileScheduler";
+ "org.smartdata.hdfs.scheduler.SmallFileScheduler,"
+ "org.smartdata.hdfs.scheduler.ErasureCodingScheduler";

public static final String SMART_HADOOP_LAST_INOTIFY_TXID =
"smart_hadoop_last_inotify_txid";
34 changes: 27 additions & 7 deletions smart-common/src/main/java/org/smartdata/model/FileInfo.java
@@ -32,11 +32,12 @@ public class FileInfo {
private String owner;
private String group;
private byte storagePolicy;
private byte erasureCodingPolicy;

public FileInfo(String path, long fileId, long length, boolean isdir,
short blockReplication, long blocksize, long modificationTime,
long accessTime, short permission, String owner, String group,
byte storagePolicy) {
byte storagePolicy, byte erasureCodingPolicy) {
this.path = path;
this.fileId = fileId;
this.length = length;
@@ -49,6 +50,7 @@ public FileInfo(String path, long fileId, long length, boolean isdir,
this.owner = owner;
this.group = group;
this.storagePolicy = storagePolicy;
this.erasureCodingPolicy = erasureCodingPolicy;
}

public String getPath() {
@@ -147,6 +149,14 @@ public void setStoragePolicy(byte storagePolicy) {
this.storagePolicy = storagePolicy;
}

public byte getErasureCodingPolicy() {
return erasureCodingPolicy;
}

public void setErasureCodingPolicy(byte erasureCodingPolicy) {
this.erasureCodingPolicy = erasureCodingPolicy;
}

@Override
public boolean equals(Object o) {
if (this == o) {
@@ -165,6 +175,7 @@ public boolean equals(Object o) {
&& accessTime == fileInfo.accessTime
&& permission == fileInfo.permission
&& storagePolicy == fileInfo.storagePolicy
&& erasureCodingPolicy == fileInfo.erasureCodingPolicy
&& Objects.equals(path, fileInfo.path)
&& Objects.equals(owner, fileInfo.owner)
&& Objects.equals(group, fileInfo.group);
@@ -184,7 +195,8 @@ public int hashCode() {
permission,
owner,
group,
storagePolicy);
storagePolicy,
erasureCodingPolicy);
}

public static Builder newBuilder() {
@@ -196,7 +208,7 @@ public String toString() {
return String.format(
"FileInfo{path=\'%s\', fileId=%s, length=%s, isdir=%s, blockReplication=%s, "
+ "blocksize=%s, modificationTime=%s, accessTime=%s, permission=%s, owner=\'%s\', "
+ "group=\'%s\', storagePolicy=%s}",
+ "group=\'%s\', storagePolicy=%s, erasureCodingPolicy=%s}",
path,
fileId,
length,
@@ -208,7 +220,8 @@ public String toString() {
permission,
owner,
group,
storagePolicy);
storagePolicy,
erasureCodingPolicy);
}

public static class Builder {
@@ -224,6 +237,7 @@ public static class Builder {
private String owner;
private String group;
private byte storagePolicy;
private byte erasureCodingPolicy;

public Builder setPath(String path) {
this.path = path;
@@ -285,18 +299,23 @@ public Builder setStoragePolicy(byte storagePolicy) {
return this;
}

public Builder setErasureCodingPolicy(byte erasureCodingPolicy) {
this.erasureCodingPolicy = erasureCodingPolicy;
return this;
}

public FileInfo build() {
return new FileInfo(path, fileId, length, isdir, blockReplication,
blocksize, modificationTime, accessTime, permission, owner,
group, storagePolicy);
group, storagePolicy, erasureCodingPolicy);
}

@Override
public String toString() {
return String.format(
"Builder{path=\'%s\', fileId=%s, length=%s, isdir=%s, blockReplication=%s, "
+ "blocksize=%s, modificationTime=%s, accessTime=%s, permission=%s, owner=\'%s\', "
+ "group=\'%s\', storagePolicy=\'%s\'}",
+ "group=\'%s\', storagePolicy=%s, erasureCodingPolicy=%s}",
path,
fileId,
length,
@@ -308,7 +327,8 @@ public String toString() {
permission,
owner,
group,
storagePolicy);
storagePolicy,
erasureCodingPolicy);
}
}
}
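The new erasureCodingPolicy byte is carried through the constructor, equals/hashCode, toString and the Builder. As a rough illustration of the extended Builder, a caller might now write the following; the path and policy values are made up, and only setters visible in this diff are used:

    // Hypothetical call site, not part of this commit.
    FileInfo info = FileInfo.newBuilder()
        .setPath("/ec/data/part-00000")         // illustrative path
        .setStoragePolicy((byte) 0)             // illustrative storage policy id
        .setErasureCodingPolicy((byte) 0)       // 0 = plain replication, no EC
        .build();

A non-zero erasureCodingPolicy id would mark the file as erasure-coded under the corresponding HDFS policy.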
24 changes: 16 additions & 8 deletions smart-common/src/test/java/org/smartdata/model/TestFileInfo.java
@@ -36,7 +36,8 @@ public ChildFileInfo(
short permission,
String owner,
String group,
byte storagePolicy) {
byte storagePolicy,
byte erasureCodingPolicy) {
super(
path,
fileId,
@@ -49,45 +50,52 @@ public ChildFileInfo(
permission,
owner,
group,
storagePolicy);
storagePolicy,
erasureCodingPolicy);
}
}

@Test
public void testEquals() throws Exception {
//Case 1:
FileInfo fileInfo =
new FileInfo(" ", 1, 1, true, (short) 1, 1, 1, 1, (short) 1, " ", " ", (byte) 1);
new FileInfo(" ", 1, 1, true, (short) 1, 1, 1,
1, (short) 1, " ", " ", (byte) 1, (byte) 0);
Assert.assertEquals(true, fileInfo.equals(fileInfo));

//Case 2:
FileInfo fileInfo1 =
new FileInfo(" ", 1, 1, true, (short) 1, 1, 1, 1, (short) 1, " ", " ", (byte) 1);
new FileInfo(" ", 1, 1, true, (short) 1, 1, 1,
1, (short) 1, " ", " ", (byte) 1, (byte) 0);
Assert.assertEquals(true, fileInfo.equals(fileInfo1));

//Case 3:
FileInfo fileInfo2 =
new FileInfo(" ", 1, 1, true, (short) 1, 1, 1, 1, (short) 1, null, " ", (byte) 1);
new FileInfo(" ", 1, 1, true, (short) 1, 1, 1,
1, (short) 1, null, " ", (byte) 1, (byte) 0);

Assert.assertEquals(false, fileInfo.equals(fileInfo2));
Assert.assertEquals(false, fileInfo2.equals(fileInfo));

//Case 4:
FileInfo fileInfo3 =
new FileInfo(null, 1, 1, true, (short) 1, 1, 1, 1, (short) 1, " ", " ", (byte) 1);
new FileInfo(null, 1, 1, true, (short) 1, 1, 1,
1, (short) 1, " ", " ", (byte) 1, (byte) 0);

Assert.assertEquals(false, fileInfo.equals(fileInfo3));
Assert.assertEquals(false, fileInfo3.equals(fileInfo));

//Case 5:
FileInfo fileInfo4 =
new FileInfo(null, 1, 1, true, (short) 1, 1, 1, 1, (short) 1, " ", null, (byte) 1);
new FileInfo(null, 1, 1, true, (short) 1, 1, 1,
1, (short) 1, " ", null, (byte) 1, (byte) 0);
Assert.assertEquals(false, fileInfo.equals(fileInfo4));
Assert.assertEquals(false, fileInfo4.equals(fileInfo));

//Case 6:
FileInfo fileInfo5 =
new FileInfo(" ", 1, 1, true, (short) 1, 1, 1, 1, (short) 2, " ", " ", (byte) 1);
new FileInfo(" ", 1, 1, true, (short) 1, 1, 1,
1, (short) 2, " ", " ", (byte) 1, (byte) 0);
Assert.assertEquals(false, fileInfo.equals(fileInfo5));
}
}
@@ -20,6 +20,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.*;
import org.apache.hadoop.hdfs.protocolPB.PBHelper;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -62,4 +63,13 @@ public HdfsFileStatus createHdfsFileStatus(
length, isdir, block_replication, blocksize, modification_time, access_time, permission,
owner, group, symlink, path, fileId, childrenNum, feInfo, storagePolicy);
}

public byte getErasureCodingPolicy(HdfsFileStatus fileStatus) {
// For HDFS 2.x, the erasure coding policy is always replication, whose ID is 0 in HDFS.
return (byte) 0;
}

public byte getErasureCodingPolicyByName(DFSClient client, String ecPolicyName) throws IOException {
return (byte) 0;
}
}
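With this 2.x shim in place, callers can treat erasure coding uniformly across Hadoop versions: an id of 0 always means plain replication, and on HDFS 2.x nothing else is ever returned. A hedged caller-side sketch, where helper and status are placeholder names for the version-specific implementation and an HdfsFileStatus:

    byte ecPolicyId = helper.getErasureCodingPolicy(status);
    if (ecPolicyId == (byte) 0) {
      // replicated file; on HDFS 2.x this branch is always taken
    } else {
      // erasure-coded file; only reachable when running against HDFS 3.x
    }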
26 changes: 24 additions & 2 deletions smart-hadoop-support/smart-hadoop-3.1/pom.xml
@@ -70,6 +70,11 @@
<artifactId>smart-hadoop-common</artifactId>
<version>1.5.0-SNAPSHOT</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
@@ -84,10 +89,27 @@
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-all</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-aws</artifactId>
<version>${hadoop.version}</version>
<artifactId>hadoop-common</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
</dependencies>
</project>
@@ -207,4 +207,27 @@ public HdfsFileStatus createHdfsFileStatus(
.storagePolicy(storagePolicy)
.build();
}

@Override
public byte getErasureCodingPolicy(HdfsFileStatus fileStatus) {
ErasureCodingPolicy erasureCodingPolicy = fileStatus.getErasureCodingPolicy();
// null means the replication policy, whose ID is 0 in HDFS.
if (erasureCodingPolicy == null) {
return (byte) 0;
}
return fileStatus.getErasureCodingPolicy().getId();
}

@Override
public byte getErasureCodingPolicyByName(DFSClient client, String ecPolicyName) throws IOException {
if (ecPolicyName.equals(SystemErasureCodingPolicies.getReplicationPolicy().getName())) {
return (byte) 0;
}
for (ErasureCodingPolicyInfo policyInfo : client.getErasureCodingPolicies()) {
if (policyInfo.getPolicy().getName().equals(ecPolicyName)) {
return policyInfo.getPolicy().getId();
}
}
return (byte) -1;
}
}
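On HDFS 3.x the name-to-id lookup goes through the policies registered with the NameNode, with 0 reserved for replication and -1 signalling an unknown name. A small sketch of how a caller might use it, assuming the methods above sit behind an interface along the lines of CompatibilityHelper; the interface name, the variable names and the RS-6-3-1024k policy string are illustrative:

    // Resolve a policy name into the byte id stored in FileInfo, falling back
    // to replication (0) when the NameNode does not know the name.
    // getErasureCodingPolicyByName declares IOException, so callers handle it.
    byte id = helper.getErasureCodingPolicyByName(dfsClient, "RS-6-3-1024k");
    if (id == (byte) -1) {
      id = (byte) 0;  // unknown policy name: treat as plain replication
    }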
@@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.smartdata.hdfs.action;

import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.smartdata.action.annotation.ActionSignature;
import org.smartdata.conf.SmartConf;
import org.smartdata.hdfs.HadoopUtil;

import java.util.Map;

/**
* An action to check the EC policy for a file or dir.
*/
@ActionSignature(
actionId = "checkec",
displayName = "checkec",
usage = HdfsAction.FILE_PATH + " $src"
)
public class CheckErasureCodingPolicy extends HdfsAction {
public static final String RESULT_OF_NULL_EC_POLICY =
"The EC policy is replication.";
private SmartConf conf;
private String srcPath;

@Override
public void init(Map<String, String> args) {
super.init(args);
this.conf = getContext().getConf();
this.srcPath = args.get(HdfsAction.FILE_PATH);
}

@Override
public void execute() throws Exception {
this.setDfsClient(HadoopUtil.getDFSClient(
HadoopUtil.getNameNodeUri(conf), conf));
ErasureCodingPolicy srcEcPolicy = dfsClient.getErasureCodingPolicy(srcPath);
if (srcEcPolicy == null) {
appendResult(RESULT_OF_NULL_EC_POLICY);
} else {
appendResult(srcEcPolicy.toString());
}
}
}
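For context, a minimal sketch of exercising the new action outside the SSM scheduler; in a real deployment SSM instantiates the action from its "checkec" signature and supplies the action context, so the args map, the path and the direct init/execute calls here are only illustrative:

    // Hypothetical direct invocation; requires an action context carrying a
    // SmartConf, and execute() declares a checked Exception.
    Map<String, String> args = new HashMap<>();
    args.put(HdfsAction.FILE_PATH, "/ec/data/part-00000");  // illustrative source path

    CheckErasureCodingPolicy check = new CheckErasureCodingPolicy();
    check.init(args);    // reads the SmartConf from the action context
    check.execute();     // appends the file's EC policy, or the replication note, to the result

If the file has no EC policy set, the result contains RESULT_OF_NULL_EC_POLICY; otherwise it contains the policy's toString() form.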