Merge branch 'apache:trunk' into YARN-11225
slfan1989 authored Dec 13, 2022
2 parents 2b67668 + fdcbc8b commit eea8bff
Showing 59 changed files with 814 additions and 386 deletions.
LICENSE-binary (2 changes: 1 addition & 1 deletion)
@@ -324,7 +324,7 @@ org.apache.htrace:htrace-core:3.1.0-incubating
org.apache.htrace:htrace-core4:4.1.0-incubating
org.apache.httpcomponents:httpclient:4.5.6
org.apache.httpcomponents:httpcore:4.4.10
-org.apache.kafka:kafka-clients:2.8.1
+org.apache.kafka:kafka-clients:2.8.2
org.apache.kerby:kerb-admin:2.0.2
org.apache.kerby:kerb-client:2.0.2
org.apache.kerby:kerb-common:2.0.2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PlatformName.java
@@ -18,6 +18,10 @@

package org.apache.hadoop.util;

+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.Arrays;
+
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@@ -33,21 +37,71 @@ public class PlatformName {
* per the java-vm.
*/
public static final String PLATFORM_NAME =
-      (System.getProperty("os.name").startsWith("Windows")
-          ? System.getenv("os") : System.getProperty("os.name"))
-          + "-" + System.getProperty("os.arch")
-          + "-" + System.getProperty("sun.arch.data.model");
+      (System.getProperty("os.name").startsWith("Windows") ?
+          System.getenv("os") : System.getProperty("os.name"))
+          + "-" + System.getProperty("os.arch") + "-"
+          + System.getProperty("sun.arch.data.model");

/**
* The java vendor name used in this platform.
*/
public static final String JAVA_VENDOR_NAME = System.getProperty("java.vendor");

+  /**
+   * Define a system class accessor that is open to changes in underlying implementations
+   * of the system class loader modules.
+   */
+  private static final class SystemClassAccessor extends ClassLoader {
+    public Class<?> getSystemClass(String className) throws ClassNotFoundException {
+      return findSystemClass(className);
+    }
+  }
+
/**
* A public static variable to indicate the current java vendor is
-   * IBM java or not.
+   * IBM Java Technology Edition, which provides its own implementations
+   * of many security packages and cipher suites. Note that these are not
+   * provided in Semeru runtimes:
+   * see https://developer.ibm.com/languages/java/semeru-runtimes for details.
   */
-  public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM");
+  public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM") &&
+      hasIbmTechnologyEditionModules();

+  private static boolean hasIbmTechnologyEditionModules() {
+    return Arrays.asList(
+        "com.ibm.security.auth.module.JAASLoginModule",
+        "com.ibm.security.auth.module.Win64LoginModule",
+        "com.ibm.security.auth.module.NTLoginModule",
+        "com.ibm.security.auth.module.AIX64LoginModule",
+        "com.ibm.security.auth.module.LinuxLoginModule",
+        "com.ibm.security.auth.module.Krb5LoginModule"
+    ).stream().anyMatch((module) -> isSystemClassAvailable(module));
+  }

+  /**
+   * In rare cases where behaviour differs based on the JVM vendor,
+   * this method should be used to test for a unique JVM class provided by the
+   * vendor rather than checking the vendor string. For example, if a JVM
+   * provides a different Kerberos login module, testing that the login module
+   * is loadable before configuring it is preferable to using the vendor data.
+   *
+   * @param className the name of a class in the JVM to test for
+   * @return true if the class is available, false otherwise.
+   */
+  private static boolean isSystemClassAvailable(String className) {
+    return AccessController.doPrivileged((PrivilegedAction<Boolean>) () -> {
+      try {
+        // Using ClassLoader.findSystemClass() instead of
+        // Class.forName(className, false, null) because Class.forName with a null
+        // ClassLoader only looks at the boot ClassLoader with Java 9 and above
+        // which doesn't look at all the modules available to the findSystemClass.
+        new SystemClassAccessor().getSystemClass(className);
+        return true;
+      } catch (Exception ignored) {
+        return false;
+      }
+    });
+  }

public static void main(String[] args) {
System.out.println(PLATFORM_NAME);
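
Editorial note: the javadoc above recommends probing for a vendor-specific class rather than matching the vendor string. A minimal sketch of that pattern, with a hypothetical helper class that is not part of this patch (the IBM module name comes from the diff; the Sun/OpenJDK fallback is a standard JDK class):

    import static org.apache.hadoop.util.PlatformName.IBM_JAVA;

    public class LoginModuleChooser {
      // Probe-based selection: IBM_JAVA is now true only when the IBM
      // Technology Edition security modules are actually loadable, so Semeru
      // runtimes (IBM vendor string, Hotspot-style security providers) fall
      // through to the standard Sun/OpenJDK login module.
      static String krb5LoginModuleName() {
        return IBM_JAVA
            ? "com.ibm.security.auth.module.Krb5LoginModule"
            : "com.sun.security.auth.module.Krb5LoginModule";
      }
    }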
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AvroFSInput.java
@@ -60,7 +60,6 @@ public AvroFSInput(final FileContext fc, final Path p) throws IOException {
FS_OPTION_OPENFILE_READ_POLICY_SEQUENTIAL)
.withFileStatus(status)
.build());
-    fc.open(p);
}

@Override
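
Editorial note: the deleted line opened the file a second time and discarded the handle, leaking a stream on every construction; the builder-based open above is now the only open. A usage sketch under that reading (the wrapper class and method are hypothetical; fc and p are as in the constructor signature):

    import java.io.IOException;
    import org.apache.hadoop.fs.AvroFSInput;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class AvroFSInputExample {
      // AvroFSInput is Closeable; with the stray fc.open(p) gone, closing it
      // releases the single underlying stream.
      static void readHeader(FileContext fc, Path p) throws IOException {
        try (AvroFSInput in = new AvroFSInput(fc, p)) {
          byte[] buf = new byte[16];
          in.read(buf, 0, buf.length);
        }
      }
    }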
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/ssl/SSLFactory.java
@@ -25,7 +25,7 @@
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import static org.apache.hadoop.util.PlatformName.JAVA_VENDOR_NAME;
+import static org.apache.hadoop.util.PlatformName.IBM_JAVA;

import javax.net.ssl.HostnameVerifier;
import javax.net.ssl.HttpsURLConnection;
@@ -102,11 +102,11 @@ public enum Mode { CLIENT, SERVER }
"ssl.server.exclude.cipher.list";

public static final String KEY_MANAGER_SSLCERTIFICATE =
-      JAVA_VENDOR_NAME.contains("IBM") ? "ibmX509" :
+      IBM_JAVA ? "ibmX509" :
KeyManagerFactory.getDefaultAlgorithm();

public static final String TRUST_MANAGER_SSLCERTIFICATE =
-      JAVA_VENDOR_NAME.contains("IBM") ? "ibmX509" :
+      IBM_JAVA ? "ibmX509" :
TrustManagerFactory.getDefaultAlgorithm();

public static final String KEYSTORES_FACTORY_CLASS_KEY =
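
Editorial note: with the stricter IBM_JAVA test, "ibmX509" is selected only when the IBM Technology Edition security provider is actually present; on other JVMs, including Semeru, the JDK defaults apply. A hedged lookup sketch (the wrapper class is hypothetical; the SSLFactory constants are from the diff above; the JSSE calls are standard):

    import java.security.NoSuchAlgorithmException;
    import javax.net.ssl.KeyManagerFactory;
    import javax.net.ssl.TrustManagerFactory;
    import org.apache.hadoop.security.ssl.SSLFactory;

    public class AlgorithmCheck {
      // Resolves to "ibmX509" on IBM Java Technology Edition, otherwise to
      // the JDK default algorithms, so the lookups succeed on either runtime.
      static void printFactories() throws NoSuchAlgorithmException {
        KeyManagerFactory kmf =
            KeyManagerFactory.getInstance(SSLFactory.KEY_MANAGER_SSLCERTIFICATE);
        TrustManagerFactory tmf =
            TrustManagerFactory.getInstance(SSLFactory.TRUST_MANAGER_SSLCERTIFICATE);
        System.out.println(kmf.getAlgorithm() + " / " + tmf.getAlgorithm());
      }
    }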
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ApplicationClassLoader.java
@@ -108,7 +108,7 @@ static URL[] constructUrlsFromClasspath(String classpath)
throws MalformedURLException {
List<URL> urls = new ArrayList<URL>();
for (String element : classpath.split(File.pathSeparator)) {
if (element.endsWith("/*")) {
if (element.endsWith(File.separator + "*")) {
List<Path> jars = FileUtil.getJarsInDirectory(element);
if (!jars.isEmpty()) {
for (Path jar: jars) {
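
Editorial note: on Windows, File.separator is "\", so a wildcard classpath entry arrives as something like C:\lib\*, which the old literal "/*" check missed. A tiny sketch of the difference (the class and paths are illustrative only):

    import java.io.File;

    public class WildcardCheck {
      public static void main(String[] args) {
        // A Windows-style wildcard classpath entry.
        String element = "C:\\lib\\*";
        System.out.println(element.endsWith("/*"));                  // false: old check misses it
        System.out.println(element.endsWith(File.separator + "*"));  // true when File.separator is "\"
      }
    }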
hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -2168,9 +2168,8 @@ The switch to turn S3A auditing on or off.

<property>
<name>fs.azure.enable.readahead</name>
<value>false</value>
<description>Disable readahead/prefetching in AbfsInputStream.
See HADOOP-18521</description>
<value>true</value>
<description>Enabled readahead/prefetching in AbfsInputStream.</description>
</property>

<property>
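
Editorial note: this flips the default back to readahead enabled, removing the mitigation that the deleted description attributed to HADOOP-18521. A hedged sketch of overriding the default in client code (the wrapper class is hypothetical; the key is from the diff above):

    import org.apache.hadoop.conf.Configuration;

    public class AbfsReadaheadOptOut {
      static Configuration withReadaheadDisabled() {
        Configuration conf = new Configuration();
        // Opt back out of ABFS readahead, e.g. while validating behaviour
        // on a particular release line.
        conf.setBoolean("fs.azure.enable.readahead", false);
        return conf;
      }
    }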
hadoop-common-project/hadoop-common/src/site/markdown/ClusterSetup.md
@@ -22,7 +22,17 @@ Purpose

This document describes how to install and configure Hadoop clusters ranging from a few nodes to extremely large clusters with thousands of nodes. To play with Hadoop, you may first want to install it on a single machine (see [Single Node Setup](./SingleCluster.html)).

-This document does not cover advanced topics such as [Security](./SecureMode.html) or High Availability.
+This document does not cover advanced topics such as High Availability.
+
+*Important*: all production Hadoop clusters use Kerberos to authenticate callers
+and secure access to HDFS data, as well as to restrict access to computation
+services (YARN etc.).
+
+These instructions do not cover integration with any Kerberos services;
+everyone bringing up a production cluster should make connecting to their
+organisation's Kerberos infrastructure a key part of the deployment.
+
+See [Security](./SecureMode.html) for details on how to secure a cluster.

Prerequisites
-------------
hadoop-common-project/hadoop-common/src/site/markdown/SingleCluster.md
@@ -26,15 +26,22 @@ Purpose

This document describes how to set up and configure a single-node Hadoop installation so that you can quickly perform simple operations using Hadoop MapReduce and the Hadoop Distributed File System (HDFS).


+*Important*: all production Hadoop clusters use Kerberos to authenticate callers
+and secure access to HDFS data, as well as to restrict access to computation
+services (YARN etc.).
+
+These instructions do not cover integration with any Kerberos services;
+everyone bringing up a production cluster should make connecting to their
+organisation's Kerberos infrastructure a key part of the deployment.

Prerequisites
-------------

$H3 Supported Platforms

* GNU/Linux is supported as a development and production platform. Hadoop has been demonstrated on GNU/Linux clusters with 2000 nodes.

* Windows is also a supported platform but the following steps are for Linux only. To set up Hadoop on Windows, see [wiki page](http://wiki.apache.org/hadoop/Hadoop2OnWindows).

$H3 Required Software

Required software for Linux includes:
hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java
@@ -38,8 +38,35 @@
import java.util.Arrays;

public class TestMiniKdc extends KerberosSecurityTestcase {
-  private static final boolean IBM_JAVA = System.getProperty("java.vendor")
-      .contains("IBM");
+  private static final boolean IBM_JAVA = shouldUseIbmPackages();
+  // duplicated to avoid cycles in the build
+  private static boolean shouldUseIbmPackages() {
+    final List<String> ibmTechnologyEditionSecurityModules = Arrays.asList(
+        "com.ibm.security.auth.module.JAASLoginModule",
+        "com.ibm.security.auth.module.Win64LoginModule",
+        "com.ibm.security.auth.module.NTLoginModule",
+        "com.ibm.security.auth.module.AIX64LoginModule",
+        "com.ibm.security.auth.module.LinuxLoginModule",
+        "com.ibm.security.auth.module.Krb5LoginModule"
+    );
+
+    if (System.getProperty("java.vendor").contains("IBM")) {
+      return ibmTechnologyEditionSecurityModules
+          .stream().anyMatch((module) -> isSystemClassAvailable(module));
+    }
+
+    return false;
+  }
+
+  private static boolean isSystemClassAvailable(String className) {
+    try {
+      Class.forName(className);
+      return true;
+    } catch (Exception ignored) {
+      return false;
+    }
+  }

@Test
public void testMiniKdcStart() {
MiniKdc kdc = getKdc();
@@ -117,9 +144,9 @@ public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
options.put("debug", "true");

return new AppConfigurationEntry[]{
-        new AppConfigurationEntry(getKrb5LoginModuleName(),
-        AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
-        options)};
+          new AppConfigurationEntry(getKrb5LoginModuleName(),
+              AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+              options)};
}
}

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -72,6 +72,8 @@
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_STARTUP_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT;
@@ -353,6 +355,7 @@ public class DataNode extends ReconfigurableBase
DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY,
DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY,
DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY,
+      DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY,
FS_DU_INTERVAL_KEY,
FS_GETSPACEUSED_JITTER_KEY,
FS_GETSPACEUSED_CLASSNAME));
@@ -699,6 +702,7 @@ public String reconfigurePropertyImpl(String property, String newVal)
case DFS_DATANODE_OUTLIERS_REPORT_INTERVAL_KEY:
case DFS_DATANODE_MIN_OUTLIER_DETECTION_DISKS_KEY:
case DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY:
+    case DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY:
return reconfSlowDiskParameters(property, newVal);
case FS_DU_INTERVAL_KEY:
case FS_GETSPACEUSED_JITTER_KEY:
@@ -877,6 +881,12 @@ private String reconfSlowDiskParameters(String property, String newVal)
Long.parseLong(newVal));
result = Long.toString(threshold);
diskMetrics.setLowThresholdMs(threshold);
+    } else if (property.equals(DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY)) {
+      checkNotNull(diskMetrics, "DataNode disk stats may be disabled.");
+      int maxSlowDisksToExclude = (newVal == null ?
+          DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_DEFAULT : Integer.parseInt(newVal));
+      result = Integer.toString(maxSlowDisksToExclude);
+      diskMetrics.setMaxSlowDisksToExclude(maxSlowDisksToExclude);
}
LOG.info("RECONFIGURE* changed {} to {}", property, newVal);
return result;
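
Editorial note: because the key is added to the DataNode's reconfigurable-properties list and handled in reconfigurePropertyImpl, the value can be changed at runtime through the standard reconfiguration workflow, for example `hdfs dfsadmin -reconfig datanode dn1.example.com:9867 start` followed by `hdfs dfsadmin -reconfig datanode dn1.example.com:9867 status` after updating hdfs-site.xml on that node (host and port are placeholders; 9867 is the default DataNode IPC port, and the property name corresponding to DFS_DATANODE_MAX_SLOWDISKS_TO_EXCLUDE_KEY is assumed to be dfs.datanode.max.slowdisks.to.exclude).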
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeDiskMetrics.java
@@ -80,7 +80,7 @@ public class DataNodeDiskMetrics {
/**
* The number of slow disks that needs to be excluded.
*/
-  private int maxSlowDisksToExclude;
+  private volatile int maxSlowDisksToExclude;
/**
* List of slow disks that need to be excluded.
*/
@@ -274,6 +274,14 @@ public List<String> getSlowDisksToExclude() {
return slowDisksToExclude;
}

+  public int getMaxSlowDisksToExclude() {
+    return maxSlowDisksToExclude;
+  }
+
+  public void setMaxSlowDisksToExclude(int maxSlowDisksToExclude) {
+    this.maxSlowDisksToExclude = maxSlowDisksToExclude;
+  }

public void setLowThresholdMs(long thresholdMs) {
Preconditions.checkArgument(thresholdMs > 0,
DFS_DATANODE_SLOWDISK_LOW_THRESHOLD_MS_KEY + " should be larger than 0");
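
Editorial note (an inference from the two hunks above, not stated in the diff): the field becomes volatile because it is now written by the reconfiguration handler via setMaxSlowDisksToExclude() while the periodic disk-outlier detection loop reads it. For a single primitive that is written and read independently, volatile guarantees the updated value is visible across those threads without additional locking.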
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3621,10 +3621,10 @@ void setQuota(String src, long nsQuota, long ssQuota, StorageType type)
final String operationName = getQuotaCommand(nsQuota, ssQuota);
final FSPermissionChecker pc = getPermissionChecker();
FSPermissionChecker.setOperationType(operationName);
-    if(!allowOwnerSetQuota) {
-      checkSuperuserPrivilege(operationName, src);
-    }
    try {
+      if(!allowOwnerSetQuota) {
+        checkSuperuserPrivilege(operationName, src);
+      }
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@@ -7761,8 +7761,8 @@ void addCachePool(CachePoolInfo req, boolean logRetryCache)
checkOperation(OperationCategory.WRITE);
String poolInfoStr = null;
String poolName = req == null ? null : req.getPoolName();
-    checkSuperuserPrivilege(operationName, poolName);
    try {
+      checkSuperuserPrivilege(operationName, poolName);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@@ -7788,8 +7788,8 @@ void modifyCachePool(CachePoolInfo req, boolean logRetryCache)
checkOperation(OperationCategory.WRITE);
String poolNameStr = "{poolName: " +
(req == null ? null : req.getPoolName()) + "}";
-    checkSuperuserPrivilege(operationName, poolNameStr);
    try {
+      checkSuperuserPrivilege(operationName, poolNameStr);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@@ -7815,8 +7815,8 @@ void removeCachePool(String cachePoolName, boolean logRetryCache)
final String operationName = "removeCachePool";
checkOperation(OperationCategory.WRITE);
String poolNameStr = "{poolName: " + cachePoolName + "}";
-    checkSuperuserPrivilege(operationName, poolNameStr);
    try {
+      checkSuperuserPrivilege(operationName, poolNameStr);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@@ -8017,11 +8017,11 @@ void createEncryptionZone(final String src, final String keyName,
SafeModeException, AccessControlException {
final String operationName = "createEncryptionZone";
FileStatus resultingStat = null;
-    checkSuperuserPrivilege(operationName, src);
    try {
      Metadata metadata = FSDirEncryptionZoneOp.ensureKeyIsInitialized(dir,
          keyName, src);
      final FSPermissionChecker pc = getPermissionChecker();
+      checkSuperuserPrivilege(operationName, src);
checkOperation(OperationCategory.WRITE);
writeLock();
try {
@@ -8100,11 +8100,11 @@ void reencryptEncryptionZone(final String zone, final ReencryptAction action,
final boolean logRetryCache) throws IOException {
final String operationName = "reencryptEncryptionZone";
boolean success = false;
-    checkSuperuserPrivilege(operationName, zone);
    try {
      Preconditions.checkNotNull(zone, "zone is null.");
      checkOperation(OperationCategory.WRITE);
      final FSPermissionChecker pc = dir.getPermissionChecker();
+      checkSuperuserPrivilege(operationName, zone);
checkNameNodeSafeMode("NameNode in safemode, cannot " + action
+ " re-encryption on zone " + zone);
reencryptEncryptionZoneInt(pc, zone, action, logRetryCache);
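
Editorial note: every hunk in this file applies the same transformation, moving checkSuperuserPrivilege() from before the try block to inside it. A plausible reading (an assumption, not stated in the diff) is that FSNamesystem operations audit-log failures from their catch blocks, so a privilege check that throws outside the try would bypass the audit log. A sketch of the resulting pattern, with identifiers as used in the hunks above and the audit call assumed from FSNamesystem conventions:

    try {
      checkSuperuserPrivilege(operationName, src);  // now inside try: failures reach the catch
      writeLock();
      try {
        checkOperation(OperationCategory.WRITE);
        // ... mutate the namespace ...
      } finally {
        writeUnlock(operationName);
      }
    } catch (AccessControlException ace) {
      logAuditEvent(false, operationName, src);     // failure path is audited
      throw ace;
    }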

(Some large diffs in this commit are not shown here.)

