diff --git a/.gitignore b/.gitignore index 2f961002639..6ed020ba068 100644 --- a/.gitignore +++ b/.gitignore @@ -33,3 +33,4 @@ createDDL.jdbc /dist /build ambari_python.egg-info +**/Crashpad diff --git a/Jenkinsfile b/Jenkinsfile index 638b6c8a212..2d0da33bf84 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -96,10 +96,8 @@ pipeline { parallel { stage('Ambari WebUI Tests') { steps { - withEnv(['OPENSSL_CONF=/dev/null']) { - sh 'lsb_release -a' + withEnv(['CHROME_BIN=/usr/bin/chromium-browser']) { sh 'mvn -T 2C -am test -pl ambari-web,ambari-admin -Dmaven.artifact.threads=10 -Drat.skip' - } } } diff --git a/ambari-admin/pom.xml b/ambari-admin/pom.xml index d146b56a406..9e398e85819 100644 --- a/ambari-admin/pom.xml +++ b/ambari-admin/pom.xml @@ -135,7 +135,7 @@ ${args.shell} ${basedir}${dirsep}set-ambari-version.${fileextension.shell} ${ambariVersion} - + diff --git a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/SideNavCtrl.js b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/SideNavCtrl.js index 6bf356d7967..2e3e820513b 100644 --- a/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/SideNavCtrl.js +++ b/ambari-admin/src/main/resources/ui/admin-web/app/scripts/controllers/SideNavCtrl.js @@ -35,14 +35,27 @@ angular.module('ambariAdminConsole') } function initNavigationBar () { - $('body').on('DOMNodeInserted', '.navigation-bar', function() { - $('.navigation-bar').navigationBar({ - fitHeight: true, - collapseNavBarClass: 'fa-angle-double-left', - expandNavBarClass: 'fa-angle-double-right' - }); - //initTooltips(); - $('body').off('DOMNodeInserted', '.navigation-bar'); + const observer = new MutationObserver(mutations => { + var targetNode + if (mutations.some((mutation) => mutation.type === 'childList' && (targetNode = $('.navigation-bar')).length)) { + observer.disconnect(); + //initTooltips(); + targetNode.navigationBar({ + fitHeight: true, + collapseNavBarClass: 'fa-angle-double-left', + expandNavBarClass: 
'fa-angle-double-right' + }); + } + }); + + setTimeout(() => { + // remove observer if selected element is not found in 10secs. + observer.disconnect(); + }, 10000) + + observer.observe(document.body, { + childList: true, + subtree: true }); } diff --git a/ambari-admin/src/main/resources/ui/admin-web/package.json b/ambari-admin/src/main/resources/ui/admin-web/package.json index d9995ac0239..a3b442c2b13 100644 --- a/ambari-admin/src/main/resources/ui/admin-web/package.json +++ b/ambari-admin/src/main/resources/ui/admin-web/package.json @@ -21,11 +21,9 @@ "http-server": "0.6.1", "jasmine-core": "^3.1.0", "karma": "^2.0.4", - "karma-chrome-launcher": "0.1.4", "karma-jasmine": "^1.1.2", "karma-ng-html2js-preprocessor": "^1.0.0", - "karma-phantomjs-launcher": "^1.0.4", - "phantomjs": "^2.1.7", + "karma-chrome-launcher": "3.2.0", "protractor": "1.0.0" }, "scripts": { diff --git a/ambari-admin/src/main/resources/ui/admin-web/test/karma.conf.js b/ambari-admin/src/main/resources/ui/admin-web/test/karma.conf.js index bb514cd473f..fb6353e3b15 100644 --- a/ambari-admin/src/main/resources/ui/admin-web/test/karma.conf.js +++ b/ambari-admin/src/main/resources/ui/admin-web/test/karma.conf.js @@ -42,13 +42,28 @@ module.exports = function(config){ autoWatch : true, + colors: true, + + logLevel: config.LOG_INFO, + + captureTimeout: 60000, + + browserNoActivityTimeout: 30000, + frameworks: ['jasmine'], - browsers: ['PhantomJS'], + customLaunchers: { + ChromeHeadlessCustom: { + base: 'ChromeHeadless', + flags: ['--no-sandbox', '--disable-gpu', '--disable-translate', '--disable-extensions'] + } + }, + + browsers: ['ChromeHeadlessCustom'], plugins : [ 'karma-jasmine', - 'karma-phantomjs-launcher', + 'karma-chrome-launcher', 'karma-ng-html2js-preprocessor' ], diff --git a/ambari-agent/conf/unix/upgrade_agent_configs.py b/ambari-agent/conf/unix/upgrade_agent_configs.py index 73a09c07165..0f30b983926 100644 --- a/ambari-agent/conf/unix/upgrade_agent_configs.py +++ 
b/ambari-agent/conf/unix/upgrade_agent_configs.py @@ -56,4 +56,4 @@ else: print("Values are not updated, configs {0} is not found".format(CONFIG_FILE)) else: - print("Values are not updated, backup {0} is not found".format(CONFIG_FILE_BACKUP)) \ No newline at end of file + print("Values are not updated, backup {0} is not found".format(CONFIG_FILE_BACKUP)) diff --git a/ambari-agent/pom.xml b/ambari-agent/pom.xml index 1ecbab8c42d..1f98701e55b 100644 --- a/ambari-agent/pom.xml +++ b/ambari-agent/pom.xml @@ -250,7 +250,7 @@ ${python.test.mask} - ${path.python.1}${pathsep}$PYTHONPATH + ${path.python.1}${pathsep}${env.PYTHONPATH} ${skipPythonTests} @@ -269,7 +269,7 @@ ${target.cache.dir} - target${dirsep}ambari-agent-${project.version}${pathsep}$PYTHONPATH + target${dirsep}ambari-agent-${project.version}${pathsep}${env.PYTHONPATH} generate-hash-files @@ -314,6 +314,9 @@ 2012, Apache Software Foundation Development Maven Recipe: RPM Package. + + __python 3 + ${rpm.dependency.list} diff --git a/ambari-agent/src/main/python/ambari_agent/InitializerModule.py b/ambari-agent/src/main/python/ambari_agent/InitializerModule.py index 18b0d8faa9c..182b078885f 100644 --- a/ambari-agent/src/main/python/ambari_agent/InitializerModule.py +++ b/ambari-agent/src/main/python/ambari_agent/InitializerModule.py @@ -76,8 +76,6 @@ def __init__(self): self.action_queue = None self.alert_scheduler_handler = None - self.init() - def init(self): """ Initialize properties @@ -93,8 +91,8 @@ def init(self): self.stale_alerts_monitor = StaleAlertsMonitor(self) self.server_responses_listener = ServerResponsesListener(self) self.file_cache = FileCache(self.config) - self.customServiceOrchestrator = CustomServiceOrchestrator(self) self.hooks_orchestrator = HooksOrchestrator(self) + self.customServiceOrchestrator = CustomServiceOrchestrator(self) self.recovery_manager = RecoveryManager(self) self.commandStatuses = CommandStatusDict(self) diff --git a/ambari-agent/src/main/python/ambari_agent/main.py 
b/ambari-agent/src/main/python/ambari_agent/main.py index 1107e2693fa..9c8432cb0a4 100644 --- a/ambari-agent/src/main/python/ambari_agent/main.py +++ b/ambari-agent/src/main/python/ambari_agent/main.py @@ -316,12 +316,8 @@ def run_threads(initializer_module): initializer_module.heartbeat_thread.join() initializer_module.action_queue.join() -# event - event, that will be passed to Controller and NetUtil to make able to interrupt loops form outside process -# we need this for windows os, where no sigterm available -def main(initializer_module, heartbeat_stop_callback=None): - global config - global home_dir - +# parse the options from command line +def setup_option_parser(): parser = OptionParser() parser.add_option("-v", "--verbose", dest="verbose", action="store_true", help="verbose log output", default=False) parser.add_option("-e", "--expected-hostname", dest="expected_hostname", action="store", @@ -329,24 +325,34 @@ def main(initializer_module, heartbeat_stop_callback=None): parser.add_option("--home", dest="home_dir", action="store", help="Home directory", default="") (options, args) = parser.parse_args() - expected_hostname = options.expected_hostname - home_dir = options.home_dir + return options + +# initialize the loggers +def init_loggers(options): + global is_logger_setup logging_level = logging.DEBUG if options.verbose else logging.INFO setup_logging(logger, AmbariConfig.AmbariConfig.getLogFile(), logging_level) - global is_logger_setup - is_logger_setup = True setup_logging(alerts_logger, AmbariConfig.AmbariConfig.getAlertsLogFile(), logging_level) setup_logging(alerts_logger_2, AmbariConfig.AmbariConfig.getAlertsLogFile(), logging_level) setup_logging(alerts_logger_global, AmbariConfig.AmbariConfig.getAlertsLogFile(), logging_level) setup_logging(apscheduler_logger, AmbariConfig.AmbariConfig.getAlertsLogFile(), logging_level) setup_logging(apscheduler_logger_global, AmbariConfig.AmbariConfig.getAlertsLogFile(), logging_level) 
Logger.initialize_logger('resource_management', logging_level=logging_level) - #with Environment() as env: - # File("/abc") - # init data, once loggers are setup to see exceptions/errors of initialization. + is_logger_setup = True + +# event - event, that will be passed to Controller and NetUtil to make able to interrupt loops form outside process +# we need this for windows os, where no sigterm available +def main(options, initializer_module, heartbeat_stop_callback=None): + global config + global home_dir + + expected_hostname = options.expected_hostname + home_dir = options.home_dir + + # init data. initializer_module.init() if home_dir != "": @@ -454,10 +460,12 @@ def main(initializer_module, heartbeat_stop_callback=None): if __name__ == "__main__": is_logger_setup = False try: + options = setup_option_parser() + init_loggers(options) initializer_module = InitializerModule() heartbeat_stop_callback = bind_signal_handlers(agentPid, initializer_module.stop_event) - main(initializer_module, heartbeat_stop_callback) + main(options, initializer_module, heartbeat_stop_callback) except SystemExit: raise except BaseException: diff --git a/ambari-agent/src/test/python/unitTests.py b/ambari-agent/src/test/python/unitTests.py index 0a6b8cc3aff..7c047566b6d 100644 --- a/ambari-agent/src/test/python/unitTests.py +++ b/ambari-agent/src/test/python/unitTests.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - ''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. 
See the NOTICE file @@ -37,6 +36,7 @@ $(pwd)/ambari-agent/src/test/python/ambari_agent: $(pwd)/ambari-common/src/main/python: $(pwd)/ambari-server/src/main/resources/common-services/HDFS/2.1.0.2.0/package/files: +$(pwd)/ambari-server/src/test/python: $(pwd)/ambari-agent/src/test/python/resource_management: $(pwd)/ambari-common/src/main/python/ambari_jinja2 """ diff --git a/ambari-common/src/main/python/ambari_commons/resources/os_family.json b/ambari-common/src/main/python/ambari_commons/resources/os_family.json index 2409e2cbc52..6c83ca570ca 100644 --- a/ambari-common/src/main/python/ambari_commons/resources/os_family.json +++ b/ambari-common/src/main/python/ambari_commons/resources/os_family.json @@ -33,6 +33,15 @@ 7 ] }, + "openeuler": { + "extends" : "redhat", + "distro": [ + "openeuler" + ], + "versions": [ + 22 + ] + }, "amazonlinux": { "extends" : "redhat", "distro": [ diff --git a/ambari-server/conf/unix/ambari-env.sh b/ambari-server/conf/unix/ambari-env.sh index d2f655c1b90..d080f46fc70 100644 --- a/ambari-server/conf/unix/ambari-env.sh +++ b/ambari-server/conf/unix/ambari-env.sh @@ -1,3 +1,4 @@ +#!/bin/bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. 
@@ -15,7 +16,7 @@ AMBARI_PASSHPHRASE="DEV" -export AMBARI_JVM_ARGS="$AMBARI_JVM_ARGS -Xms512m -Xmx2048m -XX:MaxPermSize=128m -Djava.security.auth.login.config=$ROOT/etc/ambari-server/conf/krb5JAASLogin.conf -Djava.security.krb5.conf=/etc/krb5.conf -Djavax.security.auth.useSubjectCredsOnly=false -Dcom.sun.jndi.ldap.connect.pool.protocol=\"plain ssl\" -Dcom.sun.jndi.ldap.connect.pool.maxsize=20 -Dcom.sun.jndi.ldap.connect.pool.timeout=300000" +export AMBARI_JVM_ARGS="$AMBARI_JVM_ARGS -Xms512m -Xmx2048m -XX:MaxPermSize=128m -Djdk.tls.ephemeralDHKeySize=2048 -Djava.security.auth.login.config=$ROOT/etc/ambari-server/conf/krb5JAASLogin.conf -Djava.security.krb5.conf=/etc/krb5.conf -Djavax.security.auth.useSubjectCredsOnly=false -Dcom.sun.jndi.ldap.connect.pool.protocol=\"plain ssl\" -Dcom.sun.jndi.ldap.connect.pool.maxsize=20 -Dcom.sun.jndi.ldap.connect.pool.timeout=300000" export PATH=$PATH:$ROOT/var/lib/ambari-server export PYTHONPATH=$ROOT/usr/lib/ambari-server/lib:$PYTHONPATH diff --git a/ambari-server/conf/unix/install-helper.sh b/ambari-server/conf/unix/install-helper.sh index f3ac20a23a6..22c01fb3e45 100644 --- a/ambari-server/conf/unix/install-helper.sh +++ b/ambari-server/conf/unix/install-helper.sh @@ -1,3 +1,4 @@ +#!/bin/bash # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information rega4rding copyright ownership. diff --git a/ambari-server/conf/windows/ambari-env.cmd b/ambari-server/conf/windows/ambari-env.cmd index 23600d4330d..07dc6b1ad22 100644 --- a/ambari-server/conf/windows/ambari-env.cmd +++ b/ambari-server/conf/windows/ambari-env.cmd @@ -16,4 +16,4 @@ rem limitations under the License. 
set AMBARI_PASSHPHRASE=DEV -set AMBARI_JVM_ARGS=%AMBARI_JVM_ARGS% -Xms512m -Xmx2048m -Djava.security.auth.login.config=conf\krb5JAASLogin.conf -Djava.security.krb5.conf=conf\krb5.conf -Djavax.security.auth.useSubjectCredsOnly=false +set AMBARI_JVM_ARGS=%AMBARI_JVM_ARGS% -Xms512m -Xmx2048m -Djdk.tls.ephemeralDHKeySize=2048 -Djava.security.auth.login.config=conf\krb5JAASLogin.conf -Djava.security.krb5.conf=conf\krb5.conf -Djavax.security.auth.useSubjectCredsOnly=false diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml index 00bd6f5af2b..8c128d0c369 100644 --- a/ambari-server/pom.xml +++ b/ambari-server/pom.xml @@ -529,6 +529,9 @@ 2012, Apache Software Foundation Development Maven Recipe: RPM Package. + + __python 3 + no / @@ -754,7 +757,7 @@ ${python.test.mask} - ${path.python.1}${pathsep}$PYTHONPATH + ${path.python.1}${pathsep}${env.PYTHONPATH} ${skipPythonTests} diff --git a/ambari-server/src/main/package/dependencies.properties b/ambari-server/src/main/package/dependencies.properties index 1ea214864c1..027a3328d30 100644 --- a/ambari-server/src/main/package/dependencies.properties +++ b/ambari-server/src/main/package/dependencies.properties @@ -28,6 +28,6 @@ # Such a format is respected by install_ambari_tarball.py by default, # however should be encouraged manually in pom.xml. 
-rpm.dependency.list=postgresql-server >= 8.1,\nRequires: openssl,\nRequires: python3 +rpm.dependency.list=postgresql-server >= 8.1,\nRequires: openssl,\nRequires: python3,\nRequires: python3-distro rpm.dependency.list.suse=postgresql-server >= 8.1,\nRequires: openssl,\nRequires: python-xml,\nRequires: python3 -deb.dependency.list=openssl, postgresql (>= 8.1), python3, curl \ No newline at end of file +deb.dependency.list=openssl, postgresql (>= 8.1), python3, curl diff --git a/ambari-server/src/main/python/bootstrap.py b/ambari-server/src/main/python/bootstrap.py index 7b3e36bd78e..13aed53801c 100644 --- a/ambari-server/src/main/python/bootstrap.py +++ b/ambari-server/src/main/python/bootstrap.py @@ -345,7 +345,7 @@ def getRunSetupCommand(self, expected_hostname): user_run_as = self.shared_state.user_run_as server = self.shared_state.ambari_server version = self.getAmbariVersion() - return ' '.join(['python', setupFile, expected_hostname, passphrase, server, user_run_as, version]) + return ' '.join(['python3', setupFile, expected_hostname, passphrase, server, user_run_as, version]) def runSetupAgent(self): self.host_log.write("==========================\n") @@ -609,7 +609,7 @@ def getRunSetupWithPasswordCommand(self, expected_hostname): version = self.getAmbariVersion() port = self.getAmbariPort() passwordFile = self.getPasswordFile() - return "{sudo} -S python ".format(sudo=AMBARI_SUDO) + str(setupFile) + " " + str(expected_hostname) + \ + return "{sudo} -S python3 ".format(sudo=AMBARI_SUDO) + str(setupFile) + " " + str(expected_hostname) + \ " " + str(passphrase) + " " + str(server)+ " " + quote_bash_args(str(user_run_as)) + " " + str(version) + \ " " + str(port) + " < " + str(passwordFile) @@ -620,7 +620,7 @@ def getRunSetupWithoutPasswordCommand(self, expected_hostname): user_run_as = self.shared_state.user_run_as version=self.getAmbariVersion() port=self.getAmbariPort() - return "{sudo} python ".format(sudo=AMBARI_SUDO) + str(setupFile) + " " + 
str(expected_hostname) + \ + return "{sudo} python3 ".format(sudo=AMBARI_SUDO) + str(setupFile) + " " + str(expected_hostname) + \ " " + str(passphrase) + " " + str(server)+ " " + quote_bash_args(str(user_run_as)) + " " + str(version) + \ " " + str(port) diff --git a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/3.0.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/3.0.0/metainfo.xml index df7d4a02faa..d615712707c 100755 --- a/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/3.0.0/metainfo.xml +++ b/ambari-server/src/main/resources/common-services/AMBARI_INFRA_SOLR/3.0.0/metainfo.xml @@ -146,7 +146,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 ambari-infra-solr-client diff --git a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/metainfo.xml b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/metainfo.xml index 4f0c3eed1c2..06a8e0716d6 100644 --- a/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/metainfo.xml +++ b/ambari-server/src/main/resources/common-services/AMBARI_METRICS/3.0.0/metainfo.xml @@ -143,7 +143,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 ambari-metrics-collector diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_packages.json b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_packages.json index a050d4c5ef4..3025e610758 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_packages.json +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/properties/stack_packages.json @@ -83,6 +83,18 @@ "hadoop-hdfs-namenode" ] }, + "ROUTER": { + "STACK-SELECT-PACKAGE": "hadoop-hdfs-dfsrouter", + "INSTALL": [ + "hadoop-hdfs-dfsrouter" + ], + "PATCH": [ + "hadoop-hdfs-dfsrouter" + ], + "STANDARD": [ + "hadoop-hdfs-dfsrouter" + ] + }, "JOURNALNODE": { "STACK-SELECT-PACKAGE": "hadoop-hdfs-journalnode", "INSTALL": [ diff --git 
a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/repos/repoinfo.xml index f172172ec72..f2a127beff1 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/repos/repoinfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/repos/repoinfo.xml @@ -30,4 +30,11 @@ bigtop + + + https://bigtop-snapshot.s3.amazonaws.com/openeuler-22/$basearch + BIGTOP-3.2.0 + bigtop + + diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/FLINK/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/FLINK/metainfo.xml index 798d3f061f2..88536c613e0 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/FLINK/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/FLINK/metainfo.xml @@ -105,7 +105,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 flink_${stack_version} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/metainfo.xml index 45a6f3af0d8..a74df17bf00 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HBASE/metainfo.xml @@ -167,7 +167,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 hbase_${stack_version} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/configuration/hdfs-rbf-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/configuration/hdfs-rbf-site.xml new file mode 100644 index 00000000000..6088b511122 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/configuration/hdfs-rbf-site.xml @@ -0,0 +1,771 @@ + + + + + + + dfs.federation.router.rpc.enable + true + + boolean + + + If true, the RPC service to handle client requests in the router is + enabled. 
+ + + + + + dfs.federation.router.rpc-address + 0.0.0.0:20010 + + RPC address that handles all clients requests. + The value of this property will take the form of router-host1:rpc-port. + + + + + + dfs.federation.router.rpc-bind-host + 0.0.0.0 + + The actual address the RPC server will bind to. If this optional address is + set, it overrides only the hostname portion of + dfs.federation.router.rpc-address. This is useful for making the name node + listen on all interfaces by setting it to 0.0.0.0. + + + + + + dfs.federation.router.handler.count + 10 + + int + + + The number of server threads for the router to handle RPC requests from + clients. + + + + + + dfs.federation.router.handler.queue.size + 100 + + int + + + The size of the queue for the number of handlers to handle RPC client requests. + + + + + + dfs.federation.router.reader.count + 1 + + int + + + The number of readers for the router to handle RPC client requests. + + + + + + dfs.federation.router.reader.queue.size + 100 + + int + + + The size of the queue for the number of readers for the router to handle RPC client requests. + + + + + + dfs.federation.router.connection.creator.queue-size + 100 + + int + + + Size of async connection creator queue. + + + + + + dfs.federation.router.connection.pool-size + 1 + + int + + + Size of the pool of connections from the router to namenodes. + + + + + + dfs.federation.router.connection.min-active-ratio + 0.5f + + Minimum active ratio of connections from the router to namenodes. + + + + + + dfs.federation.router.connection.clean.ms + 10000 + + int + + + Time interval, in milliseconds, to check if the connection pool should + remove unused connections. + + + + + + dfs.federation.router.connection.pool.clean.ms + 60000 + + int + + + Time interval, in milliseconds, to check if the connection manager should + remove unused connection pools. + + + + + + dfs.federation.router.metrics.enable + true + + boolean + + + If the metrics in the router are enabled. 
+ + + + + + + dfs.federation.router.dn-report.time-out + 1000 + + int + + + Time out, in milliseconds for getDatanodeReport. + + + + + + dfs.federation.router.dn-report.cache-expire + 10s + + Expiration time in seconds for datanodereport. + + + + + + dfs.federation.router.metrics.class + org.apache.hadoop.hdfs.server.federation.metrics.FederationRPCPerformanceMonitor + + Class to monitor the RPC system in the router. It must implement the + RouterRpcMonitor interface. + + + + + + dfs.federation.router.admin.enable + true + + boolean + + + If true, the RPC admin service to handle client requests in the router is + enabled. + + + + + + dfs.federation.router.admin-address + 0.0.0.0:8111 + + RPC address that handles the admin requests. + The value of this property will take the form of router-host1:rpc-port. + + + + + + dfs.federation.router.admin-bind-host + 0.0.0.0 + + The actual address the RPC admin server will bind to. If this optional + address is set, it overrides only the hostname portion of + dfs.federation.router.admin-address. This is useful for making the name + node listen on all interfaces by setting it to 0.0.0.0. + + + + + + dfs.federation.router.admin.handler.count + 1 + + int + + + The number of server threads for the router to handle RPC requests from + admin. + + + + + + dfs.federation.router.http-address + 0.0.0.0:50071 + + HTTP address that handles the web requests to the Router. + The value of this property will take the form of router-host1:http-port. + + + + + + dfs.federation.router.http-bind-host + 0.0.0.0 + + The actual address the HTTP server will bind to. If this optional + address is set, it overrides only the hostname portion of + dfs.federation.router.http-address. This is useful for making the name + node listen on all interfaces by setting it to 0.0.0.0. + + + + + + dfs.federation.router.https-address + 0.0.0.0:50072 + + HTTPS address that handles the web requests to the Router. 
+ The value of this property will take the form of router-host1:https-port. + + + + + + dfs.federation.router.https-bind-host + 0.0.0.0 + + The actual address the HTTPS server will bind to. If this optional + address is set, it overrides only the hostname portion of + dfs.federation.router.https-address. This is useful for making the name + node listen on all interfaces by setting it to 0.0.0.0. + + + + + + dfs.federation.router.http.enable + true + + boolean + + + If the HTTP service to handle client requests in the router is enabled. + + + + + + dfs.federation.router.file.resolver.client.class + org.apache.hadoop.hdfs.server.federation.resolver.MountTableResolver + + Class to resolve files to subclusters. To enable multiple subclusters for a mount point, + set to org.apache.hadoop.hdfs.server.federation.resolver.MultipleDestinationMountTableResolver. + + + + + + dfs.federation.router.namenode.resolver.client.class + org.apache.hadoop.hdfs.server.federation.resolver.MembershipNamenodeResolver + + Class to resolve the namenode for a subcluster. + + + + + + dfs.federation.router.store.enable + true + + boolean + + + If true, the Router connects to the State Store. + + + + + + dfs.federation.router.store.serializer + org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreSerializerPBImpl + + Class to serialize State Store records. + + + + + + dfs.federation.router.store.driver.class + org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl + + Class to implement the State Store. There are three implementation classes currently + being supported: + org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileImpl, + org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreFileSystemImpl and + org.apache.hadoop.hdfs.server.federation.store.driver.impl.StateStoreZooKeeperImpl. + These implementation classes use the local file, filesystem and ZooKeeper as a backend respectively. 
+ By default it uses the ZooKeeper as the default State Store. + + + + + + dfs.federation.router.store.connection.test + 60000 + + int + + + How often to check for the connection to the State Store in milliseconds. + + + + + + dfs.federation.router.cache.ttl + 1m + + How often to refresh the State Store caches in milliseconds. This setting + supports multiple time unit suffixes as described in + dfs.heartbeat.interval. If no suffix is specified then milliseconds is + assumed. + + + + + + dfs.federation.router.store.membership.expiration + 300000 + + int + + + Expiration time in milliseconds for a membership record. + + + + + + dfs.federation.router.store.membership.expiration.deletion + -1 + + int + + + Deletion time in milliseconds for a membership record. If an expired + membership record exists beyond this time, it will be deleted. If this + value is negative, the deletion is disabled. + + + + + + dfs.federation.router.heartbeat.enable + true + + boolean + + + If true, the Router heartbeats into the State Store. + + + + + + dfs.federation.router.heartbeat.interval + 5000 + + int + + + How often the Router should heartbeat into the State Store in milliseconds. + + + + + + dfs.federation.router.heartbeat-state.interval + 5s + + How often the Router should heartbeat its state into the State Store in + milliseconds. This setting supports multiple time unit suffixes as + described in dfs.federation.router.quota-cache.update.interval. + + + + + + dfs.federation.router.namenode.heartbeat.enable + true + + boolean + + + If true, get namenode heartbeats and send into the State Store. + If not explicitly specified takes the same value as for + dfs.federation.router.heartbeat.enable. + + + + + + dfs.federation.router.store.router.expiration + 5m + + Expiration time in milliseconds for a router state record. This setting + supports multiple time unit suffixes as described in + dfs.federation.router.quota-cache.update.interval. 
+ + + + + + dfs.federation.router.store.router.expiration.deletion + -1 + + int + + + Deletion time in milliseconds for a router state record. If an expired + router state record exists beyond this time, it will be deleted. If this + value is negative, the deletion is disabled. + + + + + + dfs.federation.router.safemode.enable + true + + boolean + + + + + + + + dfs.federation.router.safemode.extension + 30s + + Time after startup that the Router is in safe mode. This setting + supports multiple time unit suffixes as described in + dfs.heartbeat.interval. If no suffix is specified then milliseconds is + assumed. + + + + + + dfs.federation.router.safemode.expiration + 3m + + Time without being able to reach the State Store to enter safe mode. This + setting supports multiple time unit suffixes as described in + dfs.heartbeat.interval. If no suffix is specified then milliseconds is + assumed. + + + + + + dfs.federation.router.monitor.localnamenode.enable + true + + boolean + + + If true, the Router should monitor the namenode in the local machine. + + + + + + dfs.federation.router.mount-table.max-cache-size + 10000 + + int + + + Maximum number of mount table cache entries to have. + By default, remove cache entries if we have more than 10k. + + + + + + dfs.federation.router.mount-table.cache.enable + true + + boolean + + + Set to true to enable mount table cache (Path to Remote Location cache). + Disabling the cache is recommended when a large amount of unique paths are queried. + + + + + + dfs.federation.router.quota.enable + false + + boolean + + + Set to true to enable quota system in Router. When it's enabled, setting + or clearing sub-cluster's quota directly is not recommended since Router + Admin server will override sub-cluster's quota with global quota. + + + + + + dfs.federation.router.quota-cache.update.interval + 60s + + Interval time for updating quota usage cache in Router. 
+ This property is used only if the value of + dfs.federation.router.quota.enable is true. + This setting supports multiple time unit suffixes as described + in dfs.heartbeat.interval. If no suffix is specified then milliseconds + is assumed. + + + + + + dfs.federation.router.client.thread-size + 32 + + int + + + Max threads size for the RouterClient to execute concurrent + requests. + + + + + + dfs.federation.router.client.retry.max.attempts + 3 + + int + + + Max retry attempts for the RouterClient talking to the Router. + + + + + + dfs.federation.router.client.reject.overload + false + + boolean + + + Set to true to reject client requests when we run out of RPC client + threads. + + + + + + dfs.federation.router.client.allow-partial-listing + true + + boolean + + + If the Router can return a partial list of files in a multi-destination mount point when one of the subclusters is unavailable. + True may return a partial list of files if a subcluster is down. + False will fail the request if one is unavailable. + + + + + + dfs.federation.router.client.mount-status.time-out + 1s + + Set a timeout for the Router when listing folders containing mount + points. In this process, the Router checks the mount table and then it + checks permissions in the subcluster. After the time out, we return the + default values. + + + + + + dfs.federation.router.connect.max.retries.on.timeouts + 0 + + int + + + Maximum number of retries for the IPC Client when connecting to the + subclusters. By default, it doesn't let the IPC retry and the Router + handles it. + + + + + + dfs.federation.router.connect.timeout + 2s + + Time out for the IPC client connecting to the subclusters. This should be + short as the Router has knowledge of the state of the Routers. + + + + + + + dfs.federation.router.mount-table.cache.update + true + + boolean + + Set true to enable MountTableRefreshService. 
This service + updates mount table cache immediately after adding, modifying or + deleting the mount table entries. If this service is not enabled + mount table cache are refreshed periodically by + StateStoreCacheUpdateService + + + + + + dfs.federation.router.mount-table.cache.update.timeout + 1m + This property defines how long to wait for all the + admin servers to finish their mount table cache update. This setting + supports multiple time unit suffixes as described in + dfs.federation.router.safemode.extension. + + + + + + dfs.federation.router.mount-table.cache.update.client.max.time + 5m + Remote router mount table cache is updated through + RouterClient(RPC call). To improve performance, RouterClient + connections are cached but it should not be kept in cache forever. + This property defines the max time a connection can be cached. This + setting supports multiple time unit suffixes as described in + dfs.federation.router.safemode.extension. + + + + + + dfs.federation.router.secret.manager.class + org.apache.hadoop.hdfs.server.federation.router.security.token.ZKDelegationTokenSecretManagerImpl + + Class to implement state store to delegation tokens. + Default implementation uses zookeeper as the backend to store delegation tokens. 
+ + + + diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/kerberos.json b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/kerberos.json index 2ba6c636649..98b059eb352 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/kerberos.json +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/kerberos.json @@ -146,6 +146,32 @@ } ] }, + { + "name": "ROUTER", + "identities": [ + { + "name": "dfsrouter_dr", + "principal": { + "value": "router/_HOST@${realm}", + "type" : "service", + "configuration": "hdfs-rbf-site/dfs.federation.router.kerberos.principal", + "local_username" : "${hadoop-env/hdfs_user}" + }, + "keytab": { + "file": "${keytab_dir}/dr.service.keytab", + "owner": { + "name": "${hadoop-env/hdfs_user}", + "access": "r" + }, + "group": { + "name": "${cluster-env/user_group}", + "access": "" + }, + "configuration": "hdfs-rbf-site/dfs.federation.router.keytab.file" + } + } + ] + }, { "name": "SECONDARY_NAMENODE", "identities": [ diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/metainfo.xml index a62758ea8ac..172a0f031bc 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/metainfo.xml @@ -223,6 +223,52 @@ + + ROUTER + Router + SLAVE + 0+ + true + true + router + + + PYTHON + 1800 + + + + hdfs_router + true + + + hdfs_audit + + + + + HDFS/HDFS_CLIENT + host + + true + + + + + hdfs-rbf-site + + + + DECOMMISSION + + + PYTHON + 600 + + + + + JOURNALNODE JournalNode @@ -284,7 +330,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 hadoop_${stack_version} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/dfsrouter.py 
b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/dfsrouter.py new file mode 100644 index 00000000000..15d04936c90 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/dfsrouter.py @@ -0,0 +1,143 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" + +import sys +import os +import json +import tempfile +import hashlib +from datetime import datetime +import ambari_simplejson as json # simplejson is much faster comparing to Python 2.6 json module and has the same functions set. 
+ +from ambari_commons import constants + +from resource_management.libraries.resources.xml_config import XmlConfig + +from resource_management.libraries.script.script import Script +from resource_management.core.resources.system import Execute, File +from resource_management.core import shell +from resource_management.libraries.functions import stack_select +from resource_management.libraries.functions import upgrade_summary +from resource_management.libraries.functions.constants import Direction +from resource_management.libraries.functions.format import format +from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop +from resource_management.libraries.functions.security_commons import build_expectations, \ + cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \ + FILE_TYPE_XML + +from resource_management.core.exceptions import Fail +from resource_management.core.shell import as_user +from resource_management.core.logger import Logger + + +from ambari_commons.os_family_impl import OsFamilyImpl +from ambari_commons import OSConst + +from hdfs_router import router + + +from hdfs import hdfs, reconfig +import hdfs_rebalance +from utils import initiate_safe_zkfc_failover, get_hdfs_binary, get_dfsrouteradmin_base_command +from resource_management.libraries.functions.namenode_ha_utils import get_hdfs_cluster_id_from_jmx + +# The hash algorithm to use to generate digests/hashes +HASH_ALGORITHM = hashlib.sha224 + +class Router(Script): + + def get_hdfs_binary(self): + """ + Get the name or path to the hdfs binary depending on the component name. 
+ """ + return get_hdfs_binary("hadoop-hdfs-dfsrouter") + + def install(self, env): + import params + env.set_params(params) + self.install_packages(env) + self.configure(env) + + def configure(self, env): + import params + env.set_params(params) + hdfs("router") + hdfs_binary = self.get_hdfs_binary() + router(action="configure", hdfs_binary=hdfs_binary, env=env) + XmlConfig("hdfs-site.xml", + conf_dir=params.hadoop_conf_dir, + configurations=params.router_hdfs_site, + configuration_attributes=params.config['configurationAttributes']['hdfs-site'], + mode=0o644, + owner=params.hdfs_user, + group=params.user_group + ) + XmlConfig("core-site.xml", + conf_dir=params.hadoop_conf_dir, + configurations=params.router_core_site, + configuration_attributes=params.config['configurationAttributes']['core-site'], + mode=0o644, + owner=params.hdfs_user, + group=params.user_group + ) + + def save_configs(self, env): + import params + env.set_params(params) + hdfs() + + def reload_configs(self, env): + import params + env.set_params(params) + Logger.info("RELOAD CONFIGS") + reconfig("router", params.router_address) + + def start(self, env, upgrade_type=None): + import params + env.set_params(params) + self.configure(env) + hdfs_binary = self.get_hdfs_binary() + router(action="start", hdfs_binary=hdfs_binary, env=env) + + def stop(self, env, upgrade_type=None): + import params + env.set_params(params) + hdfs_binary = self.get_hdfs_binary() + router(action="stop", hdfs_binary=hdfs_binary, env=env) + + def status(self, env): + import status_params + env.set_params(status_params) + router(action="status", env=env) + + def get_log_folder(self): + import params + return params.hdfs_log_dir + + def get_user(self): + import params + return params.hdfs_user + + def get_pid_files(self): + import status_params + return [status_params.router_pid_file] + +def _print(line): + sys.stdout.write(line) + sys.stdout.flush() + +if __name__ == "__main__": + Router().execute() \ No newline at end of 
file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs.py index 31664c97447..93bd8f0c9e4 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs.py @@ -130,6 +130,15 @@ def hdfs(name=None): group=params.user_group ) + XmlConfig("hdfs-rbf-site.xml", + conf_dir=params.hadoop_conf_dir, + configurations=params.config['configurations']['hdfs-rbf-site'], + configuration_attributes=params.config['configurationAttributes']['hdfs-rbf-site'], + mode=0o644, + owner=params.hdfs_user, + group=params.user_group + ) + XmlConfig("core-site.xml", conf_dir=params.hadoop_conf_dir, configurations=params.config['configurations']['core-site'], diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs_router.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs_router.py new file mode 100644 index 00000000000..a4064925a29 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/hdfs_router.py @@ -0,0 +1,91 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +""" +import os.path +import time + +from ambari_commons import constants + +from resource_management.core import shell +from resource_management.core.source import Template +from resource_management.core.resources.system import File, Execute, Directory +from resource_management.core.resources.service import Service +from resource_management.libraries.functions.decorator import retry +from resource_management.libraries.functions.default import default +from resource_management.libraries.functions.format import format +from resource_management.libraries.functions.check_process_status import check_process_status +from resource_management.libraries.resources.execute_hadoop import ExecuteHadoop +from resource_management.libraries.functions import Direction, upgrade_summary +from resource_management.libraries.functions.namenode_ha_utils import get_name_service_by_hostname +from resource_management.libraries.functions.generate_logfeeder_input_config import generate_logfeeder_input_config +from ambari_commons import OSCheck, OSConst +from ambari_commons.os_family_impl import OsFamilyImpl, OsFamilyFuncImpl +from utils import get_dfsrouteradmin_base_command +from utils import set_up_zkfc_security + +if OSCheck.is_windows_family(): + from resource_management.libraries.functions.windows_service_utils import check_windows_service_status + +from resource_management.core.exceptions import Fail +from resource_management.core.logger import Logger + +from utils import service, safe_zkfc_op, is_previous_fs_image +from setup_ranger_hdfs import setup_ranger_hdfs, create_ranger_audit_hdfs_directories + + +@OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT) +def router(action=None, hdfs_binary=None, env=None): + + if action is None: + raise Fail('"action" parameter is required for function router().') + + if action in ["start", "stop"] and hdfs_binary is None: + raise Fail('"hdfs_binary" 
parameter is required for function router().') + + if action == "configure": + import params + generate_logfeeder_input_config('hdfs', Template("input.config-hdfs.json.j2", extra_imports=[default])) + # set up failover / secure zookeper ACLs, this feature is supported from VDP 2.6 ownwards + set_up_zkfc_security(params) + elif action == "start": + import params + service( + action="start", + name="dfsrouter", + user=params.hdfs_user, + create_pid_dir=True, + create_log_dir=True + ) + + if params.security_enabled: + Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"), + user = params.hdfs_user) + + name_service = get_name_service_by_hostname(params.hdfs_site, params.hostname) + ensure_safemode_off = True + + elif action == "stop": + import params + service( + action="stop", name="dfsrouter", + user=params.hdfs_user + ) + elif action == "status": + import status_params + check_process_status(status_params.router_pid_file) + + + + diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/params_linux.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/params_linux.py index cdd53657057..d6c99647e23 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/params_linux.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/params_linux.py @@ -195,6 +195,7 @@ hs_host = default("/clusterHostInfo/historyserver_hosts", []) jtnode_host = default("/clusterHostInfo/jtnode_hosts", []) namenode_host = default("/clusterHostInfo/namenode_hosts", []) +router_host = default("/clusterHostInfo/router_hosts", []) nm_host = default("/clusterHostInfo/nodemanager_hosts", []) ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_hosts", []) journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", []) @@ -295,6 +296,38 @@ data_dir_mount_file = 
"/var/lib/ambari-agent/data/datanode/dfs_data_dir_mount.hist" +router_address = None +if 'dfs.federation.router.rpc-address' in config['configurations']['hdfs-rbf-site']: + router_rpcaddress = config['configurations']['hdfs-rbf-site']['dfs.federation.router.rpc-address'] + router_address = format("hdfs://{router_rpcaddress}") +else: + router_address = config['configurations']['core-site']['fs.defaultFS'] +if router_host: + router_hdfs_site = dict(config['configurations']['hdfs-site']) + router_core_site = dict(config['configurations']['core-site']) + nameservices = config['configurations']['hdfs-site'].get('dfs.nameservices') + if not isinstance(nameservices, str): + # handle the error, for example by raising an exception or setting a default value + print("The dfs.nameservices property is not set or not a string") + nameservices = '' + + router_hdfs_site['dfs.nameservices'] = nameservices + ",ns-fed" + router_hdfs_site['dfs.client.failover.proxy.provider.ns-fed'] = 'org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider' + router_hdfs_site['dfs.client.failover.random.order'] = 'true' + router_id_list = ["r" + str(i) for i in range(1, len(router_host) + 1)] + router_ids = ",".join(router_id_list) + router_hdfs_site['dfs.ha.namenodes.ns-fed'] = router_ids + for i, curr_router_host in enumerate(router_host): + id = router_id_list[i] + prop_name = "dfs.namenode.rpc-address.ns-fed." 
+ id + prop_value = curr_router_host + ":" + "20010" + router_hdfs_site[prop_name] = prop_value + + router_core_site['fs.defaultFS'] = "hdfs://ns-fed" + router_core_site['hadoop.zk.address'] = config['configurations']['core-site'].get('ha.zookeeper.quorum') +else: + print("No router hosts found") + # HDFS High Availability properties dfs_ha_enabled = False dfs_ha_nameservices = default('/configurations/hdfs-site/dfs.internal.nameservices', None) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/utils.py index 5ce8598bf5a..9bebc216637 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/utils.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HDFS/package/scripts/utils.py @@ -403,6 +403,22 @@ def get_dfsadmin_base_command(hdfs_binary, use_specific_namenode = False): dfsadmin_base_command = format("{hdfs_binary} dfsadmin -fs {params.namenode_address}") return dfsadmin_base_command +def get_dfsrouteradmin_base_command(hdfs_binary, use_specific_router = False): + """ + Get the dfsrouteradmin base command constructed using hdfs_binary path and passing router address as explicit -fs argument + :param hdfs_binary: path to hdfs binary to use + :param use_specific_router: flag if set and Router HA is enabled, then the dfsrouteradmin command will use + current router's address + :return: the constructed dfsrouteradmin base command + """ + import params + dfsrouteradmin_base_command = "" + if params.dfs_ha_enabled and use_specific_router: + dfsrouteradmin_base_command = format("{hdfs_binary} dfsrouteradmin -fs hdfs://{params.router_rpc}") + else: + dfsrouteradmin_base_command = format("{hdfs_binary} dfsrouteradmin -fs {params.router_address}") + return dfsrouteradmin_base_command + def set_up_zkfc_security(params): """ Sets up security for accessing zookeper on secure clusters """ 
diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/metainfo.xml index 7730115812d..7d1aaef17e4 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/metainfo.xml @@ -281,7 +281,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 hive_${stack_version} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/params.py index 2b752d47a86..13f95f3fd58 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/params.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/HIVE/package/scripts/params.py @@ -228,6 +228,9 @@ elif hive_jdbc_driver == "com.mysql.jdbc.Driver": jdbc_jar_name = default("/ambariLevelParams/custom_mysql_jdbc_name", None) hive_previous_jdbc_jar_name = default("/ambariLevelParams/previous_custom_mysql_jdbc_name", None) +elif hive_jdbc_driver == "com.mysql.cj.jdbc.Driver": + jdbc_jar_name = default("/ambariLevelParams/custom_mysql_jdbc_name", None) + hive_previous_jdbc_jar_name = default("/ambariLevelParams/previous_custom_mysql_jdbc_name", None) elif hive_jdbc_driver == "org.postgresql.Driver": jdbc_jar_name = default("/ambariLevelParams/custom_postgres_jdbc_name", None) hive_previous_jdbc_jar_name = default("/ambariLevelParams/previous_custom_postgres_jdbc_name", None) diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/metainfo.xml index 4a8e097d2d4..d3031c78564 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/metainfo.xml +++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KAFKA/metainfo.xml @@ -94,7 +94,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 kafka_${stack_version} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KERBEROS/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KERBEROS/metainfo.xml index 3155a030ac2..9a3c015a26e 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KERBEROS/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/KERBEROS/metainfo.xml @@ -81,7 +81,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 krb5-workstation diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/LIVY/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/LIVY/metainfo.xml index fece912b2cf..ff8359afd67 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/LIVY/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/LIVY/metainfo.xml @@ -97,7 +97,7 @@ - redhat7 + redhat7,openeuler22 spark_${stack_version}-core diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/SOLR/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/SOLR/metainfo.xml index 57750d3937d..4ea46644c9f 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/SOLR/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/SOLR/metainfo.xml @@ -64,7 +64,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 solr_${stack_version} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/SPARK/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/SPARK/metainfo.xml index 3c829f78118..d5ca3f0c6a2 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/SPARK/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/SPARK/metainfo.xml @@ -204,7 +204,7 
@@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 spark_${stack_version}-core diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/metainfo.xml index dc0ce87ff4b..764e2ce3b10 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/TEZ/metainfo.xml @@ -75,7 +75,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 tez_${stack_version} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/YARN_widgets.json b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/YARN_widgets.json index bef0dc03feb..ac91ad05692 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/YARN_widgets.json +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/YARN_widgets.json @@ -71,6 +71,35 @@ "time_range": "1" } }, + { + "widget_name": "Bad Local Disks", + "description": "Number of unhealthy local disks across all NodeManagers.", + "widget_type": "NUMBER", + "is_visible": true, + "metrics": [ + { + "name": "yarn.NodeManagerMetrics.BadLocalDirs", + "metric_path": "metrics/yarn/BadLocalDirs", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.BadLogDirs", + "metric_path": "metrics/yarn/BadLogDirs", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "Number of unhealthy local disks for NodeManager", + "value": "${yarn.NodeManagerMetrics.BadLocalDirs + yarn.NodeManagerMetrics.BadLogDirs}" + } + ], + "properties": { + "display_unit": "" + } + }, { "widget_name": "Container Failures", "description": "Percentage of all containers failing in the cluster.", @@ -372,10 +401,40 @@ "section_name": "YARN_HEATMAPS", "widgetLayoutInfo": [ { - "widget_name": "Total Allocatable RAM Utilized per NodeManager", + 
"widget_name": "YARN local disk space utilization per NodeManager", "description": "", "widget_type": "HEATMAP", "is_visible": true, + "metrics": [ + { + "name": "yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc", + "metric_path": "metrics/yarn/GoodLocalDirsDiskUtilizationPerc", + "service_name": "YARN", + "component_name": "NODEMANAGER" + }, + { + "name": "yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc", + "metric_path": "metrics/yarn/GoodLogDirsDiskUtilizationPerc", + "service_name": "YARN", + "component_name": "NODEMANAGER" + } + ], + "values": [ + { + "name": "YARN local disk space utilization per NodeManager", + "value": "${(yarn.NodeManagerMetrics.GoodLocalDirsDiskUtilizationPerc + yarn.NodeManagerMetrics.GoodLogDirsDiskUtilizationPerc)/2}" + } + ], + "properties": { + "display_unit": "%", + "max_limit": "100" + } + }, + { + "widget_name": "Total Allocatable RAM Utilized per NodeManager", + "description": "", + "widget_type": "HEATMAP", + "is_visible": false, "metrics": [ { "name": "yarn.NodeManagerMetrics.AllocatedGB", diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/alerts.json b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/alerts.json index f2962dc6f14..08b5e131e72 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/alerts.json +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/alerts.json @@ -99,7 +99,7 @@ "warning": { "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]", "value": 3000 - }, + }, "critical": { "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]", "value": 5000 @@ -316,7 +316,7 @@ "warning": { "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]", "value": 3000 - }, + }, "critical": { "text": "Average Queue Time:[{0}], Average Processing Time:[{1}]", "value": 5000 @@ -387,6 +387,88 @@ } } } + ], + "YARN_REGISTRY_DNS": [ + { + "name": "YARN_REGISTRY_DNS_PROCESS", + "label": "Registry 
DNS", + "description": "This host-level alert is triggered if the Registry DNS Service cannot be determined to be up and listening on the network.", + "interval": 1, + "scope": "HOST", + "source": { + "type": "PORT", + "uri": "{{yarn-env/registry.dns.bind-port}}", + "default_port": 53, + "reporting": { + "ok": { + "text": "TCP OK - {0:.3f}s response on port {1}" + }, + "warning": { + "text": "TCP OK - {0:.3f}s response on port {1}", + "value": 1.5 + }, + "critical": { + "text": "Connection failed: {0} to {1}:{2}", + "value": 5 + } + } + } + } + ], + "TIMELINE_READER": [ + { + "name": "yarn_timeline_reader_webui", + "label": "Timeline Reader Web UI", + "description": "This host-level alert is triggered if Timeline Reader Web UI is unreachable.", + "interval": 1, + "scope": "ANY", + "source": { + "type": "WEB", + "uri": { + "http": "{{yarn-site/yarn.timeline-service.reader.webapp.address}}/ws/v2/timeline", + "https": "{{yarn-site/yarn.timeline-service.reader.webapp.https.address}}/ws/v2/timeline", + "https_property": "{{yarn-site/yarn.http.policy}}", + "https_property_value": "HTTPS_ONLY", + "kerberos_keytab": "{{cluster-env/smokeuser_keytab}}", + "kerberos_principal": "{{cluster-env/smokeuser_principal_name}}", + "connection_timeout": 5.0 + }, + "reporting": { + "ok": { + "text": "HTTP {0} response in {2:.3f}s" + }, + "warning":{ + "text": "HTTP {0} response from {1} in {2:.3f}s ({3})" + }, + "critical": { + "text": "Connection failed to {1} ({3})" + } + } + } + }, + { + "name": "ats_hbase", + "label": "ATSv2 HBase Application", + "description": "This alert is triggered if the ats hbase cannot be determined to be up and responding to requests.", + "interval": 5, + "scope": "ANY", + "enabled": true, + "source": { + "type": "SCRIPT", + "path": "BIGTOP/3.2.0/services/YARN/package/alerts/alert_ats_hbase.py", + "parameters": [ + { + "name": "check.command.timeout", + "display_name": "Command Timeout", + "value": 120.0, + "type": "NUMERIC", + "description": "The maximum 
time before check command will be killed by timeout", + "units": "seconds", + "threshold": "CRITICAL" + } + ] + } + } ] } } diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/container-executor.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/container-executor.xml index e19fe9049b6..bce09f90ebd 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/container-executor.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/container-executor.xml @@ -20,6 +20,229 @@ */ --> + + docker_module_enabled + false + Docker Runtime + Need user to pre-install docker environment first. + + + value-list + + + true + + + + false + + + + 1 + + + + + + docker_binary + /usr/bin/docker + Docker Binary + The binary used to launch docker containers. + + directory + true + + + + + + docker_allowed_devices + + Docker Allowed Devices + Comma seperated list of devices that can be mounted into a + container. + + + + container-executor + gpu_module_enabled + + + container-executor + docker_module_enabled + + + + true + + + + + + docker_allowed_ro-mounts + + Docker Allowed Read-only Mounts + + Comma seperated volumes that can be mounted as read-only. + + + + container-executor + gpu_module_enabled + + + container-executor + docker_module_enabled + + + + true + + + + + + docker_allowed_rw-mounts + + Docker Allowed Read-write Mounts + + Comma seperate volumes that can be mounted as read-write. Add the + yarn + local and log dirs to this list to run Hadoop jobs. + + + true + + + + + + docker_allowed_volume-drivers + + Docker Allowed Volume-drivers + + Comma seperated list of allowed volume-drivers. 
+ + + + container-executor + gpu_module_enabled + + + container-executor + docker_module_enabled + + + + true + + + + + + docker_privileged-containers_enabled + false + Enable Launching Privileged Containers + + Whether to enable launching privileged docker containers. + + + boolean + + + + + + docker_trusted_registries + + Docker Trusted Registries + + List of registries whose docker images are allowed to mount volumes and launch privileged docker containers. + + + true + + + + + + min_user_id + 1000 + Minimum user ID for submitting job + Set to 0 to disallow root from submitting jobs. Set to 1000 to + disallow all superusers from submitting jobs + + int + + + + + + gpu_module_enabled + false + GPU Scheduling and Isolation + Whether to enable gpu scheduling and isolation. + + + value-list + + + true + + + + false + + + + 1 + + + + + + cgroup_root + /sys/fs/cgroup + CGroup Root Path + This should be same as + yarn.nodemanager.linux-container-executor.cgroups.mount-path inside + yarn-site.xml + + + container-executor + gpu_module_enabled + + + + directory + true + + + + + + yarn_hierarchy + yarn + Yarn CGroup Hierarchy + This should be same as + yarn.nodemanager.linux-container-executor.cgroups.hierarchy + inside yarn-site.xml + + + container-executor + gpu_module_enabled + + + container-executor + docker_module_enabled + + + + true + + + + content container-executor configuration template diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/resource-types.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/resource-types.xml new file mode 100755 index 00000000000..65e8254513a --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/resource-types.xml @@ -0,0 +1,47 @@ + + + + + yarn.resource-types + + Enable resource types other than memory and vcores, values split by comma. 
For example value=yarn.io/gpu enables GPU resource types + + true + + + + container-executor + gpu_module_enabled + + + + + + yarn.resource-types.yarn.io_gpu.maximum-allocation + 8 + Maximum Container Size (GPU) + Maximum GPU Allocation + + int + 0 + 8 + 1 + + + + diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-env.xml index 81ff757f7a7..726bd83027e 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-env.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-env.xml @@ -59,7 +59,25 @@ - + + + + yarn_ats_user + Yarn ATS User + yarn-ats + USER + YARN Client User for ATSv2 backend + + user + false + + + cluster-env + user_group + + + + yarn_heapsize @@ -121,138 +139,232 @@ yarn_user_nproc_limit 65536 Max number of processes limit setting for YARN user. - + + + + + + apptimelineserver_heapsize + 1024 + AppTimelineServer Java heap size + Max heapsize for AppTimelineServer using a numerical value in the scale of MB + + false + MB + int + + + + + + + yarn_cgroups_enabled + false + You can use CGroups to isolate CPU-heavy processes in a Hadoop cluster. + CPU Scheduling and Isolation + + value-list + + + true + + + + false + + + + 1 + + + + container-executor + gpu_module_enabled + + + + + + registry.dns.bind-port + RegistryDNS Bind Port + 53 + + The port number for the DNS listener. The default port is 53. 
+ + + + content yarn-env template This is the jinja template for yarn-env.sh file -export HADOOP_YARN_HOME={{hadoop_yarn_home}} -USER="$(whoami)" -export HADOOP_LOG_DIR={{yarn_log_dir_prefix}}/$USER -export HADOOP_PID_DIR={{yarn_pid_dir_prefix}}/$USER -export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}} -export JAVA_HOME={{java64_home}} -export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}" + export HADOOP_YARN_HOME={{hadoop_yarn_home}} + USER="$(whoami)" + export HADOOP_LOG_DIR={{yarn_log_dir_prefix}}/$USER + export HADOOP_PID_DIR={{yarn_pid_dir_prefix}}/$USER + export HADOOP_SECURE_LOG_DIR={{yarn_log_dir_prefix}}/$USER + export HADOOP_SECURE_PID_DIR={{yarn_pid_dir_prefix}}/$USER + export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}} + export JAVA_HOME={{java64_home}} + export JAVA_LIBRARY_PATH="${JAVA_LIBRARY_PATH}:{{hadoop_java_io_tmpdir}}" + + # We need to add the EWMA and RFA appender for the yarn daemons only; + # however, HADOOP_ROOT_LOGGER is shared by the yarn client and the + # daemons. This is restrict the EWMA appender to daemons only. + export HADOOP_LOGLEVEL=${HADOOP_LOGLEVEL:-INFO} + export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,console} + export HADOOP_DAEMON_ROOT_LOGGER=${HADOOP_DAEMON_ROOT_LOGGER:-${HADOOP_LOGLEVEL},EWMA,RFA} + + # User for YARN daemons + export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} + # resolve links - $0 may be a softlink + export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_YARN_HOME/etc/hadoop}" + # some Java parameters + # export JAVA_HOME=/home/y/libexec/jdk1.6.0/ + if [ "$JAVA_HOME" != "" ]; then + #echo "run java in $JAVA_HOME" + JAVA_HOME=$JAVA_HOME + fi -# User for YARN daemons -export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn} + if [ "$JAVA_HOME" = "" ]; then + echo "Error: JAVA_HOME is not set." 
+ exit 1 + fi -# resolve links - $0 may be a softlink -export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_YARN_HOME/etc/hadoop}" + JAVA=$JAVA_HOME/bin/java + JAVA_HEAP_MAX=-Xmx1000m -# some Java parameters -# export JAVA_HOME=/home/y/libexec/jdk1.6.0/ -if [ "$JAVA_HOME" != "" ]; then - #echo "run java in $JAVA_HOME" - JAVA_HOME=$JAVA_HOME -fi + # For setting YARN specific HEAP sizes please use this + # Parameter and set appropriately + YARN_HEAPSIZE={{yarn_heapsize}} -if [ "$JAVA_HOME" = "" ]; then - echo "Error: JAVA_HOME is not set." - exit 1 -fi + # check envvars which might override default args + if [ "$YARN_HEAPSIZE" != "" ]; then + JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m" + fi -JAVA=$JAVA_HOME/bin/java -JAVA_HEAP_MAX=-Xmx1000m + # Resource Manager specific parameters -# For setting YARN specific HEAP sizes please use this -# Parameter and set appropriately -YARN_HEAPSIZE={{yarn_heapsize}} + # Specify the max Heapsize for the ResourceManager using a numerical value + # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set + # the value to 1000. + # This value will be overridden by an Xmx setting specified in either HADOOP_OPTS + # and/or YARN_RESOURCEMANAGER_OPTS. + # If not specified, the default value will be picked from either YARN_HEAPMAX + # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. + export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}} -# check envvars which might override default args -if [ "$YARN_HEAPSIZE" != "" ]; then - JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m" -fi + # Specify the JVM options to be used when starting the ResourceManager. 
+ # These options will be appended to the options specified as HADOOP_OPTS + # and therefore may override any similar flags set in HADOOP_OPTS + {% if security_enabled %} + export YARN_RESOURCEMANAGER_OPTS="-Djava.security.auth.login.config={{yarn_jaas_file}}" + {% endif %} -# Resource Manager specific parameters + # Node Manager specific parameters -# Specify the max Heapsize for the ResourceManager using a numerical value -# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set -# the value to 1000. -# This value will be overridden by an Xmx setting specified in either HADOOP_OPTS -# and/or YARN_RESOURCEMANAGER_OPTS. -# If not specified, the default value will be picked from either YARN_HEAPMAX -# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. -export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}} + # Specify the max Heapsize for the NodeManager using a numerical value + # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set + # the value to 1000. + # This value will be overridden by an Xmx setting specified in either HADOOP_OPTS + # and/or YARN_NODEMANAGER_OPTS. + # If not specified, the default value will be picked from either YARN_HEAPMAX + # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. + export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}} -# Specify the JVM options to be used when starting the ResourceManager. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -#export YARN_RESOURCEMANAGER_OPTS= + # Specify the max Heapsize for the timeline server using a numerical value + # in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set + # the value to 1024. + # This value will be overridden by an Xmx setting specified in either HADOOP_OPTS + # and/or YARN_TIMELINESERVER_OPTS. 
+ # If not specified, the default value will be picked from either YARN_HEAPMAX + # or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. + export YARN_TIMELINESERVER_HEAPSIZE={{apptimelineserver_heapsize}} -# Node Manager specific parameters + {% if security_enabled %} + export YARN_TIMELINESERVER_OPTS="-Djava.security.auth.login.config={{yarn_ats_jaas_file}}" + {% endif %} -# Specify the max Heapsize for the NodeManager using a numerical value -# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set -# the value to 1000. -# This value will be overridden by an Xmx setting specified in either HADOOP_OPTS -# and/or YARN_NODEMANAGER_OPTS. -# If not specified, the default value will be picked from either YARN_HEAPMAX -# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. -export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}} + {% if security_enabled %} + export YARN_TIMELINEREADER_OPTS="-Djava.security.auth.login.config={{yarn_ats_jaas_file}}" + {% endif %} -# Specify the max Heapsize for the HistoryManager using a numerical value -# in the scale of MB. For example, to specify an jvm option of -Xmx1000m, set -# the value to 1024. -# This value will be overridden by an Xmx setting specified in either HADOOP_OPTS -# and/or YARN_HISTORYSERVER_OPTS. -# If not specified, the default value will be picked from either YARN_HEAPMAX -# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two. -export YARN_HISTORYSERVER_HEAPSIZE={{apptimelineserver_heapsize}} + {% if security_enabled %} + export YARN_REGISTRYDNS_OPTS="-Djava.security.auth.login.config={{yarn_registry_dns_jaas_file}}" + {% endif %} -# Specify the JVM options to be used when starting the NodeManager. -# These options will be appended to the options specified as HADOOP_OPTS -# and therefore may override any similar flags set in HADOOP_OPTS -#export YARN_NODEMANAGER_OPTS= + # Specify the JVM options to be used when starting the NodeManager. 
+ # These options will be appended to the options specified as HADOOP_OPTS + # and therefore may override any similar flags set in HADOOP_OPTS + {% if security_enabled %} + export YARN_NODEMANAGER_OPTS="-Djava.security.auth.login.config={{yarn_nm_jaas_file}} -Dsun.security.krb5.rcache=none" + {% endif %} -# so that filenames w/ spaces are handled correctly in loops below -IFS= + # so that filenames w/ spaces are handled correctly in loops below + IFS= -# default log directory and file -if [ "$HADOOP_LOG_DIR" = "" ]; then - HADOOP_LOG_DIR="$HADOOP_YARN_HOME/logs" -fi -if [ "$HADOOP_LOGFILE" = "" ]; then - HADOOP_LOGFILE='yarn.log' -fi + # default log directory and file + if [ "$HADOOP_LOG_DIR" = "" ]; then + HADOOP_LOG_DIR="$HADOOP_YARN_HOME/logs" + fi + if [ "$HADOOP_LOGFILE" = "" ]; then + HADOOP_LOGFILE='yarn.log' + fi -# default policy file for service-level authorization -if [ "$YARN_POLICYFILE" = "" ]; then - YARN_POLICYFILE="hadoop-policy.xml" -fi + # default policy file for service-level authorization + if [ "$YARN_POLICYFILE" = "" ]; then + YARN_POLICYFILE="hadoop-policy.xml" + fi -# restore ordinary behaviour -unset IFS + # restore ordinary behaviour + unset IFS + # YARN now uses specific subcommand options of the pattern (command)_(subcommand)_OPTS for every + # component. Because of this, HADDOP_OPTS is now used as a simple way to specify common properties + # between all YARN components. 
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR" + HADOOP_OPTS="$HADOOP_OPTS -Dyarn.log.dir=$HADOOP_LOG_DIR" + HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE" + HADOOP_OPTS="$HADOOP_OPTS -Dyarn.log.file=$HADOOP_LOGFILE" + HADOOP_OPTS="$HADOOP_OPTS -Dyarn.home.dir=$HADOOP_YARN_HOME" + HADOOP_OPTS="$HADOOP_OPTS -Dyarn.id.str=$HADOOP_IDENT_STRING" + HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}" + HADOOP_OPTS="$HADOOP_OPTS -Dyarn.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}" + export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT" + export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT" + if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then + HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" + fi + HADOOP_OPTS="$HADOOP_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" + HADOOP_OPTS="$HADOOP_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}" -HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.dir=$HADOOP_LOG_DIR" -HADOOP_OPTS="$HADOOP_OPTS -Dyarn.log.dir=$HADOOP_LOG_DIR" -HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.log.file=$HADOOP_LOGFILE" -HADOOP_OPTS="$HADOOP_OPTS -Dyarn.log.file=$HADOOP_LOGFILE" -HADOOP_OPTS="$HADOOP_OPTS -Dyarn.home.dir=$HADOOP_YARN_HOME" -HADOOP_OPTS="$HADOOP_OPTS -Dyarn.id.str=$HADOOP_IDENT_STRING" -HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}" -HADOOP_OPTS="$HADOOP_OPTS -Dyarn.root.logger=${HADOOP_ROOT_LOGGER:-INFO,console}" -export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT" -export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Drm.audit.logger=INFO,RMAUDIT" -if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then - HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH" -fi -HADOOP_OPTS="$HADOOP_OPTS -Dyarn.policy.file=$YARN_POLICYFILE" -HADOOP_OPTS="$HADOOP_OPTS -Djava.io.tmpdir={{hadoop_java_io_tmpdir}}" + {% if security_enabled %} + HADOOP_OPTS="$HADOOP_OPTS 
-Djavax.security.auth.useSubjectCredsOnly=false" + {% endif %} -{% if rm_security_opts is defined %} -HADOOP_OPTS="{{rm_security_opts}} $HADOOP_OPTS" -{% endif %} + {% if rm_security_opts is defined %} + YARN_RESOURCEMANAGER_OPTS="{{rm_security_opts}} $YARN_RESOURCEMANAGER_OPTS" + {% endif %} + + export YARN_NODEMANAGER_OPTS="$YARN_NODEMANAGER_OPTS -Dnm.audit.logger=INFO,NMAUDIT" + export YARN_RESOURCEMANAGER_OPTS="$YARN_RESOURCEMANAGER_OPTS -Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY -Drm.audit.logger=INFO,RMAUDIT" + + {% if registry_dns_needs_privileged_access %} + # If the DNS server is configured to use the standard privileged port 53, + # the environment variables YARN_REGISTRYDNS_SECURE_USER and + # YARN_REGISTRYDNS_SECURE_EXTRA_OPTS must be set. + export YARN_REGISTRYDNS_SECURE_USER={{yarn_user}} + export YARN_REGISTRYDNS_SECURE_EXTRA_OPTS="-jvm server" + {% endif %} content - + service_check.queue.name @@ -268,4 +380,15 @@ HADOOP_OPTS="{{rm_security_opts}} $HADOOP_OPTS" + + yarn_ats_principal_name + Yarn Client principal name + KERBEROS_PRINCIPAL + + + + yarn_ats_user_keytab + Yarn Client keytab path + + diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-env.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-env.xml new file mode 100755 index 00000000000..66fe45c7663 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-env.xml @@ -0,0 +1,272 @@ + + + + + + + yarn_hbase_pid_dir_prefix + /var/run/hadoop-yarn-hbase + HBase PID Dir + Pid Directory for HBase. + + directory + false + true + + + + + hbase_java_io_tmpdir + /tmp + Used in hbase-env.sh as HBASE_OPTS=-Djava.io.tmpdir=java_io_tmpdir + + directory + + + + + + content + hbase-env template + This is the jinja template for hbase-env.sh file + + # Set environment variables here. + + # The java implementation to use. 
Java 1.6 required. + export JAVA_HOME={{java64_home}} + + # HBase Configuration directory + export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{yarn_hbase_conf_dir}}} + + # Extra Java CLASSPATH elements. Optional. + export HBASE_CLASSPATH=${HBASE_CLASSPATH} + + + # The maximum amount of heap to use. Default is left to JVM default. + # export HBASE_HEAPSIZE=4G + + # Extra Java runtime options. + # Below are what we set by default. May only work with SUN JVM. + # For more on why as well as other possible settings, + # see http://wiki.apache.org/hadoop/PerformanceTuning + export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{yarn_hbase_log_dir}}/gc.log-`date +'%Y%m%d%H%M'`" + # Uncomment below to enable java garbage collection logging. + # export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log" + + # Uncomment and adjust to enable JMX exporting + # See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access. + # More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html + # + # export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false" + # If you want to configure BucketCache, specify '-XX: MaxDirectMemorySize=' with proper direct memory size + # export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103" + # export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104" + + # File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default. + export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers + + # Extra ssh options. Empty by default. + # export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR" + + # Where log files are stored. $HBASE_HOME/logs by default. 
+ export HBASE_LOG_DIR=${HBASE_LOG_DIR:-{{yarn_hbase_log_dir}}} + + # A string representing this instance of hbase. $USER by default. + # export HBASE_IDENT_STRING=$USER + + # The scheduling priority for daemon processes. See 'man nice'. + # export HBASE_NICENESS=10 + + # The directory where pid files are stored. /tmp by default. + export HBASE_PID_DIR=${HBASE_PID_DIR:-{{yarn_hbase_pid_dir}}} + + # Seconds to sleep between slave commands. Unset by default. This + # can be useful in large clusters, where, e.g., slave rsyncs can + # otherwise arrive faster than the master can service them. + # export HBASE_SLAVE_SLEEP=0.1 + + # Tell HBase whether it should manage it's own instance of Zookeeper or not. + export HBASE_MANAGES_ZK=false + + {% if java_version < 8 %} + JDK_DEPENDED_OPTS="-XX:PermSize=128m -XX:MaxPermSize=128m" + {% endif %} + + export HBASE_OPTS="$HBASE_OPTS -XX:+UseConcMarkSweepGC -XX:ErrorFile=$HBASE_LOG_DIR/hs_err_pid%p.log -Djava.io.tmpdir={{yarn_hbase_java_io_tmpdir}}" + export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Xmx{{yarn_hbase_master_heapsize}} $JDK_DEPENDED_OPTS" + export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:CMSInitiatingOccupancyFraction=70 -XX:ReservedCodeCacheSize=256m -Xms{{yarn_hbase_regionserver_heapsize}} -Xmx{{yarn_hbase_regionserver_heapsize}} $JDK_DEPENDED_OPTS" + + {% if security_enabled %} + export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{yarn_hbase_master_jaas_file}}" + export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{yarn_hbase_regionserver_jaas_file}}" + {% endif %} + + + content + + + + + is_hbase_system_service_launch + false + Should Hbase cluster started as system service. This + configuration depends on use_external_hbase property. If + use_external_hbase is set, then this property doesn't take effect. + + + boolean + + + + + yarn_hbase_system_service_queue_name + default + + The queue that used by service check. 
+ + + + capacity-scheduler + yarn.scheduler.capacity.root.queues + + + + + + yarn_hbase_system_service_launch_mode + sync + Should services are launched as sync or async. + + + + + yarn_hbase_master_cpu + 1 + Number of CPU for master container. + + + + yarn_hbase_master_memory + 4096 + master container memory in MB. + + int + 2048 + 4096 + MB + 256 + + + + + yarn_hbase_master_containers + 1 + Number of containers to launch for master. + + + + + yarn_hbase_regionserver_cpu + 1 + Number of CPU for regionserver container. + + + + yarn_hbase_regionserver_memory + 4096 + regionserver container memory in MB. + + int + 2048 + 4096 + MB + 256 + + + + + yarn_hbase_regionserver_containers + 1 + Number of containers to launch for regionserver. + + + + + yarn_hbase_client_cpu + 1 + Number of CPU for client container. + + + + yarn_hbase_client_memory + 1536 + client container memory in MB. + + int + 1024 + 2048 + MB + 256 + + + + + yarn_hbase_client_containers + 1 + Number of containers to launch for client. + + + + yarn_hbase_heap_memory_factor + 0.8 + Heap memory is auto derived using this factor. + + + + use_external_hbase + false + Setting true, doesn't start embedded hbase or system service + hbase. Note: Admin/User need to take care of pointing right hbase-site.xml + into RM/NM classpath. If system service hbase is started, then admin must + clean up system service hbase before making this change. + + boolean + + + + + + hbase_within_cluster + false + Set to true, YARN will use the HBase installed on this cluster by Ambari for storage. + + boolean + + + + + + yarn_hbase_log_level + INFO + Setting log level to hmaster and regionserver. Default to info logs. 
Log levels could be INFO, DEBUG, WARN + + + diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-log4j.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-log4j.xml new file mode 100755 index 00000000000..ee3dcce2325 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-log4j.xml @@ -0,0 +1,188 @@ + + + + + + hbase_log_maxfilesize + 256 + The maximum size of backup file before the log is rotated + HBase Log: backup file size + + MB + + + + + hbase_log_maxbackupindex + 20 + The number of backup files + HBase Log: # of backup files + + int + 0 + + + + + hbase_security_log_maxfilesize + 256 + The maximum size of security backup file before the log is rotated + HBase Security Log: backup file size + + MB + + + + + hbase_security_log_maxbackupindex + 20 + The number of security backup files + HBase Security Log: # of backup files + + int + 0 + + + + + content + hbase-log4j template + Custom log4j.properties + + # Licensed to the Apache Software Foundation (ASF) under one + # or more contributor license agreements. See the NOTICE file + # distributed with this work for additional information + # regarding copyright ownership. The ASF licenses this file + # to you under the Apache License, Version 2.0 (the + # "License"); you may not use this file except in compliance + # with the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ + + # Define some default values that can be overridden by system properties + hbase.root.logger=INFO,console + hbase.security.logger=INFO,console + hbase.log.dir=. + hbase.log.file=hbase.log + + # Define the root logger to the system property "hbase.root.logger". + log4j.rootLogger=${hbase.root.logger} + + # Logging Threshold + log4j.threshold=ALL + + # + # Daily Rolling File Appender + # + log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender + log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file} + + # Rollver at midnight + log4j.appender.DRFA.DatePattern=.yyyy-MM-dd + + # 30-day backup + #log4j.appender.DRFA.MaxBackupIndex=30 + log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout + + # Pattern format: Date LogLevel LoggerName LogMessage + log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n + + # Rolling File Appender properties + hbase.log.maxfilesize={{hbase_log_maxfilesize}}MB + hbase.log.maxbackupindex={{hbase_log_maxbackupindex}} + + # Rolling File Appender + log4j.appender.RFA=org.apache.log4j.RollingFileAppender + log4j.appender.RFA.File=${hbase.log.dir}/${hbase.log.file} + + log4j.appender.RFA.MaxFileSize=${hbase.log.maxfilesize} + log4j.appender.RFA.MaxBackupIndex=${hbase.log.maxbackupindex} + + log4j.appender.RFA.layout=org.apache.log4j.PatternLayout + log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n + + # + # Security audit appender + # + hbase.security.log.file=SecurityAuth.audit + hbase.security.log.maxfilesize={{hbase_security_log_maxfilesize}}MB + hbase.security.log.maxbackupindex={{hbase_security_log_maxbackupindex}} + log4j.appender.RFAS=org.apache.log4j.RollingFileAppender + log4j.appender.RFAS.File=${hbase.log.dir}/${hbase.security.log.file} + log4j.appender.RFAS.MaxFileSize=${hbase.security.log.maxfilesize} + log4j.appender.RFAS.MaxBackupIndex=${hbase.security.log.maxbackupindex} + log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout + 
log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n + log4j.category.SecurityLogger=${hbase.security.logger} + log4j.additivity.SecurityLogger=false + #log4j.logger.SecurityLogger.org.apache.hadoop.hbase.security.access.AccessController=TRACE + + # + # Null Appender + # + log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender + + # + # console + # Add "console" to rootlogger above if you want to use this + # + log4j.appender.console=org.apache.log4j.ConsoleAppender + log4j.appender.console.target=System.err + log4j.appender.console.layout=org.apache.log4j.PatternLayout + log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2}: %m%n + + # Custom Logging levels + + log4j.logger.org.apache.zookeeper=INFO + #log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG + log4j.logger.org.apache.hadoop.hbase=INFO + # Make these two classes INFO-level. Make them DEBUG to see more zk debug. + log4j.logger.org.apache.hadoop.hbase.zookeeper.ZKUtil=INFO + log4j.logger.org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher=INFO + #log4j.logger.org.apache.hadoop.dfs=DEBUG + # Set this class to log INFO only otherwise its OTT + # Enable this to get detailed connection error/retry logging. + # log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=TRACE + + + # Uncomment this line to enable tracing on _every_ RPC call (this can be a lot of output) + #log4j.logger.org.apache.hadoop.ipc.HBaseServer.trace=DEBUG + + # Uncomment the below if you want to remove logging of client region caching' + # and scan of .META. 
messages + # log4j.logger.org.apache.hadoop.hbase.client.HConnectionManager$HConnectionImplementation=INFO + # log4j.logger.org.apache.hadoop.hbase.client.MetaScanner=INFO + + + + content + false + + + + diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-policy.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-policy.xml new file mode 100755 index 00000000000..548352b8221 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-policy.xml @@ -0,0 +1,53 @@ + + + + + + security.client.protocol.acl + * + ACL for HRegionInterface protocol implementations (ie. + clients talking to HRegionServers) + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.admin.protocol.acl + * + ACL for HMasterInterface protocol implementation (ie. + clients talking to HMaster for admin operations). + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.masterregion.protocol.acl + * + ACL for HMasterRegionInterface protocol implementations + (for HRegionServers communicating with HMaster) + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. 
+ + + diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-site.xml new file mode 100755 index 00000000000..d6a34f8de82 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-hbase-site.xml @@ -0,0 +1,634 @@ + + + + + + + hbase.rootdir + /atsv2/hbase/data + + + + hbase.cluster.distributed + true + + The mode the cluster will be in. Possible values are false for + standalone mode and true for distributed mode. If false, startup will run + all HBase and ZooKeeper daemons together in the one JVM. + + + + hbase.master.port + 17000 + HBase Master Port + + int + false + + + + + hbase.tmp.dir + /tmp/hbase-${user.name} + HBase tmp directory + + directory + + + + + hbase.local.dir + ${hbase.tmp.dir}/local + + + + hbase.master.info.bindAddress + 0.0.0.0 + + + + hbase.master.info.port + 17010 + + + + hbase.regionserver.info.port + 17030 + + + + zookeeper.session.timeout + 90000 + Zookeeper Session Timeout + + int + 10000 + 180000 + milliseconds + 10000 + + + + + hbase.client.retries.number + 7 + Maximum retries. Used as maximum for all retryable + operations such as the getting of a cell's value, starting a row update, + etc. Retry interval is a rough function based on hbase.client.pause. At + first we retry at this interval but then with backoff, we pretty quickly reach + retrying every ten seconds. See HConstants#RETRY_BACKOFF for how the backup + ramps up. Change this setting and hbase.client.pause to suit your workload. + Maximum Client Retries + + int + 5 + 50 + 1 + + + + + hbase.rpc.timeout + 90000 + + This is for the RPC layer to define how long HBase client applications + take for a remote call to time out. It uses pings to check connections + but will eventually throw a TimeoutException. 
+ + HBase RPC Timeout + + int + 10000 + 180000 + milliseconds + 10000 + + + + + hbase.zookeeper.quorum + {{zookeeper_quorum_hosts}} + + + + + hbase.zookeeper.property.clientPort + {{zookeeper_clientPort}} + + + + + zookeeper.znode.parent + {{zookeeper_znode_parent}} + + + + hbase.regionserver.port + 17020 + + + + + hbase.regionserver.handler.count + 30 + + Count of RPC Listener instances spun up on RegionServers. + Same property is used by the Master for count of master handlers. + + Number of Handlers per RegionServer + + int + 5 + 240 + 1 + + + + + hbase.hregion.majorcompaction + 604800000 + Time between major compactions, expressed in milliseconds. Set to 0 to disable + time-based automatic major compactions. User-requested and size-based major compactions will + still run. This value is multiplied by hbase.hregion.majorcompaction.jitter to cause + compaction to start at a somewhat-random time during a given window of time. The default value + is 7 days, expressed in milliseconds. If major compactions are causing disruption in your + environment, you can configure them to run at off-peak times for your deployment, or disable + time-based major compactions by setting this parameter to 0, and run major compactions in a + cron job or by another external mechanism. + Major Compaction Interval + + int + 0 + 2592000000 + milliseconds + + + + + hbase.hregion.memstore.block.multiplier + 4 + + Block updates if memstore has hbase.hregion.memstore.block.multiplier + times hbase.hregion.memstore.flush.size bytes. Useful preventing + runaway memstore during spikes in update traffic. Without an + upper-bound, memstore fills such that when it flushes the + resultant flush files take a long time to compact or split, or + worse, we OOME. + + HBase Region Block Multiplier + + value-list + + + 2 + + + 4 + + + 8 + + + 1 + + + + + hbase.hregion.memstore.flush.size + 134217728 + + The size of an individual memstore. 
Each column familiy within each region is allocated its own memstore. + + Memstore Flush Size + + int + 33554432 + 268435456 + 1048576 + B + + + + + hbase.hregion.memstore.mslab.enabled + true + + Enables the MemStore-Local Allocation Buffer, + a feature which works to prevent heap fragmentation under + heavy write loads. This can reduce the frequency of stop-the-world + GC pauses on large heaps. + + + boolean + + + + + hbase.hregion.max.filesize + 10737418240 + + Maximum HFile size. If the sum of the sizes of a region's HFiles has grown to exceed this + value, the region is split in two. + + Maximum Region File Size + + int + 1073741824 + 107374182400 + B + 1073741824 + + + + + hbase.client.scanner.caching + 100 + Number of rows that will be fetched when calling next + on a scanner if it is not served from (local, client) memory. Higher + caching values will enable faster scanners but will eat up more memory + and some calls of next may take longer and longer times when the cache is empty. + Do not set this value such that the time between invocations is greater + than the scanner timeout; i.e. hbase.regionserver.lease.period + + Number of Fetched Rows when Scanning from Disk + + int + 100 + 10000 + 100 + rows + + + + + hbase.client.keyvalue.maxsize + 1048576 + + Specifies the combined maximum allowed size of a KeyValue + instance. This is to set an upper boundary for a single entry saved in a + storage file. Since they cannot be split it helps avoiding that a region + cannot be split any further because the data is too large. It seems wise + to set this to a fraction of the maximum region size. Setting it to zero + or less disables the check. + + Maximum Record Size + + int + 1048576 + 31457280 + B + 262144 + + + + + hbase.hstore.compactionThreshold + 3 + + The maximum number of StoreFiles which will be selected for a single minor + compaction, regardless of the number of eligible StoreFiles. 
Effectively, the value of + hbase.hstore.compaction.max controls the length of time it takes a single compaction to + complete. Setting it larger means that more StoreFiles are included in a compaction. For most + cases, the default value is appropriate. + + Maximum Store Files before Minor Compaction + + int + + + 2 + + + 3 + + + 4 + + + + + + + hbase.hstore.blockingStoreFiles + hstore blocking storefiles + 10 + + If more than this number of StoreFiles in any one Store + (one StoreFile is written per flush of MemStore) then updates are + blocked for this HRegion until a compaction is completed, or + until hbase.hstore.blockingWaitTime has been exceeded. + + + int + + + + + hfile.block.cache.size + 0.40 + Percentage of RegionServer memory to allocate to read buffers. + % of RegionServer Allocated to Read Buffers + + float + 0 + 0.8 + 0.01 + + + + + + hbase.superuser + yarn + List of users or groups (comma-separated), who are allowed + full privileges, regardless of stored ACLs, across the cluster. + Only used when HBase security is enabled. + + + + hbase-env + hbase_user + + + + + + hbase.security.authentication + simple + + Select Simple or Kerberos authentication. Note: Kerberos must be set up before the Kerberos option will take effect. + + Enable Authentication + + value-list + + + + simple + + + + kerberos + + + 1 + + + + + hbase.security.authorization + false + Set Authorization Method. + Enable Authorization + + value-list + + + true + + + + false + + + + 1 + + + + + hbase.coprocessor.region.classes + + A comma-separated list of Coprocessors that are loaded by + default on all tables. For any override coprocessor method, these classes + will be called in order. After implementing your own Coprocessor, just put + it in HBase's classpath and add the fully qualified class name here. + A coprocessor can also be loaded on demand by setting HTableDescriptor. 
+ + + true + + + + + hbase.coprocessor.master.classes + + A comma-separated list of + org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are + loaded by default on the active HMaster process. For any implemented + coprocessor methods, the listed classes will be called in order. After + implementing your own MasterObserver, just put it in HBase's classpath + and add the fully qualified class name here. + + + true + + + + hbase-site + hbase.security.authorization + + + + + + hbase.zookeeper.useMulti + true + Instructs HBase to make use of ZooKeeper's multi-update functionality. + This allows certain ZooKeeper operations to complete more quickly and prevents some issues + with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).· + IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+ + and will not be downgraded. ZooKeeper versions before 3.4 do not support multi-update and will + not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495). + + + + + hbase.defaults.for.version.skip + true + Disables version verification. + + + + dfs.domain.socket.path + /var/lib/hadoop-hdfs/dn_socket + Path to domain socket. + + + + hbase.rpc.protection + authentication + + + + + + hbase.hregion.majorcompaction.jitter + 0.50 + A multiplier applied to hbase.hregion.majorcompaction to cause compaction to occur + a given amount of time either side of hbase.hregion.majorcompaction. The smaller the number, + the closer the compactions will happen to the hbase.hregion.majorcompaction + interval. + + + + hbase.bucketcache.ioengine + + Where to store the contents of the bucketcache. One of: onheap, + offheap, or file. If a file, set it to file:PATH_TO_FILE. + + true + + + + + hbase.bucketcache.size + + The size of the buckets for the bucketcache if you only use a single size. 
+ + true + + + + + hbase.table.sanity.checks + false + + + + hbase.bucketcache.percentage.in.combinedcache + + Value to be set between 0.0 and 1.0 + + true + + + + + hbase.coprocessor.regionserver.classes + + + true + + + + hbase-site + hbase.security.authorization + + + + + + hbase.hstore.compaction.max + 10 + The maximum number of StoreFiles which will be selected for a single minor + compaction, regardless of the number of eligible StoreFiles. Effectively, the value of + hbase.hstore.compaction.max controls the length of time it takes a single compaction to + complete. Setting it larger means that more StoreFiles are included in a compaction. For most + cases, the default value is appropriate. + + Maximum Files for Compaction + + int + + + 8 + + + 9 + + + 10 + + + 11 + + + 12 + + + 13 + + + 14 + + + 15 + + + + + + + hbase.regionserver.global.memstore.size + 0.4 + Percentage of RegionServer memory to allocate to write buffers. + Each column family within each region is allocated a smaller pool (the memstore) within this shared write pool. + If this buffer is full, updates are blocked and data is flushed from memstores until a global low watermark + (hbase.regionserver.global.memstore.size.lower.limit) is reached. 
+ + % of RegionServer Allocated to Write Buffers + + float + 0 + 0.8 + 0.01 + + + + + + + hbase.master.ui.readonly + false + + + + zookeeper.recovery.retry + 6 + + + + + + hbase.regionserver.executor.openregion.threads + 20 + The number of threads region server uses to open regions + + + + + hbase.master.namespace.init.timeout + 2400000 + The number of milliseconds master waits for hbase:namespace table to be initialized + + + + + hbase.master.wait.on.regionservers.timeout + 30000 + The number of milliseconds master waits for region servers to report in + + + + + + diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-site.xml index c79391a67fd..b1017fcaffd 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-site.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/configuration/yarn-site.xml @@ -120,7 +120,7 @@ yarn.admin.acl - + yarn,yarn-ats ACL of who can be admin of the YARN cluster. true @@ -183,25 +183,32 @@ hadoop Unix group of the NodeManager + + yarn-env + yarn_cgroups_enabled + cluster-env user_group + + container-executor + gpu_module_enabled + - + yarn.nodemanager.aux-services - mapreduce_shuffle - Auxilliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and can - not start with numbers - + mapreduce_shuffle,{{timeline_collector}} + Auxilliary services of NodeManager. A valid service name should only contain a-zA-Z0-9_ and cannot start with numbers + yarn.nodemanager.aux-services.mapreduce_shuffle.class org.apache.hadoop.mapred.ShuffleHandler The auxiliary service class to use - + yarn.nodemanager.log-dirs @@ -491,139 +498,1145 @@ - - Time to live for timeline store data in milliseconds. yarn.timeline-service.ttl-ms + Time to live for timeline store data in milliseconds. 
2678400000 int - + - Length of time to wait between deletion cycles of leveldb timeline store in milliseconds. yarn.timeline-service.leveldb-timeline-store.ttl-interval-ms + Length of time to wait between deletion cycles of leveldb timeline store in milliseconds. 300000 int + + - yarn.timeline-service.recovery.enabled + hadoop.registry.zk.quorum + localhost:2181 - Enable timeline server to recover state after starting. If - true, then yarn.timeline-service.state-store-class must be specified. + List of hostname:port pairs defining the zookeeper quorum binding for the registry - true - + - yarn.acl.enable - false - Are acls enabled. - + hadoop.registry.dns.bind-port + 53 + + The port number for the DNS listener. The default port is 53. + + + + true + false + + + + yarn-env + registry.dns.bind-port + + - yarn.authorization-provider - Yarn authorization provider class. - + hadoop.registry.dns.zone-mask + 255.255.255.0 + + The network mask associated with the zone IP range. If specified, it is utilized to ascertain the IP range possible and come up with an appropriate reverse zone name. + + - yarn.admin.acl - yarn - ACL of who can be admin of the YARN cluster. - - true - - + hadoop.registry.dns.zone-subnet + 172.17.0.0 + + An indicator of the IP range associated with the cluster containers. The setting is utilized for the + generation of the reverse zone name. + + - - yarn.timeline-service.store-class - org.apache.hadoop.yarn.server.timeline.LeveldbTimelineStore - Main storage class for YARN timeline server. - + hadoop.registry.dns.enabled + true + + An indicator of the IP range associated with the cluster containers. The setting is utilized for the + generation of the reverse zone name. + + - yarn.timeline-service.entity-group-fs-store.active-dir - /ats/active/ - DFS path to store active application’s timeline data - + hadoop.registry.dns.domain-name + EXAMPLE.COM + + The domain name for Hadoop cluster. 
+ + - yarn.timeline-service.entity-group-fs-store.done-dir - /ats/done/ - DFS path to store done application’s timeline data - + yarn.nodemanager.recovery.enabled + true + Enable the node manager to recover after starting + - yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes - - Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas. - - true - - + yarn.nodemanager.recovery.dir + YARN NodeManager Recovery directory + {{yarn_log_dir_prefix}}/nodemanager/recovery-state + + The local filesystem directory in which the node manager will store + state when recovery is enabled. + + - - yarn.timeline-service.entity-group-fs-store.summary-store - Summary storage for ATS v1.5 - - org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore - + yarn.client.nodemanager-connect.retry-interval-ms + 10000 + Time interval between each attempt to connect to NM + - yarn.timeline-service.entity-group-fs-store.scan-interval-seconds + yarn.client.nodemanager-connect.max-wait-ms + 60000 + Max time to wait to establish a connection to NM + + + + yarn.resourcemanager.recovery.enabled + true - Scan interval for ATS v1.5 entity group file system storage reader.This - value controls how frequent the reader will scan the HDFS active directory - for application status. + Enable RM to recover state after starting. + If true, then yarn.resourcemanager.store.class must be specified. - - 60 - + - yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds + yarn.resourcemanager.work-preserving-recovery.enabled + true - Scan interval for ATS v1.5 entity group file system storage cleaner.This - value controls how frequent the reader will scan the HDFS done directory - for stale application data. + Enable RM work preserving recovery. This configuration is private to YARN for experimenting the feature. 
- - 3600 - + Enable Work Preserving Restart + + boolean + + - yarn.timeline-service.entity-group-fs-store.retain-seconds + yarn.resourcemanager.store.class + org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore - How long the ATS v1.5 entity group file system storage will keep an - application's data in the done directory. + The class to use as the persistent store. + If org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore is used, + the store is implicitly fenced; meaning a single ResourceManager + is able to use the store at any point in time. - - 604800 - yarn.log.server.web-service.url - http://localhost:8188/ws/v1/applicationhistory - Log Server Web Service URL. + yarn.resourcemanager.zk-address + localhost:2181 + + List Host:Port of the ZooKeeper servers to be used by the RM. comma separated host:port pairs, each corresponding to a zk server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002" If the optional chroot suffix is used the example would look like: "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002/app/a" where the client would be rooted at "/app/a" and all paths would be relative to this root - ie getting/setting/etc... "/foo/bar" would result in operations being run on "/app/a/foo/bar" (from the server perspective). + + + + + yarn.resourcemanager.zk-state-store.parent-path + /rmstore + Full path of the ZooKeeper znode where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore as the value for yarn.resourcemanager.store.class + + + + yarn.resourcemanager.zk-acl + world:anyone:rwcda + ACL's to be used for ZooKeeper znodes. + + + + yarn.resourcemanager.work-preserving-recovery.scheduling-wait-ms + 10000 + Set the amount of time RM waits before allocating new containers on work-preserving-recovery. Such wait period gives RM a chance to settle down resyncing with NMs in the cluster on recovery, before assigning new containers to applications. 
+ + + + yarn.resourcemanager.connect.retry-interval.ms + 30000 + How often to try connecting to the ResourceManager. + + + + yarn.resourcemanager.connect.max-wait.ms + 900000 + Maximum time to wait to establish connection to ResourceManager + + + + yarn.resourcemanager.zk-retry-interval-ms + 1000 + "Retry interval in milliseconds when connecting to ZooKeeper. + When HA is enabled, the value here is NOT used. It is generated + automatically from yarn.resourcemanager.zk-timeout-ms and + yarn.resourcemanager.zk-num-retries." + + + + + yarn.resourcemanager.zk-num-retries + 1000 + Number of times RM tries to connect to ZooKeeper. + + + + yarn.resourcemanager.zk-timeout-ms + 10000 + ZooKeeper session timeout in milliseconds. Session expiration is managed by the ZooKeeper cluster itself, not by the client. This value is used by the cluster to determine when the client's session expires. Expirations happens when the cluster does not hear from the client within the specified session timeout period (i.e. no heartbeat). + + + + yarn.resourcemanager.state-store.max-completed-applications + ${yarn.resourcemanager.max-completed-applications} + The maximum number of completed applications RM state store keeps, less than or equals to ${yarn.resourcemanager.max-completed-applications}. By default, it equals to ${yarn.resourcemanager.max-completed-applications}. This ensures that the applications kept in the state store are consistent with the applications remembered in RM memory. Any values larger than ${yarn.resourcemanager.max-completed-applications} will be reset to ${yarn.resourcemanager.max-completed-applications}. Note that this value impacts the RM recovery performance.Typically, a smaller value indicates better performance on RM recovery. + + + + yarn.resourcemanager.fs.state-store.retry-policy-spec + 2000, 500 + hdfs client retry policy specification. hdfs client retry is always enabled. 
Specified in pairs of sleep-time and number-of-retries and (t0, n0), (t1, n1), ..., the first n0 retries sleep t0 milliseconds on average, the following n1 retries sleep t1 milliseconds on average, and so on. + + + + yarn.resourcemanager.fs.state-store.uri + + RI pointing to the location of the FileSystem path where RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class + + + + yarn.resourcemanager.ha.enabled + false + enable RM HA or not + + + + yarn.nodemanager.linux-container-executor.resources-handler.class + org.apache.hadoop.yarn.server.nodemanager.util.DefaultLCEResourcesHandler + Pre-requisite to use CGroups - yarn-site - yarn.http.policy + yarn-env + yarn_cgroups_enabled - yarn-site - yarn.timeline-service.webapp.address + container-executor + gpu_module_enabled + + + + + + yarn.nodemanager.linux-container-executor.cgroups.hierarchy + /yarn + Name of the Cgroups hierarchy under which all YARN jobs will be launched + + + yarn-env + yarn_cgroups_enabled + + + container-executor + gpu_module_enabled + + + + + + yarn.nodemanager.linux-container-executor.cgroups.mount + false + If true, YARN will automount the CGroup, however the directory needs to already exist; else, the cgroup should be mounted by the admin + + + yarn-env + yarn_cgroups_enabled + + + container-executor + gpu_module_enabled + + + + + + yarn.nodemanager.linux-container-executor.cgroups.mount-path + /cgroup + Path used by the LCE to mount cgroups if not found. This path must exist before the NodeManager is launched. 
+ + + yarn-env + yarn_cgroups_enabled + + container-executor + gpu_module_enabled + + + + + + yarn.nodemanager.linux-container-executor.cgroups.strict-resource-usage + false + Strictly limit CPU resource usage to allocated usage even if spare CPU is available + + + + yarn.nodemanager.resource.cpu-vcores + 8 + Number of vcores that can be allocated + for containers. This is used by the RM scheduler when allocating + resources for containers. This is not used to limit the number of + CPUs used by YARN containers. If it is set to -1 and + yarn.nodemanager.resource.detect-hardware-capabilities is true, it is + automatically determined from the hardware in case of Windows and Linux. + In other cases, number of vcores is 8 by default. + + Number of virtual cores + + int + 0 + 32 + + yarn-site - yarn.timeline-service.webapp.https.address + yarn.nodemanager.resource.percentage-physical-cpu-limit - + + + + yarn.nodemanager.resource.percentage-physical-cpu-limit + 80 + The amount of CPU allocated for YARN containers - only effective when used with CGroups + Percentage of physical CPU allocated for all containers on a node + + int + 0 + 100 + 1 + + + + + yarn.node-labels.fs-store.retry-policy-spec + 2000, 500 + + Retry policy used for FileSystem node label store. The policy is + specified by N pairs of sleep-time in milliseconds and number-of-retries + "s1,n1,s2,n2,...". + + + + + yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb + 1000 + This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. 
If both are set, the more conservative value will be used + + + + yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage + 90 + This is related to disk size on the machines, admins should set one of yarn.nodemanager.disk-health-checker.min-free-space-per-disk-mb or yarn.nodemanager.disk-health-checker.max-disk-utilization-per-disk-percentage but not both. If both are set, the more conservative value will be used + + + + yarn.nodemanager.resource-plugins + + + Enable additional discovery/isolation of resources on the NodeManager, + split by comma. By default, this is empty. + Acceptable values: { "yarn-io/gpu"} + + + + container-executor + gpu_module_enabled + + + + + true + + + + yarn.nodemanager.resource-plugins.gpu.allowed-gpu-devices + auto + GPU allowed devices + Specify GPU devices which can be managed by YARN NodeManager, + split by comma. Number of GPU devices will be reported to RM to make + scheduling decisions. Set to auto (default) let YARN automatically + discover GPU resource from system + + + + container-executor + gpu_module_enabled + + + + true + + + + + yarn.nodemanager.resource-plugins.gpu.path-to-discovery-executables + + Absolute path of nvidia-smi on NodeManagers + When value is empty (default), YARN NodeManager will try to + locate discovery executable itself + + + directories + false + true + + + + yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds + Defines how often NMs wake up to upload log files. The default value is -1. By default, the logs will be uploaded whenthe application is finished. By setting this configure, logs can be uploaded periodically when the application is running. The minimum rolling-interval-seconds can be set is 3600. + 3600 + + + + yarn.nodemanager.log-aggregation.debug-enabled + false + + This configuration is for debug and test purpose. + By setting this configuration as true. 
+ We can break the lower bound of yarn.nodemanager.log-aggregation.roll-monitoring-interval-seconds + + + + yarn.nodemanager.log-aggregation.num-log-files-per-app + 30 + This is temporary solution. The configuration will be deleted once, we find a more scalable method to only write a single log file per LRS. + + + + yarn.resourcemanager.system-metrics-publisher.enabled + true + + + + + yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size + 10 + Number of worker threads that send the yarn system metrics data. + + + + yarn.timeline-service.client.max-retries + 30 + + + + + yarn.timeline-service.client.retry-interval-ms + 1000 + + + + + yarn.timeline-service.ttl-enable + true + + Enable age off of timeline store data. + + + boolean + + + + + yarn.timeline-service.state-store-class + org.apache.hadoop.yarn.server.timeline.recovery.LeveldbTimelineStateStore + Store class name for timeline state store. + + + + yarn.timeline-service.leveldb-state-store.path + /hadoop/yarn/timeline + Store file name for leveldb state store. + + directory + + + + + yarn.timeline-service.leveldb-timeline-store.path + /hadoop/yarn/timeline + Store file name for leveldb timeline store. + + directory + + + + + yarn.timeline-service.leveldb-timeline-store.read-cache-size + 104857600 + + Size of read cache for uncompressed blocks for leveldb timeline store in bytes. + + + + + yarn.timeline-service.leveldb-timeline-store.start-time-read-cache-size + 10000 + + Size of cache for recently read entity start times for leveldb timeline store in number of entities. + + + + + yarn.timeline-service.leveldb-timeline-store.start-time-write-cache-size + 10000 + + Size of cache for recently written entity start times for leveldb timeline store in number of entities. + + + + + yarn.timeline-service.http-authentication.type + simple + + Defines authentication used for the Timeline Server HTTP endpoint. 
+ Supported values are: simple | kerberos | $AUTHENTICATION_HANDLER_CLASSNAME + + + + + yarn.timeline-service.http-authentication.simple.anonymous.allowed + true + + + + + yarn.resourcemanager.webapp.delegation-token-auth-filter.enabled + false + + Flag to enable override of the default kerberos authentication filter with + the RM authentication filter to allow authentication using delegation + tokens(fallback to kerberos if the tokens are missing). + Only applicable when the http authentication type is kerberos. + + + + + yarn.resourcemanager.bind-host + 0.0.0.0 + Default value is 0.0.0.0, when this is set the service will bind on all interfaces. If this optional address is set, the RPC and webapp servers will bind to this address and the port specified in yarn.resourcemanager.address and yarn.resourcemanager.webapp.address, respectively. + + + + yarn.nodemanager.bind-host + 0.0.0.0 + Default value is 0.0.0.0, when this is set the service will bind on all interfaces. If this optional address is set, the RPC and webapp servers will bind to this address and the port specified in yarn.nodemanager.address and yarn.nodemanager.webapp.address, respectively. + + + + yarn.timeline-service.bind-host + 0.0.0.0 + Default value is 0.0.0.0, when this is set the service will bind on all interfaces. If this optional address is set, the RPC and webapp servers will bind to this address and the port specified in yarn.timeline-service.address and yarn.timeline-service.webapp.address, respectively. + + + + yarn.node-labels.fs-store.root-dir + YARN Node Labels FS Store Root directory + /system/yarn/node-labels + + URI for NodeLabelManager. 
+ + + + + yarn.scheduler.minimum-allocation-vcores + 1 + + Minimum Container Size (VCores) + + int + 0 + 8 + 1 + + + + yarn-site + yarn.nodemanager.resource.cpu-vcores + + + + + + yarn.scheduler.maximum-allocation-vcores + 8 + + Maximum Container Size (VCores) + + int + 0 + 8 + 1 + + + + yarn-site + yarn.nodemanager.resource.cpu-vcores + + + + + + yarn.node-labels.enabled + false + + Enable node labels to restrict YARN applications so that they run only on cluster nodes that have a specified node label. + + Node Labels + + value-list + + + true + + + + false + + + + 1 + + + + + yarn.nodemanager.container-executor.class + org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor + ContainerExecutor for launching containers + + + yarn-env + yarn_cgroups_enabled + + + core-site + hadoop.security.authentication + + + container-executor + gpu_module_enabled + + + + + + yarn.resourcemanager.scheduler.monitor.enable + + Enable a set of periodic monitors (specified in + yarn.resourcemanager.scheduler.monitor.policies) that affect the + scheduler. + + true + Pre-emption + + value-list + + + true + + + + false + + + + 1 + + + + + + + + yarn.timeline-service.recovery.enabled + + Enable timeline server to recover state after starting. If + true, then yarn.timeline-service.state-store-class must be specified. + + true + + + + yarn.authorization-provider + Yarn authorization provider class. + + + ranger-yarn-plugin-properties + ranger-yarn-plugin-enabled + + + + + + + + yarn.timeline-service.version + 2.0f + Timeline service version we’re currently using. + + + + yarn.timeline-service.versions + 1.5f,2.0f + Comma separated Timeline service versions we’re currently using. This overrides yarn.timeline-service.version. 
Acceptable values are 1.5f or 2.0f + + + + yarn.system-metricspublisher.enabled + true + Enables YARN to publish metrics to timeline v2 + + + + yarn.rm.system-metricspublisher.emit-container-events + true + Experimentally enable each container to post timeline events to timeline v2 + + + + yarn.nodemanager.recovery.supervised + true + Default of false causes NM to kill its containers on shutdown. Should be set to true when NM recovery is enabled. + + + + yarn.timeline-service.store-class + org.apache.hadoop.yarn.server.timeline.EntityGroupFSTimelineStore + Main storage class for YARN timeline server. + + + + yarn.timeline-service.entity-group-fs-store.active-dir + YARN Timeline Service Entity Group FS Store Active directory + /ats/active/ + DFS path to store active application’s timeline data + + + + yarn.timeline-service.entity-group-fs-store.done-dir + YARN Timeline Service Entity Group FS Store Done directory + /ats/done/ + DFS path to store done application’s timeline data + + + + yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes + org.apache.hadoop.yarn.applications.distributedshell.DistributedShellTimelinePlugin + Plugins that can translate a timeline entity read request into a list of timeline cache ids, separated by commas. + + true + + + + + + yarn.timeline-service.entity-group-fs-store.summary-store + Summary storage for ATS v1.5 + + org.apache.hadoop.yarn.server.timeline.RollingLevelDBTimelineStore + + + + yarn.timeline-service.entity-group-fs-store.scan-interval-seconds + + Scan interval for ATS v1.5 entity group file system storage reader.This + value controls how frequent the reader will scan the HDFS active directory + for application status. + + + 60 + + + + yarn.log.server.web-service.url + http://localhost:8188/ws/v1/applicationhistory + Log Server Web Service URL. 
+ + + yarn-site + yarn.http.policy + + + yarn-site + yarn.timeline-service.webapp.address + + + yarn-site + yarn.timeline-service.webapp.https.address + + + + + + yarn.timeline-service.entity-group-fs-store.cleaner-interval-seconds + + Scan interval for ATS v1.5 entity group file system storage cleaner.This + value controls how frequent the reader will scan the HDFS done directory + for stale application data. + + + 3600 + + + + yarn.timeline-service.entity-group-fs-store.retain-seconds + + How long the ATS v1.5 entity group file system storage will keep an + application's data in the done directory. + + + 604800 + + + + + + + yarn.nodemanager.container-metrics.unregister-delay-ms + 60000 + The delay time ms to unregister container metrics after completion. + + + + yarn.timeline-service.entity-group-fs-store.group-id-plugin-classpath + + Classpath for all plugins defined in yarn.timeline-service.entity-group-fs-store.group-id-plugin-classes. + + true + + + + + yarn.resourcemanager.monitor.capacity.preemption.total_preemption_per_round + 0.1 + This option controls the pace at which containers-marked-for-preemption are actually preempted in each period. + + + + yarn.resourcemanager.monitor.capacity.preemption.natural_termination_factor + 1 + Similar to total_preemption_per_round, we can apply this factor to slowdown resource preemption after preemption-target is computed for each queue. + + + + yarn.resourcemanager.monitor.capacity.preemption.monitoring_interval + 15000 + Time in milliseconds between invocations of this ProportionalCapacityPreemptionPolicy policy. + + + + + + yarn.nodemanager.linux-container-executor.nonsecure-mode.limit-users + true + + Whether all applications should be run as the NodeManager process' owner. + When false, applications are launched instead as the application owner. 
+ + + boolean + + + + + yarn.nodemanager.runtime.linux.allowed-runtimes + default,docker + + Comma separated list of runtimes that are allowed when using + LinuxContainerExecutor. The allowed values are default, docker, and + javasandbox. + + + + + yarn.nodemanager.runtime.linux.docker.allowed-container-networks + host,none,bridge + + Optional. A comma-separated set of networks allowed when launching + containers. Valid values are determined by Docker networks available from + `docker network ls`. + + + + true + + + + yarn.nodemanager.runtime.linux.docker.default-container-network + host + + The network used when launching Docker containers when no + network is specified in the request. This network must be one of the + (configurable) set of allowed container networks. + + + + + yarn.nodemanager.runtime.linux.docker.privileged-containers.allowed + false + + Optional. Whether applications are allowed to run in privileged + containers. + + + boolean + + + + + yarn.nodemanager.runtime.linux.docker.privileged-containers.acl + + + Optional. A comma-separated list of users who are allowed to request + privileged contains if privileged containers are allowed. + + + + true + + + + yarn.nodemanager.runtime.linux.docker.capabilities + + CHOWN,DAC_OVERRIDE,FSETID,FOWNER,MKNOD,NET_RAW,SETGID,SETUID,SETFCAP, + SETPCAP,NET_BIND_SERVICE,SYS_CHROOT,KILL,AUDIT_WRITE + + + Optional. This configuration setting determines the capabilities + assigned to docker containers when they are launched. While these may not + be case-sensitive from a docker perspective, it is best to keep these + uppercase. To run without any capabilites, set this value to + "none" or "NONE" + + + + + yarn.webapp.ui2.enable + true + + This configuration setting enables YARN web ui2 server which is a revamped + UI for better user experience. 
+ + + + + + + yarn.timeline-service.http-cross-origin.enabled + true + + + + + yarn.resourcemanager.webapp.cross-origin.enabled + true + + + + + yarn.nodemanager.webapp.cross-origin.enabled + true + + + + + yarn.nodemanager.resource-plugins.gpu.docker-plugin + nvidia-docker-v1 + GPU docker plugin + Specify docker command plugin for GPU. By default uses Nvidia + docker V1.0 + + + + container-executor + gpu_module_enabled + + + container-executor + docker_module_enabled + + + + true + + + + + yarn.nodemanager.resource-plugins.gpu.docker-plugin.nvidiadocker-v1.endpoint + http://localhost:3476/v1.0/docker/cli + GPU docker plugin endpoint for Nvidia Docker Version 1 + Specify end point of nvidia-docker-plugin + + + container-executor + gpu_module_enabled + + + container-executor + docker_module_enabled + + + + true + + + + + + yarn.webapp.api-service.enable + true + + Whether to enable Yarn API Service. + + + boolean + + + + + yarn.service.framework.path + /bigtop/apps/3.2.0/yarn/service-dep.tar.gz + + YARN Service AM dependency tarball location. Enables faster app submission. + + + + + + + yarn.nodemanager.aux-services.timeline_collector.class + org.apache.hadoop.yarn.server.timelineservice.collector.PerNodeTimelineCollectorsAuxService + The auxiliary service class to use + + + + yarn.timeline-service.reader.webapp.address + localhost:8198 + + The http address of the timeline reader web application. + + + + + yarn.timeline-service.reader.webapp.https.address + localhost:8199 + + The http address of the timeline reader web application. + + + + + + + yarn.timeline-service.hbase-schema.prefix + prod. + HBase table prefix. + + + + yarn.timeline-service.hbase.configuration.file + file://{{yarn_hbase_conf_dir}}/hbase-site.xml + HBase configuration location. + + + + yarn.timeline-service.hbase.coprocessor.jar.hdfs.location + {{yarn_timeline_jar_location}} + HBase table creation jar location. 
+ + + + yarn.resourcemanager.monitor.capacity.preemption.intra-queue-preemption.enabled + true + This property need to be set to make preemption within a Leaf Queue based on user-limit and application priority. + + + yarn-site + yarn.resourcemanager.scheduler.monitor.enable + + + + + + yarn.scheduler.capacity.ordering-policy.priority-utilization.underutilized-preemption.enabled + true + This property need to be set to make preemption can happen under a fragmented cluster. + + + yarn-site + yarn.resourcemanager.scheduler.monitor.enable + + + + + + yarn.resourcemanager.display.per-user-apps + true + + Flag to enable display of applications per user as an admin + configuration. + + + boolean + + + + + yarn.service.system-service.dir + /services + FS directory path to load and deploy admin configured services + + + + yarn.timeline-service.generic-application-history.save-non-am-container-meta-info + false + Defines if RM will save non am container meta-info in history store. + + boolean + + + + + hadoop.registry.dns.bind-address + 0.0.0.0 + Address associated with the network interface to which the DNS listener should bind. + + + + hadoop.http.cross-origin.allowed-origins + {{cross_origins}} + Comma separated list of origins that are allowed for web services + needing cross-origin (CORS) support. + + + + yarn.nodemanager.resourcemanager.connect.wait.secs + 1800 + Max time, in seconds, to wait to establish a connection to RM when NM starts. The NM will shutdown if it cannot connect to RM within the specified max time period. If the value is set as -1, then NM will retry forever. 
+ diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/kerberos.json b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/kerberos.json index e5ce2b861aa..a34c458a161 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/kerberos.json +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/kerberos.json @@ -10,14 +10,78 @@ { "name": "yarn_smokeuser", "reference": "/smokeuser" + }, + { + "name": "yarn_ats", + "principal": { + "value": "${yarn-env/yarn_ats_user}${principal_suffix}@${realm}", + "type" : "user", + "configuration": "yarn-env/yarn_ats_principal_name", + "local_username": "${yarn-env/yarn_ats_user}" + }, + "keytab": { + "file": "${keytab_dir}/yarn-ats.hbase-client.headless.keytab", + "owner": { + "name": "${yarn-env/yarn_ats_user}", + "access": "r" + }, + "group": { + "name": "${cluster-env/user_group}", + "access": "" + }, + "configuration": "yarn-env/yarn_ats_user_keytab" + } + }, + { + "name": "yarn_ats_hbase_master", + "principal": { + "value": "yarn-ats-hbase/_HOST@${realm}", + "type" : "service", + "configuration": "yarn-hbase-site/hbase.master.kerberos.principal", + "local_username": "${yarn-env/yarn_ats_user}" + }, + "keytab": { + "file": "${keytab_dir}/yarn-ats.hbase-master.service.keytab", + "owner": { + "name": "${yarn-env/yarn_ats_user}", + "access": "r" + }, + "group": { + "name": "${cluster-env/user_group}", + "access": "" + }, + "configuration": "yarn-hbase-site/hbase.master.keytab.file" + } + }, + { + "name": "yarn_ats_hbase_regionserver", + "principal": { + "value": "yarn-ats-hbase/_HOST@${realm}", + "type" : "service", + "configuration": "yarn-hbase-site/hbase.regionserver.kerberos.principal", + "local_username": "${yarn-env/yarn_ats_user}" + }, + "keytab": { + "file": "${keytab_dir}/yarn-ats.hbase-regionserver.service.keytab", + "owner": { + "name": "${yarn-env/yarn_ats_user}", + "access": "r" + }, + "group": { + "name": "${cluster-env/user_group}", + 
"access": "" + }, + "configuration": "yarn-hbase-site/hbase.regionserver.keytab.file" + } } ], "configurations": [ { "yarn-site": { - "yarn.timeline-service.enabled": "false", + "yarn.timeline-service.enabled": "true", "yarn.timeline-service.http-authentication.type": "kerberos", "yarn.acl.enable": "true", + "yarn.admin.acl": "${yarn-env/yarn_user}", "yarn.timeline-service.http-authentication.signature.secret": "", "yarn.timeline-service.http-authentication.signature.secret.file": "", "yarn.timeline-service.http-authentication.signer.secret.provider": "", @@ -33,8 +97,11 @@ "yarn.resourcemanager.proxyuser.*.hosts": "", "yarn.resourcemanager.proxyuser.*.users": "", "yarn.resourcemanager.proxy-user-privileges.enabled": "true", + "yarn.resourcemanager.zk-acl" : "sasl:${principals/YARN/RESOURCEMANAGER/resource_manager_rm|principalPrimary()}:rwcda", "hadoop.registry.secure" : "true", - "hadoop.registry.system.accounts" : "sasl:yarn,sasl:mapred,sasl:hadoop,sasl:hdfs,sasl:rm" + "hadoop.registry.system.accounts" : "sasl:${principals/YARN/APP_TIMELINE_SERVER/app_timeline_server_yarn|principalPrimary()},sasl:${principals/MAPREDUCE2/HISTORYSERVER/history_server_jhs|principalPrimary()},sasl:${principals/HDFS/NAMENODE/hdfs|principalPrimary()},sasl:${principals/YARN/RESOURCEMANAGER/resource_manager_rm|principalPrimary()},sasl:${principals/HIVE/HIVE_SERVER/hive_server_hive|principalPrimary()},sasl:${principals/SPARK2/spark_service_keytab|principalPrimary()}", + "hadoop.registry.client.auth" : "kerberos", + "hadoop.registry.jaas.context" : "Client" } }, { @@ -42,6 +109,34 @@ "hadoop.proxyuser.${yarn-env/yarn_user}.groups": "*", "hadoop.proxyuser.${yarn-env/yarn_user}.hosts": "${clusterHostInfo/resourcemanager_hosts}" } + }, + { + "capacity-scheduler": { + "yarn.scheduler.capacity.root.acl_submit_applications": "${yarn-env/yarn_user},${cluster-env/smokeuser}", + "yarn.scheduler.capacity.root.acl_administer_queue": "${yarn-env/yarn_user}", + 
"yarn.scheduler.capacity.root.default.acl_administer_queue": "${yarn-env/yarn_user}", + "yarn.scheduler.capacity.root.default.acl_administer_jobs": "${yarn-env/yarn_user}", + "yarn.scheduler.capacity.root.default.acl_submit_applications": "${yarn-env/yarn_user},${yarn-env/yarn_ats_user}" + } + }, + { + "ranger-yarn-audit": { + "xasecure.audit.jaas.Client.loginModuleName": "com.sun.security.auth.module.Krb5LoginModule", + "xasecure.audit.jaas.Client.loginModuleControlFlag": "required", + "xasecure.audit.jaas.Client.option.useKeyTab": "true", + "xasecure.audit.jaas.Client.option.storeKey": "false", + "xasecure.audit.jaas.Client.option.serviceName": "solr", + "xasecure.audit.destination.solr.force.use.inmemory.jaas.config": "true" + } + }, + { + "yarn-hbase-site": { + "hbase.security.authentication": "kerberos", + "hbase.security.authorization": "true", + "hbase.coprocessor.master.classes": "org.apache.hadoop.hbase.security.access.AccessController", + "hbase.coprocessor.region.classes": "org.apache.hadoop.hbase.security.token.TokenProvider,org.apache.hadoop.hbase.security.access.AccessController", + "zookeeper.znode.parent": "/atsv2-hbase-secure" + } } ], "components": [ @@ -69,6 +164,42 @@ "configuration": "yarn-site/yarn.nodemanager.keytab" } }, + { + "name": "yarn_nodemanager_hive_server_hive", + "reference": "/HIVE/HIVE_SERVER/hive_server_hive", + "principal": { + "configuration": "hive-interactive-site/hive.llap.daemon.service.principal" + }, + "keytab": { + "configuration": "hive-interactive-site/hive.llap.daemon.keytab.file" + }, + "when" : { + "contains" : ["services", "HIVE"] + } + }, + { + "name": "llap_task_hive", + "principal": { + "value": "hive/_HOST@${realm}", + "type" : "service", + "configuration": "hive-interactive-site/hive.llap.task.principal" + }, + "keytab": { + "file": "${keytab_dir}/hive.llap.task.keytab", + "owner": { + "name": "${yarn-env/yarn_user}", + "access": "r" + }, + "group": { + "name": "${cluster-env/user_group}", + "access": "r" + 
}, + "configuration": "hive-interactive-site/hive.llap.task.keytab.file" + }, + "when" : { + "contains" : ["services", "HIVE"] + } + }, { "name": "yarn_nodemanager_spnego", "reference": "/spnego", @@ -91,6 +222,10 @@ { "name": "RESOURCEMANAGER", "identities": [ + { + "name": "yarn_resourcemanager_hdfs", + "reference": "/HDFS/NAMENODE/hdfs" + }, { "name": "resource_manager_rm", "principal": { @@ -121,6 +256,16 @@ "keytab": { "configuration": "yarn-site/yarn.resourcemanager.webapp.spnego-keytab-file" } + }, + { + "name": "yarn_resourcemanager_resource_manager_rm", + "reference": "/YARN/RESOURCEMANAGER/resource_manager_rm", + "principal": { + "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.principal" + }, + "keytab": { + "configuration": "ranger-yarn-audit/xasecure.audit.jaas.Client.option.keyTab" + } } ] }, @@ -163,6 +308,72 @@ "reference": "/HDFS/NAMENODE/hdfs" } ] + }, + { + "name": "TIMELINE_READER", + "identities": [ + { + "name": "yarn_timeline_reader", + "principal": { + "value": "yarn/_HOST@${realm}", + "type" : "service", + "configuration": "yarn-site/yarn.timeline-service.principal", + "local_username": "${yarn-env/yarn_user}" + }, + "keytab": { + "file": "${keytab_dir}/yarn.service.keytab", + "owner": { + "name": "${yarn-env/yarn_user}", + "access": "r" + }, + "group": { + "name": "${cluster-env/user_group}", + "access": "" + }, + "configuration": "yarn-site/yarn.timeline-service.keytab" + } + }, + { + "name": "yarn_timeline_reader_spnego", + "reference": "/spnego", + "principal": { + "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.principal" + }, + "keytab": { + "configuration": "yarn-site/yarn.timeline-service.http-authentication.kerberos.keytab" + } + }, + { + "name": "yarn_timeline_reader_hdfs", + "reference": "/HDFS/NAMENODE/hdfs" + } + ] + }, + { + "name": "YARN_REGISTRY_DNS", + "identities": [ + { + "name": "registry_dns_yarn", + "principal": { + "value": "yarn/_HOST@${realm}", + "type" : 
"service", + "configuration": "yarn-env/yarn.registry-dns.principal", + "local_username": "${yarn-env/yarn_user}" + }, + "keytab": { + "file": "${keytab_dir}/yarn.service.keytab", + "owner": { + "name": "${yarn-env/yarn_user}", + "access": "r" + }, + "group": { + "name": "${cluster-env/user_group}", + "access": "" + }, + "configuration": "yarn-env/yarn.registry-dns.keytab" + } + } + ] } ] }, @@ -178,6 +389,14 @@ "reference": "/smokeuser" } ], + "configurations": [ + { + "mapred-site": { + "mapreduce.cluster.acls.enabled": "true", + "mapreduce.jobhistory.admin.acl": "${mapred-env/mapred_user}" + } + } + ], "components": [ { "name": "HISTORYSERVER", diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/metainfo.xml index 64b41343d22..a3ab15cd724 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/metainfo.xml @@ -26,6 +26,72 @@ 3.3.4-1 + + TIMELINE_READER + Timeline Service V2.0 Reader + MASTER + 1+ + true + true + + true + YARN/RESOURCEMANAGER + + + + + PYTHON + 1200 + + + + + HDFS/HDFS_CLIENT + host + + true + + + + + + + YARN_REGISTRY_DNS + YARN Registry DNS + MASTER + 0-1 + true + true + + + + PYTHON + 1200 + + + + + APP_TIMELINE_SERVER + Timeline Service V2 + MASTER + 1 + true + true + + + + PYTHON + 1200 + + + + + yarn_timelineserver + + + + + RESOURCEMANAGER ResourceManager @@ -72,6 +138,8 @@ capacity-scheduler hdfs-site + resource-types + container-executor @@ -98,6 +166,11 @@ yarn_nodemanager + + + resource-types + container-executor + @@ -137,13 +210,23 @@ capacity-scheduler.xml capacity-scheduler + + xml + resource-types.xml + resource-types + + + properties + container-executor.cfg + container-executor + - redhat7,redhat8 + redhat7,redhat8,openeuler22 hadoop_${stack_version}-yarn @@ -186,13 +269,16 @@ hdfs-site hadoop-env core-site + 
viewfs-mount-table mapred-site yarn-log4j + ams-ssl-client ranger-yarn-plugin-properties ranger-yarn-audit ranger-yarn-policymgr-ssl ranger-yarn-security + YARN_widgets.json YARN_metrics.json @@ -293,7 +379,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 hadoop_${stack_version}-mapreduce @@ -326,10 +412,12 @@ hdfs-site hadoop-env core-site + viewfs-mount-table mapred-site mapred-env ssl-client ssl-server + ams-ssl-client yarn-site true diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/alerts/alert_ats_hbase.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/alerts/alert_ats_hbase.py new file mode 100755 index 00000000000..118062a5cf0 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/alerts/alert_ats_hbase.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python + +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+""" +import logging +import json +import subprocess +import time +import traceback + +from resource_management.core import global_lock +from resource_management.core import shell +from resource_management.core.exceptions import Fail +from resource_management.core.resources import Execute +from resource_management.libraries.functions import format +from resource_management.libraries.functions import get_kinit_path +from resource_management.libraries.script.script import Script +from resource_management.libraries.functions.check_process_status import check_process_status +from resource_management.core.exceptions import ComponentIsNotRunning +from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl + +CRITICAL_RESULT_CODE = 'CRITICAL' +OK_RESULT_CODE = 'OK' +UKNOWN_STATUS_CODE = 'UNKNOWN' + +OK_MESSAGE = "The HBase application reported a '{0}' state in {1:.3f}s" +MESSAGE_WITH_STATE_AND_INSTANCES = "The application reported a '{0}' state in {1:.3f}s. [Live: {2}, Desired: {3}]" +CRITICAL_MESSAGE_WITH_STATE = "The HBase application reported a '{0}' state. 
Check took {1:.3f}s" +CRITICAL_MESSAGE = "ats-hbase service information could not be retrieved" + + +SECURITY_ENABLED_KEY = '{{cluster-env/security_enabled}}' +STACK_ROOT = '{{cluster-env/stack_root}}' +STACK_ROOT_DEFAULT = Script.get_stack_root() + + +ATS_HBASE_PRINCIPAL_KEY = '{{yarn-hbase-site/hbase.master.kerberos.principal}}' +ATS_HBASE_PRINCIPAL_KEYTAB_KEY = '{{yarn-hbase-site/hbase.master.keytab.file}}' +ATS_HBASE_USER_KEY = '{{yarn-env/yarn_ats_user}}' +ATS_HBASE_SYSTEM_SERVICE_LAUNCH_KEY = '{{yarn-hbase-env/is_hbase_system_service_launch}}' +USE_EXTERNAL_HBASE_KEY = '{{yarn-hbase-env/use_external_hbase}}' +ATS_HBASE_PID_DIR_PREFIX = '{{yarn-hbase-env/yarn_hbase_pid_dir_prefix}}' + +ATS_HBASE_APP_NOT_FOUND_KEY = format("Service ats-hbase not found") + +# The configured Kerberos executable search paths, if any +KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY = '{{kerberos-env/executable_search_paths}}' + + +CHECK_COMMAND_TIMEOUT_KEY = 'check.command.timeout' +CHECK_COMMAND_TIMEOUT_DEFAULT = 120.0 + + +logger = logging.getLogger('ambari_alerts') + + +def get_tokens(): + """ + Returns a tuple of tokens in the format {{site/property}} that will be used + to build the dictionary passed into execute + """ + return (SECURITY_ENABLED_KEY, KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY, ATS_HBASE_PRINCIPAL_KEY, ATS_HBASE_PRINCIPAL_KEYTAB_KEY, + ATS_HBASE_USER_KEY, STACK_ROOT, USE_EXTERNAL_HBASE_KEY, ATS_HBASE_PID_DIR_PREFIX, ATS_HBASE_SYSTEM_SERVICE_LAUNCH_KEY) + + +def execute(configurations={}, parameters={}, host_name=None): + """ + Returns a tuple containing the result code and a pre-formatted result label + + Keyword arguments: + configurations (dictionary): a mapping of configuration key to value + parameters (dictionary): a mapping of script parameter key to value + host_name (string): the name of this host where the alert is running + """ + + if configurations is None: + return (UKNOWN_STATUS_CODE, ['There were no configurations supplied to the script.']) + + result_code = None 
+ + try: + use_external_hbase = False + if USE_EXTERNAL_HBASE_KEY in configurations: + use_external_hbase = str(configurations[USE_EXTERNAL_HBASE_KEY]).upper() == 'TRUE' + + if use_external_hbase: + return (OK_RESULT_CODE, ['use_external_hbase set to true.']) + + is_hbase_system_service_launch = False + if ATS_HBASE_SYSTEM_SERVICE_LAUNCH_KEY in configurations: + is_hbase_system_service_launch = str(configurations[ATS_HBASE_SYSTEM_SERVICE_LAUNCH_KEY]).upper() == 'TRUE' + + yarn_hbase_user = "yarn-ats" + if ATS_HBASE_USER_KEY in configurations: + yarn_hbase_user = configurations[ATS_HBASE_USER_KEY] + + if not is_hbase_system_service_launch: + yarn_hbase_pid_dir_prefix = "" + if ATS_HBASE_PID_DIR_PREFIX in configurations: + yarn_hbase_pid_dir_prefix = configurations[ATS_HBASE_PID_DIR_PREFIX] + else: + return (UKNOWN_STATUS_CODE, ['The yarn_hbase_pid_dir_prefix is a required parameter.']) + yarn_hbase_pid_dir = format("{yarn_hbase_pid_dir_prefix}/{yarn_hbase_user}") + master_pid_file = format("{yarn_hbase_pid_dir}/hbase-{yarn_hbase_user}-master.pid") + rs_pid_file = format("{yarn_hbase_pid_dir}/hbase-{yarn_hbase_user}-regionserver.pid") + + if host_name is None: + host_name = socket.getfqdn() + + master_process_running = is_monitor_process_live(master_pid_file) + rs_process_running = is_monitor_process_live(rs_pid_file) + + alert_state = OK_RESULT_CODE if master_process_running and rs_process_running else CRITICAL_RESULT_CODE + + alert_label = 'ATS embedded HBase is running on {0}' if master_process_running and rs_process_running else 'ATS embedded HBase is NOT running on {0}' + alert_label = alert_label.format(host_name) + + return (alert_state, [alert_label]) + else: + security_enabled = False + if SECURITY_ENABLED_KEY in configurations: + security_enabled = str(configurations[SECURITY_ENABLED_KEY]).upper() == 'TRUE' + + check_command_timeout = CHECK_COMMAND_TIMEOUT_DEFAULT + if CHECK_COMMAND_TIMEOUT_KEY in configurations: + check_command_timeout = 
int(parameters[CHECK_COMMAND_TIMEOUT_KEY]) + + if security_enabled: + if ATS_HBASE_PRINCIPAL_KEY in configurations: + ats_hbase_app_principal = configurations[ATS_HBASE_PRINCIPAL_KEY] + ats_hbase_app_principal = ats_hbase_app_principal.replace('_HOST',host_name.lower()) + + if ATS_HBASE_PRINCIPAL_KEYTAB_KEY in configurations: + ats_hbase_app_keytab = configurations[ATS_HBASE_PRINCIPAL_KEYTAB_KEY] + + # Get the configured Kerberos executable search paths, if any + if KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY in configurations: + kerberos_executable_search_paths = configurations[KERBEROS_EXECUTABLE_SEARCH_PATHS_KEY] + else: + kerberos_executable_search_paths = None + + kinit_path_local = get_kinit_path(kerberos_executable_search_paths) + kinitcmd=format("{kinit_path_local} -kt {ats_hbase_app_keytab} {ats_hbase_app_principal}; ") + + # prevent concurrent kinit + kinit_lock = global_lock.get_lock(global_lock.LOCK_TYPE_KERBEROS) + kinit_lock.acquire() + try: + Execute(kinitcmd, user=yarn_hbase_user, + path=["/bin/", "/usr/bin/", "/usr/sbin/"], + timeout=10) + finally: + kinit_lock.release() + + start_time = time.time() + ats_hbase_status_cmd = STACK_ROOT_DEFAULT + format("/current/hadoop-yarn-client/bin/yarn app -status ats-hbase") + + code, output, error = shell.checked_call(ats_hbase_status_cmd, user=yarn_hbase_user, stderr=subprocess.PIPE, + timeout=check_command_timeout, + logoutput=False) + if code != 0: + alert_label = traceback.format_exc() + result_code = UKNOWN_STATUS_CODE + return (result_code, [alert_label]) + + + # Call for getting JSON + ats_hbase_app_info = make_valid_json(output) + + if ats_hbase_app_info is None: + alert_label = CRITICAL_MESSAGE + result_code = CRITICAL_RESULT_CODE + return (result_code, [alert_label]) + + if 'state' not in ats_hbase_app_info: + alert_label = traceback.format_exc() + result_code = UKNOWN_STATUS_CODE + return (result_code, [alert_label]) + + retrieved_ats_hbase_app_state = ats_hbase_app_info['state'].upper() + + if 
retrieved_ats_hbase_app_state in ['STABLE']: + result_code = OK_RESULT_CODE + total_time = time.time() - start_time + alert_label = OK_MESSAGE.format(retrieved_ats_hbase_app_state, total_time) + else: + result_code = CRITICAL_RESULT_CODE + total_time = time.time() - start_time + alert_label = CRITICAL_MESSAGE_WITH_STATE.format(retrieved_ats_hbase_app_state, total_time) + except: + alert_label = traceback.format_exc() + traceback.format_exc() + result_code = CRITICAL_RESULT_CODE + return (result_code, [alert_label]) + + +def make_valid_json(output): + splits = output.split("\n") + ats_hbase_app_info = None + json_element = None # To detect where from to start reading for JSON data + for idx, split in enumerate(splits): + curr_elem = split.strip() + if curr_elem.startswith( '{' ) and curr_elem.endswith( '}' ): + json_element = curr_elem + break + elif ATS_HBASE_APP_NOT_FOUND_KEY in curr_elem: + return ats_hbase_app_info + + # Remove extra logging from possible JSON output + if json_element is None: + raise Fail("Couldn't validate the received output for JSON parsing.") + + ats_hbase_app_info = json.loads(json_element) + return ats_hbase_app_info + +@OsFamilyFuncImpl(OsFamilyImpl.DEFAULT) +def is_monitor_process_live(pid_file): + """ + Gets whether the Metrics Monitor represented by the specified file is running. 
+ :param pid_file: the PID file of the monitor to check + :return: True if the monitor is running, False otherwise + """ + live = False + + try: + check_process_status(pid_file) + live = True + except ComponentIsNotRunning: + pass + + return live \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/functions.py new file mode 100755 index 00000000000..c436c62ac39 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/functions.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" + +import re +import math + +def calc_heap_memory(memorysize, heapmemory_factor): + """ + @param memorysize_str: str (e.g '4096m') + @param heapmemory_factor: float (e.g 0.8) + """ + return int(math.floor(memorysize*heapmemory_factor)) + +def ensure_unit_for_memory(memory_size): + memory_size_values = re.findall('\d+', str(memory_size)) + memory_size_unit = re.findall('\D+', str(memory_size)) + + if len(memory_size_values) > 0: + unit = 'm' + if len(memory_size_unit) > 0: + unit = memory_size_unit[0] + if unit not in ['b', 'k', 'm', 'g', 't', 'p']: + raise Exception("Memory size unit error. %s - wrong unit" % unit) + return "%s%s" % (memory_size_values[0], unit) + else: + raise Exception('Memory size can not be calculated') diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/hbase_service.py new file mode 100755 index 00000000000..8e9e69a2eac --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/hbase_service.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +""" +from ambari_commons.constants import AMBARI_SUDO_BINARY +from resource_management.libraries.functions.format import format +from resource_management.libraries.functions.show_logs import show_logs +from resource_management.core.shell import as_sudo +from resource_management.core.resources.system import Execute, File +from resource_management.core.source import Template +from resource_management.core.logger import Logger + +def hbase_service( + name, + action = 'start'): # 'start' or 'stop' + + import params + + sudo = AMBARI_SUDO_BINARY + daemon_script = format("{yarn_hbase_bin}/hbase-daemon.sh") + role = name + cmd = format("{daemon_script} --config {yarn_hbase_conf_dir}") + pid_file = format("{yarn_hbase_pid_dir}/hbase-{yarn_hbase_user}-{role}.pid") + pid_expression = as_sudo(["cat", pid_file]) + no_op_test = as_sudo(["test", "-f", pid_file]) + format(" && ps -p `{pid_expression}` >/dev/null 2>&1") + + if action == 'start': + daemon_cmd = format("{cmd} start {role}") + + try: + Execute ( daemon_cmd, + not_if = no_op_test, + user = params.yarn_hbase_user + ) + except: + show_logs(params.yarn_hbase_log_dir, params.yarn_hbase_user) + raise + elif action == 'stop': + daemon_cmd = format("{cmd} stop {role}") + + try: + Execute ( daemon_cmd, + user = params.yarn_hbase_user, + only_if = no_op_test, + timeout = 30, + on_timeout = format("! 
( {no_op_test} ) || {sudo} -H -E kill -9 `{pid_expression}`"), + ) + except: + show_logs(params.yarn_hbase_log_dir, params.yarn_hbase_user) + raise + + File(pid_file, + action = "delete", + ) + +def hbase(action): + if action == 'stop': + Logger.info("Stopping HBase daemons") + hbase_service('regionserver', action=action) + hbase_service('master', action=action) + else: + Logger.info("Starting HBase daemons") + hbase_service('master', action=action) + hbase_service('regionserver', action=action) + createTables() + + +def configure_hbase(env): + import params + env.set_params(params) + params.HdfsResource(params.yarn_hbase_hdfs_root_dir, + type="directory", + action="create_on_execute", + owner=params.yarn_hbase_user + ) + params.HdfsResource(None, action="execute") + +def create_hbase_package(): + import params + file_path = format("{yarn_hbase_package_preparation_file}") + Logger.info("Executing hbase package creation script file '" + file_path +"'") + try: + File( file_path, + mode = 0o755, + content = Template('yarn_hbase_package_preparation.j2') + ) + Execute( file_path, + timeout = 300, + logoutput = True + ) + except: + Logger.error( + "Error occured while executing hbase package creation file '" + file_path + "'.") + raise + +def copy_hbase_package_to_hdfs(): + import params + + try: + + Logger.info( + "Copying hbase tarball into hdfs path'" + params.yarn_hbase_app_hdfs_path + "'.") + params.HdfsResource(format("{yarn_hbase_app_hdfs_path}"), + type="directory", + action="create_on_execute", + owner=params.hdfs_user, + group=params.hdfs_user, + mode=0o555, + ) + params.HdfsResource(format("{yarn_hbase_app_hdfs_path}/hbase.tar.gz"), + type="file", + action="create_on_execute", + source=format("{yarn_hbase_user_tmp}/hbase.tar.gz"), + owner=params.hdfs_user, + group=params.user_group, + mode=0o444, + ) + params.HdfsResource(None, action="execute") + except: + Logger.error( + "Error occured while copying hbase tarball into hdfs '" + 
params.yarn_hbase_app_hdfs_path + "'.") + raise + + +def createTables(): + import params + try: + Logger.info("Creating HBase tables") + Execute(format("sleep 10;{yarn_hbase_table_create_cmd}"), + user=params.yarn_hbase_user, + timeout = 300, + logoutput=True) + except: + show_logs(params.yarn_hbase_log_dir, params.yarn_hbase_user) + raise + + if params.security_enabled: + try: + File( format("{yarn_hbase_grant_premissions_file}"), + owner = params.yarn_hbase_user, + group = params.user_group, + mode = 0o644, + content = Template('yarn_hbase_grant_permissions.j2') + ) + Execute( format("{yarn_hbase_table_grant_premission_cmd}"), + user = params.yarn_hbase_user, + timeout = 300, + logoutput = True + ) + except: + show_logs(params.yarn_hbase_log_dir, params.yarn_hbase_user) + raise \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/historyserver.py index 4217990f953..9846b959543 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/historyserver.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/historyserver.py @@ -76,12 +76,34 @@ def pre_upgrade_restart(self, env, upgrade_type=None): if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): stack_select.select_packages(params.version) + # MC Hammer said, "Can't touch this" + copy_to_hdfs("mapreduce", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs) + copy_to_hdfs("tez", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs) + params.HdfsResource(None, action="execute") def start(self, env, upgrade_type=None): import params env.set_params(params) self.configure(env) # FOR SECURITY + if params.stack_version_formatted_major and 
check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major): + # MC Hammer said, "Can't touch this" + resource_created = copy_to_hdfs( + "mapreduce", + params.user_group, + params.hdfs_user, + skip=params.sysprep_skip_copy_tarballs_hdfs) + resource_created = copy_to_hdfs( + "tez", + params.user_group, + params.hdfs_user, + skip=params.sysprep_skip_copy_tarballs_hdfs) or resource_created + if resource_created: + params.HdfsResource(None, action="execute") + else: + # In stack versions before copy_tarball_to_hdfs support tez.tar.gz was copied to a different folder in HDFS. + install_tez_jars() + service('historyserver', action='start', serviceName='mapreduce') def status(self, env): diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/params_linux.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/params_linux.py index ef2d437b413..a61701de18b 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/params_linux.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/params_linux.py @@ -36,12 +36,15 @@ from resource_management.libraries.functions.get_not_managed_resources import get_not_managed_resources from resource_management.libraries.functions.version import format_stack_version, get_major_version from resource_management.libraries.functions.default import default +from resource_management.libraries.functions.expect import expect from resource_management.libraries import functions from resource_management.libraries.functions import is_empty from resource_management.libraries.functions.get_architecture import get_architecture from resource_management.libraries.functions.setup_ranger_plugin_xml import get_audit_configs, generate_ranger_service_config import status_params +from functions import calc_heap_memory, ensure_unit_for_memory + service_name = 'yarn' # a map of the Ambari 
role to the component name @@ -55,7 +58,9 @@ 'APP_TIMELINE_SERVER' : 'hadoop-yarn-timelineserver', 'NODEMANAGER' : 'hadoop-yarn-nodemanager', 'RESOURCEMANAGER' : 'hadoop-yarn-resourcemanager', - 'YARN_CLIENT' : 'hadoop-yarn-client' + 'YARN_CLIENT' : 'hadoop-yarn-client', + 'TIMELINE_READER' : 'hadoop-yarn-timelinereader', + 'YARN_REGISTRY_DNS' : 'hadoop-yarn-registrydns' } # server configurations @@ -152,6 +157,12 @@ def get_spark_version(service_name, component_name, yarn_version): yarn_container_bin = format("{hadoop_yarn_home}/bin") hadoop_java_io_tmpdir = os.path.join(tmp_dir, "hadoop_java_io_tmpdir") +# MapR directory root +mapred_role_root = "hadoop-mapreduce-client" +command_role = default("/role", "") +if command_role in MAPR_SERVER_ROLE_DIRECTORY_MAP: + mapred_role_root = MAPR_SERVER_ROLE_DIRECTORY_MAP[command_role] + if stack_supports_timeline_state_store: # Timeline Service property that was added timeline_state_store stack feature @@ -191,6 +202,8 @@ def get_spark_version(service_name, component_name, yarn_version): toggle_nm_security = (current_nm_security_state and not security_enabled) or (not current_nm_security_state and security_enabled) smoke_user_keytab = config['configurations']['cluster-env']['smokeuser_keytab'] +mapred2_service_check_test_file = format('{tmp_dir}/mapred2-service-check') + yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group'] yarn_nodemanager_container_executor_class = config['configurations']['yarn-site']['yarn.nodemanager.container-executor.class'] is_linux_container_executor = (yarn_nodemanager_container_executor_class == 'org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor') @@ -209,6 +222,7 @@ def get_spark_version(service_name, component_name, yarn_version): java64_home = config['ambariLevelParams']['java_home'] java_exec = format("{java64_home}/bin/java") hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False) 
+java_version = expect("/ambariLevelParams/java_version", int) yarn_heapsize = config['configurations']['yarn-env']['yarn_heapsize'] resourcemanager_heapsize = config['configurations']['yarn-env']['resourcemanager_heapsize'] @@ -292,6 +306,12 @@ def get_spark_version(service_name, component_name, yarn_version): ats_host = set(default("/clusterHostInfo/app_timeline_server_hosts", [])) has_ats = not len(ats_host) == 0 +atsv2_host = set(default("/clusterHostInfo/timeline_reader_hosts", [])) +has_atsv2 = not len(atsv2_host) == 0 + +registry_dns_host = set(default("/clusterHostInfo/yarn_registry_dns_hosts", [])) +has_registry_dns = not len(registry_dns_host) == 0 + # don't using len(nm_hosts) here, because check can take too much time on large clusters number_of_nm = 1 @@ -322,13 +342,19 @@ def get_spark_version(service_name, component_name, yarn_version): rm_security_opts = format('-Dzookeeper.sasl.client=true -Dzookeeper.sasl.client.username={zk_principal_user} -Djava.security.auth.login.config={yarn_jaas_file} -Dzookeeper.sasl.clientconfig=Client') # YARN timeline security options - if has_ats: + if has_ats or has_atsv2: yarn_timelineservice_principal_name = config['configurations']['yarn-site']['yarn.timeline-service.principal'] yarn_timelineservice_principal_name = yarn_timelineservice_principal_name.replace('_HOST', hostname.lower()) yarn_timelineservice_keytab = config['configurations']['yarn-site']['yarn.timeline-service.keytab'] yarn_timelineservice_kinit_cmd = format("{kinit_path_local} -kt {yarn_timelineservice_keytab} {yarn_timelineservice_principal_name};") yarn_ats_jaas_file = os.path.join(hadoop_conf_dir, 'yarn_ats_jaas.conf') + if has_registry_dns: + yarn_registry_dns_principal_name = config['configurations']['yarn-env']['yarn.registry-dns.principal'] + yarn_registry_dns_principal_name = yarn_registry_dns_principal_name.replace('_HOST', hostname.lower()) + yarn_registry_dns_keytab = config['configurations']['yarn-env']['yarn.registry-dns.keytab'] + 
yarn_registry_dns_jaas_file = os.path.join(hadoop_conf_dir, 'yarn_registry_dns_jaas.conf') + if 'yarn.nodemanager.principal' in config['configurations']['yarn-site']: nodemanager_principal_name = default('/configurations/yarn-site/yarn.nodemanager.principal', None) if nodemanager_principal_name: @@ -421,6 +447,7 @@ def get_spark_version(service_name, component_name, yarn_version): rm_active_port = rm_https_port if yarn_https_on else rm_port rm_ha_enabled = False +rm_ha_id = None rm_ha_ids_list = [] rm_webapp_addresses_list = [yarn_rm_address] rm_ha_ids = default("/configurations/yarn-site/yarn.resourcemanager.ha.rm-ids", None) @@ -436,6 +463,9 @@ def get_spark_version(service_name, component_name, yarn_version): rm_webapp_address_property = format('yarn.resourcemanager.webapp.address.{rm_id}') if not yarn_https_on else format('yarn.resourcemanager.webapp.https.address.{rm_id}') rm_webapp_address = config['configurations']['yarn-site'][rm_webapp_address_property] rm_webapp_addresses_list.append(rm_webapp_address) + rm_host_name = config['configurations']['yarn-site'][format('yarn.resourcemanager.hostname.{rm_id}')] + if rm_host_name == hostname.lower(): + rm_ha_id = rm_id # for curl command in ranger plugin to get db connector jdk_location = config['ambariLevelParams']['jdk_location'] @@ -574,3 +604,223 @@ def get_spark_version(service_name, component_name, yarn_version): # need this to capture cluster name from where ranger yarn plugin is enabled cluster_name = config['clusterName'] + +# ranger yarn plugin end section + +# container-executor properties +min_user_id = config['configurations']['container-executor']['min_user_id'] +docker_module_enabled = str(config['configurations']['container-executor']['docker_module_enabled']).lower() +docker_binary = config['configurations']['container-executor']['docker_binary'] +docker_allowed_capabilities = config['configurations']['yarn-site']['yarn.nodemanager.runtime.linux.docker.capabilities'] +if 
docker_allowed_capabilities: + docker_allowed_capabilities = ','.join(x.strip() for x in docker_allowed_capabilities.split(',')) +else: + docker_allowed_capabilities = "" +docker_allowed_devices = config['configurations']['container-executor']['docker_allowed_devices'] +docker_allowed_networks = config['configurations']['yarn-site']['yarn.nodemanager.runtime.linux.docker.allowed-container-networks'] +if docker_allowed_networks: + docker_allowed_networks = ','.join(x.strip() for x in docker_allowed_networks.split(',')) +else: + docker_allowed_networks = "" +docker_allowed_ro_mounts = config['configurations']['container-executor']['docker_allowed_ro-mounts'] +docker_allowed_rw_mounts = config['configurations']['container-executor']['docker_allowed_rw-mounts'] +docker_privileged_containers_enabled = str(config['configurations']['container-executor']['docker_privileged-containers_enabled']).lower() +docker_trusted_registries = config['configurations']['container-executor']['docker_trusted_registries'] +docker_allowed_volume_drivers = config['configurations']['container-executor']['docker_allowed_volume-drivers'] + +# ATSv2 integration properties started. 
+yarn_timelinereader_pid_file = status_params.yarn_timelinereader_pid_file + +yarn_atsv2_hbase_versioned_home = format("{stack_root}/{version}/usr/lib/hbase") +yarn_hbase_bin = format("{yarn_atsv2_hbase_versioned_home}/bin") +yarn_hbase_hdfs_root_dir = config['configurations']['yarn-hbase-site']['hbase.rootdir'] +cluster_zookeeper_quorum_hosts = ",".join(config['clusterHostInfo']['zookeeper_server_hosts']) +if 'zoo.cfg' in config['configurations'] and 'clientPort' in config['configurations']['zoo.cfg']: + cluster_zookeeper_clientPort = config['configurations']['zoo.cfg']['clientPort'] +else: + cluster_zookeeper_clientPort = '2181' + +zookeeper_quorum_hosts = cluster_zookeeper_quorum_hosts +zookeeper_clientPort = cluster_zookeeper_clientPort +yarn_hbase_user = status_params.yarn_hbase_user +hbase_user = config['configurations']['hbase-env']['hbase_user'] +yarn_hbase_user_home = format("/user/{yarn_hbase_user}") +yarn_hbase_user_version_home = format("{yarn_hbase_user_home}/{version}") +yarn_hbase_app_hdfs_path = format("/bigtop/apps/{version}/hbase") +yarn_service_app_hdfs_path = format("/bigtop/apps/{version}/yarn") +if rm_ha_id is not None: + yarn_hbase_app_hdfs_path = format("{yarn_hbase_app_hdfs_path}/{rm_ha_id}") + yarn_service_app_hdfs_path = format("{yarn_service_app_hdfs_path}/{rm_ha_id}") +yarn_service_dep_source_path = format("{stack_root}/{version}/usr/lib/hadoop-yarn/lib/service-dep.tar.gz") +yarn_hbase_user_version_path = format("{yarn_hbase_user}/{version}") +yarn_hbase_user_tmp = format("{tmp_dir}/{yarn_hbase_user_version_path}") +yarn_hbase_log_dir = os.path.join(yarn_log_dir_prefix, "embedded-yarn-ats-hbase") +yarn_hbase_pid_dir_prefix = status_params.yarn_hbase_pid_dir_prefix +yarn_hbase_pid_dir = status_params.yarn_hbase_pid_dir +yarn_hbase_conf_dir = os.path.join(hadoop_conf_dir, "embedded-yarn-ats-hbase") +yarn_hbase_env_sh_template = config['configurations']['yarn-hbase-env']['content'] +yarn_hbase_java_io_tmpdir = 
default("/configurations/yarn-hbase-env/hbase_java_io_tmpdir", "/tmp") +yarn_hbase_tmp_dir = config['configurations']['yarn-hbase-site']['hbase.tmp.dir'] +yarn_hbase_local_dir = config['configurations']['yarn-hbase-site']['hbase.local.dir'] +yarn_hbase_master_info_port = config['configurations']['yarn-hbase-site']['hbase.master.info.port'] +yarn_hbase_regionserver_info_port = config['configurations']['yarn-hbase-site']['hbase.regionserver.info.port'] + +if (('yarn-hbase-log4j' in config['configurations']) and ('content' in config['configurations']['yarn-hbase-log4j'])): + yarn_hbase_log4j_props = config['configurations']['yarn-hbase-log4j']['content'] +else: + yarn_hbase_log4j_props = None + +timeline_collector = "" +yarn_timeline_service_version = config['configurations']['yarn-site']['yarn.timeline-service.version'] +yarn_timeline_service_versions = config['configurations']['yarn-site']['yarn.timeline-service.versions'] +yarn_timeline_service_enabled = config['configurations']['yarn-site']['yarn.timeline-service.enabled'] + +if yarn_timeline_service_enabled: + if is_empty(yarn_timeline_service_versions): + if yarn_timeline_service_version == '2.0' or yarn_timeline_service_version == '2': + timeline_collector = "timeline_collector" + else: + ts_version_list = yarn_timeline_service_versions.split(',') + for ts_version in ts_version_list: + if '2.0' in ts_version or ts_version == '2': + timeline_collector = "timeline_collector" + break + +coprocessor_jar_name = "hadoop-yarn-server-timelineservice-hbase-coprocessor.jar" +yarn_timeline_jar_location = format("file://{stack_root}/{version}/usr/lib/hadoop-yarn/timelineservice/{coprocessor_jar_name}") +yarn_user_hbase_permissions = "RWXCA" + +yarn_hbase_kinit_cmd = "" +if security_enabled and has_atsv2: + yarn_hbase_jaas_file = os.path.join(yarn_hbase_conf_dir, 'yarn_hbase_jaas.conf') + yarn_hbase_master_jaas_file = os.path.join(yarn_hbase_conf_dir, 'yarn_hbase_master_jaas.conf') + yarn_hbase_regionserver_jaas_file = 
os.path.join(yarn_hbase_conf_dir, 'yarn_hbase_regionserver_jaas.conf') + + yarn_hbase_master_principal_name = config['configurations']['yarn-hbase-site']['hbase.master.kerberos.principal'] + yarn_hbase_master_principal_name = yarn_hbase_master_principal_name.replace('_HOST', hostname.lower()) + yarn_hbase_master_keytab = config['configurations']['yarn-hbase-site']['hbase.master.keytab.file'] + + yarn_hbase_regionserver_principal_name = config['configurations']['yarn-hbase-site']['hbase.regionserver.kerberos.principal'] + yarn_hbase_regionserver_principal_name = yarn_hbase_regionserver_principal_name.replace('_HOST', hostname.lower()) + yarn_hbase_regionserver_keytab = config['configurations']['yarn-hbase-site']['hbase.regionserver.keytab.file'] + + # Use master principal name as AM principal in system service. Don't replace _HOST. + yarn_ats_hbase_principal_name = config['configurations']['yarn-hbase-site']['hbase.master.kerberos.principal'] + yarn_ats_hbase_keytab = config['configurations']['yarn-hbase-site']['hbase.master.keytab.file'] + yarn_ats_principal_name = config['configurations']['yarn-env']['yarn_ats_principal_name'] + yarn_ats_user_keytab = config['configurations']['yarn-env']['yarn_ats_user_keytab'] + yarn_hbase_kinit_cmd = format("{kinit_path_local} -kt {yarn_ats_user_keytab} {yarn_ats_principal_name};") + + +hbase_within_cluster = config['configurations']['yarn-hbase-env']['hbase_within_cluster'] +is_hbase_installed = False +master_configs = config['clusterHostInfo'] + +if hbase_within_cluster: + if 'hbase_master_hosts' in master_configs and 'hbase-site' in config['configurations']: + is_hbase_installed = True + zookeeper_znode_parent = config['configurations']['hbase-site']['zookeeper.znode.parent'] + else: + zookeeper_znode_parent = "/hbase-unsecure" + hbase_site_conf = config['configurations']['hbase-site'] + hbase_site_attributes = config['configurationAttributes']['hbase-site'] + yarn_hbase_conf_dir = "/etc/hbase/conf" +else: + 
zookeeper_znode_parent = "/atsv2-hbase-unsecure" + hbase_site_conf = config['configurations']['yarn-hbase-site'] + hbase_site_attributes = config['configurationAttributes']['yarn-hbase-site'] + +yarn_hbase_grant_premissions_file = format("{yarn_hbase_conf_dir}/hbase_grant_permissions.sh") +yarn_hbase_package_preparation_file = format("{tmp_dir}/hbase_package_preparation.sh") +is_hbase_system_service_launch = config['configurations']['yarn-hbase-env']['is_hbase_system_service_launch'] +use_external_hbase = config['configurations']['yarn-hbase-env']['use_external_hbase'] + +hbase_cmd = format("{yarn_hbase_bin}/hbase --config {yarn_hbase_conf_dir}") +class_name = format("org.apache.hadoop.yarn.server.timelineservice.storage.TimelineSchemaCreator -Dhbase.client.retries.number=35 -create -s") +yarn_hbase_table_create_cmd = format("export HBASE_CLASSPATH_PREFIX={stack_root}/{version}/usr/lib/hadoop-yarn/timelineservice/*;{yarn_hbase_kinit_cmd} {hbase_cmd} {class_name}") +yarn_hbase_table_grant_premission_cmd = format("{yarn_hbase_kinit_cmd} {hbase_cmd} shell {yarn_hbase_grant_premissions_file}") + +# System service configuration as part of ATSv2. 
+yarn_system_service_dir = config['configurations']['yarn-site']['yarn.service.system-service.dir'] +yarn_system_service_launch_mode = config['configurations']['yarn-hbase-env']['yarn_hbase_system_service_launch_mode'] +yarn_hbase_service_queue_name = config['configurations']['yarn-hbase-env']['yarn_hbase_system_service_queue_name'] + +yarn_hbase_master_cpu = config['configurations']['yarn-hbase-env']['yarn_hbase_master_cpu'] +yarn_hbase_master_memory = expect("/configurations/yarn-hbase-env/yarn_hbase_master_memory", int) +yarn_hbase_master_containers = config['configurations']['yarn-hbase-env']['yarn_hbase_master_containers'] +yarn_hbase_regionserver_cpu = config['configurations']['yarn-hbase-env']['yarn_hbase_regionserver_cpu'] +yarn_hbase_regionserver_memory = expect("/configurations/yarn-hbase-env/yarn_hbase_regionserver_memory", int) +yarn_hbase_regionserver_containers = config['configurations']['yarn-hbase-env']['yarn_hbase_regionserver_containers'] +yarn_hbase_client_cpu = config['configurations']['yarn-hbase-env']['yarn_hbase_client_cpu'] +yarn_hbase_client_memory = expect("/configurations/yarn-hbase-env/yarn_hbase_client_memory", int) +yarn_hbase_client_containers = config['configurations']['yarn-hbase-env']['yarn_hbase_client_containers'] + +yarn_hbase_heap_memory_factor = expect("/configurations/yarn-hbase-env/yarn_hbase_heap_memory_factor", float) +yarn_hbase_master_heapsize = ensure_unit_for_memory(calc_heap_memory(yarn_hbase_master_memory, yarn_hbase_heap_memory_factor)) +yarn_hbase_regionserver_heapsize = ensure_unit_for_memory(calc_heap_memory(yarn_hbase_regionserver_memory, yarn_hbase_heap_memory_factor)) + +yarn_hbase_log_level = str(config['configurations']['yarn-hbase-env']['yarn_hbase_log_level']).upper() +# ATSv2 integration properties ended + +gpu_module_enabled = str(config['configurations']['container-executor']['gpu_module_enabled']).lower() +cgroup_root = config['configurations']['container-executor']['cgroup_root'] +yarn_hierarchy = 
config['configurations']['container-executor']['yarn_hierarchy'] + +# registry dns service +registry_dns_needs_privileged_access = status_params.registry_dns_needs_privileged_access + +mount_table_content = None +if 'viewfs-mount-table' in config['configurations']: + xml_inclusion_file_name = 'viewfs-mount-table.xml' + mount_table = config['configurations']['viewfs-mount-table'] + + if 'content' in mount_table and mount_table['content'].strip(): + mount_table_content = mount_table['content'] + +hbase_log_maxfilesize = default('configurations/yarn-hbase-log4j/hbase_log_maxfilesize',256) +hbase_log_maxbackupindex = default('configurations/yarn-hbase-log4j/hbase_log_maxbackupindex',20) +hbase_security_log_maxfilesize = default('configurations/yarn-hbase-log4j/hbase_security_log_maxfilesize',256) +hbase_security_log_maxbackupindex = default('configurations/yarn-hbase-log4j/hbase_security_log_maxbackupindex',20) + +rm_cross_origin_enabled = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.cross-origin.enabled'] + +cross_origins = '*' +if rm_cross_origin_enabled: + host_suffix = rm_host.rsplit('.', 2)[1:] + if len(host_suffix) == 2 : + cross_origins = 'regex:.*[.]' + '[.]'.join(host_suffix) + "(:\d*)?" 
+ +ams_collector_hosts = ",".join(default("/clusterHostInfo/metrics_collector_hosts", [])) +has_metric_collector = not len(ams_collector_hosts) == 0 +if has_metric_collector: + if 'cluster-env' in config['configurations'] and \ + 'metrics_collector_vip_port' in config['configurations']['cluster-env']: + metric_collector_port = config['configurations']['cluster-env']['metrics_collector_vip_port'] + else: + metric_collector_web_address = default("/configurations/ams-site/timeline.metrics.service.webapp.address", "0.0.0.0:6188") + if metric_collector_web_address.find(':') != -1: + metric_collector_port = metric_collector_web_address.split(':')[1] + else: + metric_collector_port = '6188' + if default("/configurations/ams-site/timeline.metrics.service.http.policy", "HTTP_ONLY") == "HTTPS_ONLY": + metric_collector_protocol = 'https' + else: + metric_collector_protocol = 'http' + metric_truststore_path= default("/configurations/ams-ssl-client/ssl.client.truststore.location", "") + metric_truststore_type= default("/configurations/ams-ssl-client/ssl.client.truststore.type", "") + metric_truststore_password= default("/configurations/ams-ssl-client/ssl.client.truststore.password", "") + host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True) + host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888) + + pass +metrics_report_interval = default("/configurations/ams-site/timeline.metrics.sink.report.interval", 60) +metrics_collection_period = default("/configurations/ams-site/timeline.metrics.sink.collection.period", 10) + +host_in_memory_aggregation = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation", True) +host_in_memory_aggregation_port = default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.port", 61888) +is_aggregation_https_enabled = False +if 
default("/configurations/ams-site/timeline.metrics.host.inmemory.aggregation.http.policy", "HTTP_ONLY") == "HTTPS_ONLY": + host_in_memory_aggregation_protocol = 'https' + is_aggregation_https_enabled = True +else: + host_in_memory_aggregation_protocol = 'http' diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/service.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/service.py index 276bad38f61..5e2e63e71f2 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/service.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/service.py @@ -22,12 +22,14 @@ from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl from ambari_commons import OSConst +from resource_management.core import shell from resource_management.core.shell import as_user, as_sudo +from resource_management.core.logger import Logger from resource_management.libraries.functions.show_logs import show_logs from resource_management.libraries.functions.format import format from resource_management.core.resources.system import Execute, File from resource_management.core.signal_utils import TerminateStrategy -from resource_management.core.logger import Logger +import subprocess @OsFamilyFuncImpl(os_family=OSConst.WINSRV_FAMILY) def service(componentName, action='start', serviceName='yarn'): @@ -43,34 +45,58 @@ def service(componentName, action='start', serviceName='yarn'): @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT) def service(componentName, action='start', serviceName='yarn'): import params + import status_params - if serviceName == 'mapreduce' and componentName == 'historyserver': - if not params.hdfs_tmp_dir or params.hdfs_tmp_dir == None or params.hdfs_tmp_dir.lower() == 'null': - Logger.error("WARNING: HDFS tmp dir property (hdfs_tmp_dir) is empty or invalid. 
Ambari will change permissions for the folder on regular basis.") + hadoop_env_exports = { + 'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir + } + if serviceName == 'mapreduce' and componentName == 'historyserver': delete_pid_file = True daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh") pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-{componentName}.pid") usr = params.mapred_user log_dir = params.mapred_log_dir + cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}") else: # !!! yarn-daemon.sh deletes the PID for us; if we remove it the script # may not work correctly when stopping the service delete_pid_file = False daemon = format("{yarn_bin}/yarn-daemon.sh") - pid_file = format("{yarn_pid_dir}/hadoop-{yarn_user}-{componentName}.pid") - usr = params.yarn_user + if componentName == 'registrydns' and status_params.registry_dns_needs_privileged_access: + pid_file = status_params.yarn_registry_dns_priv_pid_file + usr = status_params.root_user + else: + pid_file = format("{yarn_pid_dir}/hadoop-{yarn_user}-{componentName}.pid") + usr = params.yarn_user + log_dir = params.yarn_log_dir + cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}") - cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {hadoop_conf_dir}") + check_process = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", pid_file]) - if action == 'start': + if usr == 'root' and componentName == 'registrydns': + # these are needed for unknown reasons + hadoop_env_exports['HADOOP_PID_DIR'] = params.yarn_pid_dir + hadoop_env_exports['HADOOP_SECURE_PID_DIR'] = params.yarn_pid_dir + hadoop_env_exports['HADOOP_LOG_DIR'] = params.yarn_log_dir + hadoop_env_exports['HADOOP_SECURE_LOG_DIR'] = params.yarn_log_dir - daemon_cmd = format("{ulimit_cmd} {cmd} start {componentName}") - check_process = as_sudo(["test", "-f", pid_file]) + " && " + as_sudo(["pgrep", "-F", 
pid_file]) + cmd = [daemon, "--config", params.hadoop_conf_dir, action, componentName] + daemon_cmd = as_sudo(cmd) + else: + if action == 'start': + cmd = format("{ulimit_cmd} {cmd} start {componentName}") + else: + cmd = format("{cmd} stop {componentName}") + daemon_cmd = as_user(cmd, usr) - # Remove the pid file if its corresponding process is not running. - File(pid_file, action = "delete", not_if = check_process) + if action == 'start': + if componentName == 'registrydns': + checkAndStopRegistyDNS() + else: + # Remove the pid file if its corresponding process is not running. + File(pid_file, action = "delete", not_if = check_process) if componentName == 'timelineserver' and serviceName == 'yarn': File(params.ats_leveldb_lock_file, @@ -82,8 +108,8 @@ def service(componentName, action='start', serviceName='yarn'): try: # Attempt to start the process. Internally, this is skipped if the process is already running. - Execute(daemon_cmd, user = usr, not_if = check_process) - + Execute(daemon_cmd, not_if=check_process, environment=hadoop_env_exports) + # Ensure that the process with the expected PID exists. Execute(check_process, not_if = check_process, @@ -95,22 +121,22 @@ def service(componentName, action='start', serviceName='yarn'): raise elif action == 'stop': - daemon_cmd = format("{cmd} stop {componentName}") - try: - Execute(daemon_cmd, user=usr) - except: - show_logs(log_dir, usr) - raise - - # !!! yarn-daemon doesn't need us to delete PIDs - if delete_pid_file is True: - File(pid_file, action="delete") - + if componentName == 'registrydns': + checkAndStopRegistyDNS() + else: + try: + Execute(daemon_cmd, only_if=check_process, environment=hadoop_env_exports) + except: + show_logs(log_dir, usr) + raise + + # !!! 
yarn-daemon doesn't need us to delete PIDs + if delete_pid_file is True: + File(pid_file, action="delete") elif action == 'refreshQueues': rm_kinit_cmd = params.rm_kinit_cmd refresh_cmd = format("{rm_kinit_cmd} export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {yarn_container_bin}/yarn rmadmin -refreshQueues") - Execute(refresh_cmd, user = usr, timeout = 20, # when Yarn is not started command hangs forever and should be killed @@ -118,3 +144,75 @@ def service(componentName, action='start', serviceName='yarn'): try_sleep = 5, timeout_kill_strategy = TerminateStrategy.KILL_PROCESS_GROUP, # the process cannot be simply killed by 'kill -15', so kill pg group instread. ) + +def checkAndStopRegistyDNS(): + import params + import status_params + + componentName = 'registrydns' + action = 'stop' + daemon = format("{yarn_bin}/yarn-daemon.sh") + hadoop_env_exports = { + 'HADOOP_LIBEXEC_DIR': params.hadoop_libexec_dir + } + + # When registry dns is switched from non-privileged to privileged mode or the other way, + # then the previous instance of registry dns has a different pid/user. + # Checking if either of the processes are running and shutting them down if they are. 
+ + # privileged mode + dns_pid_file = status_params.yarn_registry_dns_priv_pid_file + dns_user = status_params.root_user + Logger.info("checking any existing dns pid file = '" + dns_pid_file + "' dns user '" + dns_user + "'") + try: + # these are needed for unknown reasons + env_exports = { + 'HADOOP_PID_DIR': params.yarn_pid_dir, + 'HADOOP_SECURE_PID_DIR': params.yarn_pid_dir, + 'HADOOP_LOG_DIR': params.yarn_log_dir, + 'HADOOP_SECURE_LOG_DIR': params.yarn_log_dir + } + env_exports.update(hadoop_env_exports) + cmd = [daemon, "--config", params.hadoop_conf_dir, action, componentName] + daemon_cmd = as_sudo(cmd) + process_id_exists_command = as_sudo(["test", "-f", dns_pid_file]) + " && " + as_sudo(["pgrep", "-F", dns_pid_file]) + Execute(daemon_cmd, only_if=process_id_exists_command, environment=env_exports) + except: + # When the registry dns port is modified but registry dns is not started + # immediately, then the configs in yarn-env.sh & yarn-site.xml related + # to registry dns may have already changed. This introduces a discrepancy + # between the actual process that is running and the configs. + # For example, when port is changed from 5300 to 53, + # then dns port = 53 in yarn-site and YARN_REGISTRYDNS_SECURE_* envs in yarn-env.sh + # are saved. So, while trying to shutdown the stray non-privileged registry dns process + # after sometime, yarn daemon from the configs thinks that it needs privileged + # access and throws an exception. In such cases, we try to kill the stray process. + pass + process_id_does_not_exist_command = format("! 
( {process_id_exists_command} )") + code, out = shell.call(process_id_does_not_exist_command, + env=env_exports, + tries=5, + try_sleep=5) + if code != 0: + code, out, err = shell.checked_call(("pgrep", "-f", dns_pid_file), sudo=True, env=env_exports, + stderr=subprocess.PIPE) + Logger.info("PID to kill was retrieved: '" + out + "'.") + for pid in out.splitlines(): + try: + Execute(("kill", "-9", pid), sudo=True) + except: + # ignoring failures + Logger.warning("failed to kill pid '" + pid + "'.") + pass + File(dns_pid_file, action="delete") + + # non-privileged mode + dns_pid_file = status_params.yarn_registry_dns_pid_file + dns_user = params.yarn_user + Logger.info("checking any existing dns pid file = '" + dns_pid_file + "' dns user '" + dns_user + "'") + try: + cmd = format("{daemon} --config {hadoop_conf_dir} {action} {componentName}") + daemon_cmd = as_user(cmd, dns_user) + Execute(daemon_cmd, environment=hadoop_env_exports) + except: + pass diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/status_params.py index aaace6cd864..e0528006b94 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/status_params.py +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/status_params.py @@ -17,18 +17,27 @@ limitations under the License. 
""" +import os +import pwd + +from ambari_commons import OSCheck + from resource_management.libraries.script.script import Script from resource_management.libraries import functions from resource_management.libraries.functions import conf_select from resource_management.libraries.functions import stack_select from resource_management.libraries.functions import format +from resource_management.libraries.functions import get_kinit_path +from resource_management.libraries.functions import stack_select from resource_management.libraries.functions.default import default config = Script.get_config() tmp_dir = Script.get_tmp_dir() +root_user = 'root' mapred_user = config['configurations']['mapred-env']['mapred_user'] yarn_user = config['configurations']['yarn-env']['yarn_user'] +yarn_ats_user = config['configurations']['yarn-env']['yarn_ats_user'] yarn_pid_dir_prefix = config['configurations']['yarn-env']['yarn_pid_dir_prefix'] mapred_pid_dir_prefix = config['configurations']['mapred-env']['mapred_pid_dir_prefix'] yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}") @@ -38,8 +47,23 @@ nodemanager_pid_file = format("{yarn_pid_dir}/hadoop-{yarn_user}-nodemanager.pid") yarn_historyserver_pid_file_old = format("{yarn_pid_dir}/hadoop-{yarn_user}-historyserver.pid") yarn_historyserver_pid_file = format("{yarn_pid_dir}/hadoop-{yarn_user}-timelineserver.pid") # *-historyserver.pid is deprecated + +# registry dns service +registry_dns_bind_port = int(config['configurations']['yarn-env']['registry.dns.bind-port']) +registry_dns_needs_privileged_access = True if registry_dns_bind_port < 1024 else False + +yarn_registry_dns_pid_file = format("{yarn_pid_dir_prefix}/{yarn_user}/hadoop-{yarn_user}-registrydns.pid") +yarn_registry_dns_priv_pid_file = format("{yarn_pid_dir_prefix}/{root_user}/hadoop-{yarn_user}-{root_user}-registrydns.pid") + +if registry_dns_needs_privileged_access: + yarn_registry_dns_in_use_pid_file = yarn_registry_dns_priv_pid_file +else: + 
yarn_registry_dns_in_use_pid_file = yarn_registry_dns_pid_file + mapred_historyserver_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-historyserver.pid") +yarn_timelinereader_pid_file = format("{yarn_pid_dir}/hadoop-{yarn_user}-timelinereader.pid") + hadoop_home = stack_select.get_hadoop_dir("home") hadoop_conf_dir = conf_select.get_hadoop_conf_dir() @@ -48,3 +72,8 @@ security_enabled = config['configurations']['cluster-env']['security_enabled'] stack_name = default("/clusterLevelParams/stack_name", None) + +# ATSv2 backend properties +yarn_hbase_user = format("{yarn_ats_user}") #Use yarn_ats_user. +yarn_hbase_pid_dir_prefix = config['configurations']['yarn-hbase-env']['yarn_hbase_pid_dir_prefix'] +yarn_hbase_pid_dir = format("{yarn_hbase_pid_dir_prefix}/{yarn_hbase_user}") \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/timelinereader.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/timelinereader.py new file mode 100755 index 00000000000..0ee3e2ae080 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/timelinereader.py @@ -0,0 +1,119 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +Ambari Agent + +""" +import os + +from resource_management.libraries.script.script import Script +from resource_management.libraries.functions import stack_select +from resource_management.libraries.functions.constants import StackFeature +from resource_management.libraries.functions.stack_features import check_stack_feature +from resource_management.libraries.functions import check_process_status +from resource_management.libraries.functions.format import format +from resource_management.core.logger import Logger + +from yarn import yarn +from service import service +from ambari_commons import OSConst +from ambari_commons.os_family_impl import OsFamilyImpl +from hbase_service import hbase, configure_hbase +from resource_management.libraries.functions.copy_tarball import copy_to_hdfs + +class ApplicationTimelineReader(Script): + def install(self, env): + self.install_packages(env) + + def start(self, env, upgrade_type=None): + import params + env.set_params(params) + self.configure(env) # FOR SECURITY + + if params.stack_version_formatted_major and check_stack_feature(StackFeature.COPY_TARBALL_TO_HDFS, params.stack_version_formatted_major): + # MC Hammer said, "Can't touch this" + resource_created = copy_to_hdfs( + "yarn", + params.user_group, + params.hdfs_user, + skip=params.sysprep_skip_copy_tarballs_hdfs) + if resource_created: + params.HdfsResource(None, action="execute") + + if not params.use_external_hbase and not params.is_hbase_system_service_launch: + hbase(action='start') + service('timelinereader', action='start') + + def stop(self, env, upgrade_type=None): + import params + env.set_params(params) + if not params.use_external_hbase and not params.is_hbase_system_service_launch: + hbase(action='stop') + service('timelinereader', action='stop') + + def configure(self, env, action = None): + import params + env.set_params(params) + yarn(name='apptimelinereader') + if not params.use_external_hbase and not params.is_hbase_system_service_launch: + 
configure_hbase(env) + +@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY) +class ApplicationTimelineReaderWindows(ApplicationTimelineReader): + def status(self, env): + service('timelinereader', action='status') + + +@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT) +class ApplicationTimelineReaderDefault(ApplicationTimelineReader): + def pre_upgrade_restart(self, env, upgrade_type=None): + Logger.info("Executing Stack Upgrade pre-restart") + import params + env.set_params(params) + + if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): + stack_select.select_packages(params.version) + # MC Hammer said, "Can't touch this" + resource_created = copy_to_hdfs("yarn", params.user_group, params.hdfs_user, skip=params.sysprep_skip_copy_tarballs_hdfs) + if resource_created: + params.HdfsResource(None, action="execute") + + def status(self, env): + import status_params + env.set_params(status_params) + for pid_file in self.get_pid_files(): + check_process_status(pid_file) + + def get_log_folder(self): + import params + return params.yarn_log_dir + + def get_user(self): + import params + return params.yarn_user + + def get_pid_files(self): + import params + pid_files = [] + pid_files.append(format("{yarn_timelinereader_pid_file}")) + if not params.use_external_hbase and not params.is_hbase_system_service_launch: + pid_files.append(format("{yarn_hbase_pid_dir}/hbase-{yarn_hbase_user}-master.pid")) + pid_files.append(format("{yarn_hbase_pid_dir}/hbase-{yarn_hbase_user}-regionserver.pid")) + return pid_files + +if __name__ == "__main__": + ApplicationTimelineReader().execute() diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/yarn.py index 2c6ee037ecf..db0d75003a9 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/yarn.py +++ 
b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/yarn.py @@ -29,15 +29,17 @@ from resource_management.libraries.functions.format import format from resource_management.libraries.functions.is_empty import is_empty from resource_management.libraries.functions.lzo_utils import install_lzo_if_needed -from resource_management.core.resources.system import Directory +from resource_management.core.resources.system import Directory,Execute from resource_management.core.resources.system import File from resource_management.libraries.resources.xml_config import XmlConfig from resource_management.core.source import InlineTemplate, Template from resource_management.core.logger import Logger from ambari_commons.os_family_impl import OsFamilyFuncImpl, OsFamilyImpl from ambari_commons import OSConst +from resource_management.libraries.script.config_dictionary import UnknownConfiguration from resource_management.libraries.functions.mounted_dirs_helper import handle_mounted_dirs +from hbase_service import create_hbase_package, copy_hbase_package_to_hdfs, createTables @OsFamilyFuncImpl(os_family=OsFamilyImpl.DEFAULT) def yarn(name=None, config_dir=None): @@ -80,6 +82,12 @@ def yarn(name=None, config_dir=None): ignore_failures=True, cd_access='a', ) + Directory(params.yarn_hbase_conf_dir, + owner = params.yarn_hbase_user, + group = params.user_group, + create_parents = True, + cd_access='a', + ) # Some of these function calls depend on the directories above being created first. 
if name == 'resourcemanager': @@ -90,6 +98,10 @@ def yarn(name=None, config_dir=None): setup_ats() elif name == 'historyserver': setup_historyserver() + elif name == 'apptimelinereader': + if not params.use_external_hbase and not params.is_hbase_system_service_launch: + setup_atsv2_hbase_directories() + setup_atsv2_hbase_files() XmlConfig("core-site.xml", conf_dir=config_dir, @@ -121,9 +133,13 @@ def yarn(name=None, config_dir=None): mode=0o644 ) + + configs = {} + configs.update(params.config['configurations']['yarn-site']) + configs["hadoop.registry.dns.bind-port"] = params.config['configurations']['yarn-env']['registry.dns.bind-port'] XmlConfig("yarn-site.xml", conf_dir=config_dir, - configurations=params.config['configurations']['yarn-site'], + configurations=configs, configuration_attributes=params.config['configurationAttributes']['yarn-site'], owner=params.yarn_user, group=params.user_group, @@ -138,6 +154,25 @@ def yarn(name=None, config_dir=None): group=params.user_group, mode=0o644 ) + + if not isinstance(params.hbase_site_conf, UnknownConfiguration): + XmlConfig("hbase-site.xml", + conf_dir=params.yarn_hbase_conf_dir, + configurations=params.hbase_site_conf, + configuration_attributes=params.hbase_site_attributes, + owner=params.yarn_hbase_user, + group=params.user_group, + mode=0o644 + ) + + XmlConfig("resource-types.xml", + conf_dir=config_dir, + configurations=params.config['configurations']['resource-types'], + configuration_attributes=params.config['configurationAttributes']['resource-types'], + owner=params.yarn_user, + group=params.user_group, + mode=0o644 + ) File(format("{limits_conf_dir}/yarn.conf"), mode=0o644, content=Template('yarn.conf.j2') @@ -201,6 +236,14 @@ def yarn(name=None, config_dir=None): group=params.user_group, content=Template("yarn_ats_jaas.conf.j2") ) + + if params.has_registry_dns: + File(os.path.join(config_dir, 'yarn_registry_dns_jaas.conf'), + owner=params.yarn_user, + group=params.user_group, + mode=0o644, + 
content=Template("yarn_registry_dns_jaas.conf.j2") + ) File(os.path.join(config_dir, 'yarn_nm_jaas.conf'), owner=params.yarn_user, group=params.user_group, @@ -287,6 +330,8 @@ def yarn(name=None, config_dir=None): group=params.user_group ) + setup_atsv2_backend(name, config_dir) + def setup_historyserver(): import params @@ -530,6 +575,12 @@ def yarn(name = None): owner=params.yarn_user, mode='f' ) + XmlConfig("yarn-hbase-site.xml", + conf_dir=params.config_dir, + configurations=params.config['configurations']['yarn-hbase-site'], + owner=params.yarn_user, + mode='f' + ) if name in params.service_map: service_name = params.service_map[name] @@ -538,3 +589,193 @@ def yarn(name = None): action="change_user", username = params.yarn_user, password = Script.get_password(params.yarn_user)) + +def setup_atsv2_backend(name=None, config_dir=None): + import params + Logger.info(f"setup_atsv2_backend name:{name} {params.use_external_hbase} {params.hbase_within_cluster} {params.is_hbase_system_service_launch}") + if name == "apptimelinereader" and params.use_external_hbase and params.hbase_within_cluster: + createTables() + + if not params.use_external_hbase and params.is_hbase_system_service_launch: + if name == 'resourcemanager': + setup_system_services(config_dir) + elif name == 'nodemanager': + setup_atsv2_hbase_files() + +def setup_atsv2_hbase_files(): + import params + if 'yarn-hbase-policy' in params.config['configurations']: + XmlConfig( "hbase-policy.xml", + conf_dir = params.yarn_hbase_conf_dir, + configurations = params.config['configurations']['yarn-hbase-policy'], + configuration_attributes=params.config['configurationAttributes']['yarn-hbase-policy'], + owner = params.yarn_hbase_user, + group = params.user_group, + mode=0o644 + ) + + File(os.path.join(params.yarn_hbase_conf_dir, "hbase-env.sh"), + owner=params.yarn_hbase_user, + group=params.user_group, + mode=0o644, + content=InlineTemplate(params.yarn_hbase_env_sh_template) + ) + + File( 
format("{yarn_hbase_grant_premissions_file}"), + owner = params.yarn_hbase_user, + group = params.user_group, + mode = 0o644, + content = Template('yarn_hbase_grant_permissions.j2') + ) + + if (params.yarn_hbase_log4j_props != None): + File(format("{yarn_hbase_conf_dir}/log4j.properties"), + mode=0o644, + group=params.user_group, + owner=params.yarn_hbase_user, + content=InlineTemplate(params.yarn_hbase_log4j_props) + ) + elif (os.path.exists(format("{yarn_hbase_conf_dir}/log4j.properties"))): + File(format("{yarn_hbase_conf_dir}/log4j.properties"), + mode=0o644, + group=params.user_group, + owner=params.yarn_hbase_user + ) + if params.security_enabled: + File(os.path.join(params.yarn_hbase_conf_dir, 'yarn_hbase_master_jaas.conf'), + owner=params.yarn_hbase_user, + group=params.user_group, + content=Template("yarn_hbase_master_jaas.conf.j2") + ) + File(os.path.join(params.yarn_hbase_conf_dir, 'yarn_hbase_regionserver_jaas.conf'), + owner=params.yarn_hbase_user, + group=params.user_group, + content=Template("yarn_hbase_regionserver_jaas.conf.j2") + ) + # Metrics properties + if params.has_metric_collector: + File(os.path.join(params.yarn_hbase_conf_dir, 'hadoop-metrics2-hbase.properties'), + owner=params.yarn_hbase_user, + group=params.user_group, + content=Template("hadoop-metrics2-hbase.properties.j2") + ) + +def setup_atsv2_hbase_directories(): + import params + Directory([params.yarn_hbase_pid_dir_prefix, params.yarn_hbase_pid_dir, params.yarn_hbase_log_dir], + owner=params.yarn_hbase_user, + group=params.user_group, + create_parents=True, + cd_access='a', + ) + + parent_dir = os.path.dirname(params.yarn_hbase_tmp_dir) + # In case if we have several placeholders in path + while ("${" in parent_dir): + parent_dir = os.path.dirname(parent_dir) + if parent_dir != os.path.abspath(os.sep) : + Directory (parent_dir, + create_parents = True, + cd_access="a", + ) + Execute(("chmod", "1777", parent_dir), sudo=True) + +def setup_system_services(config_dir=None): + import 
params + setup_atsv2_hbase_files() + if params.security_enabled: + File(os.path.join(params.yarn_hbase_conf_dir, 'hbase.yarnfile'), + owner=params.yarn_hbase_user, + group=params.user_group, + content=Template("yarn_hbase_secure.yarnfile.j2") + ) + else: + File(os.path.join(params.yarn_hbase_conf_dir, 'hbase.yarnfile'), + owner=params.yarn_hbase_user, + group=params.user_group, + content=Template("yarn_hbase_unsecure.yarnfile.j2") + ) + + + user_dir = format("{yarn_system_service_dir}/{yarn_system_service_launch_mode}/{yarn_hbase_user}") + params.HdfsResource(user_dir, + type="directory", + action="create_on_execute", + owner=params.yarn_user, + group=params.user_group + ) + params.HdfsResource(format("{user_dir}/hbase.yarnfile"), + type="file", + action="create_on_execute", + source=format("{yarn_hbase_conf_dir}/hbase.yarnfile"), + owner=params.yarn_user, + group=params.user_group + ) + params.HdfsResource(format("{yarn_hbase_user_home}"), + type="directory", + action="create_on_execute", + owner=params.yarn_hbase_user, + group=params.user_group, + mode=0o770, + ) + params.HdfsResource(format("{yarn_hbase_user_version_home}"), + type="directory", + action="create_on_execute", + owner=params.yarn_hbase_user, + group=params.user_group, + mode=0o770, + ) + params.HdfsResource(format("{yarn_hbase_user_version_home}/core-site.xml"), + type="file", + action="create_on_execute", + source=format("{config_dir}/core-site.xml"), + owner=params.yarn_hbase_user, + group=params.user_group + ) + params.HdfsResource(format("{yarn_hbase_user_version_home}/hbase-site.xml"), + type="file", + action="create_on_execute", + source=format("{yarn_hbase_conf_dir}/hbase-site.xml"), + owner=params.yarn_hbase_user, + group=params.user_group + ) + params.HdfsResource(format("{yarn_hbase_user_version_home}/hbase-policy.xml"), + type="file", + action="create_on_execute", + source=format("{yarn_hbase_conf_dir}/hbase-policy.xml"), + owner=params.yarn_hbase_user, + group=params.user_group + ) + 
params.HdfsResource(format("{yarn_hbase_user_version_home}/log4j.properties"), + type="file", + action="create_on_execute", + source=format("{yarn_hbase_conf_dir}/log4j.properties"), + owner=params.yarn_hbase_user, + group=params.user_group + ) + if params.has_metric_collector: + params.HdfsResource(format("{yarn_hbase_user_version_home}/hadoop-metrics2-hbase.properties"), + type="file", + action="create_on_execute", + source=format("{yarn_hbase_conf_dir}/hadoop-metrics2-hbase.properties"), + owner=params.yarn_hbase_user, + group=params.user_group + ) + params.HdfsResource(params.yarn_hbase_hdfs_root_dir, + type="directory", + action="create_on_execute", + owner=params.yarn_hbase_user + ) + # copy service-dep.tar.gz into hdfs + params.HdfsResource(format("{yarn_service_app_hdfs_path}"), + type="directory", + action="create_on_execute", + owner=params.hdfs_user, + group=params.hdfs_user, + mode=0o555, + ) + + params.HdfsResource(None, action="execute") + + create_hbase_package() + copy_hbase_package_to_hdfs() \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/yarn_registry_dns.py b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/yarn_registry_dns.py new file mode 100755 index 00000000000..9bd40252eee --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/scripts/yarn_registry_dns.py @@ -0,0 +1,93 @@ +""" +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. 
You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + +Ambari Agent + +""" + +import nodemanager_upgrade + +from resource_management.libraries.script.script import Script +from resource_management.libraries.functions import stack_select +from resource_management.libraries.functions.constants import StackFeature +from resource_management.libraries.functions.stack_features import check_stack_feature +from resource_management.libraries.functions.check_process_status import check_process_status +from resource_management.libraries.functions.format import format +from resource_management.libraries.functions.security_commons import build_expectations, \ + cached_kinit_executor, get_params_from_filesystem, validate_security_config_properties, \ + FILE_TYPE_XML +from resource_management.core.logger import Logger +from yarn import yarn +from service import service +from ambari_commons import OSConst +from ambari_commons.os_family_impl import OsFamilyImpl + + +class RegistryDNS(Script): + def install(self, env): + self.install_packages(env) + + def stop(self, env, upgrade_type=None): + import params + env.set_params(params) + service('registrydns',action='stop') + + def start(self, env, upgrade_type=None): + import params + env.set_params(params) + self.configure(env) # FOR SECURITY + service('registrydns',action='start') + + def configure(self, env): + import params + env.set_params(params) + yarn(name="registrydns") + + +@OsFamilyImpl(os_family=OSConst.WINSRV_FAMILY) +class RegistryDNSWindows(RegistryDNS): + def status(self, env): + service('registrydns', action='status') + + +@OsFamilyImpl(os_family=OsFamilyImpl.DEFAULT) +class 
RegistryDNSDefault(RegistryDNS): + def pre_upgrade_restart(self, env, upgrade_type=None): + Logger.info("Executing RegistryDNS Stack Upgrade pre-restart") + import params + env.set_params(params) + stack_select.select_packages(params.version) + + def status(self, env): + import status_params + env.set_params(status_params) + check_process_status(status_params.yarn_registry_dns_in_use_pid_file) + + def get_log_folder(self): + import params + return params.yarn_log_dir + + def get_user(self): + import params + return params.yarn_user + + def get_pid_files(self): + import status_params + return [status_params.yarn_registry_dns_in_use_pid_file] + +if __name__ == "__main__": + RegistryDNS().execute() diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/hadoop-metrics2-hbase.properties.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/hadoop-metrics2-hbase.properties.j2 new file mode 100755 index 00000000000..5bb6d6f5fc3 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/hadoop-metrics2-hbase.properties.j2 @@ -0,0 +1,62 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#} + +hbase.extendedperiod = 3600 + +hbase.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink +hbase.period=30 +hbase.collector.hosts={{ams_collector_hosts}} +hbase.port={{metric_collector_port}} +hbase.protocol={{metric_collector_protocol}} + +jvm.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink +jvm.period=30 +jvm.collector.hosts={{ams_collector_hosts}} +jvm.port={{metric_collector_port}} +jvm.protocol={{metric_collector_protocol}} + +rpc.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink +rpc.period=30 +rpc.collector.hosts={{ams_collector_hosts}} +rpc.port={{metric_collector_port}} +rpc.protocol={{metric_collector_protocol}} + +*.timeline.plugin.urls=file:///usr/lib/ambari-metrics-hadoop-sink/ambari-metrics-hadoop-sink.jar +*.sink.timeline.slave.host.name={{hostname}} +*.host_in_memory_aggregation = {{host_in_memory_aggregation}} +*.host_in_memory_aggregation_port = {{host_in_memory_aggregation_port}} +{% if is_aggregation_https_enabled %} +*.host_in_memory_aggregation_protocol = {{host_in_memory_aggregation_protocol}} +{% endif %} + +hbase.sink.timeline.class=org.apache.hadoop.metrics2.sink.timeline.HadoopTimelineMetricsSink +hbase.sink.timeline.period={{metrics_collection_period}} +hbase.sink.timeline.sendInterval={{metrics_report_interval}}000 +hbase.sink.timeline.collector.hosts={{ams_collector_hosts}} +hbase.sink.timeline.port={{metric_collector_port}} +hbase.sink.timeline.protocol={{metric_collector_protocol}} +hbase.sink.timeline.serviceName-prefix=yarn-timeline-storage + +# HTTPS properties +hbase.sink.timeline.truststore.path = {{metric_truststore_path}} +hbase.sink.timeline.truststore.type = {{metric_truststore_type}} +hbase.sink.timeline.truststore.password = {{metric_truststore_password}} + +# Switch off metrics generation on a per region basis +*.source.filter.class=org.apache.hadoop.metrics2.filter.RegexFilter +hbase.*.source.filter.exclude=.*(Regions|Users|Tables).* \ No newline 
at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/input.config-yarn.json.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/input.config-yarn.json.j2 new file mode 100755 index 00000000000..7d75279bc84 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/input.config-yarn.json.j2 @@ -0,0 +1,86 @@ +{# + # Licensed to the Apache Software Foundation (ASF) under one + # or more contributor license agreements. See the NOTICE file + # distributed with this work for additional information + # regarding copyright ownership. The ASF licenses this file + # to you under the Apache License, Version 2.0 (the + # "License"); you may not use this file except in compliance + # with the License. You may obtain a copy of the License at + # + # http://www.apache.org/licenses/LICENSE-2.0 + # + # Unless required by applicable law or agreed to in writing, software + # distributed under the License is distributed on an "AS IS" BASIS, + # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + # See the License for the specific language governing permissions and + # limitations under the License. 
+ #} +{ + "input":[ + { + "type":"yarn_nodemanager", + "rowtype":"service", + "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/hadoop-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-nodemanager-*.log" + }, + { + "type":"yarn_resourcemanager", + "rowtype":"service", + "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/hadoop-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-resourcemanager-*.log" + }, + { + "type":"yarn_timelineserver", + "rowtype":"service", + "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/hadoop-{{default('configurations/yarn-env/yarn_user', 'yarn')}}-timelineserver-*.log" + }, + { + "type":"yarn_jobsummary", + "rowtype":"service", + "path":"{{default('/configurations/yarn-env/yarn_log_dir_prefix', '/var/log/hadoop')}}/{{default('configurations/yarn-env/yarn_user', 'yarn')}}/hadoop-mapreduce.jobsummary.log" + } + ], + "filter":[ + { + "filter":"grok", + "conditions":{ + "fields":{ + "type":[ + "yarn_historyserver", + "yarn_nodemanager", + "yarn_resourcemanager", + "yarn_timelineserver" + ] + } + }, + "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n", + "multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})", + "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}\\(%{JAVAFILE:file}:%{JAVAMETHOD:method}\\(%{INT:line_number}\\)\\)%{SPACE}-%{SPACE}%{GREEDYDATA:log_message}", + "post_map_values":{ + "logtime":{ + "map_date":{ + "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS" + } + } + } + }, + { + "filter":"grok", + "conditions":{ + "fields":{ + "type":[ + "yarn_jobsummary" + ] + } + }, + "log4j_format":"%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n", + 
"multiline_pattern":"^(%{TIMESTAMP_ISO8601:logtime})", + "message_pattern":"(?m)^%{TIMESTAMP_ISO8601:logtime}%{SPACE}%{LOGLEVEL:level}%{SPACE}%{JAVACLASS:logger_name}%{SPACE}%{GREEDYDATA:log_message}", + "post_map_values":{ + "logtime":{ + "map_date":{ + "target_date_pattern":"yyyy-MM-dd HH:mm:ss,SSS" + } + } + } + } + ] +} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_ats_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_ats_jaas.conf.j2 old mode 100644 new mode 100755 index 55308e839ed..fdebe402f1d --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_ats_jaas.conf.j2 +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_ats_jaas.conf.j2 @@ -15,6 +15,15 @@ # See the License for the specific language governing permissions and # limitations under the License. #} + +Client { + com.sun.security.auth.module.Krb5LoginModule required + useKeyTab=true + storeKey=true + useTicketCache=false + keyTab="{{yarn_timelineservice_keytab}}" + principal="{{yarn_timelineservice_principal_name}}"; +}; com.sun.security.jgss.krb5.initiate { com.sun.security.auth.module.Krb5LoginModule required renewTGT=false diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_grant_permissions.j2 new file mode 100755 index 00000000000..e464cf8ce8b --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_grant_permissions.j2 @@ -0,0 +1,39 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+# +# +grant '{{yarn_user}}', '{{yarn_user_hbase_permissions}}' +exit \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_master_jaas.conf.j2 new file mode 100755 index 00000000000..5a92102bafd --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_master_jaas.conf.j2 @@ -0,0 +1,36 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#} + +Client { +com.sun.security.auth.module.Krb5LoginModule required +useKeyTab=true +storeKey=true +useTicketCache=false +keyTab="{{yarn_hbase_master_keytab}}" +principal="{{yarn_hbase_master_principal_name}}"; +}; +com.sun.security.jgss.krb5.initiate { +com.sun.security.auth.module.Krb5LoginModule required +renewTGT=false +doNotPrompt=true +useKeyTab=true +storeKey=true +useTicketCache=false +keyTab="{{yarn_hbase_master_keytab}}" +principal="{{yarn_hbase_master_principal_name}}"; +}; diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_package_preparation.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_package_preparation.j2 new file mode 100755 index 00000000000..007c239e082 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_package_preparation.j2 @@ -0,0 +1,76 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# +# + +echo "`date` Running hbase package creation script" +ambari_agent_tmp_dir="{{tmp_dir}}" +yarn_hbase_user="{{yarn_hbase_user}}" +yarn_hbase_user_tmp="{{yarn_hbase_user_tmp}}" +version_home="{{stack_root}}/{{version}}/usr/lib" +if [ ! -d $yarn_hbase_user_tmp ] +then + echo "Creating a tmp path $yarn_hbase_user_tmp"; + mkdir -p $yarn_hbase_user_tmp; + cd $ambari_agent_tmp_dir; + chmod -R 0755 $yarn_hbase_user; +fi; + +cd $yarn_hbase_user_tmp +if [ -f hbase.tar.gz ] +then + echo "`date` hbase.tar.gz package exist in path $yarn_hbase_user_tmp." + echo "`date` File status: `ls -l hbase.tar.gz`" + echo "`date` Exitting from script.." + exit +fi; + +cp -rf $version_home/hbase . +rm -rf hbase/conf +rm -rf hbase/logs +rm -rf hbase/pids +rm -rf hbase/lib/zookeeper*.jar +cp -rf $version_home/zookeeper/zookeeper-*.jar hbase/lib +cp -rf $version_home/hadoop/mapreduce.tar.gz . +tar -xzf mapreduce.tar.gz +rm -rf mapreduce.tar.gz +tar -czf hbase.tar.gz hbase hadoop && echo "`date` HBase package created in path $yarn_hbase_user_tmp" +chmod 644 hbase.tar.gz && echo "`date` hbase.tar.gz has set with ugo=644" +rm -rf hbase +rm -rf hadoop +echo "`date` File status: `ls -l $yarn_hbase_user_tmp/hbase.tar.gz`" +echo "`date` Exitting from script.." 
+exit \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_regionserver_jaas.conf.j2 new file mode 100755 index 00000000000..0ec41f5b738 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_regionserver_jaas.conf.j2 @@ -0,0 +1,36 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#} + +Client { +com.sun.security.auth.module.Krb5LoginModule required +useKeyTab=true +storeKey=true +useTicketCache=false +keyTab="{{yarn_hbase_regionserver_keytab}}" +principal="{{yarn_hbase_regionserver_principal_name}}"; +}; +com.sun.security.jgss.krb5.initiate { +com.sun.security.auth.module.Krb5LoginModule required +renewTGT=false +doNotPrompt=true +useKeyTab=true +storeKey=true +useTicketCache=false +keyTab="{{yarn_hbase_regionserver_keytab}}" +principal="{{yarn_hbase_regionserver_principal_name}}"; +}; diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_secure.yarnfile.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_secure.yarnfile.j2 new file mode 100755 index 00000000000..c4a3fc34adc --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_secure.yarnfile.j2 @@ -0,0 +1,172 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#} +{ + "name": "ats-hbase", + "version": "1.0.0", + "lifetime": "-1", + "queue": "{{yarn_hbase_service_queue_name}}", + "artifact": { + "id": "{{yarn_hbase_app_hdfs_path}}/hbase.tar.gz", + "type": "TARBALL" + }, + "configuration": { + "properties" : { + "yarn.service.container-failure.retry.max" : 10, + "yarn.service.framework.path" : "{{yarn_service_app_hdfs_path}}/service-dep.tar.gz" + }, + "env": { + "HBASE_IDENT_STRING": "${USER}", + "HBASE_LOG_DIR": "", + "HBASE_CONF_DIR": "$PWD/conf", + "HBASE_PID_DIR": "$PWD/pids", + "JAVA_HOME": "{{java64_home}}", + "HBASE_ROOT_LOGGER": "{{yarn_hbase_log_level}},RFA", + "SERVER_GC_OPTS": "-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:${HBASE_LOG_DIR}/gc.log-`date +'%Y%m%d%H%M'`", + "HBASE_OPTS": "-XX:+UseConcMarkSweepGC -XX:ErrorFile=${HBASE_LOG_DIR}/hs_err_pid%p.log -Djava.io.tmpdir=/tmp", + "HADOOP_HOME": "$PWD/lib/hadoop", + "HADOOP_YARN_HOME": "$PWD/lib/hadoop", + "HBASE_HOME": "$PWD/lib/hbase" + }, + "files": [ + { + "type": "TEMPLATE", + "dest_file": "log4j.properties", + "src_file": "{{yarn_hbase_user_version_home}}/log4j.properties" + }, + { + "type": "TEMPLATE", + "dest_file": "hbase-site.xml", + "src_file": "{{yarn_hbase_user_version_home}}/hbase-site.xml" + }, + { + "type": "TEMPLATE", + "dest_file": "hbase-policy.xml", + "src_file": "{{yarn_hbase_user_version_home}}/hbase-policy.xml" + }, + { + "type": "TEMPLATE", + "dest_file": "core-site.xml", + "src_file": "{{yarn_hbase_user_version_home}}/core-site.xml" + }, + { + "type": "TEMPLATE", + "dest_file": "hadoop-metrics2-hbase.properties", + "src_file": "{{yarn_hbase_user_version_home}}/hadoop-metrics2-hbase.properties" + } + ] + }, + "components": [ + { + "name": "master", + "number_of_containers": {{yarn_hbase_master_containers}}, + "launch_command": "$HBASE_HOME/bin/hbase master start", + "readiness_check": { + "type": "HTTP", + "properties": { + "url": "http://${THIS_HOST}:{{yarn_hbase_master_info_port}}/master-status" + } + }, + "resource": 
{ + "cpus": {{yarn_hbase_master_cpu}}, + "memory": "{{yarn_hbase_master_memory}}" + }, + "configuration": { + "env": { + "HBASE_LOG_PREFIX": "hbase-$HBASE_IDENT_STRING-master-$HOSTNAME", + "HBASE_LOGFILE": "$HBASE_LOG_PREFIX.log", + "HBASE_MASTER_OPTS": "-Xms{{yarn_hbase_master_heapsize}} -Xmx{{yarn_hbase_master_heapsize}} -Djava.security.auth.login.config={{yarn_hbase_master_jaas_file}}" + } + }, + "placement_policy": { + "constraints": [ + { + "type": "ANTI_AFFINITY", + "scope": "node", + "target_tags": [ + "master" + ] + } + ] + } + }, + { + "name": "regionserver", + "number_of_containers": {{yarn_hbase_regionserver_containers}}, + "launch_command": "$HBASE_HOME/bin/hbase regionserver start", + "readiness_check": { + "type": "HTTP", + "properties": { + "url": "http://${THIS_HOST}:{{yarn_hbase_regionserver_info_port}}/rs-status" + } + }, + "resource": { + "cpus": {{yarn_hbase_regionserver_cpu}}, + "memory": "{{yarn_hbase_regionserver_memory}}" + }, + "configuration": { + "env": { + "HBASE_LOG_PREFIX": "hbase-$HBASE_IDENT_STRING-regionserver-$HOSTNAME", + "HBASE_LOGFILE": "$HBASE_LOG_PREFIX.log", + "HBASE_REGIONSERVER_OPTS": "-XX:CMSInitiatingOccupancyFraction=70 -XX:ReservedCodeCacheSize=256m -Xms{{yarn_hbase_regionserver_heapsize}} -Xmx{{yarn_hbase_regionserver_heapsize}} -Djava.security.auth.login.config={{yarn_hbase_regionserver_jaas_file}}" + } + }, + "placement_policy": { + "constraints": [ + { + "type": "ANTI_AFFINITY", + "scope": "node", + "target_tags": [ + "regionserver" + ] + } + ] + } + }, + { + "name": "hbaseclient", + "number_of_containers": {{yarn_hbase_client_containers}}, + "launch_command": "sleep 10;export HBASE_CLASSPATH_PREFIX=$HADOOP_HOME/share/hadoop/yarn/timelineservice/*;{{yarn_hbase_kinit_cmd}} $HBASE_HOME/bin/hbase {{class_name}}; {{yarn_hbase_kinit_cmd}} $HBASE_HOME/bin/hbase shell {{yarn_hbase_grant_premissions_file}};sleep infinity", + "dependencies": [ "master", "regionserver" ], + "resource": { + "cpus": {{yarn_hbase_client_cpu}}, + 
"memory": "{{yarn_hbase_client_memory}}" + }, + "configuration": { + "env": { + "HBASE_LOG_PREFIX": "hbase-$HBASE_IDENT_STRING-hbaseclient-$HOSTNAME", + "HBASE_LOGFILE": "$HBASE_LOG_PREFIX.log" + } + }, + "placement_policy": { + "constraints": [ + { + "type": "ANTI_AFFINITY", + "scope": "node", + "target_tags": [ + "hbaseclient" + ] + } + ] + } + } + ], + "kerberos_principal" : { + "principal_name" : "{{yarn_ats_hbase_principal_name}}", + "keytab" : "file://{{yarn_ats_hbase_keytab}}" + } +} \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_unsecure.yarnfile.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_unsecure.yarnfile.j2 new file mode 100755 index 00000000000..1bc41559705 --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_hbase_unsecure.yarnfile.j2 @@ -0,0 +1,168 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#} +{ + "name": "ats-hbase", + "version": "1.0.0", + "lifetime": "-1", + "queue": "{{yarn_hbase_service_queue_name}}", + "artifact": { + "id": "{{yarn_hbase_app_hdfs_path}}/hbase.tar.gz", + "type": "TARBALL" + }, + "configuration": { + "properties" : { + "yarn.service.container-failure.retry.max" : 10, + "yarn.service.framework.path" : "{{yarn_service_app_hdfs_path}}/service-dep.tar.gz" + }, + "env": { + "HBASE_IDENT_STRING": "${USER}", + "HBASE_LOG_DIR": "", + "HBASE_CONF_DIR": "$PWD/conf", + "HBASE_PID_DIR": "$PWD/pids", + "JAVA_HOME": "{{java64_home}}", + "HBASE_ROOT_LOGGER": "{{yarn_hbase_log_level}},RFA", + "SERVER_GC_OPTS": "-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:${HBASE_LOG_DIR}/gc.log-`date +'%Y%m%d%H%M'`", + "HBASE_OPTS": "-XX:+UseConcMarkSweepGC -XX:ErrorFile=${HBASE_LOG_DIR}/hs_err_pid%p.log -Djava.io.tmpdir=/tmp", + "HADOOP_HOME": "$PWD/lib/hadoop", + "HADOOP_YARN_HOME": "$PWD/lib/hadoop", + "HBASE_HOME": "$PWD/lib/hbase" + }, + "files": [ + { + "type": "TEMPLATE", + "dest_file": "log4j.properties", + "src_file": "{{yarn_hbase_user_version_home}}/log4j.properties" + }, + { + "type": "TEMPLATE", + "dest_file": "hbase-site.xml", + "src_file": "{{yarn_hbase_user_version_home}}/hbase-site.xml" + }, + { + "type": "TEMPLATE", + "dest_file": "hbase-policy.xml", + "src_file": "{{yarn_hbase_user_version_home}}/hbase-policy.xml" + }, + { + "type": "TEMPLATE", + "dest_file": "core-site.xml", + "src_file": "{{yarn_hbase_user_version_home}}/core-site.xml" + }, + { + "type": "TEMPLATE", + "dest_file": "hadoop-metrics2-hbase.properties", + "src_file": "{{yarn_hbase_user_version_home}}/hadoop-metrics2-hbase.properties" + } + ] + }, + "components": [ + { + "name": "master", + "number_of_containers": {{yarn_hbase_master_containers}}, + "launch_command": "$HBASE_HOME/bin/hbase master start", + "readiness_check": { + "type": "HTTP", + "properties": { + "url": "http://${THIS_HOST}:{{yarn_hbase_master_info_port}}/master-status" + } + }, + "resource": 
{ + "cpus": {{yarn_hbase_master_cpu}}, + "memory": "{{yarn_hbase_master_memory}}" + }, + "configuration": { + "env": { + "HBASE_LOG_PREFIX": "hbase-$HBASE_IDENT_STRING-master-$HOSTNAME", + "HBASE_LOGFILE": "$HBASE_LOG_PREFIX.log", + "HBASE_MASTER_OPTS": "-Xms{{yarn_hbase_master_heapsize}} -Xmx{{yarn_hbase_master_heapsize}}" + } + }, + "placement_policy": { + "constraints": [ + { + "type": "ANTI_AFFINITY", + "scope": "node", + "target_tags": [ + "master" + ] + } + ] + } + }, + { + "name": "regionserver", + "number_of_containers": {{yarn_hbase_regionserver_containers}}, + "launch_command": "$HBASE_HOME/bin/hbase regionserver start", + "readiness_check": { + "type": "HTTP", + "properties": { + "url": "http://${THIS_HOST}:{{yarn_hbase_regionserver_info_port}}/rs-status" + } + }, + "resource": { + "cpus": {{yarn_hbase_regionserver_cpu}}, + "memory": "{{yarn_hbase_regionserver_memory}}" + }, + "configuration": { + "env": { + "HBASE_LOG_PREFIX": "hbase-$HBASE_IDENT_STRING-regionserver-$HOSTNAME", + "HBASE_LOGFILE": "$HBASE_LOG_PREFIX.log", + "HBASE_REGIONSERVER_OPTS": "-XX:CMSInitiatingOccupancyFraction=70 -XX:ReservedCodeCacheSize=256m -Xms{{yarn_hbase_regionserver_heapsize}} -Xmx{{yarn_hbase_regionserver_heapsize}}" + } + }, + "placement_policy": { + "constraints": [ + { + "type": "ANTI_AFFINITY", + "scope": "node", + "target_tags": [ + "regionserver" + ] + } + ] + } + }, + { + "name": "hbaseclient", + "number_of_containers": {{yarn_hbase_client_containers}}, + "launch_command": "sleep 10;export HBASE_CLASSPATH_PREFIX=$HADOOP_HOME/share/hadoop/yarn/timelineservice/*;$HBASE_HOME/bin/hbase {{class_name}};sleep infinity", + "dependencies": [ "master", "regionserver" ], + "resource": { + "cpus": {{yarn_hbase_client_cpu}}, + "memory": "{{yarn_hbase_client_memory}}" + }, + "configuration": { + "env": { + "HBASE_LOG_PREFIX": "hbase-$HBASE_IDENT_STRING-hbaseclient-$HOSTNAME", + "HBASE_LOGFILE": "$HBASE_LOG_PREFIX.log" + } + }, + "placement_policy": { + "constraints": [ + { + 
"type": "ANTI_AFFINITY", + "scope": "node", + "target_tags": [ + "hbaseclient" + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_nm_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_nm_jaas.conf.j2 index b501c824153..0a51afdf571 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_nm_jaas.conf.j2 +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_nm_jaas.conf.j2 @@ -15,6 +15,15 @@ # See the License for the specific language governing permissions and # limitations under the License. #} + +Client { + com.sun.security.auth.module.Krb5LoginModule required + useKeyTab=true + storeKey=true + useTicketCache=false + keyTab="{{nodemanager_keytab}}" + principal="{{nodemanager_principal_name}}"; +}; com.sun.security.jgss.krb5.initiate { com.sun.security.auth.module.Krb5LoginModule required renewTGT=false diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_registry_dns_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_registry_dns_jaas.conf.j2 new file mode 100755 index 00000000000..bec53cb0e5c --- /dev/null +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/package/templates/yarn_registry_dns_jaas.conf.j2 @@ -0,0 +1,37 @@ +{# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#} + +Client { + com.sun.security.auth.module.Krb5LoginModule required + doNotPrompt=true + useKeyTab=true + storeKey=true + useTicketCache=false + keyTab="{{yarn_registry_dns_keytab}}" + principal="{{yarn_registry_dns_principal_name}}"; +}; +com.sun.security.jgss.krb5.initiate { + com.sun.security.auth.module.Krb5LoginModule required + renewTGT=false + doNotPrompt=true + useKeyTab=true + keyTab="{{yarn_registry_dns_keytab}}" + principal="{{yarn_registry_dns_principal_name}}" + storeKey=true + useTicketCache=false; +}; diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/properties/container-executor.cfg.j2 b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/properties/container-executor.cfg.j2 index c6f1ff676d7..42e7d58c8be 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/properties/container-executor.cfg.j2 +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/properties/container-executor.cfg.j2 @@ -38,3 +38,22 @@ yarn.nodemanager.log-dirs={{nm_log_dirs}} yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}} banned.users=hdfs,yarn,mapred,bin min.user.id={{min_user_id}} + +{{ '[docker]' }} + module.enabled={{docker_module_enabled}} + docker.binary={{docker_binary}} + docker.allowed.capabilities={{docker_allowed_capabilities}} + docker.allowed.devices={{docker_allowed_devices}} + docker.allowed.networks={{docker_allowed_networks}} + docker.allowed.ro-mounts={{nm_local_dirs}},{{docker_allowed_ro_mounts}} + 
docker.allowed.rw-mounts={{nm_local_dirs}},{{nm_log_dirs}},{{docker_allowed_rw_mounts}} + docker.privileged-containers.enabled={{docker_privileged_containers_enabled}} + docker.trusted.registries={{docker_trusted_registries}} + docker.allowed.volume-drivers={{docker_allowed_volume_drivers}} + +{{ '[gpu]' }} + module.enabled={{gpu_module_enabled}} + +{{ '[cgroups]' }} + root={{cgroup_root}} + yarn-hierarchy={{yarn_hierarchy}} \ No newline at end of file diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/quicklinks/quicklinks.json b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/quicklinks/quicklinks.json index ced93852875..9b25fa56fdd 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/quicklinks/quicklinks.json +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/quicklinks/quicklinks.json @@ -20,7 +20,7 @@ "label": "ResourceManager UI", "requires_user_name": "false", "component_name": "RESOURCEMANAGER", - "url": "%@://%@:%@", + "url": "%@://%@:%@/ui2", "port":{ "http_property": "yarn.resourcemanager.webapp.address", "http_default_port": "8088", diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/role_command_order.json b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/role_command_order.json index 9ab3b4828d8..af5451046e8 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/role_command_order.json +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/YARN/role_command_order.json @@ -11,8 +11,8 @@ "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"], "HISTORYSERVER-RESTART": ["NAMENODE-RESTART"], "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], - "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"] - + "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"], + 
"TIMELINE_READER-START": ["ZOOKEEPER_SERVER-START", "NAMENODE-START", "DATANODE-START","HBASE_REGIONSERVER-START"] }, "_comment" : "Dependencies that are used in ResourceManager HA cluster", "resourcemanager_optional_ha" : { diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZEPPELIN/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZEPPELIN/metainfo.xml index 761f3574fe8..81e6b7def27 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZEPPELIN/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZEPPELIN/metainfo.xml @@ -65,7 +65,7 @@ limitations under the License. - redhat7,redhat8 + redhat7,redhat8,openeuler22 zeppelin_${stack_version} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/metainfo.xml index d86cad0801a..aff74b2e684 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.2.0/services/ZOOKEEPER/metainfo.xml @@ -72,7 +72,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 zookeeper_${stack_version} diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/repos/repoinfo.xml index ef5da7f4b45..db01d3c142e 100644 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/repos/repoinfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/repos/repoinfo.xml @@ -26,7 +26,14 @@ https://bigtop-snapshot.s3.amazonaws.com/centos-8/$basearch - BIGTOP-3.2.0 + BIGTOP-3.3.0 + bigtop + + + + + https://bigtop-snapshot.s3.amazonaws.com/openeuler-22/$basearch + BIGTOP-3.3.0 bigtop diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/metainfo.xml index 
c72573937d8..2743a244e63 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER/metainfo.xml @@ -144,7 +144,7 @@ - redhat7,redhat8 + redhat7,redhat8,openeuler22 ranger_${stack_version}-admin diff --git a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/metainfo.xml b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/metainfo.xml index a60548a8d38..492723ab9e5 100755 --- a/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/metainfo.xml +++ b/ambari-server/src/main/resources/stacks/BIGTOP/3.3.0/services/RANGER_KMS/metainfo.xml @@ -60,7 +60,7 @@ - redhat9,redhat8,redhat7,amazonlinux2,redhat6,suse11,suse12 + redhat9,redhat8,redhat7,amazonlinux2,redhat6,suse11,suse12,openeuler22 ranger_${stack_version}-kms diff --git a/ambari-server/src/main/resources/version_definition.xsd b/ambari-server/src/main/resources/version_definition.xsd index dcca5cd15e6..50261d99d39 100644 --- a/ambari-server/src/main/resources/version_definition.xsd +++ b/ambari-server/src/main/resources/version_definition.xsd @@ -66,6 +66,7 @@ + diff --git a/ambari-web/app/controllers.js b/ambari-web/app/controllers.js index bd858a51b02..fbfa68d05c1 100644 --- a/ambari-web/app/controllers.js +++ b/ambari-web/app/controllers.js @@ -61,6 +61,11 @@ require('controllers/main/admin/federation/step1_controller'); require('controllers/main/admin/federation/step2_controller'); require('controllers/main/admin/federation/step3_controller'); require('controllers/main/admin/federation/step4_controller'); +require('controllers/main/admin/federation/routerBasedFederation/wizard_controller'); +require('controllers/main/admin/federation/routerBasedFederation/step1_controller'); +require('controllers/main/admin/federation/routerBasedFederation/step2_controller'); 
+require('controllers/main/admin/federation/routerBasedFederation/step3_controller'); +require('controllers/main/admin/federation/routerBasedFederation/step4_controller'); require('controllers/main/admin/highAvailability/hawq/addStandby/wizard_controller'); require('controllers/main/admin/highAvailability/hawq/addStandby/step1_controller'); require('controllers/main/admin/highAvailability/hawq/addStandby/step2_controller'); diff --git a/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step1_controller.js b/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step1_controller.js new file mode 100644 index 00000000000..60ee0bb2683 --- /dev/null +++ b/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step1_controller.js @@ -0,0 +1,26 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +var App = require('app'); + +App.RouterFederationWizardStep1Controller = Em.Controller.extend({ + name: "routerFederationWizardStep1Controller", + next: function () { + App.router.send('next'); + } +}); \ No newline at end of file diff --git a/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step2_controller.js b/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step2_controller.js new file mode 100644 index 00000000000..a367c3d2b2e --- /dev/null +++ b/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step2_controller.js @@ -0,0 +1,67 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +var App = require('app'); + +App.RouterFederationWizardStep2Controller = Em.Controller.extend(App.AssignMasterComponents, { + + name: "routerFederationWizardStep2Controller", + + useServerValidation: false, + + mastersToShow: ['NAMENODE', 'ROUTER'], + + mastersToAdd: ['ROUTER'], + + showCurrentPrefix: ['NAMENODE', 'ROUTER'], + + showAdditionalPrefix: ['ROUTER'], + + mastersAddableInHA: ['ROUTER'], + + showInstalledMastersFirst: true, + + renderComponents: function (masterComponents) { + // check if we are restoring components assignment by checking existence of ROUTER component in array + var restoringComponents = masterComponents.someProperty('component_name', 'ROUTER'); + masterComponents = restoringComponents ? masterComponents : masterComponents.concat(this.generateRouterComponents()); + this._super(masterComponents); + // if you have similar functions for router, call them here + }, + + + generateRouterComponents: function () { + var router = []; + App.HostComponent.find().filterProperty('componentName', 'ROUTER').forEach(function (rbf) { + var rbfComponent = this.createComponentInstallationObject(Em.Object.create({ + serviceName: rbf.get('service.serviceName'), + componentName: rbf.get('componentName') + }), rbf.get('hostName')); + rbfComponent.isInstalled = true; + router.push(rbfComponent); + }, this); + return router; + }, + + actions: { + back() { + this.clearStep() + } + } + +}); diff --git a/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step3_controller.js b/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step3_controller.js new file mode 100644 index 00000000000..364f125b1cf --- /dev/null +++ b/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step3_controller.js @@ -0,0 +1,191 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +var App = require('app'); + +App.RouterFederationWizardStep3Controller = Em.Controller.extend(App.BlueprintMixin, { + name: "routerFederationWizardStep3Controller", + selectedService: null, + stepConfigs: [], + serverConfigData: {}, + once: false, + isLoaded: false, + isConfigsLoaded: false, + versionLoaded: true, + hideDependenciesInfoBar: true, + + /** + * Map of sites and properties to delete + * @type Object + */ + + clearStep: function () { + this.get('stepConfigs').clear(); + this.set('serverConfigData', {}); + this.set('isConfigsLoaded', false); + this.set('isLoaded', false); + }, + + loadStep: function () { + this.clearStep(); + this.loadConfigsTags(); + }, + + loadConfigsTags: function () { + return App.ajax.send({ + name: 'config.tags', + sender: this, + success: 'onLoadConfigsTags' + }); + }, + + + onLoadConfigsTags: function (data) { + var urlParams = '(type=hdfs-rbf-site&tag=' + data.Clusters.desired_configs['hdfs-rbf-site'].tag + ')|'; + urlParams += '(type=hdfs-site&tag=' + data.Clusters.desired_configs['hdfs-site'].tag + ')|'; + urlParams += '(type=core-site&tag=' + data.Clusters.desired_configs['core-site'].tag + ')'; + App.ajax.send({ + name: 'admin.get.all_configurations', + sender: this, + data: { + urlParams: urlParams + }, + success: 'onLoadConfigs' 
+ }); + }, + + onLoadConfigs: function (data) { + this.set('serverConfigData', data); + this.set('isConfigsLoaded', true); + }, + + onLoad: function () { + if (this.get('isConfigsLoaded') && App.router.get('clusterController.isHDFSNameSpacesLoaded')) { + var routerFederationConfig = $.extend(true, {}, require('data/configs/wizards/router_federation_properties').routerFederationConfig); + if (App.get('hasNameNodeFederation')) { + routerFederationConfig.configs = routerFederationConfig.configs.rejectProperty('firstRun'); + } + routerFederationConfig.configs = this.tweakServiceConfigs(routerFederationConfig.configs); + var configsFromServer = this.get('serverConfigData.items'); + var hdfsrbfConfigs = configsFromServer.findProperty('type', 'hdfs-rbf-site'); + var configToSave = { + type: 'hdfs-rbf-site', + properties: hdfsrbfConfigs&&hdfsrbfConfigs.properties, + }; + if (hdfsrbfConfigs && hdfsrbfConfigs.properties_attributes) { + configToSave.properties_attributes = hdfsrbfConfigs.properties_attributes; + } + for(const property of routerFederationConfig.configs){ + configToSave.properties[property.name]=property.value + } + App.ajax.send({ + name: 'common.service.configurations', + sender: self, + data: { + desired_config: configToSave + }, + error: 'onTaskError' + }); + + this.renderServiceConfigs(routerFederationConfig); + this.set('isLoaded', true); + } + }.observes('isConfigsLoaded', 'App.router.clusterController.isHDFSNameSpacesLoaded'), + + prepareDependencies: function () { + var ret = {}; + var configsFromServer = this.get('serverConfigData.items'); + var nameNodes = this.get('content.masterComponentHosts').filterProperty('component', 'NAMENODE'); + var hdfsSiteConfigs = configsFromServer.findProperty('type', 'hdfs-site').properties; + var coreSiteConfigs = configsFromServer.findProperty('type', 'core-site').properties; + var nameServices = App.HDFSService.find().objectAt(0).get('masterComponentGroups').mapProperty('name'); + var modifiedNameServices = []; + 
var nnCounter = 1; + ret.nameServicesList = nameServices.join(','); + ret.nameservice1 = nameServices[0]; + for (let i = 0; i < nameServices.length; i++) { + let nameservice = nameServices[i]; + modifiedNameServices.push(`${nameservice}.nn${nnCounter}`); + nnCounter++; + modifiedNameServices.push(`${nameservice}.nn${nnCounter}`); + nnCounter++; + } + ret.modifiedNameServices = modifiedNameServices.join(','); + + ret.zkAddress = coreSiteConfigs['ha.zookeeper.quorum']; + + return ret; + }, + tweakServiceConfigs: function (configs) { + var dependencies = this.prepareDependencies(); + var result = []; + var configsToRemove = []; + var hdfsSiteConfigs = this.get('serverConfigData').items.findProperty('type', 'hdfs-site').properties; + var wizardController = App.router.get(this.get('content.controllerName')); + configs.forEach(function (config) { + config.isOverridable = false; + config.name = wizardController.replaceDependencies(config.name, dependencies); + config.displayName = wizardController.replaceDependencies(config.displayName, dependencies); + config.value = wizardController.replaceDependencies(config.value, dependencies); + config.recommendedValue = wizardController.replaceDependencies(config.recommendedValue, dependencies); + result.push(config); + + }, this); + + return result; + }, + + renderServiceConfigs: function (_serviceConfig) { + var serviceConfig = App.ServiceConfig.create({ + serviceName: _serviceConfig.serviceName, + displayName: _serviceConfig.displayName, + configCategories: [], + showConfig: true, + configs: [] + }); + + _serviceConfig.configCategories.forEach(function (_configCategory) { + if (App.Service.find().someProperty('serviceName', _configCategory.name)) { + serviceConfig.configCategories.pushObject(_configCategory); + } + }, this); + + this.loadComponentConfigs(_serviceConfig, serviceConfig); + + this.get('stepConfigs').pushObject(serviceConfig); + this.set('selectedService', this.get('stepConfigs').objectAt(0)); + this.set('once', 
true); + }, + + /** + * Load child components to service config object + * @param _componentConfig + * @param componentConfig + */ + loadComponentConfigs: function (_componentConfig, componentConfig) { + _componentConfig.configs.forEach(function (_serviceConfigProperty) { + var serviceConfigProperty = App.ServiceConfigProperty.create(_serviceConfigProperty); + componentConfig.configs.pushObject(serviceConfigProperty); + serviceConfigProperty.set('isEditable', serviceConfigProperty.get('isReconfigurable')); + }, this); + }, + + isNextDisabled: function () { + return !this.get('isLoaded') || (this.get('isLoaded') && this.get('selectedService.configs').someProperty('isValid', false)); + }.property('selectedService.configs.@each.isValid', 'isLoaded') +}); \ No newline at end of file diff --git a/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step4_controller.js b/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step4_controller.js new file mode 100644 index 00000000000..8e0f5925f22 --- /dev/null +++ b/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/step4_controller.js @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +var App = require('app'); + +App.RouterFederationWizardStep4Controller = App.HighAvailabilityProgressPageController.extend(App.WizardEnableDone, { + + name: "routerFederationWizardStep4Controller", + + commands: ['installRouter', 'startRouters'], + + tasksMessagesPrefix: 'admin.routerFederation.wizard.step', + + initializeTasks: function () { + this._super(); + this.removeUnneededTasks(); + }, + + removeUnneededTasks: function () { + var installedServices = App.Service.find().mapProperty('serviceName'); + if (!installedServices.contains('RANGER')) { + this.removeTasks(['startInfraSolr', 'startRangerAdmin', 'startRangerUsersync']); + } + if (!installedServices.contains('AMBARI_INFRA_SOLR')) { + this.removeTasks(['startInfraSolr']); + } + }, + + reconfigureServices: function () { + var servicesModel = App.Service.find(); + var configs = []; + var data = this.get('content.serviceConfigProperties'); + var note = Em.I18n.t('admin.routerFederation.wizard,step4.save.configuration.note'); + configs.push({ + Clusters: { + desired_config: this.reconfigureSites(['hdfs-rbf-site'], data, note) + } + }); + return App.ajax.send({ + name: 'common.service.multiConfigurations', + sender: this, + data: { + configs: configs + }, + error: 'onTaskError', + success: 'installHDFSClients' + }); + }, + + installHDFSClients: function () { + var nnHostNames = this.get('content.masterComponentHosts').filterProperty('component', 'NAMENODE').mapProperty('hostName'); + var jnHostNames = App.HostComponent.find().filterProperty('componentName', 'JOURNALNODE').mapProperty('hostName'); + var hostNames = nnHostNames.concat(jnHostNames).uniq(); + this.createInstallComponentTask('HDFS_CLIENT', hostNames, 'HDFS'); + }, + + installRouter: function () { + this.createInstallComponentTask('ROUTER', this.get('content.masterComponentHosts').filterProperty('component', 'ROUTER').mapProperty('hostName'), "HDFS"); + }, + + startRouters: function () { + var hostNames = 
this.get('content.masterComponentHosts').filterProperty('component', 'ROUTER').mapProperty('hostName'); + this.updateComponent('ROUTER', hostNames, "HDFS", "Start"); + } +}); \ No newline at end of file diff --git a/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/wizard_controller.js b/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/wizard_controller.js new file mode 100644 index 00000000000..588030821d1 --- /dev/null +++ b/ambari-web/app/controllers/main/admin/federation/routerBasedFederation/wizard_controller.js @@ -0,0 +1,158 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +var App = require('app'); + +App.RouterFederationWizardController = App.WizardController.extend({ + + name: 'routerFederationWizardController', + + totalSteps: 4, + + /** + * @type {string} + */ + displayName: Em.I18n.t('admin.routerFederation.wizard.header'), + + isFinished: false, + + content: Em.Object.create({ + controllerName: 'routerFederationWizardController' + }), + + /** + * Load data for all steps until current step + */ + loadMap: { + '1': [ + { + type: 'sync', + callback: function () { + this.load('cluster'); + } + } + ], + '2': [ + { + type: 'async', + callback: function () { + var self = this, + dfd = $.Deferred(); + this.loadServicesFromServer(); + this.loadMasterComponentHosts().done(function () { + self.loadConfirmedHosts(); + dfd.resolve(); + }); + return dfd.promise(); + } + } + ], + '3': [ + { + type: 'sync', + callback: function () { + this.load('cluster'); + } + } + ], + '4': [ + { + type: 'sync', + callback: function () { + this.loadServiceConfigProperties(); + this.loadTasksStatuses(); + this.loadTasksRequestIds(); + this.loadRequestIds(); + } + } + ] + }, + + init: function () { + this._super(); + this.clearStep(); + }, + + clearStep: function () { + this.set('isFinished', false); + }, + + setCurrentStep: function (currentStep, completed) { + this._super(currentStep, completed); + App.clusterStatus.setClusterStatus({ + clusterName: this.get('content.cluster.name'), + wizardControllerName: 'routerFederationWizardController', + localdb: App.db.data + }); + }, + + saveNNHosts: function (nnHosts) { + this.set('content.nnHosts', nnHosts); + this.setDBProperty('nnHosts', nnHosts); + }, + + /** + * Load hosts for additional and current ResourceManagers from local db to controller.content + */ + loadNNHosts: function() { + var nnHosts = this.getDBProperty('nnHosts'); + this.set('content.nnHosts', nnHosts); + }, + + saveServiceConfigProperties: function (stepController) { + var serviceConfigProperties = []; + var data = 
stepController.get('serverConfigData'); + + var _content = stepController.get('stepConfigs')[0]; + _content.get('configs').forEach(function (_configProperties) { + var siteObj = data.items.findProperty('type', _configProperties.get('filename')); + if (siteObj) { + siteObj.properties[_configProperties.get('name')] = _configProperties.get('value'); + } + }, this); + this.setDBProperty('serviceConfigProperties', data); + this.set('content.serviceConfigProperties', data); + }, + + /** + * Load serviceConfigProperties to model + */ + loadServiceConfigProperties: function () { + this.set('content.serviceConfigProperties', this.getDBProperty('serviceConfigProperties')); + }, + + /** + * Remove all loaded data. + * Created as copy for App.router.clearAllSteps + */ + clearAllSteps: function () { + this.clearInstallOptions(); + // clear temporary information stored during the install + this.set('content.cluster', this.getCluster()); + }, + + /** + * Clear all temporary data + */ + finish: function () { + this.resetDbNamespace(); + App.router.get('updateController').updateAll(); + this.set('isFinished', true); + } +}); \ No newline at end of file diff --git a/ambari-web/app/controllers/main/admin/highAvailability_controller.js b/ambari-web/app/controllers/main/admin/highAvailability_controller.js index 3ac72149799..f73f895ec7b 100644 --- a/ambari-web/app/controllers/main/admin/highAvailability_controller.js +++ b/ambari-web/app/controllers/main/admin/highAvailability_controller.js @@ -142,6 +142,28 @@ App.MainAdminHighAvailabilityController = App.WizardController.extend({ return true; }, + /** + * enable router Based Federation + * @return {Boolean} + */ + enableRouterFederation: function () { + //Prerequisite Checks + var message = []; + if (!App.HostComponent.find().filterProperty('componentName', 'ZOOKEEPER_SERVER').everyProperty('workStatus', 'STARTED')) { + message.push(Em.I18n.t('admin.nameNodeFederation.wizard.required.zookeepers')); + } + + if 
(!App.HostComponent.find().filterProperty('componentName', 'JOURNALNODE').everyProperty('workStatus', 'STARTED')) { + message.push(Em.I18n.t('admin.nameNodeFederation.wizard.required.journalnodes')); + } + if (message.length > 0) { + this.showErrorPopup(message); + return false; + } + App.router.transitionTo('main.services.enableRouterFederation'); + return true; + }, + /** * open Manage JournalNode Wizard if there are two started NameNodes with defined active/standby state * @returns {boolean} diff --git a/ambari-web/app/controllers/main/service/item.js b/ambari-web/app/controllers/main/service/item.js index d749c313f70..380c94aaabf 100644 --- a/ambari-web/app/controllers/main/service/item.js +++ b/ambari-web/app/controllers/main/service/item.js @@ -1518,6 +1518,11 @@ App.MainServiceItemController = Em.Controller.extend(App.SupportClientConfigsDow highAvailabilityController.enableNameNodeFederation(); }, + openRouterFederationWizard: function () { + var highAvailabilityController = App.router.get('mainAdminHighAvailabilityController'); + highAvailabilityController.enableRouterFederation(); + }, + /** * This method is called when user event to download configs for "All Clients" * is made from service action menu diff --git a/ambari-web/app/controllers/wizard/step6_controller.js b/ambari-web/app/controllers/wizard/step6_controller.js index 5952ed7219f..c17514768c5 100644 --- a/ambari-web/app/controllers/wizard/step6_controller.js +++ b/ambari-web/app/controllers/wizard/step6_controller.js @@ -250,7 +250,9 @@ App.WizardStep6Controller = Em.Controller.extend(App.HostComponentValidationMixi this.get('hosts').forEach(function (host) { host.checkboxes.filterProperty('isInstalled', false).forEach(function (checkbox) { if (checkbox.component === component) { - Em.set(checkbox, 'checked', checked); + if (!checkbox.isDisabled) { + Em.set(checkbox, 'checked', checked); + } } }); }); diff --git a/ambari-web/app/data/configs/wizards/router_federation_properties.js 
b/ambari-web/app/data/configs/wizards/router_federation_properties.js new file mode 100644 index 00000000000..c87f2eb4d4c --- /dev/null +++ b/ambari-web/app/data/configs/wizards/router_federation_properties.js @@ -0,0 +1,80 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +module.exports = + { + "routerFederationConfig": { + serviceName: 'MISC', + displayName: 'MISC', + configCategories: [ + App.ServiceConfigCategory.create({ name: 'HDFS', displayName: 'HDFS'}) + ], + sites: ['core-site', 'hdfs-rbf-site'], + configs: [ + { + "name": "dfs.federation.router.monitor.namenode", + "displayName": "dfs.federation.router.monitor.namenode", + "description": "RPC address for HDFS Services communication.", + "isReconfigurable": false, + "recommendedValue": "{{nameservice1}}.nn1, {{nameservice1}}.nn2, {{newNameservice}}.{{newNameNode1Index}},{{newNameservice}}.{{newNameNode2Index}}", + "value": "{{modifiedNameServices}}", + "category": "HDFS", + "filename": "hdfs-rbf-site", + "serviceName": 'MISC', + "isRouterConfigs" : true + }, + { + "name": "dfs.federation.router.default.nameserviceId", + "displayName": "dfs.federation.router.default.nameserviceId", + "description": "Nameservice identifier of the default subcluster to monitor.", + "isReconfigurable": false, + "recommendedValue": "{{nameservice1}}", + "value": "{{nameservice1}}", + "category": "HDFS", + "filename": "hdfs-rbf-site", + "serviceName": 'MISC', + "isRouterConfigs" : true + }, + { + "name": "zk-dt-secret-manager.zkAuthType", + "displayName": "zk-dt-secret-manager.zkAuthType", + "description": "Secret Manager Zookeeper Authentication Type", + "isReconfigurable": false, + "recommendedValue": "none", + "value": "none", + "category": "HDFS", + "filename": "hdfs-rbf-site", + "serviceName": 'MISC', + "isRouterConfigs" : true + }, + { + "name": "zk-dt-secret-manager.zkConnectionString", + "displayName": "zk-dt-secret-manager.zkConnectionString", + "description": "Secret Manager Zookeeper Connection String", + "isReconfigurable": false, + "recommendedValue": "zk1.example.com:2181,zk2.example.com:2181,zk3.example.com:2181", + "value": "{{zkAddress}}", + "category": "HDFS", + "filename": "hdfs-rbf-site", + "serviceName": 'MISC', + "isRouterConfigs" : true + } + ] + } + + }; \ No 
newline at end of file diff --git a/ambari-web/app/data/controller_route.js b/ambari-web/app/data/controller_route.js index 8f6f385bff3..914ee88f152 100644 --- a/ambari-web/app/data/controller_route.js +++ b/ambari-web/app/data/controller_route.js @@ -85,6 +85,10 @@ module.exports = [ wizardControllerName: App.router.get('nameNodeFederationWizardController.name'), route: 'main.services.enableNameNodeFederation' }, + { + wizardControllerName: App.router.get('routerFederationWizardController.name'), + route: 'main.services.enableRouterFederation' + }, { wizardControllerName: App.router.get('manageJournalNodeWizardController.name'), route: 'main.services.manageJournalNode' diff --git a/ambari-web/app/messages.js b/ambari-web/app/messages.js index 78626c1989d..350883769db 100644 --- a/ambari-web/app/messages.js +++ b/ambari-web/app/messages.js @@ -1727,6 +1727,28 @@ Em.I18n.translations = { 'admin.nameNodeFederation.wizard.step4.task16.title': 'Start NameNode', 'admin.nameNodeFederation.wizard.step4.task17.title': 'Restart Required Services', + 'admin.routerFederation.button.enable':'Add DFSRouter', + 'admin.routerFederation.wizard.header': 'Add HDFS Router', + 'admin.routerFederation.closePopup': 'Are you sure you want to quit?', + 'admin.routerFederation.closePopup2': 'Add HDFS Router Wizard is in progress. 
Are you sure you want to exit the wizard?', + 'admin.routerFederation.wizard.step1.header': 'Get Started', + 'admin.routerFederation.wizard.step1.body':'This wizard will walk you through the process of enabling DFSRouter to manage federated cluster.', + 'admin.routerFederation.wizard.step2.header': 'Select Hosts', + 'admin.routerFederation.wizard.step2.body': 'Select hosts running the Routers for {0}', + 'admin.routerFederation.wizard.step3.header': 'Review', + 'admin.routerFederation.wizard.step3.addRouter': 'Add Router: ', + 'admin.routerFederation.wizard.step3.toBeInstalled': 'Router TO BE INSTALLED', + 'admin.routerFederation.wizard.step3.confirm.config.body': '
' + + '

Review Configuration Changes.

' + + 'The following lists the configuration changes that will be made by the Wizard to enable Router. This information is for review only and is not editable' + + '
', + 'admin.routerFederation.wizard.step4.header': 'Configure Router', + 'admin.routerFederation.wizard,step4.save.configuration.note':'This configuration is created by Enable DFSRouter wizard', + 'admin.routerFederation.wizard.step4.notice.inProgress':'Please wait while your Router is being deployed.', + 'admin.routerFederation.wizard.step4.notice.completed':'Add DFSRouter Wizard has been completed successfully.', + 'admin.routerFederation.wizard.step4.task0.title': 'Install Routers', + 'admin.routerFederation.wizard.step4.task1.title': 'Start Routers', + 'admin.security.title':'Kerberos security has not been enabled', 'admin.security.enabled': 'Kerberos security is enabled', 'admin.security.disabled': 'Kerberos security is disabled', diff --git a/ambari-web/app/mixins/wizard/assign_master_components.js b/ambari-web/app/mixins/wizard/assign_master_components.js index 25bf86c38ba..53b566212e9 100644 --- a/ambari-web/app/mixins/wizard/assign_master_components.js +++ b/ambari-web/app/mixins/wizard/assign_master_components.js @@ -1120,6 +1120,9 @@ App.AssignMasterComponents = Em.Mixin.create(App.HostComponentValidationMixin, A } newMaster.set("selectedHost", suggestedHost); + if(!currentMasters.get("lastObject.serviceComponentId")){ + currentMasters.get("lastObject").set("serviceComponentId",0) + } newMaster.set("serviceComponentId", (currentMasters.get("lastObject.serviceComponentId") + 1)); this.get("selectedServicesMasters").insertAt(this.get("selectedServicesMasters").indexOf(lastMaster) + 1, newMaster); diff --git a/ambari-web/app/models/host_component.js b/ambari-web/app/models/host_component.js index 428bca3a16a..c404e5691ba 100644 --- a/ambari-web/app/models/host_component.js +++ b/ambari-web/app/models/host_component.js @@ -575,6 +575,12 @@ App.HostComponentActionMap = { cssClass: 'icon icon-sitemap', disabled: !App.get('isHaEnabled') || App.get('allHostNames.length') < 4 }, + TOGGLE_RBF_FEDERATION: { + action: 'openRouterFederationWizard', + label: 
Em.I18n.t('admin.routerFederation.button.enable'), + cssClass: 'icon icon-sitemap', + disabled: !App.get('hasNameNodeFederation') + }, UPDATE_REPLICATION: { action: 'updateHBaseReplication', customCommand: 'UPDATE_REPLICATION', diff --git a/ambari-web/app/models/service.js b/ambari-web/app/models/service.js index 06d108297e0..a1bb73ba37f 100644 --- a/ambari-web/app/models/service.js +++ b/ambari-web/app/models/service.js @@ -117,7 +117,7 @@ App.Service = DS.Model.extend({ serviceTypes: function() { var typeServiceMap = { GANGLIA: ['MONITORING'], - HDFS: ['HA_MODE', 'FEDERATION'], + HDFS: ['HA_MODE', 'FEDERATION', 'DFSRouter'], YARN: ['HA_MODE'], RANGER: ['HA_MODE'], HAWQ: ['HA_MODE'] diff --git a/ambari-web/app/router.js b/ambari-web/app/router.js index c0a754a7eba..4d96cee6cdc 100644 --- a/ambari-web/app/router.js +++ b/ambari-web/app/router.js @@ -665,7 +665,7 @@ App.Router = Em.Router.extend({ }), sortedMappedVersions = mappedVersions.sort(), latestVersion = sortedMappedVersions[sortedMappedVersions.length-1].replace(/[^\d.-]/g, ''); - window.location.replace(App.appURLRoot + 'views/ADMIN_VIEW/' + latestVersion + '/INSTANCE/#/'); + App.replaceWindowLocation(App.appURLRoot + 'views/ADMIN_VIEW/' + latestVersion + '/INSTANCE/#/') } }, diff --git a/ambari-web/app/routes/dfsrouter_federation_routes.js b/ambari-web/app/routes/dfsrouter_federation_routes.js new file mode 100644 index 00000000000..bc7f8388e9a --- /dev/null +++ b/ambari-web/app/routes/dfsrouter_federation_routes.js @@ -0,0 +1,166 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +var App = require('app'); + +module.exports = App.WizardRoute.extend({ + route: '/NameNode/federation/routerBasedFederation', + + breadcrumbs: { + label: Em.I18n.t('admin.routerFederation.wizard.header') + }, + + enter: function (router, transition) { + var routerFederationWizardController = router.get('routerFederationWizardController'); + routerFederationWizardController.dataLoading().done(function () { + //Set HDFS as current service + App.router.set('mainServiceItemController.content', App.Service.find().findProperty('serviceName', 'HDFS')); + App.router.get('updateController').set('isWorking', false); + var popup = App.ModalPopup.show({ + classNames: ['wizard-modal-wrapper'], + modalDialogClasses: ['modal-xlg'], + header: Em.I18n.t('admin.routerFederation.wizard.header'), + bodyClass: App.RouterFederationWizardView.extend({ + controller: routerFederationWizardController + }), + primary: Em.I18n.t('form.cancel'), + showFooter: false, + secondary: null, + + onClose: function () { + var routerFederationWizardController = router.get('routerFederationWizardController'), + currStep = routerFederationWizardController.get('currentStep'); + App.showConfirmationPopup(function () { + routerFederationWizardController.resetOnClose(routerFederationWizardController, 'main.services.index'); + }, Em.I18n.t(parseInt(currStep) === 4 ? 
'admin.routerFederation.closePopup2' : 'admin.routerFederation.closePopup')); + }, + didInsertElement: function () { + this._super(); + this.fitHeight(); + } + }); + routerFederationWizardController.set('popup', popup); + var currentClusterStatus = App.clusterStatus.get('value'); + if (currentClusterStatus) { + switch (currentClusterStatus.clusterState) { + case 'RBF_FEDERATION_DEPLOY' : + routerFederationWizardController.setCurrentStep(currentClusterStatus.localdb.RouterFederationWizard.currentStep); + break; + default: + var currStep = App.router.get('routerFederationWizardController.currentStep'); + routerFederationWizardController.setCurrentStep(currStep); + break; + } + } + Em.run.next(function () { + App.router.get('wizardWatcherController').setUser(routerFederationWizardController.get('name')); + router.transitionTo('step' + routerFederationWizardController.get('currentStep')); + }); + }); + }, + + step1: Em.Route.extend({ + route: '/step1', + connectOutlets: function (router) { + var controller = router.get('routerFederationWizardController'); + controller.dataLoading().done(function () { + controller.setCurrentStep('1'); + controller.connectOutlet('routerFederationWizardStep1', controller.get('content')); + }) + }, + unroutePath: function () { + return false; + }, + next: function (router) { + var controller = router.get('routerFederationWizardController'); + router.transitionTo('step2'); + } + }), + + step2: Em.Route.extend({ + route: '/step2', + connectOutlets: function (router) { + var controller = router.get('routerFederationWizardController'); + controller.dataLoading().done(function () { + controller.setCurrentStep('2'); + controller.loadAllPriorSteps(); + controller.connectOutlet('routerFederationWizardStep2', controller.get('content')); + }) + }, + unroutePath: function () { + return false; + }, + next: function (router) { + var wizardController = router.get('routerFederationWizardController'); + var stepController = 
router.get('routerFederationWizardStep2Controller'); + wizardController.saveMasterComponentHosts(stepController); + router.transitionTo('step3'); + }, + back: function (router) { + router.transitionTo('step1'); + } + }), + + step3: Em.Route.extend({ + route: '/step3', + connectOutlets: function (router) { + var controller = router.get('routerFederationWizardController'); + controller.dataLoading().done(function () { + controller.setCurrentStep('3'); + controller.loadAllPriorSteps(); + controller.connectOutlet('routerFederationWizardStep3', controller.get('content')); + }) + }, + unroutePath: function () { + return false; + }, + next: function (router) { + var controller = router.get('routerFederationWizardController'); + var stepController = router.get('routerFederationWizardStep3Controller'); + controller.saveServiceConfigProperties(stepController); + router.transitionTo('step4'); + }, + back: Em.Router.transitionTo('step2') + }), + + step4: Em.Route.extend({ + route: '/step4', + connectOutlets: function (router) { + var controller = router.get('routerFederationWizardController'); + controller.dataLoading().done(function () { + controller.setCurrentStep('4'); + controller.setLowerStepsDisable(4); + controller.loadAllPriorSteps(); + controller.connectOutlet('routerFederationWizardStep4', controller.get('content')); + }) + }, + unroutePath: function (router, path) { + // allow user to leave route if wizard has finished + if (router.get('routerFederationWizardController').get('isFinished')) { + this._super(router, path); + } else { + return false; + } + }, + next: function (router) { + var controller = router.get('routerFederationWizardController'); + controller.resetOnClose(controller, 'main.services.index'); + } + }) + +}); \ No newline at end of file diff --git a/ambari-web/app/routes/main.js b/ambari-web/app/routes/main.js index bc2c542c8a6..6567a92f0d9 100644 --- a/ambari-web/app/routes/main.js +++ b/ambari-web/app/routes/main.js @@ -893,6 +893,8 @@ 
module.exports = Em.Route.extend(App.RouterRedirections, { enableNameNodeFederation: require('routes/namenode_federation_routes'), + enableRouterFederation : require('routes/dfsrouter_federation_routes'), + addHawqStandby: require('routes/add_hawq_standby_routes'), removeHawqStandby: require('routes/remove_hawq_standby_routes'), diff --git a/ambari-web/app/templates/main/admin/federation/routerBasedFederation/step1.hbs b/ambari-web/app/templates/main/admin/federation/routerBasedFederation/step1.hbs new file mode 100644 index 00000000000..1576e900d44 --- /dev/null +++ b/ambari-web/app/templates/main/admin/federation/routerBasedFederation/step1.hbs @@ -0,0 +1,28 @@ +{{! +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +}} +
+

{{t admin.routerFederation.wizard.step1.header}}

+
+

{{t admin.routerFederation.wizard.step1.body}}

+
+
+ \ No newline at end of file diff --git a/ambari-web/app/templates/main/admin/federation/routerBasedFederation/step3.hbs b/ambari-web/app/templates/main/admin/federation/routerBasedFederation/step3.hbs new file mode 100644 index 00000000000..41e14ab3b01 --- /dev/null +++ b/ambari-web/app/templates/main/admin/federation/routerBasedFederation/step3.hbs @@ -0,0 +1,56 @@ +{{! +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +}} +
+

{{t admin.highAvailability.wizard.step3.header}}

+

{{t admin.highAvailability.wizard.step3.confirm.host.body}}

+
+
+
+
+ + {{#each host in view.addRouters}} + + + + + + {{/each}} +
{{t admin.routerFederation.wizard.step3.addRouter}}{{host}} {{t admin.routerFederation.wizard.step3.toBeInstalled}}
+
+
+
+ {{#if controller.isLoaded}} + {{{t admin.routerFederation.wizard.step3.confirm.config.body}}} + {{view App.ServiceConfigView isNotEditableBinding="controller.isNotEditable"}} + {{else}} + {{view App.SpinnerView}} + {{/if}} +
+
+
+
+ + \ No newline at end of file diff --git a/ambari-web/app/templates/main/admin/federation/routerBasedFederation/step4.hbs b/ambari-web/app/templates/main/admin/federation/routerBasedFederation/step4.hbs new file mode 100644 index 00000000000..79f070ab5d8 --- /dev/null +++ b/ambari-web/app/templates/main/admin/federation/routerBasedFederation/step4.hbs @@ -0,0 +1,19 @@ +{{! +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +}} +{{template "templates/common/progress"}} + diff --git a/ambari-web/app/templates/main/admin/federation/routerBasedFederation/wizard.hbs b/ambari-web/app/templates/main/admin/federation/routerBasedFederation/wizard.hbs new file mode 100644 index 00000000000..6f81321fd75 --- /dev/null +++ b/ambari-web/app/templates/main/admin/federation/routerBasedFederation/wizard.hbs @@ -0,0 +1,35 @@ +{{! +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +}} + + \ No newline at end of file diff --git a/ambari-web/app/utils/db.js b/ambari-web/app/utils/db.js index 78bc99af57d..701a70cca27 100644 --- a/ambari-web/app/utils/db.js +++ b/ambari-web/app/utils/db.js @@ -49,6 +49,7 @@ var InitialData = { 'ActivateHawqStandbyWizard': {}, 'RAHighAvailabilityWizard': {}, 'NameNodeFederationWizard': {}, + 'RouterFederationWizard': {}, 'RollbackHighAvailabilityWizard': {}, 'MainAdminStackAndUpgrade': {}, 'KerberosDisable': {}, diff --git a/ambari-web/app/utils/helper.js b/ambari-web/app/utils/helper.js index d05724d4d40..7cc652e31a2 100644 --- a/ambari-web/app/utils/helper.js +++ b/ambari-web/app/utils/helper.js @@ -1101,3 +1101,16 @@ App.logger = function() { }; }(); + +/** + * Replace current window location. + * `window.location.replace` is a native function and cannot be intercepted by Sinon, + * it needs to be intercepted manually. See https://stackoverflow.com/a/52141931/14792586. + * + * Simple function, no test cases available. 
+ * + * @param {string} location new location + **/ +App.replaceWindowLocation = function(location) { + window.location.replace(location); +} \ No newline at end of file diff --git a/ambari-web/app/views.js b/ambari-web/app/views.js index c11d1d63322..30aa6bc7ee3 100644 --- a/ambari-web/app/views.js +++ b/ambari-web/app/views.js @@ -224,6 +224,11 @@ require('views/main/admin/federation/step1_view'); require('views/main/admin/federation/step2_view'); require('views/main/admin/federation/step3_view'); require('views/main/admin/federation/step4_view'); +require('views/main/admin/federation/routerBasedFederation/wizard_view'); +require('views/main/admin/federation/routerBasedFederation/step1_view'); +require('views/main/admin/federation/routerBasedFederation/step2_view'); +require('views/main/admin/federation/routerBasedFederation/step3_view'); +require('views/main/admin/federation/routerBasedFederation/step4_view'); require('views/main/admin/serviceAccounts_view'); require('views/main/admin/stack_upgrade/upgrade_wizard_view'); require('views/main/admin/stack_upgrade/upgrade_version_box_view'); diff --git a/ambari-web/app/views/application.js b/ambari-web/app/views/application.js index a4b3b14b1f6..5937f1ac356 100644 --- a/ambari-web/app/views/application.js +++ b/ambari-web/app/views/application.js @@ -48,13 +48,26 @@ App.ApplicationView = Em.View.extend({ */ initNavigationBar: function () { if (App.get('router.mainController.isClusterDataLoaded')) { - $('body').on('DOMNodeInserted', '.navigation-bar', () => { - $('.navigation-bar').navigationBar({ - fitHeight: true, - collapseNavBarClass: 'icon-double-angle-left', - expandNavBarClass: 'icon-double-angle-right' - }); - $('body').off('DOMNodeInserted', '.navigation-bar'); + const observer = new MutationObserver(mutations => { + var targetNode + if (mutations.some((mutation) => mutation.type === 'childList' && (targetNode = $('.navigation-bar')).length)) { + observer.disconnect(); + targetNode.navigationBar({ + 
fitHeight: true, + collapseNavBarClass: 'icon-double-angle-left', + expandNavBarClass: 'icon-double-angle-right', + }); + } + }); + + setTimeout(() => { + // remove observer if selected element is not found in 10secs. + observer.disconnect(); + }, 10000) + + observer.observe(document.body, { + childList: true, + subtree: true }); } }.observes('App.router.mainController.isClusterDataLoaded') diff --git a/ambari-web/app/views/main/admin/federation/routerBasedFederation/step1_view.js b/ambari-web/app/views/main/admin/federation/routerBasedFederation/step1_view.js new file mode 100644 index 00000000000..cb4a2086a97 --- /dev/null +++ b/ambari-web/app/views/main/admin/federation/routerBasedFederation/step1_view.js @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +var App = require('app'); + +App.RouterFederationWizardStep1View = Em.View.extend({ + + templateName: require('templates/main/admin/federation/routerBasedFederation/step1'), + + didInsertElement: function() { + } + +}); \ No newline at end of file diff --git a/ambari-web/app/views/main/admin/federation/routerBasedFederation/step2_view.js b/ambari-web/app/views/main/admin/federation/routerBasedFederation/step2_view.js new file mode 100644 index 00000000000..07c4f3bab00 --- /dev/null +++ b/ambari-web/app/views/main/admin/federation/routerBasedFederation/step2_view.js @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +var App = require('app'); + +App.RouterFederationWizardStep2View = App.AssignMasterComponentsView.extend({ + title: Em.I18n.t('admin.routerFederation.wizard.step2.header'), + alertMessage: Em.computed.i18nFormat('admin.routerFederation.wizard.step2.body', 'controller.content.nameServiceId') +}); \ No newline at end of file diff --git a/ambari-web/app/views/main/admin/federation/routerBasedFederation/step3_view.js b/ambari-web/app/views/main/admin/federation/routerBasedFederation/step3_view.js new file mode 100644 index 00000000000..d79b9f4e0b8 --- /dev/null +++ b/ambari-web/app/views/main/admin/federation/routerBasedFederation/step3_view.js @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +var App = require('app'); + +App.RouterFederationWizardStep3View = Em.View.extend({ + + templateName: require('templates/main/admin/federation/routerBasedFederation/step3'), + + didInsertElement: function () { + this.get('controller').loadStep(); + }, + addRouters: function () { + return this.get('controller.content.masterComponentHosts').filterProperty('component', 'ROUTER').filterProperty('isInstalled', false).mapProperty('hostName'); + }.property('controller.content.masterComponentHosts'), +}); \ No newline at end of file diff --git a/ambari-web/app/views/main/admin/federation/routerBasedFederation/step4_view.js b/ambari-web/app/views/main/admin/federation/routerBasedFederation/step4_view.js new file mode 100644 index 00000000000..b4ef26551f6 --- /dev/null +++ b/ambari-web/app/views/main/admin/federation/routerBasedFederation/step4_view.js @@ -0,0 +1,35 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +var App = require('app'); + +App.RouterFederationWizardStep4View = App.HighAvailabilityProgressPageView.extend({ + + templateName: require('templates/main/admin/federation/routerBasedFederation/step4'), + + headerTitle: Em.I18n.t('admin.routerFederation.wizard.step4.header'), + + noticeInProgress: Em.I18n.t('admin.routerFederation.wizard.step4.notice.inProgress'), + + noticeCompleted: Em.I18n.t('admin.routerFederation.wizard.step4.notice.completed'), + + submitButtonText: Em.I18n.t('common.complete'), + + labelWidth: 'col-md-5' + +}); \ No newline at end of file diff --git a/ambari-web/app/views/main/admin/federation/routerBasedFederation/wizard_view.js b/ambari-web/app/views/main/admin/federation/routerBasedFederation/wizard_view.js new file mode 100644 index 00000000000..6c15e364aa7 --- /dev/null +++ b/ambari-web/app/views/main/admin/federation/routerBasedFederation/wizard_view.js @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +var App = require('app'); + +App.RouterFederationWizardView = Em.View.extend(App.WizardMenuMixin, App.WizardHostsLoading, { + + templateName: require('templates/main/admin/federation/routerBasedFederation/wizard'), + + didInsertElement: function() { + var currentStep = this.get('controller.currentStep'); + if (currentStep > 3) { + this.get('controller').setLowerStepsDisable(currentStep); + } + } +}); \ No newline at end of file diff --git a/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js b/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js index 386ec4db937..b7c048a4201 100644 --- a/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js +++ b/ambari-web/app/views/main/admin/stack_upgrade/versions_view.js @@ -230,7 +230,7 @@ App.MainAdminStackVersionsView = Em.View.extend({ }), sortedMappedVersions = mappedVersions.sort(), latestVersion = sortedMappedVersions[sortedMappedVersions.length-1].replace(/[^\d.-]/g, ''); - window.location.replace(App.appURLRoot + 'views/ADMIN_VIEW/' + latestVersion + '/INSTANCE/#!/stackVersions'); + App.replaceWindowLocation(App.appURLRoot + 'views/ADMIN_VIEW/' + latestVersion + '/INSTANCE/#/') } }); }, diff --git a/ambari-web/app/views/main/service/info/metrics_view.js b/ambari-web/app/views/main/service/info/metrics_view.js index e61a62883d6..b4c3cbac0a8 100644 --- a/ambari-web/app/views/main/service/info/metrics_view.js +++ b/ambari-web/app/views/main/service/info/metrics_view.js @@ -280,27 +280,40 @@ App.MainServiceInfoMetricsView = Em.View.extend(App.Persist, App.TimeRangeMixin, makeSortable: function (selector, isNSLayout) { var self = this; var controller = this.get('controller'); - $('html').on('DOMNodeInserted', selector, function () { - $(this).sortable({ - items: "> div", - cursor: "move", - tolerance: "pointer", - scroll: false, - update: function () { - var layout = isNSLayout ? 
controller.get('selectedNSWidgetLayout') : controller.get('activeWidgetLayout'); - var widgets = misc.sortByOrder($(selector + " .widget").map(function () { - return this.id; - }), layout.get('widgets').toArray()); - controller.saveWidgetLayout(widgets, layout); - }, - activate: function () { - self.set('isMoving', true); - }, - deactivate: function () { - self.set('isMoving', false); - } - }).disableSelection(); - $('html').off('DOMNodeInserted', selector); + const observer = new MutationObserver(mutations => { + var targetNode + if (mutations.some((mutation) => mutation.type === 'childList' && (targetNode = $(selector)).length)) { + observer.disconnect(); + $(targetNode).sortable({ + items: "> div", + cursor: "move", + tolerance: "pointer", + scroll: false, + update: function () { + var layout = isNSLayout ? controller.get('selectedNSWidgetLayout') : controller.get('activeWidgetLayout'); + var widgets = misc.sortByOrder($(selector + " .widget").map(function () { + return this.id; + }), layout.get('widgets').toArray()); + controller.saveWidgetLayout(widgets, layout); + }, + activate: function () { + self.set('isMoving', true); + }, + deactivate: function () { + self.set('isMoving', false); + } + }).disableSelection(); + } + }); + + setTimeout(() => { + // remove observer if selected element is not found in 10secs. 
+ observer.disconnect(); + }, 10000) + + observer.observe(document.body, { + childList: true, + subtree: true }); } }); diff --git a/ambari-web/app/views/main/service/item.js b/ambari-web/app/views/main/service/item.js index bb61663e0e3..b887e1eae14 100644 --- a/ambari-web/app/views/main/service/item.js +++ b/ambari-web/app/views/main/service/item.js @@ -241,6 +241,13 @@ App.MainServiceItemView = Em.View.extend(App.HiveInteractiveCheck, { break; } } + if (service.get('serviceTypes').contains('DFSRouter') && App.isAuthorized('SERVICE.ENABLE_HA')) { + switch (service.get('serviceName')) { + case 'HDFS': + options.push(actionMap.TOGGLE_RBF_FEDERATION); + break; + } + } if (serviceCheckSupported) { options.push(actionMap.RUN_SMOKE_TEST); } diff --git a/ambari-web/karma.conf.js b/ambari-web/karma.conf.js index b19287f779e..b9e9212407b 100644 --- a/ambari-web/karma.conf.js +++ b/ambari-web/karma.conf.js @@ -23,7 +23,7 @@ module.exports = function(config) { basePath: '', plugins: [ - 'karma-phantomjs-launcher', + 'karma-chrome-launcher', 'karma-mocha', 'karma-chai', 'karma-sinon', @@ -159,6 +159,13 @@ module.exports = function(config) { // enable / disable watching file and executing tests whenever any file changes autoWatch: true, + + customLaunchers: { + ChromeHeadlessCustom: { + base: 'ChromeHeadless', + flags: ['--no-sandbox', '--disable-gpu', '--disable-translate', '--disable-extensions'] + } + }, // Start these browsers, currently available: // - Chrome @@ -167,7 +174,7 @@ module.exports = function(config) { // - Opera (has to be installed with `npm install karma-opera-launcher`) // - Safari (only Mac; has to be installed with `npm install karma-safari-launcher`) // - IE (only Windows; has to be installed with `npm install karma-ie-launcher`) - browsers: ['PhantomJS'], + browsers: ['ChromeHeadlessCustom'], // If browser does not capture in given timeout [ms], kill it captureTimeout: 60000, diff --git a/ambari-web/package.json b/ambari-web/package.json index 
7fcc59dc820..b4203d97c80 100644 --- a/ambari-web/package.json +++ b/ambari-web/package.json @@ -27,7 +27,7 @@ "chai": "~3.5.0", "express": "2.5.8", "karma": ">=0.11.14", - "karma-phantomjs-launcher": "1.0.2", + "karma-chrome-launcher": "3.2.0", "karma-babel-preprocessor": "^6.0.1", "karma-chai": "~0.1.0", "karma-commonjs-require": "~0.0.3", @@ -36,7 +36,6 @@ "karma-mocha": "0.1.1", "karma-sinon": "~1.0.2", "mocha": "2.5.3", - "phantomjs": "~2.1.0", "sinon": "=1.7.3", "sinon-chai": "~2.8.0" }, diff --git a/ambari-web/test/controllers/main/service/manage_config_groups_controller_test.js b/ambari-web/test/controllers/main/service/manage_config_groups_controller_test.js index db3151ebec7..2995669d965 100644 --- a/ambari-web/test/controllers/main/service/manage_config_groups_controller_test.js +++ b/ambari-web/test/controllers/main/service/manage_config_groups_controller_test.js @@ -511,6 +511,7 @@ describe('App.ManageConfigGroupsController', function() { c._onLoadPropertiesSuccess(data, null, params); expect(JSON.stringify(c.get('configGroups'))).to.equal(JSON.stringify([ Em.Object.create({ + name: 'group1', properties: [ { name: 'prop1', @@ -522,32 +523,31 @@ describe('App.ManageConfigGroupsController', function() { value: 'val2', type: 'type1' } - ], - name: 'group1' + ] }), Em.Object.create({ + name: 'group2', properties: [ { name: 'prop3', value: 'val3', type: 'type1' } - ], - name: 'group2' + ] }), Em.Object.create({ + name: 'group3', properties: [ { name: 'prop4', value: 'val4', type: 'type2' } - ], - name: 'group3' + ] }), Em.Object.create({ - properties: [], - name: 'group4' + name: 'group4', + properties: [] }) ])); }); diff --git a/ambari-web/test/models/service_test.js b/ambari-web/test/models/service_test.js index c600af67817..fdcd0eb08b6 100644 --- a/ambari-web/test/models/service_test.js +++ b/ambari-web/test/models/service_test.js @@ -137,7 +137,7 @@ describe('App.Service', function () { }, { serviceName: 'HDFS', - result: ['HA_MODE', 'FEDERATION'] + 
result: ['HA_MODE', 'FEDERATION', 'DFSRouter'] }, { serviceName: 'YARN', diff --git a/ambari-web/test/router_test.js b/ambari-web/test/router_test.js index c177bbf7490..88ee910492f 100644 --- a/ambari-web/test/router_test.js +++ b/ambari-web/test/router_test.js @@ -88,10 +88,10 @@ describe('App.Router', function () { describe('#adminViewInfoSuccessCallback', function () { beforeEach(function () { - sinon.stub(window.location, 'replace', Em.K); + sinon.stub(App, 'replaceWindowLocation', Em.K); }); afterEach(function () { - window.location.replace.restore(); + App.replaceWindowLocation.restore(); }); var tests = [{ @@ -138,7 +138,7 @@ describe('App.Router', function () { tests.forEach(function (data, index) { it('should redirect to the latest version of admin view ("' + data.expected + '") #' + (index + 1), function () { router.adminViewInfoSuccessCallback(data.mockData); - expect(window.location.replace.calledWith(data.expected)).to.be.true; + expect(App.replaceWindowLocation.calledWith(data.expected)).to.be.true; }); }); }); diff --git a/ambari-web/test/views/main/admin/stack_upgrade/version_view_test.js b/ambari-web/test/views/main/admin/stack_upgrade/version_view_test.js index b274c32fb3d..11058e779e0 100644 --- a/ambari-web/test/views/main/admin/stack_upgrade/version_view_test.js +++ b/ambari-web/test/views/main/admin/stack_upgrade/version_view_test.js @@ -472,11 +472,11 @@ describe('App.mainAdminStackVersionsView', function () { }; before(function () { sinon.spy(App, 'showConfirmationPopup'); - sinon.stub(window.location, 'replace', Em.K); + sinon.stub(App, 'replaceWindowLocation', Em.K); }); after(function () { App.showConfirmationPopup.restore(); - window.location.replace.restore(); + App.replaceWindowLocation.restore(); }); beforeEach(function () { @@ -494,7 +494,7 @@ describe('App.mainAdminStackVersionsView', function () { popup.onPrimary(); var args = testHelpers.findAjaxRequest('name', 'ambari.service.load_server_version'); expect(args[0]).exists; - 
expect(window.location.replace.calledWith('/views/ADMIN_VIEW/2.1.0/INSTANCE/#!/stackVersions')).to.be.true; + expect(App.replaceWindowLocation.calledWith('/views/ADMIN_VIEW/2.1.0/INSTANCE/#/')).to.be.true; }); }); @@ -516,11 +516,11 @@ describe('App.mainAdminStackVersionsView', function () { }; before(function () { sinon.spy(App, 'showConfirmationPopup'); - sinon.stub(window.location, 'replace', Em.K); + sinon.stub(App, 'replaceWindowLocation', Em.K); }); after(function () { App.showConfirmationPopup.restore(); - window.location.replace.restore(); + App.replaceWindowLocation.restore(); }); beforeEach(function () { @@ -538,7 +538,7 @@ describe('App.mainAdminStackVersionsView', function () { popup.onPrimary(); var args = testHelpers.findAjaxRequest('name', 'ambari.service.load_server_version'); expect(args[0]).exists; - expect(window.location.replace.calledWith('/views/ADMIN_VIEW/2.1.0/INSTANCE/#!/stackVersions')).to.be.true; + expect(App.replaceWindowLocation.calledWith('/views/ADMIN_VIEW/2.1.0/INSTANCE/#/')).to.be.true; }); }); diff --git a/ambari-web/test/views/main/service/info/metrics_view_test.js b/ambari-web/test/views/main/service/info/metrics_view_test.js index 99442f17f8c..558091182ec 100644 --- a/ambari-web/test/views/main/service/info/metrics_view_test.js +++ b/ambari-web/test/views/main/service/info/metrics_view_test.js @@ -214,19 +214,19 @@ describe('App.MainServiceInfoMetricsView', function() { mock.sortable.restore(); }); - it("on() should be called", function() { + it("MutationObserver callback should be called", function(done) { view.makeSortable('#widget_layout'); - expect(mock.on.calledWith('DOMNodeInserted', '#widget_layout')).to.be.true; - }); - - it("sortable() should be called", function() { - view.makeSortable('#widget_layout'); - expect(mock.sortable.called).to.be.true; - }); - - it("off() should be called", function() { - view.makeSortable('#widget_layout'); - expect(mock.off.calledWith('DOMNodeInserted', '#widget_layout')).to.be.true; + 
const callback = function () { + expect(document.querySelector('#widget_layout')).to.not.be.null; + observer.disconnect(); + done(); + }; + const observer = new MutationObserver(callback); + const body = document.body; + observer.observe(body, { childList: true, subtree: true }); + const elementWidget = document.createElement('div'); + elementWidget.id='widget_layout'; + body.appendChild(elementWidget); }); }); @@ -333,4 +333,4 @@ describe('App.MainServiceInfoMetricsView', function() { }); }); -}); \ No newline at end of file +});