This repository has been archived by the owner on Mar 27, 2022. It is now read-only.

Clean libexec #111

Open · wants to merge 1 commit into master
24 changes: 12 additions & 12 deletions libexec/bdutil_helpers.sh
@@ -22,19 +22,19 @@ function bdutil_date() {
 # Simple wrapper around "echo" so that it's easy to add log messages with a
 # date/time prefix.
 function loginfo() {
-  echo "$(bdutil_date): ${@}"
+  echo "$(bdutil_date): ${*}"
 }

 # Simple wrapper around "echo" controllable with ${VERBOSE_MODE}.
 function logdebug() {
   if (( ${VERBOSE_MODE} )); then
-    loginfo ${@}
+    loginfo "${@}"
   fi
 }

 # Simple wrapper to pass errors to stderr.
 function logerror() {
-  loginfo ${@} >&2
+  loginfo "${@}" >&2
 }

 # Helper to overwrite the contents of file specified by $1 with strings
@@ -43,7 +43,7 @@ function logerror() {
 # of stdin/stdout redirection characters unclear.
 # Example: overwrite_file_with_strings foo.txt hello world
 function overwrite_file_with_strings() {
-  local contents=${@:2}
+  local contents=${*:2}
   local filename=$1
   echo "Overwriting ${filename} with contents '${contents}'"
   echo "${contents}" > ${filename}
@@ -54,7 +54,7 @@ function overwrite_file_with_strings() {
 # code.
 # Args: "$@" is the command to run.
 function run_with_retries() {
-  local cmd="$@"
+  local cmd="$*"
   echo "About to run '${cmd}' with retries..."

   local update_succeeded=0
@@ -240,7 +240,7 @@ function tcp_port_is_free() {

 # True if all specified IPv4 tcp ports are available.
 function all_tcp_ports_are_free() {
-  ports="$@"
+  ports="$*"
   for p in $ports; do
     if ! tcp_port_is_free $p; then
       return 1
@@ -251,14 +251,14 @@ function all_tcp_ports_are_free() {

 # Prints the list of ports in use.
 function tcp_ports_in_use() {
-  ports="$@"
+  ports="$*"
   local in_use=()
   for p in $ports; do
     if ! tcp_port_is_free $p; then
       in_use+=($p)
     fi
   done
-  echo ${in_use[@]}
+  echo "${in_use[@]}"
 }

 # Wait until all of the specified IPv4 tcp ports are unused.
@@ -275,7 +275,7 @@ function wait_until_ports_free() {
     max_wait_seconds=$2
     shift 2
   fi
-  ports="$@"
+  ports="$*"
   while (( now < start_time + max_wait_seconds )); do
     if all_tcp_ports_are_free $ports; then
       return 0
@@ -299,12 +299,12 @@ function wait_until_ports_free_and_report() {
   echo "Waiting for ports that are not yet available: ${in_use}."
   local port_wait_start_time=$(date '+%s')
   local port_wait_status=0
-  wait_until_ports_free ${PORTS[@]} || port_wait_status=$?
+  wait_until_ports_free "${PORTS[@]}" || port_wait_status=$?
   local port_wait_end_time=$(date '+%s')
   local port_wait_time=$(( port_wait_end_time - port_wait_start_time ))
   if [[ ${port_wait_time} -gt 0 ]]; then
-    echo "Wait time in seconds for ports ${PORTS[@]} was ${port_wait_time}."
-    local still_in_use=$(tcp_ports_in_use ${PORTS[@]})
+    echo "Wait time in seconds for ports ${PORTS[*]} was ${port_wait_time}."
+    local still_in_use=$(tcp_ports_in_use "${PORTS[@]}")
     if [[ "${still_in_use}" != "" ]]; then
       echo "Ports still in use: ${still_in_use}."
     fi
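
All of the quoting changes above hinge on the difference between "$*" and "$@". A minimal standalone sketch (not part of this PR) of the two behaviors:

    # "$*" joins all arguments into a single word; "$@" preserves each
    # argument as its own word, even when an argument contains spaces.
    function show_star() { echo "one word: $*"; }
    function show_at() {
      for arg in "$@"; do
        echo "separate word: ${arg}"
      done
    }

    show_star "hello world" 8080   # -> one word: hello world 8080
    show_at "hello world" 8080     # -> separate word: hello world
                                   # -> separate word: 8080

That is why the commit uses "$*" where a single string is wanted (loginfo, run_with_retries) and "${@}" where the arguments must be forwarded intact (logdebug, logerror, wait_until_ports_free).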
6 changes: 3 additions & 3 deletions libexec/configure_hadoop.sh
@@ -33,7 +33,7 @@ if ! declare -p WORKERS | grep -q '^declare \-a'; then
   WORKERS=(${WORKERS})
 fi

-echo ${WORKERS[@]} | tr ' ' '\n' > ${HADOOP_CONF_DIR}/slaves
+echo "${WORKERS[@]}" | tr ' ' '\n' > ${HADOOP_CONF_DIR}/slaves
 echo ${MASTER_HOSTNAME} > ${HADOOP_CONF_DIR}/masters

 # Basic configuration not related to GHFS or HDFS.
@@ -96,8 +96,8 @@ if [[ ${#MOUNTED_DISKS[@]} -eq 0 ]]; then
   MOUNTED_DISKS=('')
 fi

-MAPRED_LOCAL_DIRS="${MOUNTED_DISKS[@]/%//hadoop/mapred/local}"
-NODEMANAGER_LOCAL_DIRS="${MOUNTED_DISKS[@]/%//hadoop/yarn/nm-local-dir}"
+MAPRED_LOCAL_DIRS="${MOUNTED_DISKS[*]/%//hadoop/mapred/local}"
+NODEMANAGER_LOCAL_DIRS="${MOUNTED_DISKS[*]/%//hadoop/yarn/nm-local-dir}"
 mkdir -p ${MAPRED_LOCAL_DIRS} ${NODEMANAGER_LOCAL_DIRS}

 chgrp hadoop -L -R \
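
The [@] to [*] switch matters here because each expansion is assigned to a plain string variable. A hedged sketch with made-up disk paths (not from this repository):

    # ${ARR[*]/%/suffix} appends a suffix to every element (an empty
    # pattern anchored at end-of-string) and, inside double quotes,
    # joins the results into one space-separated string; [@] inside
    # quotes would instead yield one word per element.
    MOUNTED_DISKS=(/mnt/disk0 /mnt/disk1)   # hypothetical disks
    LOCAL_DIRS="${MOUNTED_DISKS[*]/%//hadoop/mapred/local}"
    echo "${LOCAL_DIRS}"
    # -> /mnt/disk0/hadoop/mapred/local /mnt/disk1/hadoop/mapred/local

The later unquoted mkdir -p ${MAPRED_LOCAL_DIRS} then word-splits that string back into individual directory arguments. The same reasoning applies to the configure_hdfs.sh change below.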
2 changes: 1 addition & 1 deletion libexec/configure_hdfs.sh
@@ -34,7 +34,7 @@ if (( ${ENABLE_HDFS} )); then
   # Location of HDFS data blocks on datanodes; for each mounted disk, add the
   # path /mnt/diskname/hadoop/dfs/data as a data directory, or if no mounted
   # disks exist, just go with the absolute path /hadoop/dfs/data.
-  HDFS_DATA_DIRS="${MOUNTED_DISKS[@]/%//hadoop/dfs/data}"
+  HDFS_DATA_DIRS="${MOUNTED_DISKS[*]/%//hadoop/dfs/data}"

   # Do not create HDFS_NAME_DIR, or Hadoop will think it is already formatted
   mkdir -p /hadoop/dfs ${HDFS_DATA_DIRS}
4 changes: 2 additions & 2 deletions libexec/hadoop_helpers.sh
@@ -100,7 +100,7 @@ function start_with_retry_namenode() {
       ${start_dfs_file}
     fi
   done
-  if !(wait_for_namenode); then
+  if ! (wait_for_namenode); then
     echo "Namenode not running after ${max_attempts} attempts at start-dfs.sh" \
       >&2
     return ${errcode}
@@ -174,7 +174,7 @@ function get_hdfs_superuser() {

 # Create and configure Hadoop2 specific HDFS directories.
 function initialize_hdfs_dirs() {
-  local extra_users="$@"
+  local extra_users="$*"
   local hdfs_superuser=$(get_hdfs_superuser)
   local dfs_cmd="sudo -i -u ${hdfs_superuser} hadoop fs"
   loginfo "Setting up HDFS /tmp directories."
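
The added space in ! (wait_for_namenode) is more than style: when extglob is enabled, bash parses the unspaced !(...) as an extended glob pattern rather than a negated subshell. A rough sketch of the failure mode (an assumed scenario, not from the PR):

    shopt -s extglob          # may be enabled elsewhere in a script collection
    check_ok() { return 1; }

    # Unspaced: !(check_ok) is a glob; with no matching file, bash tries
    # to run a command literally named '!(check_ok)', so the negation is
    # silently lost.
    # if !(check_ok); then ...

    # Spaced: '!' is the negation keyword applied to a subshell.
    if ! (check_ok); then
      echo "check_ok failed, negation works as intended"
    fi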
2 changes: 1 addition & 1 deletion libexec/mount_disks.sh
@@ -64,7 +64,7 @@ for DISK_PATH in ${DISK_PATHS}; do
     MOUNT_ENTRY=($(grep -w ${DATAMOUNT} /proc/mounts))
     # Taken from /usr/share/google/safe_format_and_mount
     MOUNT_OPTIONS='defaults,discard'
-    echo "UUID=${DISK_UUID} ${MOUNT_ENTRY[@]:1:2} ${MOUNT_OPTIONS} 0 2 \
+    echo "UUID=${DISK_UUID} ${MOUNT_ENTRY[*]:1:2} ${MOUNT_OPTIONS} 0 2 \
       # added by bdutil" >> /etc/fstab
   fi
 done
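
For context on ${MOUNT_ENTRY[*]:1:2}: fields 1 and 2 of a /proc/mounts entry are the mount point and filesystem type, and the [*] slice joins them with a space inside the quoted fstab line. A small sketch with a fabricated mount entry:

    # ${ARR[*]:offset:length} selects `length` elements starting at
    # `offset` and joins them with single spaces inside double quotes.
    MOUNT_ENTRY=(/dev/sdb1 /mnt/pd1 ext4 rw,discard 0 0)   # hypothetical
    echo "fstab fields: ${MOUNT_ENTRY[*]:1:2}"
    # -> fstab fields: /mnt/pd1 ext4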
2 changes: 1 addition & 1 deletion libexec/start_hadoop.sh
@@ -22,7 +22,7 @@ HADOOP_PORTS=(50010 50020 50030 50060 50070 50075 50090)
 cd ${HADOOP_INSTALL_DIR}

 # Test for sshability to workers.
-for NODE in ${WORKERS[@]}; do
+for NODE in "${WORKERS[@]}"; do
   sudo -u hadoop ssh ${NODE} "exit 0"
 done

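
Quoting the array expansion in the loop keeps each worker name as exactly one loop item, immune to word splitting and accidental glob expansion. A minimal sketch with hypothetical hostnames (the same change appears in start_hadoop2.sh below):

    WORKERS=(hadoop-w-0 hadoop-w-1)   # hypothetical worker hostnames
    for NODE in "${WORKERS[@]}"; do
      # each element arrives as a single, unmodified word
      echo "would check ssh to ${NODE}"
    done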
2 changes: 1 addition & 1 deletion libexec/start_hadoop2.sh
@@ -23,7 +23,7 @@ HADOOP_PORTS=(8088 50010 50020 50070 50090)
 cd ${HADOOP_INSTALL_DIR}

 # Test for sshability to workers.
-for NODE in ${WORKERS[@]}; do
+for NODE in "${WORKERS[@]}"; do
   sudo -u hadoop ssh ${NODE} "exit 0"
 done
