@@ -28,8 +28,9 @@ source @INSTALL_DIR@/sbin/hpcc_setenv
 source @INSTALL_DIR@/etc/init.d/hpcc_common
 source ./spark-hpcc-env.sh
 
-# update slaves file in case state of environment has been altered since last run
-errorMessage=$( @EXEC_PATH@/daliadmin server=$DALISERVER clusternodes ${NODEGROUP} @RUNTIME_PATH@/${_component}/slaves 2>&1 )
+# update workers file in case the state of the environment has been altered since the last run
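+# daliadmin's clusternodes query asks the Dali server at $DALISERVER for the current node list of ${NODEGROUP} and writes it to the workers file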
+errorMessage=$( @EXEC_PATH@/daliadmin server=$DALISERVER clusternodes ${NODEGROUP} @RUNTIME_PATH@/${_component}/workers 2>&1 )
 errcode=$?
 if [[ 0 != ${errcode} ]]; then
     log "failed to lookup dali group for ${_component}"
@@ -37,13 +38,16 @@ if [[ 0 != ${errcode} ]]; then
     exit 1
 fi
 
-log "${_command} master"
+log "${_command} manager"
+# externally provided spark-hadoop script (Spark's own sbin still uses the "master" name)
 @INSTALL_DIR@/externals/spark-hadoop/sbin/${_command}-master.sh
 
-if [[ -f "@RUNTIME_PATH@/${_component}/slaves" ]]; then
-    log "Starting sparkthor-workers with ./frunssh @RUNTIME_PATH@/${_component}/slaves"
-    clusternodes=$(cat @RUNTIME_PATH@/${_component}/slaves | wc -l)
-    @EXEC_PATH@/frunssh @RUNTIME_PATH@/${_component}/slaves "/bin/sh -c '@ADMIN_PATH@/sparkthor-worker.sh ${_command} ${_component}'" -i:$SSHidentityfile -u:$SSHusername -pe:$SSHpassword -t:$SSHtimeout -a:$SSHretries -n:$clusternodes 2>&1
+if [[ -f "@RUNTIME_PATH@/${_component}/workers" ]]; then
+    log "Starting sparkthor-workers with ./frunssh @RUNTIME_PATH@/${_component}/workers"
+    clusternodes=$(cat @RUNTIME_PATH@/${_component}/workers | wc -l)
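+    # run sparkthor-worker.sh via ssh on each node listed in the workers file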
+    @EXEC_PATH@/frunssh @RUNTIME_PATH@/${_component}/workers "/bin/sh -c '@ADMIN_PATH@/sparkthor-worker.sh ${_command} ${_component}'" -i:$SSHidentityfile -u:$SSHusername -pe:$SSHpassword -t:$SSHtimeout -a:$SSHretries -n:$clusternodes 2>&1
     FRUNSSH_RC=$?
     if [[ ${FRUNSSH_RC} -gt 0 ]]; then
         log "Error ${FRUNSSH_RC} in frunssh"