Преглед изворни кода

HPCC-24692 Mask reference to spark controlled offensive terms

Signed-off-by: Michael Gardner <michael.gardner@lexisnexisrisk.com>
Michael Gardner пре 4 године
родитељ
комит
80f54b8799

+ 7 - 7
plugins/spark/init_sparkthor.in

@@ -53,18 +53,18 @@ trap "terminate" SIGINT SIGTERM
 
 while [[ 1 ]]; do
     if [[ -e @PID_PATH@/${component}.pid ]]; then
-        _master_pid="$(cat @PID_PATH@/${component}.pid)"
-        if ! kill -0 ${_master_pid}; then
-            log "spark master not running, attempting restart of ${component}"
+        _manager_pid="$(cat @PID_PATH@/${component}.pid)"
+        if ! kill -0 ${_manager_pid}; then
+            log "spark manager not running, attempting restart of ${component}"
             @ADMIN_PATH@/sparkthor.sh start ${component}
             ln -s -T @PID_PATH@/spark-hpcc-org.apache.spark.deploy.master.Master-1.pid @PID_PATH@/${component}.pid
         fi
     else
-        log "Starting spark master ${component}"
+        log "Starting spark manager ${component}"
         @ADMIN_PATH@/sparkthor.sh start ${component}
         ln -s -T @PID_PATH@/spark-hpcc-org.apache.spark.deploy.master.Master-1.pid @PID_PATH@/${component}.pid
     fi
-    _master_pid="$(cat @PID_PATH@/${component}.pid)"
-    log "waiting on spark master process :: ${_master_pid}"
-    wait_external $_master_pid
+    _manager_pid="$(cat @PID_PATH@/${component}.pid)"
+    log "waiting on spark manager process :: ${_manager_pid}"
+    wait_external $_manager_pid
 done

+ 2 - 1
plugins/spark/sparkthor-worker.sh.in

@@ -43,11 +43,12 @@ while [[ $rsync_stat -ne 0 && $rsync_att -gt 0 ]] ; do
     log "rsync returns ${rsync_stat}"
 done
 if [ ! -f @RUNTIME_PATH@/${_component}/spark-hpcc-env.sh ] ; then
-    log "Error, $slavesfname file missing"
+    log "Error, $workersfname file missing"
     exit 1
 fi
 source @RUNTIME_PATH@/${_component}/spark-hpcc-env.sh
 
 MASTER_URL="spark://${MASTER_IP}:${SPARK_MASTER_PORT}"
 
+#externally provided spark-hadoop file
 @INSTALL_DIR@/externals/spark-hadoop/sbin/${_command}-slave.sh ${MASTER_URL}

+ 8 - 7
plugins/spark/sparkthor.sh.in

@@ -28,8 +28,8 @@ source @INSTALL_DIR@/sbin/hpcc_setenv
 source @INSTALL_DIR@/etc/init.d/hpcc_common
 source ./spark-hpcc-env.sh
 
-# update slaves file in case state of environment has been altered since last run
-errorMessage=$( @EXEC_PATH@/daliadmin server=$DALISERVER clusternodes ${NODEGROUP} @RUNTIME_PATH@/${_component}/slaves 2>&1 )
+# update workers file in case state of environment has been altered since last run
+errorMessage=$( @EXEC_PATH@/daliadmin server=$DALISERVER clusternodes ${NODEGROUP} @RUNTIME_PATH@/${_component}/workers 2>&1 )
 errcode=$?
 if [[ 0 != ${errcode} ]]; then
     log "failed to lookup dali group for ${component}"
@@ -37,13 +37,14 @@ if [[ 0 != ${errcode} ]]; then
     exit 1
 fi
 
-log "${_command} master"
+log "${_command} manager"
+#externally provided spark-hadoop file
 @INSTALL_DIR@/externals/spark-hadoop/sbin/${_command}-master.sh
 
-if [[ -f "@RUNTIME_PATH@/${_component}/slaves" ]]; then
-    log "Starting sparkthor-workers with ./frunssh @RUNTIME_PATH@/${_component}/slaves"
-    clusternodes=$(cat @RUNTIME_PATH@/${_component}/slaves | wc -l)
-    @EXEC_PATH@/frunssh @RUNTIME_PATH@/${_component}/slaves "/bin/sh -c '@ADMIN_PATH@/sparkthor-worker.sh ${_command} ${_component}'" -i:$SSHidentityfile -u:$SSHusername -pe:$SSHpassword -t:$SSHtimeout -a:$SSHretries -n:$clusternodes 2>&1
+if [[ -f "@RUNTIME_PATH@/${_component}/workers" ]]; then
+    log "Starting sparkthor-workers with ./frunssh @RUNTIME_PATH@/${_component}/workers"
+    clusternodes=$(cat @RUNTIME_PATH@/${_component}/workers | wc -l)
+    @EXEC_PATH@/frunssh @RUNTIME_PATH@/${_component}/workers "/bin/sh -c '@ADMIN_PATH@/sparkthor-worker.sh ${_command} ${_component}'" -i:$SSHidentityfile -u:$SSHusername -pe:$SSHpassword -t:$SSHtimeout -a:$SSHretries -n:$clusternodes 2>&1
     FRUNSSH_RC=$?
     if [[ ${FRUNSSH_RC} -gt 0 ]]; then
         log "Error ${FRUNSSH_RC} in frunssh"