Merge remote-tracking branch 'origin/candidate-3.10.x'

Conflicts:
	thorlcr/activities/wuidwrite/thwuidwrite.cpp

Signed-off-by: Richard Chapman <rchapman@hpccsystems.com>
Richard Chapman 12 years ago
parent
commit
e0e5a71c42

+ 1 - 0
common/thorhelper/thorcommon.hpp

@@ -25,6 +25,7 @@
 #include "thorhelper.hpp"
 #include "thorxmlwrite.hpp"
 
+#define DALI_RESULT_OUTPUTMAX 2000 // MB
 class THORHELPER_API CSizingSerializer : implements IRowSerializerTarget
 {
     size32_t totalsize;

+ 9 - 9
docs/ECLStandardLibraryReference/SLR-Mods/CreateSuperFile.xml

@@ -11,8 +11,8 @@
     </indexterm><indexterm>
       <primary>CreateSuperFile</primary>
     </indexterm>(</emphasis> <emphasis> superfile </emphasis> <emphasis
-  role="bold">[</emphasis> <emphasis>, sequentialflag </emphasis> <emphasis
-  role="bold">] [</emphasis> <emphasis>, ifdoesnotexist </emphasis> <emphasis
+  role="bold">[</emphasis> <emphasis>, sequentialparts </emphasis> <emphasis
+  role="bold">] [</emphasis> <emphasis>, allow_exist </emphasis> <emphasis
   role="bold">]</emphasis> <emphasis> </emphasis> <emphasis
   role="bold">)</emphasis></para>
 
@@ -31,7 +31,7 @@
         </row>
 
         <row>
-          <entry><emphasis>sequentialflag</emphasis></entry>
+          <entry><emphasis>sequentialparts</emphasis></entry>
 
           <entry>Optional. A boolean value indicating whether the sub-files
           must be sequentially ordered. If omitted, the default is
@@ -39,7 +39,7 @@
         </row>
 
         <row>
-          <entry><emphasis>ifdoesnotexist</emphasis></entry>
+          <entry><emphasis>allow_exist</emphasis></entry>
 
           <entry>Optional. A boolean value indicating whether to post an error
           if the <emphasis>superfile</emphasis> already exists. If TRUE, no
@@ -59,10 +59,10 @@
   an empty <emphasis>superfile</emphasis>. This function is not included in a
   superfile transaction.</para>
 
-  <para>The <emphasis>sequentialflag</emphasis> parameter set to TRUE governs
+  <para>The <emphasis>sequentialparts</emphasis> parameter set to TRUE governs
   the unusual case where the logical numbering of sub-files must be sequential
   (for example, where all sub-files are already globally sorted). With
-  <emphasis>sequentialflag</emphasis> FALSE (the default) the subfile parts
+  <emphasis>sequentialparts</emphasis> FALSE (the default) the subfile parts
   are interleaved so the parts are found locally.</para>
 
   <para>For example, if on a 4-way cluster there are 3 files (A, B, and C)
@@ -76,20 +76,20 @@
 
   <para>A._4_of_4, B._4_of_4, and C._4_of_4 are on node 4</para>
 
-  <para>Reading the superfile created with <emphasis>sequentialflag</emphasis>
+  <para>Reading the superfile created with <emphasis>sequentialparts</emphasis>
   FALSE on Thor will read the parts in the order:</para>
 
   <para>[A1,B1,C1,] [A2,B2,C2,] [A3,B3,C3,] [A4,B4,C4]</para>
 
   <para>so the reads will all be local (i.e. A1,B1,C1 on node 1 etc). Setting
-  <emphasis>sequentialflag</emphasis> to TRUE will read the parts in subfile
+  <emphasis>sequentialparts</emphasis> to TRUE will read the parts in subfile
   order, like this:</para>
 
   <para>[A1,A2,A3,] [A4,B1,B2] [,B3,B4,C1,] [C2,C3,C4]</para>
 
   <para>so that the global order of A,B,C is maintained. However, the parts
   cannot all be read locally (e.g. A2 and A3 will be read on node 1). Because
-  of this it is much less efficient to set <emphasis>sequentialflag</emphasis>
+  of this it is much less efficient to set <emphasis>sequentialparts</emphasis>
   true, and as it is unusual anyway to have files that are partitioned in
   order, it becomes a very unusual option to set.</para>
 

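The paragraphs above describe how the sequentialparts setting changes superfile part numbering and therefore read locality. As a hedged illustration only (not HPCC code), the following standalone C++ sketch reproduces the 3-file, 4-node example from the text and prints which physical part each node reads under both settings:

#include <cstdio>

int main()
{
    const int files = 3, parts = 4;          // sub-files A, B, C, each split _1_of_4 .. _4_of_4
    const char *names = "ABC";
    for (int sequentialparts = 0; sequentialparts <= 1; sequentialparts++)
    {
        printf("sequentialparts=%s\n", sequentialparts ? "TRUE" : "FALSE");
        const int perNode = (files * parts) / parts;      // 3 logical superfile parts per node
        for (int node = 1; node <= parts; node++)
        {
            printf("  node %d reads:", node);
            for (int i = 0; i < perNode; i++)
            {
                int logical = (node - 1) * perNode + i;   // 0-based logical superfile part
                int file, part;
                if (sequentialparts)
                {   // subfile order: A1..A4, B1..B4, C1..C4
                    file = logical / parts;
                    part = logical % parts;
                }
                else
                {   // interleaved: A1,B1,C1, A2,B2,C2, ...
                    file = logical % files;
                    part = logical / files;
                }
                // part k of any sub-file is stored on node k, so locality is visible here
                printf(" %c%d(node %d)", names[file], part + 1, part + 1);
            }
            printf("\n");
        }
    }
    return 0;
}

With sequentialparts FALSE every read is local; with TRUE node 1 ends up reading A2 and A3 remotely, which is why the documentation calls the option much less efficient.
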
+ 9 - 0
docs/ECLStandardLibraryReference/SLR-Mods/SprayVariable.xml

@@ -29,6 +29,7 @@
   <emphasis>,allowoverwrite</emphasis> <emphasis role="bold">] [</emphasis>
   <emphasis>,replicate</emphasis> <emphasis role="bold">] [</emphasis>
   <emphasis>, compress </emphasis> <emphasis role="bold">])</emphasis>
+  <emphasis>, sourceCsvEscape </emphasis> <emphasis role="bold">])</emphasis>
   <emphasis></emphasis></para>
 
   <para><emphasis>dfuwuid</emphasis> <emphasis role="bold"> :=
@@ -56,6 +57,7 @@
   <emphasis>,allowoverwrite</emphasis> <emphasis role="bold">] [</emphasis>
   <emphasis>,replicate</emphasis> <emphasis role="bold">] [</emphasis>
   <emphasis>, compress </emphasis> <emphasis role="bold">]);</emphasis></para>
+  <emphasis>, sourceCsvEscape </emphasis> <emphasis role="bold">]);</emphasis></para>
 
   <informaltable colsep="0" frame="none" rowsep="0">
     <tgroup cols="2">
@@ -169,6 +171,13 @@
         </row>
 
         <row>
+          <entry><emphasis>sourceCsvEscape</emphasis></entry>
+
+          <entry>Optional. A null-terminated string containing the CSV escape
+          characters. If omitted, the default is none.</entry>
+        </row>
+
+        <row>
           <entry><emphasis>dfuwuid</emphasis></entry>
 
           <entry>The attribute name to receive the null-terminated string

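The new sourceCsvEscape parameter documented above supplies the escape character(s) used when interpreting the source CSV. As a rough sketch of what an escape character means during field splitting, assuming a single escape character and ignoring quoting entirely (a toy example, not the DFU spray logic):

#include <cstdio>
#include <string>
#include <vector>

// Toy CSV field splitter: an 'escape' of 0 means no escape character is configured.
static std::vector<std::string> splitCsvLine(const std::string &line, char sep, char escape)
{
    std::vector<std::string> fields(1);
    for (size_t i = 0; i < line.size(); i++)
    {
        char c = line[i];
        if (escape && c == escape && i + 1 < line.size())
            fields.back().push_back(line[++i]);   // escaped character is taken literally
        else if (c == sep)
            fields.emplace_back();                // unescaped separator starts a new field
        else
            fields.back().push_back(c);
    }
    return fields;
}

int main()
{
    const std::string line = "one,two\\,half,three";    // middle field contains an escaped comma
    for (char esc : {'\0', '\\'})
    {
        printf("escape=%s:", esc ? "\\" : "(none)");
        for (const std::string &f : splitCsvLine(line, ',', esc))
            printf(" [%s]", f.c_str());
        printf("\n");
    }
    return 0;
}

With no escape configured (the documented default) the embedded comma splits the field; with '\' configured the escaped comma stays inside the field.
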
+ 106 - 3
docs/HPCCClientTools/CT_Mods/CT_ECL_CLI.xml

@@ -1572,7 +1572,7 @@ ecl packagemap add roxie mypackagemap.pkg --daliip=192.168.11.11
 
         <para><informaltable colsep="0" frame="none" rowsep="0">
             <tgroup cols="2">
-              <colspec align="left" colwidth="125.55pt" />
+              <colspec align="left" colwidth="175.55pt" />
 
               <colspec colwidth="384.85pt" />
 
@@ -1646,7 +1646,7 @@ ecl packagemap add roxie mypackagemap.pkg --daliip=192.168.11.11
 
         <para><informaltable colsep="0" frame="none" rowsep="0">
             <tgroup cols="2">
-              <colspec align="left" colwidth="125.55pt" />
+              <colspec align="left" colwidth="175.55pt" />
 
               <colspec colwidth="384.85pt" />
 
@@ -1729,7 +1729,7 @@ ecl packagemap add roxie mypackagemap.pkg --daliip=192.168.11.11
 
         <para><informaltable colsep="0" frame="none" rowsep="0">
             <tgroup cols="2">
-              <colspec align="left" colwidth="125.55pt" />
+              <colspec align="left" colwidth="175.55pt" />
 
               <colspec colwidth="384.85pt" />
 
@@ -1968,6 +1968,109 @@ ecl packagemap add roxie mypackagemap.pkg --daliip=192.168.11.11
           </informaltable></para>
       </sect2>
 
+      <sect2 id="eclpackagevalidate" role="brk">
+        <title>ecl packagemap validate</title>
+
+        <para><emphasis role="bold">ecl packagemap validate &lt;filename&gt;
+        </emphasis></para>
+
+        <para>Examples:</para>
+
+        <programlisting>ecl packagemap validate mypackagemap.pkg</programlisting>
+
+        <para>The packagemap validate command verifies that:</para>
+
+        <itemizedlist>
+          <listitem>
+            <para>Referenced superkeys have subfiles defined (warns if no
+            subfiles exist)</para>
+          </listitem>
+
+          <listitem>
+            <para>All referenced queries exist in the current Roxie
+            queryset</para>
+          </listitem>
+
+          <listitem>
+            <para>All Roxie queries are defined in the package </para>
+          </listitem>
+        </itemizedlist>
+
+        <para></para>
+
+        <para><informaltable colsep="0" frame="none" rowsep="0">
+            <tgroup cols="2">
+              <colspec align="left" colwidth="175.55pt" />
+
+              <colspec colwidth="384.85pt" />
+
+              <tbody>
+                <row>
+                  <entry>ecl packagemap validate</entry>
+
+                  <entry>Calls the packagemap validate command. </entry>
+                </row>
+
+                <row>
+                  <entry><emphasis role="bold">Actions</emphasis></entry>
+                </row>
+
+                <row>
+                  <entry>validate</entry>
+
+                  <entry>validates packagemap info</entry>
+                </row>
+
+                <row>
+                  <entry><emphasis role="bold">Arguments</emphasis></entry>
+                </row>
+
+                <row>
+                  <entry>filename</entry>
+
+                  <entry>The filename containing the packagemap info to
+                  validate</entry>
+                </row>
+
+                <row>
+                  <entry><emphasis role="bold">Options</emphasis></entry>
+                </row>
+
+                <row>
+                  <entry>-v, --verbose</entry>
+
+                  <entry>Output additional tracing information</entry>
+                </row>
+
+                <row>
+                  <entry>-s, --server</entry>
+
+                  <entry>The IP Address or hostname of ESP server running
+                  eclwatch services</entry>
+                </row>
+
+                <row>
+                  <entry>--port</entry>
+
+                  <entry>The eclwatch services port (Default is 8010)</entry>
+                </row>
+
+                <row>
+                  <entry>-u, --username</entry>
+
+                  <entry>The username (if necessary)</entry>
+                </row>
+
+                <row>
+                  <entry>-pw, --password</entry>
+
+                  <entry>The password (if necessary)</entry>
+                </row>
+              </tbody>
+            </tgroup>
+          </informaltable></para>
+      </sect2>
+
       <sect2 role="brk">
         <title>ecl roxie attach</title>
 

+ 1 - 1
ecl/eclcmd/eclcmd_core.cpp

@@ -201,7 +201,7 @@ bool doDeploy(EclCmdWithEclTarget &cmd, IClientWsWorkunits *client, const char *
         if (cmd.optVerbose)
             fprintf(stdout, "Deployed\n   wuid: ");
         const char *state = resp->getWorkunit().getState();
-        bool isCompiled = strieq(state, "compiled");
+        bool isCompiled = (strieq(state, "compiled")||strieq(state, "completed"));
         if (displayWuid || cmd.optVerbose || !isCompiled)
             fprintf(stdout, "%s\n", w);
         if (cmd.optVerbose || !isCompiled)

+ 16 - 24
ecl/eclcmd/queries/ecl-queries.cpp

@@ -105,7 +105,7 @@ public:
             const char *arg = iter.query();
             if (*arg!='-')
             {
-                optQuerySet.set(arg);
+                optTargetCluster.set(arg);
                 continue;
             }
             if (iter.matchOption(optTargetCluster, ECLOPT_CLUSTER_DEPRECATED)||iter.matchOption(optTargetCluster, ECLOPT_CLUSTER_DEPRECATED_S))
@@ -230,7 +230,7 @@ public:
     {
         ActiveQueryMap queryMap(qs);
         if (qs.getQuerySetName())
-            fprintf(stdout, "\nQuerySet: %s\n", qs.getQuerySetName());
+            fprintf(stdout, "\nTarget: %s\n", qs.getQuerySetName());
         fputs("\n", stdout);
         fputs("                                   Time   Warn        Memory\n", stdout);
         fputs("Flags Query Id                     Limit  Limit  Pri  Limit      Comment\n", stdout);
@@ -245,7 +245,7 @@ public:
     {
         Owned<IClientWsWorkunits> client = createCmdClient(WsWorkunits, *this);
         Owned<IClientWUMultiQuerySetDetailsRequest> req = client->createWUMultiQuerysetDetailsRequest();
-        req->setQuerySetName(optQuerySet.get());
+        req->setQuerySetName(optTargetCluster.get());
         req->setClusterName(optTargetCluster.get());
         req->setFilterType("All");
 
@@ -264,15 +264,14 @@ public:
     {
         fputs("\nUsage:\n"
             "\n"
-            "The 'queries list' command displays a list of the queries in one or more\n"
-            "querysets. If a cluster is provided the querysets associated with that\n"
-            "cluster will be shown. If no queryset or cluster is specified all querysets\n"
+            "The 'queries list' command displays a list of the queries published to one\n"
+            "or more target clusters. If a target is provided the querysets associated with\n"
+            "that cluster will be shown. If no queryset or cluster is specified all targets\n"
             "are shown.\n"
             "\n"
-            "ecl queries list [<queryset>][--target=<val>][--show=<flags>]\n\n"
+            "ecl queries list [<target>][--show=<flags>]\n\n"
             " Options:\n"
-            "   <queryset>             name of queryset to get list of queries for\n"
-            "   -t, --target=<val>     target cluster to get list of published queries for\n"
+            "   <target>               name of target cluster to get list of queries for\n"
             "   --show=<flags>         show only queries with matching flags\n"
             "   --inactive             show only queries that do not have an active alias\n"
             " Flags:\n"
@@ -286,7 +285,6 @@ public:
     }
 private:
     StringAttr optTargetCluster;
-    StringAttr optQuerySet;
     unsigned flags;
     bool optInactive;
 };
@@ -314,8 +312,8 @@ public:
             {
                 if (optSourceQueryPath.isEmpty())
                     optSourceQueryPath.set(arg);
-                else if (optTargetQuerySet.isEmpty())
-                    optTargetQuerySet.set(arg);
+                else if (optTargetCluster.isEmpty())
+                    optTargetCluster.set(arg);
                 else
                 {
                     fprintf(stderr, "\nunrecognized argument %s\n", arg);
@@ -356,16 +354,11 @@ public:
     {
         if (!EclCmdCommon::finalizeOptions(globals))
             return false;
-        if (optSourceQueryPath.isEmpty() || optTargetQuerySet.isEmpty())
+        if (optSourceQueryPath.isEmpty() && optTargetCluster.isEmpty())
         {
             fputs("source and target must both be specified.\n\n", stderr);
             return false;
         }
-        if (optSourceQueryPath.get()[0]=='/' && optSourceQueryPath.get()[1]=='/' && optTargetCluster.isEmpty())
-        {
-            fputs("cluster must be specified for remote copies.\n\n", stderr);
-            return false;
-        }
         if (optMemoryLimit.length() && !isValidMemoryValue(optMemoryLimit))
         {
             fprintf(stderr, "invalid --memoryLimit value of %s.\n\n", optMemoryLimit.get());
@@ -385,7 +378,7 @@ public:
         Owned<IClientWsWorkunits> client = createCmdClient(WsWorkunits, *this);
         Owned<IClientWUQuerySetCopyQueryRequest> req = client->createWUQuerysetCopyQueryRequest();
         req->setSource(optSourceQueryPath.get());
-        req->setTarget(optTargetQuerySet.get());
+        req->setTarget(optTargetCluster.get());
         req->setCluster(optTargetCluster.get());
         req->setDaliServer(optDaliIP.get());
         req->setActivate(optActivate);
@@ -422,17 +415,16 @@ public:
             "which begins with '//' followed by the IP and Port of the source EclWatch\n"
             "and then followed by the source queryset and query.\n"
             "\n"
-            "ecl queries copy <source_query_path> <target_queryset> [--activate]\n"
+            "ecl queries copy <source_query_path> <target> [--activate]\n"
             "\n"
-            "ecl queries copy //IP:Port/queryset/query <target_queryset> [--activate]\n"
-            "ecl queries copy queryset/query <target_queryset> [--activate]\n"
+            "ecl queries copy //IP:Port/queryset/query <target> [--activate]\n"
+            "ecl queries copy queryset/query <target> [--activate]\n"
             "\n"
             " Options:\n"
             "   <source_query_path>    path of query to copy\n"
             "                          in the form: //ip:port/queryset/query\n"
             "                          or: queryset/query\n"
-            "   <target_queryset>      name of queryset to copy the query into\n"
-            "   -t, --target=<val>     Local target cluster to associate with remote workunit\n"
+            "   <target>               name of target cluster to copy the query to\n"
             "   --no-files             Do not copy files referenced by query\n"
             "   --daliip=<ip>          For file copying if remote version < 3.8\n"
             "   -A, --activate         Activate the new query\n"

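The revised help text above drops the separate --target option and treats the second bare argument as the target cluster. A minimal, self-contained sketch of that positional convention (illustrative only, not the eclcmd parser; named-option handling is stubbed out):

#include <cstdio>
#include <string>

int main(int argc, const char *argv[])
{
    std::string sourceQueryPath, targetCluster;
    for (int i = 1; i < argc; i++)
    {
        const std::string arg = argv[i];
        if (!arg.empty() && arg[0] == '-')
            continue;                               // named options handled elsewhere
        if (sourceQueryPath.empty())
            sourceQueryPath = arg;                  // first bare argument: query to copy
        else if (targetCluster.empty())
            targetCluster = arg;                    // second bare argument: target cluster
        else
        {
            fprintf(stderr, "unrecognized argument %s\n", arg.c_str());
            return 1;
        }
    }
    printf("source=%s target=%s\n", sourceQueryPath.c_str(), targetCluster.c_str());
    return 0;
}
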
+ 34 - 2
ecl/hqlcpp/hqlckey.cpp

@@ -256,6 +256,7 @@ protected:
     IHqlExpression * optimizeTransfer(HqlExprArray & fields, HqlExprArray & values, IHqlExpression * expr, IHqlExpression * leftSelector);
     void optimizeExtractJoinFields();
     void optimizeTransfer(SharedHqlExpr & targetDataset, SharedHqlExpr & targetTransform, SharedHqlExpr & keyedFilter, OwnedHqlExpr * extraFilter);
+    IHqlExpression * querySimplifiedKey(IHqlExpression * expr);
     void splitFilter(IHqlExpression * filter, SharedHqlExpr & keyTarget);
 
 protected:
@@ -312,9 +313,15 @@ KeyedJoinInfo::KeyedJoinInfo(HqlCppTranslator & _translator, IHqlExpression * _e
     }
     else
     {
-        hasComplexIndex = true;
         originalKey.set(right);
-        key.setown(createKeyFromComplexKey(right));
+        //We could call key.set(querySimplifiedKey(right)) to succeed in some cases instead of generating an error.
+        if (translator.getTargetClusterType() == RoxieCluster)
+        {
+            hasComplexIndex = true;
+            key.setown(createKeyFromComplexKey(right));
+        }
+        else
+            translator.throwError1(HQLERR_KeyedJoinNoRightIndex_X, getOpString(right->getOperator()));
     }
 
     if (!originalKey)
@@ -337,6 +344,31 @@ KeyedJoinInfo::~KeyedJoinInfo()
 }
 
 
+IHqlExpression * KeyedJoinInfo::querySimplifiedKey(IHqlExpression * expr)
+{
+    loop
+    {
+        switch (expr->getOperator())
+        {
+        case no_sorted:
+        case no_distributed:
+        case no_sort:
+        case no_distribute:
+        case no_preservemeta:
+        case no_assertsorted:
+        case no_assertgrouped:
+        case no_assertdistributed:
+        case no_nofold:
+            break;
+        case no_newkeyindex:
+            return LINK(expr);
+        default:
+            return NULL;
+        }
+        expr = expr->queryChild(0);
+    }
+}
+
 IHqlExpression * KeyedJoinInfo::createKeyFromComplexKey(IHqlExpression * expr)
 {
     IHqlExpression * base = queryPhysicalRootTable(expr);

+ 5 - 1
ecl/hthor/hthor.cpp

@@ -5798,7 +5798,11 @@ CHThorWorkUnitWriteActivity::CHThorWorkUnitWriteActivity(IAgentContext &_agent,
 void CHThorWorkUnitWriteActivity::execute()
 {
     grouped = (POFgrouped & helper.getFlags()) != 0;
-    size32_t outputLimit = agent.queryWorkUnit()->getDebugValueInt("outputLimit", defaultWorkUnitWriteLimit) * 0x100000;
+    size32_t outputLimit = agent.queryWorkUnit()->getDebugValueInt("outputLimit", defaultWorkUnitWriteLimit);
+    if (outputLimit>DALI_RESULT_OUTPUTMAX)
+        throw MakeStringException(0, "Dali result outputs are restricted to a maximum of %d MB, the default limit is %d MB. A huge dali result usually indicates the ECL needs altering.", DALI_RESULT_OUTPUTMAX, defaultWorkUnitWriteLimit);
+    assertex(outputLimit<=0x1000); // 32bit limit because MemoryBuffer/CMessageBuffers involved etc.
+    outputLimit *= 0x100000;
     MemoryBuffer rowdata;
     __int64 rows = 0;
     IRecordSize * inputMeta = input->queryOutputMeta();

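This hunk, together with the Thor change to thwuidwrite.cpp further down, range-checks the MB-denominated outputLimit against the new DALI_RESULT_OUTPUTMAX cap before converting it to bytes. A standalone sketch of that pattern follows; the 10 MB default and the helper name are assumptions for illustration, not HPCC definitions:

#include <cassert>
#include <cstdio>
#include <stdexcept>

static const unsigned DALI_RESULT_OUTPUTMAX_MB = 2000;  // mirrors DALI_RESULT_OUTPUTMAX above
static const unsigned DEFAULT_OUTPUT_LIMIT_MB  = 10;    // assumed default for this sketch

// Validate a per-workunit output limit given in MB and convert it to bytes.
static unsigned long long resolveOutputLimitBytes(unsigned limitMB)
{
    if (limitMB > DALI_RESULT_OUTPUTMAX_MB)
        throw std::runtime_error("Dali result outputs are restricted to a maximum of 2000 MB");
    assert(limitMB <= 0x1000);                 // belt-and-braces guard mirroring the assertex in the patch
    return (unsigned long long)limitMB * 0x100000;      // MB -> bytes
}

int main()
{
    printf("%llu bytes\n", resolveOutputLimitBytes(DEFAULT_OUTPUT_LIMIT_MB));
    return 0;
}
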
+ 1 - 1
esp/services/ws_packageprocess/ws_packageprocessService.cpp

@@ -233,8 +233,8 @@ void addPackageMapInfo(IPropertyTree *pkgSetRegistry, const char *target, const
                             fileNames.append(subid);
                     }
                 }
-                mapTree->addPropTree("Package", LINK(&item));
             }
+            mapTree->addPropTree("Package", LINK(&item));
         }
         else
         {

+ 5 - 3
esp/services/ws_smc/ws_smcService.cpp

@@ -136,14 +136,16 @@ struct CActiveWorkunitWrapper: public CActiveWorkunit
         double version = context.getClientVersion();
 
         CWUWrapper wu(wuid, context);
+        StringBuffer stateStr;
         SCMStringBuffer state,owner,jobname;
         setWuid(wuid);
+        wu->getStateDesc(state);
         if(index)
-            state.s.append("queued(").append(index).append(")");
+            stateStr.appendf("queued(%d) [%s]", index, state.str());
         else
-            wu->getStateDesc(state);
+            stateStr.set(state.str());
 
-        setState(state.str());
+        setState(stateStr.str());
         setStateID(wu->getState());
         if ((version > 1.09) && (wu->getState() == WUStateFailed))
             setWarning("The job will ultimately not complete. Please check ECLAgent.");

+ 4 - 8
esp/services/ws_workunits/ws_workunitsQuerySets.cpp

@@ -1062,6 +1062,9 @@ bool CWsWorkunitsEx::onWUQuerysetQueryAction(IEspContext &context, IEspWUQuerySe
         result->setQueryId(id);
         try
         {
+            IPropertyTree *query = queryset->queryPropTree(xpath);
+            if (!query)
+                throw MakeStringException(ECLWATCH_QUERYID_NOT_FOUND, "Query %s/%s not found.", req.getQuerySetName(), id);
             switch (req.getAction())
             {
                 case CQuerySetQueryActionTypes_ToggleSuspend:
@@ -1074,13 +1077,8 @@ bool CWsWorkunitsEx::onWUQuerysetQueryAction(IEspContext &context, IEspWUQuerySe
                     setQuerySuspendedState(queryset, id, false, NULL);
                     break;
                 case CQuerySetQueryActionTypes_Activate:
-                {
-                    IPropertyTree *query = queryset->queryPropTree(xpath);
-                    if (!query)
-                        throw MakeStringException(ECLWATCH_QUERYID_NOT_FOUND, "Query %s/%s not found.", req.getQuerySetName(), id);
                     setQueryAlias(queryset, query->queryProp("@name"), id);
                     break;
-                }
                 case CQuerySetQueryActionTypes_Delete:
                     removeNamedQuery(queryset, id);
                     break;
@@ -1089,9 +1087,7 @@ bool CWsWorkunitsEx::onWUQuerysetQueryAction(IEspContext &context, IEspWUQuerySe
                     break;
             }
             result->setSuccess(true);
-            IPropertyTree *query = queryset->queryPropTree(xpath);
-            if (query)
-                result->setSuspended(query->getPropBool("@suspended"));
+            result->setSuspended(query->getPropBool("@suspended"));
         }
         catch(IException *e)
         {

+ 1 - 1
initfiles/bash/etc/init.d/dafilesrv.in

@@ -130,7 +130,7 @@ while true ; do
 done
 for arg do arg=$arg; done
 
-if [ -z $arg ]; then
+if [ -z $arg ] || [ $# -ne 1 ]; then
     print_usage
 fi
 

+ 8 - 8
initfiles/bash/etc/init.d/hpcc-init.in

@@ -56,12 +56,12 @@ function print_usage {
 
 function print_components {
     if [ ! -z ${compList} ];then
-        echo >&2 "Components on this node as defined by ${CONFIG_DIR}/${ENV_CONF_FILE}:"
+        echo >&2 "Components on this node as defined by ${CONFIG_DIR}/${ENV_XML_FILE}:"
         IFS=$'\n'
         echo >&2 "${compList[*]}"
         unset IFS
     else
-        echo >&2 "No components on this node as defined by ${CONFIG_DIR}/${ENV_CONF_FILE}."
+        echo >&2 "No components on this node as defined by ${CONFIG_DIR}/${ENV_XML_FILE}."
     fi
     echo >&2
     exit 0
@@ -69,12 +69,12 @@ function print_components {
 
 function print_types {
     if [ ! -z ${compList} ];then
-        echo >&2 "Components types on this node as defined by ${CONFIG_DIR}/${ENV_CONF_FILE}:"
+        echo >&2 "Components types on this node as defined by ${CONFIG_DIR}/${ENV_XML_FILE}:"
         IFS=$'\n'
         echo >&2 "${compTypeList[*]}"
         unset IFS
     else
-        echo >&2 "No components on this node as defined by ${CONFIG_DIR}/${ENV_CONF_FILE}."
+        echo >&2 "No components on this node as defined by ${CONFIG_DIR}/${ENV_XML_FILE}."
     fi
     echo >&2
     exit 0
@@ -242,6 +242,10 @@ while true ; do
 done
 for arg do arg=$arg; done
 
+if [ -z $arg ] || [ $# -ne 1 ]; then
+    print_usage
+fi
+
 if [ -z ${component} ]; then
     for (( i=0; i<=${compListLen}; i++ ));do
         component="$component ${compList[$i]}"
@@ -253,10 +257,6 @@ else
     compDafilesrv=''
 fi
 
-if [ -z $arg ]; then
-    print_usage
-fi
-
 case "$arg" in 
     status|start|restart|setup)
         cmd=$arg

+ 5 - 1
thorlcr/activities/wuidwrite/thwuidwrite.cpp

@@ -51,7 +51,11 @@ public:
     }
     void init()
     {
-        workunitWriteLimit = (unsigned)container.queryJob().getWorkUnitValueInt("outputLimit", DEFAULT_WUIDWRITE_LIMIT) * 0x100000;
+        workunitWriteLimit = getOptInt(THOROPT_OUTPUTLIMIT, DEFAULT_WUIDWRITE_LIMIT);
+        if (workunitWriteLimit>DALI_RESULT_OUTPUTMAX)
+            throw MakeActivityException(this, 0, "Dali result outputs are restricted to a maximum of %d MB, the default limit is %d MB. A huge dali result usually indicates the ECL needs altering.", DALI_RESULT_OUTPUTMAX, DEFAULT_WUIDWRITE_LIMIT);
+        assertex(workunitWriteLimit<=0x1000); // 32bit limit because MemoryBuffer/CMessageBuffers involved etc.
+        workunitWriteLimit *= 0x100000;
     }
     virtual void serializeSlaveData(MemoryBuffer &dst, unsigned slave)
     {

+ 1 - 1
thorlcr/graph/thgraph.cpp

@@ -2819,7 +2819,7 @@ int CActivityBase::getOptInt(const char *prop, int defVal) const
 {
     int def = queryJob().getOptInt(prop, defVal);
     VStringBuffer path("hint[@name=\"%s\"]/@value", prop);
-    return container.queryXGMML().getPropInt(path.str(), def);
+    return container.queryXGMML().getPropInt(path.toLowerCase().str(), def);
 }
 
 __int64 CActivityBase::getOptInt64(const char *prop, __int64 defVal) const

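The one-line change above lowercases the hint xpath before querying the activity's XGMML, presumably because hint names are stored lowercased. A rough sketch of the lookup order getOptInt implements, with std::map standing in for the property trees (illustrative only, not jlib/Thor code):

#include <algorithm>
#include <cctype>
#include <cstdio>
#include <map>
#include <string>

static std::string toLower(std::string s)
{
    std::transform(s.begin(), s.end(), s.begin(),
                   [](unsigned char c) { return (char)std::tolower(c); });
    return s;
}

// Job-wide option supplies the default; a per-activity hint (stored lowercased) overrides it.
static int getOptIntSketch(const std::map<std::string, int> &jobOptions,
                           const std::map<std::string, int> &activityHints,
                           const char *prop, int defVal)
{
    auto j = jobOptions.find(prop);
    int def = (j != jobOptions.end()) ? j->second : defVal;
    auto h = activityHints.find(toLower(prop));     // hint names matched case-insensitively
    return (h != activityHints.end()) ? h->second : def;
}

int main()
{
    std::map<std::string, int> jobOptions = {{"outputLimit", 10}};
    std::map<std::string, int> activityHints = {{"outputlimit", 50}};   // hint stored with lowercased name
    printf("effective outputLimit = %d MB\n",
           getOptIntSketch(jobOptions, activityHints, "outputLimit", 10));   // prints 50: the hint wins
    return 0;
}
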
+ 1 - 0
thorlcr/thorutil/thormisc.hpp

@@ -55,6 +55,7 @@
 #define THOROPT_PARALLEL_FUNNEL       "parallelFunnel"
 #define THOROPT_SORT_MAX_DEVIANCE     "sort_max_deviance"
 #define THOROPT_OUTPUT_FLUSH_THRESHOLD "output_flush_threshold"
+#define THOROPT_OUTPUTLIMIT           "outputLimit"
 
 #define INITIAL_SELFJOIN_MATCH_WARNING_LEVEL 20000  // max of row matches before selfjoin emits warning