Browse Source

Merge branch 'candidate-5.2.0'

Signed-off-by: Richard Chapman <rchapman@hpccsystems.com>
Richard Chapman 10 years ago
parent
commit
5005176972

+ 3 - 3
cmake_modules/commonSetup.cmake

@@ -62,9 +62,9 @@ IF ("${COMMONSETUP_DONE}" STREQUAL "")
   option(USE_NATIVE_LIBRARIES "Search standard OS locations for thirdparty libraries" ON)
   option(USE_GIT_DESCRIBE "Use git describe to generate build tag" ON)
   option(CHECK_GIT_TAG "Require git tag to match the generated build tag" OFF)
-  option(USE_XALAN "Configure use of xalan" ON)
+  option(USE_XALAN "Configure use of xalan" OFF)
   option(USE_APR "Configure use of Apache Software Foundation (ASF) Portable Runtime (APR) libraries" ON)
-  option(USE_LIBXSLT "Configure use of libxslt" OFF)
+  option(USE_LIBXSLT "Configure use of libxslt" ON)
   option(MAKE_DOCS "Create documentation at build time." OFF)
   option(MAKE_DOCS_ONLY "Create a base build with only docs." OFF)
   option(DOCS_DRUPAL "Create Drupal HTML Docs" OFF)
@@ -91,7 +91,7 @@ IF ("${COMMONSETUP_DONE}" STREQUAL "")
   endif()
 
   if ( USE_XALAN AND USE_LIBXSLT )
-      set(USE_XALAN OFF)
+      set(USE_LIBXSLT OFF)
   endif()
   if ( USE_LIBXSLT )
       set(USE_LIBXML2 ON)

+ 1 - 0
common/workunit/referencedfilelist.cpp

@@ -358,6 +358,7 @@ IPropertyTree *ReferencedFile::getSpecifiedOrRemoteFileTree(IUserDescriptor *use
     if (!fileTree)
         return NULL;
     StringAttrBuilder daliipText(daliip);
+    remote->endpoint().getUrlStr(daliipText);
     filePrefix.set(remotePrefix);
     return fileTree.getClear();
 }

+ 24 - 0
common/workunit/workunit.cpp

@@ -446,6 +446,30 @@ private:
             unsigned __int64 value;
             collection.getStatistic(kind, value, i);
             formatStatistic(formattedValue.clear(), value, kind);
+
+                //Until 6.0 generate the backward compatible tag name
+            const char * legacyTreeTag = queryLegacyTreeTag(kind);
+            if (legacyTreeTag)
+            {
+                StatisticMeasure measure = queryMeasure(kind);
+                if (measure == SMeasureSkew)
+                {
+                    //Minimum stats were always output as +ve numbers
+                    if (queryStatsVariant(kind) == StSkewMin)
+                        value = -value;
+
+                    target->setPropInt64(legacyTreeTag, value/100);
+                }
+                else if (measure == SMeasureTimeNs)
+                {
+                    //Legacy timings are in ms => scale
+                    target->setPropInt64(legacyTreeTag, value/1000000);
+                }
+                else
+                    target->setProp(legacyTreeTag, formattedValue);
+            }
+
+            //Unconditionally output in the new format.
             target->setProp(queryTreeTag(kind), formattedValue);
         }
     }

+ 3 - 4
docs/ECLLanguageReference/ECLR_mods/BltInFunc-PRELOAD.xml

@@ -22,7 +22,7 @@
           <row>
             <entry><emphasis>file</emphasis></entry>
 
-            <entry>The name of a DATASET or INDEX definition.</entry>
+            <entry>The name of a DATASET definition.</entry>
           </row>
 
           <row>
@@ -46,13 +46,12 @@
   <para>The <emphasis role="bold">PRELOAD </emphasis>function leaves the
   <emphasis>file </emphasis>in memory after loading (valid only for Data
   Delivery Engine use). This is exactly equivalent to using the PRELOAD option
-  on the DATASET or INDEX definition.</para>
+  on the DATASET definition.</para>
 
   <para>Example:</para>
 
   <programlisting>MyFile := DATASET('MyFile',{STRING20 F1, STRING20 F2},THOR);
   COUNT(PRELOAD(MyFile))</programlisting>
 
-  <para>See Also: <link linkend="DATASET">DATASET</link>, <link
-  linkend="INDEX_record_structure">INDEX</link></para>
+  <para>See Also: <link linkend="DATASET">DATASET</link></para>
 </sect1>

+ 3 - 13
docs/ECLLanguageReference/ECLR_mods/Recrd-Index.xml

@@ -10,9 +10,7 @@
   role="bold">]</emphasis><emphasis> keys, indexfile </emphasis><emphasis
   role="bold">[,SORTED<indexterm>
       <primary>SORTED</primary>
-    </indexterm>] [,PRELOAD<indexterm>
-      <primary>PRELOAD</primary>
-    </indexterm>]</emphasis><emphasis role="bold"> [,OPT<indexterm>
+    </indexterm>] [,OPT<indexterm>
       <primary>OPT</primary>
     </indexterm>] [,COMPRESSED<indexterm>
       <primary>COMPRESSED</primary>
@@ -34,8 +32,8 @@
   <para><emphasis> attr</emphasis><emphasis role="bold"> :=
   INDEX([</emphasis><emphasis> baserecset, </emphasis><emphasis
   role="bold">]</emphasis><emphasis> keys, payload, indexfile
-  </emphasis><emphasis role="bold">[,SORTED] [,PRELOAD][,OPT] [,COMPRESSED(
-  LZW | ROW | FIRST) ] [,DISTRIBUTED] [,FILEPOSITION( [
+  </emphasis><emphasis role="bold">[,SORTED] [,OPT] [,COMPRESSED( LZW | ROW |
+  FIRST) ] [,DISTRIBUTED] [,FILEPOSITION( [
   </emphasis><emphasis>flag</emphasis><emphasis role="bold">] ) ] [,
   MAXLENGTH<indexterm>
       <primary>MAXLENGTH</primary>
@@ -120,14 +118,6 @@
         </row>
 
         <row>
-          <entry><emphasis role="bold">PRELOAD</emphasis></entry>
-
-          <entry>Optional. Specifies that the <emphasis>indexfile</emphasis>
-          is left in memory after loading (valid only for Data Delivery Engine
-          use).</entry>
-        </row>
-
-        <row>
           <entry><emphasis role="bold">OPT</emphasis></entry>
 
           <entry><para>Optional. Specifies that using the index when the

+ 28 - 7
docs/Installing_and_RunningTheHPCCPlatform/Inst-Mods/hpcc_ldap.xml

@@ -353,9 +353,26 @@
         from the pop-up menu. <graphic fileref="../../images/LDAP_003.jpg"
         vendor="configmgrSS" /></para>
 
-        <para><emphasis role="bold">Note</emphasis>: The ldapServer component
-        is merely a definition that specifies an existing LDAP server. It does
-        not install one.</para>
+        <para><informaltable colsep="1" frame="all" rowsep="1">
+            <?dbfo keep-together="always"?>
+
+            <tgroup cols="2">
+              <colspec colwidth="49.50pt" />
+
+              <colspec />
+
+              <tbody>
+                <row>
+                  <entry><inlinegraphic
+                  fileref="../../images/caution.png" /></entry>
+
+                  <entry><emphasis role="bold">Note</emphasis>: The ldapServer
+                  component is merely a definition that specifies an existing
+                  LDAP server. It does not install one.</entry>
+                </row>
+              </tbody>
+            </tgroup>
+          </informaltable></para>
       </listitem>
 
       <listitem>
@@ -562,7 +579,11 @@
   <sect2>
     <title>Installing the Default Admin user</title>
 
-    <para>After enabling your configuration for LDAP security, you need to run
+    <para>After enabling your configuration for LDAP security, you must copy
+    your environment file to the /etc/HPCCSystems directory. See the section
+    <link linkend="configuring-a-multi-node-system"><emphasis>Configuring a
+    Multi-Node System</emphasis></link> for more info about configuring your
+    system. With the correct environment.xml file in place, you must then run
     the <emphasis role="bold">initldap</emphasis> utility that installs the
     initial security components and the default users.</para>
 
@@ -574,9 +595,9 @@
       the LDAPServer component(s) in the environment.xml bound to the
       configured ESPs.</para>
 
-      <para>Once you complete your configuration, (add the LDAPServer
-      components and distribute your environment.xml file) you must then run
-      the <emphasis role="bold">initldap</emphasis> utility.</para>
+      <para>Once you complete your configuration with LDAP components enabled
+      and have distributed your environment.xml file to all nodes, you must
+      then run the <emphasis role="bold">initldap</emphasis> utility.</para>
 
       <programlisting>sudo /opt/HPCCSystems/bin/initldap</programlisting>
 

+ 20 - 0
ecl/eclcc/eclcc.cpp

@@ -1089,6 +1089,9 @@ void EclCC::processSingleQuery(EclCompileInstance & instance,
         }
 
         parseCtx.ignoreUnknownImport = instance.ignoreUnknownImport;
+        bool exportDependencies = instance.wu->getDebugValueBool("exportDependencies",false);
+        if (exportDependencies)
+            parseCtx.nestedDependTree.setown(createPTree("Dependencies"));
 
         try
         {
@@ -1150,6 +1153,22 @@ void EclCC::processSingleQuery(EclCompileInstance & instance,
             if (instance.wu->getDebugValueBool("addTimingToWorkunit", true))
                 updateWorkunitTimeStat(instance.wu, SSTcompilestage, "compile:parseTime", StTimeElapsed, NULL, parseTimeNs);
 
+            if (exportDependencies)
+            {
+                StringBuffer dependenciesName;
+                if (instance.outputFilename && !streq(instance.outputFilename, "-"))
+                    addNonEmptyPathSepChar(dependenciesName.append(optOutputDirectory)).append(instance.outputFilename);
+                else
+                    dependenciesName.append(DEFAULT_OUTPUTNAME);
+                dependenciesName.append(".dependencies.xml");
+
+                Owned<IWUQuery> query = instance.wu->updateQuery();
+                associateLocalFile(query, FileTypeXml, dependenciesName, "Dependencies", 0);
+
+                saveXML(dependenciesName.str(), parseCtx.nestedDependTree);
+            }
+
+
             if (optIncludeMeta || optGenerateMeta)
                 instance.generatedMeta.setown(parseCtx.getMetaTree());
 
@@ -2190,6 +2209,7 @@ const char * const helpText[] = {
     "! -fapplyInstantEclTransformations Limit non file outputs with a CHOOSEN",
     "! -fapplyInstantEclTransformationsLimit Number of records to limit to",
     "! -fcheckAsserts          Check ASSERT() statements",
+    "! -fexportDependencies    Generate information about inter-definition dependencies",
     "! -fmaxCompileThreads     Number of compiler instances to compile the c++",
     "! -fnoteRecordSizeInGraph Add estimates of record sizes to the graph",
     "! -fpickBestEngine        Allow simple thor queries to be passed to thor",

+ 2 - 0
ecl/hql/hqlerrors.hpp

@@ -422,6 +422,8 @@
 #define ERR_RESULT_IGNORED_SCOPE    2395
 #define ERR_INDEX_DEPEND_DATASET    2396
 #define ERR_DUBIOUS_NAME            2397
+#define ERR_DUPLICATE_FILENAME      2398
+#define ERR_DUPLICATE_SOURCE        2399
 
 #define ERR_CPP_COMPILE_ERROR       2999
 

+ 7 - 7
ecl/hql/hqlgram.hpp

@@ -669,9 +669,9 @@ public:
     void enterType(const attribute &errpos, bool isParameteried);
     void leaveType(const YYSTYPE & errpos);
     void checkRecordTypesMatch(IHqlExpression *ds1, IHqlExpression *ds2, const attribute & errpos);
-    int checkRecordTypesSimilar(IHqlExpression *left, IHqlExpression *right, const attribute &atr, unsigned maxFields = (unsigned)-1);
-    bool checkRecordCreateTransform(HqlExprArray & assigns, IHqlExpression *leftExpr, IHqlExpression *leftSelect, IHqlExpression *rightExpr, IHqlExpression *rightSelect, const attribute &atr);
-    IHqlExpression * checkEnsureRecordsMatch(IHqlExpression * left, IHqlExpression * right, const attribute & errpos, bool rightIsRow);
+    int checkRecordTypesSimilar(IHqlExpression *left, IHqlExpression *right, const ECLlocation & errPos, unsigned maxFields = (unsigned)-1);
+    bool checkRecordCreateTransform(HqlExprArray & assigns, IHqlExpression *leftExpr, IHqlExpression *leftSelect, IHqlExpression *rightExpr, IHqlExpression *rightSelect, const ECLlocation & errPos);
+    IHqlExpression * checkEnsureRecordsMatch(IHqlExpression * left, IHqlExpression * right, const ECLlocation & errPos, bool rightIsRow);
     void ensureMapToRecordsMatch(OwnedHqlExpr & recordExpr, HqlExprArray & args, const attribute & errpos, bool isRow);
     void checkRecordIsValid(const attribute &atr, IHqlExpression *record);
     void checkValidRecordMode(IHqlExpression * dataset, attribute & atr, attribute & modeatr);
@@ -719,8 +719,8 @@ protected:
     void checkNotAlreadyDefined(IIdAtom * name, IHqlScope * scope, const attribute & idattr);
     void checkNotAlreadyDefined(IIdAtom * name, const attribute & idattr);
     void checkBuildIndexFilenameFlags(IHqlExpression * dataset, attribute & flags);
-    IHqlExpression * createBuildFileFromTable(IHqlExpression * table, attribute & flagsAttr, IHqlExpression * filename, attribute & errpos);
-    IHqlExpression * createBuildIndexFromIndex(attribute & indexAttr, attribute & flagsAttr, IHqlExpression * filename, attribute & errpos);
+    IHqlExpression * createBuildFileFromTable(IHqlExpression * table, const HqlExprArray & createBuildFileFromTable, IHqlExpression * filename, attribute & errpos);
+    IHqlExpression * createBuildIndexFromIndex(attribute & indexAttr, attribute & flagsAttr, attribute & errpos);
     void checkOutputRecord(attribute & errpos, bool outerLevel);
     void checkSoapRecord(attribute & errpos);
     IHqlExpression * checkOutputRecord(IHqlExpression *record, const attribute & errpos, bool & allConstant, bool outerLevel);
@@ -760,7 +760,7 @@ protected:
     void checkSizeof(ITypeInfo* expr, attribute& errpos, bool isDataset = false);
     void normalizeStoredNameExpression(attribute & a);
     void checkPatternFailure(attribute & attr);
-    void checkDistributer(attribute & err, HqlExprArray & args);
+    void checkDistributer(const ECLlocation & errPos, HqlExprArray & args);
     IHqlExpression * createScopedSequenceExpr();
     IHqlExpression * createPatternOr(HqlExprArray & args, const attribute & errpos);
     IHqlExpression * mapAlienArg(IHqlSimpleScope * scope, IHqlExpression * expr);
@@ -962,7 +962,7 @@ protected:
     void checkDistribution(attribute &errpos, IHqlExpression *newExpr, bool ignoreGrouping);
     void checkMergeInputSorted(attribute &atr, bool isLocal);
     void checkGrouped(attribute & atr);
-    void checkRegrouping(attribute & atr, HqlExprArray & args);
+    void checkRegrouping(const ECLlocation & errPos, HqlExprArray & args);
     void checkRecordsMatch(attribute & atr, HqlExprArray & args);
 
     IHqlExpression * transformRecord(IHqlExpression *dataset, IAtom * targetCharset, const attribute & errpos);

+ 17 - 25
ecl/hql/hqlgram.y

@@ -2310,17 +2310,7 @@ actionStmt
                         }
     | BUILD '(' startTopFilter optBuildFlags ')' endTopFilter
                         {
-                            parser->warnIfRecordPacked($3);
-                            $$.setExpr(parser->createBuildIndexFromIndex($3, $4, NULL, $5), $1);
-                            parser->processUpdateAttr($$);
-                        }
-    | BUILD '(' startTopFilter ',' expression optBuildFlags ')' endTopFilter
-                        {
-                            parser->normalizeExpression($5, type_string, false);
-                            parser->warnIfRecordPacked($3);
-
-                            OwnedHqlExpr filename = $5.getExpr();
-                            $$.setExpr(parser->createBuildIndexFromIndex($3, $6, filename, $7), $1);
+                            $$.setExpr(parser->createBuildIndexFromIndex($3, $4, $5), $1);
                             parser->processUpdateAttr($$);
                         }
     | OUTPUT '(' startTopFilter ',' optRecordDef endTopFilter optOutputFlags ')'
@@ -2850,21 +2840,22 @@ buildFlag
                             OwnedHqlExpr ds = $3.getExpr();
                             if (ds->getOperator() != no_table)
                                 parser->reportError(ERR_EXPECTED_DATASET, $3, "Expected parameter to be a DATASET definition");
-                            IHqlExpression * record = createAttribute(recordAtom, LINK(ds->queryRecord()));
-                            IHqlExpression * name = createAttribute(nameAtom, LINK(ds->queryChild(0)));
-                            $$.setExpr(createComma(record, name));
-                            $$.setPosition($1);
+                            $$.setExpr(NULL, $1);
                         }
     | commonAttribute
     | SORTED            {   $$.setExpr(createAttribute(sortedAtom)); $$.setPosition($1); }
-    | DISTRIBUTE '(' startTopFilter startDistributeAttrs optDistributeAttrs ')' endTopFilter
+    | dataSet
                         {
-                            IHqlExpression * arg = $3.getExpr();
-                            $5.release();   // They are only there to prevent s/r error with the dataset form.
-                            if (!isKey(arg))
-                                parser->reportError(ERR_EXPECTED_INDEX,$3,"Expected an index");
-                            $$.setExpr(createValue(no_distributer, makeNullType(), arg));
-                            $$.setPosition($1);
+                            //Ugly, but special case DISTRIBUTE '(' dataSet ')'
+                            OwnedHqlExpr ds = $1.getExpr();
+                            if (ds->getOperator() == no_distribute)
+                            {
+                                IHqlExpression * arg = ds->queryChild(0);
+                                if (!isKey(arg))
+                                    parser->reportError(ERR_EXPECTED_INDEX,$1,"Expected an index");
+                                ds.setown(createValue(no_distributer, makeNullType(), LINK(arg)));
+                            }
+                            $$.setExpr(ds.getClear(), $1);
                         }
     | MERGE             {
                             $$.setExpr(createAttribute(mergeAtom));
@@ -2946,6 +2937,7 @@ buildFlag
                             parser->normalizeExpression($3, type_numeric, false);
                             $$.setExpr(createExprAttribute(maxLengthAtom, $3.getExpr()), $1);
                         }
+    | expression
     ;
 
 localAttribute
@@ -7600,7 +7592,7 @@ dataSet
                         {
                             OwnedHqlExpr left = $1.getExpr();
                             OwnedHqlExpr right = $3.getExpr();
-                            parser->checkRecordTypesSimilar(left, right, $3);
+                            parser->checkRecordTypesSimilar(left, right, $3.pos);
 
                             OwnedHqlExpr seq = parser->createActiveSelectorSequence(left, right);
                             OwnedHqlExpr leftSelect = createSelector(no_left, left, seq);
@@ -8398,7 +8390,7 @@ simpleDataSet
                             OwnedHqlExpr ds = $3.getExpr();
                             HqlExprArray args;
                             ds->unwindList(args, no_comma);
-                            parser->checkRegrouping($3, args);
+                            parser->checkRegrouping($3.pos, args);
                             $$.setExpr(createDataset(no_regroup, args));
                             $$.setPosition($1);
                         }
@@ -8974,7 +8966,7 @@ simpleDataSet
                                     {
                                         if (isGrouped(cur) != isGrouped(compareDs))
                                             parser->reportError(ERR_GROUPING_MISMATCH, $1, "Branches of the condition have different grouping");
-                                        OwnedHqlExpr mapped = parser->checkEnsureRecordsMatch(compareDs, cur, $5, false);
+                                        OwnedHqlExpr mapped = parser->checkEnsureRecordsMatch(compareDs, cur, $5.pos, false);
                                         if (mapped != cur)
                                             args.replace(*mapped.getClear(), idx);
                                     }

+ 137 - 104
ecl/hql/hqlgram2.cpp

@@ -1088,7 +1088,7 @@ IHqlExpression * HqlGram::processIndexBuild(attribute & indexAttr, attribute * r
     if (flags)
         flags->unwindList(args, no_comma);
 
-    checkDistributer(flagsAttr, args);
+    checkDistributer(flagsAttr.pos, args);
     return createValue(no_buildindex, makeVoidType(), args);
 }
 
@@ -3884,7 +3884,7 @@ ITypeInfo *HqlGram::checkPromoteIfType(attribute &a1, attribute &a2)
     if (a1.isDataset() || a2.isDataset())
     {
         OwnedHqlExpr right = a2.getExpr();
-        a2.setExpr(checkEnsureRecordsMatch(a1.queryExpr(), right, a2, false));
+        a2.setExpr(checkEnsureRecordsMatch(a1.queryExpr(), right, a2.pos, false));
         ensureDataset(a1);
         ensureDataset(a2);
         return NULL;
@@ -3892,7 +3892,7 @@ ITypeInfo *HqlGram::checkPromoteIfType(attribute &a1, attribute &a2)
     if (a1.isDatarow() || a2.isDatarow())
     {
         OwnedHqlExpr right = a2.getExpr();
-        a2.setExpr(checkEnsureRecordsMatch(a1.queryExpr(), right, a2, true));
+        a2.setExpr(checkEnsureRecordsMatch(a1.queryExpr(), right, a2.pos, true));
         checkDatarow(a1);
         checkDatarow(a2);
         return NULL;
@@ -3900,7 +3900,7 @@ ITypeInfo *HqlGram::checkPromoteIfType(attribute &a1, attribute &a2)
     if (a1.isDictionary() || a2.isDictionary())
     {
         OwnedHqlExpr right = a2.getExpr();
-        a2.setExpr(checkEnsureRecordsMatch(a1.queryExpr(), right, a2, true));
+        a2.setExpr(checkEnsureRecordsMatch(a1.queryExpr(), right, a2.pos, true));
         checkDictionary(a1);
         checkDictionary(a2);
         return NULL;
@@ -6642,12 +6642,11 @@ void HqlGram::checkBuildIndexFilenameFlags(IHqlExpression * dataset, attribute &
 }
 
 
-IHqlExpression * HqlGram::createBuildFileFromTable(IHqlExpression * table, attribute & flagsAttr, IHqlExpression * filename, attribute & errpos)
+IHqlExpression * HqlGram::createBuildFileFromTable(IHqlExpression * table, const HqlExprArray & buildOptions, IHqlExpression * filename, attribute & errpos)
 {
     IHqlExpression * originAttr = table->queryAttribute(_origin_Atom);
     IHqlExpression * ds = originAttr->queryChild(0);
     IHqlExpression * mode = table->queryChild(2);
-    OwnedHqlExpr flags=flagsAttr.getExpr();
     if (!filename) filename = table->queryChild(0);
 
     HqlExprArray args;
@@ -6662,111 +6661,145 @@ IHqlExpression * HqlGram::createBuildFileFromTable(IHqlExpression * table, attri
         args.append(*createAttribute(xmlAtom, LINK(mode->queryChild(0))));
         break;
     }
-    if (flags)
+
+    ForEachItemIn(i, buildOptions)
     {
-        HqlExprArray expandedFlags;
-        flags->unwindList(expandedFlags, no_comma);
-        ForEachItemIn(i, expandedFlags)
-        {
-            IHqlExpression & cur = expandedFlags.item(i);
-            IAtom * name = cur.queryName();
-            if ((name == overwriteAtom) ||(name == backupAtom) || (name == namedAtom) || (name == updateAtom) || (name == expireAtom))
-                args.append(OLINK(cur));
-            else if (name == persistAtom)
-                args.append(*createAttribute(persistAtom, LINK(ds)));       // preserve so changes in representation don't affect crc.
-        }
+        IHqlExpression & cur = buildOptions.item(i);
+        IAtom * name = cur.queryName();
+        if ((name == overwriteAtom) ||(name == backupAtom) || (name == namedAtom) || (name == updateAtom) || (name == expireAtom))
+            args.append(OLINK(cur));
+        else if (name == persistAtom)
+            args.append(*createAttribute(persistAtom, LINK(ds)));       // preserve so changes in representation don't affect crc.
     }
+
     return createValue(no_output, makeVoidType(), args);
 }
 
-IHqlExpression * HqlGram::createBuildIndexFromIndex(attribute & indexAttr, attribute & flagsAttr, IHqlExpression * filename, attribute & errpos)
+IHqlExpression * queryRootIndex(IHqlExpression * index)
 {
-    OwnedHqlExpr index = indexAttr.getExpr();
     loop
     {
         node_operator op = index->getOperator();
         if (op == no_compound)
-            index.set(index->queryChild(1));
+            index = index->queryChild(1);
         else if (op == no_executewhen)
-            index.set(index->queryChild(0));
+            index = index->queryChild(0);
         else
-            break;
+            return index;
+    }
+}
+
+IHqlExpression * HqlGram::createBuildIndexFromIndex(attribute & indexAttr, attribute & flagsAttr, attribute & errpos)
+{
+    warnIfRecordPacked(indexAttr);
+
+    OwnedHqlExpr index = indexAttr.getExpr();
+    index.set(queryRootIndex(index));
+
+    HqlExprArray buildOptions;
+    flagsAttr.unwindCommaList(buildOptions);
+
+    LinkedHqlExpr filename;
+    LinkedHqlExpr sourceDataset;
+    ForEachItemInRev(iOption, buildOptions)
+    {
+        IHqlExpression & cur = buildOptions.item(iOption);
+        if (cur.isDataset())
+        {
+            if (sourceDataset)
+                reportError(ERR_DUPLICATE_SOURCE,errpos,"Source dataset cannot be specified more than once");
+            sourceDataset.set(&cur);
+            buildOptions.remove(iOption);
+        }
+        else if (!cur.isAttribute())
+        {
+            if (cur.getOperator() != no_distributer)
+            {
+                if (filename)
+                    reportError(ERR_DUPLICATE_FILENAME,errpos,"Index filename cannot be specified more than once");
+                filename.set(&cur);
+                buildOptions.remove(iOption);
+            }
+        }
     }
 
     if (!isKey(index))
     {
         if (index->getOperator() == no_table && index->hasAttribute(_origin_Atom))
-            return createBuildFileFromTable(index, flagsAttr, filename, errpos);
-        flagsAttr.release();
+            return createBuildFileFromTable(index, buildOptions, filename, errpos);
+
         reportError(ERR_EXPECTED_INDEX,indexAttr,"Expected an index as the first parameter");
         return createDataset(no_null, LINK(queryNullRecord()));
     }
 
-    IHqlExpression *dataset = LINK(index->queryChild(0));
+    LinkedHqlExpr dataset = index->queryChild(0);
     IHqlExpression *record = index->queryChild(1);
-    IHqlExpression *transform = NULL;
+    OwnedHqlExpr transform;
     if (index->getOperator() == no_keyindex)
     {
         if (!filename)
-            filename = index->queryChild(2);
+            filename.set(index->queryChild(2));
     }
     else
     {
-        transform = index->queryChild(2);
+        transform.set(index->queryChild(2));
         if (!filename)
-            filename = index->queryChild(3);
+            filename.set(index->queryChild(3));
     }
 
+    if (sourceDataset)
+        transform.setown(createDefaultAssignTransform(record, sourceDataset->queryNormalizedSelector(), indexAttr));
+
     //need to tag record scope in this case so it generates no_activetable as top selector
     OwnedHqlExpr distribution;
-
-    checkBuildIndexFilenameFlags(dataset, flagsAttr);
-    bool allConstant = true;
-    bool someMissing = false;
-    ForEachChild(idx, record)
+    if (!sourceDataset)
     {
-        IHqlExpression * field = record->queryChild(idx);
-        if (field->isAttribute())
-            continue;
-        IHqlExpression * value = field->queryChild(0);
-        if (!value)
-            someMissing = true;
-        else if (!value->isAttribute() && !value->isConstant())
-            allConstant = false;
-    }
-    if (someMissing)
-        reportError(ERR_KEYEDINDEXINVALID,indexAttr,"The index record contains fields with no mappings - cannot build an index on it");
-    else if (allConstant)
-        reportError(ERR_KEYEDINDEXINVALID,indexAttr,"The index record has no mappings from the dataset - cannot build an index on it");
-
-    IHqlExpression * newRecord = LINK(record);
-    if (!transform)
-        newRecord = checkBuildIndexRecord(newRecord, errpos);
+        bool allConstant = true;
+        bool someMissing = false;
+        ForEachChild(idx, record)
+        {
+            IHqlExpression * field = record->queryChild(idx);
+            if (field->isAttribute())
+                continue;
+            IHqlExpression * value = field->queryChild(0);
+            if (!value)
+                someMissing = true;
+            else if (!value->isAttribute() && !value->isConstant())
+                allConstant = false;
+        }
+        if (someMissing)
+            reportError(ERR_KEYEDINDEXINVALID,indexAttr,"The index record contains fields with no mappings - cannot build an index on it");
+        else if (allConstant)
+            reportError(ERR_KEYEDINDEXINVALID,indexAttr,"The index record has no mappings from the dataset - cannot build an index on it");
+    }
+
     IHqlExpression * select;
-    if (transform)
-        select = createDatasetF(no_newusertable, dataset, newRecord, LINK(transform), NULL); //createUniqueId(), NULL);
+    if (sourceDataset)
+        select = createDatasetF(no_newusertable, LINK(sourceDataset), LINK(record), LINK(transform), NULL); //createUniqueId(), NULL);
+    else if (transform)
+        select = createDatasetF(no_newusertable, LINK(dataset), LINK(record), LINK(transform), NULL); //createUniqueId(), NULL);
     else
-        select = createDatasetF(no_selectfields, dataset, newRecord, NULL); //createUniqueId(), NULL);
+    {
+        IHqlExpression * newRecord = checkBuildIndexRecord(LINK(record), errpos);
+        select = createDatasetF(no_selectfields, LINK(dataset), newRecord, NULL); //createUniqueId(), NULL);
+    }
+
     HqlExprArray args;
     args.append(*select);
     args.append(*LINK(filename));
-    OwnedHqlExpr flags=flagsAttr.getExpr();
-    if (flags)
+
+    ForEachItemIn(i, buildOptions)
     {
-        HqlExprArray extra;
-        flags->unwindList(extra, no_comma);
-        ForEachItemIn(i, extra)
-        {
-            IHqlExpression & cur = extra.item(i);
-            IAtom * name = cur.queryName();
-            if (name == distributedAtom)
-                distribution.setown(&cur);
-            else if (name == persistAtom)
-                args.append(*createAttribute(persistAtom, LINK(index)));        // preserve so changes in representation don't affect crc.
-            else
-                args.append(OLINK(cur));
-        }
+        IHqlExpression & cur = buildOptions.item(i);
+        IAtom * name = cur.queryName();
+        if (name == distributedAtom)
+            distribution.setown(&cur);
+        else if (name == persistAtom)
+            args.append(*createAttribute(persistAtom, LINK(index)));        // preserve so changes in representation don't affect crc.
+        else
+            args.append(OLINK(cur));
     }
+
     //Clone flags from the index that are required.
     ForEachChild(iflag, index)
     {
@@ -6799,7 +6832,7 @@ IHqlExpression * HqlGram::createBuildIndexFromIndex(attribute & indexAttr, attri
     if (distribution)
         args.append(*distribution.getClear());
 
-    checkDistributer(flagsAttr, args);
+    checkDistributer(flagsAttr.pos, args);
     return createValue(no_buildindex, makeVoidType(), args);
 }
 
@@ -8297,7 +8330,7 @@ void HqlGram::checkDedup(IHqlExpression *ds, IHqlExpression *flags, attribute &a
 {
 }
 
-void HqlGram::checkDistributer(attribute & err, HqlExprArray & args)
+void HqlGram::checkDistributer(const ECLlocation & errPos, HqlExprArray & args)
 {
     IHqlExpression * input = &args.item(0);
     IHqlExpression * inputPayload = queryAttribute(_payload_Atom, args);
@@ -8311,8 +8344,8 @@ void HqlGram::checkDistributer(attribute & err, HqlExprArray & args)
             unsigned numKeyedFields = firstPayloadField(index);
             unsigned inputKeyedFields = firstPayloadField(input->queryRecord(), inputPayload ? (unsigned)getIntValue(inputPayload->queryChild(0)) : 1);
             if (numKeyedFields != inputKeyedFields)
-                reportError(ERR_DISTRIBUTED_MISSING, err, "Index and DISTRIBUTE(index) have different numbers of keyed fields");
-            checkRecordTypesSimilar(args.item(0).queryRecord(), cur.queryChild(0)->queryRecord(), err, numKeyedFields);
+                reportError(ERR_DISTRIBUTED_MISSING, errPos, "Index and DISTRIBUTE(index) have different numbers of keyed fields");
+            checkRecordTypesSimilar(args.item(0).queryRecord(), cur.queryChild(0)->queryRecord(), errPos, numKeyedFields);
         }
     }
 }
@@ -8359,7 +8392,7 @@ void HqlGram::checkValidPipeRecord(const attribute & errpos, IHqlExpression * re
         checkValidCsvRecord(errpos, record);
 }
 
-int HqlGram::checkRecordTypesSimilar(IHqlExpression *left, IHqlExpression *right, const attribute &atr, unsigned maxFields)
+int HqlGram::checkRecordTypesSimilar(IHqlExpression *left, IHqlExpression *right, const ECLlocation & errPos, unsigned maxFields)
 {
     if (recordTypesMatch(left, right)) 
         return 0;
@@ -8375,9 +8408,9 @@ int HqlGram::checkRecordTypesSimilar(IHqlExpression *left, IHqlExpression *right
     if(lnumChildren != rnumChildren) 
     {
         if (getFieldCount(lrecord) != getFieldCount(rrecord))
-            reportError(ERR_TYPEMISMATCH_DATASET, atr, "Datasets must have the same number of fields: %d vs %d", lnumChildren, rnumChildren);
+            reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Datasets must have the same number of fields: %d vs %d", lnumChildren, rnumChildren);
         else
-            reportError(ERR_TYPEMISMATCH_DATASET, atr, "Datasets must have the same attributes");
+            reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Datasets must have the same attributes");
         return -1;
     }
     
@@ -8388,7 +8421,7 @@ int HqlGram::checkRecordTypesSimilar(IHqlExpression *left, IHqlExpression *right
         if (lfield->isAttribute() || rfield->isAttribute())
         {
             if (lfield != rfield)
-                reportError(ERR_TYPEMISMATCH_DATASET, atr, "Record attributes differ: %d vs %d", lnumChildren, rnumChildren);
+                reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Record attributes differ: %d vs %d", lnumChildren, rnumChildren);
         }
         
         assertex(lfield);
@@ -8413,55 +8446,55 @@ int HqlGram::checkRecordTypesSimilar(IHqlExpression *left, IHqlExpression *right
                 if (lAlien && rAlien && 
                     queryExpression(lType)->queryFunctionDefinition() == queryExpression(rType)->queryFunctionDefinition())
                 {
-                    reportError(ERR_TYPEMISMATCH_DATASET, atr, "Fields %s and %s use incompatible instances of the same user type %s",lfield->queryName()->str(), rfield->queryName()->str(), ltype.str());
+                    reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Fields %s and %s use incompatible instances of the same user type %s",lfield->queryName()->str(), rfield->queryName()->str(), ltype.str());
                 }
                 else
                 {
-                    reportError(ERR_TYPEMISMATCH_DATASET, atr, "Type mismatch for corresponding fields %s (%s) vs %s (%s)",lfield->queryName()->str(), ltype.str(), rfield->queryName()->str(), rtype.str());
+                    reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Type mismatch for corresponding fields %s (%s) vs %s (%s)",lfield->queryName()->str(), ltype.str(), rfield->queryName()->str(), rtype.str());
                 }
             }
         }
         else if(lchildrectype == NULL || rchildrectype == NULL) 
         {
-            reportError(ERR_TYPEMISMATCH_DATASET, atr, "Datasets must have the same types for field %d: one is Record, the other is not", idx+1);
+            reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Datasets must have the same types for field %d: one is Record, the other is not", idx+1);
             return -1;
         }
         
         // recursive call to check sub fields.
         if(lchildrectype && rchildrectype)
-            return checkRecordTypesSimilar(lfield, rfield, atr);
+            return checkRecordTypesSimilar(lfield, rfield, errPos);
     }
 
     return 0;
 }
 
 
-bool HqlGram::checkRecordCreateTransform(HqlExprArray & assigns, IHqlExpression *leftExpr, IHqlExpression *leftSelect, IHqlExpression *rightExpr, IHqlExpression *rightSelect, const attribute &atr)
+bool HqlGram::checkRecordCreateTransform(HqlExprArray & assigns, IHqlExpression *leftExpr, IHqlExpression *leftSelect, IHqlExpression *rightExpr, IHqlExpression *rightSelect, const ECLlocation & errPos)
 {
     if (leftExpr->getOperator() != rightExpr->getOperator())
     {
         if (leftExpr->isAttribute() || rightExpr->isAttribute())
-            reportError(ERR_TYPEMISMATCH_DATASET, atr, "Datasets must have the same attributes");
+            reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Datasets must have the same attributes");
         else
-            reportError(ERR_TYPEMISMATCH_DATASET, atr, "Datasets must have the same structure");
+            reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Datasets must have the same structure");
         return false;
     }
 
     switch (rightExpr->getOperator())
     {
     case no_ifblock:
-        return checkRecordCreateTransform(assigns, leftExpr->queryChild(1), leftSelect, rightExpr->queryChild(1), rightSelect, atr);
+        return checkRecordCreateTransform(assigns, leftExpr->queryChild(1), leftSelect, rightExpr->queryChild(1), rightSelect, errPos);
     case no_record:
     {
         unsigned lnumChildren = leftExpr->numChildren();
         unsigned rnumChildren = rightExpr->numChildren();
         if (lnumChildren != rnumChildren) 
         {
-            reportError(ERR_TYPEMISMATCH_DATASET, atr, "Datasets must have the same number of fields: %d vs %d", lnumChildren, rnumChildren);
+            reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Datasets must have the same number of fields: %d vs %d", lnumChildren, rnumChildren);
             return false;
         }
         for (unsigned i= 0; i < lnumChildren; i++)
-            if (!checkRecordCreateTransform(assigns, leftExpr->queryChild(i), leftSelect, rightExpr->queryChild(i), rightSelect, atr))
+            if (!checkRecordCreateTransform(assigns, leftExpr->queryChild(i), leftSelect, rightExpr->queryChild(i), rightSelect, errPos))
                 return false;
         return true;
     }
@@ -8483,7 +8516,7 @@ bool HqlGram::checkRecordCreateTransform(HqlExprArray & assigns, IHqlExpression
             IAtom * rightName = rightExpr->queryName();
             if (leftName != rightName)
             {
-                reportError(ERR_TYPEMISMATCH_DATASET, atr, "Name mismatch for corresponding fields %s vs %s",leftName->str(), rightName->str());
+                reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Name mismatch for corresponding fields %s vs %s",leftName->str(), rightName->str());
                 return false;
             }
 
@@ -8496,17 +8529,17 @@ bool HqlGram::checkRecordCreateTransform(HqlExprArray & assigns, IHqlExpression
                     StringBuffer ltype, rtype;
                     getFriendlyTypeStr(leftExpr, ltype);
                     getFriendlyTypeStr(rightExpr, rtype);
-                    reportError(ERR_TYPEMISMATCH_DATASET, atr, "Type mismatch for corresponding fields %s (%s) vs %s (%s)",leftName->str(), ltype.str(), rightName->str(), rtype.str());
+                    reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Type mismatch for corresponding fields %s (%s) vs %s (%s)",leftName->str(), ltype.str(), rightName->str(), rtype.str());
                 }
                 else
-                    reportError(ERR_TYPEMISMATCH_DATASET, atr, "Datasets must have the same types for field %s vs %s: one is Record, the other is not", leftName->str(), rightName->str());
+                    reportError(ERR_TYPEMISMATCH_DATASET, errPos, "Datasets must have the same types for field %s vs %s: one is Record, the other is not", leftName->str(), rightName->str());
                 return false;
             }
 
             if (rightExpr->isDatarow())
-                return checkRecordCreateTransform(assigns, leftRecord, leftSelected, rightRecord, rightSelected, atr);
+                return checkRecordCreateTransform(assigns, leftRecord, leftSelected, rightRecord, rightSelected, errPos);
 
-            assigns.append(*createAssign(LINK(leftSelected), checkEnsureRecordsMatch(leftSelected, rightSelected, atr, false)));
+            assigns.append(*createAssign(LINK(leftSelected), checkEnsureRecordsMatch(leftSelected, rightSelected, errPos, false)));
             return true;
         }
     }
@@ -8515,21 +8548,21 @@ bool HqlGram::checkRecordCreateTransform(HqlExprArray & assigns, IHqlExpression
 }
 
 
-IHqlExpression * HqlGram::checkEnsureRecordsMatch(IHqlExpression * left, IHqlExpression * right, const attribute & errpos, bool rightIsRow)
+IHqlExpression * HqlGram::checkEnsureRecordsMatch(IHqlExpression * left, IHqlExpression * right, const ECLlocation & errPos, bool rightIsRow)
 {
     //Need to add a project to make the field names correct, otherwise problems occur if one the left side is optimized away,
     //because that causes the record type and fields to change.
     if (recordTypesMatch(left, right)) 
         return LINK(right);
 
-    if (checkRecordTypesSimilar(left, right, errpos) != 0)
+    if (checkRecordTypesSimilar(left, right, errPos) != 0)
         return LINK(left); // error conditional - return something compatible with left
 
     HqlExprArray assigns;
     OwnedHqlExpr seq = createActiveSelectorSequence(right, NULL);
     OwnedHqlExpr rightSelect = createSelector(no_left, right, seq);
     OwnedHqlExpr leftSelect = getSelf(left);
-    if (!checkRecordCreateTransform(assigns, left->queryRecord(), leftSelect, right->queryRecord(), rightSelect, errpos))
+    if (!checkRecordCreateTransform(assigns, left->queryRecord(), leftSelect, right->queryRecord(), rightSelect, errPos))
         return LINK(right);
 
     IHqlExpression * transform = createValue(no_transform, makeTransformType(LINK(left->queryRecordType())), assigns);
@@ -8555,7 +8588,7 @@ void HqlGram::ensureMapToRecordsMatch(OwnedHqlExpr & defaultExpr, HqlExprArray &
         IHqlExpression * value = mapTo.queryChild(1);
         if (isGrouped(value) != isGrouped(expected))
             groupingDiffers = true;
-        OwnedHqlExpr checked = checkEnsureRecordsMatch(expected, value, errpos, isRow);
+        OwnedHqlExpr checked = checkEnsureRecordsMatch(expected, value, errpos.pos, isRow);
         if (value != checked)
         {
             args.replace(*replaceChild(&mapTo, 1, checked), i);
@@ -8567,7 +8600,7 @@ void HqlGram::ensureMapToRecordsMatch(OwnedHqlExpr & defaultExpr, HqlExprArray &
     {
         if (isGrouped(defaultExpr) != isGrouped(expected))
             groupingDiffers = true;
-        OwnedHqlExpr checked = checkEnsureRecordsMatch(expected, defaultExpr, errpos, isRow);
+        OwnedHqlExpr checked = checkEnsureRecordsMatch(expected, defaultExpr, errpos.pos, isRow);
         if (defaultExpr != checked)
         {
             defaultExpr.set(checked);
@@ -8703,15 +8736,15 @@ void HqlGram::checkGrouped(attribute & atr)
         reportError(ERR_ROLLUP_NOT_GROUPED, atr, "Input to activity must be grouped");
 }
 
-void HqlGram::checkRegrouping(attribute & atr, HqlExprArray & args)
+void HqlGram::checkRegrouping(const ECLlocation & errPos, HqlExprArray & args)
 {
     IHqlExpression * left = &args.item(0);
     ForEachItemIn(i, args)
     {
-        args.replace(*checkEnsureRecordsMatch(left, &args.item(i), atr, false), i);
+        args.replace(*checkEnsureRecordsMatch(left, &args.item(i), errPos, false), i);
         IHqlExpression * cur = &args.item(i);
         if (!isGrouped(cur))
-            reportError(ERR_ROLLUP_NOT_GROUPED, atr, "Input %d to REGROUP must be grouped", i);
+            reportError(ERR_ROLLUP_NOT_GROUPED, errPos, "Input %d to REGROUP must be grouped", i);
     }
 }
 
@@ -8811,7 +8844,7 @@ void HqlGram::createAppendFiles(attribute & targetAttr, attribute & leftAttr, at
     OwnedHqlExpr right = rightAttr.getExpr();
     if (left->isDatarow()) 
         left.setown(createDatasetFromRow(LINK(left)));
-    right.setown(checkEnsureRecordsMatch(left, right, rightAttr, right->isDatarow()));
+    right.setown(checkEnsureRecordsMatch(left, right, rightAttr.pos, right->isDatarow()));
     if (right->isDatarow())
         right.setown(createDatasetFromRow(LINK(right)));
     IHqlExpression * attr = kind ? createAttribute(kind) : NULL;
@@ -8826,7 +8859,7 @@ void HqlGram::createAppendDictionaries(attribute & targetAttr, attribute & leftA
     assertex(left->isDictionary());
     if (!right->isDictionary())
         reportError(WRN_UNSUPPORTED_FEATURE, rightAttr, "Only dictionary may be appended to dictionary");
-    right.setown(checkEnsureRecordsMatch(left, right, rightAttr, right->isDatarow()));
+    right.setown(checkEnsureRecordsMatch(left, right, rightAttr.pos, right->isDatarow()));
     // TODO: support for dict + row, dict + dataset
 //    if (right->isDatarow())
 //        right.setown(createDatasetFromRow(LINK(right)));
@@ -8856,7 +8889,7 @@ IHqlExpression * HqlGram::processIfProduction(attribute & condAttr, attribute &
     }
 
     if (left->queryRecord() && falseAttr)
-        right.setown(checkEnsureRecordsMatch(left, right, *falseAttr, false));
+        right.setown(checkEnsureRecordsMatch(left, right, falseAttr->pos, false));
 
     if (isGrouped(left) != isGrouped(right))
         reportError(ERR_GROUPING_MISMATCH, trueAttr, "Branches of the condition have different grouping");

+ 1 - 1
ecl/hqlcpp/hqlcpp.cpp

@@ -2864,7 +2864,7 @@ void HqlCppTranslator::buildExpr(BuildCtx & ctx, IHqlExpression * expr, CHqlBoun
         doBuildExprEvaluate(ctx, expr, tgt);
         return;
     case no_thor:
-        throwUnexpected();
+        throwUnexpectedX("THOR() annotation created in a scalar context");
 //      assertex(expr->queryType()->isScalar());
 //      buildExpr(ctx, expr->queryChild(0), tgt);
         return;

+ 1 - 1
ecl/hqlcpp/hqlcppds.cpp

@@ -4884,7 +4884,7 @@ IReferenceSelector * HqlCppTranslator::buildDatasetIndex(BuildCtx & ctx, IHqlExp
             specialCase = true;
             break;
         default:
-            specialCase = alwaysEvaluatesToBound(dataset);
+            specialCase = alwaysEvaluatesToBound(dataset) || hasSingleRow(dataset) || !canIterateInline(&ctx, dataset);
             break;
         }
 

+ 40 - 43
ecl/hqlcpp/hqlinline.cpp

@@ -51,7 +51,7 @@ enum
     HEFassigninline     = 0x0002,
     HEFiterateinline    = 0x0004,
     HEFevaluateinline   = 0x0008,                   // can evaluate without any temporary dataset being created (temporary row is ok)
-    HEFspillinline      = 0x0010,                   // I'm not sure I can do this - because whether it spills depends on how it is being used.
+//    HEFspillinline      = 0x0010,                   // I'm not sure I can do this - because whether it spills depends on how it is being used.
 
     RETassign       = HEFassigninline|HEFprocessinline,
     RETiterate      = HEFiterateinline|HEFassigninline|HEFprocessinline,
@@ -59,9 +59,9 @@ enum
 
 };
 
-#define canAssignNoSpill(childFlags)        ((childFlags & (HEFspillinline|HEFassigninline)) == HEFassigninline)
-#define canIterateNoSpill(childFlags)       ((childFlags & (HEFspillinline|HEFiterateinline)) == HEFiterateinline)
-#define canEvaluateNoSpill(childFlags)      ((childFlags & (HEFspillinline|HEFevaluateinline)) == HEFevaluateinline)
+#define canAssignNoSpill(childFlags)        ((childFlags & HEFassigninline) == HEFassigninline)
+#define canIterateNoSpill(childFlags)       ((childFlags & HEFiterateinline) == HEFiterateinline)
+#define canEvaluateNoSpill(childFlags)      ((childFlags & HEFevaluateinline) == HEFevaluateinline)
 
 // assign is superset of iterate, iterate is a superset of evaluate
 
@@ -86,6 +86,12 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
         }
     }
 
+    //This function should return the first value that matches:
+    // RETevaluate - the dataset is completely available, and all elements can be accessed directly
+    // RETiterate - each element in the dataset can be accessed without evaluating the entire dataset
+    // RETassign - the dataset can be assigned to a temporary
+    // 0 - the dataset cannot be evaluated inline, it requires a child query.
+
     node_operator op = expr->getOperator();
     switch (op)
     {
@@ -98,12 +104,16 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
             unsigned childFlags = getInlineFlags(ctx, expr->queryChild(0));
             if (childFlags == 0)
                 return 0;
-            if (isGrouped(expr))
-                return RETevaluate;
-            return RETevaluate|RETiterate;
+
+            //This always creates a temporary, so can be evaluated efficiently
+            return RETevaluate;
         }
     case no_dataset_alias:
         return getInlineFlags(ctx, expr->queryChild(0));
+    case no_call:
+    case no_externalcall:               // not so sure about this - should possibly be assignable only. (also no_call above)
+    case no_getresult:
+        return RETassign;
     }
 
     if (isGrouped(expr))
@@ -126,9 +136,8 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
             unsigned childFlags = getInlineFlags(ctx, expr->queryChild(0));
             if ((childFlags == 0) || queryRealChild(expr, 3))
                 return 0;
-            if (canIterateNoSpill(childFlags))
-                return RETevaluate;
-            return RETevaluate|HEFspillinline;
+            //Always effectively requires a temporary
+            return RETassign;
         }
     case no_hqlproject:
         //can't do a skip inside an inline project - since the generated code doesn't allow "continue" to be used.
@@ -145,13 +154,12 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
                 return 0;
             if (hasSingleRow(ds))
             {
-                if (canEvaluateNoSpill(childFlags))
-                    return RETevaluate;
-                return RETevaluate|HEFspillinline;
+                //Probably not worth iterating...
+                return RETassign;
             }
             if (canIterateNoSpill(childFlags))
                 return RETiterate;
-            return RETiterate|HEFspillinline;
+            return RETassign;
         }
     case no_selectmap:
     case no_selectnth:
@@ -165,9 +173,8 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
                     childFlags = getInlineFlags(ctx, ds->queryChild(0));
                     if (childFlags == 0)
                         return 0;
-                    if (canIterateNoSpill(childFlags))
-                        return RETevaluate;
-                    return RETevaluate|HEFspillinline;
+                    //Dataset will be calculated => can just access the 1st element
+                    return RETevaluate;
                 }
                 return 0;
             }
@@ -175,18 +182,16 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
                 return RETevaluate;
             if (canIterateNoSpill(childFlags))
                 return RETevaluate;
-            return RETevaluate|HEFspillinline;
+            return RETassign;
         }
     case no_filter:
         {
             unsigned childFlags = getInlineFlags(ctx, expr->queryChild(0));
             if (childFlags == 0)
                 return 0;
-            if (canIterateNoSpill(childFlags))
-                return RETiterate;
-            if (filterIsTableInvariant(expr) && canAssignNoSpill(childFlags))
+            if (!canIterateNoSpill(childFlags) && filterIsTableInvariant(expr))
                 return RETassign;
-            return RETiterate|HEFspillinline;
+            return RETiterate;
         }
     case no_choosen:        
     case no_index:
@@ -194,9 +199,11 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
             unsigned childFlags = getInlineFlags(ctx, expr->queryChild(0));
             if (childFlags == 0)
                 return 0;
+//            if (canEvaluateNoSpill(childFlags))
+//                return RETevaluate;
             if (canIterateNoSpill(childFlags))
                 return RETiterate;
-            return RETiterate|HEFspillinline;
+            return RETassign;
         }
     case no_limit:
         {
@@ -205,14 +212,11 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
             unsigned childFlags = getInlineFlags(ctx, expr->queryChild(0));
             if (childFlags == 0)
                 return 0;
-            unsigned flags = 0;
             if (canEvaluateNoSpill(childFlags))
-                flags |= RETevaluate;
+                return RETevaluate;
             if (canIterateNoSpill(childFlags))
-                flags |= RETiterate;
-            if (flags)
-                return flags;
-            return RETevaluate|HEFspillinline;
+                return RETiterate;
+            return RETassign;
         }
     case no_fail:
         return RETevaluate;
@@ -229,20 +233,17 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
             unsigned childFlags = getInlineFlags(ctx, expr->queryChild(0));
             if (childFlags == 0)
                 return 0;
-            return RETevaluate|HEFspillinline;
+            return RETassign;
         }
     case no_addfiles:
         {
-            unsigned ret = RETassign;
             for (unsigned i=0; i < 2; i++)
             {
                 unsigned childFlags = getInlineFlags(ctx, expr->queryChild(i));
                 if (childFlags == 0)
                     return 0;
-                if (childFlags & HEFspillinline)
-                    ret |= HEFspillinline;
             }
-            return ret;
+            return RETassign;
         }
     case no_if:
     case no_choose:
@@ -257,8 +258,6 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
                 unsigned childFlags = getInlineFlags(ctx, cur);
                 if (childFlags == 0)
                     return 0;
-                if (childFlags & HEFspillinline)
-                    ret |= HEFspillinline;
             }
             return ret;
         }
@@ -284,10 +283,6 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
         if (expr->hasAttribute(graphAtom))       // force it to appear in the graph
             return 0;
         return getInlineFlags(ctx, expr->queryChild(0));
-    case no_call:               
-    case no_externalcall:               // no so sure about this - should possibly be assignable only. (also no_call above)
-    case no_getresult:
-        return expr->isDatarow() ? RETevaluate : RETassign;
     case no_getgraphresult:
         if (expr->hasAttribute(_distributed_Atom))
             return 0;
@@ -319,7 +314,7 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
             return 0;
         return RETevaluate;
     case no_translated:
-        return RETassign|RETiterate|RETevaluate;
+        return RETevaluate;
     case no_projectrow:
         {
             unsigned childFlags = getInlineFlags(ctx, expr->queryChild(0));
@@ -348,7 +343,7 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
             unsigned childFlags = getInlineFlags(ctx, expr->queryChild(0));
             if (childFlags == 0)
                 return 0;
-            return RETevaluate|(childFlags & HEFspillinline);
+            return RETevaluate;
         }
     case no_activetable:
         return RETassign;
@@ -372,7 +367,9 @@ static unsigned calcInlineFlags(BuildCtx * ctx, IHqlExpression * expr)
             unsigned childRFlags = getInlineFlags(ctx, expr->queryChild(1));
             if ((childLFlags == 0) || (childRFlags == 0))
                 return 0;
-            return RETassign|((childLFlags|childRFlags) & HEFspillinline);
+            if (canIterateNoSpill(childLFlags) && canIterateNoSpill(childRFlags))
+                return RETiterate;
+            return RETassign;
         }
     case no_compound:
         return getInlineFlags(ctx, expr->queryChild(1));

+ 14 - 0
ecl/regress/issue8663.ecl

@@ -0,0 +1,14 @@
+
+myIndex1 := INDEX({ unsigned8 id, string10 name }, { unsigned4 cnt }, 'i1', FILEPOSITION(FALSE));
+myIndex2 := INDEX({ unsigned8 id, string10 name }, { unsigned4 cnt }, 'i2', FILEPOSITION(TRUE));
+
+ds1 := DATASET([
+        { 1, 'Gavin', 20 },
+        { 2, 'James', 15 },
+        { 3, 'Kelly', 27 }
+        ], { unsigned id, string name, unsigned8 cnt });
+        
+BUILD(myIndex1, ds1);
+BUILD(myIndex2, ds1);
+BUILD(myIndex2, ds1, 'i2copy');
+BUILD(myIndex2, 'i2copy2', ds1);

+ 13 - 0
ecl/regress/issue8663_err.ecl

@@ -0,0 +1,13 @@
+
+myIndex1 := INDEX({ unsigned8 id, string10 name }, { unsigned4 cnt }, 'i1', FILEPOSITION(FALSE));
+myIndex2 := INDEX({ unsigned8 id, string10 name }, { unsigned4 cnt }, 'i2', FILEPOSITION(TRUE));
+
+ds1 := DATASET([
+        { 1, 'Gavin', 20 },
+        { 2, 'James', 15 },
+        { 3, 'Kelly', 27 }
+        ], { unsigned id, string name, unsigned8 cnt });
+        
+BUILD(ds1, myIndex1);
+BUILD(myIndex2, ds1, ds1);
+BUILD(myIndex2, ds1, 'i2copy', 'i3copy');

+ 5 - 1
esp/services/esdl_svc_engine/esdl_binding.cpp

@@ -905,7 +905,7 @@ void EsdlBindingImpl::addService(const char * name,
 
                  m_pESDLService->configureTargets(m_esdlBndCfg, name);
 
-                CEspBinding::addService(name, host, port, service);
+                 CEspBinding::addService(name, host, port, service);
             }
             else
                 DBGLOG("ESDL Binding: Error adding service '%s': ESDL definition objectnot available", name);
@@ -1517,7 +1517,11 @@ bool EsdlBindingImpl::qualifyServiceName(IEspContext &context,
         methQName->clear();
         IEsdlDefMethod *mth = srv->queryMethodByName(methname);
         if (mth)
+        {
             methQName->append(mth->queryName());
+            addMethodDescription(methQName->str(), mth->queryProp(ESDL_METHOD_DESCRIPTION));
+            addMethodHelp(methQName->str(), mth->queryProp(ESDL_METHOD_HELP));
+        }
     }
     else if (methQName != NULL)
         methQName->clear();

+ 2 - 0
esp/services/esdl_svc_engine/esdl_binding.hpp

@@ -33,6 +33,8 @@ static const char* ESDL_DEF_ENTRY="Definition";
 static const char* ESDL_BINDINGS_ROOT_PATH="/ESDL/Bindings/";
 static const char* ESDL_BINDING_PATH="/ESDL/Bindings/Binding";
 static const char* ESDL_BINDING_ENTRY="Binding";
+static const char* ESDL_METHOD_DESCRIPTION="description";
+static const char* ESDL_METHOD_HELP="help";
 
 #define SDS_LOCK_TIMEOUT (30*1000) // 5mins, 30s a bit short
 

+ 3 - 1
initfiles/bash/etc/init.d/hpcc_common.in

@@ -549,7 +549,8 @@ stop_component() {
                             removePid ${PIDPATH}
                             unlock ${LOCKPATH}
                             log_success_msg
-                            return 0
+                            RCSTOP=0
+                            break 2
                         fi
                     fi
                     sleep 1
@@ -563,6 +564,7 @@ stop_component() {
         log_success_msg
         RCSTOP=0
     fi
+
     return ${RCSTOP}
 }
 

+ 19 - 11
initfiles/componentfiles/thor/start_slaves

@@ -35,25 +35,33 @@ fi
 # this must match jsocket hard limit
 export handlelimit=32768
 
-echo `date` Initializing dafilesrv setup
-sudo /etc/init.d/hpcc-init -c dafilesrv setup
-echo `date` Dafilesrv setup done
+
+# ensuring parent directory structure is set up properly
+sudo /etc/init.d/dafilesrv status &>/dev/null
+if [ $? -ne 0 ];then
+  sudo /etc/init.d/dafilesrv start &>/dev/null
+  if [ $? -ne 0 ];then
+    exit 1
+  fi
+fi
+
 mkdir -p $instancedir
 mkdir -p `dirname $logredirect`
 exec >>$logredirect 2>&1
 
 cd $instancedir
 
-echo "slave(${ip}) init `date`"
+echo "`date` Dependency dafilesrv is running"
+echo "`date` slave(${ip}) init"
 
 lckfile="$PID/start_slaves_${hpcc_compname}_${ip}.pid"
 
 # prevent two of these scripts starting together
 while [ -e $lckfile ]; do
-  echo waiting on lckfile: $lckfile
+  echo "`date` waiting on lckfile: ${lckfile}"
   oldpid=`cat $lckfile`
   if ps h $oldpid; then
-     echo killing pid $oldpid start_slaves
+     echo "`date` killing pid ${oldpid} start_slaves"
      kill -9 $oldpid
      rm $lckfile                   # just in case
      sleep 1
@@ -67,13 +75,13 @@ echo $$ > $lckfile
 ulimit -c unlimited
 ulimit -n $handlelimit
 
-echo "slave(s) starting `date`"
+echo "`date` slave(s) starting"
 
 # create symlink for easier identification of slaves by compName
 ln -s -f $deploydir/thorslave_lcr thorslave_${hpcc_compname}
 
 # sync to current master thorgroup
-echo rsync -e "ssh -o StrictHostKeyChecking=no" $master:$instancedir/thorgroup $instancedir/thorgroup.slave
+echo "`date` rsync -e ssh -o StrictHostKeyChecking=no ${master}:${instancedir}/thorgroup ${instancedir}/thorgroup.slave"
 rsync -e "ssh -o StrictHostKeyChecking=no" $master:$instancedir/thorgroup $instancedir/thorgroup.slave
 
 let "slavenum = 1";
@@ -84,15 +92,15 @@ for slave in $(cat $instancedir/thorgroup.slave); do
     if [ "$slaveport" = "" ]; then
       slaveport=$THORSLAVEPORT
     fi
-    echo thorslave_$hpcc_compname  master=$master:$masterport slave=.:$slaveport slavenum=$slavenum logDir=$logpth
+    echo "`date` thorslave_$hpcc_compname  master=$master:$masterport slave=.:$slaveport slavenum=$slavenum logDir=$logpth"
     ./thorslave_$hpcc_compname master=$master:$masterport slave=.:$slaveport slavenum=$slavenum logDir=$logpth 2>/dev/null 1>/dev/null &
     slavepid=$!
     PID_NAME="$PID/${hpcc_compname}_slave_${slavenum}.pid"
     echo $slavepid > $PID_NAME
     if [ "$slavepid" -eq "0" ]; then
-      echo "failed to start at `date`"
+      echo "`date` failed to start"
     else
-      echo "slave pid $slavepid started `date`"
+      echo "`date` slave pid $slavepid started"
     fi
   fi
   let "slavenum += 1";

+ 2 - 2
plugins/memcached/memcachedplugin.hpp

@@ -57,8 +57,8 @@ extern "C++"
     ECL_MEMCACHED_API unsigned __int64 ECL_MEMCACHED_CALL MGetUint8 (ICodeContext * _ctx, const char * options, const char * key, const char * partitionKey);
     ECL_MEMCACHED_API double           ECL_MEMCACHED_CALL MGetDouble(ICodeContext * _ctx, const char * options, const char * key, const char * partitionKey);
     ECL_MEMCACHED_API void             ECL_MEMCACHED_CALL MGetUtf8  (ICodeContext * _ctx, size32_t & valueLength, char * & returnValue, const char * options, const char * key, const char * partitionKey);
-    ECL_MEMCACHED_API void             ECL_MEMCACHED_CALL MGetStr   (ICodeContext * _ctx, size32_t & valueLength, UChar * & returnValue, const char * options, const char * key, const char * partitionKey);
-    ECL_MEMCACHED_API void             ECL_MEMCACHED_CALL MGetUChar (ICodeContext * _ctx, size32_t & valueLength, char * & returnValue, const char * options, const char * key, const char * partitionKey);
+    ECL_MEMCACHED_API void             ECL_MEMCACHED_CALL MGetStr   (ICodeContext * _ctx, size32_t & valueLength, char * & returnValue, const char * options, const char * key, const char * partitionKey);
+    ECL_MEMCACHED_API void             ECL_MEMCACHED_CALL MGetUChar (ICodeContext * _ctx, size32_t & valueLength, UChar * & returnValue, const char * options, const char * key, const char * partitionKey);
     ECL_MEMCACHED_API void             ECL_MEMCACHED_CALL MGetData  (ICodeContext * _ctx,size32_t & returnLength, void * & returnValue, const char * options, const char * key, const char * partitionKey);
     //--------------------------------AUXILLARIES---------------------------
     ECL_MEMCACHED_API bool             ECL_MEMCACHED_CALL MExist  (ICodeContext * _ctx, const char * options, const char * key, const char * partitionKey);

+ 10 - 12
system/jlib/jsocket.cpp

@@ -4340,7 +4340,10 @@ class CSocketEpollThread: public CSocketBaseThread
             else
                 dummysock = ::socket(AF_INET, SOCK_STREAM, 0);
             CHECKSOCKRANGE(dummysock);
-            epoll_op(epfd, EPOLL_CTL_ADD, dummysock, (EPOLLIN | EPOLLERR));
+            // added EPOLLIN also because cannot find anywhere MSG_OOB is sent
+            // added here to match existing select() code above which sets
+            // the except fd_set mask.
+            epoll_op(epfd, EPOLL_CTL_ADD, dummysock, (EPOLLIN | EPOLLPRI));
 #endif
             dummysockopen = true;
         }
@@ -4487,16 +4490,15 @@ public:
                     if (si.mode != 0) {
                         ep_mode = 0;
                         if (si.mode & SELECTMODE_READ) {
-                            ep_mode |= (EPOLLIN | EPOLLPRI);
+                            ep_mode |= EPOLLIN;
                         }
                         if (si.mode & SELECTMODE_WRITE) {
                             ep_mode |= EPOLLOUT;
                         }
                         if (si.mode & SELECTMODE_EXCEPT) {
-                            ep_mode |= EPOLLERR;
+                            ep_mode |= EPOLLPRI;
                         }
                         if (ep_mode != 0) {
-                            ep_mode |= EPOLLRDHUP;
                             epoll_op(epfd, EPOLL_CTL_ADD, si.handle, ep_mode);
                         }
                     }
@@ -4648,19 +4650,15 @@ public:
                                 SelectItem *epsi = items.getArray(epfdtbl[epevents[j].data.fd]);
                                 if (!epsi->del) {
                                     unsigned int ep_mode = 0;
-                                    if (epevents[j].events & (EPOLLIN | EPOLLPRI)) {
-                                        ep_mode |= SELECTMODE_READ;
-                                    }
-                                    if (epevents[j].events & (EPOLLERR | EPOLLHUP)) {
-                                        ep_mode |= SELECTMODE_READ;
-                                    }
-                                    if (epevents[j].events & EPOLLRDHUP) {
-                                        // TODO - or should we set EXCEPT ?
+                                    if (epevents[j].events & (EPOLLIN | EPOLLHUP | EPOLLERR)) {
                                         ep_mode |= SELECTMODE_READ;
                                     }
                                     if (epevents[j].events & EPOLLOUT) {
                                         ep_mode |= SELECTMODE_WRITE;
                                     }
+                                    if (epevents[j].events & EPOLLPRI) {
+                                        ep_mode |= SELECTMODE_EXCEPT;
+                                    }
                                     if (ep_mode != 0) {
                                         tonotify.append(*epsi);
                                         tonotify.element(tonotify.length()-1).mode = ep_mode;

+ 32 - 14
system/jlib/jstats.cpp

@@ -396,7 +396,7 @@ extern jlib_decl StatsMergeAction queryMergeMode(StatisticKind kind)
     "@" #x "Avg" # y, \
     "@Skew" # y, \
     "@SkewMin" # y, \
-    "SkewMax" # y, \
+    "@SkewMax" # y, \
     "@NodeMin" # y, \
     "@NodeMax" # y,
 
@@ -406,26 +406,34 @@ extern jlib_decl StatsMergeAction queryMergeMode(StatisticKind kind)
     BASE_TAGS(x, y) \
     "@" #x "Delta" # y
 
-//Define tags when the default needs to be overriden
-#define XTAGS(x, y, dft) \
-    dft, \
-    BASE_TAGS(x, y) \
-    #x "Delta" # y
-
 //Define the tags for time items.
 #define TIMETAGS(x, y) \
     "@" #x #y, \
     BASE_TAGS(x, y) \
     "@TimeDelta" # y
 
-#define STAT(x, y, m) St##x##y, m, { NAMES(x, y) }, { TAGS(x, y) }
-#define TAGSTAT(x, y, m, dft) St##x##y, m, { NAMES(x, y) }, { XTAGS(x, y, dft) }
+#define LEGACYTAGS(dft)    \
+        dft, \
+        NULL, \
+        NULL, \
+        NULL, \
+        NULL, \
+        NULL, \
+        NULL, \
+        NULL, \
+        NULL, \
+        NULL
+
+#define CORESTAT(x, y, m)     St##x##y, m, { NAMES(x, y) }, { TAGS(x, y) }
+#define STAT(x, y, m)         CORESTAT(x, y, m), { LEGACYTAGS(NULL) }
+#define TAGSTAT(x, y, m, dft) St##x##y, m, { NAMES(x, y) }, { TAGS(x, y) }, { LEGACYTAGS(dft) }
+
 
 //--------------------------------------------------------------------------------------------------------------------
 
 //These are the macros to use to define the different entries in the stats meta table
 #define TIMESTAT(y) STAT(Time, y, SMeasureTimeNs)
-#define WHENSTAT(y) St##When##y, SMeasureTimestampUs, { TIMENAMES(When, y) }, { TIMETAGS(When, y) }
+#define WHENSTAT(y) St##When##y, SMeasureTimestampUs, { TIMENAMES(When, y) }, { TIMETAGS(When, y) }, { LEGACYTAGS(NULL) }
 #define NUMSTAT(y) STAT(Num, y, SMeasureCount)
 #define SIZESTAT(y) STAT(Size, y, SMeasureSize)
 #define LOADSTAT(y) STAT(Load, y, SMeasureLoad)
@@ -447,6 +455,7 @@ public:
     StatisticMeasure measure;
     const char * names[StNextModifier/StVariantScale];
     const char * tags[StNextModifier/StVariantScale];
+    const char * legacytags[StNextModifier/StVariantScale];
 };
 
 static const StatisticMeta statsMetaData[StMax] = {
@@ -461,13 +470,13 @@ static const StatisticMeta statsMetaData[StMax] = {
     { WHENSTAT(Compiled) },
     { WHENSTAT(WorkunitModified) },
     { TIMESTAT(Elapsed) },
-    { TIMESTAT2(LocalExecute, "@localTime") },
+    { CORESTAT(Time, LocalExecute, SMeasureTimeNs), { "@localTime", "@timeMinMs", "@timeMaxMs" } },
     { TIMESTAT2(TotalExecute, "@totalTime") },
     { TIMESTAT(Remaining) },
     { SIZESTAT(GeneratedCpp) },
     { SIZESTAT(PeakMemory) },
     { SIZESTAT(MaxRowSize) },
-    { NUMSTAT2(RowsProcessed, "@count") },
+    { CORESTAT(Num, RowsProcessed, SMeasureCount), { "@count", "@min", "@max", NULL, "skew", "minskew", "maxskew", NULL, NULL, NULL } },
     { NUMSTAT2(Slaves, "@slaves") },
     { NUMSTAT2(Started, "@started") },
     { NUMSTAT2(Stopped, "@stopped") },
@@ -511,8 +520,8 @@ static const StatisticMeta statsMetaData[StMax] = {
 
 StatisticMeasure queryMeasure(StatisticKind kind)
 {
-    unsigned varient = (kind & ~StKindMask);
-    switch (varient)
+    unsigned variant = queryStatsVariant(kind);
+    switch (variant)
     {
     case StSkew:
     case StSkewMin:
@@ -567,6 +576,15 @@ const char * queryTreeTag(StatisticKind kind)
     return statsMetaData[rawkind].tags[variant];
 }
 
+const char * queryLegacyTreeTag(StatisticKind kind)
+{
+    StatisticKind rawkind = (StatisticKind)(kind & StKindMask);
+    unsigned variant = (kind / StVariantScale);
+    dbgassertex(rawkind >= StKindNone && rawkind < StMax);
+    dbgassertex(variant < (StNextModifier/StVariantScale));
+    return statsMetaData[rawkind].legacytags[variant];
+}
+
 //--------------------------------------------------------------------------------------------------------------------
 
 StatisticKind queryStatisticKind(const char * search)

+ 3 - 0
system/jlib/jstats.h

@@ -181,6 +181,8 @@ enum StatisticKind
 
 };
 
+inline StatisticKind queryStatsVariant(StatisticKind kind) { return (StatisticKind)(kind & ~StKindMask); }
+
 //---------------------------------------------------------------------------------------------------------------------
 
 interface IStatistic : extends IInterface
@@ -571,6 +573,7 @@ extern jlib_decl StatisticMeasure queryMeasure(StatisticKind kind);
 extern jlib_decl const char * queryStatisticName(StatisticKind kind);
 extern jlib_decl void queryLongStatisticName(StringBuffer & out, StatisticKind kind);
 extern jlib_decl const char * queryTreeTag(StatisticKind kind);
+extern jlib_decl const char * queryLegacyTreeTag(StatisticKind kind);
 extern jlib_decl const char * queryCreatorTypeName(StatisticCreatorType sct);
 extern jlib_decl const char * queryScopeTypeName(StatisticScopeType sst);
 extern jlib_decl const char * queryMeasureName(StatisticMeasure measure);

+ 0 - 1
system/jlib/jstring.cpp

@@ -97,7 +97,6 @@ StringBuffer::StringBuffer(bool useInternal)
 
 StringBuffer::~StringBuffer()
 {
-    dbgassertex(buffer);
     freeBuffer();
 }
 

+ 8 - 0
testing/regress/README.rst

@@ -24,6 +24,7 @@ Result:
 |                       [-X name1=value1[,name2=value2...]]
 |                       [-f optionA=valueA[,optionB=valueB...]]
 |                       [--pq threadNumber]
+|                       [--noversion]
 |                       [--runclass class[,class,...]]
 |                       [--excludeclass class[,class,...]]
 |                       {list,setup,run,query} ...
@@ -53,6 +54,7 @@ Result:
 |        -f optionA=valueA[,optionB=valueB...]
 |                                 set an ECL option (equivalent to #option).
 |        --pq threadNumber        parallel query execution with threadNumber threads. (If threadNumber is '-1' on a single node system then threadNumber = numberOfLocalCore * 2)
+|        --noversion              avoid version expansion of queries. Execute them as a standard test.
 |        --runclass class[,class,...], -r class[,class,...]
 |                                 run subclass(es) of the suite. Default value is 'all'
 |        --excludeclass class[,class,...], -e class[,class,...]
@@ -115,6 +117,7 @@ Result:
 |                             [-X name1=value1[,name2=value2...]]
 |                             [-f optionA=valueA[,optionB=valueB...]]
 |                             [--pq threadNumber]
+|                             [--noversion]
 |                             [--runclass class[,class,...]]
 |                             [--excludeclass class[,class,...]]
 |                             [--target [target_cluster_list | all]]
@@ -135,6 +138,7 @@ Result:
 |        -f optionA=valueA[,optionB=valueB...]
 |                                 set an ECL option (equivalent to #option).
 |        --pq threadNumber        parallel query execution with threadNumber threads. (If threadNumber is '-1' on a single node system then threadNumber = numberOfLocalCore * 2)
+|        --noversion              avoid version expansion of queries. Execute them as a standard test.
 |        --runclass class[,class,...], -r class[,class,...]
 |                                 run subclass(es) of the suite. Default value is 'all'
 |        --excludeclass class[,class,...], -e class[,class,...]
@@ -162,6 +166,7 @@ Result:
 |                           [-X name1=value1[,name2=value2...]]
 |                           [-f optionA=valueA[,optionB=valueB...]]
 |                           [--pq threadNumber]
+|                           [--noversion]
 |                           [--runclass class[,class,...]]
 |                           [--excludeclass class[,class,...]]
 |                           [--target [target_cluster_list | all]]
@@ -183,6 +188,7 @@ Result:
 |        -f optionA=valueA[,optionB=valueB...]
 |                                 set an ECL option (equivalent to #option).
 |        --pq threadNumber        parallel query execution with threadNumber threads. (If threadNumber is '-1' on a single node system then threadNumber = numberOfLocalCore * 2)
+|        --noversion              avoid version expansion of queries. Execute them as a standard test.
 |        --runclass class[,class,...], -r class[,class,...]
 |                                 run subclass(es) of the suite. Default value is 'all'
 |        --excludeclass class[,class,...], -e class[,class,...]
@@ -212,6 +218,7 @@ Result:
 |                             [-X name1=value1[,name2=value2...]]
 |                             [-f optionA=valueA[,optionB=valueB...]]
 |                             [--pq threadNumber]
+|                             [--noversion]
 |                             [--runclass class[,class,...]]
 |                             [--excludeclass class[,class,...]]
 |                             [--target [target_cluster_list | all]]
@@ -237,6 +244,7 @@ Result:
 |        -f optionA=valueA[,optionB=valueB...]
 |                                 set an ECL option (equivalent to #option).
 |        --pq threadNumber        parallel query execution with threadNumber threads. (If threadNumber is '-1' on a single node system then threadNumber = numberOfLocalCore * 2)
+|        --noversion              avoid version expansion of queries. Execute them as a standard test.
 |        --runclass class[,class,...], -r class[,class,...]
 |                                 run subclass(es) of the suite. Default value is 'all'
 |        --excludeclass class[,class,...], -e class[,class,...]

+ 2 - 0
testing/regress/ecl-test

@@ -159,6 +159,8 @@ class RegressMain:
                             nargs=1, type=checkXParam,  default='None',  metavar="optionA=valueA[,optionB=valueB...]")
         commonParser.add_argument('--pq', help="Parallel query execution with threadNumber threads. (If threadNumber is '-1' on a single node system then threadNumber = numberOfLocalCore * 2 )",
                                 type=checkPqParam,  default = 0,   metavar="threadNumber")
+        commonParser.add_argument('--noversion', help="Avoid version expansion of queries. Execute them as a standard test.",
+                                action = 'store_true')
 
         executionParser=argparse.ArgumentParser(add_help=False)
         executionParser.add_argument('--runclass', '-r', help="run subclass(es) of the suite. Default value is 'all'",

+ 3 - 1
testing/regress/hpcc/common/report.py

@@ -50,8 +50,8 @@ class Report:
     def display(self, log=None,  elapsTime = 0):
         logging.debug("Report::display(log='%s', elapsTime:%d",  log,  elapsTime)
         reportStr = "\n"
-        reportStr += "Results\n"
         reportStr += "-------------------------------------------------\n"
+        reportStr += "Result:\n"
         reportStr += "Passing: %i\n" % len(self.report._pass)
         reportStr += "Failure: %i\n" % len(self.report._fail)
         reportStr += "-------------------------------------------------\n"
@@ -68,9 +68,11 @@ class Report:
                     logging.debug("Exception:'%s'",  str(ex))
                     #reportStr += str(result.Diff)
             if len(passStr):
+                reportStr += "Output:\n"
                 reportStr += passStr
                 reportStr += "-------------------------------------------------\n"
         if self.report._fail:
+            reportStr += "Error:\n"
             for result in self.report._fail:
                 if len(result.Diff) > 0:
                     try:

+ 5 - 4
testing/regress/hpcc/regression/regress.py

@@ -96,19 +96,20 @@ class Regression:
                 numOfThreads = args.pq
         self.loggermutex = thread.allocate_lock()
         self.numOfCpus = 2
+        self.threadPerCpu = 2
         ver = getVersionNumbers()
         if numOfThreads == -1:
             if (ver['main'] >= 2) and (ver['minor'] >= 7):
                 if 'linux' in sys.platform :
-                    command = 'grep cores /proc/cpuinfo | sort -u'
+                    command = "grep 'core\|processor' /proc/cpuinfo | awk '{print $3}' | sort -nru | head -1"
                     cpuInfo = os.popen(command).read()
                     if cpuInfo == "":
                         self.numOfCpus = 1
                     else:
-                        self.numOfCpus = int(cpuInfo.split()[3])
-                numOfThreads = self.numOfCpus  * 2
+                        self.numOfCpus = int(cpuInfo)+1
+                numOfThreads = self.numOfCpus  * self.threadPerCpu
             elif (ver['main'] <= 2) and (ver['minor'] < 7):
-                    numOfThreads = self.numOfCpus  * 2
+                    numOfThreads = self.numOfCpus  * self.threadPerCpu
         logging.debug("Number of CPUs:%d, NUmber of threads:%d", self.numOfCpus, numOfThreads  )
 
         self.maxthreads = numOfThreads

+ 1 - 1
testing/regress/hpcc/regression/suite.py

@@ -120,7 +120,7 @@ class Suite:
 
     def addFileToSuite(self, eclfile):
         haveVersions = eclfile.testVesion()
-        if haveVersions:
+        if haveVersions and not self.args.noversion:
             basename = eclfile.getEcl()
             files=[]
             versions = eclfile.getVersions()

+ 11 - 8
testing/regress/hpcc/util/ecl/file.py

@@ -75,11 +75,12 @@ class ECLFile:
         self.version=''
         self.versionId=0
         self.timeout = 0
+        self.args = args
 
         #If there is a --publish CL parameter then force publish this ECL file
         self.forcePublish=False
-        if 'publish' in args:
-            self.forcePublish=args.publish
+        if 'publish' in self.args:
+            self.forcePublish=self.args.publish
 
         self.optX =[]
         self.optXHash={}
@@ -316,7 +317,7 @@ class ECLFile:
 
     # Test (and read all) //version tag in the ECL file
     def testVesion(self):
-        if self.isVersions == False:
+        if self.isVersions == False and not self.args.noversion:
             tag = b'//version'
             logging.debug("%3d. testVesion (ecl:'%s', tag:'%s')", self.taskId, self.ecl, tag)
             retVal = False
@@ -382,11 +383,13 @@ class ECLFile:
                 raise IOError("RESULT FILE NOT FOUND. " + self.getResults())
             expected = open(expectedKeyPath, 'r').readlines()
             recieved = open(self.getResults(), 'r').readlines()
-            for line in difflib.unified_diff(expected,
-                                             recieved,
-                                             fromfile=self.xml_e,
-                                             tofile=self.xml_r):
-                self.diff += str(line)
+            diffLines = ''
+            for line in difflib.unified_diff(expected, recieved, fromfile=self.xml_e, tofile=self.xml_r):
+                diffLines += str(line)
+            logging.debug("%3d. diffLines: " + diffLines,  self.taskId )
+            if len(diffLines) > 0:
+                self.diff += ("%3d. Test: %s\n") % (self.taskId,  self.getBaseEclRealName())
+                self.diff += diffLines
             logging.debug("%3d. self.diff: '" + self.diff +"'",  self.taskId )
         except Exception as e:
             logging.debug( e, extra={'taskId':self.taskId})