Merge branch 'candidate-7.0.x' into candidate-7.2.0

Richard Chapman 6 years ago
parent
commit
aa143afb10
70 changed files with 1018 additions and 302 deletions
  1. common/thorhelper/thorcommon.cpp (+6 -1)
  2. dali/server/daldap.cpp (+14 -7)
  3. docs/EN_US/HPCCClientTools/CT_Mods/CT_Overview.xml (+16 -5)
  4. docs/EN_US/HPCCDataHandling/DH-Mods/DH-Mod1.xml (+14 -24)
  5. docs/EN_US/HPCCSpark/SparkHPCC.xml (+51 -0)
  6. docs/EN_US/IMDB/IMDB.xml (+3 -3)
  7. docs/EN_US/Installing_and_RunningTheHPCCPlatform/Installing_and_RunningTheHPCCPlatform.xml (+46 -2)
  8. docs/EN_US/InstantCloud/InstantCloud.xml (+16 -6)
  9. docs/EN_US/RunningHPCCinaVirtualMachine/RunningHPCCinaVirtualMachine.xml (+13 -2)
  10. docs/EN_US/images/LZimg05.jpg (BIN)
  11. ecl/hql/hqlexpr.cpp (+3 -1)
  12. ecl/hql/hqlgram2.cpp (+5 -2)
  13. ecl/hql/hqllex.l (+2 -2)
  14. ecl/hql/hqlmeta.cpp (+3 -1)
  15. ecl/hql/hqlutil.cpp (+2 -2)
  16. ecl/hqlcpp/hqlcppds.cpp (+14 -2)
  17. ecl/hqlcpp/hqlhtcpp.cpp (+2 -2)
  18. ecl/hqlcpp/hqltcppc.cpp (+8 -0)
  19. ecl/hthor/hthor.cpp (+31 -50)
  20. ecl/hthor/hthor.ipp (+3 -2)
  21. esp/esdllib/esdl_transformer2.cpp (+2 -2)
  22. esp/logging/logginglib/loggingagentbase.cpp (+24 -4)
  23. esp/logging/logginglib/loggingagentbase.hpp (+14 -7)
  24. esp/logging/loggingmanager/loggingmanager.cpp (+3 -3)
  25. esp/logging/loggingmanager/loggingmanager.h (+2 -0)
  26. esp/logging/loggingmanager/loggingmanager.hpp (+4 -2)
  27. esp/scm/ws_fileio.ecm (+1 -1)
  28. esp/services/esdl_svc_engine/esdl_binding.cpp (+7 -6)
  29. esp/services/esdl_svc_engine/esdl_binding.hpp (+2 -2)
  30. esp/services/ws_dfu/ws_dfuService.cpp (+1 -21)
  31. esp/services/ws_fileio/ws_fileioservice.cpp (+64 -78)
  32. esp/src/eclwatch/UserQueryWidget.js (+17 -3)
  33. esp/src/package-lock.json (+3 -3)
  34. esp/src/package.json (+3 -2)
  35. initfiles/bash/etc/init.d/export-path (+1 -1)
  36. initfiles/componentfiles/configxml/cgencomplist_linux.xml.in (+1 -1)
  37. initfiles/componentfiles/configxml/dali.xsd (+7 -0)
  38. initfiles/componentfiles/configxml/dali.xsl (+1 -1)
  39. initfiles/componentfiles/configxml/sparkThor.xsl.in (+3 -3)
  40. initfiles/etc/DIR_NAME/environment.xml.in (+1 -0)
  41. initfiles/sbin/cluster_script.py (+4 -3)
  42. initfiles/sbin/hpcc/cluster/host.py (+2 -1)
  43. plugins/spark/CMakeLists.txt (+6 -0)
  44. plugins/spark/spark-env.install (+25 -0)
  45. plugins/spark/spark-env.sh.in (+13 -7)
  46. plugins/spark/sparkthor-worker.sh.in (+4 -4)
  47. plugins/spark/sparkthor.sh.in (+1 -1)
  48. plugins/workunitservices/workunitservices.cpp (+80 -9)
  49. rtl/eclrtl/eclregex.cpp (+4 -0)
  50. system/jhtree/ctfile.cpp (+3 -1)
  51. system/jlib/jcomp.cpp (+2 -1)
  52. system/jlib/jstats.cpp (+3 -2)
  53. system/jlib/jtime.cpp (+7 -0)
  54. system/jlib/jtime.hpp (+1 -0)
  55. system/mp/mpcomm.cpp (+18 -6)
  56. system/security/LdapSecurity/ldapconnection.cpp (+11 -0)
  57. testing/regress/ecl/casts.ecl (+2 -0)
  58. testing/regress/ecl/indexblobs.ecl (+79 -0)
  59. testing/regress/ecl/key/casts.xml (+3 -0)
  60. testing/regress/ecl/key/indexblobs.xml (+5 -0)
  61. testing/regress/ecl/key/stream.xml (+45 -0)
  62. testing/regress/ecl/key/timestamps2.xml (+21 -0)
  63. testing/regress/ecl/process.ecl (+1 -1)
  64. testing/regress/ecl/stream.ecl (+140 -0)
  65. testing/regress/ecl/timestamps.ecl (+51 -0)
  66. testing/regress/ecl/timestamps2.ecl (+53 -0)
  67. thorlcr/activities/iterate/thiterateslave.cpp (+9 -5)
  68. thorlcr/activities/nsplitter/thnsplitterslave.cpp (+2 -1)
  69. thorlcr/graph/thgraphslave.hpp (+7 -5)
  70. thorlcr/slave/slavmain.cpp (+3 -1)

+ 6 - 1
common/thorhelper/thorcommon.cpp

@@ -2114,7 +2114,12 @@ ITranslator *getTranslators(const char *tracing, unsigned expectedCrc, IOutputMe
     Owned<const IKeyTranslator> keyedTranslator;
     if (getTranslators(translator, &keyedTranslator, tracing, expectedCrc, expectedFormat, publishedCrc, publishedFormat, projectedCrc, projectedFormat, mode))
     {
-        if (!publishedFormat)
+        if (RecordTranslationMode::AlwaysECL == mode)
+        {
+            publishedFormat = expectedFormat;
+            publishedCrc = expectedCrc;
+        }
+        else if (!publishedFormat)
             publishedFormat = expectedFormat;
         class CTranslator : public CSimpleInterfaceOf<ITranslator>
         {
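
A minimal sketch of the selection rule this hunk introduces: when the translation mode is AlwaysECL, the ECL-declared (expected) layout and its CRC are used in place of whatever layout was published, so the translator is built against the ECL definition; otherwise the expected layout is only a fallback when nothing was published. The enum and struct below are simplified stand-ins, not the real HPCC types.

    #include <cstdio>

    enum class RecordTranslationMode { None, All, AlwaysECL };   // simplified stand-in

    struct Format { const char *name; };

    // Mirrors the branch added above.
    void choosePublished(RecordTranslationMode mode,
                         const Format *expected, unsigned expectedCrc,
                         const Format *&published, unsigned &publishedCrc)
    {
        if (mode == RecordTranslationMode::AlwaysECL)
        {
            published = expected;       // ignore the published layout entirely
            publishedCrc = expectedCrc; // keep the CRC consistent with it
        }
        else if (!published)
            published = expected;       // fallback only when nothing was published
    }

    int main()
    {
        Format ecl{"ecl-layout"};
        const Format *published = nullptr;
        unsigned publishedCrc = 0;
        choosePublished(RecordTranslationMode::AlwaysECL, &ecl, 0x1234, published, publishedCrc);
        printf("%s crc=%x\n", published->name, publishedCrc);
        return 0;
    }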

+ 14 - 7
dali/server/daldap.cpp

@@ -54,6 +54,7 @@ class CDaliLdapConnection: implements IDaliLdapConnection, public CInterface
     StringAttr              filesdefaultpassword;
     unsigned                ldapflags;
     unsigned                requestSignatureExpiryMinutes;//Age at which a dali permissions request signature becomes invalid
+    unsigned                requestSignatureAllowedClockVarianceSeconds;//Number of seconds that timestamps can vary between nodes
     IDigitalSignatureManager * pDSM = nullptr;
 
     void createDefaultScopes()
@@ -86,6 +87,7 @@ public:
         ldapflags = 0;
         if (ldapprops) {
             requestSignatureExpiryMinutes = ldapprops->getPropInt("@reqSignatureExpiry", 10);
+            requestSignatureAllowedClockVarianceSeconds = ldapprops->getPropInt("@allowedClockVariance", 5);
             if (ldapprops->getPropBool("@checkScopeScans",true))
                 ldapflags |= DLF_SCOPESCANS;
             if (ldapprops->getPropBool("@safeLookup",true))
@@ -164,23 +166,28 @@ public:
 
                 CDateTime now;
                 now.setNow();
-                if (now.compare(reqUTCTimestamp, false) < 0)//timestamp from the future?
+                CDateTime daliTime(now);
+                if (requestSignatureAllowedClockVarianceSeconds)//allow for clock variance between machines
+                    daliTime.adjustTimeSecs(requestSignatureAllowedClockVarianceSeconds);
+
+                if (daliTime.compare(reqUTCTimestamp, false) < 0)//timestamp from the future?
                 {
                     StringBuffer localDaliTimeUTC;
                     now.getString(localDaliTimeUTC, false);//get UTC timestamp
-                    ERRLOG("LDAP: getPermissions(%s) scope=%s user=%s Request digital signature UTC timestamp %s from the future (Dali UTC time %s)",key?key:"NULL",obj?obj:"NULL",username.str(), requestTimestamp.str(), localDaliTimeUTC.str());
+                    ERRLOG("getPermissions(%s) scope=%s user=%s Request digital signature UTC timestamp %s from the future (Dali UTC time %s). Check configured allowedClockVariance (%d sec)",key?key:"NULL",obj?obj:"NULL",username.str(), requestTimestamp.str(), localDaliTimeUTC.str(), requestSignatureAllowedClockVarianceSeconds);
                     return SecAccess_None;//deny
                 }
 
-                CDateTime expiry;
-                expiry.set(now);
-                expiry.adjustTime(requestSignatureExpiryMinutes);//compute expiration timestamp
+                CDateTime expiry(now);
+                expiry.adjustTime(-1 * requestSignatureExpiryMinutes);//compute expiration timestamp
+                if (requestSignatureAllowedClockVarianceSeconds)//allow for clock variance between machines
+                    expiry.adjustTimeSecs(-1 * requestSignatureAllowedClockVarianceSeconds);
 
-                if (expiry.compare(reqUTCTimestamp, false) < 0)//timestamp too far in the past?
+                if (reqUTCTimestamp.compare(expiry, false) < 0)//timestamp too far in the past?
                 {
                     StringBuffer localDaliTimeUTC;
                     now.getString(localDaliTimeUTC, false);//get UTC timestamp
-                    ERRLOG("LDAP: getPermissions(%s) scope=%s user=%s Expired request digital signature UTC timestamp %s (Dali UTC time %s, configured expiry %d minutes)",key?key:"NULL",obj?obj:"NULL",username.str(), requestTimestamp.str(), localDaliTimeUTC.str(), requestSignatureExpiryMinutes);
+                    ERRLOG("getPermissions(%s) scope=%s user=%s Expired request digital signature UTC timestamp %s (Dali UTC time %s, configured expiry %d minutes. Check configured allowedClockVariance (%d sec))",key?key:"NULL",obj?obj:"NULL",username.str(), requestTimestamp.str(), localDaliTimeUTC.str(), requestSignatureExpiryMinutes, requestSignatureAllowedClockVarianceSeconds);
                     return SecAccess_None;//deny
                 }
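
The two checks above define an acceptance window for the request's signed timestamp: it may be at most allowedClockVariance seconds in the future, and at most reqSignatureExpiry minutes plus the same variance in the past. A self-contained sketch of that window using std::chrono; the names are illustrative and not the HPCC CDateTime API.

    #include <chrono>

    // Accept a signed request timestamp only if it falls inside
    //   [ daliNow - expiry - variance , daliNow + variance ]
    // i.e. the expiry window widened on both sides by the allowed clock skew.
    bool timestampAcceptable(std::chrono::system_clock::time_point reqUtc,
                             std::chrono::system_clock::time_point daliNowUtc,
                             std::chrono::minutes expiry,
                             std::chrono::seconds allowedClockVariance)
    {
        if (reqUtc > daliNowUtc + allowedClockVariance)           // from the future -> deny
            return false;
        if (reqUtc < daliNowUtc - expiry - allowedClockVariance)  // too far in the past -> deny
            return false;
        return true;
    }

    int main()
    {
        using namespace std::chrono;
        auto now = system_clock::now();
        // A request signed 30 seconds ago, with a 10 minute expiry and 5 seconds of allowed skew.
        return timestampAcceptable(now - seconds(30), now, minutes(10), seconds(5)) ? 0 : 1;
    }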
 

+ 16 - 5
docs/EN_US/HPCCClientTools/CT_Mods/CT_Overview.xml

@@ -121,6 +121,17 @@
 
           <para>Download the appropriate Client Tools for your Operating
           System. (available for CentOS, Ubuntu, Mac OSX, or Windows)</para>
+
+          <variablelist>
+            <varlistentry>
+              <term>Note:</term>
+
+              <listitem>
+                <para>The ECL IDE only runs on Windows operating
+                systems.</para>
+              </listitem>
+            </varlistentry>
+          </variablelist>
         </listitem>
 
         <listitem>
@@ -172,23 +183,23 @@
 
       <para>The HPCC Client tools are designed to allow you to have more than
       one version installed. For that reason, the choice to add the folder of
-      executable files to your system path is optional during installation.
-      </para>
+      executable files to your system path is optional during
+      installation.</para>
 
       <para>If you plan to use only one version, it may be easier to add the
       folder to your system or user path. If you want multiple versions
-      installed, you probably want to manage the path yourself. </para>
+      installed, you probably want to manage the path yourself.</para>
 
       <para>To make this easier, the installer creates a Start menu shortcut
       for an ECL Command Prompt. This command prompt adds the executable
-      folder to the path for the session of that command window. </para>
+      folder to the path for the session of that command window.</para>
 
       <para>Some command line tool functions may require elevated permissions.
       You can <emphasis role="bold">right-click</emphasis> &gt; <emphasis
       role="bold">Run As Administrator</emphasis> for the ECL Command Prompt
       for this purpose.</para>
 
-      <para> <graphic align="left"
+      <para><graphic align="left"
       fileref="../../images/ECLCmdPrompt.jpg" /></para>
     </sect2>
   </sect1>

+ 14 - 24
docs/EN_US/HPCCDataHandling/DH-Mods/DH-Mod1.xml

@@ -135,15 +135,14 @@
     <sect2 id="Uploading_files_w_secure_client">
       <title>Uploading files with a Secure Copy Client</title>
 
-      <para>To upload a large file for processing to your virtual machine, you
-      will need a tool that supports the secure copy protocol. In this
-      section, we discuss using WinSCP. There are other tools available, but
-      the steps are similar.</para>
+      <para>To upload a large file for processing to your system, you will
+      need a tool that supports the secure copy protocol (SCP). There are
+      several free tools available, and the steps are quite similar.</para>
 
       <para><orderedlist>
           <listitem>
-            <para>Open the WinSCP tool, and login to your Landing Zone node
-            using the username and password given.</para>
+            <para>Open the SCP tool, and login to your Landing Zone node using
+            the username and password given.</para>
 
             <para><informaltable colsep="1" rowsep="1">
                 <tgroup cols="2">
@@ -166,33 +165,24 @@
                   </tbody>
                 </tgroup>
               </informaltable></para>
+
+            <para>Ensure you are using the secure protocol, SFTP, connected to
+            your Landing Zone's IP or hostname through port 22 in your SCP
+            tool.</para>
           </listitem>
 
           <listitem>
-            <para>Once logged in, it should, navigate automatically to the
-            landing zone folder. (/var/lib/LexisNexis/mydropzone)</para>
+            <para>Once logged in, set the destination to the landing zone
+            folder. (/var/lib/HPCCSystems/mydropzone)</para>
           </listitem>
 
           <listitem>
-            <?dbfo keep-together="always"?>
-
-            <para>Navigate to where your local file is in the left part of the
-            window.</para>
-
-            <para><figure>
-                <title>WinSCP</title>
-
-                <mediaobject>
-                  <imageobject>
-                    <imagedata fileref="../../images/LZimg05.jpg" />
-                  </imageobject>
-                </mediaobject>
-              </figure></para>
+            <para>Navigate to the location where your local file is.</para>
           </listitem>
 
           <listitem>
-            <para>Select the data file to send and copy it to the landing
-            zone, using drag-and-drop.</para>
+            <para>Select the data file to send and copy it to your landing
+            zone.</para>
           </listitem>
         </orderedlist></para>
     </sect2>

+ 51 - 0
docs/EN_US/HPCCSpark/SparkHPCC.xml

@@ -424,6 +424,8 @@
       <para>Now you can start your HPCC System cluster and verify that
       Sparkthor is alive.</para>
 
+      <?hard-pagebreak ?>
+
       <para>To start your HPCC System.</para>
 
       <xi:include href="Installing_and_RunningTheHPCCPlatform/Inst-Mods/SysDStart.xml"
@@ -438,6 +440,55 @@
       your Integrated Spark Master node's IP address.</para>
 
       <programlisting>https://192.168.56.101:8080</programlisting>
+
+      <sect2 id="Addl_Spark_Config">
+        <title>Integrated Spark Cluster Configuration Options</title>
+
+        <para>In addition to the configuration options available through the
+        HPCC Systems configuration manager, there are configuration options
+        meant for edge cases and more advanced setups. To customize your
+        Integrated Spark cluster environment to utilize these additional
+        options use the provided <emphasis role="bold">spark-env.sh</emphasis>
+        script. </para>
+
+        <programlisting>/etc/HPCCSystems/externals/spark-hadoop/spark-env.sh</programlisting>
+
+        <para>For more information about Spark Cluster options, see the
+        following pages.</para>
+
+        <itemizedlist>
+          <listitem>
+            <para><ulink
+            url="https://spark.apache.org/docs/latest/spark-standalone.html#cluster-launch-scripts ">https://spark.apache.org/docs/latest/spark-standalone.html#cluster-launch-scripts</ulink></para>
+          </listitem>
+
+          <listitem>
+            <para><ulink
+            url="https://spark.apache.org/docs/latest/configuration.html#environment-variables ">https://spark.apache.org/docs/latest/configuration.html#environment-variables</ulink></para>
+          </listitem>
+        </itemizedlist>
+
+        <sect3 id="ExampleUseCases">
+          <title>Example Uses Cases</title>
+
+          <itemizedlist>
+            <listitem>
+              <para>Spark currently requires Java 8 to run. On a system where
+              the default Java installation is not Java 8. The JAVA_HOME
+              environment variable can be used to set the Spark Java version
+              to Java 8.</para>
+            </listitem>
+
+            <listitem>
+              <para>Typically when a job is run on a Spark cluster, it will
+              take ownership of all worker nodes. In a shared cluster
+              environment this may not be ideal. Using the SPARK_MASTER_OPTS
+              attribute it is possible to set a limit to the number of worker
+              nodes one job can utilize.</para>
+            </listitem>
+          </itemizedlist>
+        </sect3>
+      </sect2>
     </sect1>
   </chapter>
 

+ 3 - 3
docs/EN_US/IMDB/IMDB.xml

@@ -491,7 +491,7 @@ Blankline</programlisting></para>
 
           <listitem>
             <para>Click on the <emphasis role="bold">Logical Files</emphasis>
-            link</para>
+            tab along the top left.</para>
 
             <para>The files display in the Logical Files list:</para>
 
@@ -1156,8 +1156,8 @@ IMDB.KevinBaconNumberSets.doCounts;</programlisting></para>
     in conjunction with the examples you have already worked through in this
     tutorial:</para>
 
-    <para>KeysKevinBacon -- Builds an index of actors/actresses and the
-    movies they have starred in.</para>
+    <para>KeysKevinBacon -- Builds an index of actors/actresses and the movies
+    they have starred in.</para>
 
     <para>You must build this index before you can run queries to find the
     degree of separation between Kevin Bacon and an actor of your

+ 46 - 2
docs/EN_US/Installing_and_RunningTheHPCCPlatform/Installing_and_RunningTheHPCCPlatform.xml

@@ -37,7 +37,7 @@
       example data used in this manual are fictitious. Any similarity to
       actual persons, living or dead, is purely coincidental.</para>
 
-      <para />
+      <para></para>
     </legalnotice>
 
     <xi:include href="common/Version.xml"
@@ -300,7 +300,15 @@
               </varlistentry>
             </variablelist></para>
 
-          <para><programlisting>sudo wget http://people.centos.org/tru/devtools-2/devtools-2.repo -O /etc/yum.repos.d/devtools-2.repo</programlisting></para>
+          <para><programlisting>sudo wget \
+ http://people.centos.org/tru/devtools-2/devtools-2.repo -O /etc/yum.repos.d/devtools-2.repo</programlisting></para>
+
+          <para>This will add the devtools-2.repo file to your system. Verify
+          that the following information is in the devtools-2.repo file for
+          the yum installation to work. <programlisting>[testing-devtools-2-centos-6]
+name=testing 2 devtools for CentOS 6 
+baseurl=http://people.centos.org/tru/devtools-2/6/$basearch/RPMS
+gpgcheck=0</programlisting></para>
         </sect3>
 
         <sect3 id="Installing_Ubuntu-Debian">
@@ -515,6 +523,17 @@
               <listitem>
                 <para>Install the <emphasis role="bold">ECL IDE and Client
                 Tools for Windows</emphasis>.</para>
+
+                <variablelist>
+                  <varlistentry>
+                    <term>Note:</term>
+
+                    <listitem>
+                      <para>The ECL IDE only runs on Windows operating
+                      systems.</para>
+                    </listitem>
+                  </varlistentry>
+                </variablelist>
               </listitem>
 
               <listitem>
@@ -1161,6 +1180,31 @@ sudo cp /etc/HPCCSystems/source/NewEnvironment.xml /etc/HPCCSystems/environment.
             Scripts</link>) for more details.</para>
           </listitem>
         </orderedlist>
+
+        <sect3 id="Addl_SSH_KeyInfo">
+          <title>Additional SSH Key Information</title>
+
+          <para>On a multi-node HPCC system, certificates and SSH keys must
+          all match across all nodes for the system to work properly. If you
+          used the <emphasis>install-cluster.sh</emphasis> script as outlined
+          in the steps above, this would make sure that everything is properly
+          in sync. However, it is still a good idea to verify that they do all
+          match up. Another way to ensure this is to use the delivered
+          <emphasis>hpcc-push.sh</emphasis> script. For example, the following
+          commands would push out the certificate, key, and public key out to
+          all hosts defined in the environment.</para>
+
+          <programlisting>sudo /opt/HPCCSystems/sbin/hpcc-push.sh \
+     -s /home/hpcc/certificate/public.key.pem -t /home/hpcc/certificate/public.key.pem 
+sudo /opt/HPCCSystems/sbin/hpcc-push.sh \
+     -s /home/hpcc/certificate/key.pem -t /home/hpcc/certificate/key.pem 
+sudo /opt/HPCCSystems/sbin/hpcc-push.sh \
+     -s /home/hpcc/certificate/certificate.pem -t /home/hpcc/certificate/certificate.pem </programlisting>
+
+          <para>See the appendix (<link
+          linkend="Example-Scripts">Appendix:Example Scripts</link>) for more
+          information on using this script.</para>
+        </sect3>
       </sect2>
     </sect1>
 

+ 16 - 6
docs/EN_US/InstantCloud/InstantCloud.xml

@@ -46,9 +46,8 @@
       similarity to actual persons, living or dead, is purely
       coincidental.</para>
 
-      <para />
-
-     </legalnotice>
+      <para></para>
+    </legalnotice>
 
     <xi:include href="common/Version.xml"
                 xpointer="xpointer(//*[@id='FooterInfo'])"
@@ -166,8 +165,8 @@
               </listitem>
 
               <listitem>
-                <para><emphasis>Linux workstation with the ECL IDE or VS
-                Code </emphasis></para>
+                <para><emphasis>Linux workstation with the ECL IDE or VS Code
+                </emphasis></para>
               </listitem>
 
               <listitem>
@@ -657,7 +656,18 @@
         <title>Install the ECL IDE and HPCC Client Tools</title>
 
         <para>You only need to install the ECL IDE once. If you have already
-        installed it, you can skip this section. .</para>
+        installed it, you can skip this section.</para>
+
+        <para><variablelist>
+            <varlistentry>
+              <term>Note:</term>
+
+              <listitem>
+                <para>The ECL IDE only runs on Windows operating
+                systems.</para>
+              </listitem>
+            </varlistentry>
+          </variablelist></para>
 
         <para><orderedlist>
             <listitem>

+ 13 - 2
docs/EN_US/RunningHPCCinaVirtualMachine/RunningHPCCinaVirtualMachine.xml

@@ -39,7 +39,7 @@
       similarity to actual persons, living or dead, is purely
       coincidental.</para>
 
-      <para />
+      <para></para>
     </legalnotice>
 
     <xi:include href="common/Version.xml"
@@ -533,7 +533,18 @@
         <listitem>
           <para>Install the ECL IDE, following the prompts in the installation
           program. Once the ECL IDE is installed successfully, you can
-          proceed.</para>
+          proceed. </para>
+
+          <variablelist>
+            <varlistentry>
+              <term>Note:</term>
+
+              <listitem>
+                <para>The ECL IDE only runs on Windows operating
+                systems.</para>
+              </listitem>
+            </varlistentry>
+          </variablelist>
         </listitem>
       </orderedlist></para>
 

BIN
docs/EN_US/images/LZimg05.jpg


+ 3 - 1
ecl/hql/hqlexpr.cpp

@@ -12676,8 +12676,10 @@ extern IHqlExpression *createRow(node_operator op, HqlExprArray & args)
             type = LINK(fieldType);
             break;
         }
-    case no_embedbody:
     case no_id2blob:
+        assertex(!recordRequiresLinkCount(&args.item(1)));
+        // fallthrough
+    case no_embedbody:
     case no_temprow:
     case no_projectrow:         // arg(1) is actually a transform
         {

+ 5 - 2
ecl/hql/hqlgram2.cpp

@@ -10343,6 +10343,9 @@ void HqlGram::cloneInheritedAttributes(IHqlScope * scope, const attribute & errp
             curBase->getSymbols(syms);
             syms.sort(compareSymbolsByName);
 
+            QuickExpressionReplacer virtualMapper;
+            if (baseVirtualAttr)
+                virtualMapper.setMapping(baseVirtualAttr, virtualSeqAttr);
             ForEachItemIn(iSym, syms)
             {
                 IIdAtom * id = syms.item(iSym).queryId();
@@ -10354,8 +10357,8 @@ void HqlGram::cloneInheritedAttributes(IHqlScope * scope, const attribute & errp
 
                 LinkedHqlExpr mapped = baseSym;
                 //Replace any references to the base module attribute with this new module.
-                if (baseVirtualAttr)
-                    mapped.setown(quickFullReplaceExpression(mapped, baseVirtualAttr, virtualSeqAttr));
+                if (baseVirtualAttr && (baseVirtualAttr != virtualSeqAttr))
+                    mapped.setown(virtualMapper.transform(mapped));
 
                 if (match)
                 {
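
A toy illustration of the refactor in this hunk: the replacement mapping is built once, before the per-symbol loop, applied to each symbol via transform(), and skipped entirely when the mapping would be a no-op. The Replacer class below is a stand-in for QuickExpressionReplacer, not the real interface.

    #include <iostream>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Holds a mapping that is set once and can then be applied to any number of expressions.
    class Replacer
    {
        std::unordered_map<std::string, std::string> mapping;
    public:
        void setMapping(const std::string &from, const std::string &to) { mapping[from] = to; }
        std::string transform(const std::string &expr) const
        {
            auto it = mapping.find(expr);
            return it == mapping.end() ? expr : it->second;
        }
    };

    int main()
    {
        const std::string baseAttr = "baseModuleAttr";
        const std::string newAttr = "derivedModuleAttr";
        const std::vector<std::string> symbols = { "baseModuleAttr", "someField", "otherField" };

        Replacer mapper;               // built once, outside the per-symbol loop
        if (baseAttr != newAttr)       // skip the mapping entirely when it would be a no-op
            mapper.setMapping(baseAttr, newAttr);

        for (const auto &sym : symbols)
            std::cout << mapper.transform(sym) << "\n";
        return 0;
    }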

+ 2 - 2
ecl/hql/hqllex.l

@@ -1277,7 +1277,7 @@ DECIMAL             {
                         if (!lookup)
                             return lookupIdentifierToken(returnToken, lexer, lookup, activeState, CUR_TOKEN_TEXT);
 
-                      returnToken.setType(makeDecimalType(UNKNOWN_LENGTH,UNKNOWN_LENGTH, true));
+                      returnToken.setType(makeDecimalType(MAX_DECIMAL_DIGITS,MAX_DECIMAL_PRECISION, true));
                       return(SIMPLE_TYPE); 
                     }
 UDECIMAL             {
@@ -1285,7 +1285,7 @@ UDECIMAL             {
                         if (!lookup)
                             return lookupIdentifierToken(returnToken, lexer, lookup, activeState, CUR_TOKEN_TEXT);
 
-                      returnToken.setType(makeDecimalType(UNKNOWN_LENGTH,UNKNOWN_LENGTH, true));
+                      returnToken.setType(makeDecimalType(MAX_DECIMAL_DIGITS,MAX_DECIMAL_PRECISION, true));
                       return(SIMPLE_TYPE); 
                     }
 (U|u)?DECIMAL{digit}+(_{digit}+)? {

+ 3 - 1
ecl/hql/hqlmeta.cpp

@@ -3087,11 +3087,13 @@ ITypeInfo * calculateDatasetType(node_operator op, const HqlExprArray & parms)
         type.setown(parms.item(0).getType());
         assertex(parms.ordinality()>1 || hasStreamedModifier(type));     // should have a count or a length
         break;
+    case no_id2blob:
+        assertex(!recordRequiresLinkCount(&parms.item(1)));
+        // fallthrough
     case no_inlinetable:
     case no_dataset_from_transform:
     case no_xmlproject:
     case no_temptable:
-    case no_id2blob:
     case no_embedbody:
         newRecordType.setown(createRecordType(&parms.item(1)));
         linkCounted = hasAttribute(_linkCounted_Atom, parms);

+ 2 - 2
ecl/hql/hqlutil.cpp

@@ -352,9 +352,9 @@ IHqlExpression * convertIndexPhysical2LogicalValue(IHqlExpression * cur, IHqlExp
     if (cur->hasAttribute(blobAtom))
     {
         if (cur->isDataset())
-            return createDataset(no_id2blob, LINK(physicalSelect), LINK(cur->queryRecord()));
+            return createDataset(no_id2blob, LINK(physicalSelect), getSerializedForm(cur->queryRecord(), diskAtom));
         else if (cur->isDatarow())
-            return createRow(no_id2blob, LINK(physicalSelect), LINK(cur->queryRecord()));
+            return createRow(no_id2blob, LINK(physicalSelect), getSerializedForm(cur->queryRecord(), diskAtom));
         else
             return createValue(no_id2blob, cur->getType(), LINK(physicalSelect));
     }

+ 14 - 2
ecl/hqlcpp/hqlcppds.cpp

@@ -2162,9 +2162,19 @@ void HqlCppTranslator::ensureDatasetFormat(BuildCtx & ctx, ITypeInfo * type, CHq
             OwnedITypeInfo streamedType = setStreamedAttr(type, true);
             OwnedHqlExpr call = bindFunctionCall(createRowStreamId, args, streamedType);
             buildTempExpr(ctx, call, tgt);
-            return;
         }
-        break;
+        return;
+    }
+
+    if (hasStreamedModifier(tgtType) && (format != FormatNatural))
+    {
+        OwnedHqlExpr serializedExpr = tgt.getTranslatedExpr();
+        buildTempExpr(ctx, serializedExpr, tgt, format);
+        tgtType = tgt.queryType();
+    }
+
+    switch (format)
+    {
     case FormatBlockedDataset:
         if (isArrayRowset(tgtType))
         {
@@ -2795,6 +2805,7 @@ void HqlCppTranslator::buildDatasetAssign(BuildCtx & ctx, const CHqlBoundTarget
         case no_translated:
         case no_null:
         case no_id2blob:
+            if (!hasStreamedModifier(exprType))
             {
                 IIdAtom * func = NULL;
                 if (!isArrayRowset(to))
@@ -2843,6 +2854,7 @@ void HqlCppTranslator::buildDatasetAssign(BuildCtx & ctx, const CHqlBoundTarget
                     return;
                 }
             }
+            break;
         }
     }
 

+ 2 - 2
ecl/hqlcpp/hqlhtcpp.cpp

@@ -11661,7 +11661,7 @@ void HqlCppTranslator::doBuildStmtOutput(BuildCtx & ctx, IHqlExpression * expr)
     Owned<IWUResult> result = createDatasetResultSchema(seq, name, dataset->queryRecord(), xmlnsAttrs, true, false, 0);
 
     CHqlBoundExpr bound;
-    buildDataset(ctx, dataset, bound, FormatNatural);
+    buildDataset(ctx, dataset, bound, FormatBlockedDataset);
     OwnedHqlExpr count = getBoundCount(bound);
 
     HqlExprArray args;
@@ -15578,7 +15578,7 @@ ABoundActivity * HqlCppTranslator::doBuildActivitySerialize(BuildCtx & ctx, IHql
 
     {
         MemberFunction func(*this, instance->startctx, "virtual size32_t transform(ARowBuilder & crSelf, const void * _left) override");
-        func.ctx.addQuotedLiteral("const unsigned char * left = (const byte *) left;");
+        func.ctx.addQuotedLiteral("const unsigned char * left = (const byte *) _left;");
 
         // Bind left to "left" and right to RIGHT
         bindTableCursor(func.ctx, dataset, "left");

+ 8 - 0
ecl/hqlcpp/hqltcppc.cpp

@@ -601,6 +601,9 @@ void CMemberInfo::buildOffset(HqlCppTranslator & translator, BuildCtx & ctx, IRe
             IHqlExpression * accessor = selector->queryRootRow()->ensureAccessor(translator, ctx);
             assertex(accessor);
             value.setown(createValue(no_select, LINK(sizetType), LINK(accessor), LINK(cachedAccessorOffset.queryVarSize())));
+            //The type in the line above is a lie.  It is actually a size_t (rather than a size32_t).
+            //Add a cast to ensure that all offsets are calculated as size32_t to avoid problems with (temporary) numeric underflow
+            value.setown(createValue(no_cast, LINK(sizetType), value.getClear()));
         }
         bound.expr.setown(createSizeExpression(value, cachedAccessorOffset.getFixedSize()));
     }
@@ -1112,7 +1115,12 @@ void CContainerInfo::buildSizeOf(HqlCppTranslator & translator, BuildCtx & ctx,
         IHqlExpression * accessor = selector->queryRootRow()->ensureAccessor(translator, ctx);
         assertex(accessor);
         if (accessorSize.queryVarSize())
+        {
             bound.expr.setown(createValue(no_select, LINK(sizetType), LINK(accessor), LINK(accessorSize.queryVarSize())));
+            //The type in the line above is a lie.  It is actually a size_t (rather than a size32_t).
+            //Add a cast to ensure that all offsets are calculated as size32_t to avoid problems with (temporary) numeric underflow
+            bound.expr.setown(createValue(no_cast, LINK(sizetType), bound.expr.getClear()));
+        }
         bound.expr.setown(createSizeExpression(bound.expr, accessorSize.getFixedSize()));
     }
     else

+ 31 - 50
ecl/hthor/hthor.cpp

@@ -8024,6 +8024,8 @@ CHThorDiskReadBaseActivity::CHThorDiskReadBaseActivity(IAgentContext &_agent, un
     helper.setCallback(this);
     expectedDiskMeta = helper.queryDiskRecordSize();
     projectedDiskMeta = helper.queryProjectedDiskRecordSize();
+    actualDiskMeta.set(helper.queryDiskRecordSize()->querySerializedDiskMeta());
+
     if (_node)
     {
         const char *recordTranslationModeHintText = _node->queryProp("hint[@name='layoutTranslation']/@value");
@@ -8054,6 +8056,30 @@ void CHThorDiskReadBaseActivity::ready()
     partNum = (unsigned)-1;
 
     resolve();
+
+    unsigned expectedCrc = helper.getDiskFormatCrc();
+    unsigned projectedCrc = helper.getProjectedFormatCrc();
+    unsigned actualCrc = expectedCrc;
+    IDistributedFile *dFile = nullptr;
+    if (ldFile)
+        dFile = ldFile->queryDistributedFile();  // Null for local file usage
+    if (dFile)
+    {
+        IPropertyTree &props = dFile->queryAttributes();
+        Owned<IOutputMetaData> publishedMeta = getDaliLayoutInfo(props);
+        if (publishedMeta)
+        {
+            actualDiskMeta.setown(publishedMeta.getClear());
+            actualCrc = props.getPropInt("@formatCrc");
+        }
+    }
+    translators.setown(::getTranslators("hthor-diskread", expectedCrc, expectedDiskMeta, actualCrc, actualDiskMeta, projectedCrc, projectedDiskMeta, getLayoutTranslationMode()));
+    if (translators)
+    {
+        translator = &translators->queryTranslator();
+        keyedTranslator = translators->queryKeyedTranslator();
+        actualDiskMeta.set(&translators->queryActualFormat());
+    }
 }
 
 void CHThorDiskReadBaseActivity::stop()
@@ -8150,7 +8176,6 @@ void CHThorDiskReadBaseActivity::gatherInfo(IFileDescriptor * fileDesc)
         grouped = ((helper.getFlags() & TDXgrouped) != 0);
     }
 
-    actualDiskMeta.set(helper.queryDiskRecordSize()->querySerializedDiskMeta());
     calcFixedDiskRecordSize();
     if (fileDesc)
     {
@@ -8223,8 +8248,6 @@ bool CHThorDiskReadBaseActivity::openNext()
     localOffset = 0;
     saveOpenExc.clear();
     actualFilter.clear();
-    unsigned expectedCrc = helper.getDiskFormatCrc();
-    unsigned projectedCrc = helper.getProjectedFormatCrc();
 
     if (dfsParts||ldFile)
     {
@@ -8250,16 +8273,6 @@ bool CHThorDiskReadBaseActivity::openNext()
                 }
             }
 
-            unsigned actualCrc = 0;
-            if (dFile)
-            {
-                IPropertyTree &props = dFile->queryAttributes();
-                actualDiskMeta.setown(getDaliLayoutInfo(props));
-                actualCrc = props.getPropInt("@formatCrc");
-            }
-            if (!actualDiskMeta)
-                actualDiskMeta.set(expectedDiskMeta->querySerializedDiskMeta());
-            keyedTranslator.setown(createKeyTranslator(actualDiskMeta->queryRecordAccessor(true), expectedDiskMeta->queryRecordAccessor(true)));
             if (keyedTranslator && keyedTranslator->needsTranslate())
                 keyedTranslator->translate(actualFilter, fieldFilters);
             else
@@ -8342,8 +8355,13 @@ bool CHThorDiskReadBaseActivity::openNext()
 
                             Owned<IFile> iFile = createIFile(rfilename);
 
+                            // remote side does projection/translation/filtering
                             actualDiskMeta.set(projectedDiskMeta);
                             expectedDiskMeta = projectedDiskMeta;
+                            translators.clear();
+                            translator = nullptr;
+                            keyedTranslator = nullptr;
+
                             actualFilter.clear();
                             inputfileio.setown(remoteFileIO.getClear());
                             if (inputfileio)
@@ -8412,27 +8430,6 @@ bool CHThorDiskReadBaseActivity::openNext()
                 }
             }
 
-            //Check if the file requires translation, but translation is disabled
-            if (actualCrc && expectedCrc && (actualCrc != expectedCrc) && (getLayoutTranslationMode()==RecordTranslationMode::None))
-            {
-                IOutputMetaData * expectedDiskMeta = helper.queryDiskRecordSize();
-                throwTranslationError(actualDiskMeta->queryRecordAccessor(true), expectedDiskMeta->queryRecordAccessor(true), logicalFileName.str());
-            }
-
-            //The projected format will often not match the expected format, and if it differs it must be translated
-            if (projectedCrc && actualCrc != projectedCrc)
-                translator.setown(createRecordTranslator(projectedDiskMeta->queryRecordAccessor(true), actualDiskMeta->queryRecordAccessor(true)));
-
-            if (translator && translator->needsTranslate())
-            {
-                if (!translator->canTranslate())
-                    throw MakeStringException(0, "Untranslatable key layout mismatch reading file %s", logicalFileName.str());
-            }
-            else
-            {
-                translator.clear();
-                keyedTranslator.clear();
-            }
             calcFixedDiskRecordSize();
             if (dfsParts)
                 dfsParts->next();
@@ -8480,22 +8477,6 @@ bool CHThorDiskReadBaseActivity::openNext()
                 saveOpenExc.setown(E);
         }
 
-        //A spill file - actual will always equal expected, so keyed translator will never be used.
-        keyedTranslator.clear();
-
-        //The projected format will often not match the expected format, and if it differs it must be translated
-        translator.clear();
-        if (projectedCrc != expectedCrc)
-            translator.setown(createRecordTranslator(projectedDiskMeta->queryRecordAccessor(true), actualDiskMeta->queryRecordAccessor(true)));
-
-        if (translator && translator->needsTranslate())
-        {
-            assertex(translator->canTranslate());
-        }
-        else
-        {
-            translator.clear();
-        }
         partNum++;
         if (checkOpenedFile(file.str(), NULL))
         {

+ 3 - 2
ecl/hthor/hthor.ipp

@@ -2263,8 +2263,9 @@ protected:
     StringAttr logicalFileName;
     StringArray subfileLogicalFilenames;
     Owned<ISuperFileDescriptor> superfile;
-    Owned<const IDynamicTransform> translator;
-    Owned<const IKeyTranslator> keyedTranslator;
+    const IDynamicTransform *translator = nullptr;
+    const IKeyTranslator *keyedTranslator = nullptr;
+    Owned<ITranslator> translators;
     IPointerArrayOf<IOutputMetaData> actualLayouts;  // Do we need to keep more than one?
     IConstArrayOf<IFieldFilter> fieldFilters;  // These refer to the expected layout
     RowFilter actualFilter;               // This refers to the actual disk layout

+ 2 - 2
esp/esdllib/esdl_transformer2.cpp

@@ -317,13 +317,13 @@ void Esdl2Base::output_content(Esdl2TransformerContext &ctx, const char * conten
                     case ESDLT_INT16:
                     case ESDLT_INT32:
                     case ESDLT_INT64:
-                        ctx.writer->outputInt(atoi(content), sizeof(int), tagname);
+                        ctx.writer->outputInt(strtoll(content, nullptr, 10), sizeof(long long), tagname);
                         break;
                     case ESDLT_UINT8:
                     case ESDLT_UINT16:
                     case ESDLT_UINT32:
                     case ESDLT_UINT64:
-                        ctx.writer->outputUInt(atoi(content), sizeof(unsigned), tagname);
+                        ctx.writer->outputUInt(strtoull(content, nullptr, 10), sizeof(unsigned long long), tagname);
                         break;
                     case ESDLT_BYTE:
                     case ESDLT_UBYTE:
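
The motivation for this change, sketched below: atoi's behaviour is undefined once the text no longer fits in an int, so 64-bit ESDL integer values can be silently mangled, whereas strtoll/strtoull parse the full 64-bit range. The value used here is only an illustration.

    #include <cstdio>
    #include <cstdlib>

    int main()
    {
        const char *content = "9223372036854775000";       // a 64-bit value close to INT64_MAX
        int asInt = atoi(content);                          // overflow: behaviour undefined, result unreliable
        long long asInt64 = strtoll(content, nullptr, 10);  // preserves the full 64-bit value
        printf("atoi:    %d\nstrtoll: %lld\n", asInt, asInt64);
        return 0;
    }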

+ 24 - 4
esp/logging/logginglib/loggingagentbase.cpp

@@ -33,7 +33,15 @@ void CLogContentFilter::readAllLogFilters(IPropertyTree* cfg)
         groupFilterRead = true;
     }
 
-    for (unsigned i = 0; i < ESPLCGBackEndResp; i++)
+    xpath.setf("Filters/Filter[@type='%s']", espLogContentGroupNames[ESPLCGBackEndReq]);
+    filter = cfg->queryBranch(xpath.str());
+    if (filter && filter->hasProp("@value"))
+    {
+        logBackEndReq = filter->getPropBool("@value");
+        groupFilterRead = true;
+    }
+
+    for (unsigned i = 0; i < ESPLCGBackEndReq; i++)
     {
         if (readLogFilters(cfg, i))
             groupFilterRead = true;
@@ -169,6 +177,7 @@ IEspUpdateLogRequestWrap* CLogContentFilter::filterLogContent(IEspUpdateLogReque
             Owned<IPropertyTree> userRequest = req->getUserRequest();
             const char* userResp = req->getUserResponse();
             const char* logDatasets = req->getLogDatasets();
+            const char* backEndReq = req->getBackEndRequest();
             const char* backEndResp = req->getBackEndResponse();
             if (!espContext && !userContext && !userRequest && (!userResp || !*userResp) && (!backEndResp || !*backEndResp))
                 throw MakeStringException(EspLoggingErrors::UpdateLogFailed, "Failed to read log content");
@@ -190,19 +199,21 @@ IEspUpdateLogRequestWrap* CLogContentFilter::filterLogContent(IEspUpdateLogReque
                 IPropertyTree* pTree = ensurePTree(logContentTree, espLogContentGroupNames[ESPLCGUserReq]);
                 pTree->addPropTree(userRequest->queryName(), LINK(userRequest));
             }
-            if (userResp && *userResp)
+            if (!isEmptyString(userResp))
             {
                 IPropertyTree* pTree = ensurePTree(logContentTree, espLogContentGroupNames[ESPLCGUserResp]);
                 Owned<IPropertyTree> userRespTree = createPTreeFromXMLString(userResp);
                 pTree->addPropTree(userRespTree->queryName(), LINK(userRespTree));
             }
-            if (logDatasets && *logDatasets)
+            if (!isEmptyString(logDatasets))
             {
                 IPropertyTree* pTree = ensurePTree(logContentTree, espLogContentGroupNames[ESPLCGLogDatasets]);
                 Owned<IPropertyTree> logDatasetTree = createPTreeFromXMLString(logDatasets);
                 pTree->addPropTree(logDatasetTree->queryName(), LINK(logDatasetTree));
             }
-            if (backEndResp && *backEndResp)
+            if (!isEmptyString(backEndReq))
+                logContentTree->addProp(espLogContentGroupNames[ESPLCGBackEndReq], backEndReq);
+            if (!isEmptyString(backEndResp))
                 logContentTree->addProp(espLogContentGroupNames[ESPLCGBackEndResp], backEndResp);
         }
     }
@@ -271,6 +282,15 @@ IEspUpdateLogRequestWrap* CLogContentFilter::filterLogContent(IEspUpdateLogReque
                     logContentEmpty = false;
                 }
             }
+            if (logBackEndReq)
+            {
+                const char* request = req->getBackEndRequest();
+                if (!isEmptyString(request))
+                {
+                    logContentTree->addProp(espLogContentGroupNames[ESPLCGBackEndReq], request);
+                    logContentEmpty = false;
+                }
+            }
             if (logBackEndResp)
             {
                 const char* resp = req->getBackEndResponse();

+ 14 - 7
esp/logging/logginglib/loggingagentbase.hpp

@@ -34,12 +34,13 @@ enum ESPLogContentGroup
     ESPLCGUserReq = 2,
     ESPLCGUserResp = 3,
     ESPLCGLogDatasets = 4,
+    ESPLCGBackEndReq = 5,
     ESPLCGBackEndResp = 5,
     ESPLCGAll = 6
 };
 
 static const char * const espLogContentGroupNames[] = { "ESPContext", "UserContext", "UserRequest", "UserResponse",
-    "LogDatasets", "BackEndResponse", "", NULL };
+    "LogDatasets", "BackEndRequest", "BackEndResponse", "", NULL };
 
 #define UPDATELOGTHREADWAITINGTIME 3000
 
@@ -137,6 +138,7 @@ interface IEspUpdateLogRequestWrap : extends IInterface
     virtual IPropertyTree* getUserRequest()=0;
     virtual IPropertyTree* getLogRequestTree()=0;
     virtual IInterface* getExtraLog()=0;
+    virtual const char* getBackEndRequest()=0;
     virtual const char* getBackEndResponse()=0;
     virtual const char* getUserResponse()=0;
     virtual const char* getLogDatasets()=0;
@@ -149,6 +151,7 @@ interface IEspUpdateLogRequestWrap : extends IInterface
     virtual void setUserRequest(IPropertyTree* val)=0;
     virtual void setLogRequestTree(IPropertyTree* val)=0;
     virtual void setExtraLog(IInterface* val)=0;
+    virtual void setBackEndRequest(const char* val)=0;
     virtual void setBackEndResponse(const char* val)=0;
     virtual void setUserResponse(const char* val)=0;
     virtual void setLogDatasets(const char* val)=0;
@@ -167,7 +170,7 @@ class CUpdateLogRequestWrap : implements IEspUpdateLogRequestWrap, public CInter
     Owned<IPropertyTree> userRequest;
     Owned<IPropertyTree> logRequestTree;
     Owned<IInterface> extraLog;
-    StringAttr  backEndResponse;
+    StringAttr  backEndRequest, backEndResponse;
     StringAttr  userResponse;
     StringAttr  logDatasets;
     unsigned    retryCount;
@@ -179,10 +182,10 @@ public:
     CUpdateLogRequestWrap(const char* _GUID, const char* _option, const char* _updateLogRequest)
         : GUID(_GUID), option(_option), updateLogRequest(_updateLogRequest), retryCount(0) {};
     CUpdateLogRequestWrap(const char* _GUID, const char* _option, IPropertyTree* _espContext,
-        IPropertyTree*_userContext, IPropertyTree*_userRequest, const char *_backEndResponse, const char *_userResponse,
-        const char *_logDatasets)
-        : GUID(_GUID), option(_option), backEndResponse(_backEndResponse), userResponse(_userResponse),
-        logDatasets(_logDatasets), retryCount(0)
+        IPropertyTree*_userContext, IPropertyTree*_userRequest, const char *_backEndRequest,
+        const char *_backEndResponse, const char *_userResponse, const char *_logDatasets)
+        : GUID(_GUID), option(_option), backEndRequest(_backEndRequest), backEndResponse(_backEndResponse),
+        userResponse(_userResponse), logDatasets(_logDatasets), retryCount(0)
     {
         userContext.setown(_userContext);
         espContext.setown(_espContext);
@@ -202,6 +205,7 @@ public:
         userContext.clear();
         userResponse.clear();
         logDatasets.clear();
+        backEndRequest.clear();
         backEndResponse.clear();
         updateLogRequest.clear();
         logRequestTree.clear();
@@ -216,6 +220,7 @@ public:
     IPropertyTree* getUserRequest() {return userRequest.getLink();};
     IPropertyTree* getLogRequestTree() {return logRequestTree.getLink();};
     IInterface* getExtraLog() {return extraLog.getLink();};
+    const char* getBackEndRequest() {return backEndRequest.get();};
     const char* getBackEndResponse() {return backEndResponse.get();};
     const char* getUserResponse() {return userResponse.get();};
     const char* getLogDatasets() {return logDatasets.get();};
@@ -228,6 +233,7 @@ public:
     void setUserRequest(IPropertyTree* val) {userRequest.setown(val);};
     void setLogRequestTree(IPropertyTree* val) {logRequestTree.setown(val);};
     void setExtraLog(IInterface* val) {extraLog.setown(val);};
+    void setBackEndRequest(const char* val) {backEndRequest.set(val);};
     void setBackEndResponse(const char* val) {backEndResponse.set(val);};
     void setUserResponse(const char* val) {userResponse.set(val);};
     void setLogDatasets(const char* val) {logDatasets.set(val);};
@@ -267,7 +273,8 @@ public:
 
 class LOGGINGCOMMON_API CLogContentFilter : public CInterface
 {
-    bool            logBackEndResp;
+    bool            logBackEndReq = true;
+    bool            logBackEndResp = true;
     StringArray     logContentFilters;
     CIArrayOf<CESPLogContentGroupFilters> groupFilters;
 

+ 3 - 3
esp/logging/loggingmanager/loggingmanager.cpp

@@ -97,7 +97,7 @@ bool CLoggingManager::updateLog(IEspLogEntry* entry, StringBuffer& status)
         return updateLog(entry->getEspContext(), entry->getOption(), entry->getLogInfoTree(), entry->getExtraLog(), status);
 
     return updateLog(entry->getEspContext(), entry->getOption(), entry->getUserContextTree(), entry->getUserRequestTree(),
-        entry->getBackEndResp(), entry->getUserResp(), entry->getLogDatasets(), status);
+        entry->getBackEndReq(), entry->getBackEndResp(), entry->getUserResp(), entry->getLogDatasets(), status);
 }
 
 bool CLoggingManager::updateLog(IEspContext* espContext, const char* option, const char* logContent, StringBuffer& status)
@@ -147,7 +147,7 @@ bool CLoggingManager::updateLog(IEspContext* espContext, const char* option, IPr
 }
 
 bool CLoggingManager::updateLog(IEspContext* espContext, const char* option, IPropertyTree* userContext, IPropertyTree* userRequest,
-        const char* backEndResp, const char* userResp, const char* logDatasets, StringBuffer& status)
+    const char* backEndReq, const char* backEndResp, const char* userResp, const char* logDatasets, StringBuffer& status)
 {
     if (!initialized)
         throw MakeStringException(-1,"LoggingManager not initialized");
@@ -178,7 +178,7 @@ bool CLoggingManager::updateLog(IEspContext* espContext, const char* option, IPr
             espContextTree->addProp("ResponseTime", VStringBuffer("%.4f", (msTick()-espContext->queryCreationTime())/1000.0));
         }
         Owned<IEspUpdateLogRequestWrap> req =  new CUpdateLogRequestWrap(nullptr, option, espContextTree.getClear(), LINK(userContext), LINK(userRequest),
-            backEndResp, userResp, logDatasets);
+            backEndReq, backEndResp, userResp, logDatasets);
         Owned<IEspUpdateLogResponse> resp =  createUpdateLogResponse();
         bRet = updateLog(espContext, *req, *resp, status);
     }

+ 2 - 0
esp/logging/loggingmanager/loggingmanager.h

@@ -34,6 +34,7 @@ interface IEspLogEntry  : implements IInterface
     virtual void setOwnExtraLog(IInterface* extra) = 0;
     virtual void setOption(const char* ptr) = 0;
     virtual void setLogContent(const char* ptr) = 0;
+    virtual void setBackEndReq(const char* ptr) = 0;
     virtual void setBackEndResp(const char* ptr) = 0;
     virtual void setUserResp(const char* ptr) = 0;
     virtual void setLogDatasets(const char* ptr) = 0;
@@ -45,6 +46,7 @@ interface IEspLogEntry  : implements IInterface
     virtual IInterface* getExtraLog() = 0;
     virtual const char* getOption() = 0;
     virtual const char* getLogContent() = 0;
+    virtual const char* getBackEndReq() = 0;
     virtual const char* getBackEndResp() = 0;
     virtual const char* getUserResp() = 0;
     virtual const char* getLogDatasets() = 0;

+ 4 - 2
esp/logging/loggingmanager/loggingmanager.hpp

@@ -34,7 +34,7 @@
 class CEspLogEntry : implements IEspLogEntry, public CInterface
 {
     Owned<IEspContext> espContext;
-    StringAttr option, logContent, backEndResp, userResp, logDatasets;
+    StringAttr option, logContent, backEndReq, backEndResp, userResp, logDatasets;
     Owned<IPropertyTree> userContextTree;
     Owned<IPropertyTree> userRequestTree;
     Owned<IPropertyTree> logInfoTree;
@@ -52,6 +52,7 @@ public:
     void setOwnExtraLog(IInterface* extra) { extraLog.setown(extra); };
     void setOption(const char* ptr) { option.set(ptr); };
     void setLogContent(const char* ptr) { logContent.set(ptr); };
+    void setBackEndReq(const char* ptr) { backEndReq.set(ptr); };
     void setBackEndResp(const char* ptr) { backEndResp.set(ptr); };
     void setUserResp(const char* ptr) { userResp.set(ptr); };
     void setLogDatasets(const char* ptr) { logDatasets.set(ptr); };
@@ -63,6 +64,7 @@ public:
     IInterface* getExtraLog() { return extraLog; };
     const char* getOption() { return option.get(); };
     const char* getLogContent() { return logContent.get(); };
+    const char* getBackEndReq() { return backEndReq.get(); };
     const char* getBackEndResp() { return backEndResp.get(); };
     const char* getUserResp() { return userResp.get(); };
     const char* getLogDatasets() { return logDatasets.get(); };
@@ -80,7 +82,7 @@ class CLoggingManager : implements ILoggingManager, public CInterface
 
     bool updateLog(IEspContext* espContext, IEspUpdateLogRequestWrap& req, IEspUpdateLogResponse& resp, StringBuffer& status);
     bool updateLog(IEspContext* espContext, const char* option, IPropertyTree* userContext, IPropertyTree* userRequest,
-        const char* backEndResp, const char* userResp, const char* logDatasets, StringBuffer& status);
+        const char* backEndReq, const char* backEndResp, const char* userResp, const char* logDatasets, StringBuffer& status);
     bool updateLog(IEspContext* espContext, const char* option, const char* logContent, StringBuffer& status);
     bool updateLog(IEspContext* espContext, const char* option, IPropertyTree* logInfo, IInterface* extraLog, StringBuffer& status);
 

+ 1 - 1
esp/scm/ws_fileio.ecm

@@ -19,7 +19,7 @@
 
 ESPrequest CreateFileRequest
 {
-    string DestDropZone;        //name or IP address; 
+    string DestDropZone;        //name or IP address
     string DestRelativePath;    //file name and/or path; related to the predefined directory of the dropzone
     bool   Overwrite(false);
 };

+ 7 - 6
esp/services/esdl_svc_engine/esdl_binding.cpp

@@ -564,7 +564,7 @@ void EsdlServiceImpl::handleServiceRequest(IEspContext &context,
     else
         ESPLOG(LogMin,"DESDL: Transaction ID could not be generated!");
 
-    StringBuffer origResp;
+    StringBuffer origResp, soapmsg;
     EsdlMethodImplType implType = EsdlMethodImplUnknown;
 
     if(stricmp(mthName, "echotest")==0 || mthdef.hasProp("EchoTest"))
@@ -665,7 +665,7 @@ void EsdlServiceImpl::handleServiceRequest(IEspContext &context,
             reqcontent.set(reqWriter->str());
             context.addTraceSummaryTimeStamp(LogNormal, "serialized-xmlreq");
 
-            handleFinalRequest(context, m_serviceLevelRequestTransform, crt, tgtcfg, tgtctx, srvdef, mthdef, ns, reqcontent, origResp, isPublishedQuery(implType), implType==EsdlMethodImplProxy);
+            handleFinalRequest(context, m_serviceLevelRequestTransform, crt, tgtcfg, tgtctx, srvdef, mthdef, ns, reqcontent, origResp, isPublishedQuery(implType), implType==EsdlMethodImplProxy, soapmsg);
             context.addTraceSummaryTimeStamp(LogNormal, "end-HFReq");
 
             if (isPublishedQuery(implType))
@@ -685,12 +685,12 @@ void EsdlServiceImpl::handleServiceRequest(IEspContext &context,
     }
 
     context.addTraceSummaryTimeStamp(LogNormal, "srt-resLogging");
-    handleResultLogging(context, tgtctx.get(), req,  origResp.str(), out.str(), logdata.str());
+    handleResultLogging(context, tgtctx.get(), req, soapmsg.str(), origResp.str(), out.str(), logdata.str());
     context.addTraceSummaryTimeStamp(LogNormal, "end-resLogging");
     ESPLOG(LogMax,"Customer Response: %s", out.str());
 }
 
-bool EsdlServiceImpl::handleResultLogging(IEspContext &espcontext, IPropertyTree * reqcontext, IPropertyTree * request,const char * rawresp, const char * finalresp, const char * logdata)
+bool EsdlServiceImpl::handleResultLogging(IEspContext &espcontext, IPropertyTree * reqcontext, IPropertyTree * request,const char *rawreq, const char * rawresp, const char * finalresp, const char * logdata)
 {
     bool success = true;
     if (m_oLoggingManager)
@@ -701,6 +701,7 @@ bool EsdlServiceImpl::handleResultLogging(IEspContext &espcontext, IPropertyTree
         entry->setOwnUserContextTree(LINK(reqcontext));
         entry->setOwnUserRequestTree(LINK(request));
         entry->setUserResp(finalresp);
+        entry->setBackEndReq(rawreq);
         entry->setBackEndResp(rawresp);
         entry->setLogDatasets(logdata);
 
@@ -839,9 +840,9 @@ void EsdlServiceImpl::handleFinalRequest(IEspContext &context,
                                          StringBuffer& req,
                                          StringBuffer &out,
                                          bool isroxie,
-                                         bool isproxy)
+                                         bool isproxy,
+                                         StringBuffer& soapmsg)
 {
-    StringBuffer soapmsg;
     soapmsg.append(
         "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
         "<soap:Envelope xmlns:soap=\"http://schemas.xmlsoap.org/soap/envelope/\">");

+ 2 - 2
esp/services/esdl_svc_engine/esdl_binding.hpp

@@ -170,10 +170,10 @@ public:
     virtual void processRequest(IEspContext &context, IEsdlDefService &srvdef, IEsdlDefMethod &mthdef, const char *ns, StringBuffer &req) {};
     virtual void processResponse(IEspContext &context, IEsdlDefService &srvdef, IEsdlDefMethod &mthdef, const char *ns, StringBuffer &resp) {};
     virtual void createServersList(IEspContext &context, IEsdlDefService &srvdef, IEsdlDefMethod &mthdef, StringBuffer &servers) {};
-    virtual bool handleResultLogging(IEspContext &espcontext, IPropertyTree * reqcontext, IPropertyTree * request,  const char * rawresp, const char * finalresp, const char * logdata);
+    virtual bool handleResultLogging(IEspContext &espcontext, IPropertyTree * reqcontext, IPropertyTree * request,  const char * rawreq, const char * rawresp, const char * finalresp, const char * logdata);
     void handleEchoTest(const char *mthName, IPropertyTree *req, StringBuffer &soapResp, ESPSerializationFormat format);
     void handlePingRequest(const char *mthName,StringBuffer &out,ESPSerializationFormat format);
-    virtual void handleFinalRequest(IEspContext &context, IEsdlCustomTransform *srvCrt, IEsdlCustomTransform *mthCrt, Owned<IPropertyTree> &tgtcfg, Owned<IPropertyTree> &tgtctx, IEsdlDefService &srvdef, IEsdlDefMethod &mthdef, const char *ns, StringBuffer& req, StringBuffer &out, bool isroxie, bool isproxy);
+    virtual void handleFinalRequest(IEspContext &context, IEsdlCustomTransform *srvCrt, IEsdlCustomTransform *mthCrt, Owned<IPropertyTree> &tgtcfg, Owned<IPropertyTree> &tgtctx, IEsdlDefService &srvdef, IEsdlDefMethod &mthdef, const char *ns, StringBuffer& req, StringBuffer &out, bool isroxie, bool isproxy, StringBuffer &rawreq);
     void getSoapBody(StringBuffer& out,StringBuffer& soapresp);
     void getSoapError(StringBuffer& out,StringBuffer& soapresp,const char *,const char *);
 

+ 1 - 21
esp/services/ws_dfu/ws_dfuService.cpp

@@ -1896,32 +1896,12 @@ bool CWsDfuEx::getUserFilePermission(IEspContext &context, IUserDescriptor* udes
         return false;
     }
 
-    StringBuffer username;
-    StringBuffer password;
-    udesc->getUserName(username);
-    if (username.length() < 1)
-    {
-        DBGLOG("User Name not defined\n");
-        return false;
-    }
-
-    udesc->getPassword(password);
-    Owned<ISecUser> user = secmgr->createUser(username);
-    if (!user)
-    {
-        DBGLOG("User %s not found\n", username.str());
-        return false;
-    }
-
-    if (password.length() > 0)
-        user->credentials().setPassword(password);
-
     CDfsLogicalFileName dlfn;
     dlfn.set(logicalName);
 
     //Start from the SecAccess_Full. Decrease the permission whenever a component has a lower permission.
     permission = SecAccess_Full;
-    getFilePermission(dlfn, *user, udesc, secmgr, permission);
+    getFilePermission(dlfn, *context.queryUser(), udesc, secmgr, permission);
 
     return true;
 }

+ 64 - 78
esp/services/ws_fileio/ws_fileioservice.cpp

@@ -26,104 +26,90 @@
 #endif
 #include "exception_util.hpp"
 
-///#define FILE_DESPRAY_URL "FileDesprayAccess"
 #define FILE_IO_URL     "FileIOAccess"
 
 void CWsFileIOEx::init(IPropertyTree *cfg, const char *process, const char *service)
 {
 }
 
-bool CWsFileIOEx::CheckServerAccess(const char* server, const char* relPath, StringBuffer& netAddr, StringBuffer& absPath)
+bool CWsFileIOEx::CheckServerAccess(const char* targetDZNameOrAddress, const char* relPath, StringBuffer& netAddr, StringBuffer& absPath)
 {
-    if (!server || (server[0] == 0) || !relPath || (relPath[0] == 0))
+    if (!targetDZNameOrAddress || (targetDZNameOrAddress[0] == 0) || !relPath || (relPath[0] == 0))
         return false;
 
+    netAddr.clear();
     Owned<IEnvironmentFactory> factory = getEnvironmentFactory(true);
     Owned<IConstEnvironment> env = factory->openEnvironment();
-    Owned<IPropertyTree> pEnvRoot = &env->getPTree();
-    IPropertyTree* pEnvSoftware = pEnvRoot->queryPropTree("Software");
-    Owned<IPropertyTree> pRoot = createPTreeFromXMLString("<Environment/>");
-    IPropertyTree* pSoftware = pRoot->addPropTree("Software", createPTree("Software"));
-    if (pEnvSoftware && pSoftware)
+    Owned<IConstDropZoneInfo> dropZoneInfo = env->getDropZone(targetDZNameOrAddress);
+    if (!dropZoneInfo || !dropZoneInfo->isECLWatchVisible())
     {
-        Owned<IPropertyTreeIterator> it = pEnvSoftware->getElements("DropZone");
-        ForEach(*it)
-        {
-            const char *zonename = it->query().queryProp("@computer");
-            if (!strcmp(zonename, "."))
-                zonename = "localhost";
+        if (stricmp(targetDZNameOrAddress, "localhost")==0)
+            targetDZNameOrAddress = ".";
 
-            if (zonename && *zonename)
+        Owned<IConstDropZoneInfoIterator> dropZoneItr = env->getDropZoneIteratorByAddress(targetDZNameOrAddress);
+        ForEach(*dropZoneItr)
+        {
+            IConstDropZoneInfo & dz = dropZoneItr->query();
+            if (dz.isECLWatchVisible())
             {
-                StringBuffer xpath;
-                xpath.appendf("Hardware/Computer[@name='%s']/@netAddress", zonename);
-                char* addr = (char*) pEnvRoot->queryProp(xpath.str());
-                if (addr && *addr)
-                {           
-                    StringBuffer sNetAddr;
-                    if (strcmp(addr, "."))
-                    {       
-                        sNetAddr.append(addr);
-                    }
-                    else
-                    {
-                        StringBuffer ipStr;
-                        IpAddress ipaddr = queryHostIP();
-                        ipaddr.getIpText(ipStr);
-                        if (ipStr.length() > 0)
-                        {
-//#define MACHINE_IP "10.239.219.9"
-#ifdef MACHINE_IP
-                            sNetAddr.append(MACHINE_IP);
-#else
-                            sNetAddr.append(ipStr.str());
-#endif
-                        }
-                    }
-                    bool dropzoneFound = false;
-                    if (!stricmp(zonename, server))
-                    {
-                        dropzoneFound = true;
-                    }
-                    else if (!stricmp(sNetAddr.str(), server))
-                    {
-                        dropzoneFound = true;
-                    }
-                    
-                    if (!dropzoneFound)
-                    {
-                        continue;
-                    }
-
-                    char ch = '\\';
-                    Owned<IConstMachineInfo> machine = env->getMachineByAddress(addr);
-                    //Owned<IConstEnvironment> env = factory->openEnvironment();
-                    //Owned<IConstMachineInfo> machine = getMachineByAddress(pEnvRoot, env, addr);
+                dropZoneInfo.set(&dropZoneItr->query());
+                netAddr.set(targetDZNameOrAddress);
+                break;
+            }
+        }
+    }
 
-                    if (machine && (machine->getOS() == MachineOsLinux || machine->getOS() == MachineOsSolaris))
-                    {
-                        ch = '/';
-                    }
-                    
-                    StringBuffer dir;
-                    IPropertyTree* pDropZone = pSoftware->addPropTree("DropZone", &it->get());
-                    pDropZone->getProp("@directory", dir);
-                    if (dir.length() > 0)
+    if (dropZoneInfo)
+    {
+        SCMStringBuffer directory, computerName, computerAddress;
+        if (netAddr.isEmpty())
+        {
+            dropZoneInfo->getComputerName(computerName); //legacy structure
+            if(computerName.length() != 0)
+            {
+                Owned<IConstMachineInfo> machine = env->getMachine(computerName.str());
+                if (machine)
+                {
+                    machine->getNetAddress(computerAddress);
+                    if (computerAddress.length() != 0)
                     {
-                        if (relPath[0] != ch)
-                        {
-                            absPath.appendf("%s%c%s", dir.str(), ch, relPath);
-                        }
-                        else
-                        {
-                            absPath.appendf("%s%s", dir.str(), relPath);
-                        }
-                        netAddr = sNetAddr;
-                        return true;
+                        netAddr.set(computerAddress.str());
                     }
                 }
             }
+            else
+            {
+                Owned<IConstDropZoneServerInfoIterator> serverIter = dropZoneInfo->getServers();
+                ForEach(*serverIter)
+                {
+                    IConstDropZoneServerInfo &serverElem = serverIter->query();
+                    serverElem.getServer(netAddr.clear());
+                    if (!netAddr.isEmpty())
+                        break;
+                }
+            }
         }
+
+        dropZoneInfo->getDirectory(directory);
+        if (directory.length() != 0)
+        {
+            const char ch = getPathSepChar(directory.str());
+            if (relPath[0] != ch)
+            {
+                absPath.appendf("%s%c%s", directory.str(), ch, relPath);
+            }
+            else
+            {
+                absPath.appendf("%s%s", directory.str(), relPath);
+            }
+            return true;
+        }
+        else
+        {
+            SCMStringBuffer dropZoneName;
+            ESPLOG(LogMin, "Found LZ '%s' without a directory attribute!", dropZoneInfo->getName(dropZoneName).str());
+        }
+
     }
 
     return false;

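The rewritten CheckServerAccess() resolves the drop zone's directory and joins it with the caller's relative path, inserting the platform separator only when the relative path does not already start with one. A minimal sketch of that join as a free-standing helper (the function name is hypothetical, not part of this changeset):

    #include <string>

    // Prepend the drop zone directory to a relative path without doubling the
    // separator when relPath already begins with one (mirrors the appendf calls above).
    std::string joinDropZonePath(const std::string & directory, const std::string & relPath, char sep)
    {
        if (!relPath.empty() && relPath[0] == sep)
            return directory + relPath;
        return directory + sep + relPath;
    }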
+ 17 - 3
esp/src/eclwatch/UserQueryWidget.js

@@ -152,10 +152,12 @@ define([
         },
 
        _onCloseFilePermissions: function () {
-        this.filePermissionDialog.hide();
-       },
+            this.filePermissionDialog.hide();
+            this.nameSelect.reset();
+            this.usersSelect.set("value","");
+            this.groupsSelect.set("value","");
+        },
         _onCheckFilePermissions: function () {
-            var context = this;
             this.filePermissionDialog.show();
         },
         _onCheckFileSubmit: function () {
@@ -483,6 +485,18 @@ define([
                 context.refreshUsersGrid();
             });
 
+            this.filePermissionDialog.on("cancel", function(evt){
+                context._onCloseFilePermissions();
+            });
+
+            this.groupsSelect.on("click", function(evt){
+                context.usersSelect.set("value", "");
+            });
+
+            this.usersSelect.on("click", function(evt){
+                context.groupsSelect.set("value", "");
+            });
+
             WsAccount.MyAccount({
             }).then(function (response){
                 if (lang.exists("MyAccountResponse.distinguishedName", response)) {

+ 3 - 3
esp/src/package-lock.json

@@ -4562,9 +4562,9 @@
       }
     },
     "lodash": {
-      "version": "4.17.10",
-      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.10.tgz",
-      "integrity": "sha512-UejweD1pDoXu+AD825lWwp4ZGtSwgnpZxb3JDViD7StjQz+Nb/6l093lx4OQ0foGWNRoc19mWy7BzL+UAK2iVg=="
+      "version": "4.17.11",
+      "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.11.tgz",
+      "integrity": "sha512-cQKh8igo5QUhZ7lg38DYWAxMvjSAKG0A8wGSVimP07SIUEK2UO+arSRKbRZWtelMtN5V0Hkwh5ryOto/SshYIg=="
     },
     "lodash.assignwith": {
       "version": "4.2.0",

+ 3 - 2
esp/src/package.json

@@ -58,6 +58,7 @@
     "file-loader": "2.0.0",
     "jshint": "2.9.6",
     "local-web-server": "2.6.0",
+    "lodash": "^4.17.11",
     "npm-run-all": "4.1.3",
     "rimraf": "2.6.2",
     "style-loader": "0.23.1",
@@ -65,8 +66,8 @@
     "typescript": "3.1.3",
     "url-loader": "1.1.2",
     "webpack": "4.21.0",
-    "webpack-cli": "3.1.2",
-    "webpack-bundle-analyzer": "3.0.3"
+    "webpack-bundle-analyzer": "3.0.3",
+    "webpack-cli": "3.1.2"
   },
   "author": "HPCC Systems",
   "license": "Apache-2.0",

+ 1 - 1
initfiles/bash/etc/init.d/export-path

@@ -15,6 +15,6 @@
 #    limitations under the License.
 ################################################################################
 currentDirectory=`pwd`
-sourceCmd="export \"PATH=${currentDirectory}:${PATH}\""
+sourceCmd="export \"PATH=${currentDirectory}:/usr/sbin:/usr/local/sbin:${PATH}\""
 eval $sourceCmd
 ##echo "path = $PATH"

+ 1 - 1
initfiles/componentfiles/configxml/cgencomplist_linux.xml.in

@@ -52,7 +52,7 @@
     <File name="backupnode_vars.xsl" method="xslt" destName="backupnode.conf"/>
   </Component>
   <Component name="sparkthor" processName="SparkThor" schema="sparkThor.xsd">
-    <File name="sparkThor.xsl" method="xslt" destName="spark-env.sh"/>
+    <File name="sparkThor.xsl" method="xslt" destName="spark-hpcc-env.sh"/>
     <File name="spark-defaults.xsl" method="xslt" destName="spark-defaults.conf"/>
   </Component>
   <Component name="ldapServer" processName="LDAPServerProcess">

+ 7 - 0
initfiles/componentfiles/configxml/dali.xsd

@@ -409,6 +409,13 @@
         </xs:appinfo>
       </xs:annotation>
     </xs:attribute>
+    <xs:attribute name="allowedClockVariance" use="optional" type="xs:string" default="5">
+      <xs:annotation>
+        <xs:appinfo>
+          <tooltip>Maximum number of seconds that client clocks may vary from the Dali clock; used when checking the digital signature on a permissions request.</tooltip>
+        </xs:appinfo>
+      </xs:annotation>
+    </xs:attribute>
     <xs:attribute name="checkScopeScans" type="xs:boolean" use="optional" default="true">
       <xs:annotation>
         <xs:appinfo>

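The new allowedClockVariance attribute bounds how far a client clock may drift from Dali's when a digitally signed permissions request is validated. A minimal sketch of the kind of check such a setting implies, using plain C time functions rather than the actual Dali code (names are hypothetical):

    #include <cmath>
    #include <ctime>

    // Accept a signed request only if its timestamp lies within the configured
    // number of seconds of the local clock, in either direction.
    bool withinAllowedClockVariance(time_t requestTimestamp, unsigned allowedClockVarianceSecs)
    {
        double deltaSecs = std::fabs(std::difftime(std::time(nullptr), requestTimestamp));
        return deltaSecs <= allowedClockVarianceSecs;
    }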
+ 1 - 1
initfiles/componentfiles/configxml/dali.xsl

@@ -240,7 +240,7 @@
         </xsl:element>
         <xsl:if test="string(@ldapServer) != ''">
           <xsl:element name="ldapSecurity">
-            <xsl:copy-of select="@ldapProtocol | @authMethod | @maxConnections | @workunitsBasedn | @filesDefaultUser | @filesDefaultPassword | @reqSignatureExpiry"/>
+            <xsl:copy-of select="@ldapProtocol | @authMethod | @maxConnections | @workunitsBasedn | @filesDefaultUser | @filesDefaultPassword | @reqSignatureExpiry | @allowedClockVariance"/>
             <xsl:variable name="ldapServerName" select="@ldapServer"/>
             <xsl:attribute name="filesBasedn">
                 <xsl:value-of select="/Environment/Software/LDAPServerProcess[@name=$ldapServerName]/@filesBasedn"/>

+ 3 - 3
initfiles/componentfiles/configxml/sparkThor.xsl.in

@@ -74,9 +74,9 @@
         <xsl:text>export SPARK_CONF_DIR=@RUNTIME_PATH@/</xsl:text><xsl:value-of select="@name"/><xsl:text>&#xa;</xsl:text>
         <xsl:text>export SPARK_WORKER_DIR=@RUNTIME_PATH@/</xsl:text><xsl:value-of select="@name"/><xsl:text>&#xa;</xsl:text>
         <!-- Allow for user defined spark-env.sh variables under @CONFIG_DIR@ -->
-        <xsl:text>if [[ -f @CONFIG_DIR@/externals/spark/spark-env.sh ]]; then</xsl:text>
-            <xsl:text>source @CONFIG_DIR@/externals/spark/spark-env.sh</xsl:text>
-        <xsl:text>fi</xsl:text>
+        <xsl:text>if [[ -f @CONFIG_DIR@/externals/spark-hadoop/spark-env.sh ]]; then</xsl:text><xsl:text>&#xa;</xsl:text>
+        <xsl:text>source @CONFIG_DIR@/externals/spark-hadoop/spark-env.sh</xsl:text><xsl:text>&#xa;</xsl:text>
+        <xsl:text>fi</xsl:text><xsl:text>&#xa;</xsl:text>
     </xsl:template>
 
     <!-- printVariable -->

+ 1 - 0
initfiles/etc/DIR_NAME/environment.xml.in

@@ -496,6 +496,7 @@
                resourcesBasedn="ou=DynamicESDL,ou=EspServices,ou=ecl"
                service="DESDLServiceTemplate"
                type=""
+               workunitsBasedn="ou=workunits,ou=ecl"
                wsdlServiceAddress="">
     <Authenticate access="Read"
                description="Root access to DynamicESDL service"

+ 4 - 3
initfiles/sbin/cluster_script.py

@@ -111,7 +111,7 @@ class ScriptExecution(object):
 
         if len(self.hosts) == 0:
             print("Could not get any host. At least one host is required.")
-            print("Reference following log for more information: ")
+            print("Refer to the following log file for more information: ")
             print(self.log_file)
             exit(0)
 
@@ -216,11 +216,12 @@ class ScriptExecution(object):
         script_name = os.path.basename(self.script_file)
         if not no_error_found:
             print("\n\n\033[91mError found during " + script_name + " execution.\033[0m")
-            print("Reference following log for more information: ")
+            print("Refer to the following log file for more information: ")
             print(self.log_file)
         else:
             print("\n\n" + script_name + " run successfully on all hosts in the cluster")
-
+            print("Refer to the following log file for more information: ")
+            print(self.log_file)
         print("\n")
 
         return no_error_found

+ 2 - 1
initfiles/sbin/hpcc/cluster/host.py

@@ -167,7 +167,8 @@ class Host(object):
                 return out_hosts
 
             for host in out_hosts:
-                m = re.search(host.ip, inet_out)
+                str = r"\s" + re.escape(host.ip) + r"\s"
+                m = re.search(str, inet_out)
                 if not m:
                     out_hosts_2.append(host)
 

+ 6 - 0
plugins/spark/CMakeLists.txt

@@ -142,6 +142,12 @@ if(SPARK)
         COMPONENT runtime
         DESTINATION "externals/spark-hadoop/conf"
         )
+    install(
+        FILES
+            ${CMAKE_CURRENT_SOURCE_DIR}/spark-env.install
+        COMPONENT runtime
+        DESTINATION "etc/init.d/install"
+        )
 
     configure_file("${CMAKE_CURRENT_SOURCE_DIR}/sparkthor.sh.in" "${CMAKE_CURRENT_BINARY_DIR}/sparkthor.sh" @ONLY)
     configure_file("${CMAKE_CURRENT_SOURCE_DIR}/sparkthor-worker.sh.in" "${CMAKE_CURRENT_BINARY_DIR}/sparkthor-worker.sh" @ONLY)

+ 25 - 0
plugins/spark/spark-env.install

@@ -0,0 +1,25 @@
+
+mkdir -p ${CONFIG_DIR}/rpmnew
+mkdir -p ${CONFIG_DIR}/externals/spark-hadoop
+
+printf "Installing %-44s ..." "spark-env.sh"
+
+if [ ! -e ${CONFIG_DIR}/spark-env.sh ]; then
+    # Always install new files without comment
+    cp -f ${INSTALL_DIR}/externals/spark-hadoop/conf/spark-env.sh ${CONFIG_DIR}/externals/spark-hadoop/spark-env.sh
+    cp -f ${INSTALL_DIR}/externals/spark-hadoop/conf/spark-env.sh ${CONFIG_DIR}/rpmnew/spark-env.sh
+    log_success_msg
+elif [ -e ${CONFIG_DIR}/rpmnew/spark-env.sh ] && ! `diff -q ${CONFIG_DIR}/rpmnew/spark-env.sh ${INSTALL_DIR}/externals/spark-hadoop/conf/spark-env.sh >/dev/null` ; then
+    # There are changes in the default config since last installed
+    if ! `diff -q ${CONFIG_DIR}/rpmnew/spark-env.sh ${CONFIG_DIR}/externals/spark-hadoop/spark-env.sh >/dev/null` ; then
+        # User has made their own changes too, so don't overwrite
+        log_failure_msg "Not overwriting modified configuration file spark-env.sh"
+    else
+        # User has NOT made their own changes - ok to update
+        cp -f ${INSTALL_DIR}/externals/spark-hadoop/conf/spark-env.sh ${CONFIG_DIR}/externals/spark-hadoop/spark-env.sh
+        cp -f ${INSTALL_DIR}/externals/spark-hadoop/conf/spark-env.sh ${CONFIG_DIR}/rpmnew/spark-env.sh
+        log_success_msg "Updated configuration file spark-env.sh"
+    fi
+else
+    log_success_msg "No changes to configuration file spark-env.sh"
+fi

+ 13 - 7
plugins/spark/spark-env.sh.in

@@ -67,11 +67,17 @@
 # - MKL_NUM_THREADS=1        Disable multi-threading of Intel MKL
 # - OPENBLAS_NUM_THREADS=1   Disable multi-threading of OpenBLAS
 
-ALL_IPS=$(@ADMIN_PATH@/configgen -env @CONFIG_DIR@/@ENV_XML_FILE@ -listall2 | awk -F , '{print $3}' | sort | uniq)
-LOCAL_IPS=$(ip addr show | grep inet | grep -v inet6 | awk '{print $2}' | awk -F / '{print $1}' | grep -v 127.0.0.1 | sort)
-export SPARK_LOCAL_IP=($(comm -12 <(printf '%s\n' "${LOCAL_IPS[@]}") <(printf '%s\n' "${ALL_IPS[@]}")))
+if [[ -e "@CONFIG_DIR@/@ENV_CONF_FILE@" ]]; then
+    interface=$(cat @CONFIG_DIR@/@ENV_CONF_FILE@ | awk -F= '/^interface/ {print $2;}')
+    if [[ "${interface}" == "*" ]]; then
+        unset interface
+    fi
+fi
 
-
-SPARK_MASTER_HOST=$(@ADMIN_PATH@/configgen -env @CONFIG_DIR@/@ENV_XML_FILE@ -listall -t esp | awk -F , 'NR==1{print $3}')
-SPARK_WORKER_CORES=1
-SPARK_WORKER_MEMORY=4g
+if [[ -z ${interface:+x} ]]; then 
+    ALL_IPS=$(@ADMIN_PATH@/configgen -env @CONFIG_DIR@/@ENV_XML_FILE@ -listall2 | awk -F , '{print $3}' | sort | uniq)
+    LOCAL_IPS=$(ip addr show | grep inet | grep -v inet6 | awk '{print $2}' | awk -F / '{print $1}' | grep -v 127.0.0.1 | sort)
+    export SPARK_LOCAL_IP=$(comm -12 <(printf '%s\n' "${LOCAL_IPS[@]}") <(printf '%s\n' "${ALL_IPS[@]}"))
+else
+    export SPARK_LOCAL_IP=$(ip address show dev ${interface} | grep inet | grep -v inet6 | awk '{print $2;}' | awk -F / '{print $1;}') 
+fi

+ 4 - 4
plugins/spark/sparkthor-worker.sh.in

@@ -33,20 +33,20 @@ cd @RUNTIME_PATH@/${_component}
 
 MASTER_IP="$(@ADMIN_PATH@/configgen -env @CONFIG_DIR@/@ENV_XML_FILE@ -listall -c ${_component} | awk -F "," '{print $3;}')"
     
-log "rsync -e \"ssh -o LogLevel=QUIET -o StrictHostKeyChecking=no\" --timeout=60 ${MASTER_IP}:@RUNTIME_PATH@/${_component}/spark-env.sh @RUNTIME_PATH@/${_component}/spark-env.sh"
+log "rsync -e \"ssh -o LogLevel=QUIET -o StrictHostKeyChecking=no\" --timeout=60 ${MASTER_IP}:@RUNTIME_PATH@/${_component}/spark-hpcc-env.sh @RUNTIME_PATH@/${_component}/spark-hpcc-env.sh"
 rsync_att=3
 rsync_stat=1
 while [[ $rsync_stat -ne 0 && $rsync_att -gt 0 ]] ; do
-    rsync -e "ssh -o LogLevel=QUIET -o StrictHostKeyChecking=no" --timeout=60 ${MASTER_IP}:@RUNTIME_PATH@/${_component}/spark-env.sh spark-env.sh
+    rsync -e "ssh -o LogLevel=QUIET -o StrictHostKeyChecking=no" --timeout=60 ${MASTER_IP}:@RUNTIME_PATH@/${_component}/spark-hpcc-env.sh spark-hpcc-env.sh
     rsync_stat=$?
     ((rsync_att--))
     log "rsync returns ${rsync_stat}"
 done
-if [ ! -f @RUNTIME_PATH@/${_component}/spark-env.sh ] ; then
+if [ ! -f @RUNTIME_PATH@/${_component}/spark-hpcc-env.sh ] ; then
     log "Error, $slavesfname file missing"
     exit 1
 fi
-source @RUNTIME_PATH@/${_component}/spark-env.sh
+source @RUNTIME_PATH@/${_component}/spark-hpcc-env.sh
 
 MASTER_URL="spark://${MASTER_IP}:${SPARK_MASTER_PORT}"
 

+ 1 - 1
plugins/spark/sparkthor.sh.in

@@ -26,7 +26,7 @@ _component=${2:-mysparkthor}
 export logfile="@LOG_PATH@/${_component}/sparkthor.log"
 source @INSTALL_DIR@/sbin/hpcc_setenv
 source @INSTALL_DIR@/etc/init.d/hpcc_common
-source ./spark-env.sh
+source ./spark-hpcc-env.sh
 
 # update slaves file in case state of environment has been altered since last run
 errorMessage=$( @EXEC_PATH@/daliadmin server=$DALISERVER clusternodes ${NODEGROUP} @RUNTIME_PATH@/${_component}/slaves 2>&1 )

+ 80 - 9
plugins/workunitservices/workunitservices.cpp

@@ -685,39 +685,110 @@ WORKUNITSERVICES_API void wsWorkunitTimings( ICodeContext *ctx, size32_t & __len
 }
 
 
-//This function is deprecated and no longer supported - I'm not sure it ever worked
-WORKUNITSERVICES_API IRowStream * wsWorkunitStatistics( ICodeContext *ctx, IEngineRowAllocator * allocator, const char *wuid, bool includeActivities, const char * filterText)
-{
-    return createNullRowStream();
-}
 
-class WsStreamedStatistics : public CInterfaceOf<IRowStream>
+class WsStreamedStatistics : public CInterfaceOf<IRowStream>, public IWuScopeVisitor
 {
 public:
     WsStreamedStatistics(IConstWorkUnit * _wu, IEngineRowAllocator * _resultAllocator, const char * _filter)
-    : wu(_wu), resultAllocator(_resultAllocator), filter(_filter)
+    : wu(_wu), resultAllocator(_resultAllocator)
     {
+        filter.addOutputProperties(PTstatistics);
+        filter.addFilter(_filter);
+        filter.finishedFilter();
         iter.setown(&wu->getScopeIterator(filter));
+        if (iter->first())
+            gatherStats();
+    }
+    ~WsStreamedStatistics()
+    {
+        releaseRows();
     }
 
     virtual const void *nextRow()
     {
-        return NULL;
+        for (;;)
+        {
+            if (!iter->isValid())
+                return nullptr;
+            if (rows.isItem(curRow))
+                return rows.item(curRow++);
+            if (iter->next())
+                gatherStats();
+        }
     }
     virtual void stop()
     {
         iter.clear();
+        releaseRows();
     }
 
+//interface IWuScopeVisitor
+    virtual void noteStatistic(StatisticKind kind, unsigned __int64 value, IConstWUStatistic & extra)
+    {
+        SCMStringBuffer creator;
+        SCMStringBuffer description;
+        unsigned __int64 count = extra.getCount();
+        unsigned __int64 max = extra.getMax();
+        StatisticCreatorType creatorType = extra.getCreatorType();
+        extra.getCreator(creator);
+        StatisticScopeType scopeType = extra.getScopeType();
+        const char * scope = extra.queryScope();
+        extra.getDescription(description, true);
+        StatisticMeasure measure = extra.getMeasure();
+
+        MemoryBuffer mb;
+        mb.append(sizeof(value),&value);
+        mb.append(sizeof(count),&count);
+        mb.append(sizeof(max),&max);
+        varAppend(mb, queryCreatorTypeName(creatorType));
+        varAppend(mb, creator.str());
+        varAppend(mb, queryScopeTypeName(scopeType));
+        varAppend(mb, scope);
+        varAppend(mb, queryStatisticName(kind));
+        varAppend(mb, description.str());
+        varAppend(mb, queryMeasureName(measure));
+
+        size32_t len = mb.length();
+        size32_t newSize;
+        void * row = resultAllocator->createRow(len, newSize);
+        memcpy(row, mb.bufferBase(), len);
+        rows.append(row);
+    }
+
+    virtual void noteAttribute(WuAttr attr, const char * value) {}
+    virtual void noteHint(const char * kind, const char * value) {}
 
 protected:
+    bool gatherStats()
+    {
+        rows.clear();
+        curRow = 0;
+        for (;;)
+        {
+            iter->playProperties(*this, PTstatistics);
+            if (rows.ordinality())
+                return true;
+            if (!iter->next())
+                return false;
+        }
+    }
+
+    void releaseRows()
+    {
+        while (rows.isItem(curRow))
+            resultAllocator->releaseRow(rows.item(curRow++));
+    }
+protected:
     Linked<IConstWorkUnit> wu;
     Linked<IEngineRowAllocator> resultAllocator;
     WuScopeFilter filter;
     Linked<IConstWUScopeIterator> iter;
+    ConstPointerArray rows;
+    unsigned curRow = 0;
 };
 
-WORKUNITSERVICES_API IRowStream * wsNewWorkunitStatistics( ICodeContext *ctx, IEngineRowAllocator * allocator, const char *wuid, const char * filterText)
+//This function is deprecated and no longer supported - I'm not sure it ever worked
+WORKUNITSERVICES_API IRowStream * wsWorkunitStatistics( ICodeContext *ctx, IEngineRowAllocator * allocator, const char *wuid, bool includeActivities, const char * filterText)
 {
     Owned<IConstWorkUnit> wu = getWorkunit(ctx, wuid);
     if (!wu)

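noteStatistic() above packs each statistic into a raw row: three fixed 8-byte numeric fields followed by variable-length string fields written with varAppend(). A minimal sketch of what such a helper is assumed to do, namely the 4-byte length prefix plus character data layout that ECL variable-length strings use (compare the row construction in stream.ecl later in this changeset):

    #include "jbuff.hpp"    // MemoryBuffer (jlib)
    #include <cstring>

    // Assumed behaviour only: serialise one ECL 'string' field as a size32_t
    // length prefix followed by the character data.
    static void appendEclString(MemoryBuffer & mb, const char * value)
    {
        size32_t len = value ? (size32_t)strlen(value) : 0;
        mb.append(sizeof(len), &len);
        if (len)
            mb.append(len, value);
    }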
+ 4 - 0
rtl/eclrtl/eclregex.cpp

@@ -404,7 +404,11 @@ public:
         UErrorCode uerr = U_ZERO_ERROR;
         UnicodeString uStrSearch;
 
+#if U_ICU_VERSION_MAJOR_NUM>=58
+        uStrSearch.setTo((const char16_t *) _search, _srcLen);
+#else
         uStrSearch.setTo(_search, _srcLen);
+#endif
         matcher->reset(uStrSearch);
         while (matcher->find())
         {
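Recent ICU releases define UChar as char16_t, so a UTF-16 search buffer held under another 16-bit type needs an explicit cast before UnicodeString::setTo() will accept it; the version guard keeps older ICU builds compiling unchanged. A minimal sketch of the same guarded conversion in isolation, reusing the threshold chosen in the change above (the helper name is hypothetical):

    #include <unicode/unistr.h>

    // Build a UnicodeString from a UTF-16 buffer typed as unsigned short,
    // casting to whichever pointer type the installed ICU headers expect.
    icu::UnicodeString toUnicodeString(const unsigned short * data, int32_t len)
    {
    #if U_ICU_VERSION_MAJOR_NUM >= 58
        return icu::UnicodeString(reinterpret_cast<const char16_t *>(data), len);
    #else
        return icu::UnicodeString(reinterpret_cast<const UChar *>(data), len);
    #endif
    }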

+ 3 - 1
system/jhtree/ctfile.cpp

@@ -125,9 +125,11 @@ extern jhtree_decl bool isIndexFile(IFile *file)
     try
     {
         offset_t size = file->size();
-        if (size <= sizeof(KeyHdr))
+        if (((offset_t)-1 == size) || (size <= sizeof(KeyHdr)))
             return false;
         Owned<IFileIO> io = file->open(IFOread);
+        if (!io)
+            return false;
         KeyHdr hdr;
         if (io->read(0, sizeof(hdr), &hdr) != sizeof(hdr))
             return false;

+ 2 - 1
system/jlib/jcomp.cpp

@@ -837,7 +837,8 @@ void CppCompiler::setTargetBitLength(unsigned bitlength)
     case Vs6CppCompiler:
         switch (bitlength)
         {
-        case 32: break; // 64-bit windows TBD at present....
+        case 32: break; // option is passed with --arch to VsDevCmd, cannot be controlled from the command line
+        case 64: break; // you will get a link error if the target architecture has not been set compatibly
         default:
             throwUnexpected();
         }

+ 3 - 2
system/jlib/jstats.cpp

@@ -1455,20 +1455,21 @@ bool StatsScopeId::setScopeText(const char * text, const char * * _next)
         if (MATCHES_CONST_PREFIX(text, FunctionScopePrefix))
         {
             setFunctionId(text+ strlen(FunctionScopePrefix));
+            *_next = text + strlen(text);
             return true;
         }
         break;
     case WorkflowScopePrefix[0]:
         if (MATCHES_CONST_PREFIX(text, WorkflowScopePrefix) && isdigit(text[strlen(WorkflowScopePrefix)]))
         {
-            setWorkflowId(atoi(text+ strlen(WorkflowScopePrefix)));
+            setWorkflowId(strtoul(text+ strlen(WorkflowScopePrefix), next, 10));
             return true;
         }
         break;
     case ChildGraphScopePrefix[0]:
         if (MATCHES_CONST_PREFIX(text, ChildGraphScopePrefix))
         {
-            setChildGraphId(atoi(text+ strlen(ChildGraphScopePrefix)));
+            setChildGraphId(strtoul(text+ strlen(ChildGraphScopePrefix), next, 10));
             return true;
         }
         break;

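Switching from atoi() to strtoul() lets setScopeText() report where the numeric part of a scope id ends, so the caller can carry on parsing a compound scope string. A minimal sketch of that parse-and-report pattern (the function name is hypothetical):

    #include <cstdlib>

    // Parse the leading decimal id of a scope segment such as "12:sg3" and,
    // if requested, return a pointer to the first character after the digits.
    unsigned parseScopeId(const char * text, const char ** next)
    {
        char * end = nullptr;
        unsigned long id = strtoul(text, &end, 10);
        if (next)
            *next = end;
        return (unsigned)id;
    }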
+ 7 - 0
system/jlib/jtime.cpp

@@ -365,6 +365,13 @@ void CDateTime::adjustTime(int deltaMins)
     set(simple);
 }
 
+void CDateTime::adjustTimeSecs(int deltaSecs)
+{
+    time_t simple = getSimple();
+    simple += deltaSecs;
+    set(simple);
+}
+
 void CDateTime::getDate(unsigned & year, unsigned & month, unsigned & day, bool local) const
 {
     if(local)

+ 1 - 0
system/jlib/jtime.hpp

@@ -88,6 +88,7 @@ public:
     void setTimeString(char const * str, char const * * end = NULL, bool local = false); // Leaves the date alone, sets to the time given as hh:mm:ss[.nnnnnnnnn]
     void setNow();
     void adjustTime(int deltaMins);
+    void adjustTimeSecs(int deltaSecs);
 
     void getDate(unsigned & year, unsigned & month, unsigned & day, bool local = false) const;
     void getTime(unsigned & hour, unsigned & minute, unsigned & second, unsigned & nano, bool local = false) const;

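adjustTimeSecs() adds second-granularity adjustment alongside the existing minute-based adjustTime(), which is what a clock-variance or signature-expiry window needs. A minimal usage sketch that relies only on the CDateTime members shown here; the window semantics are illustrative rather than taken from the Dali code:

    #include "jtime.hpp"    // CDateTime (jlib)

    // Compute the earliest and latest timestamps we are prepared to accept,
    // given a permitted clock variance in seconds.
    void computeAcceptanceWindow(int allowedClockVarianceSecs, CDateTime & earliest, CDateTime & latest)
    {
        earliest.setNow();
        earliest.adjustTimeSecs(-allowedClockVarianceSecs);   // tolerate client clocks running slow
        latest.setNow();
        latest.adjustTimeSecs(+allowedClockVarianceSecs);     // ...and clocks running fast
    }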
+ 18 - 6
system/mp/mpcomm.cpp

@@ -1733,7 +1733,8 @@ bool CMPChannel::send(MemoryBuffer &mb, mptag_t tag, mptag_t replytag, CTimeMon
     assertex(tm.timeout);
     size32_t msgsize = mb.length();
     PacketHeader hdr(msgsize+sizeof(PacketHeader),localep,remoteep,tag,replytag);
-    if (closed||(reply&&!isConnected())) {  // flag error if has been disconnected
+    if (closed||(reply&&!isConnected()))  // flag error if has been disconnected
+    {
 #ifdef _TRACELINKCLOSED
         LOG(MCdebugInfo(100), unknownJob, "CMPChannel::send closed on entry %d",(int)closed);
         PrintStackReport();
@@ -1744,14 +1745,24 @@ bool CMPChannel::send(MemoryBuffer &mb, mptag_t tag, mptag_t replytag, CTimeMon
 
     bool ismulti = (msgsize>MAXDATAPERPACKET);
     // pre-condition - ensure no clashes
-    for (;;) {
+    for (;;)
+    {
         sendmutex.lock();
-        if (ismulti) {
+        if (ismulti)
+        {
             if (multitag==TAG_NULL)     // don't want to interleave with other multi send
+            {
+                multitag = tag;
                 break;
+            }
         }
         else if (multitag!=tag)         // don't want to interleave with another of same tag
             break;
+
+        /* NB: block clashing multi-packet sends until the current one is done,
+         * but note that multipackethandler->send() temporarily releases the sendmutex
+         * between packets, to allow other tags to interleave
+         */
         sendwaiting++;
         sendmutex.unlock();
         sendwaitingsig.wait();
@@ -1774,14 +1785,15 @@ bool CMPChannel::send(MemoryBuffer &mb, mptag_t tag, mptag_t replytag, CTimeMon
         ~Cpostcondition() 
         { 
             if (multitag)
-                *multitag = TAG_NULL; 
-            if (sendwaiting) {
+                *multitag = TAG_NULL;
+            if (sendwaiting)
+            {
                 sendwaitingsig.signal(sendwaiting);
                 sendwaiting = 0;
             }
             sendmutex.unlock();
         }
-    } postcond(sendmutex,sendwaiting,sendwaitingsig,ismulti?&multitag:NULL); 
+    } postcond(sendmutex, sendwaiting, sendwaitingsig, (ismulti && (multitag != TAG_NULL)) ? &multitag : nullptr);
 
     if (ismulti)
         return parent->multipackethandler->send(this,hdr,mb,tm,sendmutex);

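The reworked pre-condition loop lets exactly one multi-packet send claim the channel's multitag at a time; other senders wait on sendwaitingsig and re-check once they are signalled. A minimal sketch of that claim/wait/release pattern with standard C++ primitives (the class and member names are illustrative, not the mpcomm ones):

    #include <condition_variable>
    #include <mutex>

    class MultiSendGate
    {
        std::mutex m;
        std::condition_variable cv;
        int owner = -1;                 // -1 plays the role of TAG_NULL
    public:
        void acquire(int tag, bool isMulti)
        {
            std::unique_lock<std::mutex> lock(m);
            // A multi-packet send waits until no other multi-packet send owns the channel;
            // a single-packet send only avoids interleaving with a multi-packet send of the same tag.
            cv.wait(lock, [&] { return isMulti ? owner == -1 : owner != tag; });
            if (isMulti)
                owner = tag;
        }
        void release(bool wasMulti)
        {
            std::lock_guard<std::mutex> lock(m);
            if (wasMulti)
                owner = -1;
            cv.notify_all();
        }
    };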
+ 11 - 0
system/security/LdapSecurity/ldapconnection.cpp

@@ -1213,6 +1213,17 @@ private:
                     }
                     throw MakeStringException(-1, "ldap_search_ext_s failed with 0x%x (%s)",err, ldap_err2string( err ));
                 }
+                if (!m_pPageBlock)
+                {
+                    if (m_pCookie)
+                    {
+                        ber_bvfree(m_pCookie);
+                        m_pCookie = NULL;
+                    }
+                    m_morePages = false;
+                    DBGLOG("CPagedLDAPSearch::requestNextPage: ldap_search_ext_s() returns SUCCESS with no result.");
+                    return false;
+                }
             }
 
             unsigned long l_errcode;

+ 2 - 0
testing/regress/ecl/casts.ecl

@@ -70,3 +70,5 @@ output((real)small != -1000.5,named('truex_5'));
 output(-1000.5 = (real)small ,named('falsex_5'));
 output((real)small != +1000.5,named('truex_6'));
 output(+1000.5 = (real)small ,named('falsex_6'));
+
+output((string)(decimal)(nofold('010.') + nofold('80')) = '10.8',named('decimal'));

+ 79 - 0
testing/regress/ecl/indexblobs.ecl

@@ -0,0 +1,79 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2019 HPCC Systems®.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+//version useEmbedded=true
+//version useEmbedded=false
+import setup;
+
+useEmbedded := #IFDEFINED(root.useEmbedded, false);
+#if (useEmbedded)
+attr := 'EMBEDDED';
+#else
+attr := '';
+#end
+prefix := setup.Files(false, false).IndexPrefix + WORKUNIT + '::';
+
+grandchildRec := RECORD
+ string f2;
+ string f3;
+END;
+
+childRec := RECORD
+ string f1;
+ #expand(attr) DATASET(grandchildrec) gkids;
+END;
+
+parentRec := RECORD
+ unsigned uid;
+ #expand(attr) DATASET(childRec) kids1;
+END;
+
+numParents := 10000;
+numKids := 10;
+numGKids := 5;
+
+d3(unsigned c) := DATASET(numGKids, TRANSFORM(grandchildrec, SELF.f2 := (string)COUNTER+c;
+                                                             SELF.f3 := (string)HASH(COUNTER)
+                                              )
+                         );
+d2(unsigned c) := DATASET(numKids, TRANSFORM(childRec, SELF.f1 := (string)c+COUNTER,
+                                                       SELF.gkids := d3(COUNTER+c)));
+ds := DATASET(numParents, TRANSFORM(parentRec, SELF.uid := COUNTER;
+                                               SELF.kids1 := d2(COUNTER);
+                                   ), DISTRIBUTED);
+
+
+
+idxRecord := RECORD
+ unsigned uid;
+ DATASET(childRec) payload{BLOB};
+END;
+
+p := PROJECT(ds, TRANSFORM(idxRecord, SELF.payload := LEFT.kids1; SELF := LEFT));
+
+i := INDEX(p, {uid}, {p}, prefix+'anindex'+IF(useEmbedded,'E','L'));
+
+lhsRec := RECORD
+ unsigned uid;
+END;
+lhs := DATASET(numParents, TRANSFORM(lhsRec, SELF.uid := (HASH(COUNTER) % numParents) + 1));
+j := JOIN(lhs, i, LEFT.uid=RIGHT.uid, KEEP(1));
+
+SEQUENTIAL(
+ BUILD(i, OVERWRITE);
+ OUTPUT(count(nofold(j)) - numParents);
+);

+ 3 - 0
testing/regress/ecl/key/casts.xml

@@ -94,3 +94,6 @@
 <Dataset name='falsex_6'>
  <Row><falsex_6>false</falsex_6></Row>
 </Dataset>
+<Dataset name='decimal'>
+ <Row><decimal>true</decimal></Row>
+</Dataset>

+ 5 - 0
testing/regress/ecl/key/indexblobs.xml

@@ -0,0 +1,5 @@
+<Dataset name='Result 1'>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><Result_2>0</Result_2></Row>
+</Dataset>

+ 45 - 0
testing/regress/ecl/key/stream.xml

@@ -0,0 +1,45 @@
+<Dataset name='Result 1'>
+ <Row><name>Mr Gavin</name></Row>
+ <Row><name>Mr John</name></Row>
+ <Row><name>Mr Bart</name></Row>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><name>Rev. Gavin</name></Row>
+ <Row><name>Rev. John</name></Row>
+ <Row><name>Rev. Bart</name></Row>
+</Dataset>
+<Dataset name='Result 3'>
+ <Row><name>Rev. Gavin</name></Row>
+ <Row><name>Rev. John</name></Row>
+ <Row><name>Rev. Bart</name></Row>
+</Dataset>
+<Dataset name='Result 4'>
+ <Row><name>Mr. Gavin</name></Row>
+ <Row><name>Mr. John</name></Row>
+ <Row><name>Mr. Bart</name></Row>
+</Dataset>
+<Dataset name='Result 5'>
+ <Row><name>Rev. Gavin</name></Row>
+ <Row><name>Rev. John</name></Row>
+ <Row><name>Rev. Bart</name></Row>
+</Dataset>
+<Dataset name='Result 6'>
+ <Row><name>Rev. Gavin</name></Row>
+ <Row><name>Rev. John</name></Row>
+ <Row><name>Rev. Bart</name></Row>
+</Dataset>
+<Dataset name='Result 7'>
+ <Row><name>Mr. Gavin</name></Row>
+ <Row><name>Mr. John</name></Row>
+ <Row><name>Mr. Bart</name></Row>
+</Dataset>
+<Dataset name='Result 8'>
+ <Row><name>Rev. Gavin</name></Row>
+ <Row><name>Rev. John</name></Row>
+ <Row><name>Rev. Bart</name></Row>
+</Dataset>
+<Dataset name='Result 9'>
+ <Row><name>Rev. Gavin</name></Row>
+ <Row><name>Rev. John</name></Row>
+ <Row><name>Rev. Bart</name></Row>
+</Dataset>

+ 21 - 0
testing/regress/ecl/key/timestamps2.xml

@@ -0,0 +1,21 @@
+<Dataset name='Result 1'>
+ <Row><Result_1>0</Result_1></Row>
+</Dataset>
+<Dataset name='Result 2'>
+ <Row><Result_2>true</Result_2></Row>
+</Dataset>
+<Dataset name='Result 3'>
+ <Row><Result_3>true</Result_3></Row>
+</Dataset>
+<Dataset name='Result 4'>
+ <Row><Result_4>true</Result_4></Row>
+</Dataset>
+<Dataset name='Result 5'>
+ <Row><Result_5>0</Result_5></Row>
+</Dataset>
+<Dataset name='Result 6'>
+ <Row><Result_6>0</Result_6></Row>
+</Dataset>
+<Dataset name='Result 7'>
+ <Row><Result_7>0</Result_7></Row>
+</Dataset>

+ 1 - 1
testing/regress/ecl/process.ecl

@@ -43,7 +43,7 @@ seed100 := dataset([{'',1,0},{'',2,0},{'',3,0},{'',4,0},{'',5,0},{'',6,0},{'',7,
                     {'',21,0},{'',22,0},{'',23,0},{'',24,0},{'',25,0},{'',26,0},{'',27,0},{'',28,0},{'',29,0},{'',30,0},
                     {'',31,0},{'',32,0},{'',33,0},{'',34,0},{'',35,0},{'',36,0},{'',37,0},{'',38,0},{'',39,0},{'',40,0},
                     {'',41,0},{'',42,0},{'',43,0},{'',44,0},{'',45,0},{'',46,0},{'',47,0},{'',48,0},{'',49,0},{'',50,0}
-                   ], rec);
+                   ], rec, DISTRIBUTED);
 
 //#########################################
 //All TRANSFORMs 

+ 140 - 0
testing/regress/ecl/stream.ecl

@@ -0,0 +1,140 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2019 HPCC Systems®.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+//version inline=false
+//version inline=true
+
+import ^ as root;
+useInline := #IFDEFINED(root.inline, true);
+
+
+namesRecord := RECORD
+    STRING name;
+END;
+
+dataset(namesRecord) blockedNames(string prefix) := BEGINC++
+#define numElements(x) (sizeof(x)/sizeof(x[0]))
+    const char * const names[] = {"Gavin","John","Bart"};
+    unsigned len=0;
+    for (unsigned i = 0; i < numElements(names); i++)
+        len += sizeof(size32_t) + lenPrefix + strlen(names[i]);
+
+    byte * p = (byte *)rtlMalloc(len);
+    unsigned offset = 0;
+    for (unsigned j = 0; j < numElements(names); j++)
+    {
+        *(size32_t *)(p + offset) = lenPrefix + strlen(names[j]);
+        memcpy(p+offset+sizeof(size32_t), prefix, lenPrefix);
+        memcpy(p+offset+sizeof(size32_t)+lenPrefix, names[j], strlen(names[j]));
+        offset += sizeof(size32_t) + lenPrefix + strlen(names[j]);
+    }
+
+    __lenResult = len;
+    __result = p;
+ENDC++;
+
+
+_linkcounted_ dataset(namesRecord) linkedNames(string prefix) := BEGINC++
+
+#define numElements(x) (sizeof(x)/sizeof(x[0]))
+    const char * const names[] = {"Gavin","John","Bart"};
+    __countResult = numElements(names);
+    __result = _resultAllocator->createRowset(numElements(names));
+    for (unsigned i = 0; i < numElements(names); i++)
+    {
+        const char * name = names[i];
+        size32_t lenName = strlen(name);
+
+        RtlDynamicRowBuilder rowBuilder(_resultAllocator);
+        unsigned len = sizeof(size32_t) + lenPrefix + lenName;
+        byte * row = rowBuilder.ensureCapacity(len, NULL);
+        *(size32_t *)(row) = lenPrefix + lenName;
+        memcpy(row+sizeof(size32_t), prefix, lenPrefix);
+        memcpy(row+sizeof(size32_t)+lenPrefix, name, lenName);
+        __result[i] = (byte *)rowBuilder.finalizeRowClear(len);
+    }
+
+ENDC++;
+
+
+
+streamed dataset(namesRecord) streamedNames(string prefix) := BEGINC++
+
+#define numElements(x) (sizeof(x)/sizeof(x[0]))
+
+class StreamDataset : public RtlCInterface, implements IRowStream
+{
+public:
+    StreamDataset(IEngineRowAllocator * _resultAllocator, unsigned _lenPrefix, const char * _prefix)
+    : resultAllocator(_resultAllocator),lenPrefix(_lenPrefix), prefix(_prefix)
+    {
+        count = 0;
+    }
+    RTLIMPLEMENT_IINTERFACE
+
+    virtual const void *nextRow()
+    {
+        const char * const names[] = {"Gavin","John","Bart"};
+        if (count >= numElements(names))
+            return NULL;
+
+        const char * name = names[count++];
+        size32_t lenName = strlen(name);
+
+        RtlDynamicRowBuilder rowBuilder(resultAllocator);
+        unsigned len = sizeof(size32_t) + lenPrefix + lenName;
+        byte * row = rowBuilder.ensureCapacity(len, NULL);
+        *(size32_t *)(row) = lenPrefix + lenName;
+        memcpy(row+sizeof(size32_t), prefix, lenPrefix);
+        memcpy(row+sizeof(size32_t)+lenPrefix, name, lenName);
+        return rowBuilder.finalizeRowClear(len);
+    }
+    virtual void stop()
+    {
+        count = (unsigned)-1;
+    }
+
+
+protected:
+    Linked<IEngineRowAllocator> resultAllocator;
+    unsigned count;
+    unsigned lenPrefix;
+    const char * prefix;
+};
+
+#body
+    return new StreamDataset(_resultAllocator, lenPrefix, prefix);
+ENDC++;
+
+
+
+
+
+results := ordered(
+    output(blockedNames('Mr '));
+    output(blockedNames('Rev. '));
+    output(blockedNames('Rev. '));
+    output(linkedNames('Mr. '));
+    output(linkedNames('Rev. '));
+    output(linkedNames('Rev. '));
+    output(streamedNames('Mr. '));
+    output(streamedNames('Rev. '));
+    output(streamedNames('Rev. '));
+);
+
+action := IF(useInline, NOTHOR(results), results);
+action;

+ 51 - 0
testing/regress/ecl/timestamps.ecl

@@ -0,0 +1,51 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2019 HPCC Systems®.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+//Disable comparison of the results for this test - results change too much - just ensure that it runs
+//nokey
+//nooutput
+
+myWuid := WORKUNIT;
+
+import lib_WorkunitServices.WorkunitServices;
+
+ds := DATASET(10000, transform({ unsigned x => unsigned x2 }, self.x := COUNTER; SELF.x2 := COUNTER * COUNTER), DISTRIBUTED);
+
+s := SORT(NOFOLD(ds), -x2);
+
+d := DEDUP(s, x, LOCAL);
+
+c := COUNT(NOFOLD(d));
+
+action := OUTPUT(c - 10000);// == 0
+
+
+stats := SEQUENTIAL(
+output(WorkunitServices.WorkunitTimeStamps(myWuid)),
+output(WorkunitServices.WorkunitTimings(myWuid)),
+output(WorkunitServices.WorkunitStatistics(myWuid, false, '')),
+output(WorkunitServices.WorkunitStatistics(myWuid, true, '')),
+output(choosen(WorkunitServices.WorkunitStatistics(myWuid, true, ''), 2)),      // use choosen to partially read and ensure rows are cleared up
+);
+
+#if (__TARGET_PLATFORM__ = 'thorlcr')
+   statsAction := NOTHOR(stats);
+#else
+   statsAction := stats;
+#end
+
+sequential(action, statsAction);

+ 53 - 0
testing/regress/ecl/timestamps2.ecl

@@ -0,0 +1,53 @@
+/*##############################################################################
+
+    HPCC SYSTEMS software Copyright (C) 2019 HPCC Systems®.
+
+    Licensed under the Apache License, Version 2.0 (the "License");
+    you may not use this file except in compliance with the License.
+    You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+############################################################################## */
+
+//nothor        - disable for the moment because of HPCC-21423
+myWuid := WORKUNIT;
+
+import lib_WorkunitServices.WorkunitServices;
+
+ds := DATASET(10000, transform({ unsigned x => unsigned x2 }, self.x := COUNTER; SELF.x2 := COUNTER * COUNTER), DISTRIBUTED);
+
+s := SORT(NOFOLD(ds), -x2);
+
+d := DEDUP(s, x, LOCAL);
+
+c := COUNT(NOFOLD(d));
+
+action := OUTPUT(c - 10000);// == 0
+
+countAll := COUNT(WorkunitServices.WorkunitStatistics(myWuid, true, ''));
+countTimes := COUNT(WorkunitServices.WorkunitStatistics(myWuid, true, 'measure[ns]'));
+
+numStartsA := SUM(WorkunitServices.WorkunitStatistics(myWuid, true, '')(name = 'NumStarts'), value);
+numStartsB := SUM(WorkunitServices.WorkunitStatistics(myWuid, true, 'statistic[NumStarts]'), value);
+numStopsA := SUM(WorkunitServices.WorkunitStatistics(myWuid, true, '')(name = 'NumStops'), value);
+numStopsB := SUM(WorkunitServices.WorkunitStatistics(myWuid, true, 'statistic[NumStops]'), value);
+
+sequential(
+    action,
+    nothor(
+        ordered(
+            output(countTimes < countAll);
+            output(countTimes > 0);
+            output(numStartsA > 0);
+            output(numStartsA - numStartsB); // == 0 - since two ways of getting the same value
+            output(numStopsA - numStopsB); // == 0 - since two ways of getting the same value
+            output(numStartsA - numStopsB); // == 0; - if the graph completed normally
+        )
+    )
+);

+ 9 - 5
thorlcr/activities/iterate/thiterateslave.cpp

@@ -28,7 +28,7 @@ class IterateSlaveActivityBase : public CSlaveActivity
 
     OwnedConstThorRow first;
 protected:
-    Owned<IThorRowInterfaces> inrowif;
+    IThorRowInterfaces *inrowif = nullptr;
     bool global;
     bool eof, nextPut;
     rowcount_t count;
@@ -69,7 +69,7 @@ public:
             if (prev)
             {
                 CMemoryRowSerializer msz(msg);
-                ::queryRowSerializer(input)->serialize(msz, (const byte *)prev);
+                inrowif->queryRowSerializer()->serialize(msz, (const byte *)prev);
             }
             if (!queryJobChannel().queryJobComm().send(msg, queryJobChannel().queryMyRank()+1, mpTag)) // to next
                 return;
@@ -86,7 +86,6 @@ public:
         }
         count = 0;
         eof = nextPut = false;
-        inrowif.set(::queryRowInterfaces(queryInput(0)));
     }
     virtual void stop() override
     {
@@ -111,6 +110,7 @@ public:
         : IterateSlaveActivityBase(_container,_global)
     {
         helper = static_cast <IHThorIterateArg *> (queryHelper());
+        inrowif = this;
     }
     virtual void start() override
     {
@@ -184,14 +184,18 @@ class CProcessSlaveActivity : public IterateSlaveActivityBase
     OwnedConstThorRow left;
     OwnedConstThorRow right;
     OwnedConstThorRow nextright;
-    Owned<IEngineRowAllocator> rightRowAllocator;
+    Owned<IThorRowInterfaces> rightOutputRowIf;
+    IEngineRowAllocator *rightRowAllocator = nullptr;
+
 public:
 
     CProcessSlaveActivity(CGraphElementBase *_container, bool _global) 
         : IterateSlaveActivityBase(_container,_global)
     {
         helper = static_cast <IHThorProcessArg *> (queryHelper());
-        rightRowAllocator.setown(getRowAllocator(helper->queryRightRecordSize()));
+        rightOutputRowIf.setown(createRowInterfaces(helper->queryRightRecordSize()));
+        inrowif = rightOutputRowIf;
+        rightRowAllocator = rightOutputRowIf->queryRowAllocator();
     }
     CATCH_NEXTROW()
     {

+ 2 - 1
thorlcr/activities/nsplitter/thnsplitterslave.cpp

@@ -350,7 +350,6 @@ public:
             {
                 writer.stop();
                 PARENT::stop();
-                inputPrepared = false;
             }
         }
     }
@@ -466,6 +465,8 @@ void CSplitterOutput::start()
 // IEngineRowStream
 void CSplitterOutput::stop()
 { 
+    if (stopped)
+        return;
     stopped = true;
     activity.inputStopped(outIdx);
     dataLinkStop();

+ 7 - 5
thorlcr/graph/thgraphslave.hpp

@@ -65,11 +65,13 @@ public:
 #ifdef _TESTING
         owner.ActPrintLog("ITDL starting for output %d", outputId);
 #endif
-#ifdef _TESTING
-        bool started = hasStarted();
-        bool stopped = hasStopped();
-        assertex(!hasStarted() || hasStopped());      // ITDL started twice
-#endif
+        if (hasStarted())
+        {
+            if (!hasStopped())
+                throw MakeActivityException(&owner, 0, "Starting without being stopped 1st");
+            else
+                throw MakeActivityException(&owner, 0, "Started and stopped states both set");
+        }
         icount = 0;
         count = (count & THORDATALINK_COUNT_MASK) | THORDATALINK_STARTED;
     }

+ 3 - 1
thorlcr/slave/slavmain.cpp

@@ -1151,6 +1151,8 @@ class CKJService : public CSimpleInterfaceOf<IKJService>, implements IThreaded,
         cachedFetchContexts.clear();
         activityContextsHT.clear();
         keyLookupContextsHT.clear();
+        cachedKMsMRU.kill();
+        cachedFCsMRU.kill();
         currentJob = nullptr;
         numKMCached = 0;
         numFCCached = 0;
@@ -1182,7 +1184,7 @@ public:
             auto it = cachedKMs.find(oldest.queryCtx().queryKey());
             assertex(it != cachedKMs.end());
             CKMKeyEntry *kme = it->second;
-            verifyex(kme->remove(kmc));
+            verifyex(kme->remove(&oldest));
             if (0 == kme->count())
                 cachedKMs.erase(it);
             cachedKMsMRU.remove(0);